blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 4
721
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 5
91
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 321
values | visit_date
timestamp[ns]date 2016-08-12 09:31:09
2023-09-06 10:45:07
| revision_date
timestamp[ns]date 2010-09-28 14:01:40
2023-09-06 06:22:19
| committer_date
timestamp[ns]date 2010-09-28 14:01:40
2023-09-06 06:22:19
| github_id
int64 426
681M
| star_events_count
int64 101
243k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[ns]date 2012-06-28 18:51:49
2023-09-14 21:59:16
⌀ | gha_created_at
timestamp[ns]date 2008-02-11 22:55:26
2023-08-10 11:14:58
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 26
values | language
stringclasses 2
values | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 6
10.2M
| extension
stringclasses 115
values | filename
stringlengths 3
113
| content
stringlengths 6
10.2M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
647f594b1d03d31d1e230c66200c22b7fad4a48d
|
0a16415a4e7657570b95e96c16a9bffbeeddd279
|
/tools/kapture_import_nvm.py
|
51630405a5daa7db60b1f532cab574f15d4a7757
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
naver/kapture
|
13fda56217751ac12c77f0be658815ea6a598fa8
|
392677fb974aaa7c9c7c65d05c09abdb8a681124
|
refs/heads/main
| 2023-08-24T16:25:20.195565
| 2023-03-29T15:58:07
| 2023-03-29T15:58:07
| 275,164,995
| 386
| 64
|
BSD-3-Clause
| 2023-09-04T15:48:49
| 2020-06-26T13:40:30
|
Python
|
UTF-8
|
Python
| false
| false
| 16,410
|
py
|
kapture_import_nvm.py
|
#!/usr/bin/env python3
# Copyright 2020-present NAVER Corp. Under BSD 3-clause license
"""
This script imports an NVM model in the kapture format.
VisualSFM saves SfM workspaces into NVM files, which contain input image paths and multiple 3D models.
Below is the format description
NVM_V3 [optional calibration] # file version header
<Model1> <Model2> ... # multiple reconstructed models
<Empty Model containing the unregistered Images> # number of camera > 0, but number of points = 0
<0> # 0 camera to indicate the end of model section
<Some comments describing the PLY section>
<Number of PLY files> <List of indices of models that have associated PLY>
The [optional calibration] exists only if you use "Set Fixed Calibration" Function
FixedK fx cx fy cy
Each reconstructed <model> contains the following
<Number of cameras> <List of cameras>
<Number of 3D points> <List of points>
The cameras and 3D points are saved in the following format
<Camera> = <File name> <focal length> <quaternion WXYZ> <camera center> <radial distortion> 0
<Point> = <XYZ> <RGB> <number of measurements> <List of Measurements>
<Measurement> = <Image index> <Feature Index> <xy>
"""
import logging
import os
import os.path as path
import PIL
from PIL import Image
import numpy as np
import quaternion
import argparse
from typing import List, Optional, Set
# kapture
import path_to_kapture # noqa: F401
import kapture
import kapture.utils.logging
from kapture.io.structure import delete_existing_kapture_files
from kapture.io.csv import kapture_to_dir
from kapture.io.records import TransferAction, import_record_data_from_dir_auto
import kapture.io.features
# Module logger for this importer.
logger = logging.getLogger('nvm')
# An NVM camera carries a single focal length and one radial distortion
# coefficient, which maps onto kapture's SIMPLE_RADIAL camera model.
MODEL = kapture.CameraType.SIMPLE_RADIAL
# Name under which NVM feature measurements are stored as kapture keypoints.
LOCAL_FEATURE_TYPE = 'sift'
def import_nvm(nvm_file_path: str,
               nvm_images_path: str,
               kapture_path: str,
               filter_list_path: Optional[str],
               ignore_trajectories: bool,
               add_reconstruction: bool,
               force_overwrite_existing: bool = False,
               images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports nvm data to kapture format.

    :param nvm_file_path: path to nvm file
    :param nvm_images_path: path to NVM images directory.
    :param kapture_path: path to kapture root directory.
    :param filter_list_path: path to the optional file containing a list of images to process
    :param ignore_trajectories: if True, will not create trajectories
    :param add_reconstruction: if True, will add observations, keypoints and 3D points.
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """
    # TODO implement [optional calibration]
    # doc : http://ccwu.me/vsfm/doc.html#nvm
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path, force_erase=force_overwrite_existing)

    logger.info('loading all content...')
    # if there is a filter list, parse it
    # keep it as Set[str] to easily find images
    if filter_list_path:
        with open(filter_list_path) as file:
            file_content = file.readlines()
        # remove end line char and empty lines
        filter_list = {line.rstrip() for line in file_content if line != '\n'}
    else:
        filter_list = None

    # now do the nvm: whole file is read into memory and walked line by line
    with open(nvm_file_path) as file:
        nvm_content = file.readlines()
    # remove end line char and empty lines
    nvm_content = [line.rstrip() for line in nvm_content if line != '\n']
    # only NVM_V3 is supported
    assert nvm_content[0] == "NVM_V3"
    # offset represents the line pointer
    offset = 1
    # camera_id_offset keeps tracks of used camera_id in case of multiple reconstructed models
    camera_id_offset = 0
    # point_id_offset keeps tracks of used point_id in case of multiple reconstructed models
    point_id_offset = 0

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories() if not ignore_trajectories else None
    if add_reconstruction:
        observations = kapture.Observations()
        # NVM measurements are 2D (x, y) image points
        keypoints = {LOCAL_FEATURE_TYPE: kapture.Keypoints(LOCAL_FEATURE_TYPE, np.float32, 2)}
        points3d = []
    else:
        observations = None
        keypoints = None
        points3d = None

    # break if number of cameras == 0 or reached end of file
    while True:
        # <Model1> <Model2> ...
        # Each reconstructed <model> contains the following
        # <Number of cameras> <List of cameras>
        # <Number of 3D points> <List of points>
        # In practice,
        # <Number of cameras>
        # <List of cameras>, one per line
        # <Number of 3D points>
        # <List of points>, one per line
        number_of_cameras = int(nvm_content[offset])
        offset += 1
        if number_of_cameras == 0:  # a line with <0> signify the end of models
            break

        logger.debug('importing model cameras...')
        # parse all cameras for current model
        image_idx_to_image_name = parse_cameras(number_of_cameras,
                                                nvm_content,
                                                offset,
                                                camera_id_offset,
                                                filter_list,
                                                nvm_images_path,
                                                cameras,
                                                images,
                                                trajectories)
        offset += number_of_cameras
        camera_id_offset += number_of_cameras

        # parse all points3d
        number_of_points = int(nvm_content[offset])
        offset += 1
        if points3d is not None and number_of_points > 0:
            assert keypoints is not None
            assert observations is not None
            logger.debug('importing model points...')
            parse_points3d(kapture_path,
                           number_of_points,
                           nvm_content,
                           offset,
                           point_id_offset,
                           image_idx_to_image_name,
                           filter_list,
                           points3d,
                           keypoints[LOCAL_FEATURE_TYPE],
                           observations)

        point_id_offset += number_of_points
        offset += number_of_points

        # reached end of file?
        if offset >= len(nvm_content):
            break

    # do not export values if none were found.
    if points3d is not None:
        points3d = kapture.Points3d(points3d)

    # import (copy) image files.
    logger.info('import image files ...')
    images_filenames = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(nvm_images_path, kapture_path, images_filenames, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images, trajectories=trajectories,
                                       points3d=points3d, keypoints=keypoints, observations=observations)

    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
def parse_cameras(number_of_cameras: int,
                  nvm_content: List[str],
                  offset: int,
                  camera_id_offset: int,
                  filter_list: Optional[Set[str]],
                  nvm_images_path: str,
                  cameras: kapture.Sensors,
                  images: kapture.RecordsCamera,
                  trajectories: Optional[kapture.Trajectories]) -> List[str]:
    """
    Parse the <List of cameras> section
    Fill cameras, images, trajectories in place.
    Image files must exist to be able to retrieve height and width.

    :param number_of_cameras: number of cameras to process
    :param nvm_content: content of NVM file, one entry per non-empty line
    :param offset: index in nvm_content of the first camera line of this model
    :param camera_id_offset: id assigned to the first camera of this model
        (keeps sensor ids unique across multiple reconstructed models)
    :param filter_list: optional list of images to process
    :param nvm_images_path: path to NVM images directory
    :param cameras: kapture cameras to extend
    :param images: kapture images to extend
    :param trajectories: kapture trajectories to extend
    :return: list of images with position = index
    """
    image_idx_to_image_name = []
    # parse all cameras
    for i in range(0, number_of_cameras):
        # <Camera> = <File name> <focal length> <quaternion WXYZ> <camera center> <radial distortion> 0
        line = nvm_content[i + offset].split()
        timestamp = i + camera_id_offset
        camera_id = f'sensor{timestamp}'
        image_file_name = line[0]
        # always record the name, even for filtered-out images:
        # point measurements reference cameras by their NVM index.
        image_idx_to_image_name.append(image_file_name)
        if filter_list is not None and image_file_name not in filter_list:
            # file_name is not in the list, do not add it
            continue
        focal_length = float(line[1])
        quaternion_wxyz = quaternion.from_float_array([float(v) for v in line[2:6]])
        camera_center = np.array([float(v) for v in line[6:9]])
        # NVM stores the distortion with the opposite sign convention; see
        # https://github.com/colmap/colmap/blob/67e96894d4beed7cc93f1c0755a98d3664f85e63/src/base/reconstruction.cc#L891
        radial_distortion = -float(line[9])  # SIGN !
        try:
            # lazy open: only the header is read, to get the image size
            with Image.open(path.join(nvm_images_path, image_file_name)) as im:
                width, height = im.size
        except (OSError, PIL.UnidentifiedImageError):
            # It is not a valid image: skip it
            logger.info(f'Skipping invalid image file {image_file_name}')
            continue
        # NVM stores the camera center C; kapture poses store translation t = -R @ C
        translation = - np.matmul(quaternion.as_rotation_matrix(quaternion_wxyz), camera_center)
        pose = kapture.PoseTransform(quaternion_wxyz, translation)
        # principal point is not stored in NVM: assume image center
        camera = kapture.Camera(MODEL, [width, height, focal_length, width / 2, height / 2, radial_distortion])
        cameras[camera_id] = camera
        images[(timestamp, camera_id)] = image_file_name
        if trajectories is not None:
            trajectories[(timestamp, camera_id)] = pose
    return image_idx_to_image_name
def parse_points3d(kapture_path: str,
                   number_of_points: int,
                   nvm_content: List[str],
                   offset: int,
                   point_id_offset: int,
                   image_idx_to_image_name: List[str],
                   filter_list: Optional[Set[str]],
                   points3d: List[List[float]],
                   keypoints: kapture.Keypoints,
                   observations: kapture.Observations) -> None:
    """
    Parse the <List of points> section
    Fill points3d, keypoints, observations in place
    Write keypoints to disk.

    :param kapture_path: path to kapture root directory.
    :param number_of_points: number of points to process
    :param nvm_content: content of NVM file, one entry per non-empty line
    :param offset: index in nvm_content of the first 3D-point line of this model
    :param point_id_offset: id assigned to the first 3D point of this model
        (keeps point ids unique across multiple reconstructed models)
    :param image_idx_to_image_name: list of images in their index order
    :param filter_list: optional list of images to process
    :param points3d: list of 3D points to extend
    :param keypoints: kapture keypoints list to extend
    :param observations: kapture observations to extend
    """
    # (image_name, nvm_feature_id ) -> keypoint_id
    known_keypoints = {}
    # image_name -> list of [x, y]; flushed to disk once at the end
    local_keypoints = {}
    for i in range(0, number_of_points):
        # <Point> = <XYZ> <RGB> <number of measurements> <List of Measurements>
        fields = nvm_content[i + offset].split()
        points3d.append([float(v) for v in fields[0:6]])
        # parse observations
        number_of_measurements = int(fields[6])
        for j in range(0, number_of_measurements):
            # parse measurement: <Image index> <Feature Index> <xy>
            image_index = int(fields[7 + 4 * j + 0])
            feature_index = int(fields[7 + 4 * j + 1])
            x = float(fields[7 + 4 * j + 2])
            y = float(fields[7 + 4 * j + 3])
            # retrieve filename. if added, then proceed to add features / observations
            file_name = image_idx_to_image_name[image_index]
            if filter_list is not None and file_name not in filter_list:
                # file_name is not in the list, do not add it
                continue
            # init local_keypoints if needed
            if file_name not in local_keypoints:
                local_keypoints[file_name] = []
            # do not add the same keypoint twice
            if (file_name, feature_index) not in known_keypoints:
                # in the kapture format, keypoint id is different. Note that it starts from 0
                known_keypoints[(file_name, feature_index)] = len(local_keypoints[file_name])
                local_keypoints[file_name].append([x, y])
            keypoint_idx = known_keypoints[(file_name, feature_index)]
            point3d_idx = i + point_id_offset
            observations.add(point3d_idx, LOCAL_FEATURE_TYPE, file_name, keypoint_idx)
    # finally, convert local_keypoints to np.ndarray and add them to the global keypoints variable
    for image_filename, keypoints_array in local_keypoints.items():
        keypoints_np_array = np.array(keypoints_array, dtype=np.float32)
        keypoints_filepath = kapture.io.features.get_keypoints_fullpath(LOCAL_FEATURE_TYPE,
                                                                        kapture_path,
                                                                        image_filename)
        kapture.io.features.image_keypoints_to_file(keypoints_filepath, keypoints_np_array)
        keypoints.add(image_filename)
def import_nvm_command_line() -> None:
    """
    Run the NVM-to-kapture import with parameters parsed from the command line.
    """
    arg_parser = argparse.ArgumentParser(
        description='import nvm file to the kapture format.')

    # logging verbosity: -v/--verbose and -q/--silent are mutually exclusive
    verbosity = arg_parser.add_mutually_exclusive_group()
    verbosity.add_argument(
        '-v', '--verbose', nargs='?', default=logging.WARNING, const=logging.INFO,
        action=kapture.utils.logging.VerbosityParser,
        help='verbosity level (debug, info, warning, critical, ... or int value) [warning]')
    verbosity.add_argument(
        '-q', '--silent', '--quiet', action='store_const', dest='verbose', const=logging.CRITICAL)

    arg_parser.add_argument('-f', '-y', '--force', action='store_true', default=False,
                            help='Force delete output if already exists.')
    # import options ###################################################################################################
    arg_parser.add_argument('-i', '--input', required=True, help='input path to nvm file')
    arg_parser.add_argument('-im', '--images', default=None,
                            help='path to images directory.')
    transfer_choices = ", ".join(a.name for a in TransferAction)
    arg_parser.add_argument('--image_transfer', type=TransferAction, default=TransferAction.link_absolute,
                            help=f'How to import images [link_absolute], '
                                 f'choose among: {transfer_choices}')
    arg_parser.add_argument('-o', '--output', required=True, help='output directory.')
    arg_parser.add_argument('--filter-list', default="",
                            help=('path to the filter list (optional), '
                                  'this file contains a list a images, '
                                  'one per line that will be kept in the output. '
                                  'images not in the list will be skipped'))
    arg_parser.add_argument('--ignore-trajectories', action='store_true', default=False,
                            help='Do not export extrinsics.')
    arg_parser.add_argument('--add-reconstruction', action='store_true', default=False,
                            help='add the 3d points/keypoints/observations to the output')
    ####################################################################################################################
    args = arg_parser.parse_args()

    logger.setLevel(args.verbose)
    if args.verbose <= logging.DEBUG:
        # also let kapture express its logs
        kapture.utils.logging.getLogger().setLevel(args.verbose)

    import_nvm(args.input, args.images, args.output, args.filter_list,
               args.ignore_trajectories, args.add_reconstruction, args.force, args.image_transfer)


if __name__ == '__main__':
    import_nvm_command_line()
|
ec19c205bb8d30cfcd469351f321e88872ca4542
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/impl/gen/view_models/views/lobby/battle_matters/battle_matters_exchange_rewards_model.py
|
2049d1de7682e1e458d4a583f79410dd76c233cf
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 933
|
py
|
battle_matters_exchange_rewards_model.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/gen/view_models/views/lobby/battle_matters/battle_matters_exchange_rewards_model.py
from frameworks.wulf import ViewModel
class BattleMattersExchangeRewardsModel(ViewModel):
    """View model for the Battle Matters exchange-rewards lobby window.

    Decompiled from Python 2.7 bytecode: exposes two string properties
    ('vehicleName' and 'vehicleUserName') to the Wulf UI framework.
    """
    __slots__ = ()

    def __init__(self, properties=2, commands=0):
        # properties=2 matches the two string properties registered in _initialize
        super(BattleMattersExchangeRewardsModel, self).__init__(properties=properties, commands=commands)

    def getVehicleName(self):
        # property slot 0: 'vehicleName'
        return self._getString(0)

    def setVehicleName(self, value):
        self._setString(0, value)

    def getVehicleUserName(self):
        # property slot 1: 'vehicleUserName'
        # NOTE(review): presumably the localized display name, vs. the internal
        # name in slot 0 — confirm against callers.
        return self._getString(1)

    def setVehicleUserName(self, value):
        self._setString(1, value)

    def _initialize(self):
        super(BattleMattersExchangeRewardsModel, self)._initialize()
        self._addStringProperty('vehicleName', '')
        self._addStringProperty('vehicleUserName', '')
|
0675ec33b47501852a2d6528b1dd5964a4a33e92
|
050fc5ca698dfd7612dee42aa980fc7b5eee40a2
|
/skywalking/plugins/sw_happybase.py
|
0df76ae1ccfb4b92531ba7c647401acc5610d99e
|
[
"Apache-2.0"
] |
permissive
|
apache/skywalking-python
|
8ac6ce06630c519f9984a45e74c1fcc88cf5b9d6
|
1a360228c63cd246dd4c5dd8e1f09bdd5556ad7d
|
refs/heads/master
| 2023-09-05T02:45:56.225937
| 2023-08-28T22:19:24
| 2023-08-28T22:19:24
| 261,456,329
| 178
| 122
|
Apache-2.0
| 2023-08-28T22:19:26
| 2020-05-05T12:13:49
|
Python
|
UTF-8
|
Python
| false
| false
| 4,734
|
py
|
sw_happybase.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from skywalking import Layer, Component
from skywalking.trace.context import get_context
from skywalking.trace.tags import TagDbType, TagDbStatement
# Link(s) associated with this plugin (shown in plugin documentation).
link_vector = ['https://happybase.readthedocs.io']
# Supported versions of the instrumented package.
# NOTE(review): the '>=3.7' key presumably denotes the Python version and the
# list the happybase versions tested — confirm against the plugin doc generator.
support_matrix = {
    'happybase': {
        '>=3.7': ['1.2.0'],
    }
}
# Free-form note for the plugin documentation (empty for this plugin).
note = """"""
def install():
    """
    Monkey-patch happybase's Table/Connection methods so that every HBase
    operation is traced with a SkyWalking database exit span.
    """
    from happybase import Table
    from happybase import Connection

    # Keep references to the original, un-instrumented methods;
    # the wrappers below close over them and delegate the actual work.
    _row = Table.row
    _rows = Table.rows
    _cells = Table.cells
    _scan = Table.scan
    _put = Table.put
    _delete = Table.delete
    _create_table = Connection.create_table

    def bytes2str(value):
        # happybase may return table/row names as bytes; decode them so they
        # can be embedded in the span operation name.
        if isinstance(value, bytes):
            return value.decode()
        return value

    def _sw_create_table(this, name, families):
        # Trace table creation as 'HBase/create/<table>' against the
        # connection's host:port peer.
        context = get_context()
        peer = ','.join([f'{this.host}:{str(this.port)}'])
        table_name = name
        with context.new_exit_span(op=f'HBase/create/{table_name}', peer=peer,
                                   component=Component.HBase) as span:
            span.layer = Layer.Database
            span.tag(TagDbType('HBase'))
            span.tag(TagDbStatement(''))
            _create_table(this, name, families)

    def _sw_hbase_opt(table, name, fun, row, is_return=True):
        """Run `fun` inside an exit span named 'HBase/<name>/<table>/<row>'.

        :param is_return: when False, the wrapped call's result is discarded
            (used for put/delete, which return nothing).
        """
        context = get_context()
        peer = ','.join([f'{table.connection.host}:{str(table.connection.port)}'])
        table_name = bytes2str(table.name)
        row = bytes2str(row)
        with context.new_exit_span(op=f'HBase/{name}/{table_name}/{row}', peer=peer,
                                   component=Component.HBase) as span:
            span.layer = Layer.Database
            span.tag(TagDbType('HBase'))
            span.tag(TagDbStatement(''))
            if is_return:
                return fun()
            else:
                fun()

    def _sw_row(this, row, columns=None, timestamp=None, include_timestamp=False):
        def __sw_row():
            return _row(this, row, columns, timestamp, include_timestamp)
        res = _sw_hbase_opt(this, 'row', __sw_row, row)
        return res

    def _sw_rows(this, rows, columns=None, timestamp=None, include_timestamp=False):
        def __sw_rows():
            return _rows(this, rows, columns, timestamp, include_timestamp)
        # multiple rows requested: name the span after the first one, if any
        row = ''
        if rows and isinstance(rows, list):
            row = rows[0]
        res = _sw_hbase_opt(this, 'rows', __sw_rows, row)
        return res

    def _sw_cells(this, row, column, versions=None, timestamp=None, include_timestamp=False):
        def __sw_cells():
            return _cells(this, row, column, versions, timestamp, include_timestamp)
        res = _sw_hbase_opt(this, 'cells', __sw_cells, row)
        return res

    def _sw_scan(this, row_start=None, row_stop=None, row_prefix=None,
                 columns=None, filter=None, timestamp=None,
                 include_timestamp=False, batch_size=1000, scan_batching=None,
                 limit=None, sorted_columns=False, reverse=False):
        def __sw_scan():
            return _scan(this, row_start, row_stop, row_prefix,
                         columns, filter, timestamp,
                         include_timestamp, batch_size, scan_batching,
                         limit, sorted_columns, reverse)
        # the scan span is named after the start row (may be None)
        res = _sw_hbase_opt(this, 'scan', __sw_scan, row_start)
        return res

    def _sw_put(this, row, data, timestamp=None, wal=True):
        def __sw_put():
            return _put(this, row, data, timestamp, wal)
        # put returns nothing: is_return=False
        _sw_hbase_opt(this, 'put', __sw_put, row, False)

    def _sw_delete(this, row, columns=None, timestamp=None, wal=True):
        def __sw_delete():
            return _delete(this, row, columns, timestamp, wal)
        # delete returns nothing: is_return=False
        _sw_hbase_opt(this, 'delete', __sw_delete, row, False)

    # swap the instrumented versions in
    Table.row = _sw_row
    Table.rows = _sw_rows
    Table.cells = _sw_cells
    Table.scan = _sw_scan
    Table.put = _sw_put
    Table.delete = _sw_delete
    Connection.create_table = _sw_create_table
|
388a4851404674362e3720ac81fc7fa27b7334b3
|
b1f587ee6eed481af0e453903e1c1ae7a2e7ef87
|
/pyxtal/miscellaneous/test_get_csd.py
|
284e1b350b8c7109701709dc1003b43a933bd132
|
[
"MIT"
] |
permissive
|
qzhu2017/PyXtal
|
cdae49664c876c8d2b452b0c3f0db36587c34532
|
9fdb4ec509da6a97a239a3ae4fcfa427dcf32eff
|
refs/heads/master
| 2023-08-20T08:20:37.452641
| 2023-08-11T15:15:36
| 2023-08-11T15:15:36
| 128,165,891
| 194
| 62
|
MIT
| 2023-07-01T13:00:11
| 2018-04-05T06:08:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,054
|
py
|
test_get_csd.py
|
import warnings
warnings.filterwarnings('ignore')
from ase.db import connect
from pyxtal import pyxtal
from pyxtal.util import search_csd_entries_by_code

# Fetch molecular crystal structures from the CSD by reference code and
# archive them (with their CSD metadata) into the ASE database `test.db`.
strucs = []
for csd in [#'GLYCIN01', 'HUSVUM', 'HIHDUZ', 'DOPSIK', 'TABQOG03', 'DUPXOD',
            #'GLYMSN10', 'GOBFAF', 'GOBZOO', 'ARAZEZ','HMBENZ21', 'BEMQUF', 'KUZJIB',
            #NUMRET long
            'SUZGUS', 'MABCOL', 'UMEQUB', 'XACXOS', 'AMBZPH',
            'AMISAS', 'APAPOY', 'HIWZIZ', 'OHUXEV', 'QOJPAI',
            'HOWTOF', 'NUGCOI', 'NUJMAH', 'NUNJIP', 'ZUZCEF',
            'VITRUL', 'VOLCOP', 'QOLXEX02', 'UKULAQ',
            'ACEMID02', 'AFIPIP', 'BUTHEE', 'CELKEK', 'HAHDID',
            'HEXWIQ01', 'HIFWOJ', 'HIFZIF', 'HIRBAM', 'EHINOB',
            'HUKYIW', 'HYQUIN06', 'ISIVIR', 'JATBIQ', 'JUDBUI',
            'AFUVAZ', 'BZCBNL01', 'NUDREK',
            'HEVRUV', 'JAPCIM', 'TCYETY02', 'ZZZWOU01', 'ELIFOX',
            'GADLOQ', 'GAJLAI', 'MAMFIT', 'OKIXIS', 'NOLRUC',
            'PACNOA', 'SABQEV', 'SAJWEJ', 'HAMQOC', 'NUMRET',
            # FIX: a missing comma after 'HONWIQ' used to merge it with
            # 'TROXAN' into the bogus code 'HONWIQTROXAN' via implicit
            # string concatenation, silently dropping both entries.
            'HCHXDO', 'HETVUY', 'HIRYOY', 'HIYLOQ01', 'HONWIQ',
            'TROXAN', 'PYRZIN', 'ACETAC', 'ADAMAN01',
            'TRIZIN', 'HXMTAM', 'PYRZOL', 'CYHEXO', 'CYTSIN',
            'IMAZOL01', 'URACIL', 'CYANAM01', 'FORMAM', 'SUCACB02',
            'ECARBM01', 'XAFQAZ', 'KONTIQ', 'XATJOT', 'XAFQON'
            ]:
    codes = search_csd_entries_by_code(csd)
    for code in codes:
        c = pyxtal(molecular=True)
        c.from_CSD(code)
        strucs.append(c)
        print(c)
        #if code == 'TRIZIN04': print(c.to_file()); print(c); import sys; sys.exit()

with connect('test.db') as db:
    for xtal in strucs:
        # fall back to a placeholder CCDC number when the entry lacks one
        if xtal.tag['ccdc_number'] is None: xtal.tag['ccdc_number'] = 1240839
        kvp = {
            "csd_code": xtal.tag['csd_code'],
            "mol_smi": xtal.tag['smiles'],
            "ccdc_number": xtal.tag['ccdc_number'],
            #"publication": xtal.tag['publication'],
        }
        print(kvp)
        db.write(xtal.to_ase(), key_value_pairs=kvp)
|
93758fe9ec323250f80daac9bc1155046cb3e452
|
ea57d267ab31480d8d731b2c095e9da9ad989133
|
/aea/test_tools/generic.py
|
0f5f8c36a26f44cf224a9603c7dc02ee43c5cf42
|
[
"Apache-2.0"
] |
permissive
|
fetchai/agents-aea
|
6d034f1db6f3beacf31dac2f5a1baaa60c8edb7d
|
bec49adaeba661d8d0f03ac9935dc89f39d95a0d
|
refs/heads/main
| 2023-08-08T23:19:06.276643
| 2023-02-04T10:46:39
| 2023-02-04T10:46:39
| 203,558,879
| 192
| 58
|
Apache-2.0
| 2023-07-19T04:45:26
| 2019-08-21T10:12:47
|
Python
|
UTF-8
|
Python
| false
| false
| 7,470
|
py
|
generic.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2023 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains generic tools for AEA end-to-end testing."""
from collections import OrderedDict
from pathlib import Path
from typing import Any, Dict, List, cast
from aea.configurations.base import (
CRUDCollection,
ComponentConfiguration,
PackageConfiguration,
PackageType,
PublicId,
SkillConfig,
dependencies_from_json,
)
from aea.configurations.manager import handle_dotted_path
from aea.exceptions import enforce
from aea.helpers.file_io import write_envelope
from aea.helpers.io import open_file
from aea.helpers.yaml_utils import yaml_dump, yaml_dump_all
from aea.mail.base import Envelope
from aea.test_tools.constants import DEFAULT_AUTHOR
def write_envelope_to_file(envelope: Envelope, file_path: str) -> None:
    """
    Append an envelope to the given file.

    :param envelope: Envelope.
    :param file_path: the file path (opened in binary append mode)
    """
    target = Path(file_path)
    with target.open("ab+") as f:
        write_envelope(envelope, f)
def read_envelope_from_file(file_path: str) -> Envelope:
    """
    Read an envelope from a file.

    :param file_path: the file path.
    :return: envelope
    :raises AEAEnforceError: if the file does not contain exactly two lines
        or the trailing separator is malformed.
    """
    lines = []
    with open(Path(file_path), "rb+") as f:
        lines.extend(f.readlines())

    # exactly two raw lines are expected — presumably the header part and the
    # message/terminator part as produced by write_envelope_to_file (confirm)
    enforce(len(lines) == 2, "Did not find two lines.")
    line = lines[0] + lines[1]
    # wire format: to,sender,protocol_specification_id,message[,\n]
    # maxsplit=4 keeps commas inside the message payload intact
    to_b, sender_b, protocol_specification_id_b, message, end = line.strip().split(
        b",", maxsplit=4
    )
    to = to_b.decode("utf-8")
    sender = sender_b.decode("utf-8")
    protocol_specification_id = PublicId.from_str(
        protocol_specification_id_b.decode("utf-8")
    )
    # the split remainder after the message must be empty or a lone newline
    enforce(end in [b"", b"\n"], "Envelope improperly formatted.")
    return Envelope(
        to=to,
        sender=sender,
        protocol_specification_id=protocol_specification_id,
        message=message,
    )
def _nested_set(
    configuration_obj: PackageConfiguration, keys: List, value: Any
) -> None:
    """
    Nested set a value to a dict. Force sets the values, overwriting any present values, but maintaining schema validation.

    :param configuration_obj: configuration object
    :param keys: list of keys.
    :param value: a value to set.
    """

    def get_nested_ordered_dict_from_dict(input_dict: Dict) -> Dict:
        # recursively convert a plain dict into nested OrderedDicts
        _dic = {}
        for _key, _value in input_dict.items():
            if isinstance(_value, dict):
                _dic[_key] = OrderedDict(get_nested_ordered_dict_from_dict(_value))
            else:
                _dic[_key] = _value
        return _dic

    def get_nested_ordered_dict_from_keys_and_value(
        keys: List[str], value: Any
    ) -> Dict:
        # build {k1: {k2: {... value}}} from the key path, innermost first
        _dic = (
            OrderedDict(get_nested_ordered_dict_from_dict(value))
            if isinstance(value, dict)
            else value
        )
        for key in keys[::-1]:
            _dic = OrderedDict({key: _dic})
        return _dic

    root_key = keys[0]
    if (
        isinstance(configuration_obj, SkillConfig)
        and root_key in SkillConfig.FIELDS_WITH_NESTED_FIELDS
    ):
        # skill configs nest per-component settings: root.component_id.args[...]
        root_attr = getattr(configuration_obj, root_key)
        length = len(keys)
        if length < 3 or keys[2] not in SkillConfig.NESTED_FIELDS_ALLOWED_TO_UPDATE:
            raise ValueError(f"Invalid keys={keys}.")  # pragma: nocover
        skill_component_id = keys[1]
        skill_component_config = root_attr.read(skill_component_id)
        if length == 3 and isinstance(value, dict):  # root.skill_component_id.args
            # set all args
            skill_component_config.args = get_nested_ordered_dict_from_dict(value)
        elif len(keys) >= 4:  # root.skill_component_id.args.[keys]
            # update some args
            dic = get_nested_ordered_dict_from_keys_and_value(keys[3:], value)
            skill_component_config.args.update(dic)
        else:
            raise ValueError(  # pragma: nocover
                f"Invalid keys={keys} and values={value}."
            )
        root_attr.update(skill_component_id, skill_component_config)
    else:
        root_attr = getattr(configuration_obj, root_key)
        if isinstance(root_attr, CRUDCollection):
            if isinstance(value, dict) and len(keys) == 1:  # root.
                # update every item of the collection from the dict's entries
                for _key, _value in value.items():
                    dic = get_nested_ordered_dict_from_keys_and_value([_key], _value)
                    root_attr.update(_key, dic[_key])
            elif len(keys) >= 2:  # root.[keys]
                dic = get_nested_ordered_dict_from_keys_and_value(keys[1:], value)
                root_attr.update(keys[1], dic[keys[1]])
            else:
                raise ValueError(  # pragma: nocover
                    f"Invalid keys={keys} and values={value}."
                )
        elif root_key == "dependencies":
            enforce(
                isinstance(configuration_obj, ComponentConfiguration),
                # FIX: message previously read "Cannot only set dependencies to
                # ComponentConfiguration instances." which inverted its meaning.
                "Can only set dependencies on ComponentConfiguration instances.",
            )
            configuration_obj = cast(ComponentConfiguration, configuration_obj)
            new_pypi_dependencies = dependencies_from_json(value)
            configuration_obj.pypi_dependencies = new_pypi_dependencies
        else:
            # plain attribute: replace it wholesale with the nested dict
            dic = get_nested_ordered_dict_from_keys_and_value(keys, value)
            setattr(configuration_obj, root_key, dic[root_key])
def nested_set_config(
    dotted_path: str, value: Any, author: str = DEFAULT_AUTHOR
) -> None:
    """
    Set an AEA config with nested values.

    Run from agent's directory.

    Allowed dotted_path:
        'agent.an_attribute_name'
        'protocols.my_protocol.an_attribute_name'
        'connections.my_connection.an_attribute_name'
        'contracts.my_contract.an_attribute_name'
        'skills.my_skill.an_attribute_name'
        'vendor.author.[protocols|connections|skills].package_name.attribute_name

    :param dotted_path: dotted path to a setting.
    :param value: a value to assign. Must be of yaml serializable type.
    :param author: the author name, used to parse the dotted path.
    """
    settings_keys, config_file_path, config_loader, _ = handle_dotted_path(
        dotted_path, author
    )

    with open_file(config_file_path) as fp:
        config = config_loader.load(fp)

    # apply the change in memory, then rewrite the whole file
    _nested_set(config, settings_keys, value)

    if config.package_type == PackageType.AGENT:
        # agent configs are multi-document YAML: the agent document first,
        # followed by one document per component override
        json_data = config.ordered_json
        component_configurations = json_data.pop("component_configurations")
        with open_file(config_file_path, "w") as fp:
            yaml_dump_all([json_data] + component_configurations, fp)
    else:
        with open_file(config_file_path, "w") as fp:
            yaml_dump(config.ordered_json, fp)
|
8a7898c3cd1b03df464d24a9eddca810d037851a
|
6267a009f15f15381e7c35417f47afe64f13b618
|
/hvac/api/system_backend/wrapping.py
|
142561c1a5c869bfc6c31ade1244004a762e115d
|
[
"Apache-2.0"
] |
permissive
|
hvac/hvac
|
f46b114d34067a1563a31c8888dd791a11e52c3c
|
1f88016df7d8d882d154c491e351bac15b4093b5
|
refs/heads/main
| 2023-08-16T22:39:11.126707
| 2023-08-14T20:22:10
| 2023-08-14T20:22:10
| 35,126,062
| 731
| 304
|
Apache-2.0
| 2023-09-11T19:02:21
| 2015-05-05T21:56:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,146
|
py
|
wrapping.py
|
from hvac.api.system_backend.system_backend_mixin import SystemBackendMixin
class Wrapping(SystemBackendMixin):
    def unwrap(self, token=None):
        """Return the original response inside the given wrapping token.

        Unlike simply reading cubbyhole/response (which is deprecated), this endpoint provides additional validation
        checks on the token, returns the original value on the wire rather than a JSON string representation of it, and
        ensures that the response is properly audit-logged.

        Supported methods:
            POST: /sys/wrapping/unwrap. Produces: 200 application/json

        :param token: Specifies the wrapping token ID. This is required if the client token is not the wrapping token.
            Do not use the wrapping token in both locations.
        :type token: str | unicode
        :return: The JSON response of the request.
        :rtype: dict
        """
        # only include the "token" field when the caller supplied one;
        # otherwise Vault uses the client token as the wrapping token
        params = {} if token is None else {"token": token}
        api_path = "/v1/sys/wrapping/unwrap"
        return self._adapter.post(url=api_path, json=params)
|
2efdaf67b0d250387cc7b8843ecab1818cbf11e0
|
50dd46b8ece33f3cdd174284b15d1d51f89669d4
|
/third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/fixes/fix_xrange.py
|
109c5ca2d08bfd9477085ed7a5ec2ff734a058e8
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"Python-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause",
"OpenSSL"
] |
permissive
|
google/google-ctf
|
f99da1ee07729bbccb869fff1cbaed6a80e43bcc
|
df02323eaf945d15e124801c74abaadca2749dc7
|
refs/heads/master
| 2023-08-31T14:30:27.548081
| 2023-08-29T13:04:20
| 2023-08-29T13:04:20
| 131,317,137
| 4,136
| 607
|
Apache-2.0
| 2023-08-30T22:17:02
| 2018-04-27T15:56:03
|
Go
|
UTF-8
|
Python
| false
| false
| 2,772
|
py
|
fix_xrange.py
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes xrange(...) into range(...)."""
# Local imports
from .. import fixer_base
from ..fixer_util import Name, Call, consuming_calls
from .. import patcomp
class FixXrange(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power<
(name='range'|name='xrange') trailer< '(' args=any ')' >
rest=any* >
"""
def start_tree(self, tree, filename):
super(FixXrange, self).start_tree(tree, filename)
self.transformed_xranges = set()
def finish_tree(self, tree, filename):
self.transformed_xranges = None
def transform(self, node, results):
name = results["name"]
if name.value == u"xrange":
return self.transform_xrange(node, results)
elif name.value == u"range":
return self.transform_range(node, results)
else:
raise ValueError(repr(name))
def transform_xrange(self, node, results):
name = results["name"]
name.replace(Name(u"range", prefix=name.prefix))
# This prevents the new range call from being wrapped in a list later.
self.transformed_xranges.add(id(node))
def transform_range(self, node, results):
if (id(node) not in self.transformed_xranges and
not self.in_special_context(node)):
range_call = Call(Name(u"range"), [results["args"].clone()])
# Encase the range call in list().
list_call = Call(Name(u"list"), [range_call],
prefix=node.prefix)
# Put things that were after the range() call after the list call.
for n in results["rest"]:
list_call.append_child(n)
return list_call
P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
p1 = patcomp.compile_pattern(P1)
P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
| comp_for< 'for' any 'in' node=any any* >
| comparison< any 'in' node=any any*>
"""
p2 = patcomp.compile_pattern(P2)
def in_special_context(self, node):
if node.parent is None:
return False
results = {}
if (node.parent.parent is not None and
self.p1.match(node.parent.parent, results) and
results["node"] is node):
# list(d.keys()) -> list(d.keys()), etc.
return results["func"].value in consuming_calls
# for ... in d.iterkeys() -> for ... in d.keys(), etc.
return self.p2.match(node.parent, results) and results["node"] is node
|
8dac53d737a127da99f45cc55316e1315453fab8
|
be815aacbd7b06ac0ce3f412831639aa0297b988
|
/pipe-cli/src/utilities/permissions_operations.py
|
0744d403b96cad6713d843a354509c02d9bcee24
|
[
"Apache-2.0"
] |
permissive
|
epam/cloud-pipeline
|
8a861dae60d0f86089ff55e2f278e8593fc5e112
|
570dd898e96de931b96e584c86e72296b0e40607
|
refs/heads/develop
| 2023-08-30T08:03:18.672866
| 2023-08-29T17:07:13
| 2023-08-29T17:07:13
| 174,065,041
| 155
| 73
|
Apache-2.0
| 2023-09-14T13:36:36
| 2019-03-06T03:34:40
|
Java
|
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
permissions_operations.py
|
# Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from src.api.user import User
from src.api.entity import Entity
class PermissionsOperations(object):
@classmethod
def chown(cls, user_name, class_name, object_name):
if object_name.isdigit():
object_id = object_name
else:
object_id = Entity.load_by_id_or_name(object_name, class_name)['id']
User.change_owner(user_name, class_name, object_id)
|
c68d82581cfc277c65f4e9ea858566452472a04b
|
baa2c6f22ff563d417e34692bf3345077eb8fa5f
|
/IPython/core/tests/print_argv.py
|
4ec9e2799ede8f83053fbe20df50b0ee097dda77
|
[
"BSD-3-Clause"
] |
permissive
|
ipython/ipython
|
c42ea223b6e391bb7dd39888cb959d4d5d6b21a1
|
e5103f971233fd66b558585cce7a4f52a716cd56
|
refs/heads/main
| 2023-08-30T18:27:18.436521
| 2023-08-29T12:16:00
| 2023-08-29T12:16:00
| 658,518
| 13,673
| 4,729
|
BSD-3-Clause
| 2023-09-12T20:22:09
| 2010-05-10T04:46:06
|
Python
|
UTF-8
|
Python
| false
| false
| 32
|
py
|
print_argv.py
|
import sys
print(sys.argv[1:])
|
9c8683a4ee6778b0861f5631e3da4e3d578d1988
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/common_ext/scripts/client/__init__.py
|
4b276f151ec1424b1bd76fd0fa7ead27c752ef40
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 116
|
py
|
__init__.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: common_ext/scripts/client/__init__.py
pass
|
bdb55db5e0f341f9d1c04b6cdacf00035f9050d0
|
479a9c76b19b84d6cde69305828031cd2531aa56
|
/testing/classifier_test_err_on_empty_sets.py
|
9b596ec9d5118fae54919ef7779d2c0809c08bf4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mldbai/mldb
|
d36801bd99dd3f82d7557cd0f438b0121f63f22c
|
19bc4bc92a41ee8ad4eab0979dffd9c985d95758
|
refs/heads/master
| 2023-09-03T22:59:11.621839
| 2022-12-30T18:42:24
| 2022-12-30T18:42:24
| 47,634,692
| 701
| 107
|
Apache-2.0
| 2023-02-10T23:08:05
| 2015-12-08T16:34:16
|
C++
|
UTF-8
|
Python
| false
| false
| 3,314
|
py
|
classifier_test_err_on_empty_sets.py
|
#
# classifier_test_err_on_empty_sets.py
# Mich, 2016-06-07
# This file is part of MLDB. Copyright 2016 mldb.ai inc. All rights reserved.
#
from mldb import mldb, MldbUnitTest, ResponseException
import os
tmp_dir=os.getenv("TMP")
class ClassifierTestErrorWhenNoDataTest(MldbUnitTest): # noqa
@classmethod
def setUpClass(cls):
ds = mldb.create_dataset({'id' : 'ds', 'type' : 'sparse.mutable'})
ds.record_row('row1', [['label', 1, 12], ['feat1', 1, 0], ['feat2', 1, 0]])
ds.record_row('row2', [['label', 0, 12], ['feat1', 1, 0], ['feat2', 0, 0]])
ds.record_row('row3', [['label', 0, 12], ['feat1', 0, 0], ['feat2', 0, 0]])
ds.commit()
mldb.post('/v1/procedures', {
'type' : 'classifier.train',
'params' : {
'runOnCreation' : True,
"mode": "boolean",
'algorithm' : 'glz',
"configuration": {
"glz": {
"type": "glz",
"verbosity": 3,
"normalize": False,
"regularization": 'l2'
}
},
'trainingData' : """
SELECT {* EXCLUDING(label)} AS features, label
FROM ds
""",
"modelFileUrl":
"file://" + tmp_dir + "fmlhTODO.cls",
}
})
def test_classifier_test_no_data(self):
err_str = "Cannot run classifier.test procedure on empty test set"
with self.assertRaisesRegex(ResponseException, err_str):
mldb.post('/v1/procedures', {
"type": "classifier.test",
"params": {
'runOnCreation' : True,
"testingData": """
SELECT
{* EXCLUDING(label)} AS features,
label AS score,
label AS label
FROM ds
LIMIT 0
"""
}
})
with self.assertRaisesRegex(ResponseException, err_str):
mldb.post('/v1/procedures', {
"type": "classifier.test",
"params": {
'runOnCreation' : True,
"testingData": """
SELECT
{* EXCLUDING(label)} AS features,
label AS score,
label AS label
FROM ds
OFFSET 100
"""
}
})
with self.assertRaisesRegex(ResponseException, err_str):
mldb.post('/v1/procedures', {
"type": "classifier.test",
"params": {
'runOnCreation' : True,
"testingData": """
SELECT
{* EXCLUDING(label)} AS features,
label AS score,
label AS label
FROM ds
WHERE patate=123
"""
}
})
if __name__ == '__main__':
mldb.run_tests()
|
40092c1ca67dff26c5d470fb40c42c1871e93323
|
3abc1fef99ac6ce0b845a1090fae7f6875fee729
|
/src/ralph/data_center/migrations/0014_custom_move_managment_to_networks.py
|
0d8d7301db8c0d89a8c00ad7d932e1210b2b7d72
|
[
"Apache-2.0"
] |
permissive
|
allegro/ralph
|
5ff9165a202e836061c99e8af20214e0d651622f
|
b4a72356f527b1f12c7babd7465d2d7fa3ffb0d3
|
refs/heads/ng
| 2023-09-02T01:13:43.672554
| 2023-09-01T09:48:38
| 2023-09-01T09:48:38
| 4,359,038
| 1,970
| 617
|
Apache-2.0
| 2023-09-01T09:44:39
| 2012-05-17T14:04:57
|
Python
|
UTF-8
|
Python
| false
| false
| 2,325
|
py
|
0014_custom_move_managment_to_networks.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import ipaddress
from django.db import migrations, models
def move_to_networks(apps, schema_editor):
DataCenterAsset = apps.get_model('data_center', 'DataCenterAsset')
IPAddress = apps.get_model('networks', 'IPAddress')
assets = DataCenterAsset.objects.exclude(management_ip_old=None)
for idx, asset in enumerate(assets):
try:
ip = IPAddress.objects.get(
address=asset.management_ip_old,
)
except IPAddress.DoesNotExist:
ip = IPAddress(
address=asset.management_ip_old,
)
ip.number = int(ipaddress.ip_address(ip.address))
ip.hostname = asset.management_hostname_old
ip.is_management = True
ip.base_object = asset
ip.save()
def move_from_networks(apps, schema_editor):
IPAddress = apps.get_model('networks', 'IPAddress')
ips = IPAddress.objects.filter(
is_management=True, base_object__asset__datacenterasset__isnull=False
)
for ip in ips:
dca = ip.base_object.asset.datacenterasset
dca.management_ip_old = ip.address
dca.management_hostname_old = ip.hostname
dca.save()
class Migration(migrations.Migration):
dependencies = [
('data_center', '0013_auto_20160606_1438'),
('networks', '0001_initial'),
]
operations = [
# rename first to `management_ip_old` because there is now property
# `management_ip` in DataCenterAsset which "hides" database field
# thus should not be used directly
migrations.RenameField(
model_name='datacenterasset',
old_name='management_ip',
new_name='management_ip_old'
),
migrations.RenameField(
model_name='datacenterasset',
old_name='management_hostname',
new_name='management_hostname_old'
),
migrations.RunPython(
move_to_networks, reverse_code=move_from_networks
),
migrations.RemoveField(
model_name='datacenterasset',
name='management_hostname_old',
),
migrations.RemoveField(
model_name='datacenterasset',
name='management_ip_old',
),
]
|
9e2f111209554ff43f64f473dbb562d12ed90183
|
2f1e3f24f2798507c9eb73185a955c9bfb735140
|
/libreco/algorithms/rnn4rec.py
|
6217dcbb1765bc92c7425ae443118d9452277283
|
[
"MIT"
] |
permissive
|
massquantity/LibRecommender
|
e4f55b06b2208c794a3f97f7ff89413fa9beaffa
|
8d5fbe9c177f5b91c2b6f19a155a83320dd0e20c
|
refs/heads/master
| 2023-08-31T23:48:37.634663
| 2023-08-20T11:58:15
| 2023-08-20T11:58:15
| 174,493,761
| 251
| 55
|
MIT
| 2023-08-20T11:58:16
| 2019-03-08T07:58:27
|
Python
|
UTF-8
|
Python
| false
| false
| 9,365
|
py
|
rnn4rec.py
|
"""Implementation of RNN4Rec model."""
from ..bases import DynEmbedBase, ModelMeta
from ..layers import embedding_lookup, normalize_embeds, tf_dense, tf_rnn
from ..tfops import dropout_config, reg_config, tf
from ..torchops import hidden_units_config
from ..utils.misc import count_params
class RNN4Rec(DynEmbedBase, metaclass=ModelMeta, backend="tensorflow"):
"""*RNN4Rec* algorithm.
.. NOTE::
The original paper used GRU, but in this implementation we can also use LSTM.
Parameters
----------
task : {'rating', 'ranking'}
Recommendation task. See :ref:`Task`.
data_info : :class:`~libreco.data.DataInfo` object
Object that contains useful information for training and inference.
loss_type : {'cross_entropy', 'focal', 'bpr'}, default: 'cross_entropy'
Loss for model training.
rnn_type : {'lstm', 'gru'}, default: 'gru'
RNN for modeling.
embed_size: int, default: 16
Vector size of embeddings.
norm_embed : bool, default: False
Whether to l2 normalize output embeddings.
n_epochs: int, default: 10
Number of epochs for training.
lr : float, default 0.001
Learning rate for training.
lr_decay : bool, default: False
Whether to use learning rate decay.
epsilon : float, default: 1e-5
A small constant added to the denominator to improve numerical stability in
Adam optimizer.
According to the `official comment <https://github.com/tensorflow/tensorflow/blob/v1.15.0/tensorflow/python/training/adam.py#L64>`_,
default value of `1e-8` for `epsilon` is generally not good, so here we choose `1e-5`.
Users can try tuning this hyperparameter if the training is unstable.
reg : float or None, default: None
Regularization parameter, must be non-negative or None.
batch_size : int, default: 256
Batch size for training.
sampler : {'random', 'unconsumed', 'popular'}, default: 'random'
Negative sampling strategy.
- ``'random'`` means random sampling.
- ``'unconsumed'`` samples items that the target user did not consume before.
- ``'popular'`` has a higher probability to sample popular items as negative samples.
.. versionadded:: 1.1.0
num_neg : int, default: 1
Number of negative samples for each positive sample, only used in `ranking` task.
dropout_rate : float or None, default: None
Probability of an element to be zeroed. If it is None, dropout is not used.
hidden_units : int, list of int or tuple of (int,), default: 16
Number of layers and corresponding layer size in RNN.
.. versionchanged:: 1.0.0
Accept type of ``int``, ``list`` or ``tuple``, instead of ``str``.
use_layer_norm : bool, default: False
Whether to use layer normalization.
recent_num : int or None, default: 10
Number of recent items to use in user behavior sequence.
random_num : int or None, default: None
Number of random sampled items to use in user behavior sequence.
If `recent_num` is not None, `random_num` is not considered.
seed : int, default: 42
Random seed.
lower_upper_bound : tuple or None, default: None
Lower and upper score bound for `rating` task.
tf_sess_config : dict or None, default: None
Optional TensorFlow session config, see `ConfigProto options
<https://github.com/tensorflow/tensorflow/blob/v2.10.0/tensorflow/core/protobuf/config.proto#L431>`_.
References
----------
*Balazs Hidasi et al.* `Session-based Recommendations with Recurrent Neural Networks
<https://arxiv.org/pdf/1511.06939.pdf>`_.
"""
item_variables = (
"embedding/item_embeds_var",
"embedding/item_bias_var",
"embedding/seq_embeds_var",
)
def __init__(
self,
task,
data_info=None,
loss_type="cross_entropy",
rnn_type="gru",
embed_size=16,
norm_embed=False,
n_epochs=20,
lr=0.001,
lr_decay=False,
epsilon=1e-5,
reg=None,
batch_size=256,
sampler="random",
num_neg=1,
dropout_rate=None,
hidden_units=16,
use_layer_norm=False,
recent_num=10,
random_num=None,
seed=42,
lower_upper_bound=None,
tf_sess_config=None,
):
super().__init__(
task,
data_info,
embed_size,
norm_embed,
recent_num,
random_num,
lower_upper_bound,
tf_sess_config,
)
self.all_args = locals()
self.loss_type = loss_type
self.rnn_type = rnn_type.lower()
self.n_epochs = n_epochs
self.lr = lr
self.lr_decay = lr_decay
self.epsilon = epsilon
self.hidden_units = hidden_units_config(hidden_units)
self.reg = reg_config(reg)
self.batch_size = batch_size
self.sampler = sampler
self.num_neg = num_neg
self.dropout_rate = dropout_config(dropout_rate)
self.use_ln = use_layer_norm
self.seed = seed
self._check_params()
def _check_params(self):
if self.rnn_type not in ("lstm", "gru"):
raise ValueError("`rnn_type` must either be `lstm` or `gru`")
if self.loss_type not in ("cross_entropy", "bpr", "focal"):
raise ValueError(
"`loss_type` must be one of (`cross_entropy`, `focal`, `bpr`)"
)
def build_model(self):
tf.set_random_seed(self.seed)
self.is_training = tf.placeholder_with_default(False, shape=[])
self._build_variables()
self.user_embeds = self._build_user_embeddings()
self.serving_topk = self.build_topk()
if self.task == "rating" or self.loss_type in ("cross_entropy", "focal"):
self.item_indices = tf.placeholder(tf.int32, shape=[None])
self.labels = tf.placeholder(tf.float32, shape=[None])
user_embeds = self.user_embeds
item_embeds = tf.nn.embedding_lookup(self.item_embeds, self.item_indices)
item_biases = tf.nn.embedding_lookup(self.item_biases, self.item_indices)
if self.norm_embed:
user_embeds, item_embeds = normalize_embeds(
user_embeds, item_embeds, backend="tf"
)
self.output = tf.reduce_sum(user_embeds * item_embeds, axis=1) + item_biases
elif self.loss_type == "bpr":
self.item_indices_pos = tf.placeholder(tf.int32, shape=[None])
self.item_indices_neg = tf.placeholder(tf.int32, shape=[None])
user_embeds = self.user_embeds
item_embed_pos = tf.nn.embedding_lookup(
self.item_embeds, self.item_indices_pos
)
item_embed_neg = tf.nn.embedding_lookup(
self.item_embeds, self.item_indices_neg
)
item_bias_pos = tf.nn.embedding_lookup(
self.item_biases, self.item_indices_pos
)
item_bias_neg = tf.nn.embedding_lookup(
self.item_biases, self.item_indices_neg
)
if self.norm_embed:
user_embeds, item_embed_pos, item_embed_neg = normalize_embeds(
user_embeds, item_embed_pos, item_embed_neg, backend="tf"
)
item_diff = tf.subtract(item_bias_pos, item_bias_neg) + tf.reduce_sum(
tf.multiply(
self.user_embeds, tf.subtract(item_embed_pos, item_embed_neg)
),
axis=1,
)
self.bpr_loss = tf.log_sigmoid(item_diff)
count_params()
def _build_variables(self):
with tf.variable_scope("embedding"):
# weight and bias parameters for last fc_layer
self.item_embeds = tf.get_variable(
name="item_embeds_var",
shape=[self.n_items, self.embed_size],
initializer=tf.glorot_uniform_initializer(),
regularizer=self.reg,
)
self.item_biases = tf.get_variable(
name="item_bias_var",
shape=[self.n_items],
initializer=tf.zeros_initializer(),
regularizer=self.reg,
)
def _build_user_embeddings(self):
self.user_interacted_seq = tf.placeholder(
tf.int32, shape=[None, self.max_seq_len]
)
self.user_interacted_len = tf.placeholder(tf.int64, shape=[None])
seq_item_embed = embedding_lookup(
indices=self.user_interacted_seq,
var_name="seq_embeds_var",
var_shape=(self.n_items + 1, self.hidden_units[0]),
initializer=tf.glorot_uniform_initializer(),
regularizer=self.reg,
)
rnn_output = tf_rnn(
inputs=seq_item_embed,
rnn_type=self.rnn_type,
lengths=self.user_interacted_len,
maxlen=self.max_seq_len,
hidden_units=self.hidden_units,
dropout_rate=self.dropout_rate,
use_ln=self.use_ln,
is_training=self.is_training,
)
return tf_dense(units=self.embed_size, activation=None)(rnn_output)
|
4efdc34a94c6f5cc5f4b217374c44bae682394d0
|
751fe2de18f00596e4f1ed342b56bd6f38ee2053
|
/wisdem/rotorse/rotor_power.py
|
d34f691fd49c2f52091a66578d7ae81824fdc68b
|
[
"Apache-2.0"
] |
permissive
|
WISDEM/WISDEM
|
42fa780915d62fd4e4203050e886093ecc806c8a
|
d7270ebe1c554293a9d36730d67ab555c071cb17
|
refs/heads/master
| 2023-08-04T01:22:43.215105
| 2023-06-22T23:36:07
| 2023-06-22T23:36:07
| 23,678,280
| 120
| 86
|
Apache-2.0
| 2023-06-22T19:26:34
| 2014-09-04T20:30:24
|
Python
|
UTF-8
|
Python
| false
| false
| 47,627
|
py
|
rotor_power.py
|
"""
Script that computes the regulation trajectory of the rotor and the annual energy production
Nikhar J. Abbas, Pietro Bortolotti
January 2020
"""
import logging
import numpy as np
from openmdao.api import Group, ExplicitComponent
from scipy.optimize import brentq, minimize, minimize_scalar
from scipy.interpolate import PchipInterpolator
from wisdem.ccblade.Polar import Polar
from wisdem.ccblade.ccblade import CCBlade, CCAirfoil
from wisdem.commonse.utilities import smooth_abs, smooth_min, linspace_with_deriv
from wisdem.commonse.distribution import RayleighCDF, WeibullWithMeanCDF
logger = logging.getLogger("wisdem/weis")
TOL = 1e-3
class RotorPower(Group):
def initialize(self):
self.options.declare("modeling_options")
self.options.declare("opt_options")
def setup(self):
modeling_options = self.options["modeling_options"]
self.add_subsystem(
"powercurve",
RegulatedPowerCurve(modeling_options=modeling_options),
promotes=[
"v_min",
"v_max",
"rated_power",
"omega_min",
"omega_max",
"control_maxTS",
"tsr_operational",
"control_pitch",
"drivetrainType",
"r",
"chord",
"theta",
"Rhub",
"Rtip",
"hub_height",
"precone",
"tilt",
"yaw",
"precurve",
"precurveTip",
"presweep",
"presweepTip",
"airfoils_aoa",
"airfoils_Re",
"airfoils_cl",
"airfoils_cd",
"airfoils_cm",
"nBlades",
"rho",
"mu",
"shearExp",
"hubloss",
"tiploss",
"wakerotation",
"usecd",
"nSector",
],
)
self.add_subsystem("gust", GustETM(std=modeling_options["WISDEM"]["RotorSE"]["gust_std"]))
self.add_subsystem("cdf", WeibullWithMeanCDF(nspline=modeling_options["WISDEM"]["RotorSE"]["n_pc_spline"]))
self.add_subsystem("aep", AEP(nspline=modeling_options["WISDEM"]["RotorSE"]["n_pc_spline"]), promotes=["AEP"])
# Connections to the gust calculation
self.connect("powercurve.rated_V", "gust.V_hub")
# Connections to the Weibull CDF
self.connect("powercurve.V_spline", "cdf.x")
# Connections to the aep computation component
self.connect("cdf.F", "aep.CDF_V")
self.connect("powercurve.P_spline", "aep.P")
class GustETM(ExplicitComponent):
# OpenMDAO component that generates an "equivalent gust" wind speed by summing an user-defined wind speed at hub height with 3 times sigma. sigma is the turbulent wind speed standard deviation for the extreme turbulence model, see IEC-61400-1 Eq. 19 paragraph 6.3.2.3
def initialize(self):
# number of standard deviations for strength of gust
self.options.declare("std", default=3.0)
def setup(self):
# Inputs
self.add_input("V_mean", val=0.0, units="m/s", desc="IEC average wind speed for turbine class")
self.add_input("V_hub", val=0.0, units="m/s", desc="hub height wind speed")
self.add_discrete_input("turbulence_class", val="A", desc="IEC turbulence class")
# Output
self.add_output("V_gust", val=0.0, units="m/s", desc="gust wind speed")
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
V_mean = inputs["V_mean"]
V_hub = inputs["V_hub"]
std = self.options["std"]
turbulence_class = discrete_inputs["turbulence_class"]
if turbulence_class.upper() == "A":
Iref = 0.16
elif turbulence_class.upper() == "B":
Iref = 0.14
elif turbulence_class.upper() == "C":
Iref = 0.12
else:
raise ValueError("Unknown Turbulence Class: " + str(turbulence_class) + " . Permitted values are A / B / C")
c = 2.0
sigma = c * Iref * (0.072 * (V_mean / c + 3) * (V_hub / c - 4) + 10)
V_gust = V_hub + std * sigma
outputs["V_gust"] = V_gust
class RegulatedPowerCurve(Group):
def initialize(self):
self.options.declare("modeling_options")
def setup(self):
modeling_options = self.options["modeling_options"]
self.add_subsystem("compute_power_curve", ComputePowerCurve(modeling_options=modeling_options), promotes=["*"])
self.add_subsystem("compute_splines", ComputeSplines(modeling_options=modeling_options), promotes=["*"])
class ComputePowerCurve(ExplicitComponent):
"""
Iteratively call CCBlade to compute the power curve.
"""
def initialize(self):
self.options.declare("modeling_options")
self.options.declare("debug", default=False)
def setup(self):
modeling_options = self.options["modeling_options"]
self.n_span = n_span = modeling_options["WISDEM"]["RotorSE"]["n_span"]
self.n_aoa = n_aoa = modeling_options["WISDEM"]["RotorSE"]["n_aoa"] # Number of angle of attacks
self.n_Re = n_Re = modeling_options["WISDEM"]["RotorSE"]["n_Re"] # Number of Reynolds, so far hard set at 1
self.n_tab = n_tab = modeling_options["WISDEM"]["RotorSE"][
"n_tab"
] # Number of tabulated data. For distributed aerodynamic control this could be > 1
self.regulation_reg_III = modeling_options["WISDEM"]["RotorSE"]["regulation_reg_III"]
self.n_pc = modeling_options["WISDEM"]["RotorSE"]["n_pc"]
self.n_pc_spline = modeling_options["WISDEM"]["RotorSE"]["n_pc_spline"]
self.peak_thrust_shaving = modeling_options["WISDEM"]["RotorSE"]["peak_thrust_shaving"]
self.fix_pitch_regI12 = modeling_options["WISDEM"]["RotorSE"]["fix_pitch_regI12"]
if self.peak_thrust_shaving:
self.thrust_shaving_coeff = modeling_options["WISDEM"]["RotorSE"]["thrust_shaving_coeff"]
# parameters
self.add_input("v_min", val=0.0, units="m/s", desc="cut-in wind speed")
self.add_input("v_max", val=0.0, units="m/s", desc="cut-out wind speed")
self.add_input("rated_power", val=0.0, units="W", desc="electrical rated power")
self.add_input("omega_min", val=0.0, units="rpm", desc="minimum allowed rotor rotation speed")
self.add_input("omega_max", val=0.0, units="rpm", desc="maximum allowed rotor rotation speed")
self.add_input("control_maxTS", val=0.0, units="m/s", desc="maximum allowed blade tip speed")
self.add_input("tsr_operational", val=0.0, desc="tip-speed ratio in Region 2 (should be optimized externally)")
self.add_input(
"control_pitch",
val=0.0,
units="deg",
desc="pitch angle in region 2 (and region 3 for fixed pitch machines)",
)
self.add_discrete_input("drivetrainType", val="GEARED")
self.add_input("gearbox_efficiency", val=1.0)
self.add_input(
"generator_efficiency",
val=np.ones(self.n_pc),
desc="Generator efficiency at various rpm values to support table lookup",
)
self.add_input(
"lss_rpm",
val=np.zeros(self.n_pc),
units="rpm",
desc="Low speed shaft RPM values at which the generator efficiency values are given",
)
self.add_input(
"r",
val=np.zeros(n_span),
units="m",
desc="radial locations where blade is defined (should be increasing and not go all the way to hub or tip)",
)
self.add_input("chord", val=np.zeros(n_span), units="m", desc="chord length at each section")
self.add_input(
"theta",
val=np.zeros(n_span),
units="deg",
desc="twist angle at each section (positive decreases angle of attack)",
)
self.add_input("Rhub", val=0.0, units="m", desc="hub radius")
self.add_input("Rtip", val=0.0, units="m", desc="tip radius")
self.add_input("hub_height", val=0.0, units="m", desc="hub height")
self.add_input("precone", val=0.0, units="deg", desc="precone angle")
self.add_input("tilt", val=0.0, units="deg", desc="shaft tilt")
self.add_input("yaw", val=0.0, units="deg", desc="yaw error")
self.add_input("precurve", val=np.zeros(n_span), units="m", desc="precurve at each section")
self.add_input("precurveTip", val=0.0, units="m", desc="precurve at tip")
self.add_input("presweep", val=np.zeros(n_span), units="m", desc="presweep at each section")
self.add_input("presweepTip", val=0.0, units="m", desc="presweep at tip")
# self.add_discrete_input('airfoils', val=[0]*n_span, desc='CCAirfoil instances')
self.add_input("airfoils_cl", val=np.zeros((n_span, n_aoa, n_Re, n_tab)), desc="lift coefficients, spanwise")
self.add_input("airfoils_cd", val=np.zeros((n_span, n_aoa, n_Re, n_tab)), desc="drag coefficients, spanwise")
self.add_input("airfoils_cm", val=np.zeros((n_span, n_aoa, n_Re, n_tab)), desc="moment coefficients, spanwise")
self.add_input("airfoils_aoa", val=np.zeros((n_aoa)), units="deg", desc="angle of attack grid for polars")
self.add_input("airfoils_Re", val=np.zeros((n_Re)), desc="Reynolds numbers of polars")
self.add_discrete_input("nBlades", val=0, desc="number of blades")
self.add_input("rho", val=1.225, units="kg/m**3", desc="density of air")
self.add_input("mu", val=1.81e-5, units="kg/(m*s)", desc="dynamic viscosity of air")
self.add_input("shearExp", val=0.0, desc="shear exponent")
self.add_discrete_input(
"nSector", val=4, desc="number of sectors to divide rotor face into in computing thrust and power"
)
self.add_discrete_input("tiploss", val=True, desc="include Prandtl tip loss model")
self.add_discrete_input("hubloss", val=True, desc="include Prandtl hub loss model")
self.add_discrete_input(
"wakerotation",
val=True,
desc="include effect of wake rotation (i.e., tangential induction factor is nonzero)",
)
self.add_discrete_input("usecd", val=True, desc="use drag coefficient in computing induction factors")
# outputs
self.add_output("V", val=np.zeros(self.n_pc), units="m/s", desc="wind vector")
self.add_output("Omega", val=np.zeros(self.n_pc), units="rpm", desc="rotor rotational speed")
self.add_output("pitch", val=np.zeros(self.n_pc), units="deg", desc="rotor pitch schedule")
self.add_output("P", val=np.zeros(self.n_pc), units="W", desc="rotor electrical power")
self.add_output("P_aero", val=np.zeros(self.n_pc), units="W", desc="rotor mechanical power")
self.add_output("T", val=np.zeros(self.n_pc), units="N", desc="rotor aerodynamic thrust")
self.add_output("Q", val=np.zeros(self.n_pc), units="N*m", desc="rotor aerodynamic torque")
self.add_output("M", val=np.zeros(self.n_pc), units="N*m", desc="blade root moment")
self.add_output("Cp", val=np.zeros(self.n_pc), desc="rotor electrical power coefficient")
self.add_output("Cp_aero", val=np.zeros(self.n_pc), desc="rotor aerodynamic power coefficient")
self.add_output("Ct_aero", val=np.zeros(self.n_pc), desc="rotor aerodynamic thrust coefficient")
self.add_output("Cq_aero", val=np.zeros(self.n_pc), desc="rotor aerodynamic torque coefficient")
self.add_output("Cm_aero", val=np.zeros(self.n_pc), desc="rotor aerodynamic moment coefficient")
self.add_output("V_R25", val=0.0, units="m/s", desc="region 2.5 transition wind speed")
self.add_output("rated_V", val=0.0, units="m/s", desc="rated wind speed")
self.add_output("rated_Omega", val=0.0, units="rpm", desc="rotor rotation speed at rated")
self.add_output("rated_pitch", val=0.0, units="deg", desc="pitch setting at rated")
self.add_output("rated_T", val=0.0, units="N", desc="rotor aerodynamic thrust at rated")
self.add_output("rated_Q", val=0.0, units="N*m", desc="rotor aerodynamic torque at rated")
self.add_output("rated_mech", val=0.0, units="W", desc="Mechanical shaft power at rated")
self.add_output(
"ax_induct_regII", val=np.zeros(n_span), desc="rotor axial induction at cut-in wind speed along blade span"
)
self.add_output(
"tang_induct_regII",
val=np.zeros(n_span),
desc="rotor tangential induction at cut-in wind speed along blade span",
)
self.add_output(
"aoa_regII",
val=np.zeros(n_span),
units="deg",
desc="angle of attack distribution along blade span at cut-in wind speed",
)
self.add_output(
"L_D",
val=np.zeros(n_span),
desc="Lift over drag distribution along blade span at cut-in wind speed",
)
self.add_output("Cp_regII", val=0.0, desc="power coefficient at cut-in wind speed")
self.add_output("Ct_regII", val=0.0, desc="thrust coefficient at cut-in wind speed")
self.add_output(
"cl_regII", val=np.zeros(n_span), desc="lift coefficient distribution along blade span at cut-in wind speed"
)
self.add_output(
"cd_regII", val=np.zeros(n_span), desc="drag coefficient distribution along blade span at cut-in wind speed"
)
self.add_output("rated_efficiency", val=1.0, desc="Efficiency at rated conditions")
# self.declare_partials('*', '*', method='fd', form='central', step=1e-6)
    def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
        """Solve the regulated power curve.

        Builds a CCBlade rotor model from the blade geometry / airfoil polar
        inputs, sweeps hub wind speed from cut-in to cut-out, and solves for
        rotor speed and blade pitch in each control region:

        * Region II:   track optimal TSR (pitch optimized where speed-limited)
        * Region II.5: rotor speed capped by max tip speed / max omega
        * Rated:       wind speed where electrical power first hits rated
        * Region III:  pitch-to-feather to hold rated power (optional)

        Optionally enforces peak thrust shaving (cap on rotor thrust).
        All power-curve arrays, rated quantities, and the region-II spanwise
        induction / angle-of-attack distributions are written to ``outputs``.
        """
        # Saving out inputs for easy debugging of troublesome cases
        if self.options["debug"]:
            np.savez(
                "debug.npz",
                v_min=inputs["v_min"],
                v_max=inputs["v_max"],
                rated_power=inputs["rated_power"],
                omega_min=inputs["omega_min"],
                omega_max=inputs["omega_max"],
                control_maxTS=inputs["control_maxTS"],
                tsr_operational=inputs["tsr_operational"],
                control_pitch=inputs["control_pitch"],
                gearbox_efficiency=inputs["gearbox_efficiency"],
                generator_efficiency=inputs["generator_efficiency"],
                lss_rpm=inputs["lss_rpm"],
                r=inputs["r"],
                chord=inputs["chord"],
                theta=inputs["theta"],
                Rhub=inputs["Rhub"],
                Rtip=inputs["Rtip"],
                hub_height=inputs["hub_height"],
                precone=inputs["precone"],
                tilt=inputs["tilt"],
                yaw=inputs["yaw"],
                precurve=inputs["precurve"],
                precurveTip=inputs["precurveTip"],
                presweep=inputs["presweep"],
                presweepTip=inputs["presweepTip"],
                airfoils_cl=inputs["airfoils_cl"],
                airfoils_cd=inputs["airfoils_cd"],
                airfoils_cm=inputs["airfoils_cm"],
                airfoils_aoa=inputs["airfoils_aoa"],
                airfoils_Re=inputs["airfoils_Re"],
                rho=inputs["rho"],
                mu=inputs["mu"],
                shearExp=inputs["shearExp"],
                nBlades=discrete_inputs["nBlades"],
            )

        # Create Airfoil class instances
        af = [None] * self.n_span
        for i in range(self.n_span):
            if self.n_tab > 1:
                # Multiple aero-control tabs: use the middle tab as reference
                ref_tab = int(np.floor(self.n_tab / 2))
                af[i] = CCAirfoil(
                    inputs["airfoils_aoa"],
                    inputs["airfoils_Re"],
                    inputs["airfoils_cl"][i, :, :, ref_tab],
                    inputs["airfoils_cd"][i, :, :, ref_tab],
                    inputs["airfoils_cm"][i, :, :, ref_tab],
                )
            else:
                af[i] = CCAirfoil(
                    inputs["airfoils_aoa"],
                    inputs["airfoils_Re"],
                    inputs["airfoils_cl"][i, :, :, 0],
                    inputs["airfoils_cd"][i, :, :, 0],
                    inputs["airfoils_cm"][i, :, :, 0],
                )

        self.ccblade = CCBlade(
            inputs["r"],
            inputs["chord"],
            inputs["theta"],
            af,
            inputs["Rhub"],
            inputs["Rtip"],
            discrete_inputs["nBlades"],
            inputs["rho"],
            inputs["mu"],
            inputs["precone"],
            inputs["tilt"],
            inputs["yaw"],
            inputs["shearExp"],
            inputs["hub_height"],
            discrete_inputs["nSector"],
            inputs["precurve"],
            inputs["precurveTip"],
            inputs["presweep"],
            inputs["presweepTip"],
            discrete_inputs["tiploss"],
            discrete_inputs["hubloss"],
            discrete_inputs["wakerotation"],
            discrete_inputs["usecd"],
        )

        # JPJ: what is this grid for? Seems to be a special distribution of velocities
        # for the hub
        # NOTE(review): cosine-spaced grid that clusters points near the upper end
        # of the wind-speed range (where rated is expected) — confirm intent.
        grid0 = np.cumsum(np.abs(np.diff(np.cos(np.linspace(-np.pi / 4.0, np.pi / 2.0, self.n_pc)))))
        grid1 = (grid0 - grid0[0]) / (grid0[-1] - grid0[0])
        Uhub = grid1 * (inputs["v_max"] - inputs["v_min"]) + inputs["v_min"]

        # Power-curve work arrays, one entry per wind speed in Uhub
        P_aero = np.zeros(Uhub.shape)
        Cp_aero = np.zeros(Uhub.shape)
        Ct_aero = np.zeros(Uhub.shape)
        Cq_aero = np.zeros(Uhub.shape)
        Cm_aero = np.zeros(Uhub.shape)
        P = np.zeros(Uhub.shape)
        Cp = np.zeros(Uhub.shape)
        T = np.zeros(Uhub.shape)
        Q = np.zeros(Uhub.shape)
        M = np.zeros(Uhub.shape)
        pitch = np.zeros(Uhub.shape) + inputs["control_pitch"]

        # Unpack variables
        P_rated = float(inputs["rated_power"])
        R_tip = float(inputs["Rtip"])
        tsr = float(inputs["tsr_operational"])
        driveType = discrete_inputs["drivetrainType"]

        ## POWERCURVE PRELIMS ##
        # Set rotor speed based on TSR
        Omega_tsr = Uhub * tsr / R_tip

        # Determine maximum rotor speed (rad/s)- either by TS or by control input
        Omega_max = min([inputs["control_maxTS"] / R_tip, inputs["omega_max"] * np.pi / 30.0])

        # Apply maximum and minimum rotor speed limits
        Omega_min = inputs["omega_min"] * np.pi / 30.0
        Omega = np.maximum(np.minimum(Omega_tsr, Omega_max), Omega_min)
        Omega_rpm = Omega * 30.0 / np.pi

        # Create table lookup of total drivetrain efficiency, where rpm is first column and second column is gearbox*generator
        lss_rpm = inputs["lss_rpm"]
        gen_eff = inputs["generator_efficiency"]
        if not np.any(lss_rpm):
            # No user-provided efficiency table: synthesize one from the
            # empirical drivetrain-loss model in compute_P_and_eff
            lss_rpm = np.linspace(np.maximum(0.1, Omega_rpm[0]), Omega_rpm[-1], self.n_pc - 1)
            _, gen_eff = compute_P_and_eff(
                P_rated * lss_rpm / lss_rpm[-1],
                P_rated,
                np.zeros(self.n_pc - 1),
                driveType,
                np.zeros((self.n_pc - 1, 2)),
            )
        # driveEta = np.c_[lss_rpm, float(inputs['gearbox_efficiency'])*gen_eff]
        driveEta = float(inputs["gearbox_efficiency"]) * gen_eff

        # Set baseline power production
        myout, derivs = self.ccblade.evaluate(Uhub, Omega_tsr * 30.0 / np.pi, pitch, coefficients=True)
        P_aero, T, Q, M, Cp_aero, Ct_aero, Cq_aero, Cm_aero = [
            myout[key] for key in ["P", "T", "Q", "Mb", "CP", "CT", "CQ", "CMb"]
        ]
        # P, eff = compute_P_and_eff(P_aero, P_rated, Omega_rpm, driveType, driveEta)
        eff = np.interp(Omega_rpm, lss_rpm, driveEta)
        P = P_aero * eff
        Cp = Cp_aero * eff

        # Find rated index and guess at rated speed
        if P_aero[-1] > P_rated:
            U_rated = np.interp(P_rated, P, Uhub)
            i_rated = np.nonzero(U_rated <= Uhub)[0][0]
        else:
            U_rated = Uhub[-1] + 1e-6

        # Find Region 3 index
        found_rated = P_aero[-1] > P_rated
        region3 = len(np.nonzero(P >= P_rated)[0]) > 0

        # Guess at Region 2.5, but we will do a more rigorous search below
        if Omega_max < Omega_tsr[-1]:
            U_2p5 = np.interp(Omega[-1], Omega_tsr, Uhub)
            outputs["V_R25"] = U_2p5
        else:
            U_2p5 = U_rated
        region2p5 = U_2p5 < U_rated

        # Initialize peak shaving thrust value, will be updated later
        max_T = self.thrust_shaving_coeff * T.max() if self.peak_thrust_shaving and found_rated else 1e16

        ## REGION II.5 and RATED ##
        # Solve for rated velocity
        if found_rated:
            i = i_rated

            def const_Urated(x):
                # Equality residual: electrical power minus rated power,
                # scaled by 1e-4 to condition the optimizer
                pitch_i = x[0]
                Uhub_i = x[1]
                Omega_i = min([Uhub_i * tsr / R_tip, Omega_max])
                Omega_i_rpm = Omega_i * 30.0 / np.pi
                myout, _ = self.ccblade.evaluate([Uhub_i], [Omega_i_rpm], [pitch_i], coefficients=False)
                P_aero_i = float(myout["P"])
                # P_i,_ = compute_P_and_eff(P_aero_i.flatten(), P_rated, Omega_i_rpm, driveType, driveEta)
                eff_i = np.interp(Omega_i_rpm, lss_rpm, driveEta)
                P_i = float(P_aero_i * eff_i)
                return 1e-4 * (P_i - P_rated)

            if region2p5:
                # Have to search over both pitch and speed
                x0 = [0.0, U_rated]
                imin = max(i - 3, 0)
                imax = min(i + 2, len(Uhub) - 1)
                bnds = [[0.0, 15.0], [Uhub[imin] + TOL, Uhub[imax] - TOL]]
                const = {}
                const["type"] = "eq"
                const["fun"] = const_Urated
                params_rated = minimize(
                    lambda x: x[1], x0, method="slsqp", bounds=bnds, constraints=const, tol=TOL, options={"disp": False}
                )

                if params_rated.success and not np.isnan(params_rated.x[1]):
                    U_rated = params_rated.x[1]
                    pitch_rated = params_rated.x[0]
                else:
                    U_rated = U_rated  # Use guessed value earlier
                    pitch_rated = 0.0
            else:
                # Just search over speed
                pitch_rated = 0.0
                try:
                    U_rated = brentq(
                        lambda x: const_Urated([0.0, x]),
                        Uhub[i - 2],
                        Uhub[i + 2],
                        xtol=1e-1 * TOL,
                        rtol=1e-2 * TOL,
                        maxiter=40,
                        disp=False,
                    )
                except ValueError:
                    # brentq needs a sign change in the bracket; fall back to
                    # minimizing the absolute residual if none exists
                    U_rated = minimize_scalar(
                        lambda x: np.abs(const_Urated([0.0, x])),
                        bounds=[Uhub[i - 2], Uhub[i + 2]],
                        method="bounded",
                        options={"disp": False, "xatol": TOL, "maxiter": 40},
                    )["x"]

            # Re-evaluate at the converged rated point
            Omega_tsr_rated = U_rated * tsr / R_tip
            Omega_rated = np.minimum(Omega_tsr_rated, Omega_max)
            Omega_rpm_rated = Omega_rated * 30.0 / np.pi
            myout, _ = self.ccblade.evaluate([U_rated], [Omega_rpm_rated], [pitch_rated], coefficients=True)
            (
                P_aero_rated,
                T_rated,
                Q_rated,
                M_rated,
                Cp_aero_rated,
                Ct_aero_rated,
                Cq_aero_rated,
                Cm_aero_rated,
            ) = [float(myout[key]) for key in ["P", "T", "Q", "Mb", "CP", "CT", "CQ", "CMb"]]
            eff_rated = np.interp(Omega_rpm_rated, lss_rpm, driveEta)
            Cp_rated = Cp_aero_rated * eff_rated
            P_rated = P_rated

            ## REGION II.5 and RATED with peak shaving##
            if self.peak_thrust_shaving:
                # Thrust cap is a fraction of the (unshaved) rated thrust
                max_T = self.thrust_shaving_coeff * T_rated

                def const_Urated_Tpeak(x):
                    # Two equality residuals: hold rated power AND the thrust cap
                    pitch_i = x[0]
                    Uhub_i = x[1]
                    Omega_i = min([Uhub_i * tsr / R_tip, Omega_max])
                    Omega_i_rpm = Omega_i * 30.0 / np.pi
                    myout, _ = self.ccblade.evaluate([Uhub_i], [Omega_i_rpm], [pitch_i], coefficients=False)
                    P_aero_i = float(myout["P"])
                    # P_i,_ = compute_P_and_eff(P_aero_i.flatten(), P_rated, Omega_i_rpm, driveType, driveEta)
                    eff_i = np.interp(Omega_i_rpm, lss_rpm, driveEta)
                    P_i = float(P_aero_i * eff_i)
                    T_i = float(myout["T"])
                    return 1e-4 * (P_i - P_rated), 1e-4 * (T_i - max_T)

                # Have to search over both pitch and speed
                x0 = [0.0, U_rated]
                bnds = [[0.0, 15.0], [Uhub[i - 2] + TOL, Uhub[-1] - TOL]]
                const = {}
                const["type"] = "eq"
                const["fun"] = const_Urated_Tpeak
                params_rated = minimize(
                    lambda x: x[1], x0, method="slsqp", bounds=bnds, constraints=const, tol=TOL, options={"disp": False}
                )

                if params_rated.success and not np.isnan(params_rated.x[1]):
                    U_rated = params_rated.x[1]
                    pitch_rated = params_rated.x[0]
                else:
                    U_rated = U_rated  # Use guessed value earlier
                    pitch_rated = 0.0

                # Re-evaluate at the thrust-shaved rated point
                Omega_tsr_rated = U_rated * tsr / R_tip
                Omega_rated = np.minimum(Omega_tsr_rated, Omega_max)
                Omega_rpm_rated = Omega_rated * 30.0 / np.pi
                myout, _ = self.ccblade.evaluate([U_rated], [Omega_rpm_rated], [pitch_rated], coefficients=True)
                (
                    P_aero_rated,
                    T_rated,
                    Q_rated,
                    M_rated,
                    Cp_aero_rated,
                    Ct_aero_rated,
                    Cq_aero_rated,
                    Cm_aero_rated,
                ) = [float(myout[key]) for key in ["P", "T", "Q", "Mb", "CP", "CT", "CQ", "CMb"]]
                eff_rated = np.interp(Omega_rpm_rated, lss_rpm, driveEta)
                Cp_rated = Cp_aero_rated * eff_rated
                P_rated = P_rated

        else:
            # No rated conditions, so just assume last values
            U_rated = Uhub[-1] + 1e-6
            Omega_tsr_rated = Omega_tsr[-1]
            Omega_rated = Omega[-1]
            Omega_rpm_rated = Omega_rpm[-1]
            pitch_rated = pitch[-1]
            P_aero_rated = P_aero[-1]
            P_rated = P[-1]
            T_rated = T[-1]
            Q_rated = Q[-1]
            M_rated = M[-1]
            Cp_rated = Cp[-1]
            Cp_aero_rated = Cp_aero[-1]
            Ct_aero_rated = Ct_aero[-1]
            Cq_aero_rated = Cq_aero[-1]
            Cm_aero_rated = Cm_aero[-1]
            eff_rated = eff[-1]

        # Store rated speed in array (insert the rated point into the wind grid,
        # keeping all arrays sorted by wind speed)
        Uhub = np.r_[Uhub, U_rated]
        isort = np.argsort(Uhub)
        Uhub = Uhub[isort]
        Omega_tsr = np.r_[Omega_tsr, Omega_tsr_rated][isort]
        Omega = np.r_[Omega, Omega_rated][isort]
        Omega_rpm = np.r_[Omega_rpm, Omega_rpm_rated][isort]
        pitch = np.r_[pitch, pitch_rated][isort]
        P_aero = np.r_[P_aero, P_aero_rated][isort]
        P = np.r_[P, P_rated][isort]
        T = np.r_[T, T_rated][isort]
        Q = np.r_[Q, Q_rated][isort]
        M = np.r_[M, M_rated][isort]
        Cp = np.r_[Cp, Cp_rated][isort]
        Cp_aero = np.r_[Cp_aero, Cp_aero_rated][isort]
        Ct_aero = np.r_[Ct_aero, Ct_aero_rated][isort]
        Cq_aero = np.r_[Cq_aero, Cq_aero_rated][isort]
        Cm_aero = np.r_[Cm_aero, Cm_aero_rated][isort]
        eff = np.r_[eff, eff_rated][isort]
        i_rated = np.where(Uhub == U_rated)[0][0]
        i_3 = np.minimum(i_rated + 1, self.n_pc)

        # Set rated conditions for rest of powercurve
        Omega[i_rated:] = Omega_rated  # Stay at this speed if hit rated too early
        Omega_rpm = Omega * 30.0 / np.pi

        ## REGION II ##
        # Functions to be used inside of power maximization until Region 3
        def maximizePower(pitch_i, Uhub_i, Omega_rpm_i):
            # Negative MW so that "minimize" maximizes power
            myout, _ = self.ccblade.evaluate([Uhub_i], [Omega_rpm_i], [pitch_i], coefficients=False)
            return -myout["P"] * 1e-6

        def constr_Tmax(pitch_i, Uhub_i, Omega_rpm_i):
            # Inequality residual, >= 0 when thrust is under the cap
            myout, _ = self.ccblade.evaluate([Uhub_i], [Omega_rpm_i], [pitch_i], coefficients=False)
            return 1e-5 * (max_T - float(myout["T"]))

        # Maximize power until rated
        for i in range(i_3):
            # No need to optimize if already doing well or if flag
            # fix_pitch_regI12, which locks pitch in region I 1/2, is on
            if (
                ((Omega[i] == Omega_tsr[i]) and not self.peak_thrust_shaving)
                or ((Omega[i] == Omega_tsr[i]) and self.peak_thrust_shaving and (T[i] <= max_T))
                or ((Omega[i] == Omega_min) and self.fix_pitch_regI12)
                or (found_rated and (i == i_rated))
            ):
                continue

            # Find pitch value that gives highest power rating
            pitch0 = pitch[i] if i == 0 else pitch[i - 1]
            bnds = [pitch0 - 10.0, pitch0 + 10.0]
            if self.peak_thrust_shaving and found_rated:
                # Have to constrain thrust
                const = {}
                const["type"] = "ineq"
                const["fun"] = lambda x: constr_Tmax(x, Uhub[i], Omega_rpm[i])
                params = minimize(
                    lambda x: maximizePower(x, Uhub[i], Omega_rpm[i]),
                    pitch0,
                    method="slsqp",  # "cobyla",
                    bounds=[bnds],
                    constraints=const,
                    tol=TOL,
                    options={"maxiter": 20, "disp": False},  #'catol':0.01*max_T},
                )
                pitch[i] = params.x[0]
            else:
                # Only adjust pitch
                pitch[i] = minimize_scalar(
                    lambda x: maximizePower(x, Uhub[i], Omega_rpm[i]),
                    bounds=bnds,
                    method="bounded",
                    options={"disp": False, "xatol": TOL, "maxiter": 40},
                )["x"]

            # Find associated power
            myout, _ = self.ccblade.evaluate([Uhub[i]], [Omega_rpm[i]], [pitch[i]], coefficients=True)
            P_aero[i], T[i], Q[i], M[i], Cp_aero[i], Ct_aero[i], Cq_aero[i], Cm_aero[i] = [
                myout[key] for key in ["P", "T", "Q", "Mb", "CP", "CT", "CQ", "CMb"]
            ]
            # P[i], eff[i] = compute_P_and_eff(P_aero[i], P_rated, Omega_rpm[i], driveType, driveEta)
            eff[i] = np.interp(Omega_rpm[i], lss_rpm, driveEta)
            P[i] = P_aero[i] * eff[i]
            Cp[i] = Cp_aero[i] * eff[i]

        ## REGION III ##
        # JPJ: this part can be converted into a BalanceComp with a solver.
        # This will be less expensive and allow us to get derivatives through the process.
        if region3:
            # Function to be used to stay at rated power in Region 3
            def rated_power_dist(pitch_i, Uhub_i, Omega_rpm_i):
                myout, _ = self.ccblade.evaluate([Uhub_i], [Omega_rpm_i], [pitch_i], coefficients=False)
                P_aero_i = myout["P"]
                eff_i = np.interp(Omega_rpm_i, lss_rpm, driveEta)
                P_i = P_aero_i * eff_i
                return 1e-4 * (P_i - P_rated)

            # Solve for Region 3 pitch
            if self.regulation_reg_III:
                for i in range(i_3, self.n_pc):
                    pitch0 = pitch[i - 1]
                    bnds = ([pitch0, pitch0 + 15.0],)
                    try:
                        pitch[i] = brentq(
                            lambda x: rated_power_dist(x, Uhub[i], Omega_rpm[i]),
                            bnds[0][0],
                            bnds[0][1],
                            xtol=1e-1 * TOL,
                            rtol=1e-2 * TOL,
                            maxiter=40,
                            disp=False,
                        )
                    except ValueError:
                        # No sign change in bracket: minimize |residual| instead
                        pitch[i] = minimize_scalar(
                            lambda x: np.abs(rated_power_dist(x, Uhub[i], Omega_rpm[i])),
                            bounds=bnds[0],
                            method="bounded",
                            options={"disp": False, "xatol": TOL, "maxiter": 40},
                        )["x"]

                    myout, _ = self.ccblade.evaluate([Uhub[i]], [Omega_rpm[i]], [pitch[i]], coefficients=True)
                    P_aero[i], T[i], Q[i], M[i], Cp_aero[i], Ct_aero[i], Cq_aero[i], Cm_aero[i] = [
                        myout[key] for key in ["P", "T", "Q", "Mb", "CP", "CT", "CQ", "CMb"]
                    ]
                    eff[i] = np.interp(Omega_rpm[i], lss_rpm, driveEta)
                    P[i] = P_aero[i] * eff[i]
                    Cp[i] = Cp_aero[i] * eff[i]
                    # P[i] = P_rated

                    # If we are thrust shaving, then check if this is a point that must be modified
                    if self.peak_thrust_shaving and T[i] >= max_T:
                        const = {}
                        const["type"] = "ineq"
                        const["fun"] = lambda x: constr_Tmax(x, Uhub[i], Omega_rpm[i])
                        params = minimize(
                            lambda x: np.abs(rated_power_dist(x, Uhub[i], Omega_rpm[i])),
                            pitch0,
                            method="slsqp",
                            bounds=bnds,
                            constraints=const,
                            tol=TOL,
                            options={"disp": False},
                        )
                        if params.success and not np.isnan(params.x[0]):
                            pitch[i] = params.x[0]
                            myout, _ = self.ccblade.evaluate([Uhub[i]], [Omega_rpm[i]], [pitch[i]], coefficients=True)
                            P_aero[i], T[i], Q[i], M[i], Cp_aero[i], Ct_aero[i], Cq_aero[i], Cm_aero[i] = [
                                myout[key] for key in ["P", "T", "Q", "Mb", "CP", "CT", "CQ", "CMb"]
                            ]
                            eff[i] = np.interp(Omega_rpm[i], lss_rpm, driveEta)
                            P[i] = P_aero[i] * eff[i]
                            Cp[i] = Cp_aero[i] * eff[i]
                            # P[i] = P_rated

            else:
                # No Region-3 regulation: clamp power and zero loads past rated
                P[i_3:] = P_rated
                P_aero[i_3:] = P_aero[i_3 - 1]
                T[i_3:] = 0
                Q[i_3:] = P[i_3:] / Omega[i_3:]
                M[i_3:] = 0
                pitch[i_3:] = 0
                Cp[i_3:] = P[i_3:] / (0.5 * inputs["rho"] * np.pi * R_tip**2 * Uhub[i_3:] ** 3)
                Cp_aero[i_3:] = P_aero[i_3:] / (0.5 * inputs["rho"] * np.pi * R_tip**2 * Uhub[i_3:] ** 3)
                Ct_aero[i_3:] = 0
                Cq_aero[i_3:] = 0
                Cm_aero[i_3:] = 0

        ## END POWERCURVE ##

        # Store outputs
        outputs["T"] = T
        outputs["Q"] = Q
        outputs["Omega"] = Omega_rpm
        outputs["P"] = P
        outputs["Cp"] = Cp
        outputs["P_aero"] = P_aero
        outputs["Cp_aero"] = Cp_aero
        outputs["Ct_aero"] = Ct_aero
        outputs["Cq_aero"] = Cq_aero
        outputs["Cm_aero"] = Cm_aero
        outputs["V"] = Uhub
        outputs["M"] = M
        outputs["pitch"] = pitch
        outputs["rated_V"] = np.float_(U_rated)
        outputs["rated_Omega"] = Omega_rpm_rated
        outputs["rated_pitch"] = pitch_rated
        outputs["rated_T"] = T_rated
        outputs["rated_Q"] = Q_rated
        outputs["rated_mech"] = P_aero_rated
        outputs["rated_efficiency"] = eff_rated

        # Spanwise distributions at the operating point closest to the design TSR
        self.ccblade.induction_inflow = True
        tsr_vec = Omega_rpm / 30.0 * np.pi * R_tip / Uhub
        id_regII = np.argmin(abs(tsr_vec - inputs["tsr_operational"]))
        loads, derivs = self.ccblade.distributedAeroLoads(Uhub[id_regII], Omega_rpm[id_regII], pitch[id_regII], 0.0)

        # outputs
        outputs["ax_induct_regII"] = loads["a"]
        outputs["tang_induct_regII"] = loads["ap"]
        outputs["aoa_regII"] = loads["alpha"]
        outputs["cl_regII"] = loads["Cl"]
        outputs["cd_regII"] = loads["Cd"]
        outputs["L_D"] = loads["Cl"] / loads["Cd"]
        outputs["Cp_regII"] = Cp_aero[id_regII]
        outputs["Ct_regII"] = Ct_aero[id_regII]
class ComputeSplines(ExplicitComponent):
    """
    Compute splined quantities for V, P, and Omega.

    Resamples the coarse power curve (``V``, ``P``, ``Omega``) onto a denser,
    linearly spaced wind-speed grid of ``n_pc_spline`` points using monotone
    PCHIP interpolation (which avoids the overshoot a cubic spline can
    introduce near the rated-power knee).
    """

    def initialize(self):
        self.options.declare("modeling_options")

    def setup(self):
        modeling_options = self.options["modeling_options"]
        self.n_pc = modeling_options["WISDEM"]["RotorSE"]["n_pc"]
        self.n_pc_spline = modeling_options["WISDEM"]["RotorSE"]["n_pc_spline"]

        self.add_input("v_min", val=0.0, units="m/s", desc="cut-in wind speed")
        self.add_input("v_max", val=0.0, units="m/s", desc="cut-out wind speed")
        self.add_input("V", val=np.zeros(self.n_pc), units="m/s", desc="wind vector")
        self.add_input("Omega", val=np.zeros(self.n_pc), units="rpm", desc="rotor rotational speed")
        self.add_input("P", val=np.zeros(self.n_pc), units="W", desc="rotor electrical power")

        self.add_output("V_spline", val=np.zeros(self.n_pc_spline), units="m/s", desc="wind vector")
        self.add_output("P_spline", val=np.zeros(self.n_pc_spline), units="W", desc="rotor electrical power")
        self.add_output("Omega_spline", val=np.zeros(self.n_pc_spline), units="rpm", desc="omega")

        # V_spline partials are analytic (see compute_partials); the
        # interpolated outputs fall back to finite differencing.
        self.declare_partials(of="V_spline", wrt="v_min")
        self.declare_partials(of="V_spline", wrt="v_max")

        self.declare_partials(of="P_spline", wrt="v_min", method="fd")
        self.declare_partials(of="P_spline", wrt="v_max", method="fd")
        self.declare_partials(of="P_spline", wrt="V", method="fd")
        self.declare_partials(of="P_spline", wrt="P", method="fd")

        self.declare_partials(of="Omega_spline", wrt="v_min", method="fd")
        self.declare_partials(of="Omega_spline", wrt="v_max", method="fd")
        self.declare_partials(of="Omega_spline", wrt="V", method="fd")
        self.declare_partials(of="Omega_spline", wrt="Omega", method="fd")

    def compute(self, inputs, outputs):
        # Fit spline to powercurve for higher grid density
        V_spline = np.linspace(inputs["v_min"], inputs["v_max"], self.n_pc_spline)
        spline = PchipInterpolator(inputs["V"], inputs["P"])
        P_spline = spline(V_spline)
        spline = PchipInterpolator(inputs["V"], inputs["Omega"])
        Omega_spline = spline(V_spline)

        # outputs
        outputs["V_spline"] = V_spline.flatten()
        outputs["P_spline"] = P_spline.flatten()
        outputs["Omega_spline"] = Omega_spline.flatten()

    def compute_partials(self, inputs, partials):
        # Analytic derivatives of the linearly spaced wind grid with respect
        # to its endpoints.  (A stray no-op reference to linspace_with_deriv
        # on its own line was removed here.)
        V_spline, dy_dstart, dy_dstop = linspace_with_deriv(inputs["v_min"], inputs["v_max"], self.n_pc_spline)
        partials["V_spline", "v_min"] = dy_dstart
        partials["V_spline", "v_max"] = dy_dstop
# Class to define a constraint so that the blade cannot operate in stall conditions
class NoStallConstraint(ExplicitComponent):
    """Constraint component: ratio of (angle of attack + margin) to the
    stall angle along the blade span.  Values above 1 indicate the section
    is operating within ``stall_margin`` degrees of stall."""

    def initialize(self):
        self.options.declare("modeling_options")

    def setup(self):
        modeling_options = self.options["modeling_options"]
        self.n_span = n_span = modeling_options["WISDEM"]["RotorSE"]["n_span"]
        self.n_aoa = n_aoa = modeling_options["WISDEM"]["RotorSE"]["n_aoa"]  # Number of angle of attacks
        self.n_Re = n_Re = modeling_options["WISDEM"]["RotorSE"]["n_Re"]  # Number of Reynolds, so far hard set at 1
        self.n_tab = n_tab = modeling_options["WISDEM"]["RotorSE"][
            "n_tab"
        ]  # Number of tabulated data. For distributed aerodynamic control this could be > 1

        self.add_input(
            "s",
            val=np.zeros(n_span),
            desc="1D array of the non-dimensional spanwise grid defined along blade axis (0-blade root, 1-blade tip)",
        )
        self.add_input("aoa_along_span", val=np.zeros(n_span), units="deg", desc="Angle of attack along blade span")
        self.add_input("stall_margin", val=3.0, units="deg", desc="Minimum margin from the stall angle")
        self.add_input(
            "min_s",
            val=0.25,
            desc="Minimum nondimensional coordinate along blade span where to define the constraint (blade root typically stalls)",
        )
        self.add_input("airfoils_cl", val=np.zeros((n_span, n_aoa, n_Re, n_tab)), desc="lift coefficients, spanwise")
        self.add_input("airfoils_cd", val=np.zeros((n_span, n_aoa, n_Re, n_tab)), desc="drag coefficients, spanwise")
        self.add_input("airfoils_cm", val=np.zeros((n_span, n_aoa, n_Re, n_tab)), desc="moment coefficients, spanwise")
        self.add_input("airfoils_aoa", val=np.zeros((n_aoa)), units="deg", desc="angle of attack grid for polars")

        self.add_output(
            "no_stall_constraint",
            val=np.zeros(n_span),
            desc="Constraint, ratio between angle of attack plus a margin and stall angle",
        )
        self.add_output(
            "stall_angle_along_span", val=np.zeros(n_span), units="deg", desc="Stall angle along blade span"
        )

    def compute(self, inputs, outputs):
        # Constraint is only enforced outboard of min_s (root sections stall by design)
        i_min = np.argmin(abs(inputs["min_s"] - inputs["s"]))

        # Extract the static stall angle (alpha1) from each section's polar.
        # Only the first Re / first tab slice of the polar grids is used here.
        for i in range(self.n_span):
            unsteady = eval_unsteady(
                inputs["airfoils_aoa"],
                inputs["airfoils_cl"][i, :, 0, 0],
                inputs["airfoils_cd"][i, :, 0, 0],
                inputs["airfoils_cm"][i, :, 0, 0],
            )
            outputs["stall_angle_along_span"][i] = unsteady["alpha1"]
            if outputs["stall_angle_along_span"][i] == 0:
                outputs["stall_angle_along_span"][i] = 1e-6  # To avoid nan

        for i in range(i_min, self.n_span):
            outputs["no_stall_constraint"][i] = (inputs["aoa_along_span"][i] + inputs["stall_margin"]) / outputs[
                "stall_angle_along_span"
            ][i]

            # Degenerate stall angle (polar extraction failed): zero out the
            # constraint so the optimizer is not driven by a bogus ratio.
            # NOTE(review): the debug message reads as a margin violation, but
            # this branch fires on a degenerate stall angle — confirm intent.
            if outputs["stall_angle_along_span"][i] <= 1.0e-6:
                outputs["no_stall_constraint"][i] = 0.0
                logger.debug(
                    "Blade is violating the minimum margin to stall at span location %.2f %%" % (inputs["s"][i] * 100.0)
                )
class AEP(ExplicitComponent):
    """Integrate the power curve against the wind-speed CDF to obtain the
    annual energy production (AEP) in kWh.

    Cleanup: two stray no-op string literals were removed — one floating
    "integrate to find annual energy production" string in setup, and a
    large triple-quoted block of dead derivative code that also swallowed a
    fake ``compute_partials`` definition.  Neither had any runtime effect.
    """

    def initialize(self):
        self.options.declare("nspline")

    def setup(self):
        n_pc_spline = self.options["nspline"]
        # inputs
        self.add_input(
            "CDF_V",
            val=np.zeros(n_pc_spline),
            units="m/s",
            desc="cumulative distribution function evaluated at each wind speed",
        )
        self.add_input("P", val=np.zeros(n_pc_spline), units="W", desc="power curve (power)")
        self.add_input(
            "lossFactor", val=1.0, desc="multiplicative factor for availability and other losses (soiling, array, etc.)"
        )

        # outputs
        self.add_output("AEP", val=0.0, units="kW*h", desc="annual energy production")

        # No partials are declared; derivatives of AEP are not provided here.
        # self.declare_partials('*', '*', method='fd', form='central', step=1e-6)

    def compute(self, inputs, outputs):
        """Trapezoidal integration of P(V) over the wind-speed CDF."""
        lossFactor = inputs["lossFactor"]
        P = inputs["P"]
        CDF_V = inputs["CDF_V"]
        # W -> kW (1e3) times hours per year (365 * 24), derated by lossFactor
        factor = lossFactor / 1e3 * 365.0 * 24.0
        outputs["AEP"] = factor * np.trapz(P, CDF_V)  # in kWh
def compute_P_and_eff(aeroPower, ratedPower, Omega_rpm, drivetrainType, drivetrainEff):
    """Return (electrical power, drivetrain efficiency) for the given aero power.

    If ``drivetrainEff`` contains any nonzero entries it is treated as a
    lookup table (column 0: lss rpm, column 1: total efficiency) and the
    efficiency is interpolated at ``Omega_rpm``.  Otherwise an empirical
    loss model (constant / linear / quadratic terms keyed by drivetrain
    type) is evaluated on the power fraction ``aeroPower / ratedPower``.

    Raises ValueError for an unrecognized drivetrain type.
    """
    if np.any(drivetrainEff):
        # Use table lookup from rpm to calculate total efficiency
        eff = np.interp(Omega_rpm, drivetrainEff[:, 0], drivetrainEff[:, 1])
        return aeroPower * eff, eff

    # Empirical loss coefficients: (constant, linear, quadratic)
    _LOSS_COEFFS = {
        "GEARED": (0.01289, 0.08510, 0.0),
        "SINGLE_STAGE": (0.01331, 0.03655, 0.06107),
        "MULTI_DRIVE": (0.01547, 0.04463, 0.05790),
        "PM_DIRECT_DRIVE": (0.01007, 0.02000, 0.06899),
        "DIRECT_DRIVE": (0.01007, 0.02000, 0.06899),
        "DIRECT DRIVE": (0.01007, 0.02000, 0.06899),
        "CONSTANT_EFF": (0.00, 0.07, 0.0),
    }
    key = drivetrainType.upper()
    if key not in _LOSS_COEFFS:
        raise ValueError("The drivetrain model is not supported! Please check rotor_power.py")
    constant, linear, quadratic = _LOSS_COEFFS[key]

    Pbar0 = aeroPower / ratedPower

    # handle negative power case (with absolute value)
    Pbar1, dPbar1_dPbar0 = smooth_abs(Pbar0, dx=0.01)

    # truncate idealized power curve for purposes of efficiency calculation
    Pbar, dPbar_dPbar1, _ = smooth_min(Pbar1, 1.0, pct_offset=0.01)

    # compute efficiency, floored at 1e-3 to avoid nonsensical values
    eff = np.maximum(1.0 - (constant / Pbar + linear + quadratic * Pbar), 1e-3)

    return aeroPower * eff, eff
def eval_unsteady(alpha, cl, cd, cm):
    """Build the unsteady-aerodynamics parameter dict for OpenFAST's AeroDyn
    from a static polar (alpha in deg, with cl/cd/cm on the same grid).

    The quantitative parameters (alpha0/1/2, Cn slopes, etc.) come from
    Polar.unsteadyParams(); everything else is filled with AeroDyn defaults.
    If the polar fit fails for any reason, the numeric parameters fall back
    to zero so that downstream consumers still receive a complete dict.
    """
    # calculate unsteady coefficients from polars for OpenFAST's Aerodyn
    unsteady = {}

    Re = 1e6  # Does not factor into any calculations

    try:
        mypolar = Polar(Re, alpha, cl, cd, cm, compute_params=True, radians=False)
        (alpha0, alpha1, alpha2, cnSlope, cn1, cn2, cd0, cm0) = mypolar.unsteadyParams()
    except Exception:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit.  The zero fallback for a failed polar fit is kept.
        alpha0 = alpha1 = alpha2 = cnSlope = cn1 = cn2 = cd0 = cm0 = 0.0

    unsteady["alpha0"] = alpha0
    unsteady["alpha1"] = alpha1
    unsteady["alpha2"] = alpha2
    # NOTE(review): cd0 is computed above but Cd0 is hard-coded to 0.0 here —
    # confirm whether this is intentional.
    unsteady["Cd0"] = 0.0
    unsteady["Cm0"] = cm0
    unsteady["Cn1"] = cn1
    unsteady["Cn2"] = cn2
    unsteady["C_nalpha"] = cnSlope

    # AeroDyn defaults for the remaining unsteady-model constants
    unsteady["eta_e"] = 1
    unsteady["T_f0"] = "Default"
    unsteady["T_V0"] = "Default"
    unsteady["T_p"] = "Default"
    unsteady["T_VL"] = "Default"
    unsteady["b1"] = "Default"
    unsteady["b2"] = "Default"
    unsteady["b5"] = "Default"
    unsteady["A1"] = "Default"
    unsteady["A2"] = "Default"
    unsteady["A5"] = "Default"
    unsteady["S1"] = 0
    unsteady["S2"] = 0
    unsteady["S3"] = 0
    unsteady["S4"] = 0
    unsteady["St_sh"] = "Default"
    unsteady["k0"] = 0
    unsteady["k1"] = 0
    unsteady["k2"] = 0
    unsteady["k3"] = 0
    unsteady["k1_hat"] = 0
    unsteady["x_cp_bar"] = "Default"
    unsteady["UACutout"] = "Default"
    unsteady["filtCutOff"] = "Default"

    # Pass the raw polar through as well
    unsteady["Alpha"] = alpha
    unsteady["Cl"] = cl
    unsteady["Cd"] = cd
    unsteady["Cm"] = cm

    return unsteady
|
9aa7fa4963d7e54db9d917f1f9322015b71c3ebe
|
e04a5b20f946c5033f24d4dd8acda395a98747c5
|
/h2o/custom-recipes/h2o-build-model/recipe.py
|
0d5a7646838446942407d4380b65ee888bfbdea9
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
dataiku/dataiku-contrib
|
2a2f2fb420d7f2ab49b82d80659cc6f6ec1d8f61
|
9a9f189e8a544a81c205d8a8b3779d4517b88653
|
refs/heads/master
| 2023-09-04T03:33:58.625093
| 2023-04-26T08:17:34
| 2023-04-26T08:17:34
| 45,074,604
| 103
| 94
|
Apache-2.0
| 2023-06-08T21:29:07
| 2015-10-27T22:41:00
|
Python
|
UTF-8
|
Python
| false
| false
| 3,962
|
py
|
recipe.py
|
# coding: utf-8
# Dataiku DSS custom recipe (Python 2 — note the print statements): trains an
# H2O model on a DSS dataset via a remote H2O cluster, then saves the model,
# its config, and a text summary into a DSS managed folder.
import dataiku
from dataiku.customrecipe import *
import pandas as pd
import os, sys, json, shutil
import h2o
from h2o_utils import DSS_dataset_to_H2O_frame, saved_model_folder

## init h2o
# Connect to the H2O server configured at the plugin level
ip = get_plugin_config()['h2o_server_ip']
port = int(get_plugin_config()['h2o_server_port'])
h2o.init(ip, port)

## create train_frame
# Load the training dataset into an H2OFrame and coerce the user-selected
# columns to categorical (factor) type
factor_columns = get_recipe_config().get('factor_columns',[])
train_dataset = get_input_names_for_role('train_set')[0]
train_frame = DSS_dataset_to_H2O_frame(train_dataset)
for col in factor_columns:
    train_frame[col] = train_frame[col].asfactor()

## create valid_frame if ratio or validation set provided
valid_frame = None
train_ratio = get_recipe_config().get('train_ratio',-1.)
# -1. is the "not set" sentinel; 1. would mean "use everything for training"
if train_ratio != -1. and train_ratio != 1. :
    old_train = train_frame
    train_frame, valid_frame = train_frame.split_frame(ratios=[train_ratio])
    print 'Split {} lines into {} for training and {} for validation'.format(len(old_train), len(train_frame), len(valid_frame))
valid_names = get_input_names_for_role('validation_set')
if valid_names:
    # A split ratio and an explicit validation dataset are mutually exclusive
    if train_ratio != -1. :
        raise Exception("You may specify either an input_dataset for validation, or a train ratio, but not both.")
    valid_frame = DSS_dataset_to_H2O_frame(valid_names[0])
    for col in get_recipe_config().get('factor_columns',[]):
        valid_frame[col] = valid_frame[col].asfactor()

## create target if needed
algorithm = get_recipe_config().get('algorithm')
kwargs = dict()

def needs_target(algo):
    # Supervised algorithms require a target column; the others
    # (autoencoder, glrm, kmeans, prcomp, svd) are unsupervised
    return algo in ['deeplearning', 'gbm', 'glm', 'naive_bayes', 'random_forest']

if needs_target(algorithm):
    target = get_recipe_config().get('target')
    if not target or target == '':
        raise Exception('algorithm ' + algorithm + ' needs a target, please review the recipe\'s settings.')
    kwargs['y'] = train_frame[target]
    kwargs['x'] = train_frame.drop(target)
else:
    kwargs['x'] = train_frame
if valid_frame is not None:
    if needs_target(algorithm):
        kwargs['validation_y'] = valid_frame[target]
        kwargs['validation_x'] = valid_frame.drop(target)
    else:
        kwargs['validation_x'] = valid_frame

## create output_folder and dump model config
output_folder = dataiku.Folder(get_output_names_for_role('output_folder')[0])
output_folder_path = output_folder.get_path()
# clean it: works only on local FS
# for file in os.listdir(output_folder_path):
#     path = os.path.join(output_folder_path, file)
#     if os.path.isfile(path): os.unlink(path)
#     elif os.path.isdir (path): shutil.rmtree(path)
# Persist the factor columns and input dataset type so the scoring recipe
# can reconstruct a compatible frame later
model_config = {
    'factor_columns':factor_columns,
    'input_type': dataiku.Dataset(train_dataset).get_config()['type'] }
with open(os.path.join(output_folder_path, 'model_config.json'),'w') as file:
    file.write(json.dumps(model_config,indent=4))

## set final parameters, train model
# Extra algorithm parameters come in as a free-form JSON string
params = get_recipe_config().get('algorithm_parameters','{}')
if params == '': params = '{}'
kwargs.update(json.loads(params))
# Deterministic model id so the model can be located on the H2O cluster
kwargs['model_id'] = 'DSS.H2O_connector.model.' + output_folder.full_name + '.' + algorithm
algorithms = {
    'autoencoder': h2o.h2o.autoencoder,
    'deeplearning': h2o.h2o.deeplearning,
    'gbm': h2o.h2o.gbm,
    'glm': h2o.h2o.glm,
    'glrm': h2o.h2o.glrm,
    'kmeans': h2o.h2o.kmeans,
    'naive_bayes': h2o.h2o.naive_bayes,
    'prcomp': h2o.h2o.prcomp,
    'random_forest': h2o.h2o.random_forest,
    'svd': h2o.h2o.svd,
}
# print 'Arguments passed to H2O: ', kwargs  # This makes the job fail with exception None ??
model = algorithms[algorithm](**kwargs)

## save model summary in output_folder and model to disk
# model.show() only prints to stdout, so temporarily redirect stdout to file
with open(os.path.join(output_folder_path, 'model_summary.txt'),'w') as file:
    orig_stdout = sys.stdout
    sys.stdout = file
    model.show()  # this method uses print to write to stdout
    sys.stdout = orig_stdout
h2o.h2o.save_model(
    model,
    saved_model_folder(model_config, output_folder),
    force=True)  # "force" means overwrite
|
5336fb2fc5f663884881e6ec71ce361087510092
|
40282fc3afc28166ce01cdf2240d445a1930f2b0
|
/docs/sections/section5/solutions/breakout_1_sol.py
|
003975ee8e69ce82e64900a0fb14ea887ffb7b23
|
[
"MIT"
] |
permissive
|
Harvard-IACS/2020-CS109A
|
7dac61f88aefe9647fe7e3eabb3dc6ef85cc8d73
|
665100fec24309edb818a51bc8c29db2912d370f
|
refs/heads/master
| 2022-07-31T18:05:47.127653
| 2021-11-17T22:30:00
| 2021-11-17T22:30:00
| 287,811,847
| 114
| 123
|
MIT
| 2022-05-04T06:26:14
| 2020-08-15T19:28:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 937
|
py
|
breakout_1_sol.py
|
# init breakout 1 solution
# NOTE(review): this is a notebook exercise snippet — it assumes
# LogisticRegression, np, and the X_wine_*/y_wine_* splits are already
# defined in the calling notebook's namespace.

# Train model
# penalty='none' fits plain (unregularized) multinomial logistic regression
model1_wine = LogisticRegression(penalty='none').fit(X_wine_train, y_wine_train)

# Score model (sklearn's .score returns mean accuracy)
train_score = model1_wine.score(X_wine_train, y_wine_train)
test_score = model1_wine.score(X_wine_test, y_wine_test)

# Print scores
print("Training Set Accuracy: {:.4f}".format(train_score))
print("Testing Set Accuracy: {:.4f}\n".format(test_score))

# Predict probabilities for our training data
y_proba_train = model1_wine.predict_proba(X_wine_train)

# Check shape of our predictions to show that we have 3 probabilities predicted
# for each observation (i.e. predicted probabilities for each of our 3 classes)
print(
    "The shape of our predicted training probabilities array: {}\n"
    .format(y_proba_train.shape)
)

# Sum all 3 classes at each observation
# (each row should sum to 1, since the class probabilities are exhaustive)
print(
    "The sum of predicted probabilities for all 3 classes by observation:\n\n{}"
    .format(np.sum(y_proba_train, axis=1))
)
|
e0da69acd1b096f5475b650be74766344cd7511b
|
6a85191d6c2ae1e0db5873e7c6cb5d341eb72253
|
/benchs/bench_speed.py
|
6709e121c14fd4195aa06b4d859040aac920dbca
|
[
"MIT"
] |
permissive
|
KinglittleQ/torch-batch-svd
|
3875b182ec2b496fda46dacbbb4e972b741d8223
|
c0a96119187f7d55f939d2ff2b92942c6d6ca930
|
refs/heads/master
| 2022-10-23T06:23:39.906646
| 2022-10-10T05:27:37
| 2022-10-10T05:27:37
| 171,496,660
| 356
| 36
|
MIT
| 2022-05-04T02:51:00
| 2019-02-19T15:17:40
|
C++
|
UTF-8
|
Python
| false
| false
| 837
|
py
|
bench_speed.py
|
import torch
from torch_batch_svd import svd
def bench_speed(N, H, W):
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
torch.manual_seed(0)
a = torch.randn(N, H, W).cuda()
b = a.clone().cuda()
torch.cuda.synchronize()
start.record()
for i in range(100):
U, S, V = svd(a)
end.record()
torch.cuda.synchronize()
t = start.elapsed_time(end) / 100
print("Perform batched SVD on a {}x{}x{} matrix: {} ms".format(N, H, W, t))
start.record()
U, S, V = torch.svd(b, some=True, compute_uv=True)
end.record()
torch.cuda.synchronize()
t = start.elapsed_time(end)
print("Perform torch.svd on a {}x{}x{} matrix: {} ms".format(N, H, W, t))
if __name__ == "__main__":
bench_speed(10000, 9, 9)
bench_speed(20000, 9, 9)
|
dd73a6754441408b1a4f5ad1b7e3558d638e9101
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/pytests/functional/cli/test_batch.py
|
e721b729cfe4ea93ae8c4b249ab4713abeeb9d80
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 6,639
|
py
|
test_batch.py
|
"""
tests.pytests.functional.cli.test_batch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import salt.cli.batch
import salt.config
import salt.utils.jid
from tests.support.mock import Mock, patch
class MockPub:
"""
Mock salt.client.LocalClient.pub method
"""
calls = 0
initial_ping = False
batch1_jid = None
batch1_tgt = None
batch2_jid = None
batch2_tgt = None
batch3_jid = None
batch3_tgt = None
def __call__(self, tgt, fun, *args, **kwargs):
if tgt == "minion*" and fun == "test.ping":
MockPub.calls += 1
MockPub.initial_ping = salt.utils.jid.gen_jid({})
pub_ret = {
"jid": MockPub.initial_ping,
"minions": ["minion0", "minion1", "minion2", "minion3"],
}
elif fun == "state.sls":
if MockPub.calls == 1:
MockPub.calls += 1
MockPub.batch1_tgt = list(tgt)
MockPub.batch1_jid = jid = salt.utils.jid.gen_jid({})
pub_ret = {"jid": jid, "minions": tgt}
elif MockPub.calls == 2:
MockPub.calls += 1
MockPub.batch2_tgt = tgt
MockPub.batch2_jid = jid = salt.utils.jid.gen_jid({})
pub_ret = {"jid": jid, "minions": tgt}
elif MockPub.calls == 3:
MockPub.calls += 1
MockPub.batch3_tgt = tgt
MockPub.batch3_jid = jid = salt.utils.jid.gen_jid({})
pub_ret = {"jid": jid, "minions": tgt}
elif fun == "saltutil.find_job":
jid = salt.utils.jid.gen_jid({})
pub_ret = {"jid": jid, "minions": tgt}
return pub_ret
class MockSubscriber:
"""
Mock salt.transport.ipc IPCMessageSubscriber in order to inject events into
salt.utils.Event
"""
calls = 0
pubret = None
def __init__(self, *args, **kwargs):
return
def read(self, timeout=None):
"""
Mock IPCMessageSubcriber read method.
- Return events for initial ping
- Returns event for a minion in first batch to cause second batch to get sent.
- Returns 5 null events on first iteration of second batch to go back to first batch.
- On second iteration of first batch, send an event from second batch which will get cached.
- Return events for the rest of the batches.
"""
if MockSubscriber.pubret.initial_ping:
# Send ping responses for 4 minions
jid = MockSubscriber.pubret.initial_ping
if MockSubscriber.calls == 0:
MockSubscriber.calls += 1
return self._ret(jid, minion_id="minion0", fun="test.ping")
elif MockSubscriber.calls == 1:
MockSubscriber.calls += 1
return self._ret(jid, minion_id="minion1", fun="test.ping")
elif MockSubscriber.calls == 2:
MockSubscriber.calls += 1
return self._ret(jid, minion_id="minion2", fun="test.ping")
elif MockSubscriber.calls == 3:
MockSubscriber.calls += 1
return self._ret(jid, minion_id="minion3", fun="test.ping")
if MockSubscriber.pubret.batch1_jid:
jid = MockSubscriber.pubret.batch1_jid
tgt = MockSubscriber.pubret.batch1_tgt
if MockSubscriber.calls == 4:
# Send a return for first minion in first batch. This causes the
# second batch to get sent.
MockSubscriber.calls += 1
return self._ret(jid, minion_id=tgt[0], fun="state.sls")
if MockSubscriber.pubret.batch2_jid:
if MockSubscriber.calls <= 10:
# Skip the first iteration of the second batch; this will cause
# batch logic to go back to iterating over the first batch.
MockSubscriber.calls += 1
return
elif MockSubscriber.calls == 11:
# Send the minion from the second batch, This event will get cached.
jid = MockSubscriber.pubret.batch2_jid
tgt = MockSubscriber.pubret.batch2_tgt
MockSubscriber.calls += 1
return self._ret(jid, minion_id=tgt[0], fun="state.sls")
if MockSubscriber.calls == 12:
jid = MockSubscriber.pubret.batch1_jid
tgt = MockSubscriber.pubret.batch1_tgt
MockSubscriber.calls += 1
return self._ret(jid, minion_id=tgt[1], fun="state.sls")
if MockSubscriber.pubret.batch3_jid:
jid = MockSubscriber.pubret.batch3_jid
tgt = MockSubscriber.pubret.batch3_tgt
if MockSubscriber.calls == 13:
MockSubscriber.calls += 1
return self._ret(jid, minion_id=tgt[0], fun="state.sls")
return
def _ret(self, jid, minion_id, fun, _return=True, _retcode=0):
"""
Create a mock return from a jid, minion, and fun
"""
dumped = salt.payload.dumps(
{
"fun_args": [],
"jid": jid,
"return": _return,
"retcode": 0,
"success": True,
"cmd": "_return",
"fun": fun,
"id": minion_id,
"_stamp": "2021-05-24T01:23:25.373194",
},
use_bin_type=True,
)
tag = "salt/job/{}/ret".format(jid).encode()
return b"".join([tag, b"\n\n", dumped])
def connect(self, timeout=None):
pass
def test_batch_issue_56273():
"""
Regression test for race condition in batch logic.
https://github.com/saltstack/salt/issues/56273
"""
mock_pub = MockPub()
MockSubscriber.pubret = mock_pub
def returns_for_job(jid):
return True
opts = {
"conf_file": "",
"tgt": "minion*",
"fun": "state.sls",
"arg": ["foo"],
"timeout": 1,
"gather_job_timeout": 1,
"batch": 2,
"extension_modules": "",
"failhard": True,
}
with patch("salt.transport.ipc.IPCMessageSubscriber", MockSubscriber):
batch = salt.cli.batch.Batch(opts, quiet=True)
with patch.object(batch.local, "pub", Mock(side_effect=mock_pub)):
with patch.object(
batch.local, "returns_for_job", Mock(side_effect=returns_for_job)
):
ret = list(batch.run())
assert len(ret) == 4
for val, _ in ret:
values = list(val.values())
assert len(values) == 1
assert values[0] is True
|
7b9b11f5f7d3a5a8406787721a4fdba4305924d8
|
12f0bd77926127cdacc2452d6f9cfed91806b2fe
|
/idaes/models_extra/gas_distribution/properties/natural_gas.py
|
b972d431cc493c1ea43d9b25c80a8eea42f78399
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
IDAES/idaes-pse
|
e03d2583ae1ba968a7099f9f439fd8c3efa12904
|
deacf4c422bc9e50cb347e11a8cbfa0195bd4274
|
refs/heads/main
| 2023-08-16T19:13:00.355572
| 2023-08-04T04:19:29
| 2023-08-04T04:19:29
| 168,622,088
| 173
| 227
|
NOASSERTION
| 2023-09-11T16:04:55
| 2019-02-01T01:12:51
|
Python
|
UTF-8
|
Python
| false
| false
| 16,502
|
py
|
natural_gas.py
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
"""
Natural gas property package with a single (pseudo) component for use
in isothermal unit models.
Data sources:
[1] Stochastic Optimal Control Model for Natural Gas Network
Operations. V. Zavala, 2014, Comp. Chem. Eng.
"""
# TODO: Missing docstrings
# pylint: disable=missing-class-docstring
from pyomo.core.base.units_container import units as pyunits
from pyomo.core.base.var import Var
from pyomo.core.base.constraint import Constraint
from pyomo.core.base.expression import Expression
from pyomo.core.base.param import Param
from pyomo.core.expr import sqrt
# Import IDAES cores
from idaes.core import (
PhysicalParameterBlock,
)
import idaes.logger as idaeslog
from idaes.core import (
declare_process_block_class,
VaporPhase,
Component,
MaterialFlowBasis,
StateBlock,
StateBlockData,
)
from idaes.core.util.constants import Constants
# Set up logger
_log = idaeslog.getLogger(__name__)
@declare_process_block_class("NaturalGasParameterBlock")
class NaturalGasParameterBlockData(PhysicalParameterBlock):
"""
Property package for natural gas with a single pseudo-component.
"""
def build(self):
super(NaturalGasParameterBlockData, self).build()
self._state_block_class = NaturalGasStateBlock
self.Vap = VaporPhase()
self.dens_nominal = Param(
initialize=0.72,
units=pyunits.kg / pyunits.m**3,
doc="Density of the gas a standard temperature and pressure",
# Used to convert mass flow rate between kg/hr and SCM/hr
)
nat_gas_data = {"mw": (18.0, pyunits.kg / pyunits.kmol)}
nat_gas_config = {"parameter_data": nat_gas_data}
self.natural_gas = Component(**nat_gas_config)
ng = self.natural_gas
# Set up dictionaries we will rely on to compute heat capacities.
kJkmolK = pyunits.kJ / pyunits.kmol / pyunits.K
kJkmolK2 = pyunits.kJ / pyunits.kmol / pyunits.K**2
kJkmolK3 = pyunits.kJ / pyunits.kmol / pyunits.K**3
kJkmolK4 = pyunits.kJ / pyunits.kmol / pyunits.K**4
ng.cp_mol_coef_A = Param(initialize=2.34 * 18.0, units=kJkmolK)
ng.cp_mol_coef_B = Param(initialize=0.0, units=kJkmolK2)
ng.cp_mol_coef_C = Param(initialize=0.0, units=kJkmolK3)
ng.cp_mol_coef_D = Param(initialize=0.0, units=kJkmolK4)
ng.cv_mol_coef_A = Param(initialize=1.85 * 18.0, units=kJkmolK)
ng.cv_mol_coef_B = Param(initialize=0.0, units=kJkmolK2)
ng.cv_mol_coef_C = Param(initialize=0.0, units=kJkmolK3)
ng.cv_mol_coef_D = Param(initialize=0.0, units=kJkmolK4)
self.temperature_ref = Param(initialize=298.15, units=pyunits.K)
@classmethod
def define_metadata(cls, obj):
kghr = pyunits.kg / pyunits.hr
nondim = pyunits.dimensionless
kgkmolK = pyunits.kg / pyunits.kmol / pyunits.K
kmolm3 = pyunits.kmol / pyunits.m**3
kgm3 = pyunits.kg / pyunits.m**3
kmolhr = pyunits.kmol / pyunits.hr
obj.add_properties(
{
# What do the units in this dict get used for? What if they're
# different than the units we define for each variable in the
# StateBlockData methods?
"flow_mol": {"method": None, "units": kmolhr},
"pressure": {"method": None, "units": pyunits.bar},
"temperature": {"method": None, "units": pyunits.K},
"mole_frac_comp": {"method": None, "units": pyunits.dimensionless},
"flow_mol_comp": {"method": "_flow_mol_comp", "units": kmolhr},
"mw": {"method": "_mw", "units": pyunits.kg / pyunits.kmol},
"flow_mass": {"method": "_flow_mass", "units": kghr},
"cp_mol_comp": {"method": "_cp_mol_comp", "units": kgkmolK},
"cp_mol": {"method": "_cp_mol", "units": kgkmolK},
"cp_mass": {"method": "_cp_mass", "units": kgkmolK},
"cv_mol_comp": {"method": "_cv_mol_comp", "units": kgkmolK},
"cv_mol": {"method": "_cv_mol", "units": kgkmolK},
"cv_mass": {"method": "_cv_mass", "units": kgkmolK},
"heat_capacity_ratio": {
"method": "_heat_capacity_ratio",
"units": nondim,
},
"heat_capacity_ratio_phase": {
"method": "_heat_capacity_ratio_phase",
"units": nondim,
},
"compress_fact": {"method": "_compress_fact", "units": nondim},
"dens_mol": {"method": "_dens_mol", "units": kmolm3},
"dens_mol_comp": {"method": "_dens_mol_comp", "units": kmolm3},
"dens_mass": {"method": "_dens_mass", "units": kgm3},
}
)
# NOTE: We do not implement enthalpy as we are not yet using this
# property package with a non-isothermal pipeline.
obj.define_custom_properties(
{
"speed_of_sound": {
"method": "_speed_of_sound",
"units": obj.derived_units.VELOCITY,
},
}
)
obj.add_default_units(
{
"time": pyunits.hr,
"length": pyunits.km,
"mass": pyunits.kg,
"amount": pyunits.kmol,
"temperature": pyunits.K,
# I would like to specify some reasonable units for area,
# energy, and pressure, but this is not supported in IDAES.
# "area": pyunits.m**2,
# "energy": pyunits.kJ,
# "pressure": pyunits.bar,
}
)
class NaturalGasStateBlock(StateBlock):
# This is confusing.
# (a) Not sure why this class is necessary when I don't want
# to attach any methods.
# (b) Now is NaturalGasStateBlock defined twice? Once here and once
# by NaturalGasStateBlock? This appears to be the case. This
# class gets overridden...
# declare_process_block_class on its own is confusing...
pass
@declare_process_block_class(
"NaturalGasStateBlock",
block_class=NaturalGasStateBlock,
)
class NaturalGasStateBlockData(StateBlockData):
def build(self):
super().build()
# TODO: Initialize to 10 (1e6 SCM)/day
self.flow_mol = Var(
initialize=300.0,
doc="Molar flow rate",
units=pyunits.kmol / pyunits.hr,
)
self.pressure = Var(
initialize=50.0,
doc="Gas pressure",
units=pyunits.bar,
)
self.temperature = Var(
initialize=298.15,
doc="Gas temperature",
units=pyunits.K,
)
component_list = self.config.parameters.component_list
self.mole_frac_comp = Var(
component_list,
initialize=1.0 / len(component_list),
doc="Component mole fractions within the gas",
units=pyunits.dimensionless,
)
if not self.config.defined_state:
# For a single-phase property package, should we just always
# "fix" mole fraction? Probably not, as it will violate assumptions
# when we combine multiple units (ports) as long as mole_frac_comp
# is a state variable, which I would rather not change.
def sum_component_eq_rule(b):
return 1.0 == sum(b.mole_frac_comp[j] for j in component_list)
self.sum_component_eq = Constraint(
rule=sum_component_eq_rule,
doc=(
"Enforces that the sum of mole fractions equals one when\n"
"state variables are not already elsewhere defined."
),
)
def _flow_mol_comp(self):
params = self.config.parameters
component_list = params.component_list
def flow_mol_comp_rule(b, j):
return self.flow_mol * self.mole_frac_comp[j]
self.flow_mol_comp = Expression(
component_list,
rule=flow_mol_comp_rule,
doc="Molar flow rate of a particular component",
)
def _mw(self):
params = self.config.parameters
component_list = params.component_list
mole_frac = self.mole_frac_comp
mw_comp = {j: params.get_component(j).mw for j in component_list}
self.mw = Expression(
expr=sum(mole_frac[j] * mw_comp[j] for j in component_list),
doc="Average molecular weight of the gas",
)
def _flow_mass(self):
# I would like flow_mass to be a variable, so I can fix it.
# However, I leave it as an expression here for consistency with
# generic property packages. A flow_mass variable will be added
# by pipeline unit models so that its derivatives may be computed.
self.flow_mass = Expression(
expr=self.mw * self.flow_mol,
doc="Mass flow rate of the gas",
)
def _cp_mol_comp(self):
params = self.config.parameters
component_list = params.component_list
comp = {j: params.get_component(j) for j in component_list}
def cp_mol_comp_rule(b, j):
return (
comp[j].cp_mol_coef_A
+ comp[j].cp_mol_coef_B * b.temperature
+ comp[j].cp_mol_coef_C * b.temperature**2
+ comp[j].cp_mol_coef_D * b.temperature**3
)
self.cp_mol_comp = Expression(
component_list,
rule=cp_mol_comp_rule,
doc=(
"Pure component constant-pressure molar heat capacity "
"of each component"
),
)
def _cp_mol(self):
component_list = self.config.parameters.component_list
self.cp_mol = Expression(
expr=sum(
self.mole_frac_comp[j] * self.cp_mol_comp[j] for j in component_list
),
doc="Constant-pressure molar heat capacity of the gas mixture",
)
def _cp_mass(self):
self.cp_mass = Expression(
expr=self.cp_mol / self.mw,
doc="Constant-pressure specific heat capacity of the gas mixture",
)
def _cv_mol_comp(self):
params = self.config.parameters
component_list = params.component_list
comp = {j: params.get_component(j) for j in component_list}
def cv_mol_comp_rule(b, j):
return (
comp[j].cv_mol_coef_A
+ comp[j].cv_mol_coef_B * b.temperature
+ comp[j].cv_mol_coef_C * b.temperature**2
+ comp[j].cv_mol_coef_D * b.temperature**3
)
self.cv_mol_comp = Expression(
component_list,
rule=cv_mol_comp_rule,
doc=(
"Pure component constant-volume molar heat capacity "
"of each component"
),
)
def _cv_mol(self):
component_list = self.config.parameters.component_list
self.cv_mol = Expression(
expr=sum(
self.mole_frac_comp[j] * self.cv_mol_comp[j] for j in component_list
),
doc="Constant-volume molar heat capacity of the gas mixture",
)
def _cv_mass(self):
self.cv_mass = Expression(
expr=self.cv_mol / self.mw,
doc="Constant-volume specific heat capacity of the gas mixture",
)
def _heat_capacity_ratio(self):
self.heat_capacity_ratio = Expression(
expr=self.cp_mass / self.cv_mass,
doc=(
"Ratio of constant-volume to constant-pressure heat "
"capacities of the gas mixture"
),
)
def _heat_capacity_ratio_phase(self):
# Pipeline unit models require a heat_capacity_ratio_phase attribute
# for compatibility with generic property packages.
self.heat_capacity_ratio_phase = Expression(
self.config.parameters.phase_list,
expr=self.heat_capacity_ratio.expr,
doc=(
"Ratio of constant-volume to constant-pressure heat "
"capacities of the gas mixture"
),
)
def _compress_fact(self):
# compress_fact is a param because here it is constant.
# It could be a variable/expression, however, in a more complicated
# model.
self.compress_fact = Param(
initialize=0.80,
doc="Compressibility factor of the gas",
)
def _dens_mol(self):
# Make molar density a variable as it is important enough that
# somebody might want to fix it.
self.dens_mol = Var(
initialize=1.0,
units=pyunits.kmol / pyunits.m**3,
doc="Molar density of the gas phase",
)
pressure = self.pressure
gas_const = Constants.gas_constant
compress_fact = self.compress_fact
temperature = self.temperature
dens_mol_expr = pyunits.convert(
pressure / gas_const / compress_fact / temperature,
pyunits.kmol / pyunits.m**3,
)
self.dens_mol_eq = Constraint(
expr=self.dens_mol == dens_mol_expr,
doc=(
"Equation used to calculate molar density -- "
"ideal gas equation with a\ncompressibility factor"
),
)
def _dens_mol_comp(self):
component_list = self.config.parameters.component_list
def dens_mol_comp_rule(b, j):
return self.mole_frac_comp[j] * self.dens_mol
self.dens_mol_comp = Expression(
component_list,
rule=dens_mol_comp_rule,
doc="Molar density of a particular component in the gas",
)
def _speed_of_sound(self):
# Use a constraint here to make balance equation expressions more
# legible.
self.speed_of_sound = Var(
initialize=300.0,
units=pyunits.m / pyunits.s,
doc="Speed of sound in the gas",
)
gas_const = pyunits.convert(
Constants.gas_constant,
pyunits.kJ / pyunits.kmol / pyunits.K,
)
speed_of_sound_expr = pyunits.convert(
sqrt(
self.heat_capacity_ratio
* self.compress_fact
* gas_const
* self.temperature
/ self.mw
),
pyunits.m / pyunits.s,
)
self.speed_of_sound_eq = Constraint(
expr=self.speed_of_sound == speed_of_sound_expr,
doc="Equation to calculate speed of sound",
)
def define_state_vars(self):
return {
"flow_mol": self.flow_mol,
"pressure": self.pressure,
"temperature": self.temperature,
"mole_frac_comp": self.mole_frac_comp,
}
def get_material_flow_terms(self, p, j):
return self.flow_mol * self.mole_frac_comp[j]
def get_material_density_terms(self, p, j):
# Converting to kmol/km^3 is a workaround -- really I would like to
# change the units of area and material_holdup in the control volume.
return pyunits.convert(
self.dens_mol_comp[j],
pyunits.kmol / pyunits.km**3,
)
def get_material_flow_basis(self):
return MaterialFlowBasis.molar
def get_enthalpy_flow_terms(self, p):
return (
(self.temperature - self.config.parameters.temperature_ref)
* self.cp_mol
* self.flow_mol
)
|
71fef814f2176bb56c5a64cd711d59869c097434
|
0c3cf82c1f3f4dfe3f7d3a3d0f5912a25c821264
|
/pandasdmx/urn.py
|
aec252a27a0555d3b6b332e54859059b108176b4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Python-2.0",
"Apache-2.0"
] |
permissive
|
dr-leo/pandaSDMX
|
ae9ac430433afa0e57a574ba0aea416fdb3c399f
|
c72224efd2219db9b926de9c7ca8d312132ab592
|
refs/heads/master
| 2023-03-10T17:40:08.026395
| 2023-02-25T19:23:44
| 2023-02-25T19:23:44
| 21,954,882
| 128
| 60
|
Apache-2.0
| 2023-02-25T13:33:19
| 2014-07-17T19:33:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,655
|
py
|
urn.py
|
import re
from typing import Dict
from pandasdmx.model import PACKAGE, MaintainableArtefact
# Regular expression for URNs
URN = re.compile(
r"urn:sdmx:org\.sdmx\.infomodel"
r"\.(?P<package>[^\.]*)"
r"\.(?P<class>[^=]*)=((?P<agency>[^:]*):)?"
r"(?P<id>[^\(\.]*)(\((?P<version>[\d\.]*)\))?"
r"(\.(?P<item_id>.*))?"
)
_BASE = (
"urn:sdmx:org.sdmx.infomodel.{package}.{obj.__class__.__name__}="
"{ma.maintainer.id}:{ma.id}({ma.version}){extra_id}"
)
def make(obj, maintainable_parent=None, strict=False):
"""Create an SDMX URN for `obj`.
If `obj` is not :class:`.MaintainableArtefact`, then `maintainable_parent`
must be supplied in order to construct the URN.
"""
if not isinstance(obj, MaintainableArtefact):
ma = maintainable_parent or obj.get_scheme()
extra_id = f".{obj.id}"
else:
ma = obj
extra_id = ""
if not isinstance(ma, MaintainableArtefact):
raise ValueError(
f"Neither {repr(obj)} nor {repr(maintainable_parent)} are maintainable"
)
elif ma.maintainer is None:
raise ValueError(f"Cannot construct URN for {repr(ma)} without maintainer")
elif strict and ma.version is None:
raise ValueError(f"Cannot construct URN for {repr(ma)} without version")
return _BASE.format(
package=PACKAGE[obj.__class__], obj=obj, ma=ma, extra_id=extra_id
)
def match(value: str) -> Dict[str, str]:
try:
match = URN.match(value)
assert match is not None
except AssertionError:
raise ValueError(f"not a valid SDMX URN: {value}")
else:
return match.groupdict()
|
fd3b5f22ee2a1b6a36abe98888fa8ef6b1e1282a
|
019f03d6713a2bc5344b644aeb5ebe70aaf7cfd0
|
/src/super_gradients/common/exceptions/sg_trainer_exceptions.py
|
73202c50a13df2418ef7fc23fa20bb5ee5c3a241
|
[
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
Deci-AI/super-gradients
|
6f52cd15bc2f9f39e3cdc6067292b6512aba5dd0
|
7240726cf6425b53a26ed2faec03672f30fee6be
|
refs/heads/master
| 2023-08-25T17:47:02.595029
| 2023-08-24T11:50:50
| 2023-08-24T11:50:50
| 432,652,408
| 3,237
| 331
|
Apache-2.0
| 2023-09-14T11:24:46
| 2021-11-28T07:58:02
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,268
|
py
|
sg_trainer_exceptions.py
|
class UnsupportedTrainingParameterFormat(Exception):
"""Exception raised illegal training param format.
:param desc: Explanation of the error
"""
def __init__(self, desc: str):
self.message = "Unsupported training parameter format: " + desc
super().__init__(self.message)
class UnsupportedOptimizerFormat(UnsupportedTrainingParameterFormat):
"""Exception raised illegal optimizer format."""
def __init__(self):
super().__init__("optimizer parameter expected one of ['Adam','SGD','RMSProp'], or torch.optim.Optimizer object")
class IllegalDataloaderInitialization(Exception):
"""Exception raised illegal data loaders."""
def __init__(self):
super().__init__("train_loader, valid_loader and class parameters are required when initializing Trainer with data loaders")
class GPUModeNotSetupError(Exception):
"""Exception raised when the DDP should be setup but is not."""
def __init__(self):
super().__init__(
"Your environment was not setup to support DDP. Please run at the beginning of your script:\n"
">>> from super_gradients.common.environment.env_helpers import init_trainer\n"
">>> setup_device(multi_gpu=..., num_gpus=...)\n"
)
|
69b6b2065f5e70111452420d2ea2d5889446f83b
|
92b2002d90f4a3d8d7ae2e10c3dc9df3145e6e80
|
/examples/minimal.py
|
01087600256f70fb90c86c36a5c439f5a0037aed
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sciter-sdk/pysciter
|
62cc25d75f9630c84ad446316effae91592e339c
|
a32b2cee5e6ada5c2bd210bf790710671668cbe1
|
refs/heads/master
| 2023-08-17T08:23:46.645047
| 2022-08-27T00:48:52
| 2022-08-27T03:24:46
| 51,951,031
| 403
| 49
|
MIT
| 2022-08-27T03:24:47
| 2016-02-17T19:57:51
|
Python
|
UTF-8
|
Python
| false
| false
| 298
|
py
|
minimal.py
|
"""Minimalistic PySciter sample for Windows."""
import sciter
if __name__ == '__main__':
sciter.runtime_features(file_io=True, allow_sysinfo=True)
frame = sciter.Window(ismain=True, uni_theme=True)
frame.minimal_menu()
frame.load_file("examples/minimal.htm")
frame.run_app()
|
0d5ee2af7245061d495ab3b5f5e19fcae85ca42f
|
4124e6d1a99b40e3e990915969899ba0ddfa9390
|
/kaldi/matrix/packed.py
|
17f3206e1624f32f143cad09faec49385b0f47e4
|
[
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
pykaldi/pykaldi
|
23f0554072e99fbfa036be27a1b4d1e08f719525
|
b482f79a334383a16a3805d658aa221ca3d23c6d
|
refs/heads/master
| 2023-03-10T06:02:38.465779
| 2022-05-29T21:24:42
| 2022-05-29T21:24:42
| 94,806,200
| 1,019
| 283
|
Apache-2.0
| 2022-09-18T13:34:33
| 2017-06-19T18:05:19
|
Python
|
UTF-8
|
Python
| false
| false
| 9,317
|
py
|
packed.py
|
from . import _kaldi_matrix
import _matrix_common # FIXME: Relative/absolute import is buggy in Python 3.
from . import _packed_matrix
from . import _sp_matrix
from . import _tp_matrix
################################################################################
# single precision packed matrix types
################################################################################
class _PackedMatrixBase(object):
"""Base class defining the extra API for single precision packed matrices.
No constructor.
"""
def size(self):
"""Returns size as a tuple.
Returns:
A tuple (num_rows, num_cols) of integers.
"""
return self.num_rows, self.num_cols
def swap_(self, other):
"""Swaps the contents with another matrix.
Shallow swap.
Args:
other (Matrix or SpMatrix or TpMatrix): The input matrix.
Raises:
ValueError: If **other** is not a square matrix.
"""
# Using the native code instead of size()
# prevents an exception for the case when
# other is not a python object
m, n = other.num_rows, other.num_cols
if m != n:
raise ValueError("other is not a square matrix.")
if isinstance(other, _kaldi_matrix.Matrix):
return self.swap_with_matrix_(other)
elif isinstance(other, _packed_matrix.PackedMatrix):
return self.swap_with_packed_(other)
else:
raise ValueError("other must be a Matrix or SpMatrix or TpMatrix.")
class SpMatrix(_PackedMatrixBase, _sp_matrix.SpMatrix):
"""Single precision symmetric matrix."""
def __init__(self, num_rows = None,
resize_type=_matrix_common.MatrixResizeType.SET_ZERO):
"""Creates a new symmetric matrix.
If `num_rows` is not ``None``, initializes the symmetric matrix to the
specified size. Otherwise, initializes an empty symmetric matrix.
Args:
num_rows (int): The number of rows. Defaults to ``None``.
resize_type (MatrixResizeType): How to initialize the elements.
If ``MatrixResizeType.SET_ZERO`` or
``MatrixResizeType.COPY_DATA``, they are set to zero.
If ``MatrixResizeType.UNDEFINED``, they are left uninitialized.
Defaults to ``MatrixResizeType.SET_ZERO``.
"""
super(SpMatrix, self).__init__()
if num_rows is not None:
if isinstance(num_rows, int) and num_rows >= 0:
self.resize_(num_rows, resize_type)
else:
raise ValueError("num_rows should be a non-negative integer.")
def clone(self):
"""Clones the symmetric matrix.
Returns:
SpMatrix: A copy of the symmetric matrix.
"""
return SpMatrix(len(self)).copy_from_sp_(self)
class TpMatrix(_PackedMatrixBase, _tp_matrix.TpMatrix):
"""Single precision triangular matrix."""
def __init__(self, num_rows = None,
resize_type=_matrix_common.MatrixResizeType.SET_ZERO):
"""Initializes a new triangular matrix.
If `num_rows` is not ``None``, initializes the triangular matrix to the
specified size. Otherwise, initializes an empty triangular matrix.
Args:
num_rows (int): Number of rows. Defaults to None.
resize_type (MatrixResizeType): How to initialize the elements.
If ``MatrixResizeType.SET_ZERO`` or
``MatrixResizeType.COPY_DATA``, they are set to zero.
If ``MatrixResizeType.UNDEFINED``, they are left uninitialized.
Defaults to ``MatrixResizeType.SET_ZERO``.
"""
super(TpMatrix, self).__init__()
if num_rows is not None:
if isinstance(num_rows, int) and num_rows >= 0:
self.resize_(num_rows, resize_type)
else:
raise ValueError("num_rows should be a non-negative integer.")
def clone(self):
"""Clones the triangular matrix.
Returns:
TpMatrix: A copy of the triangular matrix.
"""
return TpMatrix(len(self)).copy_from_tp_(self)
################################################################################
# double precision packed matrix types
################################################################################
class _DoublePackedMatrixBase(object):
"""Base class defining the extra API for double precision packed matrices.
No constructor.
"""
def size(self):
"""Returns size as a tuple.
Returns:
A tuple (num_rows, num_cols) of integers.
"""
return self.num_rows, self.num_cols
def swap_(self, other):
"""Swaps the contents with another matrix.
Shallow swap.
Args:
other (DoubleMatrix or DoubleSpMatrix or DoubleTpMatrix):
The input matrix.
Raises:
ValueError: If **other** is not a square matrix.
"""
m, n = other.num_rows, other.num_cols
if m != n:
raise ValueError("other is not a square matrix.")
if isinstance(other, _kaldi_matrix.DoubleMatrix):
return self.swap_with_matrix_(other)
elif isinstance(other, _packed_matrix.DoublePackedMatrix):
return self.swap_with_packed_(other)
else:
raise ValueError("other must be a DoubleMatrix or DoubleSpMatrix "
"or DoubleTpMatrix.")
class DoubleSpMatrix(_DoublePackedMatrixBase, _sp_matrix.DoubleSpMatrix):
"""Double precision symmetric matrix."""
def __init__(self, num_rows = None,
resize_type=_matrix_common.MatrixResizeType.SET_ZERO):
"""Creates a new symmetric matrix.
If `num_rows` is not ``None``, initializes the symmetric matrix to the
specified size. Otherwise, initializes an empty symmetric matrix.
Args:
num_rows (int): Number of rows. Defaults to None.
resize_type (MatrixResizeType): How to initialize the elements.
If ``MatrixResizeType.SET_ZERO`` or
``MatrixResizeType.COPY_DATA``, they are set to zero.
If ``MatrixResizeType.UNDEFINED``, they are left uninitialized.
Defaults to ``MatrixResizeType.SET_ZERO``.
"""
super(DoubleSpMatrix, self).__init__()
if num_rows is not None:
if isinstance(num_rows, int) and num_rows >= 0:
self.resize_(num_rows, resize_type)
else:
raise ValueError("num_rows should be a non-negative integer.")
def clone(self):
"""Clones the symmetric matrix.
Returns:
DoubleSpMatrix: A copy of the symmetric matrix.
"""
return DoubleSpMatrix(len(self)).copy_from_sp_(self)
class DoubleTpMatrix(_DoublePackedMatrixBase, _tp_matrix.DoubleTpMatrix):
"""Double precision triangular matrix."""
def __init__(self, num_rows = None,
resize_type=_matrix_common.MatrixResizeType.SET_ZERO):
"""Initializes a new triangular matrix.
If `num_rows` is not ``None``, initializes the triangular matrix to the
specified size. Otherwise, initializes an empty triangular matrix.
Args:
num_rows (int): Number of rows. Defaults to None.
resize_type (MatrixResizeType): How to initialize the elements.
If ``MatrixResizeType.SET_ZERO`` or
``MatrixResizeType.COPY_DATA``, they are set to zero.
If ``MatrixResizeType.UNDEFINED``, they are left uninitialized.
Defaults to ``MatrixResizeType.SET_ZERO``.
"""
super(DoubleTpMatrix, self).__init__()
if num_rows is not None:
if isinstance(num_rows, int) and num_rows >= 0:
self.resize_(num_rows, resize_type)
else:
raise ValueError("num_rows should be a non-negative integer.")
def clone(self):
"""Clones the triangular matrix.
Returns:
DoubleTpMatrix: A copy of the triangular matrix.
"""
return DoubleTpMatrix(len(self)).copy_from_tp_(self)
################################################################################
def _sp_matrix_wrapper(matrix):
    """Constructs a new symmetric matrix by swapping contents.

    Converts low-level symmetric matrix instances into their wrapper
    equivalents without copying data. This is destructive: the input's
    contents are moved into the new object by swapping data pointers.

    Args:
        matrix (`Matrix`): The input matrix.

    Returns:
        SpMatrix: The new matrix instance.

    Raises:
        TypeError: If the input is not a recognized symmetric matrix type.
    """
    if isinstance(matrix, _sp_matrix.SpMatrix):
        wrapper = SpMatrix()
    elif isinstance(matrix, _sp_matrix.DoubleSpMatrix):
        wrapper = DoubleSpMatrix()
    else:
        raise TypeError("unrecognized input type")
    return wrapper.swap_(matrix)
################################################################################
# Export every public name defined in this module, hiding private names
# and the shared `*Base` helper classes.
__all__ = [
    name
    for name in dir()
    if not name.startswith('_') and not name.endswith('Base')
]
|
9a9bff98d86e84402ffa9854fd3ff33b11ca04d1
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/lobby/prb_windows/__init__.py
|
aaa15e299059770892415dbb4e89b99363571a8e
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 4,177
|
py
|
__init__.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/prb_windows/__init__.py
from frameworks.wulf import WindowLayer
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.Scaleform.framework import ScopeTemplates, ComponentSettings, GroupedViewSettings
from gui.Scaleform.framework.package_layout import PackageBusinessHandler
from gui.Scaleform.genConsts.CONTEXT_MENU_HANDLER_TYPE import CONTEXT_MENU_HANDLER_TYPE
from gui.Scaleform.genConsts.PREBATTLE_ALIASES import PREBATTLE_ALIASES
from gui.app_loader import settings as app_settings
from gui.shared import EVENT_BUS_SCOPE
from gui.shared.utils.functions import getViewName
def getContextMenuHandlers():
    """Returns (handler type, handler class) pairs registered by this package."""
    from gui.Scaleform.daapi.view.lobby.prb_windows.PrebattleUserCMHandler import PrebattleUserCMHandler
    userHandler = (CONTEXT_MENU_HANDLER_TYPE.PREBATTLE_USER, PrebattleUserCMHandler)
    return (userHandler,)
def getViewSettings():
    """Returns the Scaleform view settings registered by the prb_windows package.

    Imports are kept local so the heavy view modules are only loaded when
    the settings are actually requested.
    """
    from gui.Scaleform.daapi.view.lobby.prb_windows import invite_windows
    from gui.Scaleform.daapi.view.lobby.prb_windows.BattleSessionList import BattleSessionList
    from gui.Scaleform.daapi.view.lobby.prb_windows.BattleSessionWindow import BattleSessionWindow
    from gui.Scaleform.daapi.view.lobby.SendInvitesWindow import SendInvitesWindow
    from gui.Scaleform.daapi.view.lobby.prb_windows.SquadPromoWindow import SquadPromoWindow
    from gui.Scaleform.daapi.view.lobby.prb_windows.SwitchPeripheryWindow import SwitchPeripheryWindow
    # Each entry maps a view alias to its python class, SWF file, window
    # layer and scope. The exact argument tuples are consumed by the
    # Scaleform framework; do not reorder positional arguments.
    return (GroupedViewSettings(PREBATTLE_ALIASES.SEND_INVITES_WINDOW_PY, SendInvitesWindow, 'sendInvitesWindow.swf', WindowLayer.WINDOW, '', PREBATTLE_ALIASES.SEND_INVITES_WINDOW_PY, ScopeTemplates.DEFAULT_SCOPE, True, isCentered=False),
     GroupedViewSettings(PREBATTLE_ALIASES.AUTO_INVITE_WINDOW_PY, invite_windows.AutoInviteWindow, 'receivedInviteWindow.swf', WindowLayer.WINDOW, 'receivedInviteWindow', None, ScopeTemplates.DEFAULT_SCOPE, True, isCentered=False),
     GroupedViewSettings(PREBATTLE_ALIASES.BATTLE_SESSION_ROOM_WINDOW_PY, BattleSessionWindow, 'battleSessionWindow.swf', WindowLayer.WINDOW, '', PREBATTLE_ALIASES.BATTLE_SESSION_ROOM_WINDOW_PY, ScopeTemplates.DEFAULT_SCOPE, True, isCentered=False),
     GroupedViewSettings(PREBATTLE_ALIASES.BATTLE_SESSION_LIST_WINDOW_PY, BattleSessionList, 'battleSessionList.swf', WindowLayer.WINDOW, '', PREBATTLE_ALIASES.BATTLE_SESSION_LIST_WINDOW_PY, ScopeTemplates.DEFAULT_SCOPE, True, isCentered=False),
     GroupedViewSettings(VIEW_ALIAS.SQUAD_PROMO_WINDOW, SquadPromoWindow, 'squadPromoWindow.swf', WindowLayer.WINDOW, '', None, ScopeTemplates.DEFAULT_SCOPE),
     GroupedViewSettings(VIEW_ALIAS.SWITCH_PERIPHERY_WINDOW, SwitchPeripheryWindow, 'switchPeripheryWindow.swf', WindowLayer.TOP_WINDOW, '', None, ScopeTemplates.DEFAULT_SCOPE),
     GroupedViewSettings(VIEW_ALIAS.SWITCH_PERIPHERY_WINDOW_MODAL, SwitchPeripheryWindow, 'switchPeripheryWindow.swf', WindowLayer.TOP_WINDOW, '', None, ScopeTemplates.DEFAULT_SCOPE, isModal=True, canDrag=False))
def getBusinessHandlers():
    """Returns the package-level business handlers for the lobby event bus."""
    handler = _PrbPackageBusinessHandler()
    return (handler,)
class _PrbPackageBusinessHandler(PackageBusinessHandler):
    """Routes prebattle view-load events to the matching window loaders."""

    def __init__(self):
        showWindow = self.__showPrebattleWindow
        listeners = ((PREBATTLE_ALIASES.BATTLE_SESSION_ROOM_WINDOW_PY, showWindow),
         (PREBATTLE_ALIASES.BATTLE_SESSION_LIST_WINDOW_PY, showWindow),
         (PREBATTLE_ALIASES.SEND_INVITES_WINDOW_PY, showWindow),
         (PREBATTLE_ALIASES.AUTO_INVITE_WINDOW_PY, self.__showAutoInviteWindow),
         (VIEW_ALIAS.SQUAD_PROMO_WINDOW, self.loadViewByCtxEvent),
         (VIEW_ALIAS.SWITCH_PERIPHERY_WINDOW, self.loadViewByCtxEvent))
        super(_PrbPackageBusinessHandler, self).__init__(listeners, app_settings.APP_NAME_SPACE.SF_LOBBY, EVENT_BUS_SCOPE.LOBBY)

    def __showPrebattleWindow(self, event):
        # For these windows the view alias doubles as the unique view name.
        self.loadViewWithDefName(event.alias, event.alias, event.ctx)

    def __showAutoInviteWindow(self, event):
        # Auto-invite windows are per-prebattle: derive the name from prbID.
        alias = PREBATTLE_ALIASES.AUTO_INVITE_WINDOW_PY
        name = getViewName(alias, event.ctx.get('prbID'))
        self.loadViewWithDefName(alias, name, event.ctx)
|
5606e56d8675469ac42ba379db7dc888ddfdbddc
|
279f415dd1e06c594c6c87deda57e201c73c4542
|
/test/espnet2/utils/test_eer.py
|
8c44c334b4153ac1e343a5018efdae88b75d294b
|
[
"Apache-2.0"
] |
permissive
|
espnet/espnet
|
f7ba47271c1a6b1ed606dbbfb04a7f14220bb585
|
bcd20948db7846ee523443ef9fd78c7a1248c95e
|
refs/heads/master
| 2023-08-28T23:43:34.238336
| 2023-08-23T02:51:39
| 2023-08-23T02:51:39
| 114,054,873
| 7,242
| 2,244
|
Apache-2.0
| 2023-09-14T08:01:11
| 2017-12-13T00:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 591
|
py
|
test_eer.py
|
import pytest
from espnet2.utils.eer import ComputeErrorRates, ComputeMinDcf, tuneThresholdfromScore
@pytest.mark.parametrize(
    "scores, labels, eer",
    [([0.0, 1.0], [0, 1], 0.0), ([0.7, 0.2, 0.9, 0.3], [0, 1, 0, 1], 100.0)],
)
def test_eer_computation(scores, labels, eer):
    # Checks that the estimated equal error rate (in percent) matches the
    # expected value: 0.0 for a perfectly separable score/label pairing and
    # 100.0 for a fully inverted one.
    results = tuneThresholdfromScore(scores, labels, [1, 0.1])
    eer_est = results[1]
    fnrs, fprs, thresholds = ComputeErrorRates(scores, labels)
    p_trg, c_miss, c_fa = 0.05, 1, 1
    # NOTE(review): mindcf is computed but never asserted, so this line only
    # exercises ComputeMinDcf for crashes, not for correctness.
    mindcf, _ = ComputeMinDcf(fnrs, fprs, thresholds, p_trg, c_miss, c_fa)
    assert eer_est == eer, (eer_est, eer)
|
8ca81420eb4a07b62d8b3b742e299c1efa39e5d9
|
0ae45f428c4ad4c5a5e4861e68380b451d11132f
|
/trench/backends/aws.py
|
7d8cfaed3f1ae583a5d208353ebd4b01cda96c31
|
[
"MIT"
] |
permissive
|
merixstudio/django-trench
|
09645d38b40dccf3b3db19b1c020a8ea8c35d741
|
b7f0eb4f0031e6013826e59a38ebd7010661cfd9
|
refs/heads/develop
| 2023-06-10T21:21:30.426789
| 2023-03-17T08:18:30
| 2023-03-17T08:18:30
| 151,680,451
| 270
| 53
|
NOASSERTION
| 2023-05-25T02:33:36
| 2018-10-05T06:39:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,460
|
py
|
aws.py
|
from django.utils.translation import gettext_lazy as _
import logging
import boto3
import botocore.exceptions
from trench.backends.base import AbstractMessageDispatcher
from trench.responses import (
DispatchResponse,
FailedDispatchResponse,
SuccessfulDispatchResponse,
)
from trench.settings import AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION
from botocore.exceptions import ClientError, EndpointConnectionError
class AWSMessageDispatcher(AbstractMessageDispatcher):
    """Sends the MFA verification code to the user's phone number via AWS SNS."""

    # Prefix prepended to the generated code in the SMS body.
    _SMS_BODY = _("Your verification code is: ")
    _SUCCESS_DETAILS = _("SMS message with MFA code has been sent.")

    def dispatch_message(self) -> DispatchResponse:
        """Publishes an SMS containing a freshly generated MFA code.

        Returns:
            SuccessfulDispatchResponse when SNS accepts the publish call,
            FailedDispatchResponse (with the error text) when the client
            rejects the request or the endpoint is unreachable.
        """
        try:
            client = boto3.client(
                "sns",
                aws_access_key_id=self._config.get(AWS_ACCESS_KEY),
                aws_secret_access_key=self._config.get(AWS_SECRET_KEY),
                region_name=self._config.get(AWS_REGION),
            )
            client.publish(
                PhoneNumber=self._to,
                Message=self._SMS_BODY + self.create_code(),
            )
            return SuccessfulDispatchResponse(details=self._SUCCESS_DETAILS)
        # Both exception types were previously handled by two byte-identical
        # blocks; merged into a single clause to remove the duplication.
        except (ClientError, EndpointConnectionError) as cause:
            logging.error(cause, exc_info=True)
            return FailedDispatchResponse(details=str(cause))
|
7f1ae740797336605ae009af3208d4d6dca930d3
|
432ea480327c3e0ce37d605d1c4ac29a8b653853
|
/src/visions/typesets/typeset.py
|
f42fa302178044f52a7f0c8a7ded1e21d5169dd3
|
[
"BSD-4-Clause",
"BSD-2-Clause"
] |
permissive
|
dylan-profiler/visions
|
3f7f99b06cc8a7b90cb4df988dbbec6c329a8e0a
|
a0b55bbf95e6efe001195e4b497358d6283966b5
|
refs/heads/develop
| 2022-11-27T01:17:01.735418
| 2022-10-30T10:44:37
| 2022-10-30T10:44:37
| 227,633,867
| 188
| 23
|
NOASSERTION
| 2022-10-05T23:06:31
| 2019-12-12T15:09:01
|
Python
|
UTF-8
|
Python
| false
| false
| 15,020
|
py
|
typeset.py
|
import warnings
from functools import singledispatch
from pathlib import Path
from typing import (
Any,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
)
import networkx as nx
import pandas as pd
from visions.types.generic import Generic
from visions.types.type import VisionsBaseType
# Either a single visions type or a whole typeset (used by the set-algebra
# operators on VisionsTypeset).
TypeOrTypeset = TypeVar("TypeOrTypeset", Type[VisionsBaseType], "VisionsTypeset")
# A traversal-path container: one type per step, or a per-column mapping.
pathTypes = TypeVar(
    "pathTypes", Type[VisionsBaseType], Dict[str, Type[VisionsBaseType]]
)
# Pandas input: a Series or a DataFrame.
pdT = TypeVar("pdT", pd.Series, pd.DataFrame)
# Shorthand for "a visions type class" used throughout this module.
T = Type[VisionsBaseType]
def build_graph(nodes: Set[Type[VisionsBaseType]]) -> Tuple[nx.DiGraph, nx.DiGraph]:
    """Constructs a traversable relation graph between visions types.

    Every node is a :class:`visions.types.type.VisionsBaseType`; every edge
    is a relation declared on a type. Relations that point at a type outside
    *nodes* are skipped with a warning.

    Args:
        nodes: The collection of :class:`visions.types.type.VisionsBaseType`

    Returns:
        A pair ``(relation_graph, base_graph)``: the full graph of type
        relations, and its subgraph restricted to non-inferential edges.
    """
    edge_style = {True: "dashed", False: "solid"}
    full_graph = nx.DiGraph()
    full_graph.add_nodes_from(nodes)
    identity_edges = []
    for node in nodes:
        for relation in node.relations:
            if relation.related_type not in nodes:
                warnings.warn(
                    f"Provided relations included mapping from {relation.related_type} to {relation.type} "
                    f"but {relation.related_type} was not included in the provided list of nodes"
                )
                continue
            full_graph.add_edge(
                relation.related_type,
                relation.type,
                relationship=relation,
                style=edge_style[relation.inferential],
            )
            if not relation.inferential:
                identity_edges.append((relation.related_type, relation.type))
    check_graph_constraints(full_graph)
    return full_graph, full_graph.edge_subgraph(identity_edges)
def check_graph_constraints(relation_graph: nx.DiGraph) -> None:
    """Validates a relation_graph is appropriately constructed.

    Args:
        relation_graph: A directed graph representing the set of relations
            between type nodes.
    """
    # Isolate pruning mutates the graph in place; cycles are then checked
    # on the pruned graph.
    check_isolates(relation_graph)
    check_cycles(relation_graph)
def check_isolates(graph: nx.DiGraph) -> None:
    """Removes orphaned (isolated) nodes from the graph, warning about them.

    Args:
        graph: the graph to check; pruned in place
    """
    original_nodes = set(graph.nodes)
    root_node = next(nx.topological_sort(graph))
    # The root is allowed to be isolated; every other isolate is dropped.
    graph.remove_nodes_from(list(set(nx.isolates(graph)) - {root_node}))
    orphaned_nodes = original_nodes - set(graph.nodes)
    if orphaned_nodes:
        warnings.warn(
            f"{orphaned_nodes} were isolates in the type relation map and consequently orphaned. "
            "Please add some mapping to the orphaned nodes."
        )
def check_cycles(graph: nx.DiGraph) -> None:
    """Warns when the relation graph contains a cycle.

    Args:
        graph: the graph to check
    """
    cycles = list(nx.simple_cycles(graph))
    if cycles:
        warnings.warn(f"Cyclical relations between types {cycles} detected")
def traverse_graph_with_series(
    base_type: T,
    series: Sequence,
    graph: nx.DiGraph,
    path: Optional[List[T]] = None,
    state: Optional[dict] = None,
) -> Tuple[Sequence, List[T], dict]:
    """Depth First Search traversal. There should be at most one successor that contains the series.

    Args:
        base_type: Entry-point for graph to start traversal
        series: the Series to check
        graph: the Graph to traverse
        path: the path so far (mutated: ``base_type`` is appended)
        state: traversal state, threaded through the relation callbacks

    Returns:
        The (possibly transformed) series, the visited path and the state.
    """
    if state is None:
        state = dict()
    if path is None:
        path = []
    path.append(base_type)
    # Recurse into the first successor whose relation matches the series,
    # transforming the series on the way down. By contract at most one
    # successor matches.
    for vision_type in graph.successors(base_type):
        relation = graph[base_type][vision_type]["relationship"]
        if relation.is_relation(series, state):
            series = relation.transform(series, state)
            return traverse_graph_with_series(vision_type, series, graph, path, state)
    return series, path, state
def traverse_graph_with_sampled_series(
    base_type: T,
    series: pd.Series,
    graph: nx.DiGraph,
    sample_size: int = 10,
    state: Optional[dict] = None,
) -> Tuple[Sequence, List[T], dict]:
    """Depth First Search traversal with sampling. There should be at most one successor that contains the series.

    Small series (< 1000 rows, or smaller than the sample) are traversed
    directly; otherwise a sample is traversed first and the discovered path
    is replayed on the full series.

    Args:
        base_type: Entry-point for graph to start traversal
        series: the Series to check
        graph: the Graph to traverse
        sample_size: number of items used in heuristic traversal
        state: traversal state; a fresh dict is created when omitted

    Returns:
        The (possibly transformed) series, the confirmed path and the state.
    """
    # Fix: the previous signature used a shared mutable default
    # (``state: dict = dict()``), which leaked traversal state across calls.
    if state is None:
        state = dict()
    if (series.shape[0] < 1000) or (sample_size > series.shape[0]):
        return traverse_graph_with_series(base_type, series, graph, state=state)
    series_sample = series.sample(sample_size)
    _, path, _ = traverse_graph_with_series(
        base_type, series_sample, graph, state=state
    )
    if len(path) == 1:
        return series, path, state
    # Replay the sampled path on the full series, stopping early when a
    # relation no longer holds on the complete data.
    from_type = path[0]
    for i, to_type in enumerate(path[1:]):
        relation = graph[from_type][to_type]["relationship"]
        if not relation.is_relation(series, state):
            break
        series = relation.transform(series, state)
        from_type = to_type
    return series, path[0 : (i + 2)], state
@singledispatch
def traverse_graph(
    data: Sequence, root_node: T, graph: nx.DiGraph
) -> Tuple[Sequence, Union[List[T], Dict[str, List[T]]], Dict[str, dict]]:
    # Generic fallback: traverse a single sequence/Series. Type-specific
    # implementations can be registered on this singledispatch function.
    return traverse_graph_with_series(root_node, data, graph)
@singledispatch
def get_type_from_path(
    path_data: Union[Sequence[T], Dict[str, Sequence[T]]]
) -> Union[T, Dict[str, T]]:
    # Fallback for unregistered path containers; list/tuple/dict overloads
    # are registered separately.
    raise TypeError(f"Can't get types from path object of type {type(path_data)}")
@get_type_from_path.register(list)
@get_type_from_path.register(tuple)
def _get_type_from_path_builtin(path_list: Sequence[T]) -> T:
    # The inferred type of a traversal path is its last (most specific) node.
    return path_list[-1]
@get_type_from_path.register(dict)
def _get_type_from_path_dict(path_dict: Dict[str, Sequence[T]]) -> Dict[str, T]:
    # Per-column paths (DataFrame case): take the last node of each path.
    return {k: v[-1] for k, v in path_dict.items()}
class VisionsTypeset:
    """
    A collection of :class:`visions.types.type.VisionsBaseType` with associated relationship map between them.

    Attributes:
        types: The collection of Visions Types derived from :class:`visions.types.type.VisionsBaseType`
        base_graph: The graph of relations composed exclusively of :class:`visions.relations.relations.IdentityRelation`
        relation_graph: The full relation graph including both :class:`visions.relations.relations.IdentityRelation`
            and :class:`visions.relations.relations.InferenceRelation`
    """
    def __init__(self, types: Set[Type[VisionsBaseType]]) -> None:
        """
        Args:
            types: a set of types

        Raises:
            ValueError: If `types` is not iterable or the resulting graph's
                root is not a subclass of Generic.
        """
        self._root_node: Optional[T] = None
        if not isinstance(types, Iterable):
            # NOTE(review): the check accepts any Iterable although the
            # message says Sequence.
            raise ValueError("types should be Sequence")
        self.relation_graph, self.base_graph = build_graph(set(types))
        if not issubclass(self.root_node, Generic):
            raise ValueError("`root_node` should be a subclass of Generic")
        self.types = set(self.relation_graph.nodes)
    @property
    def root_node(self) -> T:
        """Returns the relation_graph's root node (computed once, then cached).

        Returns:
            The first node of a topological sort of the relation graph.
        """
        if self._root_node is None:
            self._root_node = next(nx.topological_sort(self.relation_graph))
        return self._root_node
    def detect(self, data: Any) -> Tuple[Sequence, Any, dict]:
        """The results found after only considering IdentityRelations.

        Notes:
            This is an advanced feature, consider using `detect_type` in case the type is what is needed.

        Args:
            data: a DataFrame or Series to determine types over

        Returns:
            A tuple of the coerced sequence, visited nodes and state
        """
        return traverse_graph(data, self.root_node, self.base_graph)
    def detect_type(self, data: Sequence) -> Union[T, Dict[str, T]]:
        """The inferred type found only considering IdentityRelations.

        Args:
            data: a DataFrame or Series to determine types over

        Returns:
            A dictionary of {name: type} pairs in the case of DataFrame input or a type
        """
        _, paths, _ = self.detect(data)
        return get_type_from_path(paths)
    def infer(self, data: Sequence) -> Tuple[Sequence, Any, dict]:
        """The results found after considering all relations.

        Notes:
            This is an advanced feature, consider using `infer_type` in case the type is what is needed.

        Args:
            data: a DataFrame or Series to determine types over

        Returns:
            A tuple of the coerced sequence, visited nodes and state
        """
        return traverse_graph(data, self.root_node, self.relation_graph)
    def infer_type(self, data: Sequence) -> Union[T, Dict[str, T]]:
        """The inferred type found using all type relations.

        Args:
            data: a DataFrame or Series to determine types over

        Returns:
            A dictionary of {name: type} pairs in the case of DataFrame input or a type
        """
        _, paths, _ = self.infer(data)
        return get_type_from_path(paths)
    def cast_to_detected(self, data: Sequence) -> Sequence:
        """Transforms input data into a canonical representation using only IdentityRelations

        Args:
            data: a DataFrame or Series to determine types over

        Returns:
            new_data: The transformed DataFrame or Series.
        """
        data, _, _ = self.detect(data)
        return data
    def cast_to_inferred(self, data: Sequence) -> Sequence:
        """Transforms input data into a canonical representation using all relations.

        Args:
            data: a DataFrame or Series to determine types over

        Returns:
            new_data: The transformed DataFrame or Series.
        """
        data, _, _ = self.infer(data)
        return data
    def output_graph(
        self,
        file_name: Union[str, Path],
        base_only: bool = False,
        dpi: Optional[int] = None,
    ) -> None:
        """Write the type graph to a file.

        Args:
            file_name: the file to save the output to
            base_only: if True, plot the graph without relation mapping edges
            dpi: set the dpi of the output image
        """
        from visions.utils.graph import output_graph
        if base_only:
            graph = self.base_graph.copy()
        else:
            graph = self.relation_graph.copy()
        graph.graph["node"] = {"shape": "box", "color": "red"}
        if dpi is not None:
            graph.graph["graph"] = {"dpi": dpi}
        output_graph(graph, file_name)
    def plot_graph(
        self,
        dpi: int = 800,
        base_only: bool = False,
        figsize: Optional[Tuple[int, int]] = None,
    ):
        """Render the type graph with matplotlib.

        Args:
            dpi: dpi of the matplotlib figure.
            figsize: figure size
            base_only: Only display the typesets base_graph

        Returns:
            Displays the image
        """
        import os
        import tempfile
        from matplotlib import image as mpimg
        from matplotlib import pyplot as plt
        # The temp file is created with delete=False so it can be re-read by
        # mpimg after the write, then removed explicitly below.
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file:
            self.output_graph(temp_file.name, dpi=dpi, base_only=base_only)
            img = mpimg.imread(temp_file.name)
            plt.figure(dpi=dpi, figsize=figsize)
            plt.axis("off")
            plt.imshow(img)
        os.unlink(temp_file.name)
    def _get_other_type(self, other: TypeOrTypeset) -> Set[T]:
        """Converts input into a set of :class:`visions.types.type.VisionsBaseType`

        Args:
            other: A :class:`visions.types.type.VisionsBaseType` or :class:`visions.typesets.typeset.VisionsTypeset`

        Raises:
            NotImplementedError:

        Returns:
            Set[Type[VisionsBaseType]]:
        """
        if isinstance(other, VisionsTypeset):
            other_types = set(other.types)
        elif issubclass(other, VisionsBaseType):
            other_types = {other}
        else:
            raise NotImplementedError(
                f"Typeset operation not implemented for type {type(other)}"
            )
        return other_types
    def replace(self, old: T, new: T) -> "VisionsTypeset":
        """Create a new typeset having replace one type with another.

        Args:
            old: Visions type to replace.
            new: Replacement visions type.

        Returns
            A VisionsTypeset
        """
        types = self.types.copy()
        types.add(new)
        types.remove(old)
        return VisionsTypeset(types)
    def __add__(self, other: TypeOrTypeset) -> "VisionsTypeset":
        """Adds a type or typeset into the current typeset.

        Args:
            other: Type or typeset to be added

        Returns
            A VisionsTypeset
        """
        other_types = self._get_other_type(other)
        return VisionsTypeset(self.types | other_types)
    def __iadd__(self, other: TypeOrTypeset) -> "VisionsTypeset":
        """Adds a type or typeset into the current typeset.

        Args:
            other: Type or typeset to be added

        Returns
            A VisionsTypeset
        """
        return self.__add__(other)
    def __sub__(self, other: TypeOrTypeset) -> "VisionsTypeset":
        """Subtracts a type or typeset from the current typeset.

        Args:
            other: Type or typeset to be removed

        Returns
            A VisionsTypeset
        """
        other_types = self._get_other_type(other)
        return VisionsTypeset(self.types - other_types)
    def __isub__(self, other: TypeOrTypeset) -> "VisionsTypeset":
        """Subtracts a type or typeset from the current typeset.

        Args:
            other: Type or typeset to be removed

        Returns
            A VisionsTypeset
        """
        return self.__sub__(other)
    def __repr__(self) -> str:
        """Pretty representation of the typeset.

        Returns
            The name of the typeset class.
        """
        return self.__class__.__name__
|
6ff8b6a6f28bcc1e77853be40c923520aadc5f95
|
51819802a13fbf4c71ea0f6ee3771b86fcf1834c
|
/srsly/tests/ruamel_yaml/test_fail.py
|
02cef0b11f5ecb59408ecb0eeac299c049de26d3
|
[
"MIT"
] |
permissive
|
explosion/srsly
|
1860eda76b79bce49e46a8edeb39828774d1d900
|
1aa4ae1b690b513092ce1e58257427cddf38e97f
|
refs/heads/master
| 2023-08-23T00:56:04.762619
| 2023-07-24T11:40:07
| 2023-07-24T11:40:07
| 159,904,634
| 383
| 43
|
MIT
| 2023-07-25T12:13:06
| 2018-12-01T03:21:56
|
Python
|
UTF-8
|
Python
| false
| false
| 6,290
|
py
|
test_fail.py
|
# coding: utf-8
# there is some work to do
# provide a failing test xyz and a non-failing xyz_no_fail ( to see
# what the current failing output is.
# on fix of srsly.ruamel_yaml, move the marked test to the appropriate test (without mark)
# and remove remove the xyz_no_fail
import pytest
from .roundtrip import round_trip, dedent, round_trip_load, round_trip_dump
class TestCommentFailures:
    """Round-trip cases where comment placement is known to fail.

    Each ``xfail`` test is paired with a ``*_no_fail``/``*_fail`` twin that
    pins the current (imperfect) output so regressions are noticed.
    """
    @pytest.mark.xfail(strict=True)
    def test_set_comment_before_tag(self):
        # no comments before tags
        round_trip(
            """
        # the beginning
        !!set
        # or this one?
        ? a
        # next one is B (lowercase)
        ? b # You see? Promised you.
        ? c
        # this is the end
        """
        )
    def test_set_comment_before_tag_no_fail(self):
        # no comments before tags
        inp = """
        # the beginning
        !!set
        # or this one?
        ? a
        # next one is B (lowercase)
        ? b # You see? Promised you.
        ? c
        # this is the end
        """
        # Current behavior: the comment before the !!set tag is dropped.
        assert round_trip_dump(round_trip_load(inp)) == dedent(
            """
        !!set
        # or this one?
        ? a
        # next one is B (lowercase)
        ? b # You see? Promised you.
        ? c
        # this is the end
        """
        )
    @pytest.mark.xfail(strict=True)
    def test_comment_dash_line(self):
        round_trip(
            """
        - # abc
          a: 1
          b: 2
        """
        )
    def test_comment_dash_line_fail(self):
        # NOTE(review): despite the name, this test is expected to PASS; it
        # pins the current (not-nice) output of the xfail case above.
        x = """
        - # abc
          a: 1
          b: 2
        """
        data = round_trip_load(x)
        # this is not nice
        assert round_trip_dump(data) == dedent(
            """
        # abc
        - a: 1
          b: 2
        """
        )
class TestIndentFailures:
    """Round-trip cases around indentation that are not yet preserved."""
    @pytest.mark.xfail(strict=True)
    def test_indent_not_retained(self):
        round_trip(
            """
        verbosity: 1 # 0 is minimal output, -1 none
        base_url: http://gopher.net
        special_indices: [1, 5, 8]
        also_special:
        - a
        - 19
        - 32
        asia and europe: &asia_europe
            Turkey: Ankara
            Russia: Moscow
        countries:
            Asia:
                <<: *asia_europe
                Japan: Tokyo # 東京
            Europe:
                <<: *asia_europe
                Spain: Madrid
                Italy: Rome
            Antarctica:
            - too cold
        """
        )
    def test_indent_not_retained_no_fail(self):
        inp = """
        verbosity: 1 # 0 is minimal output, -1 none
        base_url: http://gopher.net
        special_indices: [1, 5, 8]
        also_special:
        - a
        - 19
        - 32
        asia and europe: &asia_europe
            Turkey: Ankara
            Russia: Moscow
        countries:
            Asia:
                <<: *asia_europe
                Japan: Tokyo # 東京
            Europe:
                <<: *asia_europe
                Spain: Madrid
                Italy: Rome
            Antarctica:
            - too cold
        """
        # Current behavior: dumping with indent=4 normalizes the layout
        # instead of retaining the original indentation.
        assert round_trip_dump(round_trip_load(inp), indent=4) == dedent(
            """
            verbosity: 1 # 0 is minimal output, -1 none
            base_url: http://gopher.net
            special_indices: [1, 5, 8]
            also_special:
            - a
            - 19
            - 32
            asia and europe: &asia_europe
                Turkey: Ankara
                Russia: Moscow
            countries:
                Asia:
                    <<: *asia_europe
                    Japan: Tokyo # 東京
                Europe:
                    <<: *asia_europe
                    Spain: Madrid
                    Italy: Rome
                Antarctica:
                - too cold
            """
        )
    # NOTE(review): disabled by the X prefix; kept for future work.
    def Xtest_indent_top_level_no_fail(self):
        inp = """
        - a:
          - b
        """
        round_trip(inp, indent=4)
class TestTagFailures:
    """Round-trip cases around explicit standard tags (e.g. ``!!map``)."""
    @pytest.mark.xfail(strict=True)
    def test_standard_short_tag(self):
        round_trip(
            """\
        !!map
        name: Anthon
        location: Germany
        language: python
        """
        )
    def test_standard_short_tag_no_fail(self):
        inp = """
        !!map
        name: Anthon
        location: Germany
        language: python
        """
        # Current behavior: the explicit !!map tag is dropped on dump.
        exp = """
        name: Anthon
        location: Germany
        language: python
        """
        assert round_trip_dump(round_trip_load(inp)) == dedent(exp)
class TestFlowValues:
    """Flow-style scalar values containing colons round-trip correctly."""
    def test_flow_value_with_colon(self):
        inp = """\
        {a: bcd:efg}
        """
        round_trip(inp)
    def test_flow_value_with_colon_quoted(self):
        inp = """\
        {a: 'bcd:efg'}
        """
        round_trip(inp, preserve_quotes=True)
class TestMappingKey:
    """Round-trip behavior of complex (mapping) keys."""
    def test_simple_mapping_key(self):
        inp = """\
        {a: 1, b: 2}: hello world
        """
        round_trip(inp, preserve_quotes=True, dump_data=False)
    def test_set_simple_mapping_key(self):
        from srsly.ruamel_yaml.comments import CommentedKeyMap
        d = {CommentedKeyMap([("a", 1), ("b", 2)]): "hello world"}
        exp = dedent(
            """\
        {a: 1, b: 2}: hello world
        """
        )
        assert round_trip_dump(d) == exp
    def test_change_key_simple_mapping_key(self):
        from srsly.ruamel_yaml.comments import CommentedKeyMap
        inp = """\
        {a: 1, b: 2}: hello world
        """
        d = round_trip_load(inp, preserve_quotes=True)
        # Replace the mapping key in place while keeping the value.
        d[CommentedKeyMap([("b", 1), ("a", 2)])] = d.pop(
            CommentedKeyMap([("a", 1), ("b", 2)])
        )
        exp = dedent(
            """\
        {b: 1, a: 2}: hello world
        """
        )
        assert round_trip_dump(d) == exp
    def test_change_value_simple_mapping_key(self):
        from srsly.ruamel_yaml.comments import CommentedKeyMap
        inp = """\
        {a: 1, b: 2}: hello world
        """
        d = round_trip_load(inp, preserve_quotes=True)
        # NOTE(review): d is rebound to a fresh dict here, discarding the
        # loaded document — the "change value" path is not actually
        # exercised on the round-tripped object.
        d = {CommentedKeyMap([("a", 1), ("b", 2)]): "goodbye"}
        exp = dedent(
            """\
        {a: 1, b: 2}: goodbye
        """
        )
        assert round_trip_dump(d) == exp
|
0835da7cedca8f7ab911ceca733269288141afa2
|
bed34365a9dab825fd9f4a4ff1b0863f441266ac
|
/neutron/api/v2/base.py
|
4dffbed7c43d3434f2cb63405480932939645e29
|
[
"Apache-2.0"
] |
permissive
|
openstack/neutron
|
0913ee3cd69d5bdb9c10aa084d4e1803abee320c
|
dde31aae392b80341f6440eb38db1583563d7d1f
|
refs/heads/master
| 2023-08-31T13:09:41.831598
| 2023-08-31T11:37:30
| 2023-08-31T11:37:30
| 2,400,289
| 1,174
| 1,325
|
Apache-2.0
| 2022-06-29T08:00:05
| 2011-09-16T16:04:08
|
Python
|
UTF-8
|
Python
| false
| false
| 38,420
|
py
|
base.py
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
from neutron_lib.api import attributes
from neutron_lib.api import faults
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib import constants
from neutron_lib.db import api as db_api
from neutron_lib import exceptions
from neutron_lib import rpc as n_rpc
from neutron_lib.services import constants as service_const
from oslo_log import log as logging
from oslo_policy import policy as oslo_policy
from oslo_utils import excutils
import webob.exc
from neutron._i18n import _
from neutron.api import api_common
from neutron.api.v2 import resource as wsgi_resource
from neutron import policy
from neutron import quota
from neutron.quota import resource_registry
LOG = logging.getLogger(__name__)
class Controller(object):
LIST = 'list'
SHOW = 'show'
CREATE = 'create'
UPDATE = 'update'
DELETE = 'delete'
    # Read-only accessors over the constructor-supplied configuration.
    @property
    def plugin(self):
        # Core plugin instance this controller delegates CRUD calls to.
        return self._plugin
    @property
    def resource(self):
        # Singular resource name with dashes normalized to underscores.
        return self._resource
    @property
    def attr_info(self):
        # Attribute map describing this resource's API attributes.
        return self._attr_info
    @property
    def member_actions(self):
        # Extra per-member actions exposed besides plain CRUD.
        return self._member_actions
    @property
    def allow_pagination(self):
        return self._allow_pagination
    @property
    def allow_sorting(self):
        return self._allow_sorting
def _init_policy_attrs(self):
"""Create the list of attributes required by policy.
If the attribute map contains a tenant_id policy, then include
project_id to bring the resource into the brave new world.
:return: sorted list of attributes required by policy
"""
policy_attrs = {name for (name, info) in self._attr_info.items()
if info.get('required_by_policy')}
if 'tenant_id' in policy_attrs:
policy_attrs.add('project_id')
# Could use list(), but sorted() makes testing easier.
return sorted(policy_attrs)
    def __init__(self, plugin, collection, resource, attr_info,
                 allow_bulk=False, member_actions=None, parent=None,
                 allow_pagination=False, allow_sorting=False):
        """Builds an API controller for one resource.

        :param plugin: core plugin the CRUD operations are delegated to
        :param collection: plural resource name (dashes become underscores)
        :param resource: singular resource name (dashes become underscores)
        :param attr_info: attribute map describing the resource's attributes
        :param allow_bulk: whether bulk create requests are accepted
        :param member_actions: extra per-member actions besides plain CRUD
        :param parent: optional dict describing the parent resource
            (must contain 'member_name' when provided)
        :param allow_pagination: whether paginated listing is offered
        :param allow_sorting: whether sorted listing is offered
        :raises neutron_lib.exceptions.Invalid: if native pagination is
            requested without native sorting support in the plugin
        """
        if member_actions is None:
            member_actions = []
        self._plugin = plugin
        self._collection = collection.replace('-', '_')
        self._resource = resource.replace('-', '_')
        self._attr_info = attr_info
        self._allow_bulk = allow_bulk
        self._allow_pagination = allow_pagination
        self._allow_sorting = allow_sorting
        # Capability probes against the plugin instance.
        self._native_bulk = self._is_native_bulk_supported()
        self._native_pagination = self._is_native_pagination_supported()
        self._native_sorting = self._is_native_sorting_supported()
        self._filter_validation = self._is_filter_validation_supported()
        self._policy_attrs = self._init_policy_attrs()
        self._notifier = n_rpc.get_notifier('network')
        self._member_actions = member_actions
        self._primary_key = self._get_primary_key()
        if self._allow_pagination and self._native_pagination:
            # Native pagination need native sorting support
            if not self._native_sorting:
                raise exceptions.Invalid(
                    _("Native pagination depend on native sorting")
                )
            if not self._allow_sorting:
                LOG.info("Allow sorting is enabled because native "
                         "pagination requires native sorting")
                self._allow_sorting = True
        self.parent = parent
        if parent:
            self._parent_id_name = '%s_id' % parent['member_name']
            parent_part = '_%s' % parent['member_name']
        else:
            self._parent_id_name = None
            parent_part = ''
        # Names of the plugin methods backing each CRUD action, e.g.
        # 'get_networks' or 'create_router_port' for child resources.
        self._plugin_handlers = {
            self.LIST: 'get%s_%s' % (parent_part, self._collection),
            self.SHOW: 'get%s_%s' % (parent_part, self._resource)
        }
        for action in [self.CREATE, self.UPDATE, self.DELETE]:
            self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
                                                         self._resource)
def _get_primary_key(self, default_primary_key='id'):
for key, value in self._attr_info.items():
if value.get('primary_key', False):
return key
return default_primary_key
    def _is_native_bulk_supported(self):
        # Plugins advertise native bulk support via a class-name-mangled
        # attribute, e.g. '_MyPlugin__native_bulk_support'.
        native_bulk_attr_name = ("_%s__native_bulk_support"
                                 % self._plugin.__class__.__name__)
        return getattr(self._plugin, native_bulk_attr_name, False)
    def _is_native_pagination_supported(self):
        # Delegated to api_common helpers for the remaining capabilities.
        return api_common.is_native_pagination_supported(self._plugin)
    def _is_native_sorting_supported(self):
        return api_common.is_native_sorting_supported(self._plugin)
    def _is_filter_validation_supported(self):
        return api_common.is_filter_validation_supported(self._plugin)
    def _exclude_attributes_by_policy(self, context, data):
        """Identifies attributes to exclude according to authZ policies.

        Return a list of attribute names which should be stripped from the
        response returned to the user because the user is not authorized
        to see them.
        """
        attributes_to_exclude = []
        for attr_name in data.keys():
            # TODO(amotoki): At now, all attribute maps have tenant_id and
            # determine excluded attributes based on tenant_id.
            # We need to migrate tenant_id to project_id later
            # as attr_info is referred to in various places and we need
            # to check all logis carefully.
            if attr_name == 'project_id':
                continue
            attr_data = self._attr_info.get(attr_name)
            if attr_data and attr_data['is_visible']:
                # Policy rule name: '<show handler>:<attribute>'.
                if policy.check(
                        context,
                        '%s:%s' % (self._plugin_handlers[self.SHOW],
                                   attr_name),
                        data,
                        might_not_exist=True,
                        pluralized=self._collection):
                    # this attribute is visible, check next one
                    continue
            # if the code reaches this point then either the policy check
            # failed or the attribute was not visible in the first place
            attributes_to_exclude.append(attr_name)
            # TODO(amotoki): As mentioned in the above TODO,
            # we treat project_id and tenant_id equivalently.
            # This should be migrated to project_id in Ocata.
            if attr_name == 'tenant_id':
                attributes_to_exclude.append('project_id')
        return attributes_to_exclude
def _view(self, context, data, fields_to_strip=None):
"""Build a view of an API resource.
:param context: the neutron context
:param data: the object for which a view is being created
:param fields_to_strip: attributes to remove from the view
:returns: a view of the object which includes only attributes
visible according to API resource declaration and authZ policies.
"""
fields_to_strip = ((fields_to_strip or []) +
self._exclude_attributes_by_policy(context, data))
return self._filter_attributes(data, fields_to_strip)
def _filter_attributes(self, data, fields_to_strip=None):
if not fields_to_strip:
return data
return dict(item for item in data.items()
if (item[0] not in fields_to_strip))
def _do_field_list(self, original_fields):
fields_to_add = None
# don't do anything if fields were not specified in the request
if original_fields:
fields_to_add = [attr for attr in self._policy_attrs
if attr not in original_fields]
original_fields.extend(self._policy_attrs)
return original_fields, fields_to_add
def __getattr__(self, name):
    """Dynamically expose registered member actions as handler methods.

    Only names present in ``self._member_actions`` are served; any
    other attribute lookup raises :class:`AttributeError` as usual.
    The returned closure fetches the resource, enforces policy for the
    action, then delegates to the plugin method of the same name.
    """
    if name in self._member_actions:
        @db_api.retry_db_errors
        def _handle_action(request, id, **kwargs):
            arg_list = [request.context, id]
            # Ensure policy engine is initialized
            policy.init()
            # Fetch the resource and verify if the user can access it
            try:
                parent_id = kwargs.get(self._parent_id_name)
                resource = self._item(request,
                                      id,
                                      do_authz=True,
                                      field_list=None,
                                      parent_id=parent_id)
            except (oslo_policy.PolicyNotAuthorized,
                    oslo_policy.InvalidScope):
                # 404 instead of 403 so the resource's existence is not
                # leaked to unauthorized callers.
                msg = _('The resource could not be found.')
                raise webob.exc.HTTPNotFound(msg)
            body = kwargs.pop('body', None)
            # Explicit comparison with None to distinguish from {}
            if body is not None:
                arg_list.append(body)
            # It is ok to raise a 403 because accessibility to the
            # object was checked earlier in this method
            policy.enforce(request.context,
                           name,
                           resource,
                           pluralized=self._collection)
            ret_value = getattr(self._plugin, name)(*arg_list, **kwargs)
            # It is simply impossible to predict whether one of this
            # actions alters resource usage. For instance a tenant port
            # is created when a router interface is added. Therefore it is
            # important to mark as dirty resources whose counters have
            # been altered by this operation
            resource_registry.set_resources_dirty(request.context)
            return ret_value
        return _handle_action
    else:
        raise AttributeError()
def _get_pagination_helper(self, request):
    """Pick the pagination helper matching the plugin's capabilities."""
    if not self._allow_pagination:
        return api_common.NoPaginationHelper(request, self._primary_key)
    if self._native_pagination:
        return api_common.PaginationNativeHelper(request,
                                                 self._primary_key)
    return api_common.PaginationEmulatedHelper(request, self._primary_key)
def _get_sorting_helper(self, request):
    """Pick the sorting helper matching the plugin's capabilities."""
    if not self._allow_sorting:
        return api_common.NoSortingHelper(request, self._attr_info)
    if self._native_sorting:
        return api_common.SortingNativeHelper(request, self._attr_info)
    return api_common.SortingEmulatedHelper(request, self._attr_info)
def _items(self, request, do_authz=False, parent_id=None):
    """Retrieves and formats a list of elements of the requested entity.

    :param request: the WSGI request carrying context and query args
    :param do_authz: when True, items the caller may not SHOW are
        silently omitted from the result
    :param parent_id: id of the parent resource for sub-resources
    :returns: dict keyed by the collection name, plus optional
        pagination links
    """
    # NOTE(salvatore-orlando): The following ensures that fields which
    # are needed for authZ policy validation are not stripped away by the
    # plugin before returning.
    original_fields, fields_to_add = self._do_field_list(
        api_common.list_args(request, 'fields'))
    filters = api_common.get_filters(
        request, self._attr_info,
        ['fields', 'sort_key', 'sort_dir',
         'limit', 'marker', 'page_reverse'],
        is_filter_validation_supported=self._filter_validation)
    kwargs = {'filters': filters,
              'fields': original_fields}
    # Helpers adjust both the plugin kwargs and the requested field
    # list; native helpers delegate sorting/pagination to the plugin.
    sorting_helper = self._get_sorting_helper(request)
    pagination_helper = self._get_pagination_helper(request)
    sorting_helper.update_args(kwargs)
    sorting_helper.update_fields(original_fields, fields_to_add)
    pagination_helper.update_args(kwargs)
    pagination_helper.update_fields(original_fields, fields_to_add)
    if parent_id:
        kwargs[self._parent_id_name] = parent_id
    obj_getter = getattr(self._plugin, self._plugin_handlers[self.LIST])
    obj_list = obj_getter(request.context, **kwargs)
    obj_list = sorting_helper.sort(obj_list)
    obj_list = pagination_helper.paginate(obj_list)
    # Check authz
    if do_authz:
        # FIXME(salvatore-orlando): obj_getter might return references to
        # other resources. Must check authZ on them too.
        # Omit items from list that should not be visible
        tmp_list = []
        for obj in obj_list:
            self._set_parent_id_into_ext_resources_request(
                request, obj, parent_id, is_get=True)
            if policy.check(
                    request.context, self._plugin_handlers[self.SHOW],
                    obj, plugin=self._plugin, pluralized=self._collection):
                tmp_list.append(obj)
        obj_list = tmp_list
    # Use the first element in the list for discriminating which attributes
    # should be filtered out because of authZ policies
    # fields_to_add contains a list of attributes added for request policy
    # checks but that were not required by the user. They should be
    # therefore stripped
    fields_to_strip = fields_to_add or []
    if obj_list:
        fields_to_strip += self._exclude_attributes_by_policy(
            request.context, obj_list[0])
    collection = {self._collection:
                  [self._filter_attributes(
                      obj, fields_to_strip=fields_to_strip)
                   for obj in obj_list]}
    pagination_links = pagination_helper.get_links(obj_list)
    if pagination_links:
        collection[self._collection + "_links"] = pagination_links
    # Synchronize usage trackers, if needed
    resource_registry.resync_resource(
        request.context, self._resource, request.context.tenant_id)
    return collection
def _item(self, request, id, do_authz=False, field_list=None,
          parent_id=None):
    """Retrieves and formats a single element of the requested entity.

    :param request: the WSGI request
    :param id: identifier of the object to fetch
    :param do_authz: when True, enforce the SHOW policy on the result
    :param field_list: fields the plugin should return (None for all)
    :param parent_id: id of the parent resource for sub-resources
    """
    kwargs = {'fields': field_list}
    action = self._plugin_handlers[self.SHOW]
    if parent_id:
        kwargs[self._parent_id_name] = parent_id
    obj_getter = getattr(self._plugin, action)
    obj = obj_getter(request.context, id, **kwargs)
    # Attach ext-parent info so parent-based policy rules can evaluate.
    self._set_parent_id_into_ext_resources_request(
        request, obj, parent_id, is_get=True)
    # Check authz
    # FIXME(salvatore-orlando): obj_getter might return references to
    # other resources. Must check authZ on them too.
    if do_authz:
        policy.enforce(request.context,
                       action,
                       obj,
                       pluralized=self._collection)
    return obj
@db_api.retry_db_errors
def index(self, request, **kwargs):
    """Returns a list of the requested entity (GET on the collection)."""
    parent_id = kwargs.get(self._parent_id_name)
    # Ensure policy engine is initialized
    policy.init()
    return self._items(request, True, parent_id)
@db_api.retry_db_errors
def show(self, request, id, **kwargs):
    """Returns detailed information about the requested entity.

    Unauthorized access is reported as 404 (not 403) so the mere
    existence of the resource is not revealed.
    """
    try:
        # NOTE(salvatore-orlando): The following ensures that fields
        # which are needed for authZ policy validation are not stripped
        # away by the plugin before returning.
        field_list, added_fields = self._do_field_list(
            api_common.list_args(request, "fields"))
        parent_id = kwargs.get(self._parent_id_name)
        # Ensure policy engine is initialized
        policy.init()
        return {self._resource:
                self._view(request.context,
                           self._item(request,
                                      id,
                                      do_authz=True,
                                      field_list=field_list,
                                      parent_id=parent_id),
                           fields_to_strip=added_fields)}
    except (oslo_policy.PolicyNotAuthorized, oslo_policy.InvalidScope):
        # To avoid giving away information, pretend that it
        # doesn't exist
        msg = _('The resource could not be found.')
        raise webob.exc.HTTPNotFound(msg)
def _emulate_bulk_create(self, obj_creator, request, body, parent_id=None):
    """Emulate an atomic bulk create by creating items one at a time.

    If any create fails, every already-created object is deleted
    (best effort) before the original exception is re-raised.
    """
    objs = []
    try:
        for item in body[self._collection]:
            kwargs = {self._resource: item}
            if parent_id:
                kwargs[self._parent_id_name] = parent_id
            fields_to_strip = self._exclude_attributes_by_policy(
                request.context, item)
            objs.append(self._filter_attributes(
                obj_creator(request.context, **kwargs),
                fields_to_strip=fields_to_strip))
        return objs
    # Note(salvatore-orlando): broad catch as in theory a plugin
    # could raise any kind of exception
    except Exception:
        with excutils.save_and_reraise_exception():
            for obj in objs:
                obj_deleter = getattr(self._plugin,
                                      self._plugin_handlers[self.DELETE])
                try:
                    kwargs = ({self._parent_id_name: parent_id}
                              if parent_id else {})
                    obj_deleter(request.context, obj['id'], **kwargs)
                except Exception:
                    # broad catch as our only purpose is to log the
                    # exception
                    LOG.exception("Unable to undo add for "
                                  "%(resource)s %(id)s",
                                  {'resource': self._resource,
                                   'id': obj['id']})
        # TODO(salvatore-orlando): The object being processed when the
        # plugin raised might have been created or not in the db.
        # We need a way for ensuring that if it has been created,
        # it is then deleted
def create(self, request, body=None, **kwargs):
    """Emit the ``.create.start`` notification, then create the entity."""
    self._notifier.info(request.context,
                        self._resource + '.create.start',
                        body)
    return self._create(request, body, **kwargs)
@db_api.retry_db_errors
def _create(self, request, body, **kwargs):
    """Creates a new instance of the requested entity.

    Validates the request body, enforces the create policy per item,
    makes quota reservations per tenant, dispatches to the plugin
    (native bulk, emulated bulk, or single create) and emits the
    ``.create.end`` notification on success. Reservations are
    committed on success and cancelled on plugin failure.
    """
    parent_id = kwargs.get(self._parent_id_name)
    try:
        body = Controller.prepare_request_body(
            request.context, body, True, self._resource, self._attr_info,
            allow_bulk=self._allow_bulk)
    except Exception as e:
        LOG.warning("An exception happened while processing the request "
                    "body. The exception message is [%s].", e)
        raise e
    action = self._plugin_handlers[self.CREATE]
    # Check authz
    if self._collection in body:
        # Have to account for bulk create
        items = body[self._collection]
    else:
        items = [body]
    # Ensure policy engine is initialized
    policy.init()
    # Store requested resource amounts grouping them by tenant
    # This won't work with multiple resources. However because of the
    # current structure of this controller there will hardly be more than
    # one resource for which reservations are being made
    request_deltas = collections.defaultdict(int)
    for item in items:
        self._validate_network_tenant_ownership(request,
                                                item[self._resource])
        # For ext resources policy check, we support two types, such as
        # parent_id is in request body, another type is parent_id is in
        # request url, which we can get from kwargs.
        self._set_parent_id_into_ext_resources_request(
            request, item[self._resource], parent_id)
        policy.enforce(request.context,
                       action,
                       item[self._resource],
                       pluralized=self._collection)
        if 'tenant_id' not in item[self._resource]:
            # no tenant_id - no quota check
            continue
        tenant_id = item[self._resource]['tenant_id']
        request_deltas[tenant_id] += 1
    # Quota enforcement
    reservations = []
    try:
        for (tenant, delta) in request_deltas.items():
            reservation = quota.QUOTAS.make_reservation(
                request.context,
                tenant,
                {self._resource: delta},
                self._plugin)
            if reservation:
                reservations.append(reservation)
    except exceptions.QuotaResourceUnknown as e:
        # We don't want to quota this resource
        LOG.debug(e)

    def notify(create_result):
        # Ensure usage trackers for all resources affected by this API
        # operation are marked as dirty
        # Commit the reservation(s)
        for reservation in reservations:
            quota.QUOTAS.commit_reservation(
                request.context, reservation.reservation_id)
        resource_registry.set_resources_dirty(request.context)
        notifier_method = self._resource + '.create.end'
        self._notifier.info(request.context,
                            notifier_method,
                            create_result)
        registry.publish(self._resource, events.BEFORE_RESPONSE, self,
                         payload=events.APIEventPayload(
                             request.context, notifier_method, action,
                             request_body=body,
                             states=({}, create_result,),
                             collection_name=self._collection))
        return create_result

    def do_create(body, bulk=False, emulated=False):
        # Dispatch to the plugin's bulk or single create handler,
        # cancelling quota reservations if the plugin raises.
        kwargs = {self._parent_id_name: parent_id} if parent_id else {}
        if bulk and not emulated:
            obj_creator = getattr(self._plugin, "%s_bulk" % action)
        else:
            obj_creator = getattr(self._plugin, action)
        try:
            if emulated:
                return self._emulate_bulk_create(obj_creator, request,
                                                 body, parent_id)
            else:
                if self._collection in body:
                    # This is weird but fixing it requires changes to the
                    # plugin interface
                    kwargs.update({self._collection: body})
                else:
                    kwargs.update({self._resource: body})
                return obj_creator(request.context, **kwargs)
        except Exception:
            # In case of failure the plugin will always raise an
            # exception. Cancel the reservation
            with excutils.save_and_reraise_exception():
                for reservation in reservations:
                    quota.QUOTAS.cancel_reservation(
                        request.context, reservation.reservation_id)

    if self._collection in body and self._native_bulk:
        # plugin does atomic bulk create operations
        objs = do_create(body, bulk=True)
        # Use first element of list to discriminate attributes which
        # should be removed because of authZ policies
        fields_to_strip = self._exclude_attributes_by_policy(
            request.context, objs[0])
        return notify({self._collection:
                       [self._filter_attributes(
                           obj, fields_to_strip=fields_to_strip)
                        for obj in objs]})
    else:
        if self._collection in body:
            # Emulate atomic bulk behavior
            objs = do_create(body, bulk=True, emulated=True)
            return notify({self._collection: objs})
        else:
            obj = do_create(body)
            return notify({self._resource: self._view(request.context,
                                                      obj)})
def delete(self, request, id, **kwargs):
    """Deletes the specified entity.

    Rejects any request carrying a body, emits the ``.delete.start``
    notification, then delegates to :meth:`_delete`.
    """
    if request.body:
        msg = _('Request body is not supported in DELETE.')
        raise webob.exc.HTTPBadRequest(msg)
    self._notifier.info(request.context,
                        self._resource + '.delete.start',
                        {self._resource + '_id': id})
    return self._delete(request, id, **kwargs)
@db_api.retry_db_errors
def _delete(self, request, id, **kwargs):
    """Delete the entity after enforcing the DELETE policy.

    If the caller may not DELETE but also may not SHOW the object,
    a 404 is returned instead of 403 to avoid leaking its existence.
    Emits the ``.delete.end`` notification on success.
    """
    action = self._plugin_handlers[self.DELETE]
    # Check authz
    policy.init()
    parent_id = kwargs.get(self._parent_id_name)
    obj = self._item(request, id, parent_id=parent_id)
    try:
        policy.enforce(request.context,
                       action,
                       obj,
                       pluralized=self._collection)
    except (oslo_policy.PolicyNotAuthorized, oslo_policy.InvalidScope):
        # To avoid giving away information, pretend that it
        # doesn't exist if policy does not authorize SHOW
        with excutils.save_and_reraise_exception() as ctxt:
            if not policy.check(request.context,
                                self._plugin_handlers[self.SHOW],
                                obj,
                                pluralized=self._collection):
                ctxt.reraise = False
        msg = _('The resource could not be found.')
        raise webob.exc.HTTPNotFound(msg)
    obj_deleter = getattr(self._plugin, action)
    obj_deleter(request.context, id, **kwargs)
    # A delete operation usually alters resource usage, so mark affected
    # usage trackers as dirty
    resource_registry.set_resources_dirty(request.context)
    notifier_method = self._resource + '.delete.end'
    result = {self._resource: self._view(request.context, obj)}
    notifier_payload = {self._resource + '_id': id}
    notifier_payload.update(result)
    self._notifier.info(request.context,
                        notifier_method,
                        notifier_payload)
    registry.publish(self._resource, events.BEFORE_RESPONSE, self,
                     payload=events.APIEventPayload(
                         request.context, notifier_method, action,
                         states=({}, obj, result,),
                         collection_name=self._collection))
def update(self, request, id, body=None, **kwargs):
    """Updates the specified entity's attributes."""
    try:
        # Copy so the notification payload can carry the id without
        # mutating the request body.
        payload = body.copy()
    except AttributeError:
        msg = _("Invalid format: %s") % request.body
        raise exceptions.BadRequest(resource='body', msg=msg)
    payload['id'] = id
    self._notifier.info(request.context,
                        self._resource + '.update.start',
                        payload)
    return self._update(request, id, body, **kwargs)
@db_api.retry_db_errors
def _update(self, request, id, body, **kwargs):
    """Validate and apply an update to an existing entity.

    Loads the current object (restricted to the attributes the policy
    engine needs), enforces the UPDATE policy against the merged
    state, delegates to the plugin, and emits ``.update.end``.
    Unauthorized callers who also cannot SHOW the object get a 404.
    """
    try:
        body = Controller.prepare_request_body(
            request.context, body, False, self._resource, self._attr_info,
            allow_bulk=self._allow_bulk)
    except Exception as e:
        LOG.warning("An exception happened while processing the request "
                    "body. The exception message is [%s].", e)
        raise e
    action = self._plugin_handlers[self.UPDATE]
    # Load object to check authz
    # but pass only attributes in the original body and required
    # by the policy engine to the policy 'brain'
    field_list = [name for (name, value) in self._attr_info.items()
                  if (value.get('required_by_policy') or
                      value.get('primary_key') or
                      'default' not in value)]
    # Ensure policy engine is initialized
    policy.init()
    parent_id = kwargs.get(self._parent_id_name)
    # If the parent_id exist, we should get orig_obj with
    # self._parent_id_name field.
    if parent_id and self._parent_id_name not in field_list:
        field_list.append(self._parent_id_name)
    orig_obj = self._item(request, id, field_list=field_list,
                          parent_id=parent_id)
    orig_object_copy = copy.copy(orig_obj)
    orig_obj.update(body[self._resource])
    # Make a list of attributes to be updated to inform the policy engine
    # which attributes are set explicitly so that it can distinguish them
    # from the ones that are set to their default values.
    orig_obj[constants.ATTRIBUTES_TO_UPDATE] = body[self._resource].keys()
    # Then get the ext_parent_id, format to ext_parent_parent_resource_id
    if self._parent_id_name in orig_obj:
        self._set_parent_id_into_ext_resources_request(
            request, orig_obj, parent_id)
    try:
        policy.enforce(request.context,
                       action,
                       orig_obj,
                       pluralized=self._collection)
    except (oslo_policy.PolicyNotAuthorized, oslo_policy.InvalidScope):
        # To avoid giving away information, pretend that it
        # doesn't exist if policy does not authorize SHOW
        with excutils.save_and_reraise_exception() as ctxt:
            if not policy.check(request.context,
                                self._plugin_handlers[self.SHOW],
                                orig_obj,
                                pluralized=self._collection):
                ctxt.reraise = False
        msg = _('The resource could not be found.')
        raise webob.exc.HTTPNotFound(msg)
    if self._native_bulk and hasattr(self._plugin, "%s_bulk" % action):
        obj_updater = getattr(self._plugin, "%s_bulk" % action)
    else:
        obj_updater = getattr(self._plugin, action)
    kwargs = {self._resource: body}
    if parent_id:
        kwargs[self._parent_id_name] = parent_id
    obj = obj_updater(request.context, id, **kwargs)
    # Usually an update operation does not alter resource usage, but as
    # there might be side effects it might be worth checking for changes
    # in resource usage here as well (e.g: a tenant port is created when a
    # router interface is added)
    resource_registry.set_resources_dirty(request.context)
    result = {self._resource: self._view(request.context, obj)}
    notifier_method = self._resource + '.update.end'
    self._notifier.info(request.context, notifier_method, result)
    registry.publish(self._resource, events.BEFORE_RESPONSE, self,
                     payload=events.APIEventPayload(
                         request.context, notifier_method, action,
                         request_body=body,
                         states=(orig_object_copy, result,),
                         collection_name=self._collection))
    return result
@staticmethod
def prepare_request_body(context, body, is_create, resource, attr_info,
                         allow_bulk=False):
    """Verifies required attributes are in request body.

    Also checking that an attribute is only specified if it is allowed
    for the given operation (create/update).

    Attribute with default values are considered to be optional.

    body argument must be the deserialized body.
    """
    # The collection name is derived by naive pluralization ("<res>s").
    collection = resource + "s"
    if not body:
        raise webob.exc.HTTPBadRequest(_("Resource body required"))
    LOG.debug("Request body: %(body)s", {'body': body})
    try:
        if collection in body:
            if not allow_bulk:
                raise webob.exc.HTTPBadRequest(_("Bulk operation "
                                                 "not supported"))
            if not body[collection]:
                raise webob.exc.HTTPBadRequest(_("Resources required"))
            try:
                # Validate each bulk item recursively through the
                # single-resource path.
                bulk_body = [
                    Controller.prepare_request_body(
                        context, item if resource in item
                        else {resource: item}, is_create, resource,
                        attr_info, allow_bulk) for item in body[collection]
                ]
                return {collection: bulk_body}
            except Exception as e:
                LOG.warning(
                    "An exception happened while processing the request "
                    "body. The exception message is [%s].", e)
                raise e
        res_dict = body.get(resource)
    except (AttributeError, TypeError):
        msg = _("Body contains invalid data")
        raise webob.exc.HTTPBadRequest(msg)
    if res_dict is None:
        msg = _("Unable to find '%s' in request body") % resource
        raise webob.exc.HTTPBadRequest(msg)
    if not isinstance(res_dict, dict):
        msg = _("Object '%s' contains invalid data") % resource
        raise webob.exc.HTTPBadRequest(msg)
    attr_ops = attributes.AttributeInfo(attr_info)
    attr_ops.populate_project_id(context, res_dict, is_create)
    attributes.populate_project_info(attr_info)
    attr_ops.verify_attributes(res_dict)
    if is_create:  # POST
        attr_ops.fill_post_defaults(
            res_dict, exc_cls=webob.exc.HTTPBadRequest)
    else:  # PUT
        for attr, attr_vals in attr_info.items():
            if attr in res_dict and not attr_vals['allow_put']:
                msg = _("Cannot update read-only attribute %s") % attr
                raise webob.exc.HTTPBadRequest(msg)
    attr_ops.convert_values(res_dict, exc_cls=webob.exc.HTTPBadRequest)
    return body
def _validate_network_tenant_ownership(self, request, resource_item):
# TODO(salvatore-orlando): consider whether this check can be folded
# in the policy engine
if (request.context.is_admin or request.context.is_advsvc or
self._resource not in ('port', 'subnet')):
return
network = self._plugin.get_network(
request.context,
resource_item['network_id'])
# do not perform the check on shared networks
if network.get('shared'):
return
network_owner = network['tenant_id']
if network_owner != resource_item['tenant_id']:
# NOTE(kevinbenton): we raise a 404 to hide the existence of the
# network from the tenant since they don't have access to it.
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
def _set_parent_id_into_ext_resources_request(
        self, request, resource_item, parent_id, is_get=False):
    """Inject the ext-parent id into *resource_item* for policy checks.

    Adds a ``<EXT_PARENT_PREFIX>_<parent_id_name>`` key (via
    ``setdefault``) so parent-based policy rules can be evaluated.
    No-op when there is no *parent_id*.
    """
    if not parent_id:
        return

    # This will pass most create/update/delete cases
    if not is_get and (request.context.is_admin or
                       request.context.is_advsvc or
                       self.parent['member_name'] not in
                       service_const.EXT_PARENT_RESOURCE_MAPPING or
                       resource_item.get(self._parent_id_name)):
        return

    # Then we arrive here, that means the request or get obj contains
    # ext_parent. If this func is called by list/get, and it contains
    # _parent_id_name. We need to re-add the ex_parent prefix to policy.
    if is_get:
        # NOTE(review): by operator precedence this condition reads as
        # ``not is_admin or (not is_advsvc and member in mapping)``.
        # Presumably ``not (is_admin or is_advsvc) and member in
        # mapping`` was intended -- confirm before changing behavior.
        if (not request.context.is_admin or
                not request.context.is_advsvc and
                self.parent['member_name'] in
                service_const.EXT_PARENT_RESOURCE_MAPPING):
            resource_item.setdefault(
                "%s_%s" % (constants.EXT_PARENT_PREFIX,
                           self._parent_id_name),
                parent_id)
    # If this func is called by create/update/delete, we just add.
    else:
        resource_item.setdefault(
            "%s_%s" % (constants.EXT_PARENT_PREFIX, self._parent_id_name),
            parent_id)
def create_resource(collection, resource, plugin, params, allow_bulk=False,
                    member_actions=None, parent=None, allow_pagination=False,
                    allow_sorting=False):
    """Build a WSGI Resource wrapping a Controller for *resource*."""
    controller = Controller(
        plugin, collection, resource, params, allow_bulk,
        member_actions=member_actions,
        parent=parent,
        allow_pagination=allow_pagination,
        allow_sorting=allow_sorting)
    return wsgi_resource.Resource(controller, faults.FAULT_MAP)
|
3fe267d37c2a2bdde093c6098cd591609956cbe4
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/sa/profiles/Huawei/VRP3/get_config.py
|
0535150e9cb761312a0874ad37bff71b275ed4dc
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 950
|
py
|
get_config.py
|
# ---------------------------------------------------------------------
# Huawei.VRP3.get_config
# sergey.sadovnikov@gmail.com
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetconfig import IGetConfig
class Script(BaseScript):
    """Retrieve the running configuration from Huawei VRP3 devices."""

    name = "Huawei.VRP3.get_config"
    interface = IGetConfig

    def execute_cli(self, **kwargs):
        # Disable console monitoring so asynchronous messages do not
        # pollute the captured output.
        self.cli("no monitor")
        with self.configure():
            try:
                config = self.cli("show running-config")
            except self.CLISyntaxError:
                # Command rejected on MA5600 V100R011(MA5605) versions.
                raise self.NotSupportedError()
        # Drop the leading echo/banner lines, then normalize.
        return self.cleaned_config(self.strip_first_lines(config, 3))
|
f948c90a2ad774777e348ade41d56aa7d9b65453
|
4fb35218d67890a6e011b149be391dfd78d931b7
|
/self_instruct/src/data_processing/generate_char_image_prompts.py
|
231055635444f2de8e0d463ae5866c79a9d8cc3b
|
[
"Apache-2.0"
] |
permissive
|
IlyaGusev/rulm
|
8d76dc9f1b230e314412673b3b3ed7312b1de3db
|
ede656a2fa6cd68af8ae3dfecde300758a9bae6f
|
refs/heads/master
| 2023-07-28T10:31:47.336766
| 2023-07-26T19:03:23
| 2023-07-26T19:03:23
| 148,945,566
| 204
| 23
|
Apache-2.0
| 2023-08-26T13:07:05
| 2018-09-15T22:45:08
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,852
|
py
|
generate_char_image_prompts.py
|
import json
import os
import shutil
import fire
from jinja2 import Template
from tqdm import tqdm
from src.util.io import read_jsonl, write_jsonl
from src.util.openai import openai_batch_completion, OpenAIDecodingArguments
def encode_prompt(char, template_path):
    """Render the image-prompt template with the character's context."""
    with open(template_path) as template_file:
        prompt_template = Template(template_file.read())
    rendered = prompt_template.render(char_context=char["context"])
    return rendered.strip() + "\n"
def get_char_key(char):
    """Deduplication key for a character: (stripped name, stripped context)."""
    name = char["name"].strip()
    context = char["context"].strip()
    return name, context
def process_batch(batch, model_name, template_path):
    """Query the model for each record in *batch*; return key -> prompt."""
    messages = [
        [{"role": "user", "content": encode_prompt(record, template_path)}]
        for record in batch
    ]
    results = openai_batch_completion(
        batch=messages,
        model_name=model_name,
        decoding_args=OpenAIDecodingArguments(max_tokens=3076),
    )
    final_prompts = dict()
    for char, message, result in zip(batch, messages, results):
        text = result.message["content"]
        final_prompts[get_char_key(char)] = text
        # Echo request/response pairs for manual inspection.
        print(message[-1]["content"])
        print(text)
        print("=============")
        print()
    return final_prompts
def main(
    chars_path,
    output_path,
    template_path,
    model_name="gpt-4",
    request_batch_size=5
):
    """Generate an image prompt for every character record.

    Reads records from *chars_path* (JSONL), skips records whose key
    already exists in *output_path*, queries the model in batches of
    *request_batch_size*, and rewrites *output_path* after each batch
    (write to ``_tmp`` then move, so a crash cannot truncate output).
    """
    existing_keys = set()
    output_records = []
    if os.path.exists(output_path):
        with open(output_path) as f:
            output_records = [json.loads(line) for line in f]
        existing_keys = {get_char_key(r) for r in output_records}
        print(f"Existing keys: {len(existing_keys)}")
    chars = read_jsonl(chars_path)
    # Map record key -> index so batch results can be written back.
    key2idx = {get_char_key(char): idx for idx, char in enumerate(chars)}
    batch = []
    output_chars = []
    for char in tqdm(chars):
        # Drop similarity bookkeeping fields; not needed downstream.
        char.pop("most_similar_chars", None)
        char.pop("avg_similarity_score", None)
        key = get_char_key(char)
        if key in existing_keys:
            print(f"Skipping {key}")
            # NOTE(review): this appends the *input* record, not the
            # previously generated output record -- if inputs lack
            # "image_prompt", rewriting output_path below may drop
            # prompts from earlier runs. Confirm input schema.
            output_chars.append(char)
            continue
        batch.append(char)
        if len(batch) != request_batch_size:
            continue
        prompts = process_batch(batch, model_name, template_path)
        for key, prompt in prompts.items():
            chars[key2idx[key]]["image_prompt"] = prompt
            output_chars.append(chars[key2idx[key]])
        batch = []
        # Atomic-ish rewrite: temp file first, then move into place.
        write_jsonl(output_chars, output_path + "_tmp")
        shutil.move(output_path + "_tmp", output_path)
    if batch:
        # Flush the final partial batch.
        prompts = process_batch(batch, model_name, template_path)
        for key, prompt in prompts.items():
            chars[key2idx[key]]["image_prompt"] = prompt
            output_chars.append(chars[key2idx[key]])
        write_jsonl(output_chars, output_path + "_tmp")
        shutil.move(output_path + "_tmp", output_path)
# CLI entry point: expose ``main``'s parameters as command-line flags.
if __name__ == "__main__":
    fire.Fire(main)
|
bcf2c78d58dbedbcc7a2d9e792e0ec6c93e7ca0d
|
09557f76830b73d95ac1c7175833bc74280db53b
|
/dmb/utils/dist_utils.py
|
249c42feb428e5d54f3d41b2a98e251a8499deb8
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
DeepMotionAIResearch/DenseMatchingBenchmark
|
e32c6c0846dcb4103289d539be28ef7382b70c8e
|
010aeb66e3ceaf3d866036b0ca751861df39432d
|
refs/heads/master
| 2021-11-11T18:56:05.160934
| 2021-11-08T10:28:47
| 2021-11-08T10:28:47
| 222,069,511
| 183
| 39
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,042
|
py
|
dist_utils.py
|
from collections import OrderedDict
import torch.distributed as dist
from torch._utils import (
_flatten_dense_tensors, _unflatten_dense_tensors, _take_tensors
)
from mmcv.runner import OptimizerHook
try:
from apex import amp
import apex
except ImportError:
raise ImportError('Use APEX for multi-precision via apex.amp')
def _all_reduce_coalesced(tensors, world_size, bucket_size_mb=-1):
    """All-reduce *tensors* in flattened buckets and average by rank count.

    With a positive *bucket_size_mb*, tensors are packed into
    size-limited buckets; otherwise they are grouped by type so each
    group can be flattened into a single contiguous buffer.
    """
    if bucket_size_mb > 0:
        bucket_cap_bytes = bucket_size_mb * 1024 * 1024
        buckets = _take_tensors(tensors, bucket_cap_bytes)
    else:
        grouped = OrderedDict()
        for tensor in tensors:
            grouped.setdefault(tensor.type(), []).append(tensor)
        buckets = grouped.values()
    for bucket in buckets:
        flat = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat)
        flat.div_(world_size)
        # Copy the synchronized values back into the original tensors.
        for tensor, synced in zip(bucket,
                                  _unflatten_dense_tensors(flat, bucket)):
            tensor.copy_(synced)
def all_reduce_grads(model, coalesce=True, bucket_size_mb=-1):
    """Synchronize and average the gradients of *model* across ranks."""
    grads = []
    for param in model.parameters():
        if param.requires_grad and param.grad is not None:
            grads.append(param.grad.data)
    world_size = dist.get_world_size()
    if coalesce:
        _all_reduce_coalesced(grads, world_size, bucket_size_mb)
        return
    for tensor in grads:
        dist.all_reduce(tensor.div_(world_size))
class DistOptimizerHook(OptimizerHook):
    """Optimizer hook that all-reduces gradients across ranks per step.

    After the backward pass, gradients are synchronized with
    ``all_reduce_grads`` before optional clipping and the optimizer
    step.
    """

    def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1):
        super(DistOptimizerHook, self).__init__(grad_clip)
        self.grad_clip = grad_clip
        self.coalesce = coalesce
        self.bucket_size_mb = bucket_size_mb

    def after_train_iter(self, runner):
        runner.optimizer.zero_grad()
        runner.outputs['loss'].backward()
        # Average gradients across all ranks before stepping.
        all_reduce_grads(runner.model, self.coalesce, self.bucket_size_mb)
        if self.grad_clip is not None:
            self.clip_grads(runner.model.parameters())
        runner.optimizer.step()
class DistApexOptimizerHook(OptimizerHook):
    """Optimizer hook using NVIDIA Apex AMP loss scaling.

    Same flow as ``DistOptimizerHook``, but the backward pass goes
    through ``amp.scale_loss`` so mixed-precision gradients are scaled
    before the cross-rank all-reduce.
    """

    def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1, use_apex=True):
        super(DistApexOptimizerHook, self).__init__(grad_clip)
        self.grad_clip = grad_clip
        self.coalesce = coalesce
        self.bucket_size_mb = bucket_size_mb
        # Stored for configuration purposes; not consulted below.
        self.use_apex = use_apex

    def after_train_iter(self, runner):
        runner.model.zero_grad()
        runner.optimizer.zero_grad()
        # Note: If mixed precision is not used, this ends up doing nothing
        # Otherwise apply loss scaling for mixed-precision recipe
        with amp.scale_loss(runner.outputs['loss'], runner.optimizer) as scaled_losses:
            scaled_losses.backward()
        all_reduce_grads(runner.model, self.coalesce, self.bucket_size_mb)
        if self.grad_clip is not None:
            self.clip_grads(runner.model.parameters())
        runner.optimizer.step()
|
fb2b0f739ab1ad22597faf3a1c5d59a3764404d0
|
95b4a15808b9c412c8364db80fd619a65dd587e0
|
/src/compas/geometry/_core/predicates_3.py
|
55560cdf7a9f675f965a06b1f27ce75bb64b8d32
|
[
"MIT"
] |
permissive
|
compas-dev/compas
|
11d5c4d9afd554833297b4a5dbe6a975e6940ce3
|
486e2e9332553240bcbd80e100d26bff58071709
|
refs/heads/main
| 2023-08-31T15:49:32.430570
| 2023-08-17T10:19:52
| 2023-08-17T10:19:52
| 104,857,648
| 286
| 116
|
MIT
| 2023-09-12T13:53:36
| 2017-09-26T08:28:01
|
Python
|
UTF-8
|
Python
| false
| false
| 17,563
|
py
|
predicates_3.py
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from math import fabs
from compas.utilities import window
from compas.geometry import subtract_vectors
from compas.geometry import cross_vectors
from compas.geometry import dot_vectors
from compas.geometry import normalize_vector
from compas.geometry import centroid_points
from compas.geometry import normal_polygon
from compas.geometry import length_vector_sqrd
from compas.geometry import distance_point_point
from compas.geometry import distance_point_plane
from compas.geometry import distance_point_line
from compas.geometry import closest_point_on_segment
from compas.geometry import area_triangle
def is_colinear(a, b, c, tol=1e-6):
    """Determine if three points are colinear.

    Parameters
    ----------
    a : [float, float, float] | :class:`~compas.geometry.Point`
        Point 1.
    b : [float, float, float] | :class:`~compas.geometry.Point`
        Point 2.
    c : [float, float, float] | :class:`~compas.geometry.Point`
        Point 3.
    tol : float, optional
        A tolerance for membership verification.

    Returns
    -------
    bool
        True if the points are colinear.
        False otherwise.

    """
    # Degenerate (near-zero-area) triangles indicate colinearity.
    area = area_triangle([a, b, c])
    return area < tol
def is_colinear_line_line(line1, line2, tol=1e-6):
    """Determine if two lines are colinear.

    Parameters
    ----------
    line1 : [point, point] | :class:`~compas.geometry.Line`
        Line 1.
    line2 : [point, point] | :class:`~compas.geometry.Line`
        Line 2.
    tol : float, optional
        A tolerance for colinearity verification.

    Returns
    -------
    bool
        True if the lines are colinear.
        False otherwise.

    """
    a, b = line1
    # Both endpoints of line2 must lie on the line through a and b.
    return all(is_colinear(a, b, point, tol) for point in line2)
def is_parallel_line_line(line1, line2, tol=1e-6):
    """Determine if two lines are parallel.

    Parameters
    ----------
    line1 : [point, point] | :class:`~compas.geometry.Line`
        Line 1.
    line2 : [point, point] | :class:`~compas.geometry.Line`
        Line 2.
    tol : float, optional
        A tolerance for parallelity verification.

    Returns
    -------
    bool
        True if the lines are parallel.
        False otherwise.

    """
    u = normalize_vector(subtract_vectors(line1[1], line1[0]))
    v = normalize_vector(subtract_vectors(line2[1], line2[0]))
    # Unit directions are (anti)parallel when |cos(angle)| is (almost) 1.
    cosine = dot_vectors(u, v)
    return abs(cosine) > 1.0 - tol
def is_coplanar(points, tol=0.01):
    """Determine if the points are coplanar.

    Parameters
    ----------
    points : sequence[point]
        A sequence of point locations.
    tol : float, optional
        A tolerance for planarity validation.

    Returns
    -------
    bool
        True if the points are coplanar.
        False otherwise.

    Notes
    -----
    Four points are coplanar if the volume of the tetrahedron defined by them
    is 0, which can be stated in vector form as
    (x2 - x0) . [(x1 - x0) x (x3 - x2)] = 0.

    For more than four points, the normal of the plane spanned by the first
    three points is compared against the normal obtained from each remaining
    point in turn; the points are coplanar if all normals are parallel.

    """
    if len(points) < 4:
        # Three or fewer points always share a plane.
        return True
    tol2 = tol**2
    if len(points) == 4:
        v01 = subtract_vectors(points[1], points[0])
        v02 = subtract_vectors(points[2], points[0])
        v23 = subtract_vectors(points[3], points[2])
        res = dot_vectors(v02, cross_vectors(v01, v23))
        return res**2 < tol2
    a, b, c = points[:3]
    ab = subtract_vectors(b, a)
    n0 = cross_vectors(ab, subtract_vectors(c, a))
    for c in points[3:]:
        n1 = cross_vectors(ab, subtract_vectors(c, a))
        # FIX: compare the *squared* length of the cross product against the
        # *squared* tolerance. The original compared it against ``tol``
        # directly, which was dimensionally inconsistent with the
        # four-point branch above (where ``tol2`` is used).
        if length_vector_sqrd(cross_vectors(n0, n1)) > tol2:
            return False
    return True
def is_polygon_convex(polygon):
    """Determine if a polygon is convex.

    Parameters
    ----------
    polygon : sequence[point] | :class:`~compas.geometry.Polygon`
        A polygon.

    Returns
    -------
    bool
        True if the polygon is convex.
        False otherwise.

    Notes
    -----
    Use this function for *spatial* polygons.
    If the polygon is in a horizontal plane, use :func:`is_polygon_convex_xy` instead.

    Examples
    --------
    >>> polygon = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.4, 0.4, 0.0], [0.0, 1.0, 0.0]]
    >>> is_polygon_convex(polygon)
    False

    """
    # Reference orientation, taken from the first corner of the polygon.
    ref = cross_vectors(
        subtract_vectors(polygon[0], polygon[1]),
        subtract_vectors(polygon[2], polygon[1]),
    )
    # Walk every corner (wrapping around) and require that the local
    # cross product never flips against the reference orientation.
    for prev, corner, nxt in window(polygon + polygon[:2], 3):
        oa = subtract_vectors(prev, corner)
        ob = subtract_vectors(nxt, corner)
        if dot_vectors(cross_vectors(oa, ob), ref) < 0:
            return False
    return True
def is_point_on_plane(point, plane, tol=1e-6):
    """Determine if a point lies on a plane.

    Parameters
    ----------
    point : [float, float, float] | :class:`~compas.geometry.Point`
        A point.
    plane : [point, vector] | :class:`~compas.geometry.Plane`
        A plane.
    tol : float, optional
        A tolerance for membership verification.

    Returns
    -------
    bool
        True if the point is on the plane.
        False otherwise.

    """
    # Membership is simply a distance-to-plane test.
    d = distance_point_plane(point, plane)
    return d <= tol
def is_point_infront_plane(point, plane, tol=1e-6):
    """Determine if a point lies in front of a plane.

    Parameters
    ----------
    point : [float, float, float] | :class:`~compas.geometry.Point`
        A point.
    plane : [point, vector] | :class:`~compas.geometry.Plane`
        A plane.
    tol : float, optional
        A tolerance for membership verification.

    Returns
    -------
    bool
        True if the point is in front of the plane.
        False otherwise.

    """
    base, normal = plane
    # A positive signed distance (beyond tol) puts the point on the
    # side the plane normal points towards.
    return dot_vectors(subtract_vectors(point, base), normal) > tol


# Backwards-compatible alias.
is_point_in_halfspace = is_point_infront_plane
def is_point_behind_plane(point, plane, tol=1e-6):
    """Determine if a point lies behind a plane.

    Parameters
    ----------
    point : [float, float, float] | :class:`~compas.geometry.Point`
        A point.
    plane : [point, normal] | :class:`~compas.geometry.Plane`
        A plane.
    tol : float, optional
        A tolerance for membership verification.

    Returns
    -------
    bool
        True if the point is behind the plane.
        False otherwise.

    """
    base, normal = plane
    # A negative signed distance (beyond tol) puts the point on the
    # side opposite to the plane normal.
    return dot_vectors(subtract_vectors(point, base), normal) < -tol
def is_point_on_line(point, line, tol=1e-6):
    """Determine if a point lies on a line.

    Parameters
    ----------
    point : [float, float, float] | :class:`~compas.geometry.Point`
        A point.
    line : [point, point] | :class:`~compas.geometry.Line`
        A line.
    tol : float, optional
        A tolerance for membership verification.

    Returns
    -------
    bool
        True if the point is on the line.
        False otherwise.

    """
    # Membership is simply a distance-to-line test.
    d = distance_point_line(point, line)
    return d <= tol
def is_point_on_segment(point, segment, tol=1e-6):
    """Determine if a point lies on a given line segment.

    Parameters
    ----------
    point : [float, float, float] | :class:`~compas.geometry.Point`
        A point.
    segment : [point, point] | :class:`~compas.geometry.Line`
        A line segment.
    tol : float, optional
        A tolerance for membership verification.

    Returns
    -------
    bool
        True if the point is on the line segment.
        False otherwise.

    """
    start, end = segment
    length = distance_point_point(start, end)
    if length == 0:
        # Degenerate (zero-length) segment: by convention nothing lies on it.
        return False
    if not is_point_on_line(point, (start, end), tol=tol):
        return False
    # On the supporting line, the point lies between the end points iff
    # the two partial distances add up to (roughly) the segment length.
    d = distance_point_point(start, point) + distance_point_point(end, point)
    return d <= length + tol
def is_point_on_polyline(point, polyline, tol=1e-6):
    """Determine if a point is on a polyline.

    Parameters
    ----------
    point : [float, float, float] | :class:`~compas.geometry.Point`
        A point.
    polyline : sequence[point] | :class:`~compas.geometry.Polyline`
        A polyline.
    tol : float, optional
        The tolerance for membership verification.

    Returns
    -------
    bool
        True if the point is on the polyline.
        False otherwise.

    """
    # Check the point against each consecutive segment of the polyline.
    for start, end in zip(polyline[:-1], polyline[1:]):
        nearest = closest_point_on_segment(point, (start, end))
        if distance_point_point(point, nearest) <= tol:
            return True
    return False
def is_point_in_triangle(point, triangle):
    """Determine if a point is in the interior of a triangle.

    Parameters
    ----------
    point : [float, float, float] | :class:`~compas.geometry.Point`
        A point.
    triangle : [point, point, point]
        A triangle.

    Returns
    -------
    bool
        True if the point is inside the triangle.
        False otherwise.

    See Also
    --------
    compas.geometry.is_point_in_triangle_xy

    Notes
    -----
    Should the point be on the same plane as the triangle?

    """

    def _same_side(p, q, edge):
        # True if p and q are not on strictly opposite sides of the edge.
        start, end = edge
        direction = subtract_vectors(end, start)
        c1 = cross_vectors(direction, subtract_vectors(p, start))
        c2 = cross_vectors(direction, subtract_vectors(q, start))
        return dot_vectors(c1, c2) >= 0

    a, b, c = triangle
    # The point is inside iff, for every edge, it lies on the same side
    # as the opposite triangle vertex.
    return (
        _same_side(point, a, (b, c))
        and _same_side(point, b, (a, c))
        and _same_side(point, c, (a, b))
    )
def is_point_in_circle(point, circle):
    """Determine if a point lies in a circle.

    Parameters
    ----------
    point : [float, float, float] | :class:`~compas.geometry.Point`
        A point.
    circle : [plane, float] | :class:`~compas.geometry.Circle`
        A circle.

    Returns
    -------
    bool
        True if the point lies in the circle.
        False otherwise.

    """
    plane, radius = circle
    # The point must lie on the circle's plane ...
    if not is_point_on_plane(point, plane):
        return False
    # ... and within one radius of the circle's centre.
    center = plane[0]
    return distance_point_point(point, center) <= radius
def is_intersection_line_line(l1, l2, tol=1e-6):
    """Verifies if two lines intersect.

    Parameters
    ----------
    l1 : [point, point] | :class:`~compas.geometry.Line`
        A line.
    l2 : [point, point] | :class:`~compas.geometry.Line`
        A line.
    tol : float, optional
        A tolerance for intersection verification.

    Returns
    -------
    bool
        True if the lines intersect in one point.
        False if the lines are skew, parallel or lie on top of each other.

    """
    p1, p2 = l1
    p3, p4 = l2
    d1 = normalize_vector(subtract_vectors(p2, p1))
    d2 = normalize_vector(subtract_vectors(p4, p3))
    # Parallel (or colinear) lines never meet in a single point.
    if abs(dot_vectors(d1, d2)) > 1.0 - tol:
        return False
    # Non-parallel lines intersect iff they are not skew, i.e. the vector
    # connecting them is perpendicular to the common normal of the directions.
    normal = cross_vectors(d1, d2)
    return abs(dot_vectors(normal, subtract_vectors(p3, p1))) < tol
def is_intersection_segment_segment(s1, s2, tol=1e-6):
    """Verifies if two segments intersect.

    Parameters
    ----------
    s1 : [point, point] | :class:`~compas.geometry.Line`
        A line segment.
    s2 : [point, point] | :class:`~compas.geometry.Line`
        A line segment.
    tol : float, optional
        A tolerance for intersection verification.

    Returns
    -------
    bool
        True if the segments intersect in one point.
        False if the segments are skew, parallel or lie on top of each other.

    """
    # Placeholder kept for API symmetry with the other intersection
    # predicates; callers must be prepared for NotImplementedError.
    raise NotImplementedError
def is_intersection_line_triangle(line, triangle, tol=1e-6):
    """Verifies if a line (ray) intersects with a triangle.

    Parameters
    ----------
    line : [point, point] | :class:`~compas.geometry.Line`
        A line.
    triangle : [point, point, point]
        A triangle.
    tol : float, optional
        A tolerance for intersection verification.

    Returns
    -------
    bool
        True if the line (ray) intersects with the triangle.
        False otherwise.

    Notes
    -----
    Based on the Moeller Trumbore intersection algorithm.
    The line is treated as a continuous, directed ray and not as a line
    segment with a start and end point.

    """
    a, b, c = triangle
    # Direction vector and base point of the ray.
    v1 = subtract_vectors(line[1], line[0])
    p1 = line[0]
    # Edge vectors of the two triangle edges sharing vertex a.
    e1 = subtract_vectors(b, a)
    e2 = subtract_vectors(c, a)
    # Begin calculating the determinant (also used for the u parameter).
    p = cross_vectors(v1, e2)
    det = dot_vectors(e1, p)
    # If the determinant is near zero, the ray lies in the triangle's plane
    # (not culling: both front and back faces are tested).
    if det > -tol and det < tol:
        return False
    inv_det = 1.0 / det
    # Vector from vertex a to the ray origin.
    t = subtract_vectors(p1, a)
    # Barycentric u parameter; outside [0, 1] means no intersection.
    u = dot_vectors(t, p) * inv_det
    if u < 0.0 or u > 1.0:
        return False
    q = cross_vectors(t, e1)
    # Barycentric v parameter; u + v must not exceed 1 inside the triangle.
    v = dot_vectors(v1, q) * inv_det
    if v < 0.0 or u + v > 1.0:
        return False
    # Ray parameter of the hit point; the name ``t`` is deliberately reused.
    t = dot_vectors(e2, q) * inv_det
    # Only hits strictly in front of the ray origin count.
    if t > tol:
        return True
    # No hit.
    return False
def is_intersection_line_plane(line, plane, tol=1e-6):
    """Determine if a line (ray) intersects with a plane.

    Parameters
    ----------
    line : [point, point] | :class:`~compas.geometry.Line`
        A line.
    plane : [point, vector] | :class:`~compas.geometry.Plane`
        A plane.
    tol : float, optional
        A tolerance for intersection verification.

    Returns
    -------
    bool
        True if the line intersects with the plane.
        False otherwise.

    """
    direction = subtract_vectors(line[1], line[0])
    normal = plane[1]
    # An (infinite) line misses the plane only when it is parallel to it,
    # i.e. when its direction is perpendicular to the plane normal.
    return fabs(dot_vectors(normal, direction)) > tol
def is_intersection_segment_plane(segment, plane, tol=1e-6):
    """Determine if a line segment intersects with a plane.

    Parameters
    ----------
    segment : [point, point] | :class:`~compas.geometry.Line`
        A line segment.
    plane : [point, vector] | :class:`~compas.geometry.Plane`
        A plane.
    tol : float, optional
        A tolerance for intersection verification.

    Returns
    -------
    bool
        True if the segment intersects with the plane.
        False otherwise.

    """
    start, end = segment
    base, normal = plane
    direction = subtract_vectors(end, start)
    denom = dot_vectors(normal, direction)
    if fabs(denom) <= tol:
        # Segment (almost) parallel to the plane: no transversal intersection.
        return False
    # Parameter of the intersection along the segment (0 at start, 1 at end);
    # end points themselves are excluded, matching the original behaviour.
    fac = -dot_vectors(normal, subtract_vectors(start, base)) / denom
    return 0.0 < fac < 1.0
def is_intersection_plane_plane(plane1, plane2, tol=1e-6):
    """Verifies if two planes intersect.

    Parameters
    ----------
    plane1 : [point, vector] | :class:`~compas.geometry.Plane`
        A plane.
    plane2 : [point, vector] | :class:`~compas.geometry.Plane`
        A plane.
    tol : float, optional
        A tolerance for intersection verification.

    Returns
    -------
    bool
        True if plane1 intersects with plane2.
        False otherwise.

    """
    # Two planes fail to intersect exactly when their normals are
    # (anti)parallel, i.e. when |cos(angle)| is (almost) 1.
    cosine = dot_vectors(plane1[1], plane2[1])
    return abs(cosine) <= 1 - tol
def is_point_in_box(point, box):
    """Determine if the point lies inside the given box.

    Parameters
    ----------
    point : [float, float, float] | :class:`~compas.geometry.Point`
        The test point.
    box : sequence[point] | :class:`~compas.geometry.Box`.
        The box defined by 8 points with the first 4 points defining the bottom face,
        and the last 4 points defining the top face.

    Returns
    -------
    bool
        True, if the point lies inside the box.
        False, otherwise.

    """
    # Placeholder kept for API symmetry with the other containment
    # predicates; callers must be prepared for NotImplementedError.
    raise NotImplementedError
def is_point_in_polyhedron(point, polyhedron):
    """Determine if the point lies inside the given polyhedron.

    Parameters
    ----------
    point : [float, float, float] | :class:`~compas.geometry.Point`
        The test point.
    polyhedron : [sequence[point], sequence[sequence[int]]] | :class:`~compas.geometry.Polyhedron`.
        The polyhedron defined by a sequence of points
        and a sequence of faces, with each face defined as a sequence of indices into the sequence of points.

    Returns
    -------
    bool
        True, if the point lies in the polyhedron.
        False, otherwise.

    """
    vertices, faces = polyhedron
    planes = []
    for face in faces:
        polygon = [vertices[index] for index in face]
        planes.append([centroid_points(polygon), normal_polygon(polygon)])
    # The point is inside iff it lies behind every face plane.
    # NOTE(review): this implicitly assumes a convex polyhedron with
    # consistently outward-pointing face normals — confirm with callers.
    return all(is_point_behind_plane(point, plane) for plane in planes)
|
0b92fdcbde539dba413e965eb06dc0debf7c1664
|
4506d81df5ae98078e5cbe79f613514ad12b1c83
|
/nipype/interfaces/slicer/registration/specialized.py
|
e03f1a95b1686f5c4ba2800d7d0bcbf55494ccf9
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
nipy/nipype
|
d52eba1b98fda68e24d006ac0d5701fc8a531b9c
|
03a236320fa229299d637ff9af97865a6ae76aca
|
refs/heads/master
| 2023-08-28T10:36:07.020541
| 2023-08-25T13:40:09
| 2023-08-25T13:40:09
| 791,477
| 692
| 569
|
NOASSERTION
| 2023-09-11T06:04:51
| 2010-07-22T17:06:49
|
Python
|
UTF-8
|
Python
| false
| false
| 25,701
|
py
|
specialized.py
|
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import (
CommandLine,
CommandLineInputSpec,
SEMLikeCommandLine,
TraitedSpec,
File,
Directory,
traits,
isdefined,
InputMultiPath,
OutputMultiPath,
)
import os
class ACPCTransformInputSpec(CommandLineInputSpec):
    """Input spec for the Slicer ``ACPCTransform`` CLI module.

    Each trait maps to one command-line flag via its ``argstr``.
    """

    acpc = InputMultiPath(
        traits.List(traits.Float(), minlen=3, maxlen=3),
        desc="ACPC line, two fiducial points, one at the anterior commissure and one at the posterior commissure.",
        argstr="--acpc %s...",
    )
    midline = InputMultiPath(
        traits.List(traits.Float(), minlen=3, maxlen=3),
        desc="The midline is a series of points defining the division between the hemispheres of the brain (the mid sagittal plane).",
        argstr="--midline %s...",
    )
    # Either a boolean (auto-generate the output filename) or an explicit path.
    outputTransform = traits.Either(
        traits.Bool,
        File(),
        hash_files=False,
        desc="A transform filled in from the ACPC and Midline registration calculation",
        argstr="--outputTransform %s",
    )
    debugSwitch = traits.Bool(
        desc="Click if wish to see debugging output", argstr="--debugSwitch "
    )
class ACPCTransformOutputSpec(TraitedSpec):
    """Output spec for the Slicer ``ACPCTransform`` CLI module."""

    outputTransform = File(
        desc="A transform filled in from the ACPC and Midline registration calculation",
        exists=True,
    )
class ACPCTransform(SEMLikeCommandLine):
    """title: ACPC Transform

    category: Registration.Specialized

    description: <p>Calculate a transformation from two lists of fiducial points.</p><p>ACPC line is two fiducial points, one at the anterior commissure and one at the posterior commissure. The resulting transform will bring the line connecting them to horizontal to the AP axis.</p><p>The midline is a series of points defining the division between the hemispheres of the brain (the mid sagittal plane). The resulting transform will put the output volume with the mid sagittal plane lined up with the AS plane.</p><p>Use the Filtering module<b>Resample Scalar/Vector/DWI Volume</b>to apply the transformation to a volume.</p>

    version: 1.0

    documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ACPCTransform

    license: slicer3

    contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH)

    acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
    """

    input_spec = ACPCTransformInputSpec
    output_spec = ACPCTransformOutputSpec
    # Executable name; the trailing space is kept verbatim from the generator.
    _cmd = "ACPCTransform "
    # Default on-disk filename used when the caller passes ``True`` instead of a path.
    _outputs_filenames = {"outputTransform": "outputTransform.mat"}
class FiducialRegistrationInputSpec(CommandLineInputSpec):
    """Input spec for the Slicer ``FiducialRegistration`` CLI module.

    Each trait maps to one command-line flag via its ``argstr``.
    """

    fixedLandmarks = InputMultiPath(
        traits.List(traits.Float(), minlen=3, maxlen=3),
        desc="Ordered list of landmarks in the fixed image",
        argstr="--fixedLandmarks %s...",
    )
    movingLandmarks = InputMultiPath(
        traits.List(traits.Float(), minlen=3, maxlen=3),
        desc="Ordered list of landmarks in the moving image",
        argstr="--movingLandmarks %s...",
    )
    # Either a boolean (auto-generate the output filename) or an explicit path.
    saveTransform = traits.Either(
        traits.Bool,
        File(),
        hash_files=False,
        desc="Save the transform that results from registration",
        argstr="--saveTransform %s",
    )
    transformType = traits.Enum(
        "Translation",
        "Rigid",
        "Similarity",
        desc="Type of transform to produce",
        argstr="--transformType %s",
    )
    rms = traits.Float(desc="Display RMS Error.", argstr="--rms %f")
    outputMessage = traits.Str(
        desc="Provides more information on the output", argstr="--outputMessage %s"
    )
class FiducialRegistrationOutputSpec(TraitedSpec):
    """Output spec for the Slicer ``FiducialRegistration`` CLI module."""

    saveTransform = File(
        desc="Save the transform that results from registration", exists=True
    )
class FiducialRegistration(SEMLikeCommandLine):
    """title: Fiducial Registration

    category: Registration.Specialized

    description: Computes a rigid, similarity or affine transform from a matched list of fiducials

    version: 0.1.0.$Revision$

    documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/TransformFromFiducials

    contributor: Casey B Goodlett (Kitware), Dominik Meier (SPL, BWH)

    acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
    """

    input_spec = FiducialRegistrationInputSpec
    output_spec = FiducialRegistrationOutputSpec
    # Executable name; the trailing space is kept verbatim from the generator.
    _cmd = "FiducialRegistration "
    # Default on-disk filename used when the caller passes ``True`` instead of a path.
    _outputs_filenames = {"saveTransform": "saveTransform.txt"}
class VBRAINSDemonWarpInputSpec(CommandLineInputSpec):
    """Input spec for the vector (multi-channel) BRAINSDemonWarp CLI module.

    Each trait maps to one command-line flag via its ``argstr``; ``desc``
    strings are carried over verbatim from the Slicer module XML.
    """

    # --- input/output volumes ---------------------------------------------
    movingVolume = InputMultiPath(
        File(exists=True),
        desc="Required: input moving image",
        argstr="--movingVolume %s...",
    )
    fixedVolume = InputMultiPath(
        File(exists=True),
        desc="Required: input fixed (target) image",
        argstr="--fixedVolume %s...",
    )
    inputPixelType = traits.Enum(
        "float",
        "short",
        "ushort",
        "int",
        "uchar",
        desc="Input volumes will be typecast to this format: float|short|ushort|int|uchar",
        argstr="--inputPixelType %s",
    )
    outputVolume = traits.Either(
        traits.Bool,
        File(),
        hash_files=False,
        desc="Required: output resampled moving image (will have the same physical space as the fixedVolume).",
        argstr="--outputVolume %s",
    )
    outputDisplacementFieldVolume = traits.Either(
        traits.Bool,
        File(),
        hash_files=False,
        desc="Output deformation field vector image (will have the same physical space as the fixedVolume).",
        argstr="--outputDisplacementFieldVolume %s",
    )
    outputPixelType = traits.Enum(
        "float",
        "short",
        "ushort",
        "int",
        "uchar",
        desc="outputVolume will be typecast to this format: float|short|ushort|int|uchar",
        argstr="--outputPixelType %s",
    )
    interpolationMode = traits.Enum(
        "NearestNeighbor",
        "Linear",
        "ResampleInPlace",
        "BSpline",
        "WindowedSinc",
        "Hamming",
        "Cosine",
        "Welch",
        "Lanczos",
        "Blackman",
        desc="Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc",
        argstr="--interpolationMode %s",
    )
    # --- registration algorithm options -----------------------------------
    registrationFilterType = traits.Enum(
        "Demons",
        "FastSymmetricForces",
        "Diffeomorphic",
        "LogDemons",
        "SymmetricLogDemons",
        desc="Registration Filter Type: Demons|FastSymmetricForces|Diffeomorphic|LogDemons|SymmetricLogDemons",
        argstr="--registrationFilterType %s",
    )
    smoothDisplacementFieldSigma = traits.Float(
        desc="A gaussian smoothing value to be applied to the deformation field at each iteration.",
        argstr="--smoothDisplacementFieldSigma %f",
    )
    numberOfPyramidLevels = traits.Int(
        desc="Number of image pyramid levels to use in the multi-resolution registration.",
        argstr="--numberOfPyramidLevels %d",
    )
    minimumFixedPyramid = InputMultiPath(
        traits.Int,
        desc="The shrink factor for the first level of the fixed image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)",
        sep=",",
        argstr="--minimumFixedPyramid %s",
    )
    minimumMovingPyramid = InputMultiPath(
        traits.Int,
        desc="The shrink factor for the first level of the moving image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)",
        sep=",",
        argstr="--minimumMovingPyramid %s",
    )
    arrayOfPyramidLevelIterations = InputMultiPath(
        traits.Int,
        desc="The number of iterations for each pyramid level",
        sep=",",
        argstr="--arrayOfPyramidLevelIterations %s",
    )
    histogramMatch = traits.Bool(
        desc="Histogram Match the input images. This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile.",
        argstr="--histogramMatch ",
    )
    numberOfHistogramBins = traits.Int(
        desc="The number of histogram levels", argstr="--numberOfHistogramBins %d"
    )
    numberOfMatchPoints = traits.Int(
        desc="The number of match points for histrogramMatch",
        argstr="--numberOfMatchPoints %d",
    )
    medianFilterSize = InputMultiPath(
        traits.Int,
        desc="Median filter radius in all 3 directions. When images have a lot of salt and pepper noise, this step can improve the registration.",
        sep=",",
        argstr="--medianFilterSize %s",
    )
    initializeWithDisplacementField = File(
        desc="Initial deformation field vector image file name",
        exists=True,
        argstr="--initializeWithDisplacementField %s",
    )
    initializeWithTransform = File(
        desc="Initial Transform filename",
        exists=True,
        argstr="--initializeWithTransform %s",
    )
    # --- masking / BOBF (Brain-Only Background-Fill) options --------------
    makeBOBF = traits.Bool(
        desc="Flag to make Brain-Only Background-Filled versions of the input and target volumes.",
        argstr="--makeBOBF ",
    )
    fixedBinaryVolume = File(
        desc="Mask filename for desired region of interest in the Fixed image.",
        exists=True,
        argstr="--fixedBinaryVolume %s",
    )
    movingBinaryVolume = File(
        desc="Mask filename for desired region of interest in the Moving image.",
        exists=True,
        argstr="--movingBinaryVolume %s",
    )
    lowerThresholdForBOBF = traits.Int(
        desc="Lower threshold for performing BOBF", argstr="--lowerThresholdForBOBF %d"
    )
    upperThresholdForBOBF = traits.Int(
        desc="Upper threshold for performing BOBF", argstr="--upperThresholdForBOBF %d"
    )
    backgroundFillValue = traits.Int(
        desc="Replacement value to overwrite background when performing BOBF",
        argstr="--backgroundFillValue %d",
    )
    seedForBOBF = InputMultiPath(
        traits.Int,
        desc="coordinates in all 3 directions for Seed when performing BOBF",
        sep=",",
        argstr="--seedForBOBF %s",
    )
    neighborhoodForBOBF = InputMultiPath(
        traits.Int,
        desc="neighborhood in all 3 directions to be included when performing BOBF",
        sep=",",
        argstr="--neighborhoodForBOBF %s",
    )
    # --- auxiliary outputs and debugging ----------------------------------
    outputDisplacementFieldPrefix = traits.Str(
        desc="Displacement field filename prefix for writing separate x, y, and z component images",
        argstr="--outputDisplacementFieldPrefix %s",
    )
    outputCheckerboardVolume = traits.Either(
        traits.Bool,
        File(),
        hash_files=False,
        desc="Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.",
        argstr="--outputCheckerboardVolume %s",
    )
    checkerboardPatternSubdivisions = InputMultiPath(
        traits.Int,
        desc="Number of Checkerboard subdivisions in all 3 directions",
        sep=",",
        argstr="--checkerboardPatternSubdivisions %s",
    )
    outputNormalized = traits.Bool(
        desc="Flag to warp and write the normalized images to output. In normalized images the image values are fit-scaled to be between 0 and the maximum storage type value.",
        argstr="--outputNormalized ",
    )
    outputDebug = traits.Bool(
        desc="Flag to write debugging images after each step.", argstr="--outputDebug "
    )
    weightFactors = InputMultiPath(
        traits.Float,
        desc="Weight fatctors for each input images",
        sep=",",
        argstr="--weightFactors %s",
    )
    gradient_type = traits.Enum(
        "0",
        "1",
        "2",
        desc="Type of gradient used for computing the demons force (0 is symmetrized, 1 is fixed image, 2 is moving image)",
        argstr="--gradient_type %s",
    )
    upFieldSmoothing = traits.Float(
        desc="Smoothing sigma for the update field at each iteration",
        argstr="--upFieldSmoothing %f",
    )
    max_step_length = traits.Float(
        desc="Maximum length of an update vector (0: no restriction)",
        argstr="--max_step_length %f",
    )
    use_vanilla_dem = traits.Bool(
        desc="Run vanilla demons algorithm", argstr="--use_vanilla_dem "
    )
    gui = traits.Bool(
        desc="Display intermediate image volumes for debugging", argstr="--gui "
    )
    promptUser = traits.Bool(
        desc="Prompt the user to hit enter each time an image is sent to the DebugImageViewer",
        argstr="--promptUser ",
    )
    numberOfBCHApproximationTerms = traits.Int(
        desc="Number of terms in the BCH expansion",
        argstr="--numberOfBCHApproximationTerms %d",
    )
    numberOfThreads = traits.Int(
        desc="Explicitly specify the maximum number of threads to use.",
        argstr="--numberOfThreads %d",
    )
class VBRAINSDemonWarpOutputSpec(TraitedSpec):
    """Output spec for the vector (multi-channel) BRAINSDemonWarp CLI module."""

    outputVolume = File(
        desc="Required: output resampled moving image (will have the same physical space as the fixedVolume).",
        exists=True,
    )
    outputDisplacementFieldVolume = File(
        desc="Output deformation field vector image (will have the same physical space as the fixedVolume).",
        exists=True,
    )
    outputCheckerboardVolume = File(
        desc="Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.",
        exists=True,
    )
class VBRAINSDemonWarp(SEMLikeCommandLine):
    """title: Vector Demon Registration (BRAINS)

    category: Registration.Specialized

    description:
    This program finds a deformation field to warp a moving image onto a fixed image. The images must be of the same signal kind, and contain an image of the same kind of object. This program uses the Thirion Demons warp software in ITK, the Insight Toolkit. Additional information is available at: http://www.nitrc.org/projects/brainsdemonwarp.

    version: 3.0.0

    documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:BRAINSDemonWarp

    license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt

    contributor: This tool was developed by Hans J. Johnson and Greg Harris.

    acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health.
    """

    input_spec = VBRAINSDemonWarpInputSpec
    output_spec = VBRAINSDemonWarpOutputSpec
    # Executable name; the trailing space is kept verbatim from the generator.
    _cmd = "VBRAINSDemonWarp "
    # Default on-disk filenames used when the caller passes ``True`` instead of a path.
    _outputs_filenames = {
        "outputVolume": "outputVolume.nii",
        "outputCheckerboardVolume": "outputCheckerboardVolume.nii",
        "outputDisplacementFieldVolume": "outputDisplacementFieldVolume.nrrd",
    }
class BRAINSDemonWarpInputSpec(CommandLineInputSpec):
    """Input spec for the single-channel BRAINSDemonWarp CLI module.

    Each trait maps to one command-line flag via its ``argstr``; ``desc``
    strings are carried over verbatim from the Slicer module XML.
    """

    # --- input/output volumes ---------------------------------------------
    movingVolume = File(
        desc="Required: input moving image", exists=True, argstr="--movingVolume %s"
    )
    fixedVolume = File(
        desc="Required: input fixed (target) image",
        exists=True,
        argstr="--fixedVolume %s",
    )
    inputPixelType = traits.Enum(
        "float",
        "short",
        "ushort",
        "int",
        "uchar",
        desc="Input volumes will be typecast to this format: float|short|ushort|int|uchar",
        argstr="--inputPixelType %s",
    )
    outputVolume = traits.Either(
        traits.Bool,
        File(),
        hash_files=False,
        desc="Required: output resampled moving image (will have the same physical space as the fixedVolume).",
        argstr="--outputVolume %s",
    )
    outputDisplacementFieldVolume = traits.Either(
        traits.Bool,
        File(),
        hash_files=False,
        desc="Output deformation field vector image (will have the same physical space as the fixedVolume).",
        argstr="--outputDisplacementFieldVolume %s",
    )
    outputPixelType = traits.Enum(
        "float",
        "short",
        "ushort",
        "int",
        "uchar",
        desc="outputVolume will be typecast to this format: float|short|ushort|int|uchar",
        argstr="--outputPixelType %s",
    )
    interpolationMode = traits.Enum(
        "NearestNeighbor",
        "Linear",
        "ResampleInPlace",
        "BSpline",
        "WindowedSinc",
        "Hamming",
        "Cosine",
        "Welch",
        "Lanczos",
        "Blackman",
        desc="Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc",
        argstr="--interpolationMode %s",
    )
    # --- registration algorithm options -----------------------------------
    registrationFilterType = traits.Enum(
        "Demons",
        "FastSymmetricForces",
        "Diffeomorphic",
        desc="Registration Filter Type: Demons|FastSymmetricForces|Diffeomorphic",
        argstr="--registrationFilterType %s",
    )
    smoothDisplacementFieldSigma = traits.Float(
        desc="A gaussian smoothing value to be applied to the deformation field at each iteration.",
        argstr="--smoothDisplacementFieldSigma %f",
    )
    numberOfPyramidLevels = traits.Int(
        desc="Number of image pyramid levels to use in the multi-resolution registration.",
        argstr="--numberOfPyramidLevels %d",
    )
    minimumFixedPyramid = InputMultiPath(
        traits.Int,
        desc="The shrink factor for the first level of the fixed image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)",
        sep=",",
        argstr="--minimumFixedPyramid %s",
    )
    minimumMovingPyramid = InputMultiPath(
        traits.Int,
        desc="The shrink factor for the first level of the moving image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)",
        sep=",",
        argstr="--minimumMovingPyramid %s",
    )
    arrayOfPyramidLevelIterations = InputMultiPath(
        traits.Int,
        desc="The number of iterations for each pyramid level",
        sep=",",
        argstr="--arrayOfPyramidLevelIterations %s",
    )
    histogramMatch = traits.Bool(
        desc="Histogram Match the input images. This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile.",
        argstr="--histogramMatch ",
    )
    numberOfHistogramBins = traits.Int(
        desc="The number of histogram levels", argstr="--numberOfHistogramBins %d"
    )
    numberOfMatchPoints = traits.Int(
        desc="The number of match points for histrogramMatch",
        argstr="--numberOfMatchPoints %d",
    )
    medianFilterSize = InputMultiPath(
        traits.Int,
        desc="Median filter radius in all 3 directions. When images have a lot of salt and pepper noise, this step can improve the registration.",
        sep=",",
        argstr="--medianFilterSize %s",
    )
    initializeWithDisplacementField = File(
        desc="Initial deformation field vector image file name",
        exists=True,
        argstr="--initializeWithDisplacementField %s",
    )
    initializeWithTransform = File(
        desc="Initial Transform filename",
        exists=True,
        argstr="--initializeWithTransform %s",
    )
    # --- masking / BOBF (Brain-Only Background-Fill) options --------------
    maskProcessingMode = traits.Enum(
        "NOMASK",
        "ROIAUTO",
        "ROI",
        "BOBF",
        desc="What mode to use for using the masks: NOMASK|ROIAUTO|ROI|BOBF. If ROIAUTO is chosen, then the mask is implicitly defined using a otsu foreground and hole filling algorithm. Where the Region Of Interest mode uses the masks to define what parts of the image should be used for computing the deformation field. Brain Only Background Fill uses the masks to pre-process the input images by clipping and filling in the background with a predefined value.",
        argstr="--maskProcessingMode %s",
    )
    fixedBinaryVolume = File(
        desc="Mask filename for desired region of interest in the Fixed image.",
        exists=True,
        argstr="--fixedBinaryVolume %s",
    )
    movingBinaryVolume = File(
        desc="Mask filename for desired region of interest in the Moving image.",
        exists=True,
        argstr="--movingBinaryVolume %s",
    )
    lowerThresholdForBOBF = traits.Int(
        desc="Lower threshold for performing BOBF", argstr="--lowerThresholdForBOBF %d"
    )
    upperThresholdForBOBF = traits.Int(
        desc="Upper threshold for performing BOBF", argstr="--upperThresholdForBOBF %d"
    )
    backgroundFillValue = traits.Int(
        desc="Replacement value to overwrite background when performing BOBF",
        argstr="--backgroundFillValue %d",
    )
    seedForBOBF = InputMultiPath(
        traits.Int,
        desc="coordinates in all 3 directions for Seed when performing BOBF",
        sep=",",
        argstr="--seedForBOBF %s",
    )
    neighborhoodForBOBF = InputMultiPath(
        traits.Int,
        desc="neighborhood in all 3 directions to be included when performing BOBF",
        sep=",",
        argstr="--neighborhoodForBOBF %s",
    )
    # --- auxiliary outputs and debugging ----------------------------------
    outputDisplacementFieldPrefix = traits.Str(
        desc="Displacement field filename prefix for writing separate x, y, and z component images",
        argstr="--outputDisplacementFieldPrefix %s",
    )
    outputCheckerboardVolume = traits.Either(
        traits.Bool,
        File(),
        hash_files=False,
        desc="Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.",
        argstr="--outputCheckerboardVolume %s",
    )
    checkerboardPatternSubdivisions = InputMultiPath(
        traits.Int,
        desc="Number of Checkerboard subdivisions in all 3 directions",
        sep=",",
        argstr="--checkerboardPatternSubdivisions %s",
    )
    outputNormalized = traits.Bool(
        desc="Flag to warp and write the normalized images to output. In normalized images the image values are fit-scaled to be between 0 and the maximum storage type value.",
        argstr="--outputNormalized ",
    )
    outputDebug = traits.Bool(
        desc="Flag to write debugging images after each step.", argstr="--outputDebug "
    )
    gradient_type = traits.Enum(
        "0",
        "1",
        "2",
        desc="Type of gradient used for computing the demons force (0 is symmetrized, 1 is fixed image, 2 is moving image)",
        argstr="--gradient_type %s",
    )
    upFieldSmoothing = traits.Float(
        desc="Smoothing sigma for the update field at each iteration",
        argstr="--upFieldSmoothing %f",
    )
    max_step_length = traits.Float(
        desc="Maximum length of an update vector (0: no restriction)",
        argstr="--max_step_length %f",
    )
    use_vanilla_dem = traits.Bool(
        desc="Run vanilla demons algorithm", argstr="--use_vanilla_dem "
    )
    gui = traits.Bool(
        desc="Display intermediate image volumes for debugging", argstr="--gui "
    )
    promptUser = traits.Bool(
        desc="Prompt the user to hit enter each time an image is sent to the DebugImageViewer",
        argstr="--promptUser ",
    )
    numberOfBCHApproximationTerms = traits.Int(
        desc="Number of terms in the BCH expansion",
        argstr="--numberOfBCHApproximationTerms %d",
    )
    numberOfThreads = traits.Int(
        desc="Explicitly specify the maximum number of threads to use.",
        argstr="--numberOfThreads %d",
    )
class BRAINSDemonWarpOutputSpec(TraitedSpec):
    """Output specification for :class:`BRAINSDemonWarp`.

    Each trait corresponds to a file written by the ``BRAINSDemonWarp``
    executable; ``exists=True`` makes nipype verify the file after the run.
    """

    # Moving image resampled into the fixed image's physical space.
    outputVolume = File(
        desc="Required: output resampled moving image (will have the same physical space as the fixedVolume).",
        exists=True,
    )
    # Dense displacement (deformation) field produced by the registration.
    outputDisplacementFieldVolume = File(
        desc="Output deformation field vector image (will have the same physical space as the fixedVolume).",
        exists=True,
    )
    # Checkerboard composite of the fixed and warped moving volumes.
    # (The "Genete" typo is in the upstream tool's description string; kept
    # verbatim because desc is passed through to the generated help text.)
    outputCheckerboardVolume = File(
        desc="Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.",
        exists=True,
    )
class BRAINSDemonWarp(SEMLikeCommandLine):
    """title: Demon Registration (BRAINS)
    category: Registration.Specialized
    description:
     This program finds a deformation field to warp a moving image onto a fixed image.  The images must be of the same signal kind, and contain an image of the same kind of object.  This program uses the Thirion Demons warp software in ITK, the Insight Toolkit.  Additional information is available at: http://www.nitrc.org/projects/brainsdemonwarp.
    version: 3.0.0
    documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:BRAINSDemonWarp
    license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
    contributor: This tool was developed by Hans J. Johnson and Greg Harris.
    acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health.
    """

    # nipype wiring: trait specs and the command-line executable to invoke.
    input_spec = BRAINSDemonWarpInputSpec
    output_spec = BRAINSDemonWarpOutputSpec
    _cmd = "BRAINSDemonWarp "
    # Default filenames used when an output trait is set to True
    # instead of an explicit path.
    _outputs_filenames = {
        "outputVolume": "outputVolume.nii",
        "outputCheckerboardVolume": "outputCheckerboardVolume.nii",
        "outputDisplacementFieldVolume": "outputDisplacementFieldVolume.nrrd",
    }
|
89cf808962ee35982e6ea2b77b1a1e5ea997cc5d
|
bec3ad1268b2b45941a2f4277ccd8e2a0860952e
|
/2020/KAPO/Child_Beubmi/config.py
|
0e7363311e68c327e8357b72be93152e89498b55
|
[] |
no_license
|
pcw109550/write-up
|
7a5e19e6c52f7831c6c7709331eb84b9a67cfd2c
|
bf6ab22619a107c8eb4011861a3c1ea80ac1c9f8
|
refs/heads/master
| 2023-07-08T18:10:56.780156
| 2023-07-04T17:28:12
| 2023-07-04T17:28:12
| 186,902,436
| 168
| 34
| null | 2023-02-08T05:21:24
| 2019-05-15T21:03:53
|
Sage
|
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
config.py
|
# RSA-style challenge parameters: modulus N, public exponent e (the common
# value 65537), and ciphertext ct to be recovered.
(N, e, ct) = (463082806219936580883135666334369918679238281446351110278776719661479088673665544319208959694407590613953423024929195611477219164472874962273346765831858927593487925977518628492905858122061607836951556625898739173918175634438349489053081163089035828510864774700923582441063158846702096255234134036013036677477873099075985514130068988916661808687590052995112687506192436334251464844209288945383156270095758426969647720336772439648547045888723575855281506638887873, 65537, 358904965856720745590398260013870329174590435599733591921766392565251834028251115357731116048720210486518056198933381670740582151773346645905375904730346845573501778982290069504289827189053144840818609207553365768909492520187712526159992990632289628899337451889492183690798553739994849981792843046160418447596521118529408657867444928122146818876928318030241569487743774259504510836972830613125422031145984172486210782996549986131463630109319807516667208615806351)
# Extra value shipped with the challenge; presumably leaks information about
# the private key -- see the accompanying solver for its exact use (TODO confirm).
hint = 1000752070344265829436389059269225051047444863864866149790714383350962844612666626976098942869248547170915307576523923294950130028433768594989553312278109123304047635585396766621843747137240683305876974749952777
|
f617a19ca804c92fe834ce7fc2946085dafed5d0
|
ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
|
/python_modules/libraries/dagster-azure/dagster_azure_tests/adls2_tests/test_io_manager.py
|
93701dd7fe8d03d6c69ff3e6632aaaa7c3be4ad4
|
[
"Apache-2.0"
] |
permissive
|
dagster-io/dagster
|
6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a
|
fe21995e0402878437a828c6a4244025eac8c43b
|
refs/heads/master
| 2023-09-05T20:46:08.203794
| 2023-09-05T19:54:52
| 2023-09-05T19:54:52
| 131,619,646
| 8,565
| 1,154
|
Apache-2.0
| 2023-09-14T21:57:37
| 2018-04-30T16:30:04
|
Python
|
UTF-8
|
Python
| false
| false
| 12,272
|
py
|
test_io_manager.py
|
from uuid import uuid4
import pytest
from azure.storage.filedatalake import DataLakeLeaseClient
from dagster import (
AssetIn,
AssetKey,
DagsterInstance,
DagsterRun,
DynamicOut,
DynamicOutput,
GraphOut,
In,
Int,
Out,
asset,
build_input_context,
build_output_context,
graph,
materialize,
op,
resource,
with_resources,
)
from dagster._core.definitions.assets import AssetsDefinition
from dagster._core.definitions.definitions_class import Definitions
from dagster._core.definitions.job_base import InMemoryJob
from dagster._core.definitions.partition import StaticPartitionsDefinition
from dagster._core.definitions.source_asset import SourceAsset
from dagster._core.definitions.unresolved_asset_job_definition import define_asset_job
from dagster._core.events import DagsterEventType
from dagster._core.execution.api import create_execution_plan, execute_plan
from dagster._core.system_config.objects import ResolvedRunConfig
from dagster._core.types.dagster_type import resolve_dagster_type
from dagster._core.utils import make_new_run_id
from dagster_azure.adls2 import create_adls2_client
from dagster_azure.adls2.fake_adls2_resource import FakeADLS2Resource, fake_adls2_resource
from dagster_azure.adls2.io_manager import (
ADLS2PickleIOManager,
PickledObjectADLS2IOManager,
adls2_pickle_io_manager,
)
from dagster_azure.adls2.resources import adls2_resource
from dagster_azure.blob import create_blob_client
from upath import UPath
def fake_io_manager_factory(io_manager):
    """Return a dagster resource definition that always yields *io_manager*."""
    @resource
    def fake_io_manager(_):
        # The resource init context is ignored; hand back the pre-built object.
        return io_manager
    return fake_io_manager
def get_step_output(step_events, step_key, output_name="result"):
    """Return the first STEP_OUTPUT event matching *step_key* and *output_name*.

    Returns None when no such event is present in *step_events*.
    """
    matches = (
        evt
        for evt in step_events
        if evt.event_type == DagsterEventType.STEP_OUTPUT
        and evt.step_key == step_key
        and evt.step_output_data.output_name == output_name
    )
    return next(matches, None)
def define_inty_job(adls_io_resource=adls2_resource):
    """Build a tiny int-passing job used by the IO-manager tests.

    The job wires return_one -> add_one (dynamic outputs "foo" and "bar")
    and binds the ADLS2 pickle IO manager plus the given ADLS2 resource.
    """
    @op(out=Out(int))
    def return_one():
        return 1
    @op(
        ins={"num": In(Int)},
        out=DynamicOut(Int),
    )
    def add_one(num):
        # Two dynamic outputs so downstream fan-out paths get exercised.
        yield DynamicOutput(num + 1, "foo")
        yield DynamicOutput(num + 1, "bar")
    @graph
    def basic_external_plan_execution():
        add_one(return_one())
    return basic_external_plan_execution.to_job(
        resource_defs={"io_manager": adls2_pickle_io_manager, "adls2": adls_io_resource}
    )
@pytest.mark.nettest
@pytest.mark.skip(
    "Blob this depends on does not exist. See"
    " https://linear.app/elementl/issue/CORE-83/test-adls2-pickle-io-manager-deletes-recursively-disabled-reenable-it"
)
def test_adls2_pickle_io_manager_deletes_recursively(storage_account, file_system, credential):
    """Execute one step against real ADLS2, then delete the run's whole storage prefix."""
    job = define_inty_job()
    run_config = {
        "resources": {
            "io_manager": {"config": {"adls2_file_system": file_system}},
            "adls2": {
                "config": {
                    "storage_account": storage_account,
                    "credential": {"key": credential},
                }
            },
        }
    }
    run_id = make_new_run_id()
    resolved_run_config = ResolvedRunConfig.build(job, run_config=run_config)
    execution_plan = create_execution_plan(job, run_config)
    assert execution_plan.get_step_by_key("return_one")
    step_keys = ["return_one"]
    instance = DagsterInstance.ephemeral()
    dagster_run = DagsterRun(job_name=job.name, run_id=run_id, run_config=run_config)
    # Run only the "return_one" step so exactly one output lands in ADLS2.
    return_one_step_events = list(
        execute_plan(
            execution_plan.build_subset_plan(step_keys, job, resolved_run_config),
            job=InMemoryJob(job),
            run_config=run_config,
            dagster_run=dagster_run,
            instance=instance,
        )
    )
    assert get_step_output(return_one_step_events, "return_one")
    # Re-load the pickled step output through a fresh IO manager instance.
    context = build_input_context(
        upstream_output=build_output_context(
            step_key="return_one",
            name="result",
            run_id=run_id,
            dagster_type=resolve_dagster_type(int),
        ),
        dagster_type=resolve_dagster_type(int),
    )
    io_manager = PickledObjectADLS2IOManager(
        file_system=file_system,
        adls2_client=create_adls2_client(storage_account, credential),
        blob_client=create_blob_client(storage_account, credential),
        lease_client_constructor=DataLakeLeaseClient,
    )
    assert io_manager.load_input(context) == 1
    # Verify that when the IO manager needs to delete recursively, it is able to do so,
    # by removing the whole path for the run
    recursive_path = UPath(
        io_manager.prefix,
        "storage",
        run_id,
    )
    io_manager.unlink(recursive_path)
@pytest.mark.nettest
@pytest.mark.skip(
    "Blob this depends on does not exist. See"
    " https://linear.app/elementl/issue/CORE-83/test-adls2-pickle-io-manager-deletes-recursively-disabled-reenable-it"
)
def test_adls2_pickle_io_manager_execution(storage_account, file_system, credential):
    """Run the inty job step-by-step against real ADLS2 and reload each pickled output."""
    job = define_inty_job()
    run_config = {
        "resources": {
            "io_manager": {"config": {"adls2_file_system": file_system}},
            "adls2": {
                "config": {
                    "storage_account": storage_account,
                    "credential": {"key": credential},
                }
            },
        }
    }
    run_id = make_new_run_id()
    resolved_run_config = ResolvedRunConfig.build(job, run_config=run_config)
    execution_plan = create_execution_plan(job, run_config)
    assert execution_plan.get_step_by_key("return_one")
    step_keys = ["return_one"]
    instance = DagsterInstance.ephemeral()
    dagster_run = DagsterRun(job_name=job.name, run_id=run_id, run_config=run_config)
    # Step 1: execute "return_one" in isolation; its output is pickled to ADLS2.
    return_one_step_events = list(
        execute_plan(
            execution_plan.build_subset_plan(step_keys, job, resolved_run_config),
            job=InMemoryJob(job),
            run_config=run_config,
            dagster_run=dagster_run,
            instance=instance,
        )
    )
    assert get_step_output(return_one_step_events, "return_one")
    context = build_input_context(
        upstream_output=build_output_context(
            step_key="return_one",
            name="result",
            run_id=run_id,
            dagster_type=resolve_dagster_type(int),
        ),
        dagster_type=resolve_dagster_type(int),
    )
    io_manager = PickledObjectADLS2IOManager(
        file_system=file_system,
        adls2_client=create_adls2_client(storage_account, credential),
        blob_client=create_blob_client(storage_account, credential),
        lease_client_constructor=DataLakeLeaseClient,
    )
    assert io_manager.load_input(context) == 1
    # Step 2: execute "add_one", which reads the stored output back from ADLS2.
    add_one_step_events = list(
        execute_plan(
            execution_plan.build_subset_plan(["add_one"], job, resolved_run_config),
            job=InMemoryJob(job),
            dagster_run=dagster_run,
            run_config=run_config,
            instance=instance,
        )
    )
    # mapping_key="foo" selects one of add_one's two dynamic outputs.
    context = build_input_context(
        upstream_output=build_output_context(
            step_key="add_one",
            name="result",
            run_id=run_id,
            mapping_key="foo",
            dagster_type=resolve_dagster_type(int),
        ),
        dagster_type=resolve_dagster_type(int),
    )
    assert get_step_output(add_one_step_events, "add_one")
    assert io_manager.load_input(context) == 2
@pytest.mark.skip(
    "Blob this depends on does not exist. See"
    " https://linear.app/elementl/issue/CORE-83/test-adls2-pickle-io-manager-deletes-recursively-disabled-reenable-it"
)
def test_asset_io_manager(storage_account, file_system, credential):
    """Materialize a small asset graph end-to-end through the ADLS2 IO manager (network test)."""
    # if you add new assets to this test, make sure that the output names include _id so that we don't
    # run into issues with the azure leasing system in CI
    # when this test is run for multiple python versions in parallel the azure leasing system will
    # cause failures if two tests try to access the same asset at the same time
    _id = f"{uuid4()}".replace("-", "")
    @op
    def first_op():
        return 5
    @op
    def second_op(op_1):
        assert op_1 == 5
        return op_1 + 1
    @graph(name=f"graph_asset_{_id}", out={f"asset3_{_id}": GraphOut()})
    def graph_asset():
        return second_op(first_op())
    @asset(
        name=f"upstream_{_id}",
        ins={"asset3": AssetIn(asset_key=AssetKey([f"asset3_{_id}"]))},
    )
    def upstream(asset3):
        return asset3 + 1
    # NOTE(review): this SourceAsset is neither bound to a name nor passed to
    # Definitions below, yet downstream() declares a "source" input -- confirm
    # the wiring is intentional (the test is currently skipped upstream).
    SourceAsset(f"source1_{_id}", partitions_def=StaticPartitionsDefinition(["foo", "bar"]))
    # prepopulate storage with source asset
    io_manager = PickledObjectADLS2IOManager(
        file_system=file_system,
        adls2_client=create_adls2_client(storage_account, credential),
        blob_client=create_blob_client(storage_account, credential),
        lease_client_constructor=DataLakeLeaseClient,
    )
    for partition_key in ["foo", "bar"]:
        context = build_output_context(
            step_key=f"source1_{_id}",
            name="result",
            run_id=make_new_run_id(),
            dagster_type=resolve_dagster_type(int),
            partition_key=partition_key,
        )
        io_manager.handle_output(context, 1)
    @asset(
        name=f"downstream_{_id}",
        ins={"upstream": AssetIn(asset_key=AssetKey([f"upstream_{_id}"]))},
    )
    def downstream(upstream, source):
        assert upstream == 7
        return 1 + upstream + source["foo"] + source["bar"]
    asset_job = Definitions(
        assets=[upstream, downstream, AssetsDefinition.from_graph(graph_asset)],
        resources={"io_manager": adls2_pickle_io_manager, "adls2": adls2_resource},
        jobs=[define_asset_job("my_asset_job")],
    ).get_job_def("my_asset_job")
    run_config = {
        "resources": {
            "io_manager": {"config": {"adls2_file_system": file_system}},
            "adls2": {
                "config": {
                    "storage_account": storage_account,
                    "credential": {"key": credential},
                }
            },
        }
    }
    result = asset_job.execute_in_process(run_config=run_config)
    assert result.success
def test_with_fake_adls2_resource():
    """The inty job runs end-to-end against the in-memory fake ADLS2 resource."""
    inty_job = define_inty_job(adls_io_resource=fake_adls2_resource)
    io_manager_config = {"config": {"adls2_file_system": "fake_file_system"}}
    adls2_config = {"config": {"account_name": "my_account"}}
    run_config = {
        "resources": {"io_manager": io_manager_config, "adls2": adls2_config}
    }
    outcome = inty_job.execute_in_process(run_config=run_config)
    assert outcome.success
def test_nothing():
    """Assets returning None still emit handled-output events with empty metadata."""
    @asset
    def asset1() -> None:
        ...
    @asset(deps=[asset1])
    def asset2() -> None:
        ...
    result = materialize(
        with_resources(
            [asset1, asset2],
            resource_defs={
                "io_manager": adls2_pickle_io_manager.configured(
                    {"adls2_file_system": "fake_file_system"}
                ),
                "adls2": fake_adls2_resource.configured({"account_name": "my_account"}),
            },
        )
    )
    # One handled-output event per asset; None outputs carry no metadata entries.
    handled_output_events = list(filter(lambda evt: evt.is_handled_output, result.all_node_events))
    assert len(handled_output_events) == 2
    for event in handled_output_events:
        assert len(event.event_specific_data.metadata) == 0
def test_nothing_pythonic() -> None:
    """Same as test_nothing, but binds the IO manager via the Pythonic resource API."""
    @asset
    def asset1() -> None:
        ...
    @asset(deps=[asset1])
    def asset2() -> None:
        ...
    result = materialize(
        with_resources(
            [asset1, asset2],
            resource_defs={
                "io_manager": ADLS2PickleIOManager(
                    adls2=FakeADLS2Resource(account_name="my_account"),
                    adls2_file_system="fake_file_system",
                )
            },
        )
    )
    # One handled-output event per asset; None outputs carry no metadata entries.
    handled_output_events = list(filter(lambda evt: evt.is_handled_output, result.all_node_events))
    assert len(handled_output_events) == 2
    for event in handled_output_events:
        assert len(event.event_specific_data.metadata) == 0  # type: ignore[attr-defined]
|
c5f51fd472d21caabf4836f8f0093a4ec2d8b6b0
|
d6aae799e18e907fb413b715200c7832252a87e5
|
/responsible_ai/data_cleansing/datasets/create_cifar10_csv.py
|
5c0e4010ec658ff2c623e2527eaa1795cc380761
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"CC-BY-NC-4.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sony/nnabla-examples
|
0d0bbd5df3028996e790bcf07248fdb0932697d1
|
41f71faa6efff7774a76bbd5af3198322a90a6ab
|
refs/heads/master
| 2023-09-04T03:45:54.023899
| 2023-08-22T03:31:21
| 2023-08-22T03:31:21
| 109,625,584
| 308
| 108
|
Apache-2.0
| 2023-08-22T03:31:23
| 2017-11-05T23:30:40
|
Python
|
UTF-8
|
Python
| false
| false
| 7,610
|
py
|
create_cifar10_csv.py
|
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Provide data iterator for CIFAR10 examples.
'''
from contextlib import contextmanager
import argparse
import numpy as np
import tarfile
import os
import tqdm
from imageio import imwrite
from nnabla.logger import logger
from nnabla.utils.data_iterator import data_iterator
from nnabla.utils.data_source import DataSource
from nnabla.utils.data_source_loader import download
from .utils import get_filename_to_download, save_list_to_csv, split_data_into_train_val, ensure_dir
class Cifar10DataSource(DataSource):
    '''
    Get data directly from cifar10 dataset from Internet(yann.lecun.com).

    Downloads the CIFAR-10 python archive once, unpacks the train or test
    batches into memory, and serves (image, label) pairs by index.
    '''
    def _get_data(self, position):
        # Resolve the (possibly shuffled) index before reading the arrays.
        image = self._images[self._indexes[position]]
        label = self._labels[self._indexes[position]]
        return (image, label)
    def __init__(self, train=True, shuffle=False, rng=None, output_dir=None):
        super(Cifar10DataSource, self).__init__(shuffle=shuffle)
        self._train = train
        data_uri = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
        logger.info('Getting labeled data from {}.'.format(data_uri))
        output_file = get_filename_to_download(output_dir, data_uri)
        r = download(data_uri, output_file=output_file)  # file object returned
        with tarfile.open(fileobj=r, mode="r:gz") as fpin:
            # Training data
            if train:
                images = []
                labels = []
                for member in fpin.getmembers():
                    if "data_batch" not in member.name:
                        continue
                    fp = fpin.extractfile(member)
                    data = np.load(fp, allow_pickle=True, encoding="bytes")
                    images.append(data[b"data"])
                    labels.append(data[b"labels"])
                self._size = 50000
                self._images = np.concatenate(
                    images).reshape(self._size, 3, 32, 32)
                self._labels = np.concatenate(labels).reshape(-1, 1)
            # Validation data
            else:
                for member in fpin.getmembers():
                    if "test_batch" not in member.name:
                        continue
                    fp = fpin.extractfile(member)
                    data = np.load(fp, allow_pickle=True, encoding="bytes")
                    images = data[b"data"]
                    labels = data[b"labels"]
                self._size = 10000
                self._images = images.reshape(self._size, 3, 32, 32)
                self._labels = np.array(labels).reshape(-1, 1)
        r.close()
        logger.info('Getting labeled data from {}.'.format(data_uri))
        # Derive the final size from the labels actually loaded.
        self._size = self._labels.size
        self._variables = ('x', 'y')
        if rng is None:
            rng = np.random.RandomState(313)
        self.rng = rng
        self.reset()
    def reset(self):
        # Re-draw the iteration order; a permutation when shuffling is on.
        if self._shuffle:
            self._indexes = self.rng.permutation(self._size)
        else:
            self._indexes = np.arange(self._size)
        super(Cifar10DataSource, self).reset()
    @property
    def images(self):
        """Get copy of whole data with a shape of (N, 3, 32, 32)."""
        return self._images.copy()
    @property
    def labels(self):
        """Get copy of whole label with a shape of (N, 1)."""
        return self._labels.copy()
@contextmanager
def data_iterator_cifar10(batch_size,
                          train=True,
                          rng=None,
                          shuffle=True,
                          with_memory_cache=False,
                          with_file_cache=False,
                          output_dir=None):
    '''
    Provide DataIterator with :py:class:`Cifar10DataSource`
    with_memory_cache, with_parallel and with_file_cache option's default value is all False,
    because :py:class:`Cifar10DataSource` is able to store all data into memory.
    For example,
    .. code-block:: python
        with data_iterator_cifar10(True, batch_size) as di:
            for data in di:
                SOME CODE TO USE data.
    '''
    # Both the data source and the iterator are context managers; nesting
    # them here guarantees cleanup when the caller's `with` block exits.
    with Cifar10DataSource(train=train, shuffle=shuffle, rng=rng, output_dir=output_dir) as ds, \
            data_iterator(ds,
                          batch_size,
                          rng=rng,
                          with_memory_cache=with_memory_cache,
                          with_file_cache=with_file_cache) as di:
        yield di
def data_iterator_to_csv(csv_path, data_path, data_iterator, seed=0):
    """Dump one epoch of images to PNG files and return the CSV rows.

    Args:
        csv_path: Root directory under which images are written.
        data_path: Relative sub-path (e.g. 'training') recorded in the CSV.
        data_iterator: Context manager yielding a NNabla data iterator.
        seed: Unused; kept for backward compatibility with existing callers.

    Returns:
        List of rows, starting with the ['x:image', 'y:label'] header, where
        each row is [relative_png_path, label].
    """
    index = 0
    csv_data = [['x:image', 'y:label']]
    with data_iterator as data:
        pbar = tqdm.tqdm(total=data.size, unit='images')
        initial_epoch = data.epoch
        # Stop once the iterator wraps around to the next epoch.
        while data.epoch == initial_epoch:
            d = data.next()
            for i in range(len(d[0])):
                label = d[1][i][0]
                # CSV always records forward slashes; the filesystem path is
                # converted to the native separator below.
                file_name = data_path + \
                    '/{}'.format(label) + '/{}.png'.format(index)
                full_path = os.path.join(
                    csv_path, file_name.replace('/', os.path.sep))
                # exist_ok avoids the race between an existence check and
                # directory creation.
                os.makedirs(os.path.dirname(full_path), exist_ok=True)
                # Data arrives as flat CHW; convert to HWC for the PNG writer.
                imwrite(full_path, d[0][i].reshape(
                    3, 32, 32).transpose(1, 2, 0))
                csv_data.append([file_name, label])
                index += 1
                pbar.update(1)
        pbar.close()
    return csv_data
def create_data_csv(seed):
    """Download CIFAR-10 and write training/validation/test CSV datasets.

    The 50k training images are split into 40k train / 10k validation using
    *seed*; the 10k test images become cifar10_test.csv. All files are written
    under a 'cifar10' directory next to this module.
    """
    path = os.path.abspath(os.path.dirname(__file__))
    base_dir = os.path.join(path, 'cifar10')
    ensure_dir(base_dir)
    # Create original training set
    logger.log(99, 'Downloading CIFAR-10 dataset...')
    output_dir = os.path.join(path, 'download')
    train_di = data_iterator_cifar10(50000, output_dir=output_dir)
    logger.log(99, 'Creating "cifar10_training.csv"... ')
    train_csv = data_iterator_to_csv(base_dir, 'training', train_di)
    # Seeded split so repeated runs with the same seed give identical CSVs.
    train_csv, val_csv = split_data_into_train_val(
        train_csv, val_size=10000, seed=seed)
    save_list_to_csv(train_csv, base_dir,
                     'cifar10_training' + '_' + str(seed) + '.csv')
    save_list_to_csv(val_csv, base_dir, 'cifar10_validation' +
                     '_' + str(seed) + '.csv')
    # Create original test set
    validation_di = data_iterator_cifar10(
        10000, False, None, False, output_dir=output_dir)
    logger.log(99, 'Creating "cifar10_test.csv"... ')
    test_csv = data_iterator_to_csv(base_dir, 'validation', validation_di)
    save_list_to_csv(test_csv, base_dir, 'cifar10_test.csv')
    logger.log(99, 'Dataset creation completed successfully.')
def main(args):
    """CLI entry point: build the CIFAR-10 CSV datasets for the parsed seed."""
    seed = args.seed
    create_data_csv(seed)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='csv data', formatter_class=argparse.RawTextHelpFormatter)
    # NOTE(review): default=0 is dead code because required=True forces the
    # caller to pass --seed explicitly; confirm whether it should be optional.
    parser.add_argument(
        '-s', '--seed', help='seed num', default=0, type=int, required=True)
    args = parser.parse_args()
    main(args)
|
e975437d8e08f014c028d73e896b04089bbcc6ff
|
6b6d42eadf53e90b08ce564fb188a9a4b126ef12
|
/testsuite/tests/properties/imprecise_is_dot_call/test.py
|
4b914051ea65f2aa64e7f6f48e4bc9cd4219142d
|
[
"Apache-2.0",
"LLVM-exception",
"NCSA"
] |
permissive
|
AdaCore/libadalang
|
f97b95d1672cb1e5083c49ee632c6f9c787d36c2
|
50d658afa70ccbf46b8f7d9d43a21d45d56b206c
|
refs/heads/master
| 2023-09-01T18:34:26.976692
| 2023-08-25T15:53:43
| 2023-08-25T15:53:43
| 47,627,172
| 158
| 49
|
Apache-2.0
| 2022-12-14T10:29:45
| 2015-12-08T14:28:22
|
Ada
|
UTF-8
|
Python
| false
| false
| 556
|
py
|
test.py
|
import sys
import libadalang as lal
# For every Ada source given on the command line, report whether each dotted
# name is a dot call, both with and without the imprecise fallback.
for src_path in sys.argv[1:]:
    print('== {} =='.format(src_path))
    unit = lal.AnalysisContext().get_from_file(src_path)
    assert not unit.diagnostics

    for dotted in unit.root.findall(lal.DottedName):
        precise = dotted.p_is_dot_call(imprecise_fallback=False)
        with_fallback = dotted.p_is_dot_call(imprecise_fallback=True)
        print("{} is a dot call? {}. With imprecise fallback: {}".format(
            dotted, precise, with_fallback
        ))
    print('')

print('Done')
|
7ef63801e9b67b597ad1f02ae48d429b7c2d8a41
|
ec120a737f147a0e5c495ede305098345a1055a8
|
/niftymic/application/register_image.py
|
bddcc793a7e1391a701f3939a328b40bf4edf76a
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
gift-surg/NiftyMIC
|
bbc56009f300d4572b92177098ebe319e646a613
|
553bce0824e7b40cd221897b683142d9aeee77d8
|
refs/heads/master
| 2022-03-08T06:18:29.877635
| 2022-02-15T12:58:45
| 2022-02-15T12:58:45
| 108,846,947
| 118
| 31
|
BSD-3-Clause
| 2020-11-26T17:47:34
| 2017-10-30T12:21:38
|
Python
|
UTF-8
|
Python
| false
| false
| 10,207
|
py
|
register_image.py
|
##
# \file register_image.py
# \brief Script to register the obtained reconstruction to a template
# space.
#
# \author Michael Ebner (michael.ebner.14@ucl.ac.uk)
# \date October 2017
#
import re
import os
import numpy as np
import SimpleITK as sitk
import pysitk.python_helper as ph
import pysitk.simple_itk_helper as sitkh
import niftymic.base.stack as st
import niftymic.base.data_reader as dr
import niftymic.registration.niftyreg as niftyreg
import niftymic.registration.transform_initializer as tinit
from niftymic.utilities.input_arparser import InputArgparser
from niftymic.definitions import REGEX_FILENAMES, DIR_TMP
def main():
    """Rigidly register a reconstruction (moving) to a template space (fixed).

    Parses CLI arguments, optionally initializes the transform (from a file or
    via PCA on the masks), runs RegAladin or FLIRT, writes the resulting
    transform to ``--output``, and optionally composes it into previously
    obtained per-slice motion-correction transforms.

    Returns:
        0 on success.
    """
    time_start = ph.start_timing()
    np.set_printoptions(precision=3)
    input_parser = InputArgparser(
        description="Register an obtained reconstruction (moving) "
        "to a template image/space (fixed) using rigid registration. "
        "The resulting registration can optionally be applied to previously "
        "obtained motion correction slice transforms so that a volumetric "
        "reconstruction is possible in the (standard anatomical) space "
        "defined by the fixed.",
    )
    input_parser.add_fixed(required=True)
    input_parser.add_moving(required=True)
    input_parser.add_output(
        help="Path to registration transform (.txt)",
        required=True)
    input_parser.add_fixed_mask(required=False)
    input_parser.add_moving_mask(required=False)
    input_parser.add_option(
        option_string="--initial-transform",
        type=str,
        help="Path to initial transform. "
        "If not provided, registration will be initialized based on "
        "rigid alignment of eigenbasis of the fixed/moving image masks "
        "using principal component analysis",
        default=None)
    input_parser.add_v2v_method(
        option_string="--method",
        help="Registration method used for the registration.",
        default="RegAladin",
    )
    input_parser.add_argument(
        "--init-pca", "-init-pca",
        action='store_true',
        help="If given, PCA-based initializations will be refined using "
        "RegAladin registrations."
    )
    input_parser.add_dir_input_mc()
    input_parser.add_verbose(default=0)
    input_parser.add_log_config(default=1)
    args = input_parser.parse_args()
    input_parser.print_arguments(args)
    if args.log_config:
        input_parser.log_config(os.path.abspath(__file__))
    # Only plain-text ITK transforms are supported as output.
    if not args.output.endswith(".txt"):
        raise IOError(
            "output filename '%s' invalid; "
            "allowed transformation extensions are: '.txt'" % (
                args.output))
    # The two initialization modes are mutually exclusive.
    if args.initial_transform is not None and args.init_pca:
        raise IOError(
            "Both --initial-transform and --init-pca cannot be activated. "
            "Choose one.")
    dir_output = os.path.dirname(args.output)
    ph.create_directory(dir_output)
    debug = False
    # --------------------------------Read Data--------------------------------
    ph.print_title("Read Data")
    fixed = st.Stack.from_filename(
        file_path=args.fixed,
        file_path_mask=args.fixed_mask,
        extract_slices=False)
    moving = st.Stack.from_filename(
        file_path=args.moving,
        file_path_mask=args.moving_mask,
        extract_slices=False)
    path_to_tmp_output = os.path.join(
        DIR_TMP,
        ph.append_to_filename(os.path.basename(args.moving), "_warped"))
    # ---------------------------- Initialization ----------------------------
    if args.initial_transform is None and args.init_pca:
        ph.print_title("Estimate (initial) transformation using PCA")
        if args.moving_mask is None or args.fixed_mask is None:
            ph.print_warning("Fixed and moving masks are strongly recommended")
        transform_initializer = tinit.TransformInitializer(
            fixed=fixed,
            moving=moving,
            similarity_measure="NMI",
            refine_pca_initializations=True,
        )
        transform_initializer.run()
        transform_init_sitk = transform_initializer.get_transform_sitk()
    elif args.initial_transform is not None:
        transform_init_sitk = sitkh.read_transform_sitk(args.initial_transform)
    else:
        transform_init_sitk = None
    # Persist the initial transform; later steps refine the file in place.
    if transform_init_sitk is not None:
        sitk.WriteTransform(transform_init_sitk, args.output)
    # -------------------Register Reconstruction to Template-------------------
    ph.print_title("Registration")
    # If --init-pca given, RegAladin run already performed
    if args.method == "RegAladin" and not args.init_pca:
        path_to_transform_regaladin = os.path.join(
            DIR_TMP, "transform_regaladin.txt")
        # Convert SimpleITK to RegAladin transform
        if transform_init_sitk is not None:
            cmd = "simplereg_transform -sitk2nreg %s %s" % (
                args.output, path_to_transform_regaladin)
            ph.execute_command(cmd, verbose=False)
        # Run NiftyReg
        cmd_args = ["reg_aladin"]
        cmd_args.append("-ref '%s'" % args.fixed)
        cmd_args.append("-flo '%s'" % args.moving)
        cmd_args.append("-res '%s'" % path_to_tmp_output)
        if transform_init_sitk is not None:
            cmd_args.append("-inaff '%s'" % path_to_transform_regaladin)
        cmd_args.append("-aff '%s'" % path_to_transform_regaladin)
        cmd_args.append("-rigOnly")
        cmd_args.append("-ln 2")  # seems to perform better for spina bifida
        cmd_args.append("-voff")
        if args.fixed_mask is not None:
            cmd_args.append("-rmask '%s'" % args.fixed_mask)
        # To avoid error "0 correspondences between blocks were found" that can
        # occur for some cases. Also, disable moving mask, as this would be ignored
        # anyway
        cmd_args.append("-noSym")
        # if args.moving_mask is not None:
        #     cmd_args.append("-fmask '%s'" % args.moving_mask)
        ph.print_info("Run Registration (RegAladin) ... ", newline=False)
        ph.execute_command(" ".join(cmd_args), verbose=debug)
        print("done")
        # Convert RegAladin to SimpleITK transform
        cmd = "simplereg_transform -nreg2sitk '%s' '%s'" % (
            path_to_transform_regaladin, args.output)
        ph.execute_command(cmd, verbose=False)
    elif args.method == "FLIRT":
        path_to_transform_flirt = os.path.join(DIR_TMP, "transform_flirt.txt")
        # Convert SimpleITK into FLIRT transform
        if transform_init_sitk is not None:
            cmd = "simplereg_transform -sitk2flirt '%s' '%s' '%s' '%s'" % (
                args.output, args.fixed, args.moving, path_to_transform_flirt)
            ph.execute_command(cmd, verbose=False)
        # Define search angle ranges for FLIRT in all three dimensions
        # search_angles = ["-searchr%s -%d %d" % (x, 180, 180)
        # for x in ["x", "y", "z"]]
        cmd_args = ["flirt"]
        cmd_args.append("-in '%s'" % args.moving)
        cmd_args.append("-ref '%s'" % args.fixed)
        if transform_init_sitk is not None:
            cmd_args.append("-init '%s'" % path_to_transform_flirt)
        cmd_args.append("-omat '%s'" % path_to_transform_flirt)
        cmd_args.append("-out '%s'" % path_to_tmp_output)
        cmd_args.append("-dof 6")
        # cmd_args.append((" ").join(search_angles))
        if args.moving_mask is not None:
            cmd_args.append("-inweight '%s'" % args.moving_mask)
        if args.fixed_mask is not None:
            cmd_args.append("-refweight '%s'" % args.fixed_mask)
        ph.print_info("Run Registration (FLIRT) ... ", newline=False)
        ph.execute_command(" ".join(cmd_args), verbose=debug)
        print("done")
        # Convert FLIRT to SimpleITK transform
        cmd = "simplereg_transform -flirt2sitk '%s' '%s' '%s' '%s'" % (
            path_to_transform_flirt, args.fixed, args.moving, args.output)
        ph.execute_command(cmd, verbose=False)
    ph.print_info("Registration transformation written to '%s'" % args.output)
    if args.dir_input_mc is not None:
        ph.print_title("Update Motion-Correction Transformations")
        # Compose the inverse of the new transform into each slice transform.
        transform_sitk = sitkh.read_transform_sitk(
            args.output, inverse=1)
        if args.dir_input_mc.endswith("/"):
            subdir_mc = args.dir_input_mc.split("/")[-2]
        else:
            subdir_mc = args.dir_input_mc.split("/")[-1]
        dir_output_mc = os.path.join(dir_output, subdir_mc)
        ph.create_directory(dir_output_mc, delete_files=True)
        pattern = REGEX_FILENAMES + "[.]tfm"
        p = re.compile(pattern)
        trafos = [t for t in os.listdir(args.dir_input_mc) if p.match(t)]
        for t in trafos:
            path_to_input_transform = os.path.join(args.dir_input_mc, t)
            path_to_output_transform = os.path.join(dir_output_mc, t)
            t_sitk = sitkh.read_transform_sitk(path_to_input_transform)
            t_sitk = sitkh.get_composite_sitk_affine_transform(
                transform_sitk, t_sitk)
            sitk.WriteTransform(t_sitk, path_to_output_transform)
        ph.print_info("%d transformations written to '%s'" % (
            len(trafos), dir_output_mc))
        # Copy rejected_slices.json file
        path_to_rejected_slices = os.path.join(
            args.dir_input_mc, "rejected_slices.json")
        if ph.file_exists(path_to_rejected_slices):
            ph.copy_file(path_to_rejected_slices, dir_output_mc)
    if args.verbose:
        # Resample the moving image for visual inspection alongside the fixed.
        cmd_args = ["simplereg_resample"]
        cmd_args.append("-f '%s'" % args.fixed)
        cmd_args.append("-m '%s'" % args.moving)
        cmd_args.append("-t '%s'" % args.output)
        cmd_args.append("-o '%s'" % path_to_tmp_output)
        ph.execute_command(" ".join(cmd_args))
        ph.show_niftis([args.fixed, path_to_tmp_output])
    elapsed_time_total = ph.stop_timing(time_start)
    # Summary
    ph.print_title("Summary")
    exe_file_info = os.path.basename(os.path.abspath(__file__)).split(".")[0]
    print("%s | Computational Time: %s" % (exe_file_info, elapsed_time_total))
    return 0
if __name__ == '__main__':
    # main() returns 0 on success; the process exit code is left implicit.
    main()
|
f6c2b712ba5ad5340544139ac8a437bec567b4b4
|
10cb11f83e1c8b51b9d72c28d6259a56ff1a97c8
|
/tests/unit/lib/utils/test_boto_utils.py
|
626d9d1ab2578ae7ae8b10693e01d9759da10c25
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] |
permissive
|
aws/aws-sam-cli
|
6d4411aacf7f861e75e5cf4882a32858797a276d
|
b297ff015f2b69d7c74059c2d42ece1c29ea73ee
|
refs/heads/develop
| 2023-08-30T23:28:36.179932
| 2023-08-30T21:58:26
| 2023-08-30T21:58:26
| 92,205,085
| 1,402
| 470
|
Apache-2.0
| 2023-09-14T21:14:23
| 2017-05-23T18:16:23
|
Python
|
UTF-8
|
Python
| false
| false
| 5,290
|
py
|
test_boto_utils.py
|
from unittest import TestCase
from unittest.mock import patch, Mock
from parameterized import parameterized
from samcli.lib.utils.boto_utils import (
get_boto_config_with_user_agent,
get_boto_client_provider_with_config,
get_boto_resource_provider_with_config,
get_boto_resource_provider_from_session_with_config,
get_boto_client_provider_from_session_with_config,
get_client_error_code,
)
TEST_VERSION = "1.0.0"
class TestBotoUtils(TestCase):
@parameterized.expand([(True,), (False,)])
@patch("samcli.lib.utils.boto_utils.GlobalConfig")
@patch("samcli.lib.utils.boto_utils.__version__", TEST_VERSION)
def test_get_boto_config_with_user_agent(
self,
telemetry_enabled,
patched_global_config,
):
given_global_config_instance = Mock()
patched_global_config.return_value = given_global_config_instance
given_global_config_instance.telemetry_enabled = telemetry_enabled
given_region_name = "us-west-2"
config = get_boto_config_with_user_agent(region_name=given_region_name)
self.assertEqual(given_region_name, config.region_name)
if telemetry_enabled:
self.assertEqual(
config.user_agent_extra, f"aws-sam-cli/{TEST_VERSION}/{given_global_config_instance.installation_id}"
)
else:
self.assertEqual(config.user_agent_extra, f"aws-sam-cli/{TEST_VERSION}")
@patch("samcli.lib.utils.boto_utils.get_boto_config_with_user_agent")
def test_get_boto_client_provider_from_session_with_config(self, patched_get_config):
given_client_name = "lambda"
given_session = Mock()
given_config_param = Mock()
given_client = Mock()
given_config = Mock()
given_session.client.return_value = given_client
patched_get_config.return_value = given_config
client_generator = get_boto_client_provider_from_session_with_config(given_session, param=given_config_param)
client = client_generator(given_client_name)
self.assertEqual(client, given_client)
patched_get_config.assert_called_with(param=given_config_param)
given_session.client.assert_called_with(given_client_name, config=given_config)
@patch("samcli.lib.utils.boto_utils.get_boto_client_provider_from_session_with_config")
@patch("samcli.lib.utils.boto_utils.Session")
def test_get_boto_client_provider_with_config(self, patched_session, patched_get_client):
given_session = Mock()
patched_session.return_value = given_session
given_client_generator = Mock()
patched_get_client.return_value = given_client_generator
given_config_param = Mock()
given_profile = Mock()
given_region = Mock()
client_generator = get_boto_client_provider_with_config(
region=given_region, profile=given_profile, param=given_config_param
)
patched_session.assert_called_with(region_name=given_region, profile_name=given_profile)
patched_get_client.assert_called_with(given_session, param=given_config_param)
self.assertEqual(given_client_generator, client_generator)
@patch("samcli.lib.utils.boto_utils.get_boto_resource_provider_from_session_with_config")
@patch("samcli.lib.utils.boto_utils.Session")
def test_get_boto_resource_provider_with_config(self, patched_session, patched_get_resource):
given_session = Mock()
patched_session.return_value = given_session
given_resource_generator = Mock()
patched_get_resource.return_value = given_resource_generator
given_config_param = Mock()
given_profile = Mock()
given_region = Mock()
client_generator = get_boto_resource_provider_with_config(
region=given_region, profile=given_profile, param=given_config_param
)
patched_session.assert_called_with(region_name=given_region, profile_name=given_profile)
patched_get_resource.assert_called_with(given_session, param=given_config_param)
self.assertEqual(given_resource_generator, client_generator)
@patch("samcli.lib.utils.boto_utils.get_boto_config_with_user_agent")
def test_get_boto_resource_provider_from_session_with_config(self, patched_get_config):
given_resource_name = "cloudformation"
given_session = Mock()
given_config_param = Mock()
given_resource = Mock()
given_config = Mock()
given_session.resource.return_value = given_resource
patched_get_config.return_value = given_config
resource_generator = get_boto_resource_provider_from_session_with_config(
given_session, param=given_config_param
)
resource = resource_generator(given_resource_name)
self.assertEqual(resource, given_resource)
patched_get_config.assert_called_with(param=given_config_param)
given_session.resource.assert_called_with(given_resource_name, config=given_config)
@parameterized.expand([({}, None), ({"Error": {}}, None), ({"Error": {"Code": "ErrorCode"}}, "ErrorCode")])
def test_get_client_error_code(self, response, expected):
self.assertEqual(expected, get_client_error_code(Mock(response=response)))
|
0474c1216340f5a96572bb5924fbec4f134cfaa4
|
6e235014528acc05996e6a4ef2b33e348bbc5114
|
/json_to_models/models/string_converters.py
|
988abdd0e80e619f739d351b346816ca6c85ab25
|
[
"MIT"
] |
permissive
|
bogdandm/json2python-models
|
f7cf02417c38de587d86aa045756c6b61aa42cb1
|
e2606e8f2c22d3bc11b09f5eb2bc73323ce151c5
|
refs/heads/master
| 2023-01-13T03:47:47.135160
| 2023-01-02T12:28:47
| 2023-01-02T12:28:47
| 144,019,032
| 153
| 12
|
MIT
| 2023-01-02T12:18:26
| 2018-08-08T13:40:00
|
Python
|
UTF-8
|
Python
| false
| false
| 6,197
|
py
|
string_converters.py
|
from functools import wraps
from inspect import isclass
from typing import Any, Callable, List, Optional, Tuple
from . import ClassType
from ..dynamic_typing import (
BaseType,
DDict,
DList,
DOptional,
DUnion,
MetaData,
ModelMeta,
ModelPtr,
StringLiteral,
StringSerializable
)
from ..dynamic_typing.base import NoneType
def convert_strings(str_field_paths: List[str], class_type: Optional[ClassType] = None,
method: Optional[str] = None) -> Callable[[type], type]:
"""
Decorator factory. Set up post-init method to convert strings fields values into StringSerializable types
If field contains complex data type path should be consist of field name and dotted list of tokens:
* `S` - string component
* `O` - Optional
* `L` - List
* `D` - Dict
So if field `'bar'` has type `Optional[List[List[IntString]]]` field path would be `'bar#O.L.L.S'`
! If type is too complex i.e. Union[List[IntString], List[List[IntString]]]
you can't specify field path and such field would be ignored
To specify name of post-init method you should provide it by class_type argument or directly by method argument:
>>> convert_strings([...], class_type=ClassType.Attrs)
is equivalent of
>>> convert_strings([...], method="__attrs_post_init__")
:param str_field_paths: Paths of StringSerializable fields (field name or field name + typing path)
:param class_type: attrs | dataclass - type of decorated class
:param method: post-init method name
:return: Class decorator
"""
method = {
ClassType.Attrs: '__attrs_post_init__',
ClassType.Dataclass: '__post_init__',
None: method
}.get(class_type)
def decorator(cls: type) -> type:
if hasattr(cls, method):
old_fn = getattr(cls, method)
@wraps(old_fn)
def __post_init__(self, *args, **kwargs):
post_init_converters(str_field_paths)(self)
old_fn(self, *args, **kwargs)
setattr(cls, method, __post_init__)
else:
fn = post_init_converters(str_field_paths)
fn.__name__ = method
setattr(cls, method, fn)
return cls
return decorator
def post_init_converters(str_fields: List[str], wrap_fn=None):
"""
Method factory. Return post_init method to convert string into StringSerializable types
To override generated __post_init__ you can call it directly:
>>> def __post_init__(self):
... post_init_converters(['a', 'b'])(self)
:param str_fields: names of StringSerializable fields
:return: __post_init__ method
"""
def __post_init__(self):
# `S` - string component
# `O` - Optional
# `L` - List
# `D` - Dict
for name in str_fields:
if '#' in name:
name, path_str = name.split('#')
path: List[str] = path_str.split('.')
else:
path = ['S']
new_value = _process_string_field_value(
path=path,
value=getattr(self, name),
current_type=self.__annotations__[name]
)
setattr(self, name, new_value)
if wrap_fn:
__post_init__ = wraps(wrap_fn)(__post_init__)
return __post_init__
def _process_string_field_value(path: List[str], value: Any, current_type: Any, optional=False) -> Any:
token, *path = path
if token == 'S':
try:
value = current_type.to_internal_value(value)
except (ValueError, TypeError) as e:
if not optional:
raise e
return value
elif token == 'O':
return _process_string_field_value(
path=path,
value=value,
current_type=current_type.__args__[0],
optional=True
)
elif token == 'L':
t = current_type.__args__[0]
return [
_process_string_field_value(path, item, current_type=t, optional=optional)
for item in value
]
elif token == 'D':
t = current_type.__args__[1]
return {
key: _process_string_field_value(path, item, current_type=t, optional=optional)
for key, item in value.items()
}
else:
raise ValueError(f"Unknown token {token}")
def get_string_field_paths(model: ModelMeta) -> List[Tuple[str, List[str]]]:
"""
Return paths for convert_strings function of given model
:return: Paths with raw names
"""
# `S` - string component
# `O` - Optional
# `L` - List
# `D` - Dict
str_fields: List[Tuple[str, List[str]]] = []
for name, t in model.type.items():
# Walk through nested types
paths: List[List[str]] = []
tokens: List[Tuple[MetaData, List[str]]] = [(t, ['#'])]
while tokens:
tmp_type, path = tokens.pop()
if isclass(tmp_type):
if issubclass(tmp_type, StringSerializable):
paths.append(path + ['S'])
elif isinstance(tmp_type, BaseType):
cls = type(tmp_type)
if cls is DOptional:
token = 'O'
elif cls is DList:
token = 'L'
elif cls is DDict:
token = 'D'
elif cls in (DUnion, ModelPtr):
# We could not resolve Union
paths = []
break
elif cls is NoneType:
continue
elif cls in (StringLiteral,):
continue
else:
raise TypeError(f"Unsupported meta-type for converter path {cls}")
for nested_type in tmp_type:
tokens.append((nested_type, path + [token]))
paths: List[str] = ["".join(p[1:]) for p in paths]
if len(paths) != 1:
continue
path = paths.pop()
if path == 'S':
str_fields.append((name, []))
else:
str_fields.append((name, path))
return str_fields
|
7826ab481bd46145aedaa5af279da3048327cf84
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-IOBluetoothUI/setup.py
|
d6348366ee9147205b7a01c47f3420d8ae26ca04
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 825
|
py
|
setup.py
|
"""
Wrappers for the "IOBluetoothUI" framework on macOS.
These wrappers don't include documentation, please check Apple's documentation
for information on how to use this framework and PyObjC's documentation
for general tips and tricks regarding the translation between Python
and (Objective-)C frameworks
"""
import os
import sys
sys.path.insert(0, os.path.dirname(__file__))
from pyobjc_setup import setup # noqa: E402
VERSION = "9.2.1"
setup(
name="pyobjc-framework-IOBluetoothUI",
description="Wrappers for the framework IOBluetoothUI on macOS",
packages=["IOBluetoothUI"],
version=VERSION,
install_requires=[
"pyobjc-core>=" + VERSION,
"pyobjc-framework-IOBluetooth>=" + VERSION,
],
long_description=__doc__,
options={"bdist_wheel": {"py_limited_api": "cp36"}},
)
|
59c49eaf756c01fae0fffa3756d2b8fd6d4ea950
|
c641636e184c0ec1dcc7b851bad678c898cdd05d
|
/legacy/examples/SAGPool/layers.py
|
3dfa0822ece9e564adfbc9b15e0a62e1e1f4b08d
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PGL
|
d8f0a82854a141bee1afdddd9a77bdd723c83ed8
|
7a55649d46d7ad93de31eb9b3ebf71b82d1fcffb
|
refs/heads/main
| 2023-08-17T10:33:02.425526
| 2023-08-04T02:52:06
| 2023-08-04T02:52:06
| 191,286,408
| 1,719
| 341
|
Apache-2.0
| 2023-08-04T02:52:07
| 2019-06-11T03:23:28
|
Python
|
UTF-8
|
Python
| false
| false
| 5,024
|
py
|
layers.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as L
import pgl
from pgl.graph_wrapper import GraphWrapper
from pgl.utils.logger import log
from conv import norm_gcn
from pgl.layers.conv import gcn
def topk_pool(gw, score, graph_id, ratio):
"""Implementation of topk pooling, where k means pooling ratio.
Args:
gw: Graph wrapper object.
score: The attention score of all nodes, which is used to select
important nodes.
graph_id: The graphs that the nodes belong to.
ratio: The pooling ratio of nodes we want to select.
Return:
perm: The index of nodes we choose.
ratio_length: The selected node numbers of each graph.
"""
graph_lod = gw.graph_lod
graph_nodes = gw.num_nodes
num_graph = gw.num_graph
num_nodes = L.ones(shape=[graph_nodes], dtype="float32")
num_nodes = L.lod_reset(num_nodes, graph_lod)
num_nodes_per_graph = L.sequence_pool(num_nodes, pool_type='sum')
max_num_nodes = L.reduce_max(num_nodes_per_graph, dim=0)
max_num_nodes = L.cast(max_num_nodes, dtype="int32")
index = L.arange(0, gw.num_nodes, dtype="int64")
offset = L.gather(graph_lod, graph_id, overwrite=False)
index = (index - offset) + (graph_id * max_num_nodes)
index.stop_gradient = True
# padding
dense_score = L.fill_constant(shape=[num_graph * max_num_nodes],
dtype="float32", value=-999999)
index = L.reshape(index, shape=[-1])
dense_score = L.scatter(dense_score, index, updates=score)
num_graph = L.cast(num_graph, dtype="int32")
dense_score = L.reshape(dense_score,
shape=[num_graph, max_num_nodes])
# record the sorted index
_, sort_index = L.argsort(dense_score, axis=-1, descending=True)
# recover the index range
graph_lod = graph_lod[:-1]
graph_lod = L.reshape(graph_lod, shape=[-1, 1])
graph_lod = L.cast(graph_lod, dtype="int64")
sort_index = L.elementwise_add(sort_index, graph_lod, axis=-1)
sort_index = L.reshape(sort_index, shape=[-1, 1])
# use sequence_slice to choose selected node index
pad_lod = L.arange(0, (num_graph + 1) * max_num_nodes, step=max_num_nodes, dtype="int32")
sort_index = L.lod_reset(sort_index, pad_lod)
ratio_length = L.ceil(num_nodes_per_graph * ratio)
ratio_length = L.cast(ratio_length, dtype="int64")
ratio_length = L.reshape(ratio_length, shape=[-1, 1])
offset = L.zeros(shape=[num_graph, 1], dtype="int64")
choose_index = L.sequence_slice(input=sort_index, offset=offset, length=ratio_length)
perm = L.reshape(choose_index, shape=[-1])
return perm, ratio_length
def sag_pool(gw, feature, ratio, graph_id, dataset, name, activation=L.tanh):
"""Implementation of self-attention graph pooling (SAGPool)
This is an implementation of the paper SELF-ATTENTION GRAPH POOLING
(https://arxiv.org/pdf/1904.08082.pdf)
Args:
gw: Graph wrapper object.
feature: A tensor with shape (num_nodes, feature_size).
ratio: The pooling ratio of nodes we want to select.
graph_id: The graphs that the nodes belong to.
dataset: To differentiate FRANKENSTEIN dataset and other datasets.
name: The name of SAGPool layer.
activation: The activation function.
Return:
new_feature: A tensor with shape (num_nodes, feature_size), and the unselected
nodes' feature is masked by zero.
ratio_length: The selected node numbers of each graph.
"""
if dataset == "FRANKENSTEIN":
gcn_ = gcn
else:
gcn_ = norm_gcn
score = gcn_(gw=gw,
feature=feature,
hidden_size=1,
activation=None,
norm=gw.node_feat["norm"],
name=name)
score = L.squeeze(score, axes=[])
perm, ratio_length = topk_pool(gw, score, graph_id, ratio)
mask = L.zeros_like(score)
mask = L.cast(mask, dtype="float32")
updates = L.ones_like(perm)
updates = L.cast(updates, dtype="float32")
mask = L.scatter(mask, perm, updates)
new_feature = L.elementwise_mul(feature, mask, axis=0)
temp_score = activation(score)
new_feature = L.elementwise_mul(new_feature, temp_score, axis=0)
return new_feature, ratio_length
|
a353f873272b601c3a1090f0b1c8c5f91146f789
|
04b020de6a2bc96bc8b30b369c4440e360f60e88
|
/tests/unit/server/server_tests.py
|
07dc7bf985e0d2d1008ea3df2ff22979b167b88f
|
[
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause",
"CC-BY-SA-3.0",
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause-Modification",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
reddit/baseplate.py
|
5dbfaac91dafab31078f44b3d3438669c9ddeac0
|
114248987ce0e8a0ddd102c80b00ef43f4dbf14e
|
refs/heads/develop
| 2023-09-05T15:34:26.476934
| 2023-08-07T21:52:49
| 2023-08-07T21:52:49
| 44,694,978
| 237
| 125
|
BSD-3-Clause
| 2023-09-05T19:22:27
| 2015-10-21T18:07:55
|
Python
|
UTF-8
|
Python
| false
| false
| 6,766
|
py
|
server_tests.py
|
import io
import socket
import sys
import unittest
from unittest import mock
import pytest
from baseplate import server
from baseplate.lib import config
EXAMPLE_ENDPOINT = config.EndpointConfiguration(socket.AF_INET, ("127.0.0.1", 1234))
class ParseArgsTests(unittest.TestCase):
def test_no_args(self):
with mock.patch("sys.stderr", mock.Mock()):
with self.assertRaises(SystemExit):
server.parse_args([])
def test_filename(self):
with mock.patch("argparse.FileType", autospec=True) as make_file:
args = server.parse_args(["filename"])
mock_file = make_file.return_value
self.assertEqual(mock_file.call_args, mock.call("filename"))
self.assertEqual(args.config_file, mock_file.return_value)
@mock.patch("argparse.FileType", autospec=True)
def test_options(self, make_file):
args = server.parse_args(
[
"filename",
"--debug",
"--app-name",
"app",
"--server-name",
"server",
"--bind",
"1.2.3.4:81",
]
)
self.assertTrue(args.debug)
self.assertEqual(args.app_name, "app")
self.assertEqual(args.server_name, "server")
self.assertEqual(args.bind, config.EndpointConfiguration(socket.AF_INET, ("1.2.3.4", 81)))
class MakeListenerTests(unittest.TestCase):
@mock.patch("baseplate.server.einhorn.get_socket")
@mock.patch("baseplate.server.einhorn.is_worker")
def test_einhorn_managed(self, is_worker, get_socket):
is_worker.return_value = True
listener = server.make_listener(EXAMPLE_ENDPOINT)
self.assertEqual(listener, get_socket.return_value)
@mock.patch.dict("os.environ", {}, clear=True)
@mock.patch("fcntl.fcntl")
@mock.patch("socket.socket")
def test_manually_bound(self, mocket, fcntl):
listener = server.make_listener(EXAMPLE_ENDPOINT)
self.assertEqual(mocket.call_args, mock.call(socket.AF_INET, socket.SOCK_STREAM))
self.assertEqual(listener, mocket.return_value)
self.assertEqual(listener.bind.call_args, mock.call(("127.0.0.1", 1234)))
class LoadFactoryTests(unittest.TestCase):
@mock.patch("importlib.import_module", autospec=True)
def test_full_url(self, import_module):
factory = server._load_factory("package.module:callable", "default_name")
self.assertEqual(import_module.call_args, mock.call("package.module"))
self.assertEqual(factory, import_module.return_value.callable)
@mock.patch("importlib.import_module", autospec=True)
def test_default_name(self, import_module):
factory = server._load_factory("package.module", "default_name")
self.assertEqual(import_module.call_args, mock.call("package.module"))
self.assertEqual(factory, import_module.return_value.default_name)
class CheckFnSignatureTests(unittest.TestCase):
def test_no_args(self):
def foo():
pass
with self.assertRaises(ValueError):
server._fn_accepts_additional_args(foo, [])
def test_var_args(self):
def foo(*args):
pass
server._fn_accepts_additional_args(foo, [])
server._fn_accepts_additional_args(foo, ["arg1"])
server._fn_accepts_additional_args(foo, ["arg1", "arg2"])
def test_config_arg_only(self):
def foo(app_config):
pass
server._fn_accepts_additional_args(foo, [])
with self.assertRaises(ValueError):
server._fn_accepts_additional_args(foo, ["extra_arg"])
def test_config_arg_with_var_args(self):
def foo(app_config, *args):
pass
server._fn_accepts_additional_args(foo, [])
server._fn_accepts_additional_args(foo, ["arg1"])
server._fn_accepts_additional_args(foo, ["arg1", "arg2"])
def test_additional_args(self):
def foo(app_config, args):
pass
server._fn_accepts_additional_args(foo, [])
server._fn_accepts_additional_args(foo, ["arg1"])
server._fn_accepts_additional_args(foo, ["arg1", "arg2"])
def test_additional_args_with_var_args(self):
def foo(app_config, args, *extra):
pass
server._fn_accepts_additional_args(foo, [])
server._fn_accepts_additional_args(foo, ["arg1"])
server._fn_accepts_additional_args(foo, ["arg1", "arg2"])
def test_kwargs(self):
def foo(app_config, arg1, *, bar, **kwargs):
pass
server._fn_accepts_additional_args(foo, [])
server._fn_accepts_additional_args(foo, ["arg1", "arg2", "arg3"])
server._fn_accepts_additional_args(foo, ["arg1"])
class ParseBaseplateScriptArgs(unittest.TestCase):
@mock.patch.object(sys, "argv", ["baseplate-script", "mock.ini", "package.module:callable"])
@mock.patch("baseplate.server._load_factory")
@mock.patch("builtins.open", mock.mock_open())
def test_simple_call(self, _load_factory):
args, extra_args = server._parse_baseplate_script_args()
self.assertEqual(args.app_name, "main")
self.assertEqual(extra_args, [])
@mock.patch.object(
sys, "argv", ["baseplate-script", "mock.ini", "package.module:callable", "--app-name", "ci"]
)
@mock.patch("baseplate.server._load_factory")
@mock.patch("builtins.open", mock.mock_open())
def test_specifying_app_name(self, _load_factory):
args, extra_args = server._parse_baseplate_script_args()
self.assertEqual(args.app_name, "ci")
self.assertEqual(extra_args, [])
@mock.patch.object(
sys,
"argv",
["baseplate-script", "mock.ini", "package.module:callable", "extra_arg1", "extra_arg2"],
)
@mock.patch("baseplate.server._load_factory")
@mock.patch("builtins.open", mock.mock_open())
def test_extra_args(self, _load_factory):
args, extra_args = server._parse_baseplate_script_args()
self.assertEqual(args.app_name, "main")
self.assertEqual(extra_args, ["extra_arg1", "extra_arg2"])
@mock.patch.dict("os.environ", {"FOO_FROM_ENV": "environmental"})
@pytest.mark.parametrize(
"config_text,expected",
(
("", None),
("foo = bar", "bar"),
("foo = $FOO_FROM_ENV", "environmental"),
("foo = ${FOO_FROM_ENV}", "environmental"),
("foo = ${this:is:not:valid}", "${this:is:not:valid}"),
),
)
def test_read_config(config_text, expected):
config_file = io.StringIO(f"[app:main]\n{config_text}\n")
config_file.name = "<test>"
config = server.read_config(config_file, server_name=None, app_name="main")
assert config.app.get("foo") == expected
|
6dc7c3da9af6f34bc37700d143dfebfa2e88625b
|
52245910f830dbfb2b1432ad2a967df7321ee6de
|
/panel/theme/__init__.py
|
80b6d93a7cf824640223ad2e5bfb47ea503cc1bb
|
[
"BSD-3-Clause"
] |
permissive
|
holoviz/panel
|
92c19f979353d456512abbce5a027dff6ddb3a5c
|
2c6e165e2bba96c0cb97947aa072d4429133cf7a
|
refs/heads/main
| 2023-08-17T11:28:06.581979
| 2023-08-17T11:23:09
| 2023-08-17T11:23:09
| 145,848,899
| 2,544
| 373
|
BSD-3-Clause
| 2023-09-14T17:13:31
| 2018-08-23T12:14:24
|
Python
|
UTF-8
|
Python
| false
| false
| 627
|
py
|
__init__.py
|
"""
The theme module contains Design and Theme components.
Each Design applies a coherent design system (e.g. bootstrap or
material) to a template or a set of components, while Theme objects
implement different color palettes (e.g. dark or default).
"""
from .base import ( # noqa
THEMES, DarkTheme, DefaultTheme, Design, Inherit, Theme,
)
from .bootstrap import Bootstrap
from .fast import Fast
from .material import Material
from .native import Native
__all__ = (
"THEMES",
"Bootstrap",
"DarkTheme",
"DefaultTheme",
"Design",
"Fast",
"Inherit",
"Material",
"Native",
"Theme"
)
|
04c9f14aa427f8f19fc1f37bafd6d2ee8f443f63
|
a79c7c01b97e391bcd833a8b7b1dfd7cf982d59c
|
/tests/unit/experience_replay_tests.py
|
50f61a69cae5bf0fe570de4ab1cc87cc3cc00a98
|
[
"Apache-2.0"
] |
permissive
|
learnables/cherry
|
32f3a545563e0446ad3b0243a5cc8225033c0a7f
|
f4164a53dcc762ac5ce53a761fb54f3f69847f90
|
refs/heads/master
| 2023-06-27T14:20:06.027516
| 2023-06-26T01:34:54
| 2023-06-26T01:34:54
| 159,752,575
| 185
| 33
|
Apache-2.0
| 2023-06-26T01:34:56
| 2018-11-30T01:46:38
|
Python
|
UTF-8
|
Python
| false
| false
| 21,118
|
py
|
experience_replay_tests.py
|
#!/usr/bin/env python3
import unittest
import random
import numpy as np
import torch as th
import cherry as ch
import os
import copy
NUM_SAMPLES = 100
VECTOR_SIZE = 5
def close(a, b):
return (a-b).norm(p=2) <= 1e-8
class TestExperienceReplay(unittest.TestCase):
def setUp(self):
self.replay = ch.ExperienceReplay()
def tearDown(self):
pass
def test_empty(self):
vector = np.random.rand(VECTOR_SIZE)
for i in range(NUM_SAMPLES):
self.replay.append(vector,
vector,
i,
vector,
False,
vector=vector)
self.replay.empty()
self.assertEqual(len(self.replay._storage), 0)
def test_len(self):
vector = np.random.rand(VECTOR_SIZE)
for i in range(NUM_SAMPLES):
self.replay.append(vector,
vector,
i,
vector,
False,
vector=vector)
self.assertEqual(len(self.replay), NUM_SAMPLES)
self.assertEqual(len(self.replay._storage), NUM_SAMPLES)
def test_add_numpy(self):
for shape in [(VECTOR_SIZE,), (1, VECTOR_SIZE)]:
vector = np.random.rand(*shape)
for i in range(NUM_SAMPLES):
self.replay.append(vector,
vector,
i,
vector,
False,
vector=vector)
ref_size = th.Size([NUM_SAMPLES, VECTOR_SIZE])
self.assertTrue(isinstance(self.replay.state(), th.Tensor))
self.assertEqual(self.replay.state().size(), ref_size)
self.assertTrue(isinstance(self.replay.action(), th.Tensor))
self.assertEqual(self.replay.action().size(), ref_size)
self.assertTrue(isinstance(self.replay.reward(), th.Tensor))
self.assertTrue(isinstance(self.replay.next_state(), th.Tensor))
self.assertEqual(self.replay.next_state().size(), ref_size)
self.assertTrue(isinstance(self.replay.done(), th.Tensor))
self.replay.empty()
def test_add_torch(self):
for shape in [(VECTOR_SIZE, ), (1, VECTOR_SIZE)]:
vector = th.randn(*shape)
for i in range(NUM_SAMPLES):
self.replay.append(vector,
vector,
i,
vector,
False,
vector=vector)
ref_size = th.Size([NUM_SAMPLES, VECTOR_SIZE])
self.assertTrue(isinstance(self.replay.state(), th.Tensor))
self.assertEqual(self.replay.state().size(), ref_size)
self.assertTrue(isinstance(self.replay.action(), th.Tensor))
self.assertEqual(self.replay.action().size(), ref_size)
self.assertTrue(isinstance(self.replay.reward(), th.Tensor))
self.assertTrue(isinstance(self.replay.next_state(), th.Tensor))
self.assertEqual(self.replay.next_state().size(), ref_size)
self.assertTrue(isinstance(self.replay.done(), th.Tensor))
self.replay.empty()
def test_slice(self):
# Fill replay
count = 0
for shape in [(VECTOR_SIZE, ), (1, VECTOR_SIZE)]:
vector = th.randn(*shape)
for i in range(NUM_SAMPLES):
count += 1
self.replay.append(vector,
vector,
i,
vector,
random.choice([False, True]),
vector=vector,
id=count)
subsample = self.replay[0:len(self.replay)//2]
self.assertTrue(isinstance(subsample, ch.ExperienceReplay))
self.assertEqual(len(subsample), len(self.replay)//2)
self.assertTrue(isinstance(self.replay[0], ch.Transition))
subsample = self.replay[-1:]
self.assertEqual(len(subsample), 1)
def test_sample(self):
# Test empty
sample = self.replay.sample()
self.assertEqual(len(sample), 0)
self.assertTrue(isinstance(self.replay, ch.ExperienceReplay))
# Fill replay
count = 0
for shape in [(VECTOR_SIZE, ), (1, VECTOR_SIZE)]:
vector = th.randn(*shape)
for i in range(NUM_SAMPLES):
count += 1
self.replay.append(vector,
vector,
i,
vector,
random.choice([False, True]),
vector=vector,
id=count)
for _ in range(30):
# Test default arguments
sample = self.replay.sample()
self.assertEqual(len(sample), 1)
# Test size
sample = self.replay.sample(size=NUM_SAMPLES//2)
self.assertEqual(len(sample), NUM_SAMPLES//2)
# Test contiguous
sample = self.replay.sample(size=NUM_SAMPLES//3,
contiguous=True)
ids = sample.id()
for i, id in enumerate(ids[:-1]):
self.assertEqual(id + 1, ids[i+1])
# Test single episode
sample = self.replay.sample(size=1, episodes=True)
self.assertTrue(bool(sample[-1].done.item()))
for i, sars in enumerate(sample[:-1]):
self.assertTrue(not sample[i].done)
self.assertEqual(sars.id + 1,
sample[i+1].id)
# Test multiple episodes
total_episodes = self.replay.done().sum().int().item()
for num_episodes in [total_episodes, total_episodes//2, 1]:
sample = self.replay.sample(size=num_episodes,
episodes=True)
num_sampled_episodes = sample.done().sum().int().item()
self.assertEqual(num_sampled_episodes, num_episodes)
# Test multiple contiguous episodes
total_episodes = self.replay.done().sum().int().item()
for num_episodes in [total_episodes, total_episodes//2, 1]:
sample = self.replay.sample(size=num_episodes,
episodes=True,
contiguous=True)
num_sampled_episodes = sample.done().sum().int().item()
self.assertEqual(num_sampled_episodes, num_episodes)
for i, sars in enumerate(sample[:-1]):
if not sars.done:
self.assertEqual(sample[i].id+1,
sample[i+1].id)
def test_append(self):
new_replay = ch.ExperienceReplay()
vector = np.random.rand(VECTOR_SIZE)
for i in range(NUM_SAMPLES):
self.replay.append(vector,
vector,
i,
vector,
False,
vector=vector)
new_replay.append(vector,
vector,
i,
vector,
False,
vector=vector)
self.assertEqual(len(self.replay), len(new_replay))
new_replay = self.replay + new_replay
self.assertEqual(NUM_SAMPLES * 2, len(new_replay))
self.replay += new_replay
self.assertEqual(NUM_SAMPLES * 3, len(self.replay))
def test_save_and_load(self):
old_replay = self.replay
vector = np.random.rand(VECTOR_SIZE)
for i in range(NUM_SAMPLES):
old_replay.append(vector,
vector,
i,
vector,
False,
vector=vector)
# save the old file
old_replay.save('testing_temp_file.pt')
# load the saved file to a new file
new_replay = ch.ExperienceReplay()
new_replay.load('testing_temp_file.pt')
# check size
self.assertEqual(len(old_replay._storage),
len(new_replay._storage))
self.assertEqual(len(old_replay.state()),
len(new_replay.state()))
self.assertEqual(len(old_replay.action()),
len(new_replay.action()))
self.assertEqual(len(old_replay.reward()),
len(new_replay.reward()))
self.assertEqual(len(old_replay.next_state()),
len(new_replay.next_state()))
self.assertEqual(len(old_replay.done()),
len(new_replay.done()))
self.assertEqual(len(old_replay.vector()),
len(new_replay.vector()))
# check content
for a, b in zip(old_replay, new_replay):
self.assertTrue(close(a.state, b.state))
self.assertTrue(close(a.action, b.action))
self.assertTrue(close(a.reward, b.reward))
self.assertTrue(close(a.next_state, b.next_state))
self.assertTrue(close(a.done, b.done))
self.assertTrue(close(a.vector, b.vector))
os.remove('testing_temp_file.pt')
def test_replay_myattr(self):
standard_replay = self.replay
vector = np.random.rand(VECTOR_SIZE)
# a random tensor to be stuffed in
test_tensor = th.randn(3, 3, dtype=th.double)
# initialization, stuff just tensors in
# and the results type should still be tensor
for i in range(NUM_SAMPLES):
standard_replay.append(vector,
vector,
i,
vector,
False,
test=test_tensor)
self.assertTrue(isinstance(standard_replay.test(), th.Tensor))
def test_slices(self):
for i in range(NUM_SAMPLES):
self.replay.append(th.randn(VECTOR_SIZE),
th.randn(VECTOR_SIZE),
i,
th.randn(VECTOR_SIZE),
False,
vector=th.randn(VECTOR_SIZE))
sliced = self.replay[0:-3]
self.assertEqual(len(sliced), len(self.replay) - 3)
for sars, sars_ in zip(self.replay, sliced):
self.assertTrue(close(sars.state, sars_.state))
self.assertTrue(close(sars.action, sars_.action))
self.assertTrue(close(sars.reward, sars_.reward))
self.assertTrue(close(sars.next_state, sars_.next_state))
self.assertTrue(close(sars.vector, sars_.vector))
def test_to_device(self):
for i in range(NUM_SAMPLES):
self.replay.append(th.randn(VECTOR_SIZE),
th.randn(VECTOR_SIZE),
i,
th.randn(VECTOR_SIZE),
False,
vector=th.randn(VECTOR_SIZE))
# Test function calls
replay = self.replay.to(None)
self.assertEqual(len(replay), len(self.replay))
replay = self.replay.to('cpu')
self.assertEqual(len(replay), len(self.replay))
replay = self.replay.cpu()
self.assertEqual(len(replay), len(self.replay))
for cr, sr in zip(replay, self.replay):
self.assertTrue(close(cr.state, sr.state))
self.assertTrue(close(cr.action, sr.action))
self.assertTrue(close(cr.next_state, sr.next_state))
self.assertTrue(close(cr.reward, sr.reward))
self.assertTrue(close(cr.vector, sr.vector))
self.assertTrue(close(cr.vector, sr.vector))
# Test cuda
if th.cuda.is_available():
cuda_replay = self.replay.cuda()
self.assertEqual(len(cuda_replay), len(self.replay))
for cr, sr in zip(cuda_replay, self.replay):
self.assertTrue(close(cr.state, sr.state.cuda()))
self.assertTrue(close(cr.action, sr.action.cuda()))
self.assertTrue(close(cr.next_state, sr.next_state.cuda()))
self.assertTrue(close(cr.reward, sr.reward.cuda()))
self.assertTrue(close(cr.vector, sr.vector.cuda()))
self.assertTrue(close(cr.vector, sr.vector.cuda()))
replay = cuda_replay.to('cpu')
self.assertEqual(len(replay), len(self.replay))
for cr, sr in zip(replay, self.replay):
self.assertTrue(close(cr.state, sr.state))
self.assertTrue(close(cr.action, sr.action))
self.assertTrue(close(cr.next_state, sr.next_state))
self.assertTrue(close(cr.reward, sr.reward))
self.assertTrue(close(cr.vector, sr.vector))
self.assertTrue(close(cr.vector, sr.vector))
def test_to_dtype(self):
for i in range(NUM_SAMPLES):
self.replay.append(th.randn(VECTOR_SIZE),
th.randn(VECTOR_SIZE),
i,
th.randn(VECTOR_SIZE),
False,
vector=th.randn(VECTOR_SIZE))
f32 = self.replay.to(th.float32)
f64 = self.replay.to(th.float64)
i32 = self.replay.to(th.int32)
i64 = self.replay.to(th.int64)
def test_half_double(self):
for i in range(NUM_SAMPLES):
self.replay.append(th.randn(VECTOR_SIZE),
th.randn(VECTOR_SIZE),
i,
th.randn(VECTOR_SIZE),
False,
vector=th.randn(VECTOR_SIZE))
half = self.replay.half()
half_dtype = self.replay[0].state.half().dtype
for sars in half:
self.assertTrue(sars.state.dtype == half_dtype)
double = self.replay.double()
double_dtype = self.replay[0].state.double().dtype
for sars in double:
self.assertTrue(sars.state.dtype == double_dtype)
if th.cuda.is_available():
cuda_replay = self.replay.cuda()
half = cuda_replay.half()
half_dtype = cuda_replay[0].state.half().dtype
for sars in half:
self.assertTrue(sars.state.dtype == half_dtype)
double = cuda_replay.double()
double_dtype = cuda_replay[0].state.double().dtype
for sars in double:
self.assertTrue(sars.state.dtype == double_dtype)
def test_flatten(self):
def original_flatten(replay): # slow but correct
if not replay.vectorized:
return replay
flat_replay = ch.ExperienceReplay(device=replay.device, vectorized=False)
for sars in replay._storage:
for i in range(sars.done.shape[0]):
for field in sars._fields:
if getattr(sars, field) is None:
__import__('pdb').set_trace()
transition = {
field: getattr(sars, field)[i] for field in sars._fields
}
# need to add dimension back because of indexing above.
transition = {
k: v.unsqueeze(0)
if ch._utils._istensorable(v) else v
for k, v in transition.items()
}
flat_replay.append(**transition)
return flat_replay
num_envs = 8
batch_size = 2^5
replay_size = 2^6
s_shape = (num_envs, 9, 84, 84)
a_shape = (num_envs, 84)
for device in ['cpu', 'cuda']:
if not th.cuda.is_available() and device == 'cuda':
continue
# generate data
replay = ch.ExperienceReplay(vectorized=True)
for step in range(replay_size):
action = th.randn(*a_shape)
state = th.randn(*s_shape)
done = th.randint(low=0, high=1, size=(num_envs, 1))
reward = th.randn((num_envs, 1))
info = {
'success': [0.0, ] * num_envs,
'numpy': np.random.randn(num_envs, 23, 4)
}
replay.append(state, action, reward, state, done, **info)
replay.to(device)
# test the two flatten are identical
for batch in [replay, replay.sample(batch_size)]:
b1 = original_flatten(batch)
b2 = batch.flatten()
for sars1, sars2 in zip(b1, b2):
for field in sars1._fields:
val1 = getattr(sars1, field)
val2 = getattr(sars2, field)
self.assertTrue(
(val1.double() - val2.double()).norm().item() < 1e-8,
'flatten values mismatch',
)
self.assertTrue(
val1.shape == val2.shape,
'flatten shape mismatch',
)
self.assertTrue(
val1.device == val2.device,
'flatten device misatch',
)
    def test_nsteps(self):
        """Check n-step sampling: next_state/done/reward roll forward correctly.

        Builds ``num_episodes`` episodes of fixed length where every tensor
        field of transition ``i`` equals ``i`` (1-based), so a sampled
        transition's origin can be recovered from its ``idx`` field.
        """
        episode_length = 10
        num_episodes = 20
        tensor = th.ones(10)
        replay = ch.ExperienceReplay()
        for i in range(1, 1+(num_episodes * episode_length)):
            replay.append(
                state=tensor * i,
                action=tensor * i,
                reward=i,
                next_state=tensor * i,
                # episode boundary every `episode_length` steps
                done=bool(i % episode_length == 0),
                extra1=tensor + 1,
                extra2=tensor + 2,
                extra3=tensor + 3,
                # idx records the storage position for later lookups
                idx=i-1,
            )
        # Sweep the full cross-product of sampling options.
        for bsz in [0, 1, 16]:
            for nsteps in [1, 3, 15]:
                for contiguous in [False, True]:
                    for episodes in [False, True]:
                        for discount in [0.0, 0.5, 1.0, 1]:
                            batch = replay.sample(
                                size=bsz,
                                contiguous=contiguous,
                                episodes=episodes,
                                nsteps=nsteps,
                                discount=discount,
                            )
                            # test basic things
                            # episodes=True returns whole episodes, so the
                            # batch holds bsz * episode_length transitions.
                            length = bsz * episode_length if episodes else bsz
                            self.assertEqual(len(batch), length)
                            if episodes:
                                # count done flags of the sampled transitions:
                                # exactly bsz episode terminators must appear
                                num_eps = sum([replay[sars.idx.int().item()].done for sars in batch])
                                self.assertEqual(bsz, num_eps)
                            for i, sars in enumerate(batch):
                                # extra fields are untouched by n-step rollups
                                self.assertTrue(close(sars.extra1, tensor+1))
                                self.assertTrue(close(sars.extra2, tensor+2))
                                self.assertTrue(close(sars.extra3, tensor+3))
                                if contiguous and i < length - 1:
                                    # contiguous batches carry consecutive idx
                                    self.assertTrue(batch[i].idx + 1 == batch[i+1].idx)
                            # test next_state, done, discounting works
                            for sars in batch:
                                idx = sars.idx.int().item()
                                sars_reward = 0.0
                                # manually accumulate the discounted n-step
                                # return, stopping early at episode ends
                                for n in range(nsteps):
                                    next_sars = replay[idx+n]
                                    sars_reward = sars_reward + discount**n * next_sars.reward.item()
                                    if next_sars.done:
                                        break
                                self.assertTrue(close(sars.next_state, next_sars.next_state))
                                self.assertTrue(close(sars.done, next_sars.done))
                                self.assertTrue(close(sars.reward, sars_reward))
if __name__ == '__main__':
    # Allow running this test module directly: ``python <this_file>.py``.
    unittest.main()
|
45973df7debe7c5b0f52bf00a6989cb4cb83faa6
|
87d9e6c0606ec9b18d2016fe4e21601c7b37b216
|
/src/postmarker/models/domains.py
|
b44280d3722b5ebb73658ae65d9e02f106cd3caf
|
[
"Python-2.0",
"MIT"
] |
permissive
|
Stranger6667/postmarker
|
d693f1b4d03f8770c5415048ad896601ba618a04
|
c5717014b6c837502353a8a5361832cb3bc49d7b
|
refs/heads/master
| 2023-06-22T01:57:01.797338
| 2022-01-15T14:09:43
| 2022-01-15T14:09:43
| 69,878,276
| 116
| 28
|
MIT
| 2023-06-20T20:17:31
| 2016-10-03T14:24:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,119
|
py
|
domains.py
|
from .base import Model, ModelManager
class Domain(Model):
    """A single Postmark domain record with verification helpers."""

    def __str__(self):
        name = self._data.get("Name")
        uid = self._data.get("ID")
        return "{}: {} ({})".format(self.__class__.__name__, name, uid)

    def get(self):
        """Refresh this instance's data from the API and return it."""
        refreshed = self._manager.get(self.ID)
        self._data = refreshed._data
        return self

    def edit(self, **kwargs):
        """Update the domain server-side and mirror the change locally."""
        self._update(self._manager.edit(self.ID, **kwargs))

    def delete(self):
        """Delete this domain via the manager."""
        return self._manager.delete(self.ID)

    def verifyspf(self):
        return self._manager.verifyspf(self.ID)

    def verifydkim(self):
        return self._manager.verifydkim(self.ID)

    def verifyreturnpath(self):
        return self._manager.verifyreturnpath(self.ID)

    def rotatedkim(self):
        return self._manager.rotatedkim(self.ID)
class DomainsManager(ModelManager):
    """API gateway for the Postmark ``/domains`` endpoints."""

    name = "domains"
    model = Domain
    token_type = "account"

    def get(self, id):
        """Fetch a single domain by id and wrap it in a :class:`Domain`."""
        return self._init_instance(self.call("GET", "/domains/%s" % id))

    def create(self, Name, ReturnPathDomain=None):
        """Create a new domain; ``ReturnPathDomain`` is optional."""
        payload = {"Name": Name, "ReturnPathDomain": ReturnPathDomain}
        return self._init_instance(self.call("POST", "/domains", data=payload))

    def edit(self, id, ReturnPathDomain):
        """Change a domain's custom return-path domain."""
        payload = {"ReturnPathDomain": ReturnPathDomain}
        return self.call("PUT", "/domains/%s" % id, data=payload)

    def all(self, count=500, offset=0):
        """Return all domains, following pagination transparently."""
        responses = self.call_many("GET", "/domains", count=count, offset=offset)
        return self.expand_responses(responses, "Domains")

    def delete(self, id):
        return self.call("DELETE", "/domains/%s" % id)["Message"]

    def verifyspf(self, id):
        return self.call("POST", "/domains/%s/verifyspf" % id)

    def verifydkim(self, id):
        return self.call("PUT", "/domains/%s/verifyDkim" % id)

    def verifyreturnpath(self, id):
        return self.call("PUT", "/domains/%s/verifyReturnPath" % id)

    def rotatedkim(self, id):
        return self.call("POST", "/domains/%s/rotatedkim" % id)
|
d2c778f4bf398e89bae51307d1abf0d9dd4563dd
|
b4faab9b904d155ce6e781a675f972dcb810c008
|
/nunif/modules/charbonnier_loss.py
|
4149bd784782a6f99ca7426aef64ee5a97c2ca5b
|
[
"MIT",
"CC-BY-NC-4.0",
"Apache-2.0"
] |
permissive
|
nagadomi/nunif
|
0c595d3e61f3c89082ce7481cfba139b85ac863d
|
6d4b92da09801572e984b05f6733d460b60250aa
|
refs/heads/master
| 2023-08-31T21:29:56.460275
| 2023-08-21T18:16:01
| 2023-08-21T18:16:01
| 202,088,108
| 486
| 59
|
MIT
| 2023-08-04T05:51:17
| 2019-08-13T07:23:32
|
Python
|
UTF-8
|
Python
| false
| false
| 622
|
py
|
charbonnier_loss.py
|
from torch import nn
import torch
def charbonnier_loss(input, target, reduction="mean", eps=1.0e-6):
    """Charbonnier (smooth L1-like) loss: sqrt((input - target)^2 + eps^2).

    reduction: None or "none" returns the elementwise loss, "mean" averages,
    and any other value sums (matching the historical contract).
    """
    per_element = torch.sqrt((input - target) ** 2 + eps ** 2)
    if reduction in (None, "none"):
        return per_element
    if reduction == "mean":
        return per_element.mean()
    return per_element.sum()
class CharbonnierLoss(nn.Module):
    """Module wrapper around :func:`charbonnier_loss`."""

    def __init__(self, eps=1e-6, reduction="mean"):
        super().__init__()
        self.eps = eps              # smoothing constant added under the sqrt
        self.reduction = reduction  # "none" | "mean" | anything-else => sum

    def forward(self, input, target):
        return charbonnier_loss(
            input,
            target,
            reduction=self.reduction,
            eps=self.eps,
        )
|
02145d5e509634892ea13491ac95d3e4d6f67dbc
|
74164811083a502d294d970ade1ec81aa49e130c
|
/MicroTokenizer/ensemble/merge_solutions.py
|
8f8e4337ab4bdf24c09c72173d5c395ace888f04
|
[
"MIT"
] |
permissive
|
howl-anderson/MicroTokenizer
|
51d89a52edd84f863e8f06426a8da5517b2b9c1a
|
f0ad19ee42fc947f432dfcbe687a474ccc78c7c8
|
refs/heads/master
| 2023-01-05T12:42:27.294464
| 2021-09-28T10:32:02
| 2021-09-28T10:32:02
| 137,062,055
| 153
| 19
|
MIT
| 2022-12-26T20:40:49
| 2018-06-12T11:28:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,856
|
py
|
merge_solutions.py
|
import networkx as nx
class MergeSolutions(object):
    """Merge several candidate tokenizations of the same string.

    Every candidate segmentation is folded into one shared DAG: token nodes
    (keyed by start offset) alternate with positional "checkpoint" nodes, so
    different segmentations can rejoin wherever their offsets align.  The
    merged result is the start->end shortest path, which prefers longer
    tokens (fewer hops).
    """

    def __init__(self):
        self.G = nx.DiGraph()
        self.start_node = "<start>"
        self.G.add_node(self.start_node, label=self.start_node)
        self.end_node = "<end>"
        self.G.add_node(self.end_node, label=self.end_node)
        self.existing_nodes = {self.start_node, self.end_node}
        self.existing_checkpoint_notes = set()

    def merge(self, candidate_token_list):
        """Merge candidate token lists and return the best token sequence.

        :param candidate_token_list: iterable of token lists; all lists must
            segment the same underlying string (offsets must line up).
        :return: list of tokens on the shortest merged path.
        """
        for token_list in candidate_token_list:
            index = 0
            # default previous node is start node
            previous_node = self.start_node
            current_node = None
            current_checkpoint_node = None
            for token in token_list:
                # Token nodes are keyed by start offset so identical tokens
                # at different positions stay distinct.
                current_node = "{}_{}".format(index, token)
                # update immediately
                index += len(token)
                current_checkpoint_node = "ck_{}".format(index)
                if current_node not in self.existing_nodes:
                    self.G.add_node(current_node, token=token, label=token)
                    self.existing_nodes.add(current_node)
                if current_checkpoint_node not in self.existing_checkpoint_notes:
                    self.G.add_node(
                        current_checkpoint_node, token="", label=current_checkpoint_node
                    )
                    self.existing_checkpoint_notes.add(current_checkpoint_node)
                # token hop costs 1; reaching its checkpoint is free
                self.G.add_edge(previous_node, current_node, weight=1)
                self.G.add_edge(current_node, current_checkpoint_node, weight=0)
                # update variable
                previous_node = current_checkpoint_node
            # link last token to end node
            self.G.add_edge(current_checkpoint_node, self.end_node, weight=1)
        # FIX: pass weight="weight" — nx.shortest_path treats the graph as
        # unweighted unless the edge-attribute name is given explicitly, so
        # the weights declared above were previously ignored.
        raw_shortest_path_nodes = nx.shortest_path(
            self.G, source=self.start_node, target=self.end_node, weight="weight"
        )
        # remove start and end nodes
        shortest_path_nodes = raw_shortest_path_nodes[1:-1]
        # remove all the checkpoint nodes (they carry an empty token)
        cleaned_shortest_path_nodes = filter(
            lambda x: self.G.nodes.get(x).get("token"), shortest_path_nodes
        )
        # extract tokens
        best_solution_tokens = list(
            map(lambda x: self.G.nodes.get(x)["token"], cleaned_shortest_path_nodes)
        )
        return best_solution_tokens

    def write_graph(self, graph_path):
        """Dump the merge DAG to GraphML for offline inspection."""
        nx.write_graphml(self.G, graph_path)
if __name__ == "__main__":
    # Tiny demo: merge two disagreeing segmentations of the same sentence.
    candidates = [
        ["王小明", "来到", "了", "网易", "杭", "研", "大厦"],
        ["王", "小明", "来到", "了", "网易", "杭研", "大", "厦"],
    ]
    merger = MergeSolutions()
    print(merger.merge(candidates))
    merger.write_graph("./test.graphml")
|
53c4e89784ea79247256eedb1dc4d6ed13ea8b66
|
568fa58296378fa129ab3349adf010daa44ed45b
|
/tests/common/test_run/ascend/prelu_run.py
|
ac8101af8ba5acba9887263f551f12d6db146a04
|
[
"Apache-2.0",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
mindspore-ai/akg
|
37f471badc66de6a831f1f45ad84344f34d23ef2
|
99f33858d6972741748cbfc9ab0bf9600428fef7
|
refs/heads/master
| 2023-07-25T23:03:17.672665
| 2023-07-11T07:33:57
| 2023-07-11T07:33:57
| 274,077,856
| 319
| 36
|
Apache-2.0
| 2021-12-30T13:43:08
| 2020-06-22T08:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,218
|
py
|
prelu_run.py
|
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from akg.utils import kernel_exec as utils
import numpy as np
from akg.topi.util import get_const_tuple
from tests.common.test_op.ascend import prelu
from tests.common.tensorio import compare_tensor
from tests.common.gen_random import random_gaussian
def prelu_run(shape, w_shape, dtype, rtol, attrs):
    """Build the PReLU kernel, optionally run it, and compare to numpy.

    In tuning mode returns the built module (plus test data when the
    'tuning' flag is truthy); otherwise launches the kernel and returns
    (inputs, output, expect, compare_result).
    """
    if 'tuning' in attrs:
        tuning = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(prelu.prelu, [shape, w_shape], [dtype, dtype],
                                  kernel_name=kernel_name, attrs=attrs,
                                  tuning=tuning)
        if not tuning:
            return mod
        expect, input_data, w_data = gen_data(dtype, shape, w_shape)
        return mod, expect, (input_data, w_data, expect)

    mod = utils.op_build_test(prelu.prelu, [shape, w_shape], [dtype, dtype],
                              kernel_name='prelu', attrs=attrs)
    expect, input_data, w_data = gen_data(dtype, shape, w_shape)
    output = utils.mod_launch(mod, (input_data, w_data, expect), expect=expect)
    return ((input_data, w_data), output, expect,
            compare_tensor(output, expect, rtol=rtol))
def gen_data(dtype, shape, w_shape):
    """Create PReLU inputs and the numpy reference output.

    Returns ``(expect, input_data, w_data)`` where ``expect`` is the
    reference PReLU: x for x > 0, slope * x for x < 0.
    """
    input_data = np.random.uniform(low=-1.0, high=1.0,
                                   size=get_const_tuple(shape)).astype(dtype)
    w_data = random_gaussian(w_shape, miu=1, sigma=2.0).astype(dtype.lower())
    positive_part = input_data * (input_data > 0)
    negative_part = input_data * (input_data < 0)
    if w_shape[0] == 1:
        # single shared slope
        expect = positive_part + negative_part * w_data[0]
    else:
        # per-channel slopes broadcast across the NCHW input
        slopes = np.broadcast_to(w_data.reshape(1, w_shape[0], 1, 1), shape)
        expect = positive_part + negative_part * slopes
    return expect, input_data, w_data
|
f83267d1fc636e99cbf1b3b104a834af0a1b3017
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/automation/test_recorder.py
|
4aa84dbd6027eeee50828515f325a0225861adaf
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,199
|
py
|
test_recorder.py
|
"""The tests for automation recorder."""
from __future__ import annotations
import pytest
from homeassistant.components import automation
from homeassistant.components.automation import (
ATTR_CUR,
ATTR_LAST_TRIGGERED,
ATTR_MAX,
ATTR_MODE,
CONF_ID,
)
from homeassistant.components.recorder import Recorder
from homeassistant.components.recorder.history import get_significant_states
from homeassistant.const import ATTR_ENTITY_ID, ATTR_FRIENDLY_NAME
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from tests.common import async_mock_service
from tests.components.recorder.common import async_wait_recording_done
@pytest.fixture
def calls(hass):
    """Track calls to a mock service."""
    # Returns the list that async_mock_service appends service calls to,
    # so the test can assert how the automation invoked "test.automation".
    return async_mock_service(hass, "test", "automation")
async def test_exclude_attributes(
    recorder_mock: Recorder, hass: HomeAssistant, calls
) -> None:
    """Test automation registered attributes to be excluded.

    Sets up a minimal event-triggered automation, fires its trigger, then
    queries recorded history to verify that the automation's bookkeeping
    attributes were stripped before being written to the recorder while
    ordinary attributes (friendly name) were kept.
    """
    now = dt_util.utcnow()
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "action": {"service": "test.automation", "entity_id": "hello.world"},
            }
        },
    )
    await hass.async_block_till_done()
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    # The mock service must have been invoked exactly once by the automation.
    assert len(calls) == 1
    assert ["hello.world"] == calls[0].data.get(ATTR_ENTITY_ID)
    # Wait until the recorder has flushed the state changes to the database.
    await async_wait_recording_done(hass)
    # Fetch everything recorded since `now` for all known entities.
    states = await hass.async_add_executor_job(
        get_significant_states, hass, now, None, hass.states.async_entity_ids()
    )
    assert len(states) == 1
    for entity_states in states.values():
        for state in entity_states:
            # Internal automation attributes must not be persisted...
            assert ATTR_LAST_TRIGGERED not in state.attributes
            assert ATTR_MODE not in state.attributes
            assert ATTR_CUR not in state.attributes
            assert CONF_ID not in state.attributes
            assert ATTR_MAX not in state.attributes
            # ...but user-facing attributes are.
            assert ATTR_FRIENDLY_NAME in state.attributes
|
4212a4edb1893a80b545e6994ddf43b1c2e9501a
|
2dd26e031162e75f37ecb1f7dd7f675eeb634c63
|
/nemo/collections/nlp/models/language_modeling/megatron_gpt_peft_models.py
|
c32c9a8c5d237090ba4c325076c309b770897bb5
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/NeMo
|
1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1
|
c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7
|
refs/heads/main
| 2023-08-21T15:28:04.447838
| 2023-08-21T00:49:36
| 2023-08-21T00:49:36
| 200,722,670
| 7,957
| 1,986
|
Apache-2.0
| 2023-09-14T18:49:54
| 2019-08-05T20:16:42
|
Python
|
UTF-8
|
Python
| false
| false
| 24,598
|
py
|
megatron_gpt_peft_models.py
|
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_sft_model import MegatronGPTSFTModel
from nemo.collections.nlp.modules.common.megatron.adapters.parallel_adapters import (
AdapterName,
InfusedAdapterConfig,
LoraKQVAdapterConfig,
LoraKQVAdapterWeightTyingConfig,
MLPInfusedAdapterConfig,
ParallelLinearAdapterConfig,
ParallelLinearAdapterWeightTyingConfig,
PromptEncoderAdapterConfig,
)
from nemo.core.classes.mixins import adapter_mixins
from nemo.utils import logging, model_utils
class MegatronGPTPEFTModel(MegatronGPTSFTModel):
    """
    base class for all mixin based adapter models

    Subclasses must set ``self.peft_name_keys`` (list of adapter names) and
    ``self.name_key_to_cfg`` (adapter name -> adapter config) before calling
    ``super().__init__``.  This base class then freezes the backbone, injects
    the PEFT modules, and routes checkpointing / optimization to only the
    adapter parameters.
    """

    def __init__(self, cfg: DictConfig, trainer: Trainer):
        super().__init__(cfg, trainer)
        # Flips to True in setup(); before that, state_dict() must still
        # expose the full model so base-model checkpoints can be loaded.
        self.setup_complete = False
        self.base_keys = self.get_all_keys()
        self.freeze()
        self.init_peft_modules()
        # Whatever parameter names appeared after injection are the PEFT params.
        self.adapter_keys = self.get_all_keys() - self.base_keys

    def first_stage_of_pipeline(self):
        # ``model.pre_process`` is truthy only on the first pipeline stage;
        # fall back to False (with a warning) when it cannot be detected.
        if hasattr(self, "model") and hasattr(self.model, "pre_process"):
            return self.model.pre_process
        logging.warning("no attribute named model or no model.pre_process found. Can not detect stage of pipeline...")
        return False

    def init_peft_modules(self):
        """
        Randomly initialize the peft params and add them to the appropriate modules.
        """
        assert len(self.peft_name_keys) > 0, "peft_name_keys have not been set no PEFT modules will be added"
        assert len(self.name_key_to_cfg) > 0, "name_key_to_cfg has not been set no PEFT modules will be added"
        logging.info(f"Before adding PEFT params:\n{self.summarize()}")
        for _, module in self.named_modules():
            if isinstance(module, adapter_mixins.AdapterModuleMixin):
                for peft_key in self.peft_name_keys:
                    peft_cfg = self.name_key_to_cfg[peft_key]
                    # Only attach where the module declares it accepts this
                    # adapter config's target class.
                    if model_utils.import_class_by_path(peft_cfg._target_) in module.get_accepted_adapter_types():
                        module.add_adapter(
                            name=peft_key, cfg=peft_cfg,
                        )
        logging.info(f"After adding PEFT params:\n{self.summarize()}")
        return True

    def setup(self, stage=None):
        super().setup(stage)
        # From here on, state_dict()/load_state_dict() operate on adapter
        # parameters only (see below).
        self.setup_complete = True

    def get_all_keys(self,):
        """
        Returns all the keys in the model
        """
        k = [n for n, p in self.named_parameters()]
        return set(k)

    def get_peft_state_dict(self,):
        """
        Gets the keys associated with the adapters only.
        """
        state_dict = self.model.state_dict(prefix="model.")
        peft_state_dict = {}
        for k in self.adapter_keys:
            peft_state_dict[k] = state_dict[k]
        return peft_state_dict

    def state_dict(self, destination=None, prefix=None, keep_vars=False):
        if self.setup_complete:
            # Once setup is complete we no longer need to track the frozen part of the model. Only there adapter state dict keeps changing so state_dict only track these.
            return self.get_peft_state_dict()
        else:
            # we want all the params with the same keys as calling self.state_dict()
            # but we can't call self.state_dict() here as it would be a recursive call.
            # so we call self.model.state_dict(prefix="model.") which will return all the keys and params same as calling self.state_dict()
            return self.model.state_dict(prefix="model.")

    def load_state_dict(self, state_dict, strict: bool = True):
        if self.setup_complete:
            # at this stage only PEFT params will appear in the state_dict arg
            # so we only update those while the rest of the model is frozen.
            # setting strict=False will ignore the missing keys (which are not being updated anyway)
            # explicitly check if state_dict.keys matches all the expected self.adapter_keys since we don't have the
            # safety in strict=True anymore.
            assert set(state_dict.keys()) == self.adapter_keys
            super().load_state_dict(state_dict, strict=False)
        else:
            super().load_state_dict(state_dict, strict=True)

    def setup_optimizer_param_groups(self):
        """
        ModelPT override. Optimizer will get self._optimizer_param_groups.
        Makes two optimizer param groups, one for the frozen model params
        and one for the prompt-table/prompt-encoder params. The learning
        rate for the frozen model's params will always be zero effectively
        freezing the model's params but still allowing for the needed gradients
        to be passed around in pipeline parallel models. The prompt-encoder
        and/or prompt table will use the learning rate set by the user.
        """
        self.freeze()  # Freeze the entire model
        opt_params = []
        for _, module in self.named_modules():
            if isinstance(module, adapter_mixins.AdapterModuleMixin) and module.is_adapter_available():
                module.set_enabled_adapters(enabled=True)
                module.unfreeze_enabled_adapters()  # selectively unfreeze the adapter modules.
                opt_params += [p for p in module.parameters() if p.requires_grad]
        self._optimizer_param_groups = ({"params": opt_params},)
        logging.info(f"Optimizer groups set:\n{self.summarize()}")
class MegatronGPTLayerwisePEFTModel(MegatronGPTPEFTModel):
    """PEFT base class that injects adapters only into selected layers.

    Subclasses must set ``self.layer_selection`` (a collection of 1-indexed
    transformer layer numbers — see the subclasses' default of
    ``range(1, num_layers + 1)``) in addition to the base-class attributes.
    """

    def __init__(
        self, cfg: DictConfig, trainer: Trainer,
    ):
        super().__init__(cfg, trainer)

    def init_peft_modules(self):
        """
        Randomly initialize the peft params and add them to the appropriate modules.
        """
        assert len(self.peft_name_keys) > 0, "peft_name_keys have not been set no PEFT modules will be added"
        assert len(self.name_key_to_cfg) > 0, "name_key_to_cfg has not been set no PEFT modules will be added"
        logging.info(f"Before adding PEFT params:\n{self.summarize()}")
        # Unlike the base class, only walk layers listed in layer_selection.
        for layer in self.model.language_model.encoder.layers:
            if layer.layer_number in self.layer_selection:
                for _, module in layer.named_modules():
                    if isinstance(module, adapter_mixins.AdapterModuleMixin):
                        for peft_key in self.peft_name_keys:
                            peft_cfg = self.name_key_to_cfg[peft_key]
                            # Only attach where the module accepts this
                            # adapter config's target class.
                            if (
                                model_utils.import_class_by_path(peft_cfg._target_)
                                in module.get_accepted_adapter_types()
                            ):
                                module.add_adapter(
                                    name=peft_key, cfg=peft_cfg,
                                )
        logging.info(f"After adding PEFT params:\n{self.summarize()}")
        return True
class MegatronGPTAdapterModel(MegatronGPTLayerwisePEFTModel):
    """Canonical adapter tuning (Houlsby et al.) on top of MegatronGPTSFTModel.

    Two bottleneck adapters are inserted into each selected transformer layer
    of the base GPT model, per https://arxiv.org/pdf/1902.00751.pdf.  Only
    the adapter weights are trained; once trained they can be saved and
    re-infused into the same GPT model for inference.
    """

    def __init__(
        self, cfg: DictConfig, trainer: Trainer,
    ):
        self.peft_name_keys = [
            AdapterName.PRE_ATTN_ADAPTER,
            AdapterName.POST_ATTN_ADAPTER,
        ]
        tuning_cfg = cfg.peft.adapter_tuning
        shared_cfg = ParallelLinearAdapterConfig(
            in_features=cfg.hidden_size,
            out_features=cfg.hidden_size,
            dim=tuning_cfg.adapter_dim,
            norm_position=tuning_cfg.get("norm_position", "pre"),
            norm_type=tuning_cfg.get("norm_type", "mixedfusedlayernorm"),
            column_init_method=tuning_cfg.get("column_init_method", "xavier"),
            row_init_method=tuning_cfg.get("row_init_method", "zero"),
            dropout=tuning_cfg.adapter_dropout,
        )
        # Both adapter positions share the same config object.
        self.name_key_to_cfg = {key: shared_cfg for key in self.peft_name_keys}
        self.layer_selection = tuning_cfg.get("layer_selection", None)
        if self.layer_selection is None:
            # Default: insert adapters into every layer (1-indexed).
            self.layer_selection = list(range(1, cfg.num_layers + 1))
        super().__init__(cfg, trainer)
class MegatronGPTAdapterModelWeightTying(MegatronGPTLayerwisePEFTModel):
    """Adapter tuning with adapter weights tied across transformer layers.

    Every layer reuses the layer-0 adapter parameters; per-layer behavior
    is recovered through a position embedding indexed by ``pos_idx`` (two
    adapter positions per layer, hence ``num_position_embeddings = 2 *
    num_layers``).

    Fixes vs. the original: removed leftover debug ``print`` calls in
    :meth:`tie_weights` and an ``lnorm`` local that was computed but never
    used.
    """

    def __init__(
        self, cfg: DictConfig, trainer: Trainer,
    ):
        self.peft_name_keys = [
            AdapterName.PRE_ATTN_ADAPTER,
            AdapterName.POST_ATTN_ADAPTER,
        ]
        adapter_tuning_cfg = cfg.peft.adapter_tuning
        adapter_cfg = ParallelLinearAdapterWeightTyingConfig(
            in_features=cfg.hidden_size,
            out_features=cfg.hidden_size,
            dim=adapter_tuning_cfg.adapter_dim,
            norm_position=adapter_tuning_cfg.get("norm_position", "pre"),
            norm_type=adapter_tuning_cfg.get("norm_type", "mixedfusedlayernorm"),
            column_init_method=adapter_tuning_cfg.get("column_init_method", "xavier"),
            row_init_method=adapter_tuning_cfg.get("row_init_method", "zero"),
            dropout=adapter_tuning_cfg.adapter_dropout,
            num_position_embeddings=cfg.num_layers * 2,  # two adapters per layer
            dim_position_embeddings=cfg.hidden_size,
            position_embedding_strategy=adapter_tuning_cfg.get("position_embedding_strategy", None),
        )
        self.name_key_to_cfg = {}
        for k in self.peft_name_keys:
            self.name_key_to_cfg[k] = adapter_cfg
        self.layer_selection = adapter_tuning_cfg.get("layer_selection", None)
        if self.layer_selection is None:
            # Default: insert adapters into every layer (1-indexed).
            self.layer_selection = list(range(1, cfg.num_layers + 1))
        super().__init__(cfg, trainer)
        self.tie_weights()

    def tie_weights(self,):
        """Tie every layer's adapter parameters to the layer-0 adapters."""
        pos_idx = 0
        layer0 = self.model.language_model.encoder.layers[0]
        # Layer 0 keeps its own parameters; it only needs a position index.
        for adapter_name in layer0.adapter_layer:
            adapter = layer0.get_adapter_module(adapter_name)
            adapter.set_position(pos_idx)
            pos_idx += 1
        # Remaining layers share layer 0's weights at successive positions.
        for layer in self.model.language_model.encoder.layers[1:]:
            for adapter_name in layer.adapter_layer:
                adapter_l = layer.get_adapter_module(adapter_name)
                adapter_0 = layer0.get_adapter_module(adapter_name)
                adapter_l.tie_weights(pos_idx, adapter_0)
                pos_idx += 1
class MegatronGPTIA3Model(MegatronGPTLayerwisePEFTModel):
    """(IA)^3 PEFT: learned vectors that rescale key/value/FFN activations.

    Implements "Infused Adapters that Inhibit and Amplify inner activations"
    from Liu et al. (https://arxiv.org/pdf/2205.05638.pdf).  Three small
    adapters are added per transformer layer — each is a vector scaling the
    key, value, or FFN hidden representation.  Once trained, the adapter
    weights can be saved and re-infused into the same GPT model for
    inference.
    """

    def __init__(self, cfg: DictConfig, trainer: Trainer):
        self.peft_name_keys = [AdapterName.KEY_INFUSED, AdapterName.VALUE_INFUSED, AdapterName.MLP_INFUSED]
        # Feature sizes are divided by TP size: each rank scales its shard.
        mlp_cfg = MLPInfusedAdapterConfig(
            in_features=cfg.ffn_hidden_size // cfg.tensor_model_parallel_size
        )
        attn_cfg = InfusedAdapterConfig(in_features=cfg.hidden_size // cfg.tensor_model_parallel_size)
        self.name_key_to_cfg = {}
        for key in self.peft_name_keys:
            if key == AdapterName.MLP_INFUSED:
                self.name_key_to_cfg[key] = mlp_cfg
            elif key in (AdapterName.KEY_INFUSED, AdapterName.VALUE_INFUSED):
                self.name_key_to_cfg[key] = attn_cfg
            else:
                raise ValueError(f"PEFT Key {key} is unknown.")
        super().__init__(cfg, trainer)
class MegatronGPTPTuningModel(MegatronGPTPEFTModel):
"""
MegatronGPTPTuningModel is a model that combines a base model (GPTSFTModel) with a p-tuning prefix in the
input word embedding representations using a prompt-encoder as descripted in Liu et al. https://arxiv.org/pdf/2103.10385.pdf
The mixin framework adds the output of prompt-encoder (i.e. the virtual embeddings) inside
nemo/collections/nlp/modules/common/megatron/language_model.py
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
self.peft_name_keys = [AdapterName.PTUNING_ADAPTER]
adapter_cfg = PromptEncoderAdapterConfig(
cfg.peft.p_tuning.virtual_tokens,
cfg.peft.p_tuning.bottleneck_dim,
cfg.peft.p_tuning.embedding_dim,
cfg.peft.p_tuning.init_std,
cfg.hidden_size,
)
self.name_key_to_cfg = {AdapterName.PTUNING_ADAPTER: adapter_cfg}
super().__init__(cfg, trainer)
self.virtual_tokens = cfg.peft.p_tuning.virtual_tokens
self.trainable_keys = self.adapter_keys - set(
[
"model.language_model.adapter_layer.ptuning_adapter.inference_table.prompt_table.taskname.prompt_embeddings.weight"
]
)
# we exclude the above parameter from training because it is present for backward compatibility for inference using FasterTransformer (@adithyare)
def init_peft_modules(self,):
"""
Initialize the p-tuning prompt encoder in the mixin.
This should only happen in the first stage of the pipeline unlike other PEFT methods like Lora or Adapters
because p-tuning only adds params at input to the encoder layer.
"""
if not self.first_stage_of_pipeline():
# There are no params to add if we are not in the first state of the pipeline
return True
super().init_peft_modules()
return True
def state_dict(self, destination=None, prefix=None, keep_vars=False):
"""
Reimplement state_dict for ptuning because we also need to check the stage of the pipeline.
The check is required to make pp>1 to work.
"""
if self.setup_complete:
if self.first_stage_of_pipeline():
return self.get_peft_state_dict()
# if we are not in the first state of pipeline after setup is done
# there should be no params in the state_dict
return {}
else:
return self.model.state_dict(prefix="model.")
    def load_state_dict(self, state_dict, strict: bool = True):
        """
        Reimplement load_state_dict for ptuning because we also need to check the stage of the pipeline.
        The check is required to make pp>1 to work.
        """
        if self.setup_complete:
            if self.first_stage_of_pipeline():
                # After setup, only the first pipeline stage holds p-tuning
                # params; the incoming dict must contain exactly the adapter
                # keys. strict=False because the base model's weights are
                # intentionally absent from this dict.
                assert set(state_dict.keys()) == self.adapter_keys
                super().load_state_dict(state_dict, strict=False)
            # Stages other than the first have no p-tuning params to load,
            # so they deliberately do nothing here.
        else:
            super().load_state_dict(state_dict, strict=True)
def setup_optimizer_param_groups(self):
if self.first_stage_of_pipeline():
# super().setup_optimizer_param_groups()
self.freeze() # Freeze the entire model
opt_params = []
for n, p in self.named_parameters():
if n in self.trainable_keys:
p.requires_grad = True
opt_params.append(p)
self._optimizer_param_groups = ({"params": opt_params},)
else:
self.freeze() # Freeze the entire model
self._optimizer_param_groups = ({"params": []},)
logging.info(f"Optimizer groups set:\n{self.summarize()}")
class MegatronGPTAdapterPTuningModel(MegatronGPTPEFTModel):
    """
    Combines infused adapters (pre/post attention) with p-tuning in a single
    PEFT model. The two methods are orthogonal, so both parameter sets are
    registered side by side.
    """

    def __init__(self, cfg: DictConfig, trainer: Trainer):
        self.peft_name_keys = [
            AdapterName.PRE_ATTN_ADAPTER,
            AdapterName.POST_ATTN_ADAPTER,
            AdapterName.PTUNING_ADAPTER,
        ]
        # Prompt-encoder (p-tuning) configuration.
        prompt_encoder_cfg = PromptEncoderAdapterConfig(
            cfg.peft.p_tuning.virtual_tokens,
            cfg.peft.p_tuning.bottleneck_dim,
            cfg.peft.p_tuning.embedding_dim,
            cfg.peft.p_tuning.init_std,
            cfg.hidden_size,
        )
        # Parallel-linear adapter configuration, shared by the pre- and
        # post-attention adapter slots.
        at_cfg = cfg.peft.adapter_tuning
        parallel_adapter_cfg = ParallelLinearAdapterConfig(
            in_features=cfg.hidden_size,
            out_features=cfg.hidden_size,
            dim=at_cfg.adapter_dim,
            norm_position=at_cfg.get("norm_position", "pre"),
            norm_type=at_cfg.get("norm_type", "mixedfusedlayernorm"),
            column_init_method=at_cfg.get("column_init_method", "xavier"),
            row_init_method=at_cfg.get("row_init_method", "zero"),
            dropout=at_cfg.adapter_dropout,
        )
        self.name_key_to_cfg = {
            AdapterName.PRE_ATTN_ADAPTER: parallel_adapter_cfg,
            AdapterName.POST_ATTN_ADAPTER: parallel_adapter_cfg,
            AdapterName.PTUNING_ADAPTER: prompt_encoder_cfg,
        }
        super().__init__(cfg, trainer)
        self.virtual_tokens = cfg.peft.p_tuning.virtual_tokens
class MegatronGPTLoRAModel(MegatronGPTLayerwisePEFTModel):
    """
    Combines a base model (GPTSFTModel) with low-rank adapters (LoRA).

    The adapters are injected in
    ``nemo/collections/nlp/modules/common/megatron/attention.py``: a single
    low-rank feedforward layer runs in parallel with the fused KQV projection.
    TODO: optionally add a low-rank adapter in the output projection layer too.
    """

    def __init__(
        self, cfg: DictConfig, trainer: Trainer,
    ):
        self.peft_name_keys = [
            AdapterName.LORA_KQV_ADAPTER,
        ]
        lora_cfg = cfg.peft.lora_tuning
        # Derive kv_channels from hidden_size when not set explicitly.
        kv_channels = cfg.get("kv_channels", None)
        if kv_channels is None:
            assert (
                cfg.hidden_size % cfg.num_attention_heads == 0
            ), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
            kv_channels = cfg.hidden_size // cfg.num_attention_heads
        projection_size = kv_channels * cfg.num_attention_heads

        # 3x: the adapter output feeds the fused K, Q and V projections.
        kqv_adapter_cfg = LoraKQVAdapterConfig(
            in_features=cfg.hidden_size,
            out_features=3 * projection_size,
            dim=lora_cfg.adapter_dim,
            norm_position=None,
            norm_type=None,
            activation="identity",
            column_init_method=lora_cfg.get("column_init_method", "normal"),
            row_init_method=lora_cfg.get("row_init_method", "zero"),
            gather_output=False,
            dropout=lora_cfg.adapter_dropout,
        )
        self.name_key_to_cfg = {key: kqv_adapter_cfg for key in self.peft_name_keys}

        # Default: apply LoRA to every transformer layer (1-indexed).
        self.layer_selection = lora_cfg.get("layer_selection", None)
        if self.layer_selection is None:
            self.layer_selection = list(range(1, cfg.num_layers + 1))
        super().__init__(cfg, trainer)
class MegatronGPTLoRAModelWeightTying(MegatronGPTLayerwisePEFTModel):
    """
    LoRA variant in which the KQV adapter weights are shared (tied) across all
    transformer layers. Each layer differentiates itself via a learned
    position embedding whose integration is selected by
    ``peft.lora_tuning.position_embedding_strategy``:

    * ``None``      -- no position information
    * ``"add"``     -- add a hidden_size-wide embedding
    * ``"biasadd"`` -- add a ``3 * projection_size``-wide embedding
    * ``"concat"`` / ``"mlpconcat"`` -- concatenate an adapter_dim-wide embedding
    """

    def __init__(
        self, cfg: DictConfig, trainer: Trainer,
    ):
        self.peft_name_keys = [
            AdapterName.LORA_KQV_ADAPTER,
        ]
        lora_cfg = cfg.peft.lora_tuning
        if cfg.get("kv_channels", None) is None:
            assert (
                cfg.hidden_size % cfg.num_attention_heads == 0
            ), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
            kv_channels = cfg.hidden_size // cfg.num_attention_heads
        else:
            kv_channels = cfg.kv_channels
        projection_size = kv_channels * cfg.num_attention_heads
        position_embedding_strategy = lora_cfg.get("position_embedding_strategy", None)
        # The width of the per-layer position embedding depends on where it is
        # injected (input add, output bias-add, or concatenation).
        if position_embedding_strategy is None:
            dim_position_embeddings = 0
        elif position_embedding_strategy == "add":
            dim_position_embeddings = cfg.hidden_size
        elif position_embedding_strategy == "biasadd":
            dim_position_embeddings = 3 * projection_size
        elif position_embedding_strategy == "concat":
            dim_position_embeddings = lora_cfg.adapter_dim
        elif position_embedding_strategy == "mlpconcat":
            dim_position_embeddings = lora_cfg.adapter_dim
        else:
            raise RuntimeError(f"Unknown position embedding strategy {position_embedding_strategy} for tied weights")
        adapter_cfg = LoraKQVAdapterWeightTyingConfig(
            in_features=cfg.hidden_size,
            out_features=3 * projection_size,
            dim=lora_cfg.adapter_dim,
            norm_position=None,
            norm_type=None,
            activation="identity",
            column_init_method=lora_cfg.get("column_init_method", "normal"),
            row_init_method=lora_cfg.get("row_init_method", "zero"),
            gather_output=False,
            dropout=lora_cfg.adapter_dropout,
            num_position_embeddings=cfg.num_layers,
            dim_position_embeddings=dim_position_embeddings,
            position_embedding_strategy=position_embedding_strategy,
        )
        self.name_key_to_cfg = {}
        for k in self.peft_name_keys:
            self.name_key_to_cfg[k] = adapter_cfg
        self.layer_selection = lora_cfg.get("layer_selection", None)
        if self.layer_selection is None:
            self.layer_selection = list(range(1, cfg.num_layers + 1))
        super().__init__(cfg, trainer)
        self.tie_weights()

    def tie_weights(self,):
        """
        Tie every layer's KQV adapter weights to those of layer 0.

        Layer 0's adapters keep their own parameters and receive consecutive
        position indices; each subsequent layer's adapter shares layer 0's
        module via ``tie_weights`` and differs only by its position index.
        """
        # BUGFIX: removed leftover debug `print(adapter_name, pos_idx)` calls
        # that spammed stdout on every model construction.
        pos_idx = 0
        layer0 = self.model.language_model.encoder.layers[0]
        for adapter_name in layer0.self_attention.adapter_layer:
            adapter = layer0.self_attention.get_adapter_module(adapter_name)
            adapter.set_position(pos_idx)
            pos_idx += 1
        for layer in self.model.language_model.encoder.layers[1:]:
            for adapter_name in layer.self_attention.adapter_layer:
                adapter_l = layer.self_attention.get_adapter_module(adapter_name)
                adapter_0 = layer0.self_attention.get_adapter_module(adapter_name)
                # NOTE: the previous code also read adapter_0.position_embeddings
                # into a local that was never used; the shared embeddings travel
                # with adapter_0 itself, so only the index and anchor are needed.
                adapter_l.tie_weights(pos_idx, adapter_0)
                pos_idx += 1
|
54c6354058966ea39fddc399ce65ad91ab5718cb
|
811f4cdb25e26f3b27640aaa2e2bca93e660d2d7
|
/tools/benchmarking/utils/__init__.py
|
b9eebfed7836c8126a66de81f48d7e1ee64757fc
|
[
"CC-BY-SA-4.0",
"CC-BY-SA-3.0",
"CC-BY-NC-SA-4.0",
"Python-2.0",
"Apache-2.0"
] |
permissive
|
openvinotoolkit/anomalib
|
4467dfc392398845e816387267cdf979ff76fe15
|
4abfa93dcfcb98771bc768b334c929ff9a02ce8b
|
refs/heads/main
| 2023-09-03T16:49:05.019269
| 2023-08-28T14:22:19
| 2023-08-28T14:22:19
| 423,775,360
| 2,325
| 454
|
Apache-2.0
| 2023-09-14T11:21:33
| 2021-11-02T09:11:38
|
Python
|
UTF-8
|
Python
| false
| false
| 269
|
py
|
__init__.py
|
"""Utils specific to running benchmarking scripts."""
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from .metrics import upload_to_comet, upload_to_wandb, write_metrics
__all__ = ["write_metrics", "upload_to_comet", "upload_to_wandb"]
|
8e7439bbfa2655c6d8deb5074f0ac35c55bbe3eb
|
7e844c1b6f06941285a91bd488700a90ce142216
|
/tests/conftest.py
|
530a4c2a5f9917d62ffed2e2eecfdb8164655740
|
[
"Apache-2.0"
] |
permissive
|
psf/requests
|
8e0ae54deb392c335de687522394ffb8445292e7
|
8b560ecb24ee4fa4e839272dcf2653a4fa525a34
|
refs/heads/main
| 2023-09-04T15:46:56.978881
| 2023-08-29T14:27:10
| 2023-08-29T14:27:10
| 1,362,490
| 13,941
| 3,733
|
Apache-2.0
| 2023-09-14T09:01:08
| 2011-02-13T18:38:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,581
|
py
|
conftest.py
|
try:
from http.server import HTTPServer, SimpleHTTPRequestHandler
except ImportError:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
import ssl
import threading
import pytest
from requests.compat import urljoin
def prepare_url(value):
    """Return a callable that builds URLs beneath *value*'s base URL.

    Issue #1483: the base is normalized to carry exactly one trailing slash
    so that joined suffixes always resolve under it.
    """
    base = f"{value.url.rstrip('/')}/"

    def build(*suffix):
        path = "/".join(suffix)
        return urljoin(base, path)

    return build
@pytest.fixture
def httpbin(httpbin):
    # Wrap the upstream `httpbin` fixture (an object exposing a `.url`
    # attribute) so tests receive a URL-builder callable instead of the raw
    # server object; see prepare_url above.
    return prepare_url(httpbin)
@pytest.fixture
def httpbin_secure(httpbin_secure):
    # Same wrapping as `httpbin`, but for the HTTPS variant of the server.
    return prepare_url(httpbin_secure)
@pytest.fixture
def nosan_server(tmp_path_factory):
    """Spin up a local HTTPS server whose certificate has only a commonName
    (no subjectAltName), yielding ``(host, port, ca_bundle_path)``."""
    # delay importing until the fixture in order to make it possible
    # to deselect the test via command-line when trustme is not available
    import trustme

    tmpdir = tmp_path_factory.mktemp("certs")
    ca = trustme.CA()
    # only commonName, no subjectAltName
    server_cert = ca.issue_cert(common_name="localhost")
    ca_bundle = str(tmpdir / "ca.pem")
    # Write the CA cert so the test client can verify against it.
    ca.cert_pem.write_to_path(ca_bundle)
    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    server_cert.configure_cert(context)
    # Port 0 lets the OS pick a free port; the real port is read back below.
    server = HTTPServer(("localhost", 0), SimpleHTTPRequestHandler)
    server.socket = context.wrap_socket(server.socket, server_side=True)
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.start()
    yield "localhost", server.server_address[1], ca_bundle
    # Teardown: stop serving and wait for the thread to exit.
    server.shutdown()
    server_thread.join()
|
aeadc457adde8c5a5a1074465fa3785d889fe25b
|
5e9576c368e98927e2965bd2fb23bd35d9993d69
|
/featuretools/primitives/standard/aggregation/last.py
|
45728cb3fed49f36a2a4d8c76e8925e94fa89811
|
[
"BSD-3-Clause"
] |
permissive
|
alteryx/featuretools
|
c6e319e063e8e84e7684bf232376f95dc5272160
|
c284c2d27a95b81e0bae913ac90df2b02c8f3b37
|
refs/heads/main
| 2023-08-25T12:21:33.945418
| 2023-08-23T16:30:25
| 2023-08-23T16:30:25
| 102,908,804
| 1,783
| 201
|
BSD-3-Clause
| 2023-09-07T18:53:19
| 2017-09-08T22:15:17
|
Python
|
UTF-8
|
Python
| false
| false
| 656
|
py
|
last.py
|
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library
class Last(AggregationPrimitive):
    """Determines the last value in a list.

    Examples:
        >>> last = Last()
        >>> last([1, 2, 3, 4, 5, None])
        nan
    """

    name = "last"
    input_types = [ColumnSchema()]
    return_type = None
    stack_on_self = False
    description_template = "the last instance of {}"

    def get_function(self, agg_type=Library.PANDAS):
        # Final positional element of the series, even if it is null.
        def take_last(series):
            return series.iloc[-1]

        return take_last
|
dcb8ada4ce86a1053dff4ca05e41cf422b2839bc
|
5dc77586e3e0f9de1f032fd2ca68494d8e58928f
|
/great_expectations/dataset/pandas_dataset.py
|
979294410278cfd94e1337aa0887ada654abbac5
|
[
"Apache-2.0"
] |
permissive
|
great-expectations/great_expectations
|
dd7c22e6277d6b08bee3ff38a015e6e8cd434df6
|
b0290e2fd2aa05aec6d7d8871b91cb4478e9501d
|
refs/heads/develop
| 2023-09-04T09:30:26.395518
| 2023-09-02T00:00:13
| 2023-09-02T00:00:13
| 103,071,520
| 8,931
| 1,535
|
Apache-2.0
| 2023-09-14T19:57:16
| 2017-09-11T00:18:46
|
Python
|
UTF-8
|
Python
| false
| false
| 68,612
|
py
|
pandas_dataset.py
|
import inspect
import json
import logging
from datetime import datetime
from functools import wraps
from operator import ge, gt, le, lt
from typing import List
import jsonschema
import numpy as np
import pandas as pd
from dateutil.parser import parse
from scipy import stats
from great_expectations.compatibility.typing_extensions import override
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.data_asset import DataAsset
from great_expectations.data_asset.util import DocInherit, parse_result_format
from great_expectations.dataset.dataset import Dataset
from great_expectations.dataset.util import (
_scipy_distribution_positional_args_from_dict,
is_valid_continuous_partition_object,
validate_distribution_parameters,
validate_mostly,
)
logger = logging.getLogger(__name__)
class MetaPandasDataset(Dataset):
    """MetaPandasDataset is a thin layer between Dataset and PandasDataset.
    This two-layer inheritance is required to make @classmethod decorators work.
    Practically speaking, that means that MetaPandasDataset implements \
    expectation decorators, like `column_map_expectation` and `column_aggregate_expectation`, \
    and PandasDataset implements the expectation methods themselves.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @classmethod
    def column_map_expectation(cls, func):
        """Constructs an expectation using column-map semantics.
        The MetaPandasDataset implementation replaces the "column" parameter supplied by the user with a pandas Series
        object containing the actual column from the relevant pandas dataframe. This simplifies the implementing expectation
        logic while preserving the standard Dataset signature and expected behavior.
        See :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>` \
        for full documentation of this function.
        """
        # The wrapped function's own (non-self) parameter names; forwarded to
        # cls.expectation so the decorator knows which kwargs to record.
        argspec = inspect.getfullargspec(func)[0][1:]

        @cls.expectation(argspec)
        @wraps(func)
        def inner_wrapper(  # noqa: PLR0913, PLR0912
            self,
            column,
            mostly=None,
            result_format=None,
            row_condition=None,
            condition_parser=None,
            *args,
            **kwargs,
        ):
            if result_format is None:
                result_format = self.default_expectation_args["result_format"]
            result_format = parse_result_format(result_format)
            # Optionally restrict the rows evaluated before mapping.
            if row_condition and self._supports_row_condition:
                data = self._apply_row_condition(
                    row_condition=row_condition, condition_parser=condition_parser
                )
            else:
                data = self
            series = data[column]
            func_args = inspect.getfullargspec(func)[0][1:]
            # Auto-enable datetime parsing when the expectation supports it and
            # the column is already a datetime dtype.
            if (
                "parse_strings_as_datetimes" in func_args
                and pd.api.types.is_datetime64_any_dtype(series)
            ):
                kwargs["parse_strings_as_datetimes"] = True
            if func.__name__ in [
                "expect_column_values_to_not_be_null",
                "expect_column_values_to_be_null",
            ]:
                # Counting the number of unexpected values can be expensive when there is a large
                # number of np.nan values.
                # This only happens on expect_column_values_to_not_be_null expectations.
                # Since there is no reason to look for most common unexpected values in this case,
                # we will instruct the result formatting method to skip this step.
                # FIXME rename to mapped_ignore_values?
                boolean_mapped_null_values = np.full(series.shape, False)
                result_format["partial_unexpected_count"] = 0
            else:
                boolean_mapped_null_values = series.isnull().values
            element_count = int(len(series))
            # FIXME rename nonnull to non_ignored?
            nonnull_values = series[boolean_mapped_null_values == False]
            nonnull_count = int((boolean_mapped_null_values == False).sum())
            # The expectation itself maps each non-null value to a boolean.
            boolean_mapped_success_values = func(self, nonnull_values, *args, **kwargs)
            success_count = np.count_nonzero(boolean_mapped_success_values)
            unexpected_list = list(
                nonnull_values[boolean_mapped_success_values == False]
            )
            unexpected_index_list = list(
                nonnull_values[boolean_mapped_success_values == False].index
            )
            # Optionally reformat datetime-like unexpected values for output.
            if "output_strftime_format" in kwargs:
                output_strftime_format = kwargs["output_strftime_format"]
                parsed_unexpected_list = []
                for val in unexpected_list:
                    if val is None:
                        parsed_unexpected_list.append(val)
                    else:
                        if isinstance(val, str):
                            val = parse(val)  # noqa: PLW2901
                        parsed_unexpected_list.append(
                            datetime.strftime(val, output_strftime_format)
                        )
                unexpected_list = parsed_unexpected_list
            success, percent_success = self._calc_map_expectation_success(
                success_count, nonnull_count, mostly
            )
            return_obj = self._format_map_output(
                result_format,
                success,
                element_count,
                nonnull_count,
                len(unexpected_list),
                unexpected_list,
                unexpected_index_list,
            )
            # FIXME Temp fix for result format
            if func.__name__ in [
                "expect_column_values_to_not_be_null",
                "expect_column_values_to_be_null",
            ]:
                del return_obj["result"]["unexpected_percent_nonmissing"]
                del return_obj["result"]["missing_count"]
                del return_obj["result"]["missing_percent"]
                try:
                    del return_obj["result"]["partial_unexpected_counts"]
                    del return_obj["result"]["partial_unexpected_list"]
                except KeyError:
                    pass
            return return_obj

        inner_wrapper.__name__ = func.__name__
        inner_wrapper.__doc__ = func.__doc__
        return inner_wrapper

    @classmethod
    def column_pair_map_expectation(cls, func):
        """
        The column_pair_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating
        truthiness of some condition on a per row basis across a pair of columns.
        """
        argspec = inspect.getfullargspec(func)[0][1:]

        @cls.expectation(argspec)
        @wraps(func)
        def inner_wrapper(  # noqa: PLR0913
            self,
            column_A,
            column_B,
            mostly=None,
            ignore_row_if="both_values_are_missing",
            result_format=None,
            row_condition=None,
            condition_parser=None,
            *args,
            **kwargs,
        ):
            if result_format is None:
                result_format = self.default_expectation_args["result_format"]
            if row_condition:
                self = self.query(row_condition)
            series_A = self[column_A]
            series_B = self[column_B]
            # Rows flagged True here are excluded from evaluation entirely.
            if ignore_row_if == "both_values_are_missing":
                boolean_mapped_null_values = series_A.isnull() & series_B.isnull()
            elif ignore_row_if == "either_value_is_missing":
                boolean_mapped_null_values = series_A.isnull() | series_B.isnull()
            # elif ignore_row_if == "neither":
            elif ignore_row_if == "never":
                """
                TODO: <Alex>Note: The value of the "ignore_row_if" directive in the commented out line above is correct.
                However, fixing the error would constitute a breaking change. Hence, the documentation is updated now
                (8/16/2021), while the implementation is corrected as part of the Expectations V3 API release.
                </Alex>
                """
                boolean_mapped_null_values = series_A.map(lambda x: False)
            else:
                raise ValueError(f"Unknown value of ignore_row_if: {ignore_row_if}")
            assert len(series_A) == len(
                series_B
            ), "Series A and B must be the same length"
            # This next bit only works if series_A and _B are the same length
            element_count = int(len(series_A))
            nonnull_count = (boolean_mapped_null_values == False).sum()
            nonnull_values_A = series_A[boolean_mapped_null_values == False]
            nonnull_values_B = series_B[boolean_mapped_null_values == False]
            # NOTE(review): nonnull_values is built but never used below
            # (func receives the two series directly) -- dead-code candidate.
            nonnull_values = [
                value_pair
                for value_pair in zip(list(nonnull_values_A), list(nonnull_values_B))
            ]
            boolean_mapped_success_values = func(
                self, nonnull_values_A, nonnull_values_B, *args, **kwargs
            )
            success_count = boolean_mapped_success_values.sum()
            # Unexpected rows: evaluated (non-ignored) and failed the check.
            unexpected_list = [
                value_pair
                for value_pair in zip(
                    list(
                        series_A[
                            (boolean_mapped_success_values == False)
                            & (boolean_mapped_null_values == False)
                        ]
                    ),
                    list(
                        series_B[
                            (boolean_mapped_success_values == False)
                            & (boolean_mapped_null_values == False)
                        ]
                    ),
                )
            ]
            unexpected_index_list = list(
                series_A[
                    (boolean_mapped_success_values == False)
                    & (boolean_mapped_null_values == False)
                ].index
            )
            success, percent_success = self._calc_map_expectation_success(
                success_count, nonnull_count, mostly
            )
            return_obj = self._format_map_output(
                result_format,
                success,
                element_count,
                nonnull_count,
                len(unexpected_list),
                unexpected_list,
                unexpected_index_list,
            )
            return return_obj

        inner_wrapper.__name__ = func.__name__
        inner_wrapper.__doc__ = func.__doc__
        return inner_wrapper

    @classmethod
    def multicolumn_map_expectation(cls, func):
        """
        The multicolumn_map_expectation decorator handles boilerplate issues surrounding the common pattern of
        evaluating truthiness of some condition on a per row basis across a set of columns.
        """
        argspec = inspect.getfullargspec(func)[0][1:]

        @cls.expectation(argspec)
        @wraps(func)
        def inner_wrapper(  # noqa: PLR0913
            self,
            column_list,
            mostly=None,
            ignore_row_if="all_values_are_missing",
            result_format=None,
            row_condition=None,
            condition_parser=None,
            *args,
            **kwargs,
        ):
            if result_format is None:
                result_format = self.default_expectation_args["result_format"]
            if row_condition:
                self = self.query(row_condition)
            test_df = self[column_list]
            # Rows flagged True here are excluded from evaluation entirely.
            if ignore_row_if == "all_values_are_missing":
                boolean_mapped_skip_values = test_df.isnull().all(axis=1)
            elif ignore_row_if == "any_value_is_missing":
                boolean_mapped_skip_values = test_df.isnull().any(axis=1)
            elif ignore_row_if == "never":
                boolean_mapped_skip_values = pd.Series([False] * len(test_df))
            else:
                raise ValueError(f"Unknown value of ignore_row_if: {ignore_row_if}")
            validate_mostly(mostly)
            # func evaluates the non-skipped sub-frame row-wise to booleans.
            boolean_mapped_success_values = func(
                self, test_df[boolean_mapped_skip_values == False], *args, **kwargs
            )
            success_count = boolean_mapped_success_values.sum()
            nonnull_count = (~boolean_mapped_skip_values).sum()
            element_count = len(test_df)
            unexpected_list = test_df[
                (boolean_mapped_skip_values == False)
                & (boolean_mapped_success_values == False)
            ]
            unexpected_index_list = list(unexpected_list.index)
            success, percent_success = self._calc_map_expectation_success(
                success_count, nonnull_count, mostly
            )
            return_obj = self._format_map_output(
                result_format,
                success,
                element_count,
                nonnull_count,
                len(unexpected_list),
                unexpected_list.to_dict(orient="records"),
                unexpected_index_list,
            )
            return return_obj

        inner_wrapper.__name__ = func.__name__
        inner_wrapper.__doc__ = func.__doc__
        return inner_wrapper
class PandasDataset(MetaPandasDataset, pd.DataFrame):
"""
PandasDataset instantiates the great_expectations Expectations API as a subclass of a pandas.DataFrame.
For the full API reference, please see :func:`Dataset <great_expectations.data_asset.dataset.Dataset>`
Notes:
1. Samples and Subsets of PandaDataSet have ALL the expectations of the original \
data frame unless the user specifies the ``discard_subset_failing_expectations = True`` \
property on the original data frame.
2. Concatenations, joins, and merges of PandaDataSets contain NO expectations (since no autoinspection
is performed by default).
--ge-feature-maturity-info--
id: validation_engine_pandas
title: Validation Engine - Pandas
icon:
short_description: Use Pandas DataFrame to validate data
description: Use Pandas DataFrame to validate data
how_to_guide_url:
maturity: Production
maturity_details:
api_stability: Stable
implementation_completeness: Complete
unit_test_coverage: Complete
integration_infrastructure_test_coverage: N/A -> see relevant Datasource evaluation
documentation_completeness: Complete
bug_risk: Low
expectation_completeness: Complete
--ge-feature-maturity-info--
"""
# this is necessary to subclass pandas in a proper way.
# NOTE: specifying added properties in this way means that they will NOT be carried over when
# the dataframe is manipulated, which we might want. To specify properties that are carried over
# to manipulation results, we would just use `_metadata = ['row_count', ...]` here. The most likely
# case is that we want the former, but also want to re-initialize these values to None so we don't
# get an attribute error when trying to access them (I think this could be done in __finalize__?)
_internal_names = pd.DataFrame._internal_names + [
"_batch_kwargs",
"_batch_markers",
"_batch_parameters",
"_batch_id",
"_expectation_suite",
"_config",
"caching",
"default_expectation_args",
"discard_subset_failing_expectations",
]
_internal_names_set = set(_internal_names)
_supports_row_condition = True
# We may want to expand or alter support for subclassing dataframes in the future:
# See http://pandas.pydata.org/pandas-docs/stable/extending.html#extending-subclassing-pandas
    def __init__(self, *args, **kwargs) -> None:
        # Forward everything to the DataFrame/MetaPandasDataset chain first,
        # then record whether derived subsets should drop failing expectations.
        super().__init__(*args, **kwargs)
        # NOTE(review): 'discard_subset_failing_expectations' is read from
        # kwargs but kwargs is also forwarded verbatim to super().__init__
        # above -- presumably callers never actually pass it; confirm before
        # relying on this keyword.
        self.discard_subset_failing_expectations = kwargs.get(
            "discard_subset_failing_expectations", False
        )
@property
def _constructor(self):
return self.__class__
    def __finalize__(self, other, method=None, **kwargs):
        # pandas calls __finalize__ to propagate metadata from `other` onto a
        # newly constructed result (copy, slice, arithmetic result, ...).
        if isinstance(other, PandasDataset):
            # Carry the expectation suite over to the derived dataset.
            self._initialize_expectations(other._expectation_suite)
            # If other was coerced to be a PandasDataset (e.g. via _constructor call during self.copy() operation)
            # then it may not have discard_subset_failing_expectations set. Default to self value
            self.discard_subset_failing_expectations = getattr(
                other,
                "discard_subset_failing_expectations",
                self.discard_subset_failing_expectations,
            )
            if self.discard_subset_failing_expectations:
                self.discard_failing_expectations()
        super().__finalize__(other, method, **kwargs)
        return self
def _apply_row_condition(self, row_condition, condition_parser):
if condition_parser not in ["python", "pandas"]:
raise ValueError(
"condition_parser is required when setting a row_condition,"
" and must be 'python' or 'pandas'"
)
else:
return self.query(row_condition, parser=condition_parser)
def get_row_count(self):
return self.shape[0]
def get_column_count(self):
return self.shape[1]
@override
def get_table_columns(self) -> List[str]:
return list(self.columns)
def get_column_sum(self, column):
return self[column].sum()
def get_column_max(self, column, parse_strings_as_datetimes=False):
temp_column = self[column].dropna()
if parse_strings_as_datetimes:
temp_column = temp_column.map(parse)
return temp_column.max()
def get_column_min(self, column, parse_strings_as_datetimes=False):
temp_column = self[column].dropna()
if parse_strings_as_datetimes:
temp_column = temp_column.map(parse)
return temp_column.min()
def get_column_mean(self, column):
return self[column].mean()
def get_column_nonnull_count(self, column):
series = self[column]
null_indexes = series.isnull()
nonnull_values = series[null_indexes == False]
return len(nonnull_values)
def get_column_value_counts(self, column, sort="value", collate=None):
if sort not in ["value", "count", "none"]:
raise ValueError("sort must be either 'value', 'count', or 'none'")
if collate is not None:
raise ValueError("collate parameter is not supported in PandasDataset")
counts = self[column].value_counts()
if sort == "value":
try:
counts.sort_index(inplace=True)
except TypeError:
# Having values of multiple types in a object dtype column (e.g., strings and floats)
# raises a TypeError when the sorting method performs comparisons.
if self[column].dtype == object:
counts.index = counts.index.astype(str)
counts.sort_index(inplace=True)
elif sort == "counts":
counts.sort_values(inplace=True)
counts.name = "count"
counts.index.name = "value"
return counts
def get_column_unique_count(self, column):
return self.get_column_value_counts(column).shape[0]
def get_column_modes(self, column):
return list(self[column].mode().values)
def get_column_median(self, column):
return self[column].median()
def get_column_quantiles(self, column, quantiles, allow_relative_error=False):
interpolation_options = ("linear", "lower", "higher", "midpoint", "nearest")
if not allow_relative_error:
allow_relative_error = "nearest"
if allow_relative_error not in interpolation_options:
raise ValueError(
f"If specified for pandas, allow_relative_error must be one an allowed value for the 'interpolation'"
f"parameter of .quantile() (one of {interpolation_options})"
)
return (
self[column]
.quantile(quantiles, interpolation=allow_relative_error)
.tolist()
)
def get_column_stdev(self, column):
return self[column].std()
def get_column_hist(self, column, bins):
hist, bin_edges = np.histogram(self[column], bins, density=False)
return list(hist)
def get_column_count_in_range( # noqa: PLR0913
self, column, min_val=None, max_val=None, strict_min=False, strict_max=True
):
# TODO this logic could probably go in the non-underscore version if we want to cache
if min_val is None and max_val is None:
raise ValueError("Must specify either min or max value")
if min_val is not None and max_val is not None and min_val > max_val:
raise ValueError("Min value must be <= to max value")
result = self[column]
if min_val is not None:
if strict_min:
result = result[result > min_val]
else:
result = result[result >= min_val]
if max_val is not None:
if strict_max:
result = result[result < max_val]
else:
result = result[result <= max_val]
return len(result)
def get_crosstab( # noqa: PLR0913
self,
column_A,
column_B,
bins_A=None,
bins_B=None,
n_bins_A=None,
n_bins_B=None,
):
"""Get crosstab of column_A and column_B, binning values if necessary"""
series_A = self.get_binned_values(self[column_A], bins_A, n_bins_A)
series_B = self.get_binned_values(self[column_B], bins_B, n_bins_B)
return pd.crosstab(series_A, columns=series_B)
    def get_binned_values(self, series, bins, n_bins):  # noqa: PLR0912
        """
        Get binned values of series.
        Args:
            Series (pd.Series): Input series
            bins (list):
                Bins for the series. List of numeric if series is numeric or list of list
                of series values else.
            n_bins (int): Number of bins. Ignored if bins is not None.
        """
        if n_bins is None:
            n_bins = 10
        if series.dtype in ["int", "float"]:
            # Numeric path: produce an ordered Categorical of interval labels.
            if bins is not None:
                bins = sorted(np.unique(bins))
                # Widen user-supplied bins so every value falls inside.
                if np.min(series) < bins[0]:
                    bins = [np.min(series)] + bins
                if np.max(series) > bins[-1]:
                    bins = bins + [np.max(series)]
            if bins is None:
                bins = np.histogram_bin_edges(series[series.notnull()], bins=n_bins)
            # NOTE(review): when explicit numeric bins are supplied, `bins` is a
            # Python list here, so `bins[-1] = ...` and `bins[1:] - bins[:-1]`
            # below look like they would raise TypeError -- verify this path.
            # Make sure max of series is included in rightmost bin
            bins[-1] = np.nextafter(bins[-1], bins[-1] + 1)
            # Create labels for returned series
            # Used in e.g. crosstab that is printed as observed value in data docs.
            precision = int(np.log10(min(bins[1:] - bins[:-1]))) + 2
            labels = [
                f"[{round(lower, precision)}, {round(upper, precision)})"
                for lower, upper in zip(bins[:-1], bins[1:])
            ]
            if any(np.isnan(series)):
                # Missing get digitized into bin = n_bins+1
                labels += ["(missing)"]
            return pd.Categorical.from_codes(
                codes=np.digitize(series, bins=bins) - 1,
                categories=labels,
                ordered=True,
            )
        else:
            # Non-numeric path: group rare values as "(other)" or map explicit
            # value groups to joined labels, then categorize.
            if bins is None:
                value_counts = series.value_counts(sort=True)
                if len(value_counts) < n_bins + 1:
                    return series.fillna("(missing)")
                else:
                    other_values = sorted(value_counts.index[n_bins:])
                    replace = {value: "(other)" for value in other_values}
            else:
                replace = {}
                for x in bins:
                    replace.update({value: ", ".join(x) for value in x})
            return (
                series.replace(to_replace=replace)
                .fillna("(missing)")
                .astype("category")
            )
### Expectation methods ###
    @DocInherit
    @MetaPandasDataset.column_map_expectation
    def expect_column_values_to_be_unique(  # noqa: PLR0913
        self,
        column,
        mostly=None,
        result_format=None,
        row_condition=None,
        condition_parser=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        # keep=False marks every member of a duplicate group, so the negation
        # is True only for values occurring exactly once.
        return ~column.duplicated(keep=False)
    @DocInherit
    @MetaPandasDataset.column_map_expectation
    def expect_column_values_to_not_be_null(  # noqa: PLR0913
        self,
        column,
        mostly=None,
        result_format=None,
        row_condition=None,
        condition_parser=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
        include_nulls=True,
    ):
        # True for every non-null value; the decorator special-cases this
        # expectation so nulls are not pre-filtered out of `column`.
        return ~column.isnull()
    @DocInherit
    @MetaPandasDataset.column_map_expectation
    def expect_column_values_to_be_null(  # noqa: PLR0913
        self,
        column,
        mostly=None,
        result_format=None,
        row_condition=None,
        condition_parser=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        # True exactly where the value is null; see the decorator's
        # special-casing of null-expectations for how counts are formatted.
        return column.isnull()
    @DocInherit
    def expect_column_values_to_be_of_type(
        self,
        column,
        type_,
        **kwargs
        # Since we've now received the default arguments *before* the expectation decorator, we need to
        # ensure we only pass what we actually received. Hence, we'll use kwargs
        # mostly=None,
        # result_format=None,
        # row_condition=None, condition_parser=None, include_config=None, catch_exceptions=None, meta=None
    ):
        """
        The pandas implementation of this expectation takes kwargs mostly, result_format, include_config,
        catch_exceptions, and meta as other expectations, however it declares **kwargs because it needs to
        be able to fork into either aggregate or map semantics depending on the column type (see below).
        In Pandas, columns *may* be typed, or they may be of the generic "object" type which can include rows with
        different storage types in the same column.
        To respect that implementation, the expect_column_values_to_be_of_type expectations will first attempt to
        use the column dtype information to determine whether the column is restricted to the provided type. If that
        is possible, then expect_column_values_to_be_of_type will return aggregate information including an
        observed_value, similarly to other backends.
        If it is not possible (because the column dtype is "object" but a more specific type was specified), then
        PandasDataset will use column map semantics: it will return map expectation results and
        check each value individually, which can be substantially slower.
        Unfortunately, the "object" type is also used to contain any string-type columns (including 'str' and
        numpy 'string_' (bytes)); consequently, it is not possible to test for string columns using aggregate semantics.
        """
        # Short-circuit if the dtype tells us; in that case use column-aggregate (vs map) semantics
        if (
            self[column].dtype != "object"
            or type_ is None
            or type_ in ["object", "object_", "O"]
        ):
            res = self._expect_column_values_to_be_of_type__aggregate(
                column, type_, **kwargs
            )
            # Note: this logic is similar to the logic in _append_expectation for deciding when to overwrite an
            # existing expectation, but it should be definitely kept in sync
            # We do not need this bookkeeping if we are in an active validation:
            if self._active_validation:
                return res
            # First, if there is an existing expectation of this type, delete it. Then change the one we created to be
            # of the proper expectation_type
            existing_expectations = self._expectation_suite.find_expectation_indexes(
                ExpectationConfiguration(
                    expectation_type="expect_column_values_to_be_of_type",
                    kwargs={"column": column},
                )
            )
            if len(existing_expectations) == 1:
                self._expectation_suite.expectations.pop(existing_expectations[0])
            # Now, rename the expectation we just added
            # (delegating to the private helper registered it under the helper's
            # name; rewrite the stored config so the suite records the public name)
            new_expectations = self._expectation_suite.find_expectation_indexes(
                ExpectationConfiguration(
                    expectation_type="_expect_column_values_to_be_of_type__aggregate",
                    kwargs={"column": column},
                )
            )
            assert len(new_expectations) == 1
            old_config = self._expectation_suite.expectations[new_expectations[0]]
            new_config = ExpectationConfiguration(
                expectation_type="expect_column_values_to_be_of_type",
                kwargs=old_config.kwargs,
                meta=old_config.meta,
                success_on_last_run=old_config.success_on_last_run,
            )
            self._expectation_suite.expectations[new_expectations[0]] = new_config
        else:
            res = self._expect_column_values_to_be_of_type__map(column, type_, **kwargs)
            # Note: this logic is similar to the logic in _append_expectation for deciding when to overwrite an
            # existing expectation, but it should be definitely kept in sync
            # We do not need this bookkeeping if we are in an active validation:
            if self._active_validation:
                return res
            # First, if there is an existing expectation of this type, delete it. Then change the one we created to be
            # of the proper expectation_type
            existing_expectations = self._expectation_suite.find_expectation_indexes(
                ExpectationConfiguration(
                    expectation_type="expect_column_values_to_be_of_type",
                    kwargs={"column": column},
                )
            )
            if len(existing_expectations) == 1:
                self._expectation_suite.expectations.pop(existing_expectations[0])
            # Now, rename the expectation we just added
            new_expectations = self._expectation_suite.find_expectation_indexes(
                ExpectationConfiguration(
                    expectation_type="_expect_column_values_to_be_of_type__map",
                    kwargs={"column": column},
                )
            )
            assert len(new_expectations) == 1
            old_config = self._expectation_suite.expectations[new_expectations[0]]
            new_config = ExpectationConfiguration(
                expectation_type="expect_column_values_to_be_of_type",
                kwargs=old_config.kwargs,
                meta=old_config.meta,
                success_on_last_run=old_config.success_on_last_run,
            )
            self._expectation_suite.expectations[new_expectations[0]] = new_config
        return res
    @DataAsset.expectation(["column", "type_", "mostly"])
    def _expect_column_values_to_be_of_type__aggregate(  # noqa: PLR0913
        self,
        column,
        type_,
        mostly=None,
        result_format=None,
        row_condition=None,
        condition_parser=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Aggregate-semantics type check: compare the column *dtype* (not each
        value) against the candidate types resolved from the ``type_`` name."""
        if mostly is not None:
            raise ValueError(
                "PandasDataset cannot support mostly for a column with a non-object dtype."
            )
        # A None type_ imposes no constraint.
        if type_ is None:
            success = True
        else:
            comp_types = []
            try:
                # Recognized numpy dtype names contribute their scalar type.
                comp_types.append(np.dtype(type_).type)
            except TypeError:
                # Fall back to pandas dtype classes exposed on pd / pd.core.dtypes.dtypes.
                try:
                    pd_type = getattr(pd, type_)
                    if isinstance(pd_type, type):
                        comp_types.append(pd_type)
                except AttributeError:
                    pass
                try:
                    pd_type = getattr(pd.core.dtypes.dtypes, type_)
                    if isinstance(pd_type, type):
                        comp_types.append(pd_type)
                except AttributeError:
                    pass
            # Native Python equivalents are always considered as well.
            native_type = self._native_type_type_map(type_)
            if native_type is not None:
                comp_types.extend(native_type)
            success = self[column].dtype.type in comp_types
        return {
            "success": success,
            "result": {"observed_value": self[column].dtype.type.__name__},
        }
@staticmethod
def _native_type_type_map(type_): # noqa: PLR0911
# We allow native python types in cases where the underlying type is "object":
if type_.lower() == "none":
return (type(None),)
elif type_.lower() == "bool":
return (bool,)
elif type_.lower() in ["int", "long"]:
return (int,)
elif type_.lower() == "float":
return (float,)
elif type_.lower() == "bytes":
return (bytes,)
elif type_.lower() == "complex":
return (complex,)
elif type_.lower() in ["str", "string_types"]:
return (str,)
elif type_.lower() == "list":
return (list,)
elif type_.lower() == "dict":
return (dict,)
elif type_.lower() == "unicode":
return None
@MetaPandasDataset.column_map_expectation
def _expect_column_values_to_be_of_type__map( # noqa: PLR0913
self,
column,
type_,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
comp_types = []
try:
comp_types.append(np.dtype(type_).type)
except TypeError:
try:
pd_type = getattr(pd, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
try:
pd_type = getattr(pd.core.dtypes.dtypes, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
native_type = self._native_type_type_map(type_)
if native_type is not None:
comp_types.extend(native_type)
if len(comp_types) < 1:
raise ValueError(f"Unrecognized numpy/python type: {type_}")
return column.map(lambda x: isinstance(x, tuple(comp_types)))
    @DocInherit
    def expect_column_values_to_be_in_type_list(
        self,
        column,
        type_list,
        **kwargs
        # Since we've now received the default arguments *before* the expectation decorator, we need to
        # ensure we only pass what we actually received. Hence, we'll use kwargs
        # mostly=None,
        # result_format = None,
        # row_condition=None, condition_parser=None, include_config=None, catch_exceptions=None, meta=None
    ):
        """
        The pandas implementation of this expectation takes kwargs mostly, result_format, include_config,
        catch_exceptions, and meta as other expectations, however it declares **kwargs because it needs to
        be able to fork into either aggregate or map semantics depending on the column type (see below).
        In Pandas, columns *may* be typed, or they may be of the generic "object" type which can include rows with
        different storage types in the same column.
        To respect that implementation, the expect_column_values_to_be_of_type expectations will first attempt to
        use the column dtype information to determine whether the column is restricted to the provided type. If that
        is possible, then expect_column_values_to_be_of_type will return aggregate information including an
        observed_value, similarly to other backends.
        If it is not possible (because the column dtype is "object" but a more specific type was specified), then
        PandasDataset will use column map semantics: it will return map expectation results and
        check each value individually, which can be substantially slower.
        Unfortunately, the "object" type is also used to contain any string-type columns (including 'str' and
        numpy 'string_' (bytes)); consequently, it is not possible to test for string columns using aggregate semantics.
        """
        # Short-circuit if the dtype tells us; in that case use column-aggregate (vs map) semantics
        if self[column].dtype != "object" or type_list is None:
            res = self._expect_column_values_to_be_in_type_list__aggregate(
                column, type_list, **kwargs
            )
            # Note: this logic is similar to the logic in _append_expectation for deciding when to overwrite an
            # existing expectation, but it should be definitely kept in sync
            # We do not need this bookkeeping if we are in an active validation:
            if self._active_validation:
                return res
            # First, if there is an existing expectation of this type, delete it. Then change the one we created to be
            # of the proper expectation_type
            existing_expectations = self._expectation_suite.find_expectation_indexes(
                ExpectationConfiguration(
                    expectation_type="expect_column_values_to_be_in_type_list",
                    kwargs={"column": column},
                )
            )
            if len(existing_expectations) == 1:
                self._expectation_suite.expectations.pop(existing_expectations[0])
            # Now, rename the expectation we just added so the suite records the
            # public expectation name rather than the private helper's name
            new_expectations = self._expectation_suite.find_expectation_indexes(
                ExpectationConfiguration(
                    expectation_type="_expect_column_values_to_be_in_type_list__aggregate",
                    kwargs={"column": column},
                )
            )
            assert len(new_expectations) == 1
            old_config = self._expectation_suite.expectations[new_expectations[0]]
            new_config = ExpectationConfiguration(
                expectation_type="expect_column_values_to_be_in_type_list",
                kwargs=old_config.kwargs,
                meta=old_config.meta,
                success_on_last_run=old_config.success_on_last_run,
            )
            self._expectation_suite.expectations[new_expectations[0]] = new_config
        else:
            res = self._expect_column_values_to_be_in_type_list__map(
                column, type_list, **kwargs
            )
            # Note: this logic is similar to the logic in _append_expectation for deciding when to overwrite an
            # existing expectation, but it should be definitely kept in sync
            # We do not need this bookkeeping if we are in an active validation:
            if self._active_validation:
                return res
            # First, if there is an existing expectation of this type, delete it. Then change the one we created to be
            # of the proper expectation_type
            existing_expectations = self._expectation_suite.find_expectation_indexes(
                ExpectationConfiguration(
                    expectation_type="expect_column_values_to_be_in_type_list",
                    kwargs={"column": column},
                )
            )
            if len(existing_expectations) == 1:
                self._expectation_suite.expectations.pop(existing_expectations[0])
            # Now, rename the expectation we just added
            new_expectations = self._expectation_suite.find_expectation_indexes(
                ExpectationConfiguration(
                    expectation_type="_expect_column_values_to_be_in_type_list__map",
                    kwargs={"column": column},
                )
            )
            assert len(new_expectations) == 1
            old_config = self._expectation_suite.expectations[new_expectations[0]]
            new_config = ExpectationConfiguration(
                expectation_type="expect_column_values_to_be_in_type_list",
                kwargs=old_config.kwargs,
                meta=old_config.meta,
                success_on_last_run=old_config.success_on_last_run,
            )
            self._expectation_suite.expectations[new_expectations[0]] = new_config
        return res
    @MetaPandasDataset.expectation(["column", "type_list", "mostly"])
    def _expect_column_values_to_be_in_type_list__aggregate(  # noqa: PLR0913, PLR0912
        self,
        column,
        type_list,
        mostly=None,
        result_format=None,
        row_condition=None,
        condition_parser=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Aggregate-semantics type-list check: compare the column *dtype*
        against candidate types resolved from every name in ``type_list``."""
        if mostly is not None:
            raise ValueError(
                "PandasDataset cannot support mostly for a column with a non-object dtype."
            )
        # A None type_list imposes no constraint.
        if type_list is None:
            success = True
        else:
            comp_types = []
            for type_ in type_list:
                try:
                    # Recognized numpy dtype names contribute their scalar type.
                    comp_types.append(np.dtype(type_).type)
                except TypeError:
                    # Fall back to pandas dtype classes.
                    try:
                        pd_type = getattr(pd, type_)
                        if isinstance(pd_type, type):
                            comp_types.append(pd_type)
                    except AttributeError:
                        pass
                    try:
                        pd_type = getattr(pd.core.dtypes.dtypes, type_)
                        if isinstance(pd_type, type):
                            comp_types.append(pd_type)
                    except AttributeError:
                        pass
                # Native Python equivalents are always considered as well.
                native_type = self._native_type_type_map(type_)
                if native_type is not None:
                    comp_types.extend(native_type)
            success = self[column].dtype.type in comp_types
        return {
            "success": success,
            "result": {"observed_value": self[column].dtype.type.__name__},
        }
@MetaPandasDataset.column_map_expectation
def _expect_column_values_to_be_in_type_list__map( # noqa: PLR0913
self,
column,
type_list,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
comp_types = []
for type_ in type_list:
try:
comp_types.append(np.dtype(type_).type)
except TypeError:
try:
pd_type = getattr(pd, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
try:
pd_type = getattr(pd.core.dtypes.dtypes, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
native_type = self._native_type_type_map(type_)
if native_type is not None:
comp_types.extend(native_type)
if len(comp_types) < 1:
raise ValueError(f"No recognized numpy/python type in list: {type_list}")
return column.map(lambda x: isinstance(x, tuple(comp_types)))
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_in_set( # noqa: PLR0913
self,
column,
value_set,
mostly=None,
parse_strings_as_datetimes=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if value_set is None:
# Vacuously true
return np.ones(len(column), dtype=np.bool_)
if parse_strings_as_datetimes:
parsed_value_set = self._parse_value_set(value_set)
else:
parsed_value_set = value_set
return column.isin(parsed_value_set)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_not_be_in_set( # noqa: PLR0913
self,
column,
value_set,
mostly=None,
parse_strings_as_datetimes=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if parse_strings_as_datetimes:
parsed_value_set = self._parse_value_set(value_set)
else:
parsed_value_set = value_set
return ~column.isin(parsed_value_set)
    @DocInherit
    @MetaPandasDataset.column_map_expectation
    def expect_column_values_to_be_between(  # noqa: PLR0913
        self,
        column,
        min_value=None,
        max_value=None,
        strict_min=False,
        strict_max=False,  # tolerance=1e-9,
        parse_strings_as_datetimes=None,
        output_strftime_format=None,
        allow_cross_type_comparisons=None,
        mostly=None,
        row_condition=None,
        condition_parser=None,
        result_format=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Map expectation: each value lies between min_value and max_value
        (bounds inclusive unless strict_min/strict_max)."""
        if min_value is None and max_value is None:
            raise ValueError("min_value and max_value cannot both be None")
        # if strict_min and min_value:
        #     min_value += tolerance
        #
        # if strict_max and max_value:
        #     max_value -= tolerance
        if parse_strings_as_datetimes:
            # tolerance = timedelta(days=tolerance)
            if min_value:
                min_value = parse(min_value)
            if max_value:
                max_value = parse(max_value)
            # Best-effort parse of the column; non-string columns fall through
            # unchanged (TypeError from dateutil's parse).
            try:
                temp_column = column.map(parse)
            except TypeError:
                temp_column = column
        else:
            temp_column = column
        if min_value is not None and max_value is not None and min_value > max_value:
            raise ValueError("min_value cannot be greater than max_value")
        def comparator_factory(comparator, comparison_value):
            # A None bound yields a comparator that accepts everything.
            def new_comparator(value):
                return comparator(value, comparison_value)
            def always_true(value):
                return True
            return always_true if comparison_value is None else new_comparator
        min_comparator = comparator_factory(gt if strict_min else ge, min_value)
        max_comparator = comparator_factory(lt if strict_max else le, max_value)
        def cross_type_comparator(val):
            # Per-value fallback: values whose type cannot be compared against
            # the bounds are simply treated as failing.
            try:
                return min_comparator(val) & max_comparator(val)
            except TypeError:
                return False
        # Vectorized comparison first; on a type mismatch, either fall back to
        # the per-value cross-type comparison or raise.
        try:
            return min_comparator(temp_column) & max_comparator(temp_column)
        except TypeError:
            if allow_cross_type_comparisons:
                return pd.Series(cross_type_comparator(val) for val in temp_column)
            raise TypeError(
                "Column values, min_value, and max_value must either be None or of the same type."
            )
    @DocInherit
    @MetaPandasDataset.column_map_expectation
    def expect_column_values_to_be_increasing(  # noqa: PLR0913
        self,
        column,
        strictly=None,
        parse_strings_as_datetimes=None,
        output_strftime_format=None,
        mostly=None,
        row_condition=None,
        condition_parser=None,
        result_format=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Map expectation: each value is >= its predecessor (> when ``strictly``)."""
        if parse_strings_as_datetimes:
            temp_column = column.map(parse)
            col_diff = temp_column.diff()
            # The first element is null, so it gets a bye and is always treated as True
            # NOTE(review): label-based assignment; appears to assume a 0-based
            # index — confirm behavior for filtered/re-indexed columns.
            col_diff[0] = pd.Timedelta(1)
            if strictly:
                return col_diff > pd.Timedelta(0)
            else:
                return col_diff >= pd.Timedelta(0)
        else:
            col_diff = column.diff()
            # The first element is null, so it gets a bye and is always treated as True
            col_diff[col_diff.isnull()] = 1
            if strictly:
                return col_diff > 0
            else:
                return col_diff >= 0
    @DocInherit
    @MetaPandasDataset.column_map_expectation
    def expect_column_values_to_be_decreasing(  # noqa: PLR0913
        self,
        column,
        strictly=None,
        parse_strings_as_datetimes=None,
        output_strftime_format=None,
        mostly=None,
        row_condition=None,
        condition_parser=None,
        result_format=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Map expectation: each value is <= its predecessor (< when ``strictly``)."""
        if parse_strings_as_datetimes:
            temp_column = column.map(parse)
            col_diff = temp_column.diff()
            # The first element is null, so it gets a bye and is always treated as True
            # NOTE(review): label-based assignment; appears to assume a 0-based
            # index — confirm behavior for filtered/re-indexed columns.
            col_diff[0] = pd.Timedelta(-1)
            if strictly:
                return col_diff < pd.Timedelta(0)
            else:
                return col_diff <= pd.Timedelta(0)
        else:
            col_diff = column.diff()
            # The first element is null, so it gets a bye and is always treated as True
            col_diff[col_diff.isnull()] = -1
            if strictly:
                return col_diff < 0
            else:
                return col_diff <= 0
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_value_lengths_to_be_between( # noqa: PLR0913
self,
column,
min_value=None,
max_value=None,
mostly=None,
row_condition=None,
condition_parser=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
# Assert that min_value and max_value are integers
try:
if min_value is not None and not float(min_value).is_integer():
raise ValueError("min_value and max_value must be integers")
if max_value is not None and not float(max_value).is_integer():
raise ValueError("min_value and max_value must be integers")
except ValueError:
raise ValueError("min_value and max_value must be integers")
column_lengths = column.astype(str).str.len()
if min_value is not None and max_value is not None:
return column_lengths.between(min_value, max_value)
elif min_value is None and max_value is not None:
return column_lengths <= max_value
elif min_value is not None and max_value is None:
return column_lengths >= min_value
else:
return False
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_value_lengths_to_equal( # noqa: PLR0913
self,
column,
value,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
return column.str.len() == value
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_match_regex( # noqa: PLR0913
self,
column,
regex,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
return column.astype(str).str.contains(regex)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_not_match_regex( # noqa: PLR0913
self,
column,
regex,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
return ~column.astype(str).str.contains(regex)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_match_regex_list( # noqa: PLR0913
self,
column,
regex_list,
match_on="any",
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
regex_matches = []
for regex in regex_list:
regex_matches.append(column.astype(str).str.contains(regex))
regex_match_df = pd.concat(regex_matches, axis=1, ignore_index=True)
if match_on == "any":
return regex_match_df.any(axis="columns")
elif match_on == "all":
return regex_match_df.all(axis="columns")
else:
raise ValueError("match_on must be either 'any' or 'all'")
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_not_match_regex_list( # noqa: PLR0913
self,
column,
regex_list,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
regex_matches = []
for regex in regex_list:
regex_matches.append(column.astype(str).str.contains(regex))
regex_match_df = pd.concat(regex_matches, axis=1, ignore_index=True)
return ~regex_match_df.any(axis="columns")
    @DocInherit
    @MetaPandasDataset.column_map_expectation
    def expect_column_values_to_match_strftime_format(  # noqa: PLR0913
        self,
        column,
        strftime_format,
        mostly=None,
        result_format=None,
        row_condition=None,
        condition_parser=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Map expectation: each string value parses under ``strftime_format``."""
        # Below is a simple validation that the provided format can both format and parse a datetime object.
        # %D is an example of a format that can format but not parse, e.g.
        try:
            datetime.strptime(  # noqa: DTZ007
                datetime.strftime(datetime.now(), strftime_format),  # noqa: DTZ005
                strftime_format,
            )
        except ValueError as e:
            raise ValueError(f"Unable to use provided strftime_format. {e!s}")
        def is_parseable_by_format(val):
            # Non-string values are a usage error (e.g. a column already
            # converted to datetimes), so raise rather than fail silently.
            try:
                datetime.strptime(val, strftime_format)  # noqa: DTZ007
                return True
            except TypeError:
                raise TypeError(
                    "Values passed to expect_column_values_to_match_strftime_format must be of type string.\nIf you want to validate a column of dates or timestamps, please call the expectation before converting from string format."
                )
            except ValueError:
                return False
        return column.map(is_parseable_by_format)
    @DocInherit
    @MetaPandasDataset.column_map_expectation
    def expect_column_values_to_be_dateutil_parseable(  # noqa: PLR0913
        self,
        column,
        mostly=None,
        result_format=None,
        row_condition=None,
        condition_parser=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Map expectation: each string value is parseable by dateutil's parser."""
        def is_parseable(val):
            try:
                # Only strings are acceptable inputs; anything else is a usage
                # error and raises rather than failing silently.
                if type(val) != str:  # noqa: E721
                    raise TypeError(
                        "Values passed to expect_column_values_to_be_dateutil_parseable must be of type string.\nIf you want to validate a column of dates or timestamps, please call the expectation before converting from string format."
                    )
                parse(val)
                return True
            except (ValueError, OverflowError):
                # dateutil raises ValueError for unparseable strings and
                # OverflowError for out-of-range components.
                return False
        return column.map(is_parseable)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_json_parseable( # noqa: PLR0913
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
def is_json(val):
try:
json.loads(val)
return True
except:
return False
return column.map(is_json)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_match_json_schema( # noqa: PLR0913
self,
column,
json_schema,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
def matches_json_schema(val):
try:
val_json = json.loads(val)
jsonschema.validate(val_json, json_schema)
# jsonschema.validate raises an error if validation fails.
# So if we make it this far, we know that the validation succeeded.
return True
except jsonschema.ValidationError:
return False
except jsonschema.SchemaError:
raise
except:
raise
return column.map(matches_json_schema)
    @DocInherit
    @MetaPandasDataset.column_aggregate_expectation
    def expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than(  # noqa: PLR0913
        self,
        column,
        distribution,
        p_value=0.05,
        params=None,
        result_format=None,
        row_condition=None,
        condition_parser=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Aggregate expectation: the Kolmogorov-Smirnov p-value of the column
        against the named scipy distribution (with ``params``) is >= ``p_value``."""
        column = self[column]
        if p_value <= 0 or p_value >= 1:
            raise ValueError("p_value must be between 0 and 1 exclusive")
        # Validate params
        try:
            validate_distribution_parameters(distribution=distribution, params=params)
        except ValueError as e:
            raise e
        # Format arguments for scipy.kstest
        if isinstance(params, dict):
            positional_parameters = _scipy_distribution_positional_args_from_dict(
                distribution, params
            )
        else:
            positional_parameters = params
        # K-S Test
        # kstest returns (statistic, pvalue); success is judged on the p-value.
        ks_result = stats.kstest(column, distribution, args=positional_parameters)
        return {
            "success": ks_result[1] >= p_value,
            "result": {
                "observed_value": ks_result[1],
                "details": {
                    "expected_params": positional_parameters,
                    "observed_ks_result": ks_result,
                },
            },
        }
    @DocInherit
    @MetaPandasDataset.column_aggregate_expectation
    def expect_column_bootstrapped_ks_test_p_value_to_be_greater_than(  # noqa: PLR0913
        self,
        column,
        partition_object=None,
        p=0.05,
        bootstrap_samples=None,
        bootstrap_sample_size=None,
        result_format=None,
        row_condition=None,
        condition_parser=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Aggregate expectation: a bootstrapped KS test of the column against a
        continuous partition object yields a p-value estimate greater than ``p``."""
        column = self[column]
        if not is_valid_continuous_partition_object(partition_object):
            raise ValueError("Invalid continuous partition object.")
        # TODO: consider changing this into a check that tail_weights does not exist exclusively, by moving this check into is_valid_continuous_partition_object
        if (partition_object["bins"][0] == -np.inf) or (
            partition_object["bins"][-1] == np.inf
        ):
            raise ValueError("Partition endpoints must be finite.")
        if (
            "tail_weights" in partition_object
            and np.sum(partition_object["tail_weights"]) > 0
        ):
            raise ValueError(
                "Partition cannot have tail weights -- endpoints must be finite."
            )
        # Reference CDF implied by the partition weights, interpolated linearly
        # between bin edges.
        test_cdf = np.append(np.array([0]), np.cumsum(partition_object["weights"]))
        def estimated_cdf(x):
            return np.interp(x, partition_object["bins"], test_cdf)
        if bootstrap_samples is None:
            bootstrap_samples = 1000
        if bootstrap_sample_size is None:
            # Sampling too many elements (or not bootstrapping) will make the test too sensitive to the fact that we've
            # compressed via a partition.
            # Sampling too few elements will make the test insensitive to significant differences, especially
            # for nonoverlapping ranges.
            bootstrap_sample_size = len(partition_object["weights"]) * 2
        # One KS p-value per bootstrap resample of the column.
        results = [
            stats.kstest(
                np.random.choice(column, size=bootstrap_sample_size), estimated_cdf
            )[1]
            for _ in range(bootstrap_samples)
        ]
        # Laplace-smoothed fraction of resamples whose p-value met the threshold.
        test_result = (1 + sum(x >= p for x in results)) / (bootstrap_samples + 1)
        hist, bin_edges = np.histogram(column, partition_object["bins"])
        below_partition = len(np.where(column < partition_object["bins"][0])[0])
        above_partition = len(np.where(column > partition_object["bins"][-1])[0])
        # Expand observed partition to report, if necessary
        if below_partition > 0 and above_partition > 0:
            observed_bins = (
                [np.min(column)] + partition_object["bins"] + [np.max(column)]
            )
            observed_weights = np.concatenate(
                ([below_partition], hist, [above_partition])
            ) / len(column)
        elif below_partition > 0:
            observed_bins = [np.min(column)] + partition_object["bins"]
            observed_weights = np.concatenate(([below_partition], hist)) / len(column)
        elif above_partition > 0:
            observed_bins = partition_object["bins"] + [np.max(column)]
            observed_weights = np.concatenate((hist, [above_partition])) / len(column)
        else:
            observed_bins = partition_object["bins"]
            observed_weights = hist / len(column)
        observed_cdf_values = np.cumsum(observed_weights)
        return_obj = {
            "success": test_result > p,
            "result": {
                "observed_value": test_result,
                "details": {
                    "bootstrap_samples": bootstrap_samples,
                    "bootstrap_sample_size": bootstrap_sample_size,
                    "observed_partition": {
                        "bins": observed_bins,
                        "weights": observed_weights.tolist(),
                    },
                    "expected_partition": {
                        "bins": partition_object["bins"],
                        "weights": partition_object["weights"],
                    },
                    "observed_cdf": {
                        "x": observed_bins,
                        "cdf_values": [0] + observed_cdf_values.tolist(),
                    },
                    "expected_cdf": {
                        "x": partition_object["bins"],
                        "cdf_values": test_cdf.tolist(),
                    },
                },
            },
        }
        return return_obj
@DocInherit
@MetaPandasDataset.column_pair_map_expectation
def expect_column_pair_values_to_be_equal( # noqa: PLR0913
self,
column_A,
column_B,
ignore_row_if="both_values_are_missing",
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
return column_A == column_B
@DocInherit
@MetaPandasDataset.column_pair_map_expectation
def expect_column_pair_values_A_to_be_greater_than_B( # noqa: PLR0913
self,
column_A,
column_B,
or_equal=None,
parse_strings_as_datetimes=None,
allow_cross_type_comparisons=None,
ignore_row_if="both_values_are_missing",
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# FIXME
if allow_cross_type_comparisons == True:
raise NotImplementedError
if parse_strings_as_datetimes:
temp_column_A = column_A.map(parse)
temp_column_B = column_B.map(parse)
else:
temp_column_A = column_A
temp_column_B = column_B
if or_equal == True:
return temp_column_A >= temp_column_B
else:
return temp_column_A > temp_column_B
@DocInherit
@MetaPandasDataset.column_pair_map_expectation
def expect_column_pair_values_to_be_in_set( # noqa: PLR0913
self,
column_A,
column_B,
value_pairs_set,
ignore_row_if="both_values_are_missing",
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if value_pairs_set is None:
# vacuously true
return np.ones(len(column_A), dtype=np.bool_)
temp_df = pd.DataFrame({"A": column_A, "B": column_B})
value_pairs_set = {(x, y) for x, y in value_pairs_set}
results = []
for i, t in temp_df.iterrows():
if pd.isnull(t["A"]):
a = None
else:
a = t["A"]
if pd.isnull(t["B"]):
b = None
else:
b = t["B"]
results.append((a, b) in value_pairs_set)
return pd.Series(results, temp_df.index)
@DocInherit
@MetaPandasDataset.multicolumn_map_expectation
def expect_select_column_values_to_be_unique_within_record( # noqa: PLR0913
self,
column_list,
mostly=None,
ignore_row_if="all_values_are_missing",
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
threshold = len(column_list.columns)
# Do not dropna here, since we have separately dealt with na in decorator
return column_list.nunique(dropna=False, axis=1) >= threshold
@DocInherit
@MetaPandasDataset.multicolumn_map_expectation
def expect_multicolumn_sum_to_equal( # noqa: PLR0913
self,
column_list,
sum_total,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
""" Multi-Column Map Expectation
Expects that the sum of row values is the same for each row, summing only values in columns specified in
column_list, and equal to the specific value, sum_total.
Args:
column_list (List[str]): \
Set of columns to be checked
sum_total (int): \
expected sum of columns
"""
return column_list.sum(axis=1) == sum_total
@DocInherit
@MetaPandasDataset.multicolumn_map_expectation
def expect_compound_columns_to_be_unique( # noqa: PLR0913
self,
column_list,
mostly=None,
ignore_row_if="all_values_are_missing",
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# Do not dropna here, since we have separately dealt with na in decorator
# Invert boolean so that duplicates are False and non-duplicates are True
return ~column_list.duplicated(keep=False)
|
4746eb98546fcdab2d925c17c06c1bd8a8181433
|
375a222af8a11353c86d4bcdfbcb5e6ac1b81a41
|
/perceiver/data/text/collator.py
|
f32854b48dd24ff31ddd280c49b19e09b0f3f0c6
|
[
"Apache-2.0"
] |
permissive
|
krasserm/perceiver-io
|
5a1ac14ea399d46c0331d82d11d8924b91265a31
|
4ac9b2c43164cf0372abf7d0239eba74ef3658e8
|
refs/heads/main
| 2023-06-25T07:28:11.173330
| 2023-06-12T11:02:45
| 2023-06-12T11:02:45
| 413,288,566
| 334
| 34
|
Apache-2.0
| 2023-06-12T11:00:26
| 2021-10-04T05:42:06
|
Python
|
UTF-8
|
Python
| false
| false
| 5,753
|
py
|
collator.py
|
from collections import defaultdict
from typing import Optional
import numpy as np
import torch
from transformers import (
DataCollatorForLanguageModeling,
DataCollatorWithPadding,
DefaultDataCollator,
PreTrainedTokenizerFast,
)
from transformers.utils import PaddingStrategy
class Collator:
    """Base class turning a batch dict into a (labels, input_ids, pad_mask) triple."""

    def collate(self, examples):
        """Combine ``examples`` into a batch dict; must be implemented by subclasses."""
        raise NotImplementedError()

    def __call__(self, examples):
        batch = self.collate(examples)
        labels = batch["labels"]
        input_ids = batch["input_ids"]
        # Invert the attention mask so True marks *padding* positions.
        pad_mask = ~batch["attention_mask"].type(torch.bool)
        return labels, input_ids, pad_mask
class RandomTruncateCollator(Collator):
    """Wraps another collator and randomly truncates each batch sequence dimension."""

    def __init__(self, collator: Collator, min_seq_len: int):
        self.collator = collator
        self.min_seq_len = min_seq_len

    def collate(self, examples):
        batch = self.collator.collate(examples)
        seq_len = batch["input_ids"].shape[1]
        if seq_len > self.min_seq_len:
            # Drop between 1 and (seq_len - min_seq_len) trailing positions,
            # so the result never becomes shorter than min_seq_len.
            num_drop = torch.randint(1, seq_len - self.min_seq_len + 1, size=(1,))
            for key in ("labels", "input_ids", "attention_mask"):
                batch[key] = batch[key][:, :-num_drop]
        return batch
class DefaultCollator(Collator):
    """Collates pre-tokenized examples, truncating/padding them to a common length.

    Examples are dicts with at least ``input_ids``, optionally ``label_ids`` and
    scalar label keys (``label``/``labels``) which are passed through untouched.
    """

    label_keys = ["label", "labels"]

    def __init__(self, tokenizer: PreTrainedTokenizerFast, max_seq_len: Optional[int] = None):
        self.collator = DefaultDataCollator()
        self.tokenizer = tokenizer
        self.max_seq_len = max_seq_len

    def collate(self, examples):
        # Target length is the longest example in the batch, capped at
        # max_seq_len when one is configured. Bug fix: the previous
        # `min(cur_length, self.max_seq_len)` raised a TypeError whenever
        # max_seq_len was left at its documented default of None.
        cur_length = max(len(example["input_ids"]) for example in examples)
        if self.max_seq_len is None:
            max_length = cur_length
        else:
            max_length = min(cur_length, self.max_seq_len)
        return self.collator([self._prepare(example, max_length=max_length) for example in examples])

    def _prepare(self, example, max_length):
        """Truncate/pad a single example and carry over its label fields."""
        # FIXME: ensure proper handling of special tokens in example
        # Sequences longer than max_length are truncated including any
        # special tokens at the end of the sequence. These special tokens
        # must be preserved though. Setting add_special_tokens=true doesn't
        # work either because this would duplicate (some) special tokens
        # already contained in the input sequence.
        prepared = self._prepare_sequence(example["input_ids"], max_length)
        if "label_ids" in example:
            prepared_label_ids = self._prepare_sequence(example["label_ids"], max_length)
            prepared["label_ids"] = prepared_label_ids["input_ids"]
        for label_key in self.label_keys:
            if label_key in example:
                prepared[label_key] = example[label_key]
        return prepared

    def _prepare_sequence(self, sequence, max_length):
        # prepare_for_model truncates to max_length and, when the tokenizer
        # defines a pad token, pads shorter sequences up to it.
        return self.tokenizer.prepare_for_model(
            sequence,
            add_special_tokens=False,
            return_token_type_ids=False,
            padding=False if self.tokenizer.pad_token is None else PaddingStrategy.MAX_LENGTH,
            max_length=max_length,
            truncation=True,
        )
class WordMaskingCollator(Collator):
    """Whole-word masking collator for masked language modelling.

    Words (not individual tokens) are selected with probability ``mask_prob``;
    of the selected words, 80% are replaced by mask tokens, 10% by random
    tokens and 10% are left unchanged (BERT-style split).
    """

    def __init__(self, tokenizer: PreTrainedTokenizerFast, mask_prob: float = 0.15):
        self.collator = DataCollatorWithPadding(tokenizer)
        self.mask_token_id = tokenizer.mask_token_id
        self.vocab_size = tokenizer.vocab_size
        self.mask_prob = mask_prob

    def collate(self, examples):
        # Mask examples in place first, then pad them to a uniform batch length.
        return self.collator(self.mask_words(examples))

    def mask_words(self, examples):
        """A modified version of whole word masking as described in https://huggingface.co/course/chapter7/3.
        The implementation in the linked document replaces words, randomly selected with `wwm_probability`, with mask
        tokens (one or more per word). The implementation here, however, only replaces 80% of selected words with mask
        tokens and replaces 10% with random words and leaves 10% unchanged.
        """
        for example in examples:
            self.mask_words_1(example)
        return examples

    def mask_words_1(self, example):
        # ------------------
        #   Mutates argument
        # ------------------
        # `word_ids` maps each token position to the id of the word it belongs
        # to (None for special tokens); it is consumed (popped) here.
        word_ids = example.pop("word_ids")
        input_ids = example["input_ids"]
        # -100 marks positions that are ignored by the LM loss.
        labels = [-100] * len(input_ids)
        # mapping: running word index -> list of token positions of that word.
        mapping = defaultdict(list)
        current_word_index = -1
        current_word_id = None
        for idx, word_id in enumerate(word_ids):
            if word_id is not None:
                if word_id != current_word_id:
                    current_word_id = word_id
                    current_word_index += 1
                mapping[current_word_index].append(idx)
        # Randomly mask words
        mask = np.random.binomial(1, self.mask_prob, len(mapping))
        for word_index in np.where(mask)[0]:
            # One pair of random draws per selected word, shared by all of
            # its tokens so the whole word is treated uniformly.
            rand_nr = np.random.rand(2)
            for idx in mapping[word_index]:
                labels[idx] = input_ids[idx]
                if rand_nr[0] < 0.8:
                    # in 80% of cases replace word with mask token(s)
                    input_ids[idx] = self.mask_token_id
                elif rand_nr[1] < 0.5:
                    # in 10% of cases replace word token(s) with random tokens
                    # (reached with prob 0.2, halved by rand_nr[1] -> 0.1 overall)
                    input_ids[idx] = np.random.randint(self.vocab_size)
                else:
                    # in 10% of cases leave word token(s) unchanged
                    pass
        example["labels"] = labels
        return example
class TokenMaskingCollator(Collator):
    """Standard token-level MLM masking, delegated to the HF language-modelling collator."""

    def __init__(self, tokenizer: PreTrainedTokenizerFast, mask_prob=0.15):
        self.collator = DataCollatorForLanguageModeling(tokenizer, mlm_probability=mask_prob)

    def collate(self, examples):
        # Masking, label creation and padding are all handled by the HF collator.
        return self.collator(examples)
|
cebe3db025e8533d23250302394f7131a5ceee86
|
6e56e6b4bb562cd1db6e38b5f089b863b77e087f
|
/dragonfly/exd/domains.py
|
0b0130d34188bcdf460f14e273b79023dd2c199d
|
[
"MIT"
] |
permissive
|
dragonfly/dragonfly
|
aa5f3a64bfe7800c44c32e58b487b5733c40035d
|
3eef7d30bcc2e56f2221a624bd8ec7f933f81e40
|
refs/heads/master
| 2023-08-06T08:34:29.317771
| 2022-10-01T22:21:50
| 2022-10-01T22:21:50
| 130,418,835
| 868
| 374
|
MIT
| 2023-06-19T20:23:17
| 2018-04-20T22:19:50
|
Python
|
UTF-8
|
Python
| false
| false
| 19,702
|
py
|
domains.py
|
"""
Harness to manage optimisation domains.
-- kandasamy@cs.cmu.edu
"""
# pylint: disable=invalid-name
# pylint: disable=arguments-differ
import numpy as np
from numbers import Number
from scipy.spatial.distance import cdist
class Domain(object):
    """Abstract base class for optimisation domains."""

    def get_type(self):
        """ Returns the type of the domain. """
        raise NotImplementedError('Implement in a child class.')

    def get_dim(self):
        """ Returns the dimension of the space. """
        raise NotImplementedError('Implement in a child class.')

    def is_a_member(self, point):
        """ Returns True if point is a member of this domain. """
        raise NotImplementedError('Implement in a child class.')

    @classmethod
    def members_are_equal(cls, point_1, point_2):
        """ Compares two members and returns True if they are the same. """
        # Default is plain equality; subclasses override with tolerance checks.
        return point_1 == point_2

    def compute_distance(self, point_1, point_2):
        """ Computes the distance between point_1 and point_2. """
        raise NotImplementedError('Implement in a child class.')

    def __str__(self):
        """ Returns a string representation. """
        raise NotImplementedError('Implement in a child class.')
# Universal Domain ----------
class UniversalDomain(Domain):
    """Domain containing every possible point.

    Used mostly where the domain is not critical, as a lazy default.
    """

    def get_type(self):
        """Type identifier of this domain."""
        return 'universal'

    def get_dim(self):
        """Dimension is undefined for the universal domain."""
        return None

    def is_a_member(self, _):
        """Every point belongs to this domain."""
        return True

    @classmethod
    def compute_distance(cls, point_1, point_2):
        """Distance is not defined on the universal domain."""
        raise ValueError('Distance not defined for Universal Domain.')

    def __str__(self):
        """String representation."""
        return 'Universal Domain'
# Euclidean spaces ---------
class EuclideanDomain(Domain):
    """Continuous box-constrained Euclidean domain."""

    def __init__(self, bounds):
        """bounds: sequence of (lower, upper) pairs, one per dimension."""
        _check_if_valid_euc_int_bounds(bounds)
        self.bounds = np.array(bounds)
        self.dim = len(bounds)
        # Diagonal length of the box; used as the scale for equality tolerance.
        self.diameter = np.linalg.norm(self.bounds[:, 1] - self.bounds[:, 0])
        super(EuclideanDomain, self).__init__()

    def get_type(self):
        """Type identifier of this domain."""
        return 'euclidean'

    def get_dim(self):
        """Number of dimensions."""
        return self.dim

    def is_a_member(self, point):
        """True iff point lies inside the box bounds."""
        return is_within_bounds(self.bounds, point)

    def members_are_equal(self, point_1, point_2):
        """Points closer than 1e-8 of the domain diameter count as equal."""
        return self.compute_distance(point_1, point_2) < 1e-8 * self.diameter

    @classmethod
    def compute_distance(cls, point_1, point_2):
        """Euclidean distance between the two points."""
        return np.linalg.norm(np.array(point_1) - np.array(point_2))

    def __str__(self):
        """String representation."""
        return 'Euclidean: %s'%(_get_bounds_as_str(self.bounds))
# Integral spaces ------------
class IntegralDomain(Domain):
    """ Domain for vector valued integers. """

    def __init__(self, bounds):
        """ Constructor. bounds is a list of (lower, upper) integer pairs. """
        _check_if_valid_euc_int_bounds(bounds)
        # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin `int` is the documented replacement and yields the same
        # default integer dtype.
        self.bounds = np.array(bounds, dtype=int)
        self.diameter = np.linalg.norm(self.bounds[:, 1] - self.bounds[:, 0])
        self.dim = len(bounds)
        super(IntegralDomain, self).__init__()

    def get_type(self):
        """ Returns the type of the domain. """
        return 'integral'

    def get_dim(self):
        """ Return the dimensions. """
        return self.dim

    def is_a_member(self, point):
        """ Returns true if point is in the domain. """
        # np.integer is the abstract base of all NumPy integer scalar types
        # (np.int32, np.int64, ...), replacing the removed np.int alias and
        # covering strictly more cases than the old (np.int, np.int64) pair.
        are_ints = [isinstance(x, (int, np.integer)) for x in point]
        return all(are_ints) and is_within_bounds(self.bounds, point)

    def members_are_equal(self, point_1, point_2):
        """ Compares two members and returns True if they are the same. """
        dist = self.compute_distance(point_1, point_2)
        return dist == 0 or dist < 1e-8 * self.diameter

    @classmethod
    def compute_distance(cls, point_1, point_2):
        """ Computes the distance between point_1 and point_2. """
        return np.linalg.norm(np.array(point_1) - np.array(point_2))

    def __str__(self):
        """ Returns a string representation. """
        return 'Integral: %s'%(_get_bounds_as_str(self.bounds))
# Discrete spaces -------------
class DiscreteDomain(Domain):
    """Domain consisting of a finite list of arbitrary objects."""

    def __init__(self, list_of_items):
        """list_of_items: the objects making up the domain."""
        self.list_of_items = list_of_items
        self.size = len(list_of_items)
        super(DiscreteDomain, self).__init__()

    def get_type(self):
        """Type identifier of this domain."""
        return 'discrete'

    def get_dim(self):
        # Discrete domains are treated as one-dimensional.
        return 1

    def is_a_member(self, point):
        """True iff point is one of the listed items."""
        return point in self.list_of_items

    @classmethod
    def _get_disc_domain_type(cls):
        """ Prefix for __str__. Can be overridden by a child class. """
        return "Disc"

    @classmethod
    def compute_distance(cls, point_1, point_2):
        # Discrete (0/1) metric: 0.0 when equal, 1.0 otherwise.
        return float(point_1 != point_2)

    def __str__(self):
        """String representation; item lists of size < 4 are shown inline."""
        base_str = '%s(%d)'%(self._get_disc_domain_type(), self.size)
        if self.size >= 4:
            return base_str
        return '%s: %s'%(base_str, self.list_of_items)
class DiscreteNumericDomain(DiscreteDomain):
    """Discrete domain whose items are all numbers."""

    def __init__(self, list_of_items):
        """Validate that every item is numeric before delegating to DiscreteDomain."""
        if not all_items_are_numeric(list_of_items):
            raise ValueError('list_of_items must be a list of numbers.')
        super(DiscreteNumericDomain, self).__init__(list_of_items)

    def get_type(self):
        """Type identifier of this domain."""
        return 'discrete_numeric'

    def _get_disc_domain_type(self):
        """ Prefix for __str__. Can be overridden by a child class. """
        return "DiscNum"

    @classmethod
    def compute_distance(cls, point_1, point_2):
        # Absolute difference, since items are scalars.
        return abs(point_1 - point_2)

    def is_a_member(self, point):
        # Tolerance-based membership to sidestep floating point precision.
        return discrete_numeric_element_is_in_list(point, self.list_of_items)
class DiscreteEuclideanDomain(DiscreteDomain):
    """ Domain for Discrete Euclidean spaces. """

    def __init__(self, list_of_items):
        """ Constructor. list_of_items is an (n, d) array-like of n points. """
        list_of_items = np.array(list_of_items)
        self.dim = list_of_items.shape[1]
        self.size = len(list_of_items)
        # Upper bound on the pairwise distance; also the scale for the
        # membership/equality tolerance below.
        # NOTE(review): if all items are identical the diameter is 0 and the
        # tolerance collapses to 0 — confirm that degenerate case is intended.
        self.diameter = np.sqrt(self.dim) * (list_of_items.max() - list_of_items.min())
        super(DiscreteEuclideanDomain, self).__init__(list_of_items)

    def get_type(self):
        """ Returns the type of the domain. """
        return 'discrete_euclidean'

    def _get_disc_domain_type(self):
        """ Prefix for __str__. Can be overridden by a child class. """
        return "DiscEuc"

    def get_dim(self):
        """ Return the dimensions. """
        return self.dim

    @classmethod
    def compute_distance(cls, point_1, point_2):
        """ Computes the distance between point_1 and point_2. """
        return np.linalg.norm(np.array(point_1) - np.array(point_2))

    def is_a_member(self, point):
        """ Returns true if point is in the domain. """
        # Naively find the nearest point in the domain
        # (O(n) scan via scipy.spatial.distance.cdist).
        return cdist([point], self.list_of_items).min() < 1e-8 * self.diameter

    def members_are_equal(self, point_1, point_2):
        """ Compares two members and returns True if they are the same. """
        return self.compute_distance(point_1, point_2) < 1e-8 * self.diameter
# A product of discrete spaces -----------------------------------------------------
class ProdDiscreteDomain(Domain):
    """Cartesian product of several finite item lists."""

    def __init__(self, list_of_list_of_items):
        """list_of_list_of_items: one list of allowed items per dimension."""
        self.list_of_list_of_items = list_of_list_of_items
        self.dim = len(list_of_list_of_items)
        # Total number of points is the product of the per-dimension sizes.
        self.size = np.prod([len(loi) for loi in list_of_list_of_items])

    def get_type(self):
        """Type identifier of this domain."""
        return 'prod_discrete'

    def get_dim(self):
        """Number of dimensions."""
        return self.dim

    def is_a_member(self, point):
        """True iff point is iterable, has the right length, and each
        coordinate appears in the corresponding item list."""
        if not hasattr(point, '__iter__') or len(point) != self.dim:
            return False
        return all(elem in loi
                   for elem, loi in zip(point, self.list_of_list_of_items))

    def members_are_equal(self, point_1, point_2):
        """True iff the points agree in every coordinate."""
        return all(point_1[i] == point_2[i] for i in range(self.dim))

    @classmethod
    def _get_prod_disc_domain_type(cls):
        """ Prefix for __str__. Can be overridden by a child class. """
        return "ProdDisc"

    @classmethod
    def compute_distance(cls, point_1, point_2):
        # Hamming distance: number of coordinates that differ.
        return float(sum(elem_1 != elem_2
                         for (elem_1, elem_2) in zip(point_1, point_2)))

    def __str__(self):
        """String representation."""
        return '%s(d=%d,size=%d)'%(self._get_prod_disc_domain_type(), self.dim, self.size)
class ProdDiscreteNumericDomain(ProdDiscreteDomain):
    """Product of discrete domains whose items are all numeric."""

    def __init__(self, list_of_list_of_items):
        """Validate that every per-dimension list is numeric before delegating."""
        if not all_lists_of_items_are_numeric(list_of_list_of_items):
            raise ValueError('list_of_list_of_items must of a list where each element is '
                             'a list of numeric objects.')
        super(ProdDiscreteNumericDomain, self).__init__(list_of_list_of_items)

    def get_type(self):
        """Type identifier of this domain."""
        return 'prod_discrete_numeric'

    def is_a_member(self, point):
        """True iff each coordinate is (tolerance-)contained in its item list."""
        if not hasattr(point, '__iter__') or len(point) != self.dim:
            return False
        return all(discrete_numeric_element_is_in_list(elem, loi)
                   for elem, loi in zip(point, self.list_of_list_of_items))

    @classmethod
    def compute_distance(cls, point_1, point_2):
        # Euclidean distance, since all coordinates are numeric.
        return np.linalg.norm(np.array(point_1) - np.array(point_2))

    @classmethod
    def _get_prod_disc_domain_type(cls):
        """ Prefix for __str__. Can be overridden by a child class. """
        return "ProdDiscNum"
# Compound Domains ------------------------------------------
# Implementing a series of domains derived from the above
class CartesianProductDomain(Domain):
    """ The cartesian product of several domains. """

    def __init__(self, list_of_domains, domain_info=None):
        """ Constructor.
            list_of_domains is a list of domain objects.
            An element in this domain is represented by a list whose ith element
            belongs to list_of_domains[i].
        """
        self.list_of_domains = list_of_domains
        self.num_domains = len(list_of_domains)
        try:
            self.dim = sum([dom.get_dim() for dom in self.list_of_domains])
        except TypeError:
            # Some sub-domains report dim=None (e.g. the universal domain).
            self.dim = None
        # Domain info
        self.domain_info = domain_info
        self._has_constraints = False
        if self.domain_info is not None:
            from .cp_domain_utils import get_raw_point_from_processed_point
            self.raw_name_ordering = self.domain_info.config_orderings.raw_name_ordering
            self.get_raw_point = lambda x: get_raw_point_from_processed_point(x,
                self, self.domain_info.config_orderings.index_ordering,
                self.domain_info.config_orderings.dim_ordering)
            if hasattr(self.domain_info, 'config_file') and \
                self.domain_info.config_file is not None:
                import os
                self.config_file = self.domain_info.config_file
                self.config_file_dir = os.path.dirname(os.path.abspath(os.path.realpath(
                    self.domain_info.config_file)))
            if hasattr(self.domain_info, 'constraints'):
                self._has_constraints = True
                self._constraint_eval_set_up()

    def _constraint_eval_set_up(self):
        """ Set up for evaluating constraints. """
        from importlib import import_module
        import sys
        from ..utils.general_utils import evaluate_strings_with_given_variables
        self.str_constraint_evaluator = evaluate_strings_with_given_variables
        self.domain_constraints = self.domain_info.constraints
        self.num_domain_constraints = len(self.domain_constraints)
        # Separate the constraints into different types:
        # '.py' file paths, python expression strings, and callables.
        self.eval_as_pyfile_idxs = [idx for idx in range(self.num_domain_constraints) if
                                    isinstance(self.domain_constraints[idx]['constraint'], str) and
                                    self.domain_constraints[idx]['constraint'].endswith('.py')]
        self.eval_as_str_idxs = [idx for idx in range(self.num_domain_constraints) if
                                 isinstance(self.domain_constraints[idx]['constraint'], str) and
                                 idx not in self.eval_as_pyfile_idxs]
        self.eval_as_pyfunc_idxs = [idx for idx in range(self.num_domain_constraints) if
                                    hasattr(self.domain_constraints[idx]['constraint'], '__call__')]
        # Save constraints here
        self.pyfunc_constraints = [self.domain_constraints[idx]['constraint'] for idx
                                   in self.eval_as_pyfunc_idxs]
        self.str_constraints = [self.domain_constraints[idx]['constraint'] for idx in
                                self.eval_as_str_idxs]
        # pyfile constraints: import each module and grab its `constraint` symbol.
        self.pyfile_constraints = []
        if len(self.eval_as_pyfile_idxs) > 0:
            if not hasattr(self, 'config_file_dir'):
                raise ValueError('Constraints can be specified in a python file only when'
                                 ' using a configuration file.')
            # This is relevant only if the domain is loaded via a configuration file.
            pyfile_modules = [self.domain_constraints[idx]['constraint'] for idx
                              in self.eval_as_pyfile_idxs]
            # Temporarily extend sys.path so the config-relative modules resolve.
            sys.path.append(self.config_file_dir)
            for pfm_file_name in pyfile_modules:
                pfm = pfm_file_name.split('.')[0]
                constraint_source_module = import_module(pfm, self.config_file_dir)
                self.pyfile_constraints.append(constraint_source_module.constraint)
            sys.path.remove(self.config_file_dir)

    def get_type(self):
        """ Returns the type of the domain. """
        return 'cartesian_product'

    def has_constraints(self):
        """ Returns True if the domain has constraints. """
        return self._has_constraints

    def get_dim(self):
        """ Returns the dimension. """
        return self.dim

    def is_a_member(self, point):
        """ Returns true if the point is in the domain. """
        if not hasattr(point, '__iter__') or len(point) != self.num_domains:
            return False
        for dom_pt, dom in zip(point, self.list_of_domains):
            if not dom.is_a_member(dom_pt): # check if each element is in the respective domain.
                return False
        # Now check if the constraints are satisfied
        if not self.constraints_are_satisfied(point):
            return False
        return True

    def _evaluate_all_constraints(self, raw_point, name_to_pt_dict):
        """ Evaluates all constraints and returns results in original order. """
        # Evaluate all constraints
        ret_str_all = self.str_constraint_evaluator(self.str_constraints, name_to_pt_dict)
        ret_pyfile_all = [elem(raw_point) for elem in self.pyfile_constraints]
        ret_pyfunc_all = [elem(raw_point) for elem in self.pyfunc_constraints]
        # Merge results back into the order constraints were declared in.
        ret_all = [None] * self.num_domain_constraints
        for str_idx, orig_idx in enumerate(self.eval_as_str_idxs):
            ret_all[orig_idx] = ret_str_all[str_idx]
        for pyfile_idx, orig_idx in enumerate(self.eval_as_pyfile_idxs):
            ret_all[orig_idx] = ret_pyfile_all[pyfile_idx]
        for pyfunc_idx, orig_idx in enumerate(self.eval_as_pyfunc_idxs):
            ret_all[orig_idx] = ret_pyfunc_all[pyfunc_idx]
        return ret_all

    def constraints_are_satisfied(self, point):
        """ Checks if the constraints are satisfied. """
        if hasattr(self, 'domain_constraints') and self.domain_constraints is not None:
            raw_point = self.get_raw_point(point)
            name_to_pt_dict = {k:v for (k, v) in zip(self.raw_name_ordering, raw_point)}
            ret_all = self._evaluate_all_constraints(raw_point, name_to_pt_dict)
            for idx, elem in enumerate(ret_all):
                # Fix: np.bool was removed in NumPy 1.24; (bool, np.bool_)
                # covers both Python and NumPy booleans.
                if not isinstance(elem, (bool, np.bool_)):
                    # Fix: constraints are dicts with 'name'/'constraint' keys
                    # (see __str__ below); indexing with [0]/[1] raised a
                    # KeyError instead of producing this error message.
                    raise ValueError(
                        'Constraint %d:%s (%s) returned %s. It should return type bool.'%(idx,
                        self.domain_constraints[idx]['name'],
                        self.domain_constraints[idx]['constraint'], str(elem)))
            return all(ret_all)
        else:
            return True

    def members_are_equal(self, point_1, point_2):
        """ Compares two members and returns True if they are the same. """
        for i, dom in enumerate(self.list_of_domains):
            if not dom.members_are_equal(point_1[i], point_2[i]):
                return False
        return True

    def compute_distance(self, point_1, point_2):
        """ Computes the distance between point_1 and point_2 as the sum of
            the per-sub-domain distances. """
        return sum([dom.compute_distance(elem_1, elem_2) for (elem_1, elem_2, dom) in
                    zip(point_1, point_2, self.list_of_domains)])

    def __str__(self):
        """ Returns a string representation of the domain. """
        list_of_domains_str = ', '.join([str(dom) for dom in self.list_of_domains])
        ret1 = 'CartProd(N=%d,d=%d)::[%s]'%(self.num_domains, self.dim,
                                            list_of_domains_str)
        if self.has_constraints():
            constraints_as_list_of_strs = ['%s: %s'%(elem['name'], elem['constraint'])
                                           for elem in self.domain_constraints]
            constraints_as_str = ', '.join(constraints_as_list_of_strs)
            ret2 = ', Constraints:: %s'%(constraints_as_str)
        else:
            ret2 = ''
        return ret1 + ret2
# Utilities we will need for the above ------------------------------------------
def is_within_bounds(bounds, point):
    """ Returns true if point is within bounds. point is a d-array and bounds is a
        dx2 array. bounds is expected to be an np.array object.
    """
    point = np.array(point)
    # Points of the wrong dimensionality are rejected outright.
    expected_shape = (bounds.shape[0],)
    if point.shape != expected_shape:
        return False
    above_lb = np.all((point - bounds[:, 0] >= 0))
    below_ub = np.all((bounds[:, 1] - point >= 0))
    # Product of the two boolean scalars acts as logical AND.
    return above_lb * below_ub
def _check_if_valid_euc_int_bounds(bounds):
""" Checks if the bounds are valid. """
for bd in bounds:
if bd[0] > bd[1]:
raise ValueError('Given bound %s is not in [lower_bound, upper_bound] format.'%(
bd))
def _get_bounds_as_str(bounds):
""" returns a string representation of bounds. """
bounds_list = [list(b) for b in bounds]
return str(bounds_list)
def all_items_are_numeric(list_of_items):
    """ Returns true if all items in the list are numeric. """
    # numbers.Number covers int, float, complex and NumPy scalars alike.
    return all(isinstance(elem, Number) for elem in list_of_items)
def all_lists_of_items_are_numeric(list_of_list_of_items):
    """ Returns true if all lists in list_of_list_of_items are numeric. """
    # Flattened check: every element of every sub-list must be a Number.
    return all(isinstance(elem, Number)
               for sub_list in list_of_list_of_items
               for elem in sub_list)
def discrete_numeric_element_is_in_list(elem, list_of_num_elements, tol=1e-8):
    """ Returns True if elem is in list_of_num_elements. Writing this separately due to
        precision issues with Python.
    """
    if not isinstance(elem, Number):
        return False
    # Tolerance-based membership to dodge floating point representation issues.
    return any(abs(elem - candidate) < tol for candidate in list_of_num_elements)
|
253b97c01f0e75944f29f05790ff224aca5935a0
|
6d54a7b26d0eb82152a549a6a9dfde656687752c
|
/scripts/py_matter_yamltests/matter_yamltests/pseudo_clusters/clusters/equality_commands.py
|
30f6c80b8dea0fa0a16fb2f648483f7b873a8c93
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
project-chip/connectedhomeip
|
81a123d675cf527773f70047d1ed1c43be5ffe6d
|
ea3970a7f11cd227ac55917edaa835a2a9bc4fc8
|
refs/heads/master
| 2023-09-01T11:43:37.546040
| 2023-09-01T08:01:32
| 2023-09-01T08:01:32
| 244,694,174
| 6,409
| 1,789
|
Apache-2.0
| 2023-09-14T20:56:31
| 2020-03-03T17:05:10
|
C++
|
UTF-8
|
Python
| false
| false
| 2,227
|
py
|
equality_commands.py
|
#
# Copyright (c) 2023 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..pseudo_cluster import PseudoCluster
_DEFINITION = '''<?xml version="1.0"?>
<configurator>
<cluster>
<name>EqualityCommands</name>
<code>0xFFF1FD08</code>
<command source="client" code="0" name="BooleanEquals" response="EqualityResponse">
<arg name="Value1" type="boolean"/>
<arg name="Value2" type="boolean"/>
</command>
<command source="client" code="1" name="SignedNumberEquals" response="EqualityResponse">
<arg name="Value1" type="int64s"/>
<arg name="Value2" type="int64s"/>
</command>
<command source="client" code="2" name="UnsignedNumberEquals" response="EqualityResponse">
<arg name="Value1" type="int64u"/>
<arg name="Value2" type="int64u"/>
</command>
<command source="server" code="254" name="EqualityResponse">
<arg name="Equals" type="bool"/>
</command>
</cluster>
</configurator>
'''
def Compare(request):
    """Return True iff the request's Value1 and Value2 arguments are equal.

    A missing argument compares as None, matching the original behavior.
    """
    found = {}
    for argument in request.arguments['values']:
        name = argument['name']
        if name in ('Value1', 'Value2'):
            found[name] = argument['value']
    return found.get('Value1') == found.get('Value2')
class EqualityCommands(PseudoCluster):
    """Pseudo cluster serving the EqualityCommands comparison commands."""

    name = 'EqualityCommands'
    definition = _DEFINITION

    @staticmethod
    def _equality_response(request):
        # Shared response payload builder for all *Equals commands.
        return {'value': {'Equals': Compare(request)}}

    async def BooleanEquals(self, request):
        return self._equality_response(request)

    async def SignedNumberEquals(self, request):
        return self._equality_response(request)

    async def UnsignedNumberEquals(self, request):
        return self._equality_response(request)
|
740d7596120f1863668bbfe8b58da002604aa9b1
|
aaabf5c6cea75fb6649fafff57d4e92e9b7142b8
|
/smote_variants/_version.py
|
270ce0483a784da82e5a31930c91ef037421e040
|
[
"MIT"
] |
permissive
|
analyticalmindsltd/smote_variants
|
febcdad7c64c5d6bce5a69a51d7228ea629aa7c4
|
708568e5b44abdc798d9329f993ac6561ad1439d
|
refs/heads/master
| 2023-05-12T21:24:57.222534
| 2023-04-17T10:43:06
| 2023-04-17T10:43:06
| 151,773,885
| 434
| 106
|
MIT
| 2023-05-04T14:18:28
| 2018-10-05T20:21:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 141
|
py
|
_version.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 28 17:21:13 2018
@author: gykovacs
"""
# Package version string; bumped on each release.
__version__= '0.7.1'
|
fb8d5cdb4b53966d71e0d06167c3ab2f5d9be19a
|
f8dee139258b7d971bd1cfa16bd16e356537bbac
|
/Contents/Libraries/Shared/subliminal/providers/addic7ed.py
|
2926081e0d5644343d839c20dfd8276d66d42b83
|
[
"MIT"
] |
permissive
|
pannal/Sub-Zero.bundle
|
79673016ae68d1f2e9886fd30b8763b73a8f6cf8
|
4ced7d8c8f9f5fb47d12410f87fa33d782e9f0f4
|
refs/heads/master
| 2023-07-27T23:04:32.925845
| 2023-07-09T13:07:38
| 2023-07-09T13:08:04
| 21,959,699
| 1,820
| 178
|
NOASSERTION
| 2022-11-28T03:23:13
| 2014-07-17T22:19:13
|
Python
|
UTF-8
|
Python
| false
| false
| 12,524
|
py
|
addic7ed.py
|
# -*- coding: utf-8 -*-
import logging
import re
from babelfish import Language, language_converters
from guessit import guessit
from requests import Session
from . import ParserBeautifulSoup, Provider
from .. import __short_version__
from ..cache import SHOW_EXPIRATION_TIME, region
from ..exceptions import AuthenticationError, ConfigurationError, DownloadLimitExceeded
from ..score import get_equivalent_release_groups
from ..subtitle import Subtitle, fix_line_ending, guess_matches
from ..utils import sanitize, sanitize_release_group
from ..video import Episode
# Module-level logger for this provider.
logger = logging.getLogger(__name__)

# Register the babelfish converter that maps Addic7ed language names.
language_converters.register('addic7ed = subliminal.converters.addic7ed:Addic7edConverter')

# Series cell matching regex (bytes pattern: applied to raw response content)
show_cells_re = re.compile(b'<td class="version">.*?</td>', re.DOTALL)

#: Series header parsing regex (captures series title and optional 4-digit year)
series_year_re = re.compile(r'^(?P<series>[ \w\'.:(),*&!?-]+?)(?: \((?P<year>\d{4})\))?$')
class Addic7edSubtitle(Subtitle):
    """Addic7ed Subtitle."""
    provider_name = 'addic7ed'

    def __init__(self, language, hearing_impaired, page_link, series, season, episode, title, year, version,
                 download_link):
        super(Addic7edSubtitle, self).__init__(language, hearing_impaired=hearing_impaired, page_link=page_link)
        self.series = series
        self.season = season
        self.episode = episode
        self.title = title
        self.year = year
        self.version = version
        self.download_link = download_link

    @property
    def id(self):
        # The download link is unique per subtitle and doubles as its id.
        return self.download_link

    def get_matches(self, video):
        """Return the set of attribute names of `video` this subtitle matches."""
        matches = set()
        # series name
        if video.series and sanitize(self.series) in (
                sanitize(name) for name in [video.series] + video.alternative_series):
            matches.add('series')
        # season
        if video.season and self.season == video.season:
            matches.add('season')
        # episode
        if video.episode and self.episode == video.episode:
            matches.add('episode')
        # title of the episode
        if video.title and sanitize(self.title) == sanitize(video.title):
            matches.add('title')
        # year
        # NOTE(review): relies on `and` binding tighter than `or`; matches when
        # (original series AND subtitle has no year) OR (years are equal).
        # Looks intentional but worth confirming.
        if video.original_series and self.year is None or video.year and video.year == self.year:
            matches.add('year')
        # release_group
        if (video.release_group and self.version and
                any(r in sanitize_release_group(self.version)
                    for r in get_equivalent_release_groups(sanitize_release_group(video.release_group)))):
            matches.add('release_group')
        # resolution
        if video.resolution and self.version and video.resolution in self.version.lower():
            matches.add('resolution')
        # format
        if video.format and self.version and video.format.lower() in self.version.lower():
            matches.add('format')
        # other properties
        # guessit parses the version string for further attributes (codec, ...).
        matches |= guess_matches(video, guessit(self.version), partial=True)
        return matches
class Addic7edProvider(Provider):
"""Addic7ed Provider."""
languages = {Language('por', 'BR')} | {Language(l) for l in [
'ara', 'aze', 'ben', 'bos', 'bul', 'cat', 'ces', 'dan', 'deu', 'ell', 'eng', 'eus', 'fas', 'fin', 'fra', 'glg',
'heb', 'hrv', 'hun', 'hye', 'ind', 'ita', 'jpn', 'kor', 'mkd', 'msa', 'nld', 'nor', 'pol', 'por', 'ron', 'rus',
'slk', 'slv', 'spa', 'sqi', 'srp', 'swe', 'tha', 'tur', 'ukr', 'vie', 'zho'
]}
video_types = (Episode,)
server_url = 'http://www.addic7ed.com/'
subtitle_class = Addic7edSubtitle
def __init__(self, username=None, password=None):
if any((username, password)) and not all((username, password)):
raise ConfigurationError('Username and password must be specified')
self.username = username
self.password = password
self.logged_in = False
self.session = None
    def initialize(self):
        """Create the HTTP session and log in when credentials are configured."""
        self.session = Session()
        self.session.headers['User-Agent'] = 'Subliminal/%s' % __short_version__
        # login
        if self.username and self.password:
            logger.info('Logging in')
            data = {'username': self.username, 'password': self.password, 'Submit': 'Log in'}
            # A successful login redirects (302); any other status means the
            # credentials were rejected.
            r = self.session.post(self.server_url + 'dologin.php', data, allow_redirects=False, timeout=10)
            if r.status_code != 302:
                raise AuthenticationError(self.username)
            logger.debug('Logged in')
            self.logged_in = True
def terminate(self):
# logout
if self.logged_in:
logger.info('Logging out')
r = self.session.get(self.server_url + 'logout.php', timeout=10)
r.raise_for_status()
logger.debug('Logged out')
self.logged_in = False
self.session.close()
    @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
    def _get_show_ids(self):
        """Get the ``dict`` of show ids per series by querying the `shows.php` page.
        :return: show id per series, lower case and without quotes.
        :rtype: dict
        """
        # get the show page
        logger.info('Getting show ids')
        r = self.session.get(self.server_url + 'shows.php', timeout=10)
        r.raise_for_status()
        # LXML parser seems to fail when parsing Addic7ed.com HTML markup.
        # Last known version to work properly is 3.6.4 (next version, 3.7.0, fails)
        # Assuming the site's markup is bad, and stripping it down to only contain what's needed.
        show_cells = re.findall(show_cells_re, r.content)
        if show_cells:
            soup = ParserBeautifulSoup(b''.join(show_cells), ['lxml', 'html.parser'])
        else:
            # If RegEx fails, fall back to original r.content and use 'html.parser'
            soup = ParserBeautifulSoup(r.content, ['html.parser'])
        # populate the show ids
        show_ids = {}
        for show in soup.select('td.version > h3 > a[href^="/show/"]'):
            # href is '/show/<id>'; strip the 6-character '/show/' prefix.
            show_ids[sanitize(show.text)] = int(show['href'][6:])
        logger.debug('Found %d show ids', len(show_ids))
        return show_ids
    @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
    def _search_show_id(self, series, year=None):
        """Search the show id from the `series` and `year`.
        :param str series: series of the episode.
        :param year: year of the series, if any.
        :type year: int
        :return: the show id, if found.
        :rtype: int
        """
        # addic7ed doesn't support search with quotes
        series = series.replace('\'', ' ')
        # build the params
        series_year = '%s %d' % (series, year) if year is not None else series
        params = {'search': series_year, 'Submit': 'Search'}
        # make the search
        logger.info('Searching show ids with %r', params)
        r = self.session.get(self.server_url + 'search.php', params=params, timeout=10)
        r.raise_for_status()
        soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
        # get the suggestion
        suggestion = soup.select('span.titulo > a[href^="/show/"]')
        if not suggestion:
            logger.warning('Show id not found: no suggestion')
            return None
        # Only accept an exact (sanitized) title match, quotes stripped as above.
        if not sanitize(suggestion[0].i.text.replace('\'', ' ')) == sanitize(series_year):
            logger.warning('Show id not found: suggestion does not match')
            return None
        # href is '/show/<id>'; strip the 6-character '/show/' prefix.
        show_id = int(suggestion[0]['href'][6:])
        logger.debug('Found show id %d', show_id)
        return show_id
def get_show_id(self, series, year=None, country_code=None):
"""Get the best matching show id for `series`, `year` and `country_code`.
First search in the result of :meth:`_get_show_ids` and fallback on a search with :meth:`_search_show_id`.
:param str series: series of the episode.
:param year: year of the series, if any.
:type year: int
:param country_code: country code of the series, if any.
:type country_code: str
:return: the show id, if found.
:rtype: int
"""
series_sanitized = sanitize(series).lower()
show_ids = self._get_show_ids()
show_id = None
# attempt with country
if not show_id and country_code:
logger.debug('Getting show id with country')
show_id = show_ids.get('%s %s' % (series_sanitized, country_code.lower()))
# attempt with year
if not show_id and year:
logger.debug('Getting show id with year')
show_id = show_ids.get('%s %d' % (series_sanitized, year))
# attempt clean
if not show_id:
logger.debug('Getting show id')
show_id = show_ids.get(series_sanitized)
# search as last resort
if not show_id:
logger.warning('Series %s not found in show ids', series)
show_id = self._search_show_id(series)
return show_id
    def query(self, show_id, series, season, year=None, country=None):
        """Scrape one season page of a show and return its completed subtitles.

        Note that ``series`` and ``year`` are re-read from the page header, and
        ``season`` is rebound per row; ``country`` is not used in the body.
        """
        # get the page of the season of the show
        logger.info('Getting the page of show id %d, season %d', show_id, season)
        r = self.session.get(self.server_url + 'show/%d' % show_id, params={'season': season}, timeout=10)
        r.raise_for_status()

        if not r.content:
            # Provider returns a status of 304 Not Modified with an empty content
            # raise_for_status won't raise exception for that status code
            logger.debug('No data returned from provider')
            return []

        soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])

        # loop over subtitle rows
        # NOTE(review): [:-10] presumably strips a fixed-length suffix from the
        # header text before matching — confirm against the site's markup
        match = series_year_re.match(soup.select('#header font')[0].text.strip()[:-10])
        series = match.group('series')
        year = int(match.group('year')) if match.group('year') else None
        subtitles = []
        for row in soup.select('tr.epeven'):
            cells = row('td')

            # ignore incomplete subtitles
            status = cells[5].text
            if status != 'Completed':
                logger.debug('Ignoring subtitle with status %s', status)
                continue

            # read the item; cell positions are fixed by the site's table layout
            language = Language.fromaddic7ed(cells[3].text)
            hearing_impaired = bool(cells[6].text)
            page_link = self.server_url + cells[2].a['href'][1:]
            season = int(cells[0].text)
            episode = int(cells[1].text)
            title = cells[2].text
            version = cells[4].text
            download_link = cells[9].a['href'][1:]

            subtitle = self.subtitle_class(language, hearing_impaired, page_link, series, season, episode, title, year,
                                           version, download_link)
            logger.debug('Found subtitle %r', subtitle)
            subtitles.append(subtitle)

        return subtitles
def list_subtitles(self, video, languages):
# lookup show_id
titles = [video.series] + video.alternative_series
show_id = None
for title in titles:
show_id = self.get_show_id(title, video.year)
if show_id is not None:
break
# query for subtitles with the show_id
if show_id is not None:
subtitles = [s for s in self.query(show_id, title, video.season, video.year)
if s.language in languages and s.episode == video.episode]
if subtitles:
return subtitles
else:
logger.error('No show id found for %r (%r)', video.series, {'year': video.year})
return []
def download_subtitle(self, subtitle):
# download the subtitle
logger.info('Downloading subtitle %r', subtitle)
r = self.session.get(self.server_url + subtitle.download_link, headers={'Referer': subtitle.page_link},
timeout=10)
r.raise_for_status()
if not r.content:
# Provider returns a status of 304 Not Modified with an empty content
# raise_for_status won't raise exception for that status code
logger.debug('Unable to download subtitle. No data returned from provider')
return
# detect download limit exceeded
if r.headers['Content-Type'] == 'text/html':
raise DownloadLimitExceeded
subtitle.content = fix_line_ending(r.content)
|
3584cdd944ab91f1b991a6bb3deab618a1a3343f
|
c7fda71ef70a7ffcc59ba648e164f5b7666d3fc8
|
/test/parser/unit_operators/create_testcases/onnx_runner_generator.py
|
af350a1363496adbb4b8e7d1c59eef267fda0a77
|
[
"Apache-2.0"
] |
permissive
|
ai-techsystems/deepC
|
6c7df32ba61f2e85fc1eab5af4ef4f5c1368cf71
|
064a7cc04f96e20013ad7ae26a6abd6ef16ab77c
|
refs/heads/master
| 2023-08-04T06:08:20.817613
| 2022-10-29T15:12:26
| 2022-10-29T15:12:26
| 193,276,319
| 343
| 69
|
Apache-2.0
| 2021-08-19T19:39:50
| 2019-06-22T20:23:12
|
C++
|
UTF-8
|
Python
| false
| false
| 1,782
|
py
|
onnx_runner_generator.py
|
import os
def generate_onnx_runner(op_name, inputs):
    """Write and execute a caffe2/onnx runner script for the testcase *op_name*.

    The script is written to ``../testcases/<op_name>/<op_name>_onnx_runner.py``
    (relative to the current working directory) and immediately executed with
    the system ``python``.

    :param op_name: operator/testcase name, used for the folder, the file name
        and the ``.onnx`` model path inside the generated script.
    :param inputs: Python-literal string pasted verbatim into the script as the
        model's input feed.
    """
    py_file = '''
# Copyright 2018 The DNNC Authors. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
\n\n'''
    py_file += "import onnx\n"
    py_file += "import caffe2.python.onnx.backend\n"
    py_file += "from caffe2.python import core, workspace\n"
    py_file += "import numpy as np\n\n"
    py_file += "onnx_path = '../testcases/" + op_name + "/" + op_name + ".onnx'\n"
    py_file += "modelFile = onnx.load(onnx_path)\n"
    py_file += "inputs = " + inputs + "\n"
    py_file += "output = caffe2.python.onnx.backend.run_model(modelFile, inputs)\n"
    py_file += "print(output)\n\n"

    path_name = '../testcases/' + op_name
    file_name = path_name + "/" + op_name + "_onnx_runner.py"
    # portable, race-free directory creation instead of shelling out to `mkdir -p`
    os.makedirs(path_name, exist_ok=True)
    with open(file_name, 'w') as f:
        f.write(py_file)
    # NOTE: op_name is interpolated into a shell command — do not call this
    # with untrusted input (shell injection risk).
    os.system('python ' + file_name)
|
53d8af48707d94d482cc250ce94cc1fb52089dd2
|
f53d2df2c979063c48ecf1e751d002c8a389c15c
|
/test/test_samples/pytest/test/inner_fixture_test.py
|
6bb875efa1d88c8b4da9dada7e86c5c210152000
|
[
"MIT"
] |
permissive
|
kondratyev-nv/vscode-python-test-adapter
|
b5c98653d197dbf58de02070f627ca74e148e9e7
|
2d3a15a174ea48d19f9978d7942874eafc95107a
|
refs/heads/master
| 2023-08-17T17:02:20.643958
| 2023-08-13T09:16:08
| 2023-08-13T09:16:08
| 147,100,981
| 120
| 33
|
MIT
| 2023-09-09T10:26:30
| 2018-09-02T16:08:33
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
inner_fixture_test.py
|
import pytest
import unittest
# Sample test classes for exercising test discovery of nested classes.
# NOTE(review): this file looks like a fixture consumed by the test adapter's
# own suite — class/method names are the contract, do not rename.
class Test_CheckMyApp:
    @unittest.skip("demonstrating skipping")
    def test_simple_check_skipped(self):
        pass

    def test_complex_check_passed(self):
        pass

    class Test_NestedClassB:
        class Test_nested_classC_Of_B:
            def test_e_passed(self):
                assert True

    class Test_NestedClassA:
        def test_nested_class_methodB_passed(self):
            assert True

        class Test_nested_classB_Of_A:
            def test_d_passed(self):
                assert True

        def test_nested_class_methodC_passed(self):
            assert True

    def test_simple_check2_passed(self):
        pass

    def test_complex_check2_passed(self):
        pass
@pytest.fixture
def parametrized_username():
    # Presumably overrides a fixture of the same name defined in a conftest.py
    # one level up (hence the file name) — TODO confirm against the suite.
    return 'overridden-username'


@pytest.fixture(params=['one', 'two', 'three'])
def non_parametrized_username(request):
    # Despite its name, this fixture IS parametrized: every test using it runs
    # once per param value.
    return request.param


def test_username_passed(parametrized_username):
    assert parametrized_username == 'overridden-username'


def test_parametrized_username_passed(non_parametrized_username):
    assert non_parametrized_username in ['one', 'two', 'three']
|
cb407ded0e3d9a38d2023b81de55dcc43ef186ef
|
0d6e9c15edafe14b5d373f2180809b94786cdb88
|
/torch_mimicry/datasets/imagenet/imagenet.py
|
4c3540ad70a70fc840898c14095ebf3ca5acea5e
|
[
"MIT"
] |
permissive
|
kwotsin/mimicry
|
6190e159f96418e9773a453aa17450f372767ffa
|
a7fda06c4aff1e6af8dc4c4a35ed6636e434c766
|
refs/heads/master
| 2023-08-02T21:49:54.864461
| 2022-08-07T18:28:05
| 2022-08-07T18:28:05
| 251,697,485
| 621
| 70
|
MIT
| 2022-08-07T18:30:23
| 2020-03-31T18:35:55
|
Python
|
UTF-8
|
Python
| false
| false
| 7,704
|
py
|
imagenet.py
|
"""
Modified version of imagenet torchvision dataset source code to allow customised token
for automatic download.
"""
from __future__ import print_function
import os
import shutil
import tempfile
import torch
from torchvision.datasets import ImageFolder
from torchvision.datasets.utils import check_integrity, download_and_extract_archive, extract_archive, \
verify_str_arg
class ImageNet(ImageFolder):
    """
    `ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.

    Args:
    - root (string): Root directory of the ImageNet Dataset.
    - split (string, optional): The dataset split, supports ``train``, or ``val``.
    - token (string): ImageNet token for download.
    - download (bool, optional): If true, downloads the dataset from the internet and
        puts it in root directory. If dataset is already downloaded, it is not
        downloaded again.
    - transform (callable, optional): A function/transform that takes in an PIL image
        and returns a transformed version. E.g, ``transforms.RandomCrop``
    - target_transform (callable, optional): A function/transform that takes in the
        target and transforms it.
    - loader (callable, optional): A function to load an image given its path.

    Attributes:
    - classes (list): List of the class name tuples.
    - class_to_idx (dict): Dict with items (class_name, class_index).
    - wnids (list): List of the WordNet IDs.
    - wnid_to_idx (dict): Dict with items (wordnet_id, class_index).
    - imgs (list): List of (image path, class_index) tuples
    - targets (list): The class_index value for each image in the dataset
    """
    def __init__(self,
                 root,
                 token='',
                 split='train',
                 download=False,
                 **kwargs):
        root = self.root = os.path.expanduser(root)
        self.split = verify_str_arg(split, "split", ("train", "val"))

        # The permission token is interpolated directly into the official
        # archive URLs; md5 checksums are the published ILSVRC2012 ones.
        self.archive_dict = {
            'train': {
                'url':
                'http://www.image-net.org/challenges/LSVRC/2012/{}/ILSVRC2012_img_train.tar'
                .format(token),
                'md5':
                '1d675b47d978889d74fa0da5fadfb00e',
            },
            'val': {
                'url':
                'http://www.image-net.org/challenges/LSVRC/2012/{}/ILSVRC2012_img_val.tar'
                .format(token),
                'md5':
                '29b22e2961454d5413ddabcf34fc5622',
            },
            'devkit': {
                'url':
                'http://www.image-net.org/challenges/LSVRC/2012/{}/ILSVRC2012_devkit_t12.tar.gz'
                .format(token),
                'md5':
                'fa75699e90414af021442c21a62c3abf',
            }
        }

        if download:
            # download requires a non-empty permission token
            if len(token) == 0:
                raise ValueError(
                    "ImageNet token is empty. Please obtain permission token from the official website."
                )
            self.download()
        wnid_to_classes = self._load_meta_file()[0]

        super(ImageNet, self).__init__(self.split_folder, **kwargs)
        self.root = root

        # ImageFolder discovered wnid-named folders as "classes"; remap them to
        # human-readable class names, keeping the wnid-based lookups too.
        self.wnids = self.classes
        self.wnid_to_idx = self.class_to_idx
        self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]
        # every synonym in a class-name tuple maps to the same index
        self.class_to_idx = {
            cls: idx
            for idx, clss in enumerate(self.classes) for cls in clss
        }

    def download(self):
        # first make sure the devkit metadata (meta.bin) is available
        if not check_integrity(self.meta_file):
            tmp_dir = tempfile.mkdtemp()

            archive_dict = self.archive_dict['devkit']
            download_and_extract_archive(archive_dict['url'],
                                         self.root,
                                         extract_root=tmp_dir,
                                         md5=archive_dict['md5'])
            devkit_folder = _splitexts(os.path.basename(
                archive_dict['url']))[0]
            meta = parse_devkit(os.path.join(tmp_dir, devkit_folder))
            self._save_meta_file(*meta)

            shutil.rmtree(tmp_dir)

        # then fetch and lay out the requested split, if not already present
        if not os.path.isdir(self.split_folder):
            archive_dict = self.archive_dict[self.split]
            download_and_extract_archive(archive_dict['url'],
                                         self.root,
                                         extract_root=self.split_folder,
                                         md5=archive_dict['md5'])

            if self.split == 'train':
                prepare_train_folder(self.split_folder)
            elif self.split == 'val':
                val_wnids = self._load_meta_file()[1]
                prepare_val_folder(self.split_folder, val_wnids)
        else:
            msg = (
                "You set download=True, but a folder '{}' already exist in "
                "the root directory. If you want to re-download or re-extract the "
                "archive, delete the folder.")
            print(msg.format(self.split))

    @property
    def meta_file(self):
        # serialized (wnid_to_classes, val_wnids) tuple, see _save_meta_file
        return os.path.join(self.root, 'meta.bin')

    def _load_meta_file(self):
        if check_integrity(self.meta_file):
            return torch.load(self.meta_file)
        else:
            raise RuntimeError("Meta file not found or corrupted.",
                               "You can use download=True to create it.")

    def _save_meta_file(self, wnid_to_class, val_wnids):
        torch.save((wnid_to_class, val_wnids), self.meta_file)

    @property
    def split_folder(self):
        return os.path.join(self.root, self.split)

    def extra_repr(self):
        return "Split: {split}".format(**self.__dict__)
def parse_devkit(root):
    """Parse a devkit folder into (wnid -> class names, ordered val wnids)."""
    idx_to_wnid, wnid_to_classes = parse_meta(root)
    # translate each ground-truth class index into its WordNet id
    val_wnids = [idx_to_wnid[idx] for idx in parse_val_groundtruth(root)]
    return wnid_to_classes, val_wnids
def parse_meta(devkit_root, path='data', filename='meta.mat'):
    """Read the devkit ``meta.mat`` and return (idx -> wnid, wnid -> names).

    Only leaf synsets (records with zero children) are kept.
    """
    import scipy.io as sio

    metafile = os.path.join(devkit_root, path, filename)
    records = sio.loadmat(metafile, squeeze_me=True)['synsets']
    # field 4 of each record holds the number of children; keep leaves only
    children_counts = list(zip(*records))[4]
    leaves = [records[i] for i, count in enumerate(children_counts) if count == 0]
    idcs, wnids, classes = list(zip(*leaves))[:3]
    # each 'words' entry is a comma-separated list of synonym class names
    classes = [tuple(names.split(', ')) for names in classes]
    idx_to_wnid = dict(zip(idcs, wnids))
    wnid_to_classes = dict(zip(wnids, classes))
    return idx_to_wnid, wnid_to_classes
def parse_val_groundtruth(devkit_root,
                          path='data',
                          filename='ILSVRC2012_validation_ground_truth.txt'):
    """Return the validation ground-truth class indices, one per line, in order."""
    gt_path = os.path.join(devkit_root, path, filename)
    with open(gt_path, 'r') as txtfh:
        return [int(line) for line in txtfh.readlines()]
def prepare_train_folder(folder):
    """Unpack each per-class tar in *folder* into a same-named subfolder."""
    for entry in os.listdir(folder):
        archive = os.path.join(folder, entry)
        # extract next to the archive, dropping the .tar suffix, then delete it
        extract_archive(archive,
                        os.path.splitext(archive)[0],
                        remove_finished=True)
def prepare_val_folder(folder, wnids):
    """Move the sorted validation images into one subfolder per wnid.

    ``wnids`` is parallel to the sorted image list: the i-th image belongs to
    the i-th wnid.
    """
    images = sorted(os.path.join(folder, name) for name in os.listdir(folder))

    for wnid in set(wnids):
        os.mkdir(os.path.join(folder, wnid))

    for wnid, image in zip(wnids, images):
        shutil.move(image, os.path.join(folder, wnid, os.path.basename(image)))
def _splitexts(root):
exts = []
ext = '.'
while ext:
root, ext = os.path.splitext(root)
exts.append(ext)
return root, ''.join(reversed(exts))
|
2eb22a2a7c62c893a979c6ce8f460f9cb1d554ea
|
32809f6f425bf5665fc19de2bc929bacc3eeb469
|
/src/0945-Minimum-Increment-to-Make-Array-Unique/0945.py
|
2de851d68712850986a9c4c51a53a5ade9f835c8
|
[] |
no_license
|
luliyucoordinate/Leetcode
|
9f6bf01f79aa680e2dff11e73e4d10993467f113
|
bcc04d49969654cb44f79218a7ef2fd5c1e5449a
|
refs/heads/master
| 2023-05-25T04:58:45.046772
| 2023-05-24T11:57:20
| 2023-05-24T11:57:20
| 132,753,892
| 1,575
| 569
| null | 2023-05-24T11:57:22
| 2018-05-09T12:30:59
|
C++
|
UTF-8
|
Python
| false
| false
| 579
|
py
|
0945.py
|
class Solution:
    def minIncrementForUnique(self, A):
        """
        Return the minimum number of +1 increments needed to make all values
        of A unique.

        :type A: List[int]
        :rtype: int

        Generalized from the original fixed-size counting table (hard-coded to
        values < 40000, which also broke on negative inputs): this sort-based
        version works for any integers, runs in O(n log n) time and O(n)
        space, and does not mutate A.
        """
        moves = 0
        # largest value already claimed by a previous element (None = none yet)
        prev = None
        for value in sorted(A):
            if prev is not None and value <= prev:
                # value collides with (or falls below) the claimed range:
                # it must be raised to prev + 1
                moves += prev + 1 - value
                prev += 1
            else:
                prev = value
        return moves
|
d5396da83f13c99ad0e15eb74d21acdce3291e1e
|
3c6b36eb1f4f9760c52903f6d0ec4a501f948c90
|
/osp/test/citations/utils/test_clean_field.py
|
6b56816a35ced7eac7df62ca9313a0c89a1f6938
|
[
"Apache-2.0"
] |
permissive
|
davidmcclure/open-syllabus-project
|
38444249af845013e3f281a7a713dca83159c56e
|
078cfd4c5a257fbfb0901d43bfbc6350824eed4e
|
refs/heads/master
| 2021-06-30T21:47:07.636558
| 2021-06-27T15:15:35
| 2021-06-27T15:15:35
| 50,152,020
| 220
| 14
|
Apache-2.0
| 2021-06-27T15:11:15
| 2016-01-22T02:29:57
|
Python
|
UTF-8
|
Python
| false
| false
| 373
|
py
|
test_clean_field.py
|
import pytest
from osp.citations.utils import clean_field
@pytest.mark.parametrize('raw,clean', [

    ('War and Peace', 'War and Peace'),

    # Strip whitespace.
    (' War and Peace ', 'War and Peace'),

    # Empty -> None.
    (None, None),
    ('', None),
    (' ', None),

])
def test_clean_field(raw, clean, mock_hlom):
    """clean_field() trims surrounding whitespace and maps empty-ish values to None."""
    # mock_hlom is a project fixture; presumably provides database/app context
    # required by the import — TODO confirm why it is needed here.
    assert clean_field(raw) == clean
|
cbe9305740f7e0a9e8c7be9dbfcb606f8abb2758
|
da1500e0d3040497614d5327d2461a22e934b4d8
|
/third_party/web_platform_tests/XMLHttpRequest/resources/auth2/auth.py
|
8b6682686c8709994a19ae430ed2120a047f9398
|
[
"BSD-3-Clause",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"Apache-2.0",
"MIT"
] |
permissive
|
youtube/cobalt
|
34085fc93972ebe05b988b15410e99845efd1968
|
acefdaaadd3ef46f10f63d1acae2259e4024d383
|
refs/heads/main
| 2023-09-01T13:09:47.225174
| 2023-09-01T08:54:54
| 2023-09-01T08:54:54
| 50,049,789
| 169
| 80
|
BSD-3-Clause
| 2023-09-14T21:50:50
| 2016-01-20T18:11:34
| null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
auth.py
|
import imp
import os
here = os.path.split(os.path.abspath(__file__))[0]
def main(request, response):
    """wptserve handler that delegates to the shared ``../authentication.py``.

    NOTE(review): ``imp.load_source`` is deprecated (``imp`` was removed in
    Python 3.12); consider migrating to ``importlib`` machinery.
    """
    # load the sibling module fresh on every request, under an empty name
    auth = imp.load_source("", os.path.join(here,
                                            "..",
                                            "authentication.py"))
    return auth.main(request, response)
|
11be347a938f5c901b7ccf57ca56b023b6988126
|
fb1e852da0a026fb59c8cb24aeb40e62005501f1
|
/infoxlm/fairseq/fairseq/tasks/semisupervised_translation.py
|
612ea48c46b1e28a4ab39522ac6f3323d0e6316a
|
[
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"MIT"
] |
permissive
|
microsoft/unilm
|
134aa44867c5ed36222220d3f4fd9616d02db573
|
b60c741f746877293bb85eed6806736fc8fa0ffd
|
refs/heads/master
| 2023-08-31T04:09:05.779071
| 2023-08-29T14:07:57
| 2023-08-29T14:07:57
| 198,350,484
| 15,313
| 2,192
|
MIT
| 2023-08-19T11:33:20
| 2019-07-23T04:15:28
|
Python
|
UTF-8
|
Python
| false
| false
| 19,504
|
py
|
semisupervised_translation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import os
from fairseq.data import (
BacktranslationDataset,
IndexedCachedDataset,
IndexedDataset,
IndexedRawTextDataset,
LanguagePairDataset,
NoisingDataset,
RoundRobinZipDatasets,
)
from fairseq.models import FairseqMultiModel
from fairseq.sequence_generator import SequenceGenerator
from .multilingual_translation import MultilingualTranslationTask
from . import register_task
def _get_bt_dataset_key(lang_pair):
return "bt:" + lang_pair
def _get_denoising_dataset_key(lang_pair):
return "denoising:" + lang_pair
# ported from UnsupervisedMT
def parse_lambda_config(x):
    """
    Parse the configuration of lambda coefficient (for scheduling).

    Returns ``(initial_value, schedule)`` where ``schedule`` is either None
    (constant lambda) or a list of ``(step, value)`` pairs:
        x = "3"                 # constant lambda equal to 3
        x = "0:1,1000:0"        # starts at 1, linearly decreases to 0 over
                                # the first 1000 iterations
        x = "0:0,1000:0,2000:1" # 0 for the first 1000 iterations, then
                                # linearly increases to 1 until iteration 2000
    """
    pieces = x.split(',')
    if len(pieces) == 1:
        # a single value: constant lambda, no schedule
        return float(x), None
    pairs = [piece.split(':') for piece in pieces]
    assert all(len(pair) == 2 for pair in pairs)
    assert all(step.isdigit() for step, _ in pairs)
    # steps must be strictly increasing
    steps = [int(step) for step, _ in pairs]
    assert all(a < b for a, b in zip(steps, steps[1:]))
    return float(pairs[0][1]), [(int(step), float(value)) for step, value in pairs]
@register_task('semisupervised_translation')
class SemisupervisedTranslationTask(MultilingualTranslationTask):
    """A task for training multiple translation models simultaneously.

    We iterate round-robin over batches from multiple language pairs, ordered
    according to the `--lang-pairs` argument.

    The training loop is roughly:

        for i in range(len(epoch)):
            for lang_pair in args.lang_pairs:
                batch = next_batch_for_lang_pair(lang_pair)
                loss = criterion(model_for_lang_pair(lang_pair), batch)
                loss.backward()
            optimizer.step()

    In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset
    (e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that
    implements the `FairseqMultiModel` interface.

    During inference it is required to specify a single `--source-lang` and
    `--target-lang`, instead of `--lang-pairs`.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        MultilingualTranslationTask.add_args(parser)
        parser.add_argument('--lambda-parallel-config', default="1.0", type=str, metavar='CONFIG',
                            help='cross-entropy reconstruction coefficient (parallel data). '
                                 'use fixed weight during training if set to floating point number. '
                                 'use piecewise linear function over number of updates to schedule the '
                                 'weight with the format: w0:step0,w1:step1,...')
        parser.add_argument('--lambda-denoising-config', default="0.0", type=str, metavar='CONFIG',
                            help='Cross-entropy reconstruction coefficient (denoising autoencoding)'
                                 'use fixed weight during training if set to floating point number. '
                                 'use piecewise linear function over number of updates to schedule the '
                                 'weight with the format: w0:step0,w1:step1,...')
        parser.add_argument('--lambda-otf-bt-config', default="0.0", type=str, metavar='CONFIG',
                            help='cross-entropy reconstruction coefficient (on-the-fly back-translation parallel data)'
                                 'use fixed weight during training if set to floating point number. '
                                 'use piecewise linear function over number of updates to schedule the '
                                 'weight with the format: w0:step0,w1:step1,...')
        parser.add_argument('--bt-max-len-a', default=1.1, type=float, metavar='N',
                            help='generate back-translated sequences of maximum length ax + b, where x is the '
                                 'source length')
        parser.add_argument('--bt-max-len-b', default=10.0, type=float, metavar='N',
                            help='generate back-translated sequences of maximum length ax + b, where x is the '
                                 'source length')
        parser.add_argument('--bt-beam-size', default=1, type=int, metavar='N',
                            help='beam size used in beam search of online back-translation')
        parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N',
                            help='maximum word shuffle distance for denoising autoencoding data generation')
        parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N',
                            help='word dropout probability for denoising autoencoding data generation')
        parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N',
                            help='word blanking probability for denoising autoencoding data generation')
        # fmt: on

    def __init__(self, args, dicts, training):
        super().__init__(args, dicts, training)
        # each lambda may be a constant or a piecewise-linear schedule
        self.lambda_parallel, self.lambda_parallel_steps = parse_lambda_config(args.lambda_parallel_config)
        self.lambda_otf_bt, self.lambda_otf_bt_steps = parse_lambda_config(args.lambda_otf_bt_config)
        self.lambda_denoising, self.lambda_denoising_steps = parse_lambda_config(args.lambda_denoising_config)
        if (self.lambda_denoising > 0.0 or self.lambda_denoising_steps is not None):
            # denoising autoencoding trains "tgt-tgt" models, one per target language
            denoising_lang_pairs = [
                "%s-%s" % (tgt, tgt)
                for tgt in {lang_pair.split('-')[1] for lang_pair in args.lang_pairs}
            ]
            self.model_lang_pairs = self.model_lang_pairs + denoising_lang_pairs
        self.backtranslate_datasets = {}
        # lang_pair -> callable producing back-translations; filled in build_model
        self.backtranslators = {}

    @classmethod
    def setup_task(cls, args, **kwargs):
        dicts, training = MultilingualTranslationTask.prepare(args, **kwargs)
        return cls(args, dicts, training)

    def load_dataset(self, split, epoch=0, **kwargs):
        """Load a dataset split."""
        # data paths are rotated per epoch when several are given (colon-separated)
        paths = self.args.data.split(':')
        assert len(paths) > 0
        data_path = paths[epoch % len(paths)]

        def split_exists(split, src, tgt, lang):
            # check for a binarized or raw-text dataset file on disk;
            # src=None denotes a monolingual "<split>.<tgt>-None.<tgt>" file
            if src is not None:
                filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
            else:
                filename = os.path.join(data_path, '{}.{}-None.{}'.format(split, src, tgt))
            if self.args.raw_text and IndexedRawTextDataset.exists(filename):
                return True
            elif not self.args.raw_text and IndexedDataset.exists(filename):
                return True
            return False

        def indexed_dataset(path, dictionary):
            # choose the dataset implementation matching --raw-text/--lazy-load
            if self.args.raw_text:
                return IndexedRawTextDataset(path, dictionary)
            elif IndexedDataset.exists(path):
                if self.args.lazy_load:
                    return IndexedDataset(path, fix_lua_indexing=True)
                else:
                    return IndexedCachedDataset(path, fix_lua_indexing=True)
            return None

        # load parallel datasets
        src_datasets, tgt_datasets = {}, {}
        if (self.lambda_parallel > 0.0 or self.lambda_parallel_steps is not None or not split.startswith("train")):
            for lang_pair in self.lang_pairs:
                src, tgt = lang_pair.split('-')
                # files may be named either src-tgt or tgt-src on disk
                if split_exists(split, src, tgt, src):
                    prefix = os.path.join(data_path, '{}.{}-{}.'.format(split, src, tgt))
                elif split_exists(split, tgt, src, src):
                    prefix = os.path.join(data_path, '{}.{}-{}.'.format(split, tgt, src))
                else:
                    continue
                src_datasets[lang_pair] = indexed_dataset(prefix + src, self.dicts[src])
                tgt_datasets[lang_pair] = indexed_dataset(prefix + tgt, self.dicts[tgt])
                print('| parallel-{} {} {} examples'.format(data_path, split, len(src_datasets[lang_pair])))
            if len(src_datasets) == 0:
                raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))

        # back translation datasets (training splits only)
        backtranslate_datasets = {}
        if (self.lambda_otf_bt > 0.0 or self.lambda_otf_bt_steps is not None) and split.startswith("train"):
            for lang_pair in self.lang_pairs:
                src, tgt = lang_pair.split('-')
                if not split_exists(split, tgt, None, tgt):
                    raise FileNotFoundError('Dataset not found: backtranslation {} ({})'.format(split, data_path))
                filename = os.path.join(data_path, '{}.{}-None.{}'.format(split, tgt, tgt))
                dataset = indexed_dataset(filename, self.dicts[tgt])
                # monolingual target-side data used as BT input
                lang_pair_dataset_tgt = LanguagePairDataset(
                    dataset,
                    dataset.sizes,
                    self.dicts[tgt],
                    left_pad_source=self.args.left_pad_source,
                    left_pad_target=self.args.left_pad_target,
                )
                lang_pair_dataset = LanguagePairDataset(
                    dataset,
                    dataset.sizes,
                    src_dict=self.dicts[src],
                    tgt=dataset,
                    tgt_sizes=dataset.sizes,
                    tgt_dict=self.dicts[tgt],
                    left_pad_source=self.args.left_pad_source,
                    left_pad_target=self.args.left_pad_target,
                )
                backtranslate_datasets[lang_pair] = BacktranslationDataset(
                    tgt_dataset=self.alter_dataset_langtok(
                        lang_pair_dataset_tgt,
                        src_eos=self.dicts[tgt].eos(),
                        src_lang=tgt,
                        tgt_lang=src,
                    ),
                    backtranslation_fn=self.backtranslators[lang_pair],
                    src_dict=self.dicts[src], tgt_dict=self.dicts[tgt],
                    output_collater=self.alter_dataset_langtok(
                        lang_pair_dataset=lang_pair_dataset,
                        src_eos=self.dicts[src].eos(),
                        src_lang=src,
                        tgt_eos=self.dicts[tgt].eos(),
                        tgt_lang=tgt,
                    ).collater,
                )
                print('| backtranslate-{}: {} {} {} examples'.format(
                    tgt, data_path, split, len(backtranslate_datasets[lang_pair]),
                ))
                self.backtranslate_datasets[lang_pair] = backtranslate_datasets[lang_pair]

        # denoising autoencoder (training splits only)
        noising_datasets = {}
        if (self.lambda_denoising > 0.0 or self.lambda_denoising_steps is not None) and split.startswith("train"):
            for lang_pair in self.lang_pairs:
                _, tgt = lang_pair.split('-')
                if not split_exists(split, tgt, None, tgt):
                    continue
                filename = os.path.join(data_path, '{}.{}-None.{}'.format(split, tgt, tgt))
                # two handles on the same data: noised source vs clean target
                tgt_dataset1 = indexed_dataset(filename, self.dicts[tgt])
                tgt_dataset2 = indexed_dataset(filename, self.dicts[tgt])
                noising_dataset = NoisingDataset(
                    tgt_dataset1,
                    self.dicts[tgt],
                    seed=1,
                    max_word_shuffle_distance=self.args.max_word_shuffle_distance,
                    word_dropout_prob=self.args.word_dropout_prob,
                    word_blanking_prob=self.args.word_blanking_prob,
                )
                noising_datasets[lang_pair] = self.alter_dataset_langtok(
                    LanguagePairDataset(
                        noising_dataset,
                        tgt_dataset1.sizes,
                        self.dicts[tgt],
                        tgt_dataset2,
                        tgt_dataset2.sizes,
                        self.dicts[tgt],
                        left_pad_source=self.args.left_pad_source,
                        left_pad_target=self.args.left_pad_target,
                    ),
                    src_eos=self.dicts[tgt].eos(),
                    src_lang=tgt,
                    tgt_eos=self.dicts[tgt].eos(),
                    tgt_lang=tgt,
                )
                print('| denoising-{}: {} {} {} examples'.format(
                    tgt, data_path, split, len(noising_datasets[lang_pair]),
                ))

        def language_pair_dataset(lang_pair):
            # wrap a parallel pair with language-token alteration
            src, tgt = lang_pair.split('-')
            src_dataset, tgt_dataset = src_datasets[lang_pair], tgt_datasets[lang_pair]
            return self.alter_dataset_langtok(
                LanguagePairDataset(
                    src_dataset, src_dataset.sizes, self.dicts[src],
                    tgt_dataset, tgt_dataset.sizes, self.dicts[tgt],
                    left_pad_source=self.args.left_pad_source,
                    left_pad_target=self.args.left_pad_target,
                    max_source_positions=self.args.max_source_positions,
                    max_target_positions=self.args.max_target_positions,
                ),
                self.dicts[src].eos(),
                src,
                self.dicts[tgt].eos(),
                tgt,
            )

        # one round-robin dataset mixing parallel, BT and denoising entries
        self.datasets[split] = RoundRobinZipDatasets(
            OrderedDict([
                (lang_pair, language_pair_dataset(lang_pair))
                for lang_pair in src_datasets.keys()
            ] + [
                (_get_bt_dataset_key(lang_pair), dataset)
                for lang_pair, dataset in backtranslate_datasets.items()
            ] + [
                (_get_denoising_dataset_key(lang_pair), dataset)
                for lang_pair, dataset in noising_datasets.items()
            ]),
            eval_key=None if self.training else "%s-%s" % (self.args.source_lang, self.args.target_lang),
        )

    def build_model(self, args):
        from fairseq import models
        model = models.build_model(args, self)
        if not isinstance(model, FairseqMultiModel):
            raise ValueError('SemisupervisedTranslationTask requires a FairseqMultiModel architecture')

        # create SequenceGenerator for each model that has backtranslation dependency on it
        self.sequence_generators = {}
        if (self.lambda_otf_bt > 0.0 or self.lambda_otf_bt_steps is not None) and self.training:
            for lang_pair in self.lang_pairs:
                src, tgt = lang_pair.split('-')
                # BT generates in the reverse direction: tgt -> src
                key = '{}-{}'.format(tgt, src)
                self.sequence_generators[key] = SequenceGenerator(
                    tgt_dict=self.dicts[src],
                    beam_size=args.bt_beam_size,
                    max_len_a=args.bt_max_len_a,
                    max_len_b=args.bt_max_len_b,
                )
                decoder_lang_tok_idx = self.get_decoder_langtok(src)

                # default args bind per-iteration values, avoiding the
                # late-binding-closure pitfall in this loop
                def backtranslate_fn(
                    sample, model=model.models[key],
                    bos_token=decoder_lang_tok_idx,
                    sequence_generator=self.sequence_generators[key],
                ):
                    return sequence_generator.generate(
                        [model],
                        sample,
                        bos_token=bos_token,
                    )
                self.backtranslators[lang_pair] = backtranslate_fn

        return model

    def train_step(self, sample, model, criterion, optimizer, ignore_grad=False):
        model.train()
        agg_loss, agg_sample_size, agg_logging_output = 0., 0., {}

        def forward_backward(model, samples, logging_output_key, weight):
            # accumulate loss/size/logs across the three objectives
            nonlocal agg_loss, agg_sample_size, agg_logging_output
            if samples is None or len(samples) == 0:
                return
            loss, sample_size, logging_output = criterion(model, samples)
            if ignore_grad:
                loss *= 0
            else:
                loss *= weight
            optimizer.backward(loss)
            agg_loss += loss.detach().item()
            # TODO make summing of the sample sizes configurable
            agg_sample_size += sample_size
            agg_logging_output[logging_output_key] = logging_output

        if self.lambda_parallel > 0.0:
            for lang_pair in self.lang_pairs:
                forward_backward(model.models[lang_pair], sample[lang_pair], lang_pair, self.lambda_parallel)

        if self.lambda_otf_bt > 0.0:
            for lang_pair in self.lang_pairs:
                sample_key = _get_bt_dataset_key(lang_pair)
                forward_backward(model.models[lang_pair], sample[sample_key], sample_key, self.lambda_otf_bt)

        if self.lambda_denoising > 0.0:
            for lang_pair in self.lang_pairs:
                _, tgt = lang_pair.split('-')
                sample_key = _get_denoising_dataset_key(lang_pair)
                forward_backward(model.models['{0}-{0}'.format(tgt)], sample[sample_key], sample_key, self.lambda_denoising)

        return agg_loss, agg_sample_size, agg_logging_output

    def update_step(self, num_updates):
        def lambda_step_func(config, n_iter):
            """
            Update a lambda value according to its schedule configuration.
            """
            # find the schedule segment containing n_iter and interpolate linearly
            ranges = [i for i in range(len(config) - 1) if config[i][0] <= n_iter < config[i + 1][0]]
            if len(ranges) == 0:
                # past the last breakpoint: hold the final value
                assert n_iter >= config[-1][0]
                return config[-1][1]
            assert len(ranges) == 1
            i = ranges[0]
            x_a, y_a = config[i]
            x_b, y_b = config[i + 1]
            return y_a + (n_iter - x_a) * float(y_b - y_a) / float(x_b - x_a)

        if self.lambda_parallel_steps is not None:
            self.lambda_parallel = lambda_step_func(self.lambda_parallel_steps, num_updates)
        if self.lambda_denoising_steps is not None:
            self.lambda_denoising = lambda_step_func(self.lambda_denoising_steps, num_updates)
        if self.lambda_otf_bt_steps is not None:
            self.lambda_otf_bt = lambda_step_func(self.lambda_otf_bt_steps, num_updates)

    def aggregate_logging_outputs(self, logging_outputs, criterion):
        # aggregate logging outputs for each language pair
        logging_output_keys = {
            key
            for logging_output in logging_outputs
            for key in logging_output
        }
        lang_pair_keys = set(self.lang_pairs + [
            _get_bt_dataset_key(lang_pair)
            for lang_pair in self.lang_pairs
        ] + [
            _get_denoising_dataset_key(lang_pair)
            for lang_pair in self.lang_pairs
        ])
        logging_output_keys = logging_output_keys.intersection(lang_pair_keys)
        return super().aggregate_logging_outputs(logging_outputs, criterion, logging_output_keys)
|
00131fab13b704870de4a2453e7aa99e38d101fc
|
e3cfab409afb5ff9a0b3812bf848be6ca9239cee
|
/pygeodesy/auxilats/_CX_4.py
|
8f0be523af660c0f819c189cb7568d29290f9271
|
[
"MIT"
] |
permissive
|
mrJean1/PyGeodesy
|
565266a4f7f6cda5abe98e915bbd868f6cbe1760
|
eba35704b248a7a0388b30f3cea19793921e99b7
|
refs/heads/master
| 2023-08-23T13:58:20.069917
| 2023-08-20T18:50:45
| 2023-08-20T18:50:45
| 68,028,481
| 283
| 66
| null | 2022-04-09T00:40:52
| 2016-09-12T16:49:10
|
Python
|
UTF-8
|
Python
| false
| false
| 8,662
|
py
|
_CX_4.py
|
# -*- coding: utf-8 -*-
u'''Coefficients for C{_AUXLATITUDE_ORDER} 4 from I{Karney}'s C++ class U{AuxLatitude
<https://GeographicLib.SourceForge.io/C++/doc/classGeographicLib_1_1AuxLatitude.html>}
transcoded to a double, uniquified Python C{dict[auxout][auxin]}.
Copyright (C) Charles Karney (2022-2023) <Karney@Alum.MIT.edu> and licensed under the
MIT/X11 License. For more information, see <https://GeographicLib.SourceForge.io>.
'''
# make sure int/int division yields float quotient
from __future__ import division as _; del _ # PYCHOK semicolon
from pygeodesy.auxilats.auxily import Aux, _Ufloats
from pygeodesy.constants import _0_0, _0_25, _0_5, _1_0, _N_1_0, \
_1_5, _2_0, _N_2_0, _4_0
__all__ = ()
__version__ = '23.08.19'
_f, _u = float, _Ufloats()
_coeffs_4 = _u._Coeffs(4, { # GEOGRAPHICLIB_AUXLATITUDE_ORDER == 4
Aux.PHI: {
# C[phi,phi] skipped
Aux.BETA: _u( # C[phi,beta]; even coeffs only
_0_0, _1_0,
_0_0, _0_5,
1 / _f(3),
_0_25,),
Aux.THETA: _u( # C[phi,theta]; even coeffs only
_N_2_0, _2_0,
-_4_0, _2_0,
8 / _f(3),
_4_0,),
Aux.MU: _u( # C[phi,mu]; even coeffs only
-27 / _f(32), _1_5,
-55 / _f(32), 21 / _f(16),
151 / _f(96),
1097 / _f(512),),
Aux.CHI: _u( # C[phi,chi]
116 / _f(45), _N_2_0, -2 / _f(3), _2_0,
-227 / _f(45), -8 / _f(5), 7 / _f(3),
-136 / _f(35), 56 / _f(15),
4279 / _f(630),),
Aux.XI: _u( # C[phi,xi]
-2582 / _f(14175), -16 / _f(35), 4 / _f(45), 4 / _f(3),
-11966 / _f(14175), 152 / _f(945), 46 / _f(45),
3802 / _f(14175), 3044 / _f(2835),
6059 / _f(4725),)
},
Aux.BETA: {
Aux.PHI: _u( # C[beta,phi]; even coeffs only
_0_0, _N_1_0,
_0_0, _0_5,
-1 / _f(3),
_0_25,),
# C[beta,beta] skipped
Aux.THETA: _u( # C[beta,theta]; even coeffs only
_0_0, _1_0,
_0_0, _0_5,
1 / _f(3),
_0_25,),
Aux.MU: _u( # C[beta,mu]; even coeffs only
-9 / _f(32), _0_5,
-37 / _f(96), 5 / _f(16),
29 / _f(96),
539 / _f(1536),),
Aux.CHI: _u( # C[beta,chi]
38 / _f(45), -1 / _f(3), -2 / _f(3), _1_0,
-7 / _f(9), -14 / _f(15), 5 / _f(6),
-34 / _f(21), 16 / _f(15),
2069 / _f(1260),),
Aux.XI: _u( # C[beta,xi]
-1082 / _f(14175), -46 / _f(315), 4 / _f(45), 1 / _f(3),
-338 / _f(2025), 68 / _f(945), 17 / _f(90),
1102 / _f(14175), 461 / _f(2835),
3161 / _f(18900),)
},
Aux.THETA: {
Aux.PHI: _u( # C[theta,phi]; even coeffs only
_2_0, _N_2_0,
-_4_0, _2_0,
-8 / _f(3),
_4_0,),
Aux.BETA: _u( # C[theta,beta]; even coeffs only
_0_0, _N_1_0,
_0_0, _0_5,
-1 / _f(3),
_0_25,),
# C[theta,theta] skipped
Aux.MU: _u( # C[theta,mu]; even coeffs only
-23 / _f(32), -1 / _f(2),
-5 / _f(96), 5 / _f(16),
1 / _f(32),
283 / _f(1536),),
Aux.CHI: _u( # C[theta,chi]
4 / _f(9), -2 / _f(3), -2 / _f(3), _0_0,
-23 / _f(45), -4 / _f(15), 1 / _f(3),
-24 / _f(35), 2 / _f(5),
83 / _f(126),),
Aux.XI: _u( # C[thet),a,xi]
-2102 / _f(14175), -158 / _f(315), 4 / _f(45), -2 / _f(3),
934 / _f(14175), -16 / _f(945), 16 / _f(45),
922 / _f(14175), -232 / _f(2835),
719 / _f(4725),)
},
Aux.MU: {
Aux.PHI: _u( # C[mu,phi]; even coeffs only
9 / _f(16), -3 / _f(2),
-15 / _f(32), 15 / _f(16),
-35 / _f(48),
315 / _f(512),),
Aux.BETA: _u( # C[mu,beta]; even coeffs only
3 / _f(16), -1 / _f(2),
1 / _f(32), -1 / _f(16),
-1 / _f(48),
-5 / _f(512),),
Aux.THETA: _u( # C[mu,theta]; even coeffs only
13 / _f(16), _0_5,
33 / _f(32), -1 / _f(16),
-5 / _f(16),
-261 / _f(512),),
# C[mu,mu] skipped
Aux.CHI: _u( # C[mu,chi]
41 / _f(180), 5 / _f(16), -2 / _f(3), _0_5,
557 / _f(1440), -3 / _f(5), 13 / _f(48),
-103 / _f(140), 61 / _f(240),
49561 / _f(161280),),
Aux.XI: _u( # C[mu,xi]
-1609 / _f(28350), 121 / _f(1680), 4 / _f(45), -1 / _f(6),
16463 / _f(453600), 26 / _f(945), -29 / _f(720),
449 / _f(28350), -1003 / _f(45360),
-40457 / _f(2419200),)
},
Aux.CHI: {
Aux.PHI: _u( # C[chi,phi]
-82 / _f(45), 4 / _f(3), 2 / _f(3), _N_2_0,
-13 / _f(9), -16 / _f(15), 5 / _f(3),
34 / _f(21), -26 / _f(15),
1237 / _f(630),),
Aux.BETA: _u( # C[chi,beta]
-16 / _f(45), _0_0, 2 / _f(3), _N_1_0,
19 / _f(45), -2 / _f(5), 1 / _f(6),
16 / _f(105), -1 / _f(15),
17 / _f(1260),),
Aux.THETA: _u( # C[chi,theta]
-2 / _f(9), 2 / _f(3), 2 / _f(3), _0_0,
43 / _f(45), 4 / _f(15), -1 / _f(3),
2 / _f(105), -2 / _f(5),
-55 / _f(126),),
Aux.MU: _u( # C[chi,mu]
1 / _f(360), -37 / _f(96), 2 / _f(3), -1 / _f(2),
437 / _f(1440), -1 / _f(15), -1 / _f(48),
37 / _f(840), -17 / _f(480),
-4397 / _f(161280),),
# C[chi,chi] skipped
Aux.XI: _u( # C[chi,xi]
-2312 / _f(14175), -88 / _f(315), 34 / _f(45), -2 / _f(3),
6079 / _f(14175), -184 / _f(945), 1 / _f(45),
772 / _f(14175), -106 / _f(2835),
-167 / _f(9450),)
},
Aux.XI: {
Aux.PHI: _u( # C[xi,phi]
538 / _f(4725), 88 / _f(315), -4 / _f(45), -4 / _f(3),
-2482 / _f(14175), 8 / _f(105), 34 / _f(45),
-898 / _f(14175), -1532 / _f(2835),
6007 / _f(14175),),
Aux.BETA: _u( # C[xi,beta]
34 / _f(675), 32 / _f(315), -4 / _f(45), -1 / _f(3),
74 / _f(2025), -4 / _f(315), -7 / _f(90),
2 / _f(14175), -83 / _f(2835),
-797 / _f(56700),),
Aux.THETA: _u( # C[xi,theta]
778 / _f(4725), 62 / _f(105), -4 / _f(45), 2 / _f(3),
12338 / _f(14175), -32 / _f(315), 4 / _f(45),
-1618 / _f(14175), -524 / _f(2835),
-5933 / _f(14175),),
Aux.MU: _u( # C[xi,mu]
1297 / _f(18900), -817 / _f(10080), -4 / _f(45), 1 / _f(6),
-29609 / _f(453600), -2 / _f(35), 49 / _f(720),
-2917 / _f(56700), 4463 / _f(90720),
331799 / _f(7257600),),
Aux.CHI: _u( # C[xi,chi]
2458 / _f(4725), 46 / _f(315), -34 / _f(45), 2 / _f(3),
3413 / _f(14175), -256 / _f(315), 19 / _f(45),
-15958 / _f(14175), 248 / _f(567),
16049 / _f(28350),) # PYCHOK exported
# C[xi,xi] skipped
}
})
# _ptrs_4 = (0, 0, 6, 12, 18, 28, 38, 44, 44, 50, 56, 66,
# 76, 82, 88, 88, 94, 104, 114, 120, 126, 132, 132, 142,
# 152, 162, 172, 182, 192, 192, 202, 212, 222, 232, 242, 252,
# 252) # PYCHOK exported
del _f, _u
# **) MIT License
#
# Copyright (C) 2023-2023 -- mrJean1 at Gmail -- All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
|
7467182243f7e13e03f50be1eb53f31f8e21229d
|
f86c72ebd4a46d12cf8c750c1f7526c42e40111d
|
/utils/hershey_to_py.py
|
1f1d396bf20004f36b3098daebf8878539bdf5ec
|
[
"MIT"
] |
permissive
|
russhughes/st7789_mpy
|
65e5052d4ca28db6423af38d24a3c90622a39cea
|
47db47361eb89719cbc1c59b36e31cb69be03911
|
refs/heads/master
| 2023-07-24T10:32:43.458118
| 2023-06-11T06:26:28
| 2023-06-11T06:26:28
| 237,678,059
| 404
| 103
|
NOASSERTION
| 2023-07-13T04:30:05
| 2020-02-01T21:07:38
|
Python
|
UTF-8
|
Python
| false
| false
| 7,189
|
py
|
hershey_to_py.py
|
#!/usr/bin/env python3
"""
Convert Hershey font data to python module.
Usage: hershey_to_py.py <glyph_file> [map_file]
The glyph_file (hf) is the Hershey font data file. The map_file (hmp) is an optional file that maps
the Hershey font data to a character set. The hershey_to_py.py script is compatible with the output
from my fork of LingDong's ttf2hershey python2 program available from my github repository at
https://github.com/russhughes/ttf2hershey. Not all TrueType fonts can be converted. Some may
result in a font with out-of-order or missing characters.
A Hershey font file is a text file with the following format:
Optional header lines:
# WIDTH = 40 width of the font
# HEIGHT = 45 height of the font
# FIRST = 32 first character in the font
# LAST = 127 last character in the font
Comment lines start with a # and are ignored with the exception of the optional header lines.
Glyph data lines have the following format:
Bytes 1-5: The character number
Bytes 6-8: The number of vector pairs in the glyph
Bytes 9: left hand position
Bytes 10: right hand position
Bytes 11+: The vector data as a string of characters, 2 characters per vector.
Vector values are relative to the ascii value of 'R'. A value of " R" non-drawing move to operation.
Example:
45 6JZLBXBXFLFLB
Character number: 45 (ASCII '-')
Number of vectors: 6
Left hand position: J (ascii value 74 - 82 = -8)
Right hand position: Z (ascii value 90 - 82 = 8)
Vector data: LBXBXFLFLB
The vector data is interpreted as follows:
LB - Line to (-6, -16)
XB - Line to (6, -16)
XF - Line to (6, -12)
LF - Line to (-6, -12)
LB - Line to (-6, -16)
A Hershey Map file is a text file with the following format:
Comment lines start with a # and are ignored.
Map data lines have the following format:
Number of the first glyph to include in the font followed by space and the number of the last glyph
in the font. If the last glyph is 0 then only the first glyph is included.
Example:
32 64
65 127
"""
import argparse
import re
from struct import pack
class HersheyFont:
    """Parsed Hershey font: metrics plus the collection of glyphs."""

    def __init__(self, width=40, height=45, first=32, last=127, glyphs=None):
        # Font metrics (defaults match the classic Hershey dimensions).
        self.width = width
        self.height = height
        # Character-code range covered by the font.
        self.first = first
        self.last = last
        # Any falsy glyph container (None, empty) becomes a fresh empty dict
        # so instances never share a default mutable.
        self.glyphs = glyphs if glyphs else {}
class Glyph:
    """One Hershey glyph: character number, vector string and pair count."""

    def __init__(self, num, vectors, count):
        self.count = count      # number of coordinate pairs in the glyph
        self.vectors = vectors  # raw vector data as characters
        self.num = num          # glyph / character number
def parse_line(keyword_dict, line):
    """Try each compiled regex in `keyword_dict` against `line`.

    Returns (key, match) for the first pattern that matches, in dict
    insertion order, or (None, None) when nothing matches.
    """
    for keyword, pattern in keyword_dict.items():
        found = pattern.search(line)
        if found:
            return keyword, found
    return None, None
HF_KEYWORDS = {
'glyph': re.compile(r'^(?P<num>[0-9 ]{5})(?P<length>[0-9 ]{3})(?P<vectors>.*)$'),
'width': re.compile(r'^# WIDTH = (?P<width>\d+)$'),
'height': re.compile(r'^# HEIGHT = (?P<height>\d+)$'),
'first': re.compile(r'^# FIRST = (?P<first>\d+)$'),
'last': re.compile(r'^# LAST = (?P<last>\d+)$')}
def hershey_load(glyph_file_name, map_file_name=None):
    """
    Load Hershey font, optionally using a map file.

    Args:
        glyph_file_name: path to the Hershey glyph data file.
        map_file_name: optional map file; when given, glyphs are collected
            by number and re-ordered/selected per the map, otherwise they
            are appended in file order.

    Returns:
        HersheyFont with the parsed metrics and selected glyphs.
    """
    glyphs = {}   # glyph number -> Glyph (used only in map-file mode)
    font = []     # glyphs in final output order
    # Defaults, overridden by the optional "# WIDTH = ..." header lines.
    width = 40
    height = 45
    first = 32
    last = 127

    # Read the glyphs file
    with open(glyph_file_name, "r") as file:
        for line in file:
            key, glyph_data = parse_line(HF_KEYWORDS, line.rstrip())
            if key == 'glyph':
                num = int(glyph_data['num'])
                # length-1: the stored count appears to include one extra
                # pair (the left/right hand hint) — TODO confirm.
                if map_file_name is None:
                    font.append(
                        Glyph(num, glyph_data['vectors'], int(glyph_data['length'])-1))
                else:
                    glyphs[num] = Glyph(
                        num, glyph_data['vectors'], int(glyph_data['length'])-1)
            elif key == 'width':
                width = int(glyph_data['width'])
            elif key == 'height':
                height = int(glyph_data['height'])
            elif key == 'first':
                first = int(glyph_data['first'])
            elif key == 'last':
                last = int(glyph_data['last'])

    # Read the map file if one was specified
    if map_file_name is not None:
        map_line = re.compile(r'(?P<begin>\d+)\s+(?P<end>\d+)$')
        with open(map_file_name, "r") as file:
            for line in file:
                if line[0] == '#':
                    continue
                match = map_line.search(line.rstrip())
                if match:
                    begin = int(match['begin'])
                    end = int(match['end'])
                    if end > 0:
                        # Inclusive range of glyph numbers.
                        font.extend(glyphs[glyph_num] for glyph_num in range(begin, end + 1))
                    else:
                        # An end of 0 selects just the single glyph `begin`.
                        font.append(glyphs[begin])

    return HersheyFont(width, height, first, last, font)
def write_font(font):
    """
    Write the packed glyph data to stdout as a `_font` bytes literal.

    Each glyph is emitted as one count byte followed by its raw vector
    characters; the literal is wrapped every 15 bytes using implicit
    string concatenation.
    """
    font_data = bytes()
    for glyph in font.glyphs:
        count = glyph.count
        # One count byte, then the vector characters as raw bytes.
        f_c = bytearray(count.to_bytes(1, byteorder='little'))
        f_v = bytearray(glyph.vectors, 'utf-8')
        font_data += f_c + f_v
    print("_font =\\")
    print("b'", sep='', end='')
    count = 0
    for byte in (font_data):
        print(f'\\x{byte:02x}', sep='', end='')
        count += 1
        if count == 15:
            # Close this bytes literal and open the next line's literal.
            print("'\\\nb'", sep='', end='')
            count = 0
    print("'")
def write_offsets(offsets):
    """
    Write the 16 bit integer table to the start of the vector data for each
    glyph in the font.

    Emitted to stdout as an `_index` bytes literal, wrapped every 15 bytes.
    """
    index_data = bytes()
    for offset in offsets:
        # 'H' is a native-order unsigned short; output byte order therefore
        # follows the host — presumably little-endian targets. TODO confirm.
        index_data += bytearray(pack('H', offset))
    print("\n_index =\\")
    print("b'", sep='', end='')
    for count, byte in enumerate(index_data):
        if count > 0 and count % 15 == 0:
            print("'\\\nb'", sep='', end='')
        print(f'\\x{byte:02x}', sep='', end='')
    print("'")
def create_module(font):
    """
    Create python module from Hershey glyphs, optionally using a map file.

    Prints the complete module source (metric constants, `_font` data,
    `_index` offsets and the memoryview wrappers) to stdout.
    """
    print(f"FIRST = {font.first}")
    print(f"LAST = {font.last}")
    print(f"WIDTH = {font.width}")
    print(f"HEIGHT = {font.height}\n")
    write_font(font)

    # Byte offset of each glyph inside _font (1 count byte + vector chars).
    offsets = []
    offset = 0
    for glyph in font.glyphs:
        offsets.append(offset)
        offset += len(glyph.vectors) + 1
    write_offsets(offsets)

    print("\nFONT = memoryview(_font)")
    print("INDEX = memoryview(_index)\n")
def main():
    """Command-line entry point: convert a Hershey font file to a Python module.

    Parses the command line, loads the font (with an optional map file) and
    prints the generated module to stdout.
    """
    parser = argparse.ArgumentParser(
        prog='hershey2py',
        description=('''
    Convert hershey format font to python module for use
    with the draw method in the st7789 and ili9342 drivers.'''))

    parser.add_argument(
        'hershey_file',
        type=str,
        help='name of hershey font file to convert.')

    parser.add_argument(
        'map_file',
        type=str,
        nargs='?',
        default=None,
        help='Hershey glyph map file.')

    args = parser.parse_args()
    font = hershey_load(args.hershey_file, args.map_file)
    create_module(font)


# Guard the script body so importing this module (e.g. to reuse
# hershey_load) no longer triggers argument parsing and conversion.
if __name__ == '__main__':
    main()
|
920117c4e439d7bda4b90683be3b7bd5fa33da1d
|
b95d2aef1da5489d20b0d068ef7cece1ae32466f
|
/src/auditwheel/repair.py
|
23f172e90d7cfa11cae05d448d3bb8c38c2df9b0
|
[
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
pypa/auditwheel
|
55fc242ebc541b7fb24ddd65acafba08aa742344
|
ebd4bc0de49394adcad557532eb19fe6598cd886
|
refs/heads/main
| 2023-09-02T19:51:37.625078
| 2023-07-17T23:27:37
| 2023-07-25T08:17:53
| 49,615,802
| 375
| 136
|
NOASSERTION
| 2023-09-12T08:58:06
| 2016-01-14T02:05:24
|
Python
|
UTF-8
|
Python
| false
| false
| 7,967
|
py
|
repair.py
|
from __future__ import annotations
import itertools
import logging
import os
import platform
import re
import shutil
import stat
from collections import OrderedDict
from os.path import abspath, basename, dirname, exists, isabs
from os.path import join as pjoin
from subprocess import check_call
from typing import Iterable
from auditwheel.patcher import ElfPatcher
from .elfutils import elf_read_dt_needed, elf_read_rpaths, is_subdir
from .hashfile import hashfile
from .policy import get_replace_platforms
from .wheel_abi import get_wheel_elfdata
from .wheeltools import InWheelCtx, add_platforms
logger = logging.getLogger(__name__)
# Copied from wheel 0.31.1
WHEEL_INFO_RE = re.compile(
r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))(-(?P<build>\d.*?))?
-(?P<pyver>[a-z].+?)-(?P<abi>.+?)-(?P<plat>.+?)(\.whl|\.dist-info)$""",
re.VERBOSE,
).match
def repair_wheel(
    wheel_path: str,
    abis: list[str],
    lib_sdir: str,
    out_dir: str,
    update_tags: bool,
    patcher: ElfPatcher,
    exclude: list[str],
    strip: bool = False,
) -> str | None:
    """Vendor a wheel's external shared libraries and patch its ELF files.

    Args:
        wheel_path: path to the input wheel.
        abis: candidate ABI tags; abis[0] selects the external-ref set to
            graft and the platform tags to substitute.
        lib_sdir: suffix appended to the package name for the libs dir.
        out_dir: directory the repaired wheel is written to.
        update_tags: rewrite the wheel's platform tags when True.
        patcher: ELF patching backend.
        exclude: sonames to leave un-vendored.
        strip: run `strip -s` on grafted libs and extensions when True.

    Returns:
        Path of the repaired wheel, or None for a pure (non-ELF) wheel.

    Raises:
        ValueError: unparsable wheel file name, or a required library that
            could not be located.
    """
    external_refs_by_fn = get_wheel_elfdata(wheel_path)[1]

    # Do not repair a pure wheel, i.e. has no external refs
    if not external_refs_by_fn:
        return None

    # original soname -> (hashed soname, path of the grafted copy)
    soname_map: dict[str, tuple[str, str]] = {}

    if not isabs(out_dir):
        out_dir = abspath(out_dir)

    wheel_fname = basename(wheel_path)

    with InWheelCtx(wheel_path) as ctx:
        ctx.out_wheel = pjoin(out_dir, wheel_fname)

        match = WHEEL_INFO_RE(wheel_fname)
        if not match:
            # Fix: ValueError does not %-format its arguments, so the old
            # ValueError("...%s", wheel_fname) printed a literal "%s".
            raise ValueError(f"Failed to parse wheel file name: {wheel_fname}")

        # Destination for vendored libraries, e.g. "<pkg>.libs".
        dest_dir = match.group("name") + lib_sdir

        if not exists(dest_dir):
            os.mkdir(dest_dir)

        # here, fn is a path to a python extension library in
        # the wheel, and v['libs'] contains its required libs
        for fn, v in external_refs_by_fn.items():
            ext_libs: dict[str, str] = v[abis[0]]["libs"]
            replacements: list[tuple[str, str]] = []
            for soname, src_path in ext_libs.items():
                if soname in exclude:
                    logger.info(f"Excluding {soname}")
                    continue

                if src_path is None:
                    raise ValueError(
                        (
                            "Cannot repair wheel, because required "
                            'library "%s" could not be located'
                        )
                        % soname
                    )

                new_soname, new_path = copylib(src_path, dest_dir, patcher)
                soname_map[soname] = (new_soname, new_path)
                replacements.append((soname, new_soname))
            if replacements:
                patcher.replace_needed(fn, *replacements)

            if len(ext_libs) > 0:
                # Point the extension at the vendored libs dir via $ORIGIN.
                new_rpath = os.path.relpath(dest_dir, os.path.dirname(fn))
                new_rpath = os.path.join("$ORIGIN", new_rpath)
                append_rpath_within_wheel(fn, new_rpath, ctx.name, patcher)

        # we grafted in a bunch of libraries and modified their sonames, but
        # they may have internal dependencies (DT_NEEDED) on one another, so
        # we need to update those records so each now knows about the new
        # name of the other.
        for old_soname, (new_soname, path) in soname_map.items():
            needed = elf_read_dt_needed(path)
            replacements = []
            for n in needed:
                if n in soname_map:
                    replacements.append((n, soname_map[n][0]))
            if replacements:
                patcher.replace_needed(path, *replacements)

        if update_tags:
            ctx.out_wheel = add_platforms(ctx, abis, get_replace_platforms(abis[0]))

        if strip:
            libs_to_strip = [path for (_, path) in soname_map.values()]
            extensions = external_refs_by_fn.keys()
            strip_symbols(itertools.chain(libs_to_strip, extensions))

    return ctx.out_wheel
def strip_symbols(libraries: Iterable[str]) -> None:
    """Run ``strip -s`` on every library path in `libraries`."""
    for library in libraries:
        logger.info("Stripping symbols from %s", library)
        check_call(["strip", "-s", library])
def copylib(src_path: str, dest_dir: str, patcher: ElfPatcher) -> tuple[str, str]:
    """Graft a shared library from the system into the wheel and update the
    relevant links.

    1) Copy the file from src_path to dest_dir/
    2) Rename the shared object from soname to soname.<unique>
    3) If the library has a RUNPATH/RPATH, clear it and set RPATH to point to
    its new location.

    Returns:
        (new soname, destination path of the grafted copy).
    """
    # Copy a shared library from the system (src_path) into the wheel
    # if the library has a RUNPATH/RPATH we clear it and set RPATH to point to
    # its new location.

    # Content hash makes the vendored name unique per library build.
    with open(src_path, "rb") as f:
        shorthash = hashfile(f)[:8]

    src_name = os.path.basename(src_path)
    base, ext = src_name.split(".", 1)
    if not base.endswith("-%s" % shorthash):
        new_soname = f"{base}-{shorthash}.{ext}"
    else:
        # Already carries the hash (e.g. repaired previously): keep the name.
        new_soname = src_name

    dest_path = os.path.join(dest_dir, new_soname)
    if os.path.exists(dest_path):
        # Same content was grafted before; reuse the existing copy.
        return new_soname, dest_path

    logger.debug("Grafting: %s -> %s", src_path, dest_path)
    rpaths = elf_read_rpaths(src_path)
    shutil.copy2(src_path, dest_path)
    statinfo = os.stat(dest_path)
    if not statinfo.st_mode & stat.S_IWRITE:
        # The patcher needs write permission on the copied file.
        os.chmod(dest_path, statinfo.st_mode | stat.S_IWRITE)

    patcher.set_soname(dest_path, new_soname)

    # Clear any pre-existing RUNPATH/RPATH by pointing it at the libs dir.
    if any(itertools.chain(rpaths["rpaths"], rpaths["runpaths"])):
        patcher.set_rpath(dest_path, dest_dir)

    return new_soname, dest_path
def append_rpath_within_wheel(
    lib_name: str, rpath: str, wheel_base_dir: str, patcher: ElfPatcher
) -> None:
    """Append `rpath` to a file's rpath while keeping valid existing entries.

    An existing entry survives only if it resolves to a location inside
    wheel_base_dir; duplicates are dropped while preserving order.
    """
    if not isabs(lib_name):
        lib_name = abspath(lib_name)
    lib_dir = dirname(lib_name)
    if not isabs(wheel_base_dir):
        wheel_base_dir = abspath(wheel_base_dir)

    def keepable(entry: str) -> bool:
        return _is_valid_rpath(entry, lib_dir, wheel_base_dir)

    existing = patcher.get_rpath(lib_name).split(":")
    kept = [entry for entry in existing if keepable(entry)]

    # Deduplicate while preserving order (OrderedDict as an ordered set).
    ordered = OrderedDict((entry, "") for entry in kept)
    ordered[rpath] = ""
    patcher.set_rpath(lib_name, ":".join(ordered))
def _is_valid_rpath(rpath: str, lib_dir: str, wheel_base_dir: str) -> bool:
    """Return True if `rpath` resolves to an absolute path inside the wheel."""
    resolved = _resolve_rpath_tokens(rpath, lib_dir)

    # Guard 1: must resolve to an absolute path.
    if not isabs(resolved):
        logger.debug(
            f"rpath entry {rpath} could not be resolved to an "
            "absolute path -- discarding it."
        )
        return False

    # Guard 2: must stay inside the wheel.
    if not is_subdir(resolved, wheel_base_dir):
        logger.debug(
            f"rpath entry {rpath} points outside the wheel -- " "discarding it."
        )
        return False

    logger.debug(f"Preserved rpath entry {rpath}")
    return True
def _resolve_rpath_tokens(rpath: str, lib_base_dir: str) -> str:
# See https://www.man7.org/linux/man-pages/man8/ld.so.8.html#DESCRIPTION
if platform.architecture()[0] == "64bit":
system_lib_dir = "lib64"
else:
system_lib_dir = "lib"
system_processor_type = platform.machine()
token_replacements = {
"ORIGIN": lib_base_dir,
"LIB": system_lib_dir,
"PLATFORM": system_processor_type,
}
for token, target in token_replacements.items():
rpath = rpath.replace(f"${token}", target) # $TOKEN
rpath = rpath.replace(f"${{{token}}}", target) # ${TOKEN}
return rpath
|
2a80395ae29fc481fb79d3f3f1bb27507fc392e2
|
8f4db78e562ab88ef6eadf991c340829f2c67217
|
/doc/examples/type_hints_notemplate.py
|
6677dd247b8149992d1485257548adda025b55dd
|
[
"BSD-3-Clause"
] |
permissive
|
fluiddyn/transonic
|
3df606abda7f98c9958a8964ce1c6ab442167574
|
40329915cb277645c0a54286ef9e1d400e29719f
|
refs/heads/branch/default
| 2023-08-30T14:00:12.851327
| 2023-08-24T09:16:38
| 2023-08-24T09:16:38
| 164,679,098
| 107
| 1
|
BSD-3-Clause
| 2021-12-30T11:58:11
| 2019-01-08T15:34:10
|
Python
|
UTF-8
|
Python
| false
| false
| 257
|
py
|
type_hints_notemplate.py
|
import numpy as np
from transonic import Type, NDim, Array, boost
T = Type(int, np.complex128)
N = NDim(1, 3)
A = Array[T, N]
A1 = Array[np.float32, N + 1]
@boost
def compute(a: A, b: A, c: T, d: A1, e: str):
    """Echo `e` to stdout and return the elementwise sum of `a` and `b`."""
    print(e)
    total = a + b
    return total
|
be01c96d76b27836ba00087fb8665c92b7389455
|
3257372291236aac1737b057c9ac6c61da9ccca0
|
/tutorials/W0D5_Statistics/solutions/W0D5_Tutorial2_Solution_582d22a1.py
|
c1b15f9357dbee6334574295b7cd465df9633786
|
[
"CC-BY-4.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
NeuromatchAcademy/precourse
|
230ead0d11ae7b0dba21c8df97695a1796e9797d
|
b7f2432c6a68a7984ca923ceed8e07d5cfdb77c3
|
refs/heads/main
| 2023-07-26T11:18:24.493966
| 2023-07-09T14:42:49
| 2023-07-09T14:42:49
| 256,327,558
| 639
| 174
|
MIT
| 2023-07-09T14:42:50
| 2020-04-16T20:54:03
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 455
|
py
|
W0D5_Tutorial2_Solution_582d22a1.py
|
# Transition matrix: row i holds the probabilities of moving from area i.
transition_matrix = np.array(
    [[0.2, 0.6, 0.2],
     [0.6, 0.3, 0.1],
     [0.8, 0.2, 0.0]]
)

# Initial state, p0: the rat starts in area 2 (index 1).
p0 = np.array([0, 1, 0])

# Distribution four transitions later: p0 @ T^4
# (np.linalg.matrix_power raises a square matrix to an integer power).
transition_4 = np.linalg.matrix_power(transition_matrix, 4)
p4 = p0 @ transition_4

# The second area is indexed as 1 (Python starts indexing at 0)
print(f"The probability the rat will be in area 2 after 4 transitions is: {p4[1]}")
|
a18b3c1110d5717071b0976c928b91adbcc143f7
|
e73547787354afd9b717ea57fe8dd0695d161821
|
/tools/splat_ext/pm_msg.py
|
7fce2326b5011f7ca9326458a7920731cad042fe
|
[] |
no_license
|
pmret/papermario
|
8b514b19653cef8d6145e47499b3636b8c474a37
|
9774b26d93f1045dd2a67e502b6efc9599fb6c31
|
refs/heads/main
| 2023-08-31T07:09:48.951514
| 2023-08-21T18:07:08
| 2023-08-21T18:07:08
| 287,151,133
| 904
| 139
| null | 2023-09-14T02:44:23
| 2020-08-13T01:22:57
|
C
|
UTF-8
|
Python
| false
| false
| 14,244
|
py
|
pm_msg.py
|
import shutil
from segtypes.n64.segment import N64Segment
from pathlib import Path
from util import options
import re
import pylibyaml
import yaml as yaml_loader
CHARSET = {
0x00: "[NOTE]",
0x01: "!",
0x02: '"',
0x03: "#",
0x04: "$",
0x05: "%",
0x06: "&",
0x07: "'",
0x08: "(",
0x09: ")",
0x0A: "*",
0x0B: "+",
0x0C: ",",
0x0D: "-",
0x0E: ".",
0x0F: "/",
0x10: "0",
0x11: "1",
0x12: "2",
0x13: "3",
0x14: "4",
0x15: "5",
0x16: "6",
0x17: "7",
0x18: "8",
0x19: "9",
0x1A: ":",
0x1B: ";",
0x1C: "<",
0x1D: "=",
0x1E: ">",
0x1F: "?",
0x20: "@",
0x21: "A",
0x22: "B",
0x23: "C",
0x24: "D",
0x25: "E",
0x26: "F",
0x27: "G",
0x28: "H",
0x29: "I",
0x2A: "J",
0x2B: "K",
0x2C: "L",
0x2D: "M",
0x2E: "N",
0x2F: "O",
0x30: "P",
0x31: "Q",
0x32: "R",
0x33: "S",
0x34: "T",
0x35: "U",
0x36: "V",
0x37: "W",
0x38: "X",
0x39: "Y",
0x3A: "Z",
0x3B: "\\[",
0x3C: "¥",
0x3D: "]",
0x3E: "^",
0x3F: "_",
0x40: "`",
0x41: "a",
0x42: "b",
0x43: "c",
0x44: "d",
0x45: "e",
0x46: "f",
0x47: "g",
0x48: "h",
0x49: "i",
0x4A: "j",
0x4B: "k",
0x4C: "l",
0x4D: "m",
0x4E: "n",
0x4F: "o",
0x50: "p",
0x51: "q",
0x52: "r",
0x53: "s",
0x54: "t",
0x55: "u",
0x56: "v",
0x57: "w",
0x58: "x",
0x59: "y",
0x5A: "z",
0x5B: "{",
0x5C: "|",
0x5D: "}",
0x5E: "~",
0x5F: "°",
0x60: "À",
0x61: "Á",
0x62: "Â",
0x63: "Ä",
0x64: "Ç",
0x65: "È",
0x66: "É",
0x67: "Ê",
0x68: "Ë",
0x69: "Ì",
0x6A: "Í",
0x6B: "Î",
0x6C: "Ï",
0x6D: "Ñ",
0x6E: "Ò",
0x6F: "Ó",
0x70: "Ô",
0x71: "Ö",
0x72: "Ù",
0x73: "Ú",
0x74: "Û",
0x75: "Ü",
0x76: "ß",
0x77: "à",
0x78: "á",
0x79: "â",
0x7A: "ä",
0x7B: "ç",
0x7C: "è",
0x7D: "é",
0x7E: "ê",
0x7F: "ë",
0x80: "ì",
0x81: "í",
0x82: "î",
0x83: "ï",
0x84: "ñ",
0x85: "ò",
0x86: "ó",
0x87: "ô",
0x88: "ö",
0x89: "ù",
0x8A: "ú",
0x8B: "û",
0x8C: "ü",
0x8D: "¡",
0x8E: "¿",
0x8F: "ª",
0x90: "[HEART]",
0x91: "[STAR]",
0x92: "[UP]",
0x93: "[DOWN]",
0x94: "[LEFT]",
0x95: "[RIGHT]",
0x96: "[CIRCLE]",
0x97: "[CROSS]",
0x98: "[~A]",
0x99: "[~B]",
0x9A: "[~L]",
0x9B: "[~R]",
0x9C: "[~Z]",
0x9D: "[~C-UP]",
0x9E: "[~C-DOWN]",
0x9F: "[~C-LEFT]",
0xA0: "[~C-RIGHT]",
0xA1: "[~START]",
0xA2: "“",
0xA3: "”",
0xA4: "‘",
0xA5: "’",
0xF7: " ",
0xF0: "[BR]\n",
0xF1: "[Wait]",
0xF2: {None: lambda d: (f"[Pause {d[0]}]", 1)},
0xF3: "[Variant0]",
0xF4: "[Variant1]",
0xF5: "[Variant2]",
0xF6: "[Variant3]",
0xFB: "[Next]\n",
0xFC: {
0x01: "[Style right]\n",
0x02: "[Style left]\n",
0x03: "[Style center]\n",
0x04: "[Style tattle]\n",
0x05: {
None: lambda d: (
f"[Style choice pos={d[0]},{d[1]} size={d[2]},{d[3]}]\n",
4,
)
},
0x06: "[Style inspect]\n",
0x07: "[Style sign]\n",
0x08: {None: lambda d: (f"[Style lamppost height={d[0]}]\n", 1)},
0x09: {None: lambda d: (f"[Style postcard index={d[0]}]\n", 1)},
0x0A: "[Style popup]\n",
0x0C: {
None: lambda d: (
f"[Style upgrade pos={d[0]},{d[1]} size={d[2]},{d[3]}]\n",
4,
)
},
0x0D: "[Style narrate]\n",
0x0E: "[Style epilogue]\n",
},
0xFF: {
0x00: {
0: "[Font standard]\n",
1: "[Font menu]\n",
3: "[Font title]\n",
4: "[Font subtitle]\n",
},
0x04: "[Yield]",
0x05: {
# 0x0A: "[color:normal]",
# 0x20: "[color:red]",
# 0x21: "[color:pink]",
# 0x22: "[color:purple]",
# 0x23: "[color:blue]",
# 0x24: "[color:cyan]",
# 0x25: "[color:green]",
# 0x26: "[color:yellow]",
# 0x00: "[color=normal ctx=diary]",
# 0x07: "[color=red ctx=diary]",
# 0x17: "[color=dark ctx=inspect]",
# 0x18: "[color=normal ctx=sign]",
# 0x19: "[color=red ctx=sign]",
# 0x1A: "[color=blue ctx=sign]",
# 0x1B: "[color=green ctx=sign]",
# 0x28: "[color=red ctx=popup]",
# 0x29: "[color=pink ctx=popup]",
# 0x2A: "[color=purple ctx=popup]",
# 0x2B: "[color=blue ctx=popup]",
# 0x2C: "[color=teal ctx=popup]",
# 0x2D: "[color=green ctx=popup]",
# 0x2E: "[color=yellow ctx=popup]",
# 0x2F: "[color=normal ctx=popup]",
None: lambda d: (f"[Color 0x{d[0]:X}]", 1),
},
0x07: "[InputOff]\n",
0x08: "[InputOn]\n",
0x09: "[DelayOff]\n",
0x0A: "[DelayOn]\n",
0x0B: {None: lambda d: (f"[CharWidth {d[0]}]", 1)},
0x0C: {None: lambda d: (f"[Scroll {d[0]}]", 1)},
0x0D: {None: lambda d: (f"[Size {d[0]},{d[1]}]\n", 2)},
0x0E: "[SizeReset]\n",
0x0F: {None: lambda d: (f"[Speed delay={d[0]} chars={d[1]}]", 2)},
0x10: {None: lambda d: (f"[SetPosX {(d[0] << 8) + d[1]}]", 2)},
0x11: {None: lambda d: (f"[SetPosY {d[0]}]", 1)},
0x12: {None: lambda d: (f"[Right {d[0]}]", 1)},
0x13: {None: lambda d: (f"[Down {d[0]}]", 1)},
0x14: {None: lambda d: (f"[Up {d[0]}]", 1)},
0x15: {None: lambda d: (f"[InlineImage index={d[0]}]\n", 1)},
0x16: {
None: lambda d: (
f"[AnimSprite spriteID=0x{d[0]:02X}{d[1]:02X} raster={d[2]}]\n",
3,
)
},
0x17: {None: lambda d: (f"[ItemIcon itemID=0x{d[0]:02X}{d[1]:02X}]\n", 2)},
0x18: {
None: lambda d: (
f"[Image index={d[0]} pos={(d[1] << 8) + d[2]},{d[3]} hasBorder={d[4]} alpha={d[5]} fadeAmount={d[6]}]\n",
7,
)
},
0x19: {None: lambda d: (f"[HideImage fadeAmount={d[0]}]\n", 1)},
0x1A: {None: lambda d: (f"[AnimDelay index={d[1]} delay={d[2]}]", 3)},
0x1B: {None: lambda d: (f"[AnimLoop {d[0]} {d[1]}]", 2)},
0x1C: {None: lambda d: (f"[AnimDone {d[0]}]", 1)},
0x1E: {None: lambda d: (f"[Cursor {d[0]}]", 1)},
0x1F: {None: lambda d: (f"[EndChoice {d[0]}]", 1)},
0x20: {None: lambda d: (f"[SetCancel {d[0]}]", 1)},
0x21: {None: lambda d: (f"[Option {d[0]}]", 1)},
0x22: "[SavePos]",
0x23: "[RestorePos]",
0x24: {
0xFF: {
0x05: {
0x10: {0x98: {0xFF: {0x25: "[A]"}}},
0x11: {0x99: {0xFF: {0x25: "[B]"}}},
0x12: {0xA1: {0xFF: {0x25: "[START]"}}},
0x13: {
0x9D: {0xFF: {0x25: "[C-UP]"}},
0x9E: {0xFF: {0x25: "[C-DOWN]"}},
0x9F: {0xFF: {0x25: "[C-LEFT]"}},
0xA0: {0xFF: {0x25: "[C-RIGHT]"}},
},
0x14: {0x9C: {0xFF: {0x25: "[Z]"}}},
}
}
},
# 0x24: "[SaveColor]",
# 0x25: "[RestoreColor]",
0x26: {
0x00: "[Shake]",
0x01: "[Wave]",
0x02: "[NoiseOutline]",
0x03: {None: lambda d: (f"[Static {d[0]}]", 1)},
0x05: {None: lambda d: (f"[Blur dir={['x', 'y', 'xy'][d[0]]}]", 1)},
0x07: {None: lambda d: (f"[DitherFade {d[0]}]", 1)},
0x0A: "[PrintRising]",
0x0B: "[PrintGrowing]",
0x0C: "[SizeJitter]",
0x0D: "[SizeWave]",
0x0E: "[DropShadow]",
},
0x27: {
0x00: "[/Shake]",
0x01: "[/Wave]",
0x03: "[/Static]",
0x05: "[/Blur]",
0x07: "[/DitherFade]",
0x0A: "[/PrintRising]",
0x0B: "[/PrintGrowing]",
0x0C: "[/SizeJitter]",
0x0D: "[/SizeWave]",
0x0E: "[/DropShadow]",
},
0x28: {None: lambda d: (f"[Var {d[0]}]", 1)},
0x29: {None: lambda d: (f"[CenterX {d[0]}]", 1)},
0x2B: "[EnableCDownNext]",
0x2C: {
None: lambda d: (
f"[CustomVoice soundIDs=0x{d[0]:02X}{d[1]:02X}{d[2]:02X}{d[3]:02X},{d[4]:02X}{d[5]:02X}{d[6]:02X}{d[7]:02X}]",
8,
)
},
0x2E: {None: lambda d: (f"[Volume {d[0]}]", 1)},
0x2F: {
0: "[Voice normal]\n",
1: "[Voice bowser]\n",
2: "[Voice star]\n",
None: lambda d: (f"[Voice {d[0]}]\n", 1),
},
# None: lambda d: (f"[func_{d[0]:02X}]", 1),
},
None: lambda d: (f"[Raw 0x{d[0]:02X}]", 1),
}
CHARSET_CREDITS = {
**CHARSET,
0x00: "A",
0x01: "B",
0x02: "C",
0x03: "D",
0x04: "E",
0x05: "F",
0x06: "G",
0x07: "H",
0x08: "I",
0x09: "J",
0x0A: "K",
0x0B: "L",
0x0C: "M",
0x0D: "N",
0x0E: "O",
0x0F: "P",
0x10: "Q",
0x11: "R",
0x12: "S",
0x13: "T",
0x14: "U",
0x15: "V",
0x16: "W",
0x17: "X",
0x18: "Y",
0x19: "Z",
0x1A: "'",
0x1B: ".",
0x1C: ",",
0x1D: "0",
0x1E: "1",
0x1F: "2",
0x20: "3",
0x21: "4",
0x22: "5",
0x23: "6",
0x24: "7",
0x25: "8",
0x26: "9",
0x27: "©",
0x28: "&",
0xF7: " ",
}
class N64SegPm_msg(N64Segment):
    """Splat segment that splits Paper Mario's message data into .msg files.

    The ROM region begins with a zero-terminated table of section offsets;
    each section holds a table of message offsets that terminates when an
    entry equals the section's own offset. Each message is decoded into
    human-readable markup using the CHARSET tables.
    """

    def __init__(
        self,
        rom_start,
        rom_end,
        type,
        name,
        vram_start,
        args,
        yaml,
    ):
        super().__init__(
            rom_start,
            rom_end,
            type,
            name,
            vram_start,
            args=args,
            yaml=yaml,
        )
        # Optional list of per-section output file names from the yaml entry.
        self.files = yaml.get("files", []) if isinstance(yaml, dict) else []
        # Sidecar yaml next to this script maps (section, index) to a
        # human-readable message name.
        with (Path(__file__).parent / f"{self.name}.yaml").open("r") as f:
            self.msg_names = yaml_loader.load(f.read(), Loader=yaml_loader.SafeLoader)

    def split(self, rom_bytes):
        data = rom_bytes[self.rom_start : self.rom_end]

        # Read the zero-terminated table of section offsets.
        section_offsets = []
        pos = 0
        while True:
            offset = int.from_bytes(data[pos : pos + 4], byteorder="big")
            if offset == 0:
                break
            section_offsets.append(offset)
            pos += 4

        msg_dir = options.opts.asset_path / self.name
        msg_dir.mkdir(parents=True, exist_ok=True)

        for i, section_offset in enumerate(section_offsets):
            name = f"{i:02X}"
            # BUG FIX: was `len(self.files) >= i`, which raised IndexError
            # when i == len(self.files); only index when i is in range.
            if i < len(self.files):
                name = self.files[i]

            # Each section is a table of message offsets; the list ends when
            # an entry loops back onto the section's own offset.
            msg_offsets = []
            pos = section_offset
            while True:
                offset = int.from_bytes(data[pos : pos + 4], byteorder="big")
                if offset == section_offset:
                    break
                msg_offsets.append(offset)
                pos += 4

            # self.log(f"Reading {len(msg_offsets)} messages in section {name} (0x{i:02X})")

            path = msg_dir / Path(name + ".msg")
            with open(path, "w") as self.f:
                for j, msg_offset in enumerate(msg_offsets):
                    if j != 0:
                        self.f.write("\n")

                    # Look up a human-readable name for this message, if any.
                    msg_name = None
                    for d in self.msg_names:
                        section, index, goodname = d[:3]
                        if i == section and j == index:
                            msg_name = goodname
                            break

                    if msg_name is None:
                        self.f.write(f"#message:{i:02X}:{j:03X} {{\n\t")
                    else:
                        self.f.write(f"#message:{i:02X}:({msg_name}) {{\n\t")
                    self.write_message_markup(data[msg_offset:])
                    self.f.write("\n}\n")

    def get_linker_entries(self):
        from segtypes.linker_entry import LinkerEntry

        base_path = options.opts.asset_path / f"{self.name}"
        out_paths = [base_path / Path(f + ".msg") for f in self.files]
        return [LinkerEntry(self, out_paths, base_path, ".data")]

    @staticmethod
    def get_default_name(addr):
        return "msg"

    def write_message_markup(self, data):
        """Decode one 0xFD-terminated message into markup, writing to self.f."""
        pos = 0
        self.root_charset = CHARSET
        while data[pos] != 0xFD:  # 0xFD terminates a message
            self.charset = self.root_charset
            while True:
                char = data[pos]
                if char in self.charset:
                    value = self.charset[char]
                elif None in self.charset:
                    value = self.charset[None]
                # NOTE(review): if `char` is missing and the charset has no
                # None entry, `value` is stale from a previous iteration (or
                # unbound on the first one) — presumably the tables always
                # provide a fallback; confirm against the CHARSET data.
                if value is None:
                    # `fallback` is captured below when descending into a
                    # nested charset whose None handler was declared None.
                    value = fallback
                if isinstance(value, str):
                    self.write_markup(value)
                    pos += 1
                    break
                elif callable(value):
                    markup, delta = value(data[pos:])
                    self.write_markup(markup)
                    pos += delta
                    break
                elif isinstance(value, dict):
                    # Multi-byte sequence: remember the current fallback and
                    # descend into the nested charset.
                    if None in self.charset:
                        fallback = self.charset[None]
                    self.charset = value
                    pos += 1
                else:
                    raise ValueError(value)
        self.write_markup("[End]")

    def write_markup(self, markup):
        # Indent continuation lines to match the "#message { ... }" layout.
        self.f.write(re.sub("\n", "\n\t", markup))

        # Switching to the title/subtitle font also switches the charset used
        # for subsequent bytes: the credits font has its own glyph mapping.
        markup_lower = markup.lower()
        if markup_lower == "[font title]\n" or markup_lower == "[font subtitle]\n":
            self.root_charset = CHARSET_CREDITS
        elif markup_lower == "[font standard]":
            self.root_charset = CHARSET

    def cache(self):
        return (self.yaml, self.rom_end, self.msg_names)
|
673fbf3ed565b8c77c12d7ba3e74f8cb4bef5dc1
|
2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5
|
/tools/pot/openvino/tools/pot/graph/model_utils.py
|
2d1b660e9a000f761ee542c015cf9de901c002d1
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/openvino
|
38ea745a247887a4e14580dbc9fc68005e2149f9
|
e4bed7a31c9f00d8afbfcabee3f64f55496ae56a
|
refs/heads/master
| 2023-08-18T03:47:44.572979
| 2023-08-17T21:24:59
| 2023-08-17T21:24:59
| 153,097,643
| 3,953
| 1,492
|
Apache-2.0
| 2023-09-14T21:42:24
| 2018-10-15T10:54:40
|
C++
|
UTF-8
|
Python
| false
| false
| 6,221
|
py
|
model_utils.py
|
# Copyright (C) 2020-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import networkx as nx
from openvino.tools.mo.graph.graph import Node
from openvino.tools.mo.middle.passes.infer import type_infer
from openvino.tools.mo.middle.pattern_match import for_graph_and_each_sub_graph_recursively
from . import editor as ge, builder as gb
from .nx_model import CompressedModel
from .passes import compress_weights
from ..utils.utils import convert_output_key
def load_model(model_config, target_device='ANY'):
    """Load a model from the location described by *model_config*.

    :param model_config: configuration describing where/how to load the model
    :param target_device: device the model is intended to run on
    :return: a CompressedModel instance
    """
    loaded = CompressedModel(config=model_config, target_device=target_device)
    return loaded
def save_model(model: CompressedModel, save_path, model_name=None, for_stat_collection=False):
    """Serialize *model* as IR under *save_path*.

    :param model: CompressedModel instance to save
    :param save_path: directory to save the model into
    :param model_name: name under which the model will be saved
    :param for_stat_collection: whether the saved model will be used for
        statistics collection rather than normal inference (only affects
        cascaded models)
    :return: list of dictionaries, one per saved model, with keys
        'name' (cascade models only), 'model' (path to xml) and
        'weights' (path to bin)
    """
    return model.save(save_path, model_name=model_name, for_stat_collection=for_stat_collection)
def add_outputs(models, node_names):
    """Register additional outputs on every model in *models*.

    :param models: list of dictionaries with keys
        'name' (cascaded models only) and 'model' (IE model instance)
    :param node_names: mapping from model name to a dict of node names
    :return: mapping from model friendly name to the list of created outputs
    """
    outputs_per_model = {}
    for model_dict in models:
        model = model_dict['model']
        model_name = model.friendly_name
        all_node_names = list(node_names[model_name].values())
        if len(models) == 1:
            selected = all_node_names
        else:
            # For cascades, only pick nodes belonging to this sub-model.
            prefix = model_dict['name']
            selected = [node_name for node_name in all_node_names
                        if convert_output_key(node_name).startswith(prefix)]
        outputs = model.add_outputs(selected)
        outputs_per_model[model_name] = outputs or []
    return outputs_per_model
def compress_model_weights(model: CompressedModel):
    """Apply transformations to save model weights to INT8.

    Runs the weight-compression pass over every wrapped graph and all of
    its subgraphs.
    """
    for entry in model.models:
        for_graph_and_each_sub_graph_recursively(entry['model'], compress_weights)
# TODO: set recursively = True to enable subgraphs quantization
def get_nodes_by_type(model: CompressedModel, types: list, recursively: bool = True):
    """ Returns all nodes with type from types collection
    :param model: CompressedModel model
    :param types: list of required types
    :param recursively: whether return all nodes from the model
    and each subgraph or only from the external model
    :return list of nodes filtered by 'types' collection
    """
    found = []
    for model_dict in model.models:
        found.extend(ge.get_nodes_by_type(model_dict['model'], types, recursively))
    return found
def get_node_by_name(model: CompressedModel, name: str) -> Node:
    """ Returns node by name found in the graph and each subgraph
    :param model: CompressedModel model
    :param name: name of the node
    :return node from model (of type Node or None if there's no such node)
    """
    matches = []
    for model_dict in model.models:
        node = ge.get_node_by_name(model_dict['model'], name)
        if node is not None:
            matches.append(node)
    if len(matches) > 1:
        raise RuntimeError('The model contains more than one node with the name {}'.format(name))
    return matches[0] if matches else None
# TODO: set recursively = True to enable subgraphs quantization
def get_all_operation_nodes(model: CompressedModel, recursively: bool = True):
    """ Returns sequence of all nodes in all graphs
    :param model: CompressedModel model
    :param recursively: whether return all nodes from the model
    and each subgraph or only from the external model
    :return list of all nodes
    """
    collected = []
    for model_dict in model.models:
        collected.extend(ge.get_all_operation_nodes(model_dict['model'], recursively))
    return collected
def build_model_for_node(nx_model, input_name, input_shape, node, remove_bias=False,
                         remove_fake_quantize=False, target_device='ANY'):
    """ Build Model containing Subgraph of CompressedModel (input - node - output).
    The Convolution, MatMul node types are supported.
    :param nx_model: CompressedModel model
    :param input_name: name of the input node in the generated graph
    :param input_shape: shape of the input node in the generated graph
    :param node: node for which graph (input - node - output) will be generated
    :param remove_bias: remove bias in the generated graph
    :param remove_fake_quantize: remove fake quantize nodes in the generated graph
    :param target_device: device for processing
    :return: generated CompressedModel instance.
    """
    # Find the (unique) sub-model that actually contains the input node.
    matching = []
    for model_dict in nx_model.models:
        if ge.get_node_by_name(model_dict['model'], input_name):
            matching.append(model_dict['model'])
    if len(matching) > 1:
        raise RuntimeError('Name collision: {}'.format(input_name))
    op_graph = gb.build_graph_for_node(matching[0], input_name, input_shape, node,
                                       remove_bias, remove_fake_quantize)
    return CompressedModel(graph=op_graph, target_device=target_device)
def models_union(first_model, second_model):
    """ Return the union of CompressedModel models
    :return CompressedModel instance - union of first_model and second_model
    """
    # NOTE(review): `union` aliases `first_model`, so the sub-model dicts of
    # `first_model` are mutated in place below — presumably intentional;
    # confirm callers do not rely on `first_model` staying unchanged.
    union = first_model
    union_models = union.models
    # Sub-models are paired positionally; zip silently drops unmatched
    # trailing entries if the two cascades have different lengths.
    for model_dict, model_dict_2, union_dict in zip(first_model.models, second_model.models, union_models):
        model_1 = model_dict['model']
        model_2 = model_dict_2['model']
        # Merge the two graphs, then copy graph-level attributes from both
        # operands (values from model_2 win on key conflicts).
        union_dict['model'] = nx.union(model_1, model_2)
        union_dict['model'].graph.update(model_1.graph)
        union_dict['model'].graph.update(model_2.graph)
    return union
def nx_type_infer(model):
    """Run type inference on every graph wrapped by the CompressedModel."""
    for entry in model.models:
        type_infer(entry['model'])
|
c907c81913ae13806093ee10db66f78d316197ba
|
fa89ef4a8eb06dc2015d7116637f230b6891eb8d
|
/refinery/units/__init__.py
|
1d11b20c75ae852b0aa0659b33b10e101fcb0285
|
[
"BSD-3-Clause"
] |
permissive
|
binref/refinery
|
f61878d9fddf616fee8edf226df22f6a35238940
|
4c7c3717ae45543b9d7bae60a4af4c00993cf719
|
refs/heads/master
| 2023-08-17T17:02:34.357138
| 2023-08-14T08:43:05
| 2023-08-14T08:43:05
| 228,019,736
| 439
| 48
|
NOASSERTION
| 2023-09-11T10:26:02
| 2019-12-14T12:32:06
|
Python
|
UTF-8
|
Python
| false
| false
| 71,597
|
py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This package contains all refinery units. To write an executable refinery unit,
it is sufficient to write a class that inherits from `refinery.units.Unit` and
implements `refinery.units.Unit.process`. If the operation implemented by this
unit should be reversible, then a method called `reverse` with the same
signature has to be implemented. For example, the following would be a
minimalistic approach to implement `refinery.hex`:
from refinery import Unit
class hex(Unit):
def process(self, data): return bytes.fromhex(data.decode('ascii'))
def reverse(self, data): return data.hex().encode(self.codec)
The above script can be run from the command line. Since `hex` is not marked as
abstract, its inherited `refinery.units.Unit.run` method will be invoked when
the script is executed.
### Command Line Parameters
If you want your custom refinery unit to accept command line parameters, you can
write an initialization routine. For example, the following unit implements a
very simple XOR unit (less versatile than the already existing `refinery.xor`):
from refinery import Unit
import itertools
class myxor (Unit):
def __init__(self, key: Unit.Arg.Binary(help='Encryption key')):
pass
def process(self, data: bytearray):
key = itertools.cycle(self.args.key)
for k, b in enumerate(data):
data[k] ^= next(key)
return data
The `refinery.Arg` decorator is optional and only used here to provide a help
message on the command line. It is also available as the `Arg` class property
of the `refinery.Unit` class for convenience. The example also shows that the
`__init__` code can be left empty: In this case, refinery automatically adds
boilerplate code that copies all `__init__` parameters to the `args` member
variable of the unit. In this case, the constructor will be completed to have
the following code:
def __init__(self, key: Unit.Arg.Binary(help='Encryption key')):
super().__init__(key=key)
The option of writing an empty `__init__` was added because it is rarely needed
to perform any processing of the input arguments. The command line help for this
unit will look as follows:
usage: myxor [-h] [-Q] [-0] [-v] key
positional arguments:
key Encryption key
generic options:
-h, --help Show this help message and exit.
-Q, --quiet Disables all log output.
-0, --devnull Do not produce any output.
-v, --verbose Specify up to two times to increase log level.
### Refinery Syntax in Code
Refinery units can be used in Python code (and a Python repl) in nearly the same
way as on the command line. As one example, consider the following unit that can
decode base64 with a custom alphabet using `refinery.map` and `refinery.b64`:
from refinery import Unit, b64, map
class b64custom(Unit):
_b64alphabet = (
B'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
B'abcdefghijklmnopqrstuvwxyz'
B'0123456789+/'
)
def __init__(self, alphabet=_b64alphabet):
if len(alphabet) != 64:
raise ValueError('Alphabet size must be 64')
super().__init__(alphabet=alphabet)
def process(self, data):
return data | map(self.args.alphabet, self._b64alphabet) | b64
def reverse(self, data):
return data | -b64 | map(self._b64alphabet, self.args.alphabet)
The syntax does not work exactly as on the command line, but it was designed to
be as similar as possible:
- The binary or operator `|` can be used to combine units into pipelines.
- Combining a pipeline from the left with a byte string or io stream object will
feed this byte string into the unit.
- Unary negation of a reversible unit is equivalent to using the `-R` switch for
reverse mode.
- A pipeline is an iterable of output chunks, but there is quite a selection of
objects that can be connected to a pipeline from the right using `|` for
various different output options. See below for details.
If you want to use frames in code, simply omit any pipe before a square bracket.
For example, the first example from the `refinery.lib.frame` documentation
translates to the following Python code:
>>> from refinery import *
>>> B'OOOOOOOO' | chop(2) [ ccp(B'F') | cca(B'.') ]| ...
bytearray(b'FOO.FOO.FOO.FOO.')
In the above example, the pipeline is piped to a literal ellipsis (`...`) to get
the final result. The following section lists the other output options.
### Output Options in Code
You can connect a pipeline to any binary i/o stream, and the output of the
pipeline will be written to that stream. Example:
with open('output', 'wb') as stream:
B'BINARY REFINERY' | xor(0x13) | stream
Furthermore, you can connect pipelines to any callable, and you can always use
a literal ellipsis (`...`) to represent the identity function. The result of
this is that you receive the raw output from the pipeline:
>>> B'BINARY REFINERY' | xor(0x13) | ...
bytearray(b'QZ]RAJ3AVUZ]VAJ')
You can also connect to sets and lists containing a single callable. In this
case, the callable will be applied to each output chunk and all results will be
collected in a list or set, respectively. Examples:
>>> B'ABABCBABABCHB' | rex('.B') | [str]
['AB', 'AB', 'CB', 'AB', 'AB', 'HB']
>>> B'ABABCBABABCHB' | rex('.B') | {str}
{'AB', 'CB', 'HB'}
You can also consume into a dictionary in a similar way:
>>> B'ABABCBABABCHB' | rex('.(?P<k>.)B') | {'k': str}
{A: ['BAB', 'BAB'], H: ['CHB']}
Here, the dictionary is expected to contain exactly one key-value pair. The key
is the name of a meta variable and the value is a conversion function. The
result will be a dictionary where all converted results have been grouped under
the respective value of their meta variable. With all of the above options, it
is always possible to use a literal ellipsis (`...`).
You can connect pipelines to `bytearray` and (writable) `memoryview` instances.
In this case, the output will be appended to the end of this buffer. Finally, if
you connect a pipeline to `None`, this will execute the unit but discard all
output. This is useful for using units with side-effects, like `refinery.peek`,
in a REPL.
"""
from __future__ import annotations
import abc
import copy
import inspect
import os
import sys
from abc import ABCMeta
from enum import Enum
from functools import wraps
from collections import OrderedDict
from typing import (
Dict,
Iterable,
Sequence,
Set,
Type,
TypeVar,
Union,
List,
Optional,
Callable,
Tuple,
Any,
ByteString,
Generator,
overload,
no_type_check,
get_type_hints
)
from argparse import (
ArgumentTypeError, Namespace,
ONE_OR_MORE,
OPTIONAL,
REMAINDER,
ZERO_OR_MORE
)
from refinery.lib.argformats import pending, manifest, multibin, numseq, number, sliceobj, VariableMissing, ParserVariableMissing
from refinery.lib.argparser import ArgumentParserWithKeywordHooks, ArgparseError
from refinery.lib.tools import documentation, isstream, lookahead, autoinvoke, one, skipfirst, isbuffer
from refinery.lib.frame import Framed, Chunk
from refinery.lib.structures import MemoryFile
from refinery.lib.environment import LogLevel, Logger, environment, logger
from refinery.lib.types import ByteStr, Singleton
# Convenience alias: an in-memory file object over a byte-string buffer.
ByteIO = MemoryFile[ByteStr]
class RefineryPartialResult(ValueError):
    """
    Raised when an operation could only be completed partially; the incomplete
    result (and optionally the unprocessed remainder) is attached to the error.
    """
    def __init__(self, message: str, partial: ByteString, rest: Optional[ByteString] = None):
        super().__init__(message)
        self.rest = rest
        self.partial = partial
        self.message = message

    def __str__(self):
        return self.message
class RefineryImportMissing(ImportError):
    """
    Raised when an optional dependency is missing; carries the missing module
    name and a ready-made, shell-quoted install string for the required
    distributions.
    """
    def __init__(self, missing: str, *dependencies: str):
        super().__init__()
        import shlex
        self.missing = missing
        self.dependencies = dependencies
        self.install = ' '.join(shlex.quote(dist) for dist in dependencies)
class RefineryCriticalException(RuntimeError):
    """
    Raising this exception aborts processing of the entire input stream rather
    than just the chunk that is currently being processed.
    """
class RefineryException(RuntimeError):
    """
    An error that originates from refinery itself rather than from an external
    library.
    """
class Entry:
    """
    Empty marker class; every entry point unit (i.e. any unit that can be
    executed via the command line) is an instance of this class.
    """
class Argument:
    """
    Represents an abstract call to a Python function: a bundle of positional
    and keyword arguments. Applying an `Argument` to a callable is done with
    the matrix multiplication operator; `function @ Argument(a, b, kwd=c)` is
    the same as calling `function(a, b, kwd=c)`.
    """
    __slots__ = 'args', 'kwargs'

    args: List[Any]
    kwargs: Dict[str, Any]

    def __init__(self, *args, **kwargs):
        self.args = list(args)
        self.kwargs = kwargs

    def __rmatmul__(self, method):
        # `method @ self` applies the stored arguments to `method`.
        return method(*self.args, **self.kwargs)

    def __repr__(self):
        def _short(value):
            # Prefer a readable name over the default <...> representation.
            text = repr(value)
            if text.startswith('<'):
                try:
                    return value.__name__
                except AttributeError:
                    pass
                try:
                    return value.__class__.__name__
                except AttributeError:
                    pass
            return text
        rendered = [repr(a) for a in self.args]
        rendered.extend(F'{key!s}={_short(value)}' for key, value in self.kwargs.items())
        return ', '.join(rendered)
class Arg(Argument):
    """
    This class is specifically an argument for the `add_argument` method of an `ArgumentParser` from
    the `argparse` module. It can also be used as a decorator or annotation for the constructor of a
    refinery unit to better control the argument parser of that unit's command line interface.
    Example:
    ```
    class prefixer(Unit):
        def __init__(
            self,
            prefix: Arg.Binary(help='This data will be prepended to the input.')
        ): ...
        def process(self, data):
            return self.args.prefix + data
    ```
    Note that when the init of a unit has a return annotation that is a base class of itself, then
    all its parameters will automatically be forwarded to that base class.
    """

    # Sentinel: when used as a kwarg value in `merge_all`, removes that key.
    class delete: pass
    # Sentinel: marks an argparse keyword as "not specified".
    class omit: pass

    def __init__(
        self, *args: str,
        action   : Union[omit, str] = omit,            # noqa
        choices  : Union[omit, Iterable[Any]] = omit,  # noqa
        const    : Union[omit, Any] = omit,            # noqa
        default  : Union[omit, Any] = omit,            # noqa
        dest     : Union[omit, str] = omit,            # noqa
        help     : Union[omit, str] = omit,            # noqa
        metavar  : Union[omit, str] = omit,            # noqa
        nargs    : Union[omit, int, str] = omit,       # noqa
        required : Union[omit, bool] = omit,           # noqa
        type     : Union[omit, type] = omit,           # noqa
        group    : Optional[str] = None,               # noqa
        guessed  : Optional[Set[str]] = None,          # noqa
    ) -> None:
        # Only keywords that were actually provided (i.e. not `omit`) are
        # forwarded to argparse later.
        kwargs = dict(action=action, choices=choices, const=const, default=default, dest=dest,
            help=help, metavar=metavar, nargs=nargs, required=required, type=type)
        kwargs = {key: value for key, value in kwargs.items() if value is not Arg.omit}
        self.group = group
        # Names of keywords that were inferred rather than explicitly given;
        # see `Infer` and `merge_all`.
        self.guessed = set(guessed or ())
        super().__init__(*args, **kwargs)

    def update_help(self):
        """Expand `{choices}`, `{default}`, and `{varname}` placeholders in the help string."""
        if 'help' not in self.kwargs:
            return

        class formatting(dict):
            arg = self

            def __missing__(self, key):
                if key == 'choices':
                    return ', '.join(self.arg.kwargs['choices'])
                if key == 'default':
                    default: Union[bytes, int, str, slice] = self.arg.kwargs['default']
                    if isinstance(default, slice):
                        parts = [default.start or '', default.stop or '', default.step]
                        default = ':'.join(str(x) for x in parts if x is not None)
                    if isinstance(default, int):
                        return default
                    if not isbuffer(default):
                        return default
                    # Buffers render as text when printable, hex otherwise.
                    if default.isalnum():
                        return default.decode('latin-1')
                    return F'H:{default.hex()}'
                if key == 'varname':
                    return self.arg.kwargs.get('metavar', self.arg.destination)

        try:
            help_string: str = self.kwargs['help']
            self.kwargs.update(
                help=help_string.format_map(formatting()))
        except Exception:
            # Help formatting is best-effort; a malformed template is kept as-is.
            pass

    def __rmatmul__(self, method):
        # Expand help placeholders right before the argument is applied.
        self.update_help()
        return super().__rmatmul__(method)

    @staticmethod
    def AsOption(value: Optional[Any], cls: Enum) -> Enum:
        """Coerce *value* into a member of the enum *cls*, accepting names
        (case- and dash-insensitive) as well as raw enum values."""
        if value is None or isinstance(value, cls):
            return value
        if isinstance(value, str):
            try: return cls[value]
            except KeyError: pass
            needle = value.upper().replace('-', '_')
            for item in cls:
                if item.name.upper() == needle:
                    return item
        try:
            return cls(value)
        except Exception as E:
            choices = ', '.join([option.name for option in cls])
            raise ValueError(F'Could not transform {value} into {cls.__name__}; the choices are: {choices}') from E

    @classmethod
    def Counts(
        cls,
        *args  : str,
        help   : Union[omit, str] = omit,
        dest   : Union[omit, str] = omit,
        group  : Optional[str] = None,
    ):
        """
        A convenience method to add argparse arguments that introduce a counter.
        """
        return cls(*args, group=group, help=help, dest=dest, action='count')

    @classmethod
    def Switch(
        cls,
        *args  : str, off=False,
        help   : Union[omit, str] = omit,
        dest   : Union[omit, str] = omit,
        group  : Optional[str] = None,
    ):
        """
        A convenience method to add argparse arguments that change a boolean value from True to False or
        vice versa. By default, a switch will have a False default and change it to True when specified.
        """
        return cls(*args, group=group, help=help, dest=dest, action='store_false' if off else 'store_true')

    @classmethod
    def Binary(
        cls,
        *args   : str,
        help    : Union[omit, str] = omit,
        dest    : Union[omit, str] = omit,
        nargs   : Union[omit, int, str] = omit,
        metavar : Optional[str] = None,
        group   : Optional[str] = None,
    ):
        """
        Used to add argparse arguments that contain binary data.
        """
        if metavar is None and any('-' in a for a in args):
            metavar = 'B'
        return cls(*args, group=group, help=help, dest=dest, nargs=nargs, type=multibin, metavar=metavar)

    @classmethod
    def NumSeq(
        cls,
        *args   : str,
        help    : Union[omit, str] = omit,
        dest    : Union[omit, str] = omit,
        nargs   : Union[omit, int, str] = omit,
        metavar : Optional[str] = None,
        group   : Optional[str] = None,
    ):
        """
        Used to add argparse arguments that contain a numeric sequence.
        """
        return cls(*args, group=group, help=help, nargs=nargs, dest=dest, type=numseq, metavar=metavar)

    @classmethod
    def Bounds(
        cls,
        *args   : str,
        help    : Union[omit, str] = 'Specify start:end:step in Python slice syntax.',
        dest    : Union[omit, str] = omit,
        nargs   : Union[omit, int, str] = omit,
        default : Union[omit, Any] = omit,
        metavar : Optional[str] = 'start:end:step',
        group   : Optional[str] = None,
    ):
        """
        Used to add argparse arguments that contain a slice.
        """
        return cls(*args, group=group, help=help, default=default, nargs=nargs, dest=dest, type=sliceobj, metavar=metavar)

    @classmethod
    def Number(
        cls,
        *args   : str,
        bound   : Union[omit, Tuple[int, int]] = omit,
        help    : Union[omit, str] = omit,
        dest    : Union[omit, str] = omit,
        metavar : Optional[str] = None,
        group   : Optional[str] = None,
    ):
        """
        Used to add argparse arguments that contain a number.
        """
        nt = number
        if bound is not cls.omit:
            # Restrict the parser to the inclusive range [lower, upper].
            lower, upper = bound
            nt = nt[lower:upper]
        return cls(*args, group=group, help=help, dest=dest, type=nt, metavar=metavar or 'N')

    @classmethod
    def Option(
        cls,
        *args   : str,
        choices : Enum,
        help    : Union[omit, str] = omit,
        dest    : Union[omit, str] = omit,
        metavar : Optional[str] = None,
        group   : Optional[str] = None,
    ):
        """
        Used to add argparse arguments with a fixed set of options, based on an enumeration.
        """
        # Enum member names are exposed with dashes instead of underscores.
        cnames = [c.name.replace('_', '-') for c in choices]
        metavar = metavar or choices.__name__
        return cls(*args, group=group, help=help, metavar=metavar, dest=dest, choices=cnames, type=str)

    @classmethod
    def Choice(
        cls,
        *args   : str,
        choices : List[str],
        help    : Union[omit, str] = omit,
        metavar : Union[omit, str] = omit,
        dest    : Union[omit, str] = omit,
        type    : Type = str,
        nargs   : Union[omit, int, str] = omit,
        group   : Optional[str] = None,
    ):
        """
        Used to add argparse arguments with a fixed set of options, based on a list of strings.
        """
        return cls(*args, group=group, type=type, metavar=metavar, nargs=nargs,
            dest=dest, help=help, choices=choices)

    @property
    def positional(self) -> bool:
        # An argument is positional when any of its names lacks a dash prefix.
        return any(a[0] != '-' for a in self.args)

    @property
    def destination(self) -> str:
        """
        The name of the variable where the contents of this parsed argument will be stored.
        """
        for a in self.args:
            if a[0] != '-':
                return a
        try:
            return self.kwargs['dest']
        except KeyError:
            # Fall back to deriving a destination from a long flag name.
            for a in self.args:
                if a.startswith('--'):
                    dest = a.lstrip('-').replace('-', '_')
                    if dest.isidentifier():
                        return dest
        raise AttributeError(F'The argument with these values has no destination: {self!r}')

    @classmethod
    def Infer(cls, pt: inspect.Parameter, module: Optional[str] = None):
        """
        This class method can be used to infer the argparse argument for a Python function
        parameter. This guess is based on the annotation, name, and default value.
        """
        def needs_type(item: Dict[str, str]):
            try:
                return item['action'] == 'store'
            except KeyError:
                return True

        def get_argp_type(annotation_type):
            # Map common Python types to refinery's argparse parsers.
            if issubclass(annotation_type, (bytes, bytearray, memoryview)):
                return multibin
            if issubclass(annotation_type, int):
                return number
            if issubclass(annotation_type, slice):
                return sliceobj
            return annotation_type

        name = pt.name.replace('_', '-')
        default = pt.default
        guessed_pos_args = []
        guessed_kwd_args = dict(dest=pt.name)
        guessed = set()
        annotation = pt.annotation

        def guess(key, value):
            # Record *value* for *key* only when it was not already set,
            # marking it as guessed; returns the effective value.
            try:
                return guessed_kwd_args[key]
            except KeyError:
                guessed_kwd_args[key] = value
                guessed.add(key)
                return value

        if isinstance(annotation, str):
            # Deferred annotation (string): evaluate it, retrying once with
            # the declaring module's namespace on a NameError.
            symbols = None
            while symbols is not False:
                try:
                    annotation = eval(annotation, symbols)
                except NameError:
                    if symbols is not None or module is None:
                        break
                    try:
                        import importlib
                        symbols = importlib.import_module(module).__dict__
                    except Exception:
                        symbols = False
                except Exception:
                    # NOTE(review): a non-NameError leaves `symbols` unchanged
                    # and retries the same eval — this looks like it could
                    # loop forever; presumably unreachable in practice, but
                    # worth confirming.
                    pass
                else:
                    break

        if annotation is not pt.empty:
            if isinstance(annotation, Arg):
                # The annotation is itself an Arg: adopt its configuration.
                if annotation.kwargs.get('dest', pt.name) != pt.name:
                    raise ValueError(
                        F'Incompatible argument destination specified; parameter {pt.name} '
                        F'was annotated with {annotation!r}.')
                guessed_pos_args = annotation.args
                guessed_kwd_args.update(annotation.kwargs)
                guessed_kwd_args.update(group=annotation.group)
            elif isinstance(annotation, type):
                guessed.add('type')
                if not issubclass(annotation, bool) and needs_type(guessed_kwd_args):
                    guessed_kwd_args.update(type=get_argp_type(annotation))
                elif not isinstance(default, bool):
                    raise ValueError('Default value for boolean arguments must be provided.')

        if not guessed_pos_args:
            # Keyword-only parameters become long flags; others positional.
            guessed_pos_args = guessed_pos_args or [F'--{name}' if pt.kind is pt.KEYWORD_ONLY else name]

        if pt.kind is pt.VAR_POSITIONAL:
            oldnargs = guess('nargs', ZERO_OR_MORE)
            if oldnargs not in (ONE_OR_MORE, ZERO_OR_MORE, REMAINDER):
                raise ValueError(F'Variadic positional arguments has nargs set to {oldnargs!r}')
            return cls(*guessed_pos_args, **guessed_kwd_args)

        if default is not pt.empty:
            if isinstance(default, Enum):
                default = default.name
            if isinstance(default, (list, tuple)):
                guess('nargs', ZERO_OR_MORE)
                if not pt.default:
                    default = pt.empty
                else:
                    guessed_kwd_args['default'] = pt.default
                    default = default[0]
            else:
                guessed_kwd_args['default'] = default
                if pt.kind is pt.POSITIONAL_ONLY:
                    guess('nargs', OPTIONAL)

        if default is not pt.empty:
            if isinstance(default, bool):
                # Boolean defaults become on/off switches.
                action = 'store_false' if default else 'store_true'
                guessed_kwd_args['action'] = action
            elif needs_type(guessed_kwd_args):
                guess('type', get_argp_type(type(default)))

        return cls(*guessed_pos_args, **guessed_kwd_args, guessed=guessed)

    def merge_args(self, them: Argument) -> None:
        """Merge the argument name lists, keeping at most one short and one
        long flag (preferring names from *them*)."""
        def iterboth():
            yield from them.args
            yield from self.args
        if not self.args:
            self.args = list(them.args)
            return
        sflag = None
        lflag = None
        for a in iterboth():
            if a[:2] == '--': lflag = lflag or a
            elif a[0] == '-': sflag = sflag or a
        self.args = []
        if sflag: self.args.append(sflag)
        if lflag: self.args.append(lflag)
        if not self.args:
            self.args = list(them.args)

    def merge_all(self, them: Arg) -> None:
        """Merge keyword settings, names, and group from *them* into self;
        explicit settings take precedence over guessed ones, and the `delete`
        sentinel removes a key."""
        for key, value in them.kwargs.items():
            if value is Arg.delete:
                self.kwargs.pop(key, None)
                self.guessed.discard(key)
                continue
            if key in them.guessed:
                # Guessed values never override values that self obtained
                # explicitly.
                if key not in self.guessed:
                    if key == 'type' and self.kwargs.get('action', None) != 'store':
                        continue
                    if key in self.kwargs:
                        continue
                self.guessed.add(key)
            self.kwargs[key] = value
        self.merge_args(them)
        self.group = them.group or self.group

    def __copy__(self) -> Argument:
        cls = self.__class__
        clone = cls.__new__(cls)
        clone.kwargs = dict(self.kwargs)
        clone.args = list(self.args)
        clone.group = self.group
        clone.guessed = set(self.guessed)
        return clone

    def __repr__(self) -> str:
        return F'{self.__class__.__name__}({super().__repr__()})'

    def __call__(self, init: Callable) -> Callable:
        """Decorator use: merge this Arg into the annotation of the matching
        parameter of *init*."""
        parameters = inspect.signature(init).parameters
        try:
            inferred = Arg.Infer(parameters[self.destination])
            inferred.merge_all(self)
            init.__annotations__[self.destination] = inferred
        except KeyError:
            raise ValueError(F'Unable to decorate because no parameter with name {self.destination} exists.')
        return init
class ArgumentSpecification(OrderedDict):
    """
    Ordered container for `refinery.units.arg` specifications, keyed by each
    argument's destination variable.
    """
    def merge(self: Dict[str, Arg], argument: Arg):
        """
        Fold *argument* into the specification, updating any existing entry
        with the same destination.
        """
        dest = argument.destination
        if dest not in self:
            self[dest] = argument
            return
        self[dest].merge_all(argument)
# Type variable ranging over byte-string-like payload types.
DataType = TypeVar('DataType', bound=ByteString)
# Signature of a unit's process/reverse method: receives raw bytes and may
# return nothing, a single result, or an iterable of results.
ProcType = Callable[['Unit', ByteString], Optional[Union[DataType, Iterable[DataType]]]]
# Generic, unconstrained helper type variable.
_T = TypeVar('_T')
def UnitProcessorBoilerplate(operation: ProcType[ByteString]) -> ProcType[Chunk]:
    """Wrap a unit's process/reverse method so its input is converted to the
    expected chunk type and its output is normalized to `Chunk` values."""
    @wraps(operation)
    def wrapped(self: Unit, data: ByteString) -> Optional[Union[Chunk, Iterable[Chunk]]]:
        ChunkType = Chunk
        # Treat missing input as an empty buffer.
        if data is None:
            data = B''
        # If the method annotates its single data parameter with a concrete
        # type, convert the incoming data to that type before calling it.
        typespec = get_type_hints(operation)
        typespec.pop('return', None)
        if typespec and len(typespec) == 1:
            SpecType = next(iter(typespec.values()))
            if isinstance(SpecType, str):
                # Annotation may still be an unevaluated string; resolution
                # is best-effort and silently ignored on failure.
                try: SpecType = eval(SpecType)
                except Exception: pass
            if isinstance(SpecType, type):
                ChunkType = SpecType
        if not isinstance(data, ChunkType):
            data = ChunkType(data)
        result = operation(self, data)
        # Normalize the return value: a Chunk passes through, any other
        # non-generator value is wrapped into one Chunk, and generators are
        # wrapped lazily element by element.
        if isinstance(result, Chunk):
            return result
        elif not inspect.isgenerator(result):
            return Chunk(result)
        return (Chunk.Wrap(r) for r in result)
    return wrapped
def UnitFilterBoilerplate(
    operation : Callable[[Any, Iterable[Chunk]], Iterable[Chunk]]
) -> Callable[[Any, Iterable[Chunk]], Iterable[Chunk]]:
    """Wrap a unit's filter method so the unit's arguments are applied (via
    `self.args @ chunk`) to the first visible chunk before filtering."""
    @wraps(operation)
    def peekfilter(self, chunks: Iterable[Chunk]) -> Iterable[Chunk]:
        def _apply_args_to_head():
            it = iter(chunks)
            for chunk in it:
                if chunk.visible:
                    # Apply this unit's arguments to the first visible chunk,
                    # then stop scanning.
                    yield self.args @ chunk
                    break
                else:
                    # Invisible chunks ahead of it pass through untouched.
                    yield chunk
            # The remainder of the stream is forwarded unchanged.
            yield from it
        yield from operation(self, _apply_args_to_head())
    return peekfilter
class MissingFunction(metaclass=Singleton):
    """
    Singleton placeholder for an operation that a unit does not implement
    (internally used to indicate a missing reverse operation); calling it
    always fails.
    """
    def __call__(*args, **kwargs):
        raise NotImplementedError
class Executable(ABCMeta):
    """
    This is the metaclass for refinery units. A class which is of this type is
    required to implement a method `run()`. If the class is created in the
    currently executing module, then an instance of the class is automatically
    created after it is defined and its `run()` method is invoked.
    """

    Entry = None
    """
    This variable stores the executable entry point. If more than one entry point
    are present, only the first one is executed and an error message is generated
    for the other ones.
    """

    # Maps argument destination names to their `Arg` specification; populated in
    # `__init__` for every class created through this metaclass.
    _argument_specification: Dict[str, Arg]

    def _infer_argspec(cls, parameters: Dict[str, inspect.Parameter], args: Optional[Dict[str, Arg]], module: str):
        # Derive the command-line argument specification from the parameters of
        # the unit's `__init__` signature, merging into an optional pre-existing
        # specification; `module` provides context for `Arg.Infer`.
        args: Dict[str, Arg] = ArgumentSpecification() if args is None else args

        # Every parameter except `self` and the keyword catch-all is exposed.
        exposed = [pt.name for pt in skipfirst(parameters.values()) if pt.kind != pt.VAR_KEYWORD]
        # The arguments are added in reverse order to the argument parser later.
        # This is done to have a more intuitive use of decorator based argument configuration.
        exposed.reverse()

        for name in exposed:
            try:
                argument = Arg.Infer(parameters[name], module)
            except KeyError:
                continue
            args.merge(argument)

        # Restore signature order of the merged specification.
        for name in exposed:
            args.move_to_end(name)

        for known in args.values():
            if known.positional:
                known.kwargs.pop('dest', None)
                if 'default' in known.kwargs:
                    # A positional argument with a default becomes optional on
                    # the command line.
                    known.kwargs.setdefault('nargs', OPTIONAL)
            elif not any(len(a) > 2 for a in known.args):
                # No long option present yet; synthesize one from the
                # destination name.
                flagname = known.destination.replace('_', '-')
                known.args.append(F'--{flagname}')
            action = known.kwargs.get('action', 'store')
            if action.startswith('store_'):
                # store_true / store_false / store_const take no value, so a
                # default or a type converter would be meaningless.
                known.kwargs.pop('default', None)
                continue
            if action == 'store':
                # Values are parsed as multibin expressions by default.
                known.kwargs.setdefault('type', multibin)
        return args

    def __new__(mcs, name: str, bases: Sequence[Executable], nmspc: Dict[str, Any], abstract=False):
        def decorate(**decorations):
            # Apply boilerplate decorators to the listed methods, unless they
            # are abstract (in which case the concrete override gets decorated
            # further down the hierarchy instead).
            for method, decorator in decorations.items():
                try:
                    old = nmspc[method]
                except KeyError:
                    continue
                if getattr(old, '__isabstractmethod__', False):
                    continue
                nmspc[method] = decorator(old)
        decorate(
            filter=UnitFilterBoilerplate,
            process=UnitProcessorBoilerplate,
            reverse=UnitProcessorBoilerplate,
            __init__=no_type_check,
        )
        if not abstract and Entry not in bases:
            # Concrete units implicitly inherit from Entry.
            bases = bases + (Entry,)
            if not bases[0].is_reversible:
                # A non-reversible parent means this unit has no reverse
                # operation unless it defines one itself.
                nmspc.setdefault('reverse', MissingFunction)
        nmspc.setdefault('__doc__', '')
        return super(Executable, mcs).__new__(mcs, name, bases, nmspc)

    def __init__(cls, name: str, bases: Sequence[Executable], nmspc: Dict[str, Any], abstract=False):
        super(Executable, cls).__init__(name, bases, nmspc)
        cls._argument_specification = args = ArgumentSpecification()

        cls_init = cls.__init__
        sig_init = inspect.signature(cls_init)
        parameters = sig_init.parameters
        has_keyword = any(p.kind == p.VAR_KEYWORD for p in parameters.values())
        inherited = []

        # Copy specifications for parameters that are re-declared in this class.
        for base in bases:
            base: Executable
            for key, value in base._argument_specification.items():
                if key in parameters:
                    args[key] = value.__copy__()

        # With a **kwargs catch-all, a concrete unit also inherits all of its
        # primary base's arguments that it does not declare itself.
        if not abstract and bases and has_keyword:
            for key, value in bases[0]._argument_specification.items():
                if key not in args:
                    args[key] = value.__copy__()
                    inherited.append(key)

        cls._infer_argspec(parameters, args, cls.__module__)

        if not abstract and has_keyword:
            # Replace __init__ with a thin wrapper whose signature also lists
            # the inherited parameters, so introspection (and autoinvoke)
            # sees the full set of accepted arguments.
            cls__init__ = cls.__init__

            @wraps(cls__init__)
            def new__init__(self, *args, **kwargs):
                cls__init__(self, *args, **kwargs)

            params = [p for p in parameters.values() if p.kind != p.VAR_KEYWORD]
            if inherited:
                pp = inspect.signature(bases[0].__init__).parameters
                for name in inherited:
                    params.append(pp[name])
            new__init__.__signature__ = sig_init.replace(parameters=tuple(params))
            cls.__init__ = new__init__

        try:
            initcode = cls.__init__.__code__.co_code
        except AttributeError:
            initcode = None

        if initcode == (lambda: None).__code__.co_code:
            # The unit's __init__ has an empty body: synthesize one that simply
            # maps positional arguments and defaults onto the base __init__.
            base = bases[0]
            head = []
            defs = {}
            tail = None

            for p in skipfirst(parameters.values()):
                if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD):
                    head.append(p.name)
                if p.kind in (p.KEYWORD_ONLY, p.POSITIONAL_OR_KEYWORD) and p.default is not p.empty:
                    defs[p.name] = p.default
                if p.kind is p.VAR_POSITIONAL:
                    tail = p.name

            @wraps(cls.__init__)
            def cls__init__(self, *args, **kw):
                for name, arg in zip(head, args):
                    kw[name] = arg
                if tail:
                    # Remaining positional arguments go to the *args parameter.
                    k = min(len(args), len(head))
                    kw[tail] = args[k:]
                for key in defs:
                    if key not in kw:
                        kw[key] = defs[key]
                base.__init__(self, **kw)

            cls.__init__ = cls__init__

        if not abstract and sys.modules[cls.__module__].__name__ == '__main__':
            # A unit defined in __main__ becomes the script's entry point: the
            # first such unit runs immediately after its definition.
            if not Executable.Entry:
                Executable.Entry = cls.name
                cls.run()

    def __getitem__(cls, other):
        # Class-level framing syntax: instantiate with defaults and delegate.
        return cls().__getitem__(other)

    def __or__(cls, other):
        # Allow `unitclass | ...` pipelines without explicit instantiation.
        return cls().__or__(other)

    def __pos__(cls):
        # `+unitclass` instantiates the unit with default arguments.
        return cls()

    def __neg__(cls):
        # `-unitclass` instantiates the unit with its reverse operation selected.
        unit: Unit = cls()
        unit.args.reverse = 1
        return unit

    def __ror__(cls, other) -> Unit:
        # `data | unitclass` pipes into a default-constructed instance.
        return cls().__ror__(other)

    @property
    def is_multiplex(cls) -> bool:
        """
        This proprety is `True` if and only if the unit's `process` or `reverse` method is a generator, i.e.
        when the unit can generate multiple outputs.
        """
        if inspect.isgeneratorfunction(inspect.unwrap(cls.process)):
            return True
        if not cls.is_reversible:
            return False
        return inspect.isgeneratorfunction(inspect.unwrap(cls.reverse))

    @property
    def is_reversible(cls) -> bool:
        """
        This property is `True` if and only if the unit has a member function named `reverse`. By convention,
        this member function implements the inverse of `refinery.units.Unit.process`.
        """
        if cls.reverse is MissingFunction:
            return False
        try:
            return not cls.reverse.__isabstractmethod__
        except AttributeError:
            return True

    @property
    def codec(cls) -> str:
        """
        The default codec for encoding textual information between units. The value of this property is
        hardcoded to `UTF8`.
        """
        return 'UTF8'

    @property
    def name(cls) -> str:
        # The command-line name of the unit: the class name with underscores
        # converted to dashes (surrounding underscores stripped).
        return cls.__name__.strip('_').replace('_', '-')

    @property
    def logger(cls) -> Logger:
        # Lazily created, cached per-class logger named after the unit.
        try:
            return cls._logger
        except AttributeError:
            pass
        cls._logger = _logger = logger(cls.name)
        return _logger
class DelayedArgumentProxy:
    """
    This class implements a proxy for the `args` member variable of `refinery.units.Unit`.
    Its primary purpose is to proxy `refinery.lib.argformats.DelayedArgument` values which
    can be computed only as soon as input data becomes available and which also have to be
    recomputed for each input.
    """

    class PendingUpdate:
        # Sentinel stored while a delayed argument is being resolved; guards
        # against recursive resolution of the same argument.
        pass

    _argv: Namespace       # raw parsed arguments, possibly containing delayed values
    _argo: List[str]       # declaration order of the argument names
    _args: Dict[str, Any]  # fully resolved argument values
    _done: bool            # True when no delayed arguments remain
    _guid: int             # id() of the chunk the arguments were last resolved for

    def __copy__(self):
        # Shallow-copy the proxy; container attributes are duplicated so the
        # clone can be resolved independently of the original.
        cls = self.__class__
        clone = cls.__new__(cls)
        clone._store(
            _argv=self._argv,
            _argo=list(self._argo),
            _args=dict(self._args),
            _done=self._done,
            _guid=self._guid,
        )
        return clone

    def __iter__(self):
        # Iterate the names of all currently resolved arguments.
        yield from self._args

    def __getitem__(self, key):
        return self._args[key]

    def __init__(self, argv: Namespace, argo: Iterable[str]):
        # Split the parsed arguments into already-concrete values and delayed
        # ones; `_done` records whether anything remains to be resolved.
        args = {}
        done = True
        for name, value in vars(argv).items():
            if not pending(value):
                args[name] = value
            else:
                done = False
        self._store(
            _argv=argv,
            _argo=list(argo),
            _args=args,
            _done=done,
            _guid=None,
        )

    def __call__(self, data: bytearray):
        """
        Update the current arguments for the input `data`, regardless of whether or not this chunk
        has already been used. In most cases, the matrix-multiplication syntax should be used instead
        of this direct call: If a multibin argument modifies the meta dictionary by being applied, a
        second interpretation of this argument with the same chunk might cause an error. For example,
        if an argument specifies to pop a meta variable from the meta dictionary, this variable will
        not be available for a second interpretation call.
        """
        for name in self._argo:
            value = getattr(self._argv, name, None)
            if value is self.PendingUpdate:
                raise RuntimeError(F'Attempting to resolve {name} while an update for this argument is in flight')
            if value and pending(value):
                # Park the sentinel first: if manifesting this argument
                # recursively touches it again, the access fails loudly
                # instead of recursing forever.
                self._args[name] = self.PendingUpdate
                self._args[name] = manifest(value, data)
        # Remember which chunk these values belong to, so __matmul__ can skip
        # a redundant second resolution for the same chunk.
        self._store(_guid=id(data))
        return data

    def __matmul__(self, data: bytearray):
        """
        Interpret the current arguments for the given input `data`.
        """
        if self._done:
            return data
        if not isinstance(data, bytearray):
            data = bytearray(data)
        if id(data) == self._guid:
            # Already resolved against this exact chunk; do not resolve twice.
            return data
        return self(data)

    def _store(self, **kwargs):
        # Write instance attributes directly, bypassing __setattr__ below.
        self.__dict__.update(kwargs)

    def __getattr__(self, name):
        # Lookup order: parent attributes, resolved arguments, then raw values
        # from the namespace (only if they are not delayed).
        try:
            return super().__getattr__(name)
        except AttributeError:
            pass
        try:
            return self._args[name]
        except KeyError:
            pass
        try:
            value = getattr(self._argv, name)
        except AttributeError as E:
            raise AttributeError(F'Argument {name} not set.') from E
        if not value or not pending(value):
            return value
        raise AttributeError(F'the value {name} cannot be accessed until data is available.')

    def __setattr__(self, name, value):
        # New names are appended to the declaration order; delayed values mark
        # the proxy as unresolved, concrete ones are stored immediately.
        if not hasattr(self._argv, name):
            self._argo.append(name)
        if pending(value):
            self._store(_done=False)
        else:
            self._args[name] = value
        return setattr(self._argv, name, value)
class UnitBase(metaclass=Executable, abstract=True):
    """
    This base class is an abstract interface specifying the abstract methods that have
    to be present on any unit. All actual units should inherit from its only child class
    `refinery.units.Unit`.
    """

    @abc.abstractmethod
    def process(self, data: ByteString) -> Union[Optional[ByteString], Iterable[ByteString]]:
        """
        This routine is overridden by children of `refinery.units.Unit` to define how
        the unit processes a given chunk of binary data.
        """

    @abc.abstractmethod
    def reverse(self, data: ByteString) -> Union[Optional[ByteString], Iterable[ByteString]]:
        """
        If this routine is overridden by children of `refinery.units.Unit`, then it must
        implement an operation that reverses the `refinery.units.Unit.process` operation.
        The absence of an overload for this function is ignored for non-abstract children of
        `refinery.units.UnitBase`.
        """

    # NOTE(review): `abc.abstractclassmethod` has been deprecated since Python 3.3
    # in favor of stacking @classmethod and @abc.abstractmethod; also, the first
    # parameter is named `self` although this is conceptually a classmethod.
    @abc.abstractclassmethod
    def handles(self, data: ByteString) -> Optional[bool]:
        """
        This tri-state routine returns `True` if the unit is certain that it can process the
        given input data, and `False` if it is convinced of the opposite. `None` is returned
        when no clear verdict is available.
        """

    @abc.abstractmethod
    def filter(self, inputs: Iterable[Chunk]) -> Iterable[Chunk]:
        """
        Receives an iterable of `refinery.lib.frame.Chunk`s and yields only those that
        should be processed. The default implementation returns the iterator without
        change; this member function is designed to be overloaded by child classes of
        `refinery.units.Unit` to allow inspection of an entire frame layer and altering
        it before `refinery.units.Unit.process` is called on the individual chunks.
        """

    @abc.abstractmethod
    def finish(self) -> Iterable[Chunk]:
        """
        Child classes of `refinery.units.Unit` can overwrite this method to generate a
        stream of chunks to be processed after the last frame has been processed.
        """
class requirement(property):
    """
    Marker subclass of `property`: descriptors created through `Unit.Requires`
    derive from this type so dependency-importing properties can be recognized.
    """
    pass
class Unit(UnitBase, abstract=True):
    """
    The base class for all refinery units. It implements a small set of globally
    available options and the handling for multiple inputs and outputs. All units
    implement the _framing_ syntax for producing multiple outputs and ingesting
    multiple inputs in a common format. For more details, see `refinery.lib.frame`.
    """

    # Convenience alias so units can reference the argument decorator as Unit.Arg.
    Arg = Arg

    # Distribution names of third-party packages this unit can use (optional) or
    # needs (required); populated by `Unit.Requires` via `__set_name__`.
    optional_dependencies: Optional[Set[str]] = None
    required_dependencies: Optional[Set[str]] = None

    @staticmethod
    def Requires(distribution: str, optional: bool = True):
        """
        Factory for a descriptor class used to declare a third-party dependency:
        the decorated importer function is executed lazily on first access and
        the imported module is cached on the descriptor.
        """
        class Requirement(requirement):
            dependency = distribution
            required = not optional

            def __init__(self, importer: Callable):
                super().__init__(importer)
                self.module = None

            def __set_name__(self, unit: Type[Unit], name: str):
                # Register the dependency on the owning unit class when the
                # descriptor is assigned to a class attribute.
                if self.required:
                    bucket = unit.required_dependencies
                else:
                    bucket = unit.optional_dependencies
                if bucket is None:
                    bucket = set()
                    if self.required:
                        unit.required_dependencies = bucket
                    else:
                        unit.optional_dependencies = bucket
                bucket.add(self.dependency)

            def __get__(self, unit: Optional[Type[Unit]], tp: Optional[Type[Executable]] = None):
                # Import lazily on first access; subsequent accesses return the
                # cached module.
                if self.module is not None:
                    return self.module
                try:
                    self.module = module = self.fget()
                except ImportError as E:
                    args = unit.optional_dependencies or ()
                    raise RefineryImportMissing(self.dependency, *args) from E
                except Exception as E:
                    raise AttributeError(F'module import for distribution "{distribution}" failed: {E!s}')
                else:
                    return module

        return Requirement

    @property
    def is_reversible(self) -> bool:
        # Instance-level mirror of the metaclass property.
        return self.__class__.is_reversible

    @property
    def codec(self) -> str:
        # Instance-level mirror of the metaclass property.
        return self.__class__.codec

    @property
    def logger(self):
        logger: Logger = self.__class__.logger
        return logger

    @property
    def name(self) -> str:
        return self.__class__.name

    @property
    def is_quiet(self) -> bool:
        # True when the -Q/--quiet flag was set; tolerates an unset args proxy.
        try:
            return self.args.quiet
        except AttributeError:
            return False

    @property
    def log_level(self) -> LogLevel:
        """
        Returns the current log level as an element of `refinery.units.LogLevel`.
        """
        if self.is_quiet:
            return LogLevel.NONE
        return LogLevel(self.logger.getEffectiveLevel())

    @log_level.setter
    def log_level(self, value: Union[int, LogLevel]) -> None:
        if not isinstance(value, LogLevel):
            value = LogLevel.FromVerbosity(value)
        self.logger.setLevel(value)

    def log_detach(self) -> None:
        """
        When a unit is created using the `refinery.units.Unit.assemble` method, it is attached to a
        logger by default (in less abstract terms, the `refinery.units.Unit.log_level` property is
        set to a positive value). This method detaches the unit from its logger, which also means that
        any exceptions that occur during runtime will be raised to the caller.
        """
        self.log_level = LogLevel.DETACHED
        return self

    def __iter__(self) -> Generator[Chunk, None, None]:
        # A unit is its own chunk iterator; see __next__.
        return self

    @property
    def leniency(self) -> int:
        # Count of -L/--lenient flags; controls partial-result handling.
        return getattr(self.args, 'lenient', 0)

    def _exception_handler(self, exception: BaseException, data: Optional[ByteString]):
        # Central error policy: depending on leniency, log level, and exception
        # type, either return a (partial) result, log and swallow, or re-raise.
        abort_execution = False
        if data is not None and self.leniency > 1:
            # Maximum leniency: salvage whatever is available.
            try:
                return exception.partial
            except AttributeError:
                return data
        if isinstance(exception, RefineryPartialResult):
            if self.leniency >= 1:
                return exception.partial
            if self.log_level < LogLevel.DETACHED:
                self.log_warn(F'error, partial result returned: {exception}')
                return None
            raise exception
        elif self.log_level >= LogLevel.DETACHED:
            # Detached units propagate everything to the caller.
            raise exception
        elif isinstance(exception, RefineryCriticalException):
            self.log_warn(F'critical error, terminating: {exception}')
            raise exception
        elif isinstance(exception, VariableMissing):
            self.log_warn('critical error:', exception.args[0])
            abort_execution = True
        elif isinstance(exception, GeneratorExit):
            raise exception
        elif isinstance(exception, RefineryImportMissing):
            self.log_fail(F'dependency {exception.missing} is missing; run pip install {exception.install}')
        elif isinstance(exception, RefineryException):
            self.log_fail(exception.args[0])
        else:
            # Unknown exception: build a readable one-line summary, optionally
            # followed by a short peek of the offending data.
            try:
                explanation = exception.args[0]
            except (AttributeError, IndexError):
                explanation = exception
            if not isinstance(explanation, str):
                explanation = exception
            explanation = str(explanation).strip()
            message = F'exception of type {exception.__class__.__name__}'
            if explanation:
                message = F'{message}; {explanation!s}'
            if self.log_level <= LogLevel.INFO and data is not None:
                from refinery.units.sinks.peek import peek
                peeked = str(data | peek(lines=2, decode=True, stdout=True))
                message = F'{message}\n{peeked}'
            self.log_fail(message)
            if self.log_debug():
                import traceback
                traceback.print_exc(file=sys.stderr)
        if abort_execution:
            raise RefineryCriticalException(str(exception))

    def __next__(self) -> Chunk:
        # Pull the next output chunk from the frame handler; any failure other
        # than StopIteration is routed through the exception handler and then
        # terminates iteration.
        if not self._chunks:
            self._chunks = iter(self._framehandler)
        while True:
            try:
                return next(self._chunks)
            except StopIteration:
                raise
            except RefineryCriticalException as R:
                raise StopIteration from R
            except BaseException as B:
                self._exception_handler(B, None)
                raise StopIteration from B

    @property
    def _framehandler(self) -> Framed:
        # Lazily constructed Framed wrapper that drives act() over the input
        # stream according to the unit's framing options.
        if self._framed:
            return self._framed

        def normalized_action(data: ByteString) -> Generator[Chunk, None, None]:
            # Normalize act() output to a chunk stream and route errors
            # through the exception handler, labelling salvaged results.
            try:
                result = self.act(data)
                if inspect.isgenerator(result):
                    yield from (x for x in result if x is not None)
                elif result is not None:
                    yield result
            except KeyboardInterrupt:
                raise
            except BaseException as B:
                result = self._exception_handler(B, data)
                message = str(B).strip() or 'unknown'
                if result is not None:
                    yield self.labelled(result, error=message)

        self._framed = Framed(
            normalized_action,
            self.source,
            self.args.nesting,
            self.args.squeeze,
            self.filter,
            self.finish,
        )
        return self._framed

    def finish(self) -> Iterable[Chunk]:
        # Default: nothing to emit after the last frame.
        yield from ()

    def filter(self, inputs: Iterable[Chunk]) -> Iterable[Chunk]:
        # Default: pass every chunk through unchanged.
        return inputs

    @classmethod
    def handles(self, data: bytearray) -> Optional[bool]:
        # Default: no verdict on whether this unit can process the given data.
        # NOTE(review): the first parameter is named `self` although this is a
        # classmethod.
        return None

    def reset(self):
        # Discard cached iteration state so the unit can be re-run; also resets
        # an upstream source unit if one is attached.
        try:
            self._source.reset()
        except AttributeError:
            pass
        self._framed = None
        self._chunks = None

    @property
    def source(self):
        """
        Represents a unit or binary IO stream which has been attached to this unit as its
        source of input data.
        """
        return self._source

    @source.setter
    def source(self, stream):
        if isinstance(stream, self.__class__.__class__):
            # A unit class was assigned; instantiate it with defaults.
            stream = stream()
        if not isinstance(stream, self.__class__):
            self.reset()
        self._source = stream

    @property
    def nozzle(self) -> Unit:
        """
        The nozzle is defined recursively as the nozzle of `refinery.units.Unit.source`
        and `self` if no such thing exists. In other words, it is the leftmost unit in
        a pipeline, where data should be inserted for processing.
        """
        try:
            return self.source.nozzle
        except AttributeError:
            return self

    def __getitem__(self, unit: Union[Unit, Type[Unit], slice]):
        # Framing syntax: `self[unit]` opens a new frame layer around `unit`;
        # `self[:]` requests squeezed output instead.
        if isinstance(unit, type):
            unit = unit()
        alpha = self.__copy__()
        if isinstance(unit, slice):
            if unit.start or unit.stop or unit.step:
                raise ValueError
            alpha.args.squeeze = True
            return alpha
        omega = unit.__copy__()
        alpha.args.nesting += 1
        omega.args.nesting -= 1
        omega.nozzle.source = alpha
        return omega

    def __pos__(self):
        return self

    def __del__(self):
        # Best effort: close the stream feeding the pipeline, if any.
        try:
            self.nozzle.source.close()
        except Exception:
            pass

    def __neg__(self):
        # Build a pipeline that applies the reverse operation of every unit in
        # the chain, in reversed order.
        # NOTE(review): the local name `reversed` shadows the builtin within
        # this method.
        pipeline = []
        cursor = self
        while isinstance(cursor, Unit):
            reversed = copy.copy(cursor)
            reversed.args.reverse = 1
            reversed._source = None
            reversed.reset()
            pipeline.append(reversed)
            cursor = cursor._source
        reversed = None
        while pipeline:
            reversed = reversed | pipeline.pop()
        return reversed

    def __ror__(self, stream: Union[str, ByteIO, ByteString]):
        # Attach input data (string, buffer, or stream) to the pipeline nozzle.
        if stream is None:
            return self
        if not isstream(stream):
            if isinstance(stream, str):
                stream = stream.encode(self.codec)
            stream = MemoryFile(stream) if stream else open(os.devnull, 'rb')
        self.reset()
        self.nozzle.source = stream
        return self

    def __str__(self):
        return self | str

    def __bytes__(self):
        return self | bytes

    @overload
    def __or__(self, stream: Callable[[ByteString], _T]) -> _T:
        ...

    @overload
    def __or__(self, stream: Union[Unit, Type[Unit]]) -> Unit:
        ...

    @overload
    def __or__(self, stream: dict) -> dict:
        ...

    @overload
    def __or__(self, stream: list) -> list:
        ...

    @overload
    def __or__(self, stream: set) -> set:
        ...

    @overload
    def __or__(self, stream: bytearray) -> bytearray:
        ...

    @overload
    def __or__(self, stream: memoryview) -> memoryview:
        ...

    @overload
    def __or__(self, stream: Type[None]) -> None:
        ...

    @overload
    def __or__(self, stream: Type[...]) -> bytearray:
        ...

    @overload
    def __or__(self, stream: ByteIO) -> ByteIO:
        ...

    def __or__(self, stream):
        # Dispatch on the right-hand operand: another unit extends the
        # pipeline; containers collect output; callables convert it; a
        # writable stream receives the raw bytes.
        def get_converter(it: Iterable):
            # A single-element container may specify a converter: `...` means
            # identity, any callable is applied to each chunk, and an empty
            # container means "collect chunks as-is".
            try:
                c = one(it)
            except LookupError:
                return None
            if ... is c:
                def identity(x):
                    return x
                return identity
            if callable(c):
                return c

        if stream is None:
            with open(os.devnull, 'wb') as null:
                self | null
            return
        if isinstance(stream, type) and issubclass(stream, Entry):
            stream = stream()
        if isinstance(stream, type(...)):
            def stream(c): return c
        if isinstance(stream, Entry):
            return stream.__copy__().__ror__(self)
        elif isinstance(stream, list):
            converter = get_converter(stream)
            if converter is None:
                stream.extend(self)
                return stream
            return [converter(chunk) for chunk in self]
        elif isinstance(stream, set):
            converter = get_converter(stream)
            if converter is None:
                stream.update(self)
                return stream
            return {converter(chunk) for chunk in self}
        elif isinstance(stream, dict):
            # A single-entry dict groups output by a meta variable; the value
            # optionally specifies a converter, possibly inside a list or set
            # to collect colliding keys instead of overwriting.
            key, convert = one(stream.items())
            output: Dict[Any, Union[List[Chunk], Set[Chunk]]] = {}
            deconflict = None
            if isinstance(convert, (list, set)):
                deconflict = type(convert)
                convert = one(convert)
            for item in self:
                try:
                    value = item.meta[key]
                except KeyError:
                    value = None
                if convert is not ...:
                    item = convert(item)
                if deconflict:
                    bag = output.setdefault(value, deconflict())
                    if isinstance(bag, list):
                        bag.append(item)
                    else:
                        bag.add(item)
                else:
                    output[value] = item
            return output
        elif isinstance(stream, (bytearray, memoryview)):
            with MemoryFile(stream) as stdout:
                return (self | stdout).getvalue()
        elif callable(stream):
            # Collect all output and feed it through the callable; strings get
            # decoded first, and a matching type is returned unconverted.
            with MemoryFile(bytearray()) as stdout:
                self | stdout
                out: bytearray = stdout.getbuffer()
                if isinstance(stream, type) and isinstance(out, stream):
                    return out
                if isinstance(stream, type) and issubclass(stream, str):
                    out = out.decode(self.codec)
                return stream(out)

        stream: ByteIO
        if not stream.writable():
            raise ValueError('target stream is not writable')
        self._target = stream

        def cname(x: str):
            return x.lower().replace('-', '')

        # When writing to a terminal whose encoding differs from the unit
        # codec, output has to be transcoded.
        recode = self.isatty and cname(self.codec) != cname(sys.stdout.encoding)
        chunk = None

        for last, chunk in lookahead(self):
            if (
                not last
                and self._framehandler.framebreak
                and not chunk.endswith(B'\n')
            ):
                chunk.extend(B'\n')
            if recode:
                try:
                    # NOTE(review): `chunk.decode(chunk, self.codec, ...)` passes
                    # the buffer itself as the encoding argument; this always
                    # raises and is swallowed below, so recoding never happens.
                    # Likely intended: chunk.decode(self.codec, errors=...).
                    chunk = chunk.decode(chunk, self.codec, errors='backslashreplace').encode(sys.stdout.encoding)
                except Exception:
                    pass
            try:
                stream.write(chunk)
                stream.flush()
            except AttributeError:
                pass
            except (BrokenPipeError, OSError) as E:
                if isinstance(E, BrokenPipeError) or E.errno != 32:
                    # This happens when the next unit does not consume everything
                    # we send. For example, this can happen when a large file is
                    # read in chunks and the pick unit is used to select only the
                    # first few of these.
                    self.log_debug(F'cannot send to next unit: {E}')
                break

        try:
            # Terminal cosmetics: make sure output ends with a newline.
            if self.isatty and chunk and not chunk.endswith(B'\n'):
                stream.write(B'\n')
                stream.flush()
        except (NameError, AttributeError):
            pass

        return stream

    def read(self, bytecount: int = -1) -> bytes:
        """
        Reads bytes from the output stream of this unit.
        """
        if not bytecount or bytecount < 0:
            return self.read1()
        bfr = bytearray(bytecount)
        offset = 0
        while offset < bytecount:
            tmp = self.read1(bytecount - offset)
            if not tmp:
                # Source exhausted; truncate the buffer to the bytes received.
                del bfr[offset:]
                break
            end = offset + len(tmp)
            bfr[offset:end] = tmp
            offset = end
        return bytes(bfr)

    def read1(self, bytecount: int = -1) -> bytes:
        """
        Performs a single read against the output stream of this unit and returns
        the result.
        """
        try:
            out = self._buffer or next(self)
            if bytecount and bytecount > 0:
                # Keep any surplus in the internal buffer for the next read.
                out, self._buffer = out[:bytecount], out[bytecount:]
            elif self._buffer:
                self._buffer = B''
            return out
        except StopIteration:
            return B''

    def act(self, data: Union[Chunk, ByteString]) -> Union[Optional[ByteString], Generator[ByteString, None, None]]:
        # Resolve delayed arguments for this chunk, then apply process/reverse
        # depending on how often -R was specified: 0 = process, odd = reverse,
        # even (>0) = normalize (reverse after process).
        mode = self.args.reverse
        data = self.args @ data
        if not mode:
            return self.process(data)
        elif mode % 2:
            return self.reverse(data)
        else:
            return self.reverse(self.process(data))

    def __call__(self, data: Optional[Union[ByteString, Chunk]] = None) -> bytes:
        # Run the unit on `data` (or empty input) and return the raw output.
        with MemoryFile(data) if data else open(os.devnull, 'rb') as stdin:
            stdin: ByteIO
            with MemoryFile() as stdout:
                return (stdin | self | stdout).getvalue()

    @classmethod
    def labelled(cls, data: Union[Chunk, ByteString], **meta) -> Chunk:
        """
        This class method can be used to label a chunk of binary output with metadata. This
        metadata will be visible inside pipeline frames, see `refinery.lib.frame`.
        """
        if isinstance(data, Chunk):
            data.meta.update(meta)
            return data
        return Chunk(data, meta=meta)

    def process(self, data: ByteString) -> Union[Optional[ByteString], Generator[ByteString, None, None]]:
        # Default implementation: the identity transformation.
        return data

    @classmethod
    def log_fail(cls: Union[Executable, Type[Unit]], *messages, clip=False) -> bool:
        """
        Log the message if and only if the current log level is at least `refinery.units.LogLevel.ERROR`.
        """
        rv = cls.logger.isEnabledFor(LogLevel.ERROR)
        if rv and messages:
            cls.logger.error(cls._output(*messages, clip=clip))
        return rv

    @classmethod
    def log_warn(cls: Union[Executable, Type[Unit]], *messages, clip=False) -> bool:
        """
        Log the message if and only if the current log level is at least `refinery.units.LogLevel.WARN`.
        """
        rv = cls.logger.isEnabledFor(LogLevel.WARNING)
        if rv and messages:
            cls.logger.warning(cls._output(*messages, clip=clip))
        return rv

    @classmethod
    def log_info(cls: Union[Executable, Type[Unit]], *messages, clip=False) -> bool:
        """
        Log the message if and only if the current log level is at least `refinery.units.LogLevel.INFO`.
        """
        rv = cls.logger.isEnabledFor(LogLevel.INFO)
        if rv and messages:
            cls.logger.info(cls._output(*messages, clip=clip))
        return rv

    @classmethod
    def log_debug(cls: Union[Executable, Type[Unit]], *messages, clip=False) -> bool:
        """
        Log the message if and only if the current log level is at least `refinery.units.LogLevel.DEBUG`.
        """
        rv = cls.logger.isEnabledFor(LogLevel.DEBUG)
        if rv and messages:
            cls.logger.debug(cls._output(*messages, clip=clip))
        return rv

    @property
    def isatty(self) -> bool:
        # True when the unit's output target is an interactive terminal.
        try:
            return self._target.isatty()
        except AttributeError:
            return False

    @classmethod
    def _output(cls, *messages, clip=False) -> str:
        # Convert arbitrary message objects (callables, exceptions, buffers,
        # anything) into a single printable log line; optionally clip it to
        # the terminal width.
        def transform(message):
            if callable(message):
                message = message()
            if isinstance(message, Exception):
                args = [arg for arg in message.args if isinstance(arg, str)]
                if len(args) == 1:
                    message = args[0]
                else:
                    message = str(message)
            if isinstance(message, str):
                return message
            if isbuffer(message):
                import codecs
                message: Union[bytes, bytearray, memoryview]
                pmsg: str = codecs.decode(message, cls.codec, 'surrogateescape')
                if not pmsg.isprintable():
                    pmsg = message.hex().upper()
                return pmsg
            else:
                import pprint
                return pprint.pformat(message)
        message = ' '.join(transform(msg) for msg in messages)
        if clip:
            from textwrap import shorten
            from refinery.lib.tools import get_terminal_size
            message = shorten(
                message,
                get_terminal_size(75) - len(cls.name) - 14,
            )
        return message

    @classmethod
    def _interface(cls, argp: ArgumentParserWithKeywordHooks) -> ArgumentParserWithKeywordHooks:
        """
        Receives a reference to an argument parser. This parser will be used to parse
        the command line for this unit into the member variable called `args`.
        """
        base = argp.add_argument_group('generic options')
        base.set_defaults(reverse=False, squeeze=False)
        base.add_argument('-h', '--help', action='help', help='Show this help message and exit.')
        base.add_argument('-L', '--lenient', action='count', default=0, help='Allow partial results as output.')
        base.add_argument('-Q', '--quiet', action='store_true', help='Disables all log output.')
        base.add_argument('-0', '--devnull', action='store_true', help='Do not produce any output.')
        base.add_argument('-v', '--verbose', action='count', default=0,
            help='Specify up to two times to increase log level.')
        if cls.is_reversible:
            base.add_argument('-R', '--reverse', action='count', default=0,
                help='Use the reverse operation; Specify twice to normalize (first decode, then encode).')
        # Unit-specific arguments; group names map to mutually exclusive groups.
        groups = {None: argp}
        for argument in reversed(cls._argument_specification.values()):
            gp = argument.group
            if gp not in groups:
                groups[gp] = argp.add_mutually_exclusive_group()
            # Arg.__rmatmul__ applies the stored args/kwargs to add_argument.
            groups[gp].add_argument @ argument
        return argp

    @classmethod
    def argparser(cls, **keywords):
        # Build the unit's command-line parser; `keywords` override defaults.
        argp = ArgumentParserWithKeywordHooks(
            keywords, prog=cls.name, description=documentation(cls), add_help=False)
        argp.set_defaults(nesting=0)
        return cls._interface(argp)

    @staticmethod
    def superinit(spc, **keywords):
        """
        This function uses `refinery.lib.tools.autoinvoke` to call the `__init__` function of `super` with
        by taking all required parameters from `keywords`, ignoring the rest. Calling
        ```
        self.superinit(super(), **vars())
        ```
        will therefore perform initialization of the parent class without having to forward all parameters
        manually. This is a convenience feature which reduces code bloat when many parameters have to be
        forwarded, see e.g. `refinery.units.pattern.carve.carve` for an example.
        """
        my_own_args = iter(inspect.signature(spc.__thisclass__.__init__).parameters.values())
        parent_args = inspect.signature(spc.__init__).parameters
        # Drop `self` from the keyword set.
        keywords.pop(next(my_own_args).name, None)
        for a in my_own_args:
            if a.kind is a.VAR_KEYWORD:
                # Flatten a **kwargs dict from vars() into the keyword set.
                keywords.update(keywords.pop(a.name, {}))
        junk = [a for a in keywords]
        for a in parent_args.values():
            if a.kind is a.VAR_KEYWORD:
                # Parent accepts arbitrary keywords: only private names remain junk.
                junk = [j for j in junk if j.startswith('_')]
                break
            try: junk.remove(a.name)
            except ValueError: pass
        for j in junk:
            del keywords[j]
        try:
            if spc.__init__.__func__ is Unit.__init__:
                return spc.__init__(**keywords)
        except AttributeError:
            pass
        return autoinvoke(spc.__init__, keywords)

    @classmethod
    def assemble(cls, *args, **keywords):
        """
        Creates a unit from the given arguments and keywords. The given keywords are used to overwrite any
        previously specified defaults for the argument parser of the unit, then this modified parser is
        used to parse the given list of arguments as though they were given on the command line. The parser
        results are used to construct an instance of the unit, this object is consequently returned.
        """
        argp = cls.argparser(**keywords)
        args = argp.parse_args_with_nesting(args)
        try:
            unit = autoinvoke(cls, args.__dict__)
        except ValueError as E:
            argp.error(str(E))
        else:
            # Forward the generic options into the unit's argument proxy.
            unit.args._store(_argo=argp.order)
            unit.args.quiet = args.quiet
            unit.args.lenient = args.lenient
            unit.args.squeeze = args.squeeze
            unit.args.nesting = args.nesting
            unit.args.reverse = args.reverse
            unit.args.devnull = args.devnull
            unit.args.verbose = args.verbose
            if args.quiet:
                unit.log_level = LogLevel.NONE
            else:
                unit.log_level = args.verbose
            return unit

    def __copy__(self):
        cls = self.__class__
        clone: Unit = cls.__new__(cls)
        clone.__dict__.update(self.__dict__)
        # TODO: Preferably, units should keep all their information in args, making
        #       the above __dict__ update unnecessary.
        # clone._buffer = self._buffer
        # clone._source = self._source
        clone._target = None
        clone._framed = None
        clone._chunks = None
        clone.args = copy.copy(self.args)
        return clone

    def __init__(self, **keywords):
        # Internal state: read-ahead buffer, attached input/output streams,
        # frame handler, and chunk iterator.
        self._buffer = B''
        self._source = None
        self._target = None
        self._framed = None
        self._chunks = None

        keywords.update(dict(
            nesting=0,
            reverse=False,
            squeeze=False,
            devnull=False,
            quiet=False,
        ))
        # Since Python 3.6, functions always preserve the order of the keyword
        # arguments passed to them (see PEP 468).
        self.args = DelayedArgumentProxy(Namespace(**keywords), list(keywords))
        self.log_detach()

    # Hidden command-line flags for performance diagnostics.
    _SECRET_DEBUG_TIMING_FLAG = '--debug-timing'
    _SECRET_YAPPI_TIMING_FLAG = '--yappi-timing'

    @classmethod
    def run(cls: Union[Type[Unit], Executable], argv=None, stream=None) -> None:
        """
        Implements command line execution. As `refinery.units.Unit` is an `refinery.units.Executable`,
        this method will be executed when a class inheriting from `refinery.units.Unit` is defined in
        the current `__main__` module.
        """
        if not environment.disable_ps1_bandaid.value:
            from refinery.lib import powershell
            ps1 = powershell.bandaid(cls.codec)
        else:
            ps1 = None

        try:
            # Lift the integer-to-string conversion limit (Python 3.11+).
            sys.set_int_max_str_digits(0)
        except AttributeError:
            pass

        argv = argv if argv is not None else sys.argv[1:]
        clock = None
        yappi = None

        if cls._SECRET_DEBUG_TIMING_FLAG in argv:
            from time import process_time
            argv.remove(cls._SECRET_DEBUG_TIMING_FLAG)
            clock = process_time()
            cls.logger.setLevel(LogLevel.INFO)
            cls.logger.info('starting clock: {:.4f}'.format(clock))

        if cls._SECRET_YAPPI_TIMING_FLAG in argv:
            argv.remove(cls._SECRET_YAPPI_TIMING_FLAG)
            try:
                import yappi as _yappi
            except ImportError:
                cls.logger.warn('unable to start yappi; package is missing')
            else:
                yappi = _yappi

        if stream is None:
            # Interactive terminal means no piped input: read from /dev/null.
            stream = open(os.devnull, 'rb') if sys.stdin.isatty() else sys.stdin.buffer

        with stream as source:
            try:
                unit = cls.assemble(*argv)
            except ArgparseError as ap:
                ap.parser.error_commandline(str(ap))
                return
            except Exception as msg:
                import traceback
                cls.logger.critical(cls._output('initialization failed:', msg))
                for line in traceback.format_exc().splitlines(keepends=False):
                    cls.logger.critical(cls._output(line))
                return

            if ps1:
                unit.log_debug(F'applying PowerShell band-aid for: {unit.name}')

            loglevel = environment.verbosity.value
            if loglevel:
                unit.log_level = loglevel

            if clock:
                unit.log_level = min(unit.log_level, LogLevel.INFO)
                unit.logger.info('unit launching: {:.4f}'.format(clock))

            if yappi is not None:
                yappi.set_clock_type('cpu')
                yappi.start()

            try:
                with open(os.devnull, 'wb') if unit.args.devnull else sys.stdout.buffer as output:
                    source | unit | output
            except ParserVariableMissing as E:
                unit.logger.error(F'the variable "{E!s}" was missing while trying to parse an expression')
            except ArgumentTypeError as E:
                unit.logger.error(F'delayed argument initialization failed: {E!s}')
            except KeyboardInterrupt:
                unit.logger.warning('aborting due to keyboard interrupt')
            except OSError:
                pass

            if yappi is not None:
                stats = yappi.get_func_stats()
                filename = F'{unit.name}.perf'
                stats.save(filename, type='CALLGRIND')
                # NOTE(review): the message below does not interpolate the
                # computed `filename`; it literally prints "(unknown)". This
                # looks like a corrupted F-string — likely meant
                # F'wrote yappi results to file: {filename}'.
                cls.logger.info(F'wrote yappi results to file: (unknown)')

            if clock:
                stop_clock = process_time()
                unit.logger.info('stopping clock: {:.4f}'.format(stop_clock))
                unit.logger.info('time delta was: {:.4f}'.format(stop_clock - clock))
# pdoc documentation overrides: reuse the docstrings of the corresponding
# `Executable` properties for the `Unit` attributes of the same name.
__pdoc__ = {
    'Unit.is_reversible': Executable.is_reversible.__doc__,
    'Unit.codec': Executable.codec.__doc__
}
|
bb21759b911bdf8d86aa131270b3933c52a1933a
|
dd0d1e578321adb92a865b302b3180d4c34cc514
|
/Hackathon2022/calibrator.py
|
de6cacbca4ed9ce607a093f9ca0d0be93d4e4aa7
|
[] |
no_license
|
LitLeo/TensorRT_Tutorial
|
a99055160cea88109ad2a5561fabf0fdd02f1c0e
|
0abd8ed708ea97ff598923fbfd9cb65c5dee1fff
|
refs/heads/master
| 2023-06-08T00:36:52.974872
| 2023-05-29T07:57:02
| 2023-05-29T07:57:02
| 89,451,146
| 819
| 170
| null | 2017-05-01T11:46:31
| 2017-04-26T07:25:35
| null |
UTF-8
|
Python
| false
| false
| 4,212
|
py
|
calibrator.py
|
import tensorrt as trt
import os
import numpy as np
import pycuda.driver as cuda
import pycuda.autoinit
from sys import getsizeof
# import pycuda.driver as cuda
# import pycuda.autoinit
# import numpy as np
# import helpers.tokenization as tokenization
# import helpers.data_processing as dp
class EncoderCalibrator(trt.IInt8LegacyCalibrator):
    """Feeds pre-recorded encoder inputs to TensorRT for INT8 calibration.

    Calibration batches are read from a .npz archive: entries whose name
    contains "speech-" hold feature arrays and entries whose name contains
    "speech_lengths" hold the matching length arrays. Each stored entry is
    served to TensorRT as one calibration batch.
    """

    def __init__(self, calibration_data_file, cache_file, batch_size):
        # Whenever you specify a custom constructor for a TensorRT class,
        # you MUST call the constructor of the parent explicitly.
        trt.IInt8LegacyCalibrator.__init__(self)
        self.cache_file = cache_file
        self.batch_size = batch_size
        self.current_index = 0

        print("start read " + calibration_data_file)
        self.feat_list = []
        self.feat_len_list = []
        data = np.load(calibration_data_file)
        for name in data.files:
            if "speech-" in name:
                self.feat_list.append(data[name])
                print(name)
                print(data[name].shape)
            if "speech_lengths" in name:
                self.feat_len_list.append(data[name])
                print(name)
                print(data[name].shape)

        # Features and lengths must pair up one-to-one. Raise instead of
        # assert(0): asserts are stripped when Python runs with -O.
        if len(self.feat_list) != len(self.feat_len_list):
            raise ValueError("len(feat_list) != len(feat_len_list)")

        self.num_inputs = len(self.feat_list)
        # Device buffers, allocated lazily in get_batch().
        self.d_feat = None
        self.d_feat_len = None

    def free(self):
        # Nothing to release explicitly; pycuda frees allocations on GC.
        pass

    def get_batch_size(self):
        """Batch size reported to TensorRT."""
        return self.batch_size

    # TensorRT passes along the names of the engine bindings to the get_batch function.
    # You don't necessarily have to use them, but they can be useful to understand the order of
    # the inputs. The bindings list is expected to have the same ordering as 'names'.
    def get_batch(self, names):
        """Return device pointers for the next batch, or None when exhausted."""
        if self.current_index >= self.num_inputs:
            print("Calibrating index {:} batch size {:} exceed max input limit {:} sentences".format(self.current_index, self.batch_size, self.num_inputs))
            return None

        # Each stored entry already represents one full batch. (The previous
        # code referenced `np_feats`/`feat` before assignment -- leftovers of
        # a removed batching loop that raised NameError on the first call.)
        np_feats = self.feat_list[self.current_index]
        np_feat_lens = self.feat_len_list[self.current_index]

        # nbytes is correct for any dtype; the old `size * 4` assumed
        # 4-byte elements and under-allocated e.g. for int64 lengths.
        self.d_feat = cuda.mem_alloc(np_feats.nbytes)
        self.d_feat_len = cuda.mem_alloc(np_feat_lens.nbytes)
        cuda.memcpy_htod(self.d_feat, np_feats.ravel())
        cuda.memcpy_htod(self.d_feat_len, np_feat_lens.ravel())

        self.current_index += 1
        return [self.d_feat, self.d_feat_len]

    def read_calibration_cache(self):
        """Return the cached calibration table, or None to force recalibration."""
        # If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
        if os.path.exists(self.cache_file):
            with open(self.cache_file, "rb") as f:
                return f.read()

    def write_calibration_cache(self, cache):
        """Persist the calibration table, flushing it all the way to disk."""
        with open(self.cache_file, "wb") as f:
            f.write(cache)
            f.flush()
            os.fsync(f)

    def get_quantile(self):
        return 0.9999

    def get_regression_cutoff(self):
        return 1.0

    def read_histogram_cache(self, length):
        return None

    def write_histogram_cache(self, ptr, length):
        return None
def main():
    """Smoke test: construct the calibrator and pull a few batches."""
    calibrator = EncoderCalibrator("/workspace/data/calibration.npz", "encoder.cache", 100)
    for _ in range(4):
        calibrator.get_batch("input")


if __name__ == '__main__':
    main()
|
69f2292d36545c20e27d0b796317d19e1c055608
|
8f2c55a2530c3e59dab5907c0044c618b88dd09b
|
/_pydevd_bundle/pydevconsole_code.py
|
e6ba3002378115f9286357b747a3534dccc8ae48
|
[
"Apache-2.0",
"EPL-1.0"
] |
permissive
|
fabioz/PyDev.Debugger
|
5a9c6d4c09be85a0e2d9fb93567fd65faf04c81d
|
26864816cbfcf002a99913bcc31ebef48042a4ac
|
refs/heads/main
| 2023-08-18T01:08:34.323363
| 2023-04-15T11:15:47
| 2023-04-15T11:15:47
| 21,870,144
| 363
| 126
|
Apache-2.0
| 2023-07-30T23:03:31
| 2014-07-15T18:01:12
|
Python
|
UTF-8
|
Python
| false
| false
| 19,014
|
py
|
pydevconsole_code.py
|
"""
A copy of the code module in the standard library with some changes to work with
async evaluation.
Utilities needed to emulate Python's interactive interpreter.
"""
# Inspired by similar code by Jeff Epler and Fredrik Lundh.
import sys
import traceback
import inspect
# START --------------------------- from codeop import CommandCompiler, compile_command
# START --------------------------- from codeop import CommandCompiler, compile_command
# START --------------------------- from codeop import CommandCompiler, compile_command
# START --------------------------- from codeop import CommandCompiler, compile_command
# START --------------------------- from codeop import CommandCompiler, compile_command
r"""Utilities to compile possibly incomplete Python source code.
This module provides two interfaces, broadly similar to the builtin
function compile(), which take program text, a filename and a 'mode'
and:
- Return code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
Approach:
First, check if the source consists entirely of blank lines and
comments; if so, replace it with 'pass', because the built-in
parser doesn't always do the right thing for these.
Compile three times: as is, with \n, and with \n\n appended. If it
compiles as is, it's complete. If it compiles with one \n appended,
we expect more. If it doesn't compile either way, we compare the
error we get when compiling with \n or \n\n appended. If the errors
are the same, the code is broken. But if the errors are different, we
expect more. Not intuitive; not even guaranteed to hold in future
releases; but this matches the compiler's behavior from Python 1.4
through 2.2, at least.
Caveat:
It is possible (but not likely) that the parser stops parsing with a
successful outcome before reaching the end of the source; in this
case, trailing symbols may be ignored instead of causing an error.
For example, a backslash followed by two newlines may be followed by
arbitrary garbage. This will be fixed once the API for the parser is
better.
The two interfaces are:
compile_command(source, filename, symbol):
Compiles a single command in the manner described above.
CommandCompiler():
Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force.
The module also provides another class:
Compile():
Instances of this class act like the built-in function compile,
but with 'memory' in the sense described above.
"""
import __future__

# All __future__ feature objects; used to detect future statements in
# compiled code so that later compilations keep them in force.
_features = [getattr(__future__, fname)
             for fname in __future__.all_feature_names]

__all__ = ["compile_command", "Compile", "CommandCompiler"]

# Suppress the compiler's implicit trailing NEWLINE/DEDENT, so that a
# partially entered block is reported as incomplete rather than compiled.
PyCF_DONT_IMPLY_DEDENT = 0x200  # Matches pythonrun.h
def _maybe_compile(compiler, source, filename, symbol):
# Check for source consisting of only blank lines and comments
for line in source.split("\n"):
line = line.strip()
if line and line[0] != '#':
break # Leave it alone
else:
if symbol != "eval":
source = "pass" # Replace it with a 'pass' statement
err = err1 = err2 = None
code = code1 = code2 = None
try:
code = compiler(source, filename, symbol)
except SyntaxError as err:
pass
try:
code1 = compiler(source + "\n", filename, symbol)
except SyntaxError as e:
err1 = e
try:
code2 = compiler(source + "\n\n", filename, symbol)
except SyntaxError as e:
err2 = e
try:
if code:
return code
if not code1 and repr(err1) == repr(err2):
raise err1
finally:
err1 = err2 = None
def _compile(source, filename, symbol):
return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT)
def compile_command(source, filename="<input>", symbol="single"):
    r"""Compile a command and determine whether it is incomplete.

    Arguments:
      source -- the source string; may contain \n characters
      filename -- optional filename from which source was read; default "<input>"
      symbol -- optional grammar start symbol; "single" (default) or "eval"

    Returns a code object when the command is complete and valid, or None
    when it is incomplete. Raises SyntaxError, ValueError or OverflowError
    when the command is a syntax error (OverflowError and ValueError can be
    produced by malformed literals).
    """
    result = _maybe_compile(_compile, source, filename, symbol)
    return result
class Compile:
    """A compile() lookalike that remembers __future__ statements.

    Once program text containing a __future__ import has been compiled,
    the corresponding compiler flag stays active for every subsequent
    compilation performed through the same instance.
    """

    def __init__(self):
        self.flags = PyCF_DONT_IMPLY_DEDENT
        try:
            # Permit `await` at the top level when the runtime supports it.
            from ast import PyCF_ALLOW_TOP_LEVEL_AWAIT
            self.flags |= PyCF_ALLOW_TOP_LEVEL_AWAIT
        except:
            pass

    def __call__(self, source, filename, symbol):
        code_obj = compile(source, filename, symbol, self.flags, 1)
        # Latch any future-statement flags the compiled code switched on.
        for feature in _features:
            flag = feature.compiler_flag
            if code_obj.co_flags & flag:
                self.flags |= flag
        return code_obj
class CommandCompiler:
    """Stateful counterpart of compile_command().

    Callable with the same signature as compile_command(); the difference
    is that program text containing a __future__ statement is "remembered",
    and all subsequent program texts are compiled with it in force.
    """

    def __init__(self):
        self.compiler = Compile()

    def __call__(self, source, filename="<input>", symbol="single"):
        r"""Compile a command and determine whether it is incomplete.

        Arguments:
          source -- the source string; may contain \n characters
          filename -- optional filename from which source was read; default "<input>"
          symbol -- optional grammar start symbol; "single" (default) or "eval"

        Returns a code object when the command is complete and valid, or
        None when it is incomplete. Raises SyntaxError, ValueError or
        OverflowError when the command is a syntax error (OverflowError and
        ValueError can be produced by malformed literals).
        """
        return _maybe_compile(self.compiler, source, filename, symbol)
# END --------------------------- from codeop import CommandCompiler, compile_command
# END --------------------------- from codeop import CommandCompiler, compile_command
# END --------------------------- from codeop import CommandCompiler, compile_command
# END --------------------------- from codeop import CommandCompiler, compile_command
# END --------------------------- from codeop import CommandCompiler, compile_command
__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact",
"compile_command"]
from _pydev_bundle._pydev_saved_modules import threading
class _EvalAwaitInNewEventLoop(threading.Thread):
def __init__(self, compiled, updated_globals, updated_locals):
threading.Thread.__init__(self)
self.daemon = True
self._compiled = compiled
self._updated_globals = updated_globals
self._updated_locals = updated_locals
# Output
self.evaluated_value = None
self.exc = None
async def _async_func(self):
return await eval(self._compiled, self._updated_locals, self._updated_globals)
def run(self):
try:
import asyncio
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self.evaluated_value = asyncio.run(self._async_func())
except:
self.exc = sys.exc_info()
class InteractiveInterpreter:
    """Base class for InteractiveConsole.

    This class deals with parsing and interpreter state (the user's
    namespace); it doesn't deal with input buffering or prompting or
    input file naming (the filename is always passed in explicitly).
    """

    def __init__(self, locals=None):
        """Constructor.

        The optional 'locals' argument specifies the dictionary in
        which code will be executed; it defaults to a newly created
        dictionary with key "__name__" set to "__console__" and key
        "__doc__" set to None.
        """
        if locals is None:
            locals = {"__name__": "__console__", "__doc__": None}
        self.locals = locals
        # CommandCompiler remembers __future__ statements across inputs.
        self.compile = CommandCompiler()

    def runsource(self, source, filename="<input>", symbol="single"):
        """Compile and run some source in the interpreter.

        Arguments are as for compile_command().

        One of several things can happen:

        1) The input is incorrect; compile_command() raised an
        exception (SyntaxError or OverflowError).  A syntax traceback
        will be printed by calling the showsyntaxerror() method.

        2) The input is incomplete, and more input is required;
        compile_command() returned None.  Nothing happens.

        3) The input is complete; compile_command() returned a code
        object.  The code is executed by calling self.runcode() (which
        also handles run-time exceptions, except for SystemExit).

        The return value is True in case 2, False in the other cases (unless
        an exception is raised).  The return value can be used to
        decide whether to use sys.ps1 or sys.ps2 to prompt the next
        line.
        """
        try:
            code = self.compile(source, filename, symbol)
        except (OverflowError, SyntaxError, ValueError):
            # Case 1
            self.showsyntaxerror(filename)
            return False

        if code is None:
            # Case 2
            return True

        # Case 3
        self.runcode(code)
        return False

    def runcode(self, code):
        """Execute a code object.

        When an exception occurs, self.showtraceback() is called to
        display a traceback.  All exceptions are caught except
        SystemExit, which is reraised.

        A note about KeyboardInterrupt: this exception may occur
        elsewhere in this code, and may not always be caught.  The
        caller should be prepared to deal with it.
        """
        try:
            is_async = False
            # CO_COROUTINE is set on code compiled with top-level await
            # allowed that actually contains async constructs.
            if hasattr(inspect, 'CO_COROUTINE'):
                is_async = inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE

            if is_async:
                # Await the coroutine on a fresh event loop in a helper
                # thread so this thread is not blocked by a running loop.
                # NOTE(review): the console namespace is passed in the
                # "globals" slot here (with None for locals) — confirm
                # against _EvalAwaitInNewEventLoop's eval() argument order.
                t = _EvalAwaitInNewEventLoop(code, self.locals, None)
                t.start()
                t.join()

                if t.exc:
                    raise t.exc[1].with_traceback(t.exc[2])

            else:
                exec(code, self.locals)
        except SystemExit:
            raise
        except:
            self.showtraceback()

    def showsyntaxerror(self, filename=None):
        """Display the syntax error that just occurred.

        This doesn't display a stack trace because there isn't one.

        If a filename is given, it is stuffed in the exception instead
        of what was there before (because Python's parser always uses
        "<string>" when reading from a string).

        The output is written by self.write(), below.
        """
        # Mirror the interpreter's convention of recording the last error
        # in sys.last_type / sys.last_value / sys.last_traceback.
        type, value, tb = sys.exc_info()
        sys.last_type = type
        sys.last_value = value
        sys.last_traceback = tb
        if filename and type is SyntaxError:
            # Work hard to stuff the correct filename in the exception
            try:
                msg, (dummy_filename, lineno, offset, line) = value.args
            except ValueError:
                # Not the format we expect; leave it alone
                pass
            else:
                # Stuff in the right filename
                value = SyntaxError(msg, (filename, lineno, offset, line))
                sys.last_value = value
        if sys.excepthook is sys.__excepthook__:
            lines = traceback.format_exception_only(type, value)
            self.write(''.join(lines))
        else:
            # If someone has set sys.excepthook, we let that take precedence
            # over self.write
            sys.excepthook(type, value, tb)

    def showtraceback(self):
        """Display the exception that just occurred.

        We remove the first stack item because it is our own code.

        The output is written by self.write(), below.
        """
        sys.last_type, sys.last_value, last_tb = ei = sys.exc_info()
        sys.last_traceback = last_tb
        try:
            # tb_next skips the frame belonging to this interpreter itself.
            lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next)
            if sys.excepthook is sys.__excepthook__:
                self.write(''.join(lines))
            else:
                # If someone has set sys.excepthook, we let that take precedence
                # over self.write
                sys.excepthook(ei[0], ei[1], last_tb)
        finally:
            # Break reference cycles through the traceback.
            last_tb = ei = None

    def write(self, data):
        """Write a string.

        The base implementation writes to sys.stderr; a subclass may
        replace this with a different implementation.
        """
        sys.stderr.write(data)
class InteractiveConsole(InteractiveInterpreter):
    """Closely emulate the behavior of the interactive Python interpreter.

    This class builds on InteractiveInterpreter and adds prompting
    using the familiar sys.ps1 and sys.ps2, and input buffering.
    """

    def __init__(self, locals=None, filename="<console>"):
        """Constructor.

        The optional locals argument will be passed to the
        InteractiveInterpreter base class.

        The optional filename argument should specify the (file)name
        of the input stream; it will show up in tracebacks.
        """
        InteractiveInterpreter.__init__(self, locals)
        self.filename = filename
        self.resetbuffer()

    def resetbuffer(self):
        """Reset the input buffer."""
        # Accumulates the lines of the statement currently being entered.
        self.buffer = []

    def interact(self, banner=None, exitmsg=None):
        """Closely emulate the interactive Python console.

        The optional banner argument specifies the banner to print
        before the first interaction; by default it prints a banner
        similar to the one printed by the real Python interpreter,
        followed by the current class name in parentheses (so as not
        to confuse this with the real interpreter -- since it's so
        close!).

        The optional exitmsg argument specifies the exit message
        printed when exiting. Pass the empty string to suppress
        printing an exit message. If exitmsg is not given or None,
        a default message is printed.
        """
        # Ensure the standard prompts exist; they may be absent when the
        # process was not started in interactive mode.
        try:
            sys.ps1
        except AttributeError:
            sys.ps1 = ">>> "
        try:
            sys.ps2
        except AttributeError:
            sys.ps2 = "... "
        cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
        if banner is None:
            self.write("Python %s on %s\n%s\n(%s)\n" %
                       (sys.version, sys.platform, cprt,
                        self.__class__.__name__))
        elif banner:
            self.write("%s\n" % str(banner))
        # `more` is truthy while a multi-line statement is being entered.
        more = 0
        while 1:
            try:
                if more:
                    prompt = sys.ps2
                else:
                    prompt = sys.ps1
                try:
                    line = self.raw_input(prompt)
                except EOFError:
                    self.write("\n")
                    break
                else:
                    more = self.push(line)
            except KeyboardInterrupt:
                # Ctrl-C aborts the current statement but keeps the console alive.
                self.write("\nKeyboardInterrupt\n")
                self.resetbuffer()
                more = 0
        if exitmsg is None:
            self.write('now exiting %s...\n' % self.__class__.__name__)
        elif exitmsg != '':
            self.write('%s\n' % exitmsg)

    def push(self, line):
        """Push a line to the interpreter.

        The line should not have a trailing newline; it may have
        internal newlines.  The line is appended to a buffer and the
        interpreter's runsource() method is called with the
        concatenated contents of the buffer as source.  If this
        indicates that the command was executed or invalid, the buffer
        is reset; otherwise, the command is incomplete, and the buffer
        is left as it was after the line was appended.  The return
        value is 1 if more input is required, 0 if the line was dealt
        with in some way (this is the same as runsource()).
        """
        self.buffer.append(line)
        source = "\n".join(self.buffer)
        more = self.runsource(source, self.filename)
        if not more:
            self.resetbuffer()
        return more

    def raw_input(self, prompt=""):
        """Write a prompt and read a line.

        The returned line does not include the trailing newline.
        When the user enters the EOF key sequence, EOFError is raised.

        The base implementation uses the built-in function
        input(); a subclass may replace this with a different
        implementation.
        """
        return input(prompt)
def interact(banner=None, readfunc=None, local=None, exitmsg=None):
    """Closely emulate the interactive Python interpreter.

    This is a backwards compatible interface to the InteractiveConsole
    class.  When readfunc is not specified, it attempts to import the
    readline module to enable GNU readline if it is available.

    Arguments (all optional, all default to None):

    banner -- passed to InteractiveConsole.interact()
    readfunc -- if not None, replaces InteractiveConsole.raw_input()
    local -- passed to InteractiveInterpreter.__init__()
    exitmsg -- passed to InteractiveConsole.interact()
    """
    console = InteractiveConsole(local)
    if readfunc is None:
        # Best effort: line editing / history via GNU readline when present.
        try:
            import readline  # noqa: F401
        except ImportError:
            pass
    else:
        console.raw_input = readfunc
    console.interact(banner, exitmsg)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-q', action='store_true',
help="don't print version and copyright messages")
args = parser.parse_args()
if args.q or sys.flags.quiet:
banner = ''
else:
banner = None
interact(banner)
|
412da4ba9807ae3a47955dc6a75f350cf4fb7b0b
|
6146e33102797407ede06ce2daa56c28fdfa2812
|
/python/GafferSceneUITest/ParameterInspectorTest.py
|
f1f2cb9c082fe5b5b8eb2552273029b403222ece
|
[
"BSD-3-Clause"
] |
permissive
|
GafferHQ/gaffer
|
e1eb78ba8682bfbb7b17586d6e7b47988c3b7d64
|
59cab96598c59b90bee6d3fc1806492a5c03b4f1
|
refs/heads/main
| 2023-09-01T17:36:45.227956
| 2023-08-30T09:10:56
| 2023-08-30T09:10:56
| 9,043,124
| 707
| 144
|
BSD-3-Clause
| 2023-09-14T09:05:37
| 2013-03-27T00:04:53
|
Python
|
UTF-8
|
Python
| false
| false
| 31,878
|
py
|
ParameterInspectorTest.py
|
##########################################################################
#
# Copyright (c) 2021, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import Gaffer
import GafferTest
import GafferUITest
import GafferScene
import GafferSceneTest
import GafferSceneUI
class ParameterInspectorTest( GafferUITest.TestCase ) :
def testName( self ) :
sphere = GafferScene.SceneNode()
inspector = GafferSceneUI.Private.ParameterInspector( sphere["out"], None, "light", ( "", "penumbra_angle" ) )
self.assertEqual( inspector.name(), "penumbra_angle" )
inspector = GafferSceneUI.Private.ParameterInspector( sphere["out"], None, "light", ( "", "penumbraAngle" ) )
self.assertEqual( inspector.name(), "penumbraAngle" )
@staticmethod
def __inspect( scene, path, parameter, editScope=None, attribute="light" ) :
editScopePlug = Gaffer.Plug()
editScopePlug.setInput( editScope["enabled"] if editScope is not None else None )
inspector = GafferSceneUI.Private.ParameterInspector(
scene, editScopePlug, attribute, ( "", parameter )
)
with Gaffer.Context() as context :
context["scene:path"] = IECore.InternedStringVectorData( path.split( "/" )[1:] )
return inspector.inspect()
	def __assertExpectedResult( self, result, source, sourceType, editable, nonEditableReason = "", edit = None, editWarning = "" ) :

		# Verifies every facet of an inspection result in one call : where the
		# value comes from, whether it can be edited, and either the plug an
		# edit would be made on (plus any warning) or the reason it cannot be.

		self.assertEqual( result.source(), source )
		self.assertEqual( result.sourceType(), sourceType )
		self.assertEqual( result.editable(), editable )

		if editable :
			# An editable result must not carry a non-editable reason.
			self.assertEqual( nonEditableReason, "" )
			self.assertEqual( result.nonEditableReason(), "" )

			acquiredEdit = result.acquireEdit()
			self.assertIsNotNone( acquiredEdit )
			if result.editScope() :
				# Edits made via an EditScope must live inside that scope.
				self.assertTrue( result.editScope().isAncestorOf( acquiredEdit ) )

			if edit is not None :
				self.assertEqual(
					acquiredEdit.fullName() if acquiredEdit is not None else "",
					edit.fullName() if edit is not None else ""
				)

			self.assertEqual( result.editWarning(), editWarning )

		else :
			# A non-editable result cannot supply an edit or a warning, must
			# explain itself, and must refuse to create an edit.
			self.assertIsNone( edit )
			self.assertEqual( editWarning, "" )
			self.assertEqual( result.editWarning(), "" )
			self.assertNotEqual( nonEditableReason, "" )
			self.assertEqual( result.nonEditableReason(), nonEditableReason )
			self.assertRaises( RuntimeError, result.acquireEdit )
def testValue( self ) :
light = GafferSceneTest.TestLight()
light["parameters"]["exposure"].setValue( 0.25 )
self.assertEqual(
self.__inspect( light["out"], "/light", "exposure" ).value(),
IECore.FloatData( 0.25 )
)
	def testSourceAndEdits( self ) :

		# Builds light -> group -> editScope1 -> editScope2 and checks, for a
		# range of configurations, which plug an inspection reports as the
		# source of the "intensity"/"exposure" parameters and where edits are
		# acquired.

		s = Gaffer.ScriptNode()

		s["light"] = GafferSceneTest.TestLight()
		s["group"] = GafferScene.Group()
		s["editScope1"] = Gaffer.EditScope()
		s["editScope2"] = Gaffer.EditScope()

		s["group"]["in"][0].setInput( s["light"]["out"] )

		s["editScope1"].setup( s["group"]["out"] )
		s["editScope1"]["in"].setInput( s["group"]["out"] )

		s["editScope2"].setup( s["editScope1"]["out"] )
		s["editScope2"]["in"].setInput( s["editScope1"]["out"] )

		# Should be able to edit light directly.

		SourceType = GafferSceneUI.Private.Inspector.Result.SourceType

		self.__assertExpectedResult(
			self.__inspect( s["group"]["out"], "/group/light", "intensity", None ),
			source = s["light"]["parameters"]["intensity"], sourceType = SourceType.Other,
			editable = True, edit = s["light"]["parameters"]["intensity"]
		)

		# Even if there is an edit scope in the way

		self.__assertExpectedResult(
			self.__inspect( s["editScope1"]["out"], "/group/light", "intensity", None ),
			source = s["light"]["parameters"]["intensity"], sourceType = SourceType.Other,
			editable = True, edit = s["light"]["parameters"]["intensity"]
		)

		# We shouldn't be able to edit if we've been told to use an EditScope and it isn't in the history.

		self.__assertExpectedResult(
			self.__inspect( s["group"]["out"], "/group/light", "intensity", s["editScope1"] ),
			source = s["light"]["parameters"]["intensity"], sourceType = SourceType.Other,
			editable = False, nonEditableReason = "The target EditScope (editScope1) is not in the scene history."
		)

		# If it is in the history though, and we're told to use it, then we will.

		inspection = self.__inspect( s["editScope2"]["out"], "/group/light", "intensity", s["editScope2"] )
		self.assertIsNone(
			GafferScene.EditScopeAlgo.acquireParameterEdit(
				s["editScope2"], "/group/light", "light", ( "", "intensity" ), createIfNecessary = False
			)
		)

		self.__assertExpectedResult(
			inspection,
			source = s["light"]["parameters"]["intensity"], sourceType = SourceType.Upstream,
			editable = True
		)

		# Acquiring the edit through the inspection must create the same tweak
		# that EditScopeAlgo would find.
		lightEditScope2Edit = inspection.acquireEdit()
		self.assertIsNotNone( lightEditScope2Edit )
		self.assertEqual(
			lightEditScope2Edit,
			GafferScene.EditScopeAlgo.acquireParameterEdit(
				s["editScope2"], "/group/light", "light", ( "", "intensity" ), createIfNecessary = False
			)
		)

		# If there's an edit downstream of the EditScope we're asked to use,
		# then we're allowed to be editable still

		inspection = self.__inspect( s["editScope2"]["out"], "/group/light", "intensity", s["editScope1"] )
		self.assertTrue( inspection.editable() )
		self.assertEqual( inspection.nonEditableReason(), "" )
		lightEditScope1Edit = inspection.acquireEdit()
		self.assertIsNotNone( lightEditScope1Edit )
		self.assertEqual(
			lightEditScope1Edit,
			GafferScene.EditScopeAlgo.acquireParameterEdit(
				s["editScope1"], "/group/light", "light", ( "", "intensity" ), createIfNecessary = False
			)
		)
		self.assertEqual( inspection.editWarning(), "" )

		# If there is a source node inside an edit scope, make sure we use that

		s["editScope1"]["light2"] = GafferSceneTest.TestLight()
		s["editScope1"]["light2"]["name"].setValue( "light2" )
		s["editScope1"]["parentLight2"] = GafferScene.Parent()
		s["editScope1"]["parentLight2"]["parent"].setValue( "/" )
		s["editScope1"]["parentLight2"]["children"][0].setInput( s["editScope1"]["light2"]["out"] )
		s["editScope1"]["parentLight2"]["in"].setInput( s["editScope1"]["BoxIn"]["out"] )
		s["editScope1"]["LightEdits"]["in"].setInput( s["editScope1"]["parentLight2"]["out"] )

		self.__assertExpectedResult(
			self.__inspect( s["editScope2"]["out"], "/light2", "intensity", s["editScope1"] ),
			source = s["editScope1"]["light2"]["parameters"]["intensity"], sourceType = SourceType.EditScope,
			editable = True, edit = s["editScope1"]["light2"]["parameters"]["intensity"]
		)

		# If there is a tweak in the scope's processor make sure we use that

		light2Edit = GafferScene.EditScopeAlgo.acquireParameterEdit(
			s["editScope1"], "/light2", "light", ( "", "intensity" ), createIfNecessary = True
		)
		light2Edit["enabled"].setValue( True )
		self.__assertExpectedResult(
			self.__inspect( s["editScope2"]["out"], "/light2", "intensity", s["editScope1"] ),
			source = light2Edit, sourceType = SourceType.EditScope,
			editable = True, edit = light2Edit
		)

		# If there is a manual tweak downstream of the scope's scene processor, make sure we use that

		s["editScope1"]["tweakLight2"] = GafferScene.ShaderTweaks()
		s["editScope1"]["tweakLight2"]["in"].setInput( s["editScope1"]["LightEdits"]["out"] )
		s["editScope1"]["tweakLight2Filter"] = GafferScene.PathFilter()
		s["editScope1"]["tweakLight2Filter"]["paths"].setValue( IECore.StringVectorData( [ "/light2" ] ) )
		s["editScope1"]["tweakLight2"]["filter"].setInput( s["editScope1"]["tweakLight2Filter"]["out"] )
		s["editScope1"]["BoxOut"]["in"].setInput( s["editScope1"]["tweakLight2"]["out"] )

		s["editScope1"]["tweakLight2"]["shader"].setValue( "light" )
		editScopeShaderTweak = Gaffer.TweakPlug( "intensity", imath.Color3f( 1, 0, 0 ) )
		s["editScope1"]["tweakLight2"]["tweaks"].addChild( editScopeShaderTweak )

		self.__assertExpectedResult(
			self.__inspect( s["editScope2"]["out"], "/light2", "intensity", s["editScope1"] ),
			source = editScopeShaderTweak, sourceType = SourceType.EditScope,
			editable = True, edit = editScopeShaderTweak
		)

		# If there is a manual tweak outside of an edit scope make sure we use that with no scope

		s["independentLightTweak"] = GafferScene.ShaderTweaks()
		s["independentLightTweak"]["in"].setInput( s["editScope2"]["out"] )

		s["independentLightTweakFilter"] = GafferScene.PathFilter()
		s["independentLightTweakFilter"]["paths"].setValue( IECore.StringVectorData( [ "/group/light" ] ) )
		s["independentLightTweak"]["filter"].setInput( s["independentLightTweakFilter"]["out"] )

		s["independentLightTweak"]["shader"].setValue( "light" )
		independentLightTweakPlug = Gaffer.TweakPlug( "intensity", imath.Color3f( 1, 1, 0 ) )
		s["independentLightTweak"]["tweaks"].addChild( independentLightTweakPlug )

		self.__assertExpectedResult(
			self.__inspect( s["independentLightTweak"]["out"], "/group/light", "intensity", None ),
			source = independentLightTweakPlug, sourceType = SourceType.Other,
			editable = True, edit = independentLightTweakPlug
		)

		# Check we show the last input plug if the source plug is an output

		exposureCurve = Gaffer.Animation.acquire( s["light"]["parameters"]["exposure"] )
		exposureCurve.addKey( Gaffer.Animation.Key( time = 1, value = 2 ) )

		self.__assertExpectedResult(
			self.__inspect( s["group"]["out"], "/group/light", "exposure", None ),
			source = s["light"]["parameters"]["exposure"], sourceType = SourceType.Other,
			editable = True, edit = s["light"]["parameters"]["exposure"]
		)

		inspection = self.__inspect( s["editScope1"]["out"], "/group/light", "exposure", s["editScope1"] )
		exposureTweak = inspection.acquireEdit()
		exposureTweak["enabled"].setValue( True )
		exposureTweakCurve = Gaffer.Animation.acquire( exposureTweak["value"] )
		exposureTweakCurve.addKey( Gaffer.Animation.Key( time = 2, value = 4 ) )

		self.__assertExpectedResult(
			self.__inspect( s["editScope1"]["out"], "/group/light", "exposure", s["editScope1"] ),
			source = exposureTweak, sourceType = SourceType.EditScope,
			editable = True, edit = exposureTweak
		)

		# Check editWarnings and nonEditableReasons

		self.__assertExpectedResult(
			self.__inspect( s["independentLightTweak"]["out"], "/group/light", "intensity", s["editScope2"] ),
			source = independentLightTweakPlug, sourceType = SourceType.Downstream,
			editable = True, edit = lightEditScope2Edit, editWarning = "Parameter has edits downstream in independentLightTweak."
		)

		s["editScope2"]["enabled"].setValue( False )

		self.__assertExpectedResult(
			self.__inspect( s["independentLightTweak"]["out"], "/group/light", "intensity", s["editScope2"] ),
			source = independentLightTweakPlug, sourceType = SourceType.Downstream,
			editable = False, nonEditableReason = "The target EditScope (editScope2) is disabled."
		)

		s["editScope2"]["enabled"].setValue( True )

		# Read-only metadata on the scope (or any of its internals) blocks editing,
		# with the innermost locked node reported first.
		Gaffer.MetadataAlgo.setReadOnly( s["editScope2"], True )
		self.__assertExpectedResult(
			self.__inspect( s["independentLightTweak"]["out"], "/light2", "intensity", s["editScope2"] ),
			source = editScopeShaderTweak, sourceType = SourceType.Upstream,
			editable = False, nonEditableReason = "editScope2 is locked."
		)

		Gaffer.MetadataAlgo.setReadOnly( s["editScope2"], False )
		Gaffer.MetadataAlgo.setReadOnly( s["editScope2"]["LightEdits"]["edits"], True )
		self.__assertExpectedResult(
			self.__inspect( s["independentLightTweak"]["out"], "/light2", "intensity", s["editScope2"] ),
			source = editScopeShaderTweak, sourceType = SourceType.Upstream,
			editable = False, nonEditableReason = "editScope2.LightEdits.edits is locked."
		)

		Gaffer.MetadataAlgo.setReadOnly( s["editScope2"]["LightEdits"], True )
		self.__assertExpectedResult(
			self.__inspect( s["independentLightTweak"]["out"], "/light2", "intensity", s["editScope2"] ),
			source = editScopeShaderTweak, sourceType = SourceType.Upstream,
			editable = False, nonEditableReason = "editScope2.LightEdits is locked."
		)
def testShaderAssignmentWarning( self ) :
	"""Edits to a shader assigned via ShaderAssignment should carry a warning
	that they may affect other locations in the scene."""

	shader = GafferSceneTest.TestShader()
	shader["type"].setValue( "test:surface" )
	shader["parameters"]["optionalString"]["enabled"].setValue( True )

	plane = GafferScene.Plane()
	planeFilter = GafferScene.PathFilter()
	planeFilter["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )

	shaderAssignment = GafferScene.ShaderAssignment()
	shaderAssignment["shader"].setInput( shader["out"] )
	shaderAssignment["filter"].setInput( planeFilter["out"] )

	# Both a plain parameter and an optional-value parameter should be
	# editable, but with the shared-shader edit warning.
	self.__assertExpectedResult(
		self.__inspect( shaderAssignment["out"], "/plane", "c", None, attribute="test:surface" ),
		source = shader["parameters"]["c"], sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.Other,
		editable = True, editWarning = "Edits to TestShader may affect other locations in the scene."
	)

	self.__assertExpectedResult(
		self.__inspect( shaderAssignment["out"], "/plane", "optionalString", None, attribute="test:surface" ),
		source = shader["parameters"]["optionalString"], sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.Other,
		editable = True, editWarning = "Edits to TestShader may affect other locations in the scene."
	)
def testEditScopeNotInHistory( self ) :
	"""Targeting an EditScope that is not connected into the scene history
	should make the inspection non-editable."""

	light = GafferSceneTest.TestLight()

	lightFilter = GafferScene.PathFilter()
	lightFilter["paths"].setValue( IECore.StringVectorData( [ "/light" ] ) )

	shaderTweaks = GafferScene.ShaderTweaks()
	shaderTweaks["in"].setInput( light["out"] )
	shaderTweaks["filter"].setInput( lightFilter["out"] )
	shaderTweaks["tweaks"].addChild( Gaffer.TweakPlug( "exposure", 3 ) )

	# Note : the EditScope is set up but deliberately never connected.
	editScope = Gaffer.EditScope()
	editScope.setup( light["out"] )

	SourceType = GafferSceneUI.Private.Inspector.Result.SourceType

	self.__assertExpectedResult(
		self.__inspect( light["out"], "/light", "exposure", editScope ),
		source = light["parameters"]["exposure"], sourceType = SourceType.Other,
		editable = False, nonEditableReason = "The target EditScope (EditScope) is not in the scene history."
	)

	# Without a target EditScope the tweak itself is editable.
	self.__assertExpectedResult(
		self.__inspect( shaderTweaks["out"], "/light", "exposure" ),
		source = shaderTweaks["tweaks"][0], sourceType = SourceType.Other,
		editable = True, edit = shaderTweaks["tweaks"][0],
	)

	self.__assertExpectedResult(
		self.__inspect( shaderTweaks["out"], "/light", "exposure", editScope ),
		source = shaderTweaks["tweaks"][0], sourceType = SourceType.Other,
		editable = False, nonEditableReason = "The target EditScope (EditScope) is not in the scene history."
	)
def testDisabledTweaks( self ) :
	"""Disabling a tweak should make the inspection fall back to the
	upstream parameter as its source and edit target."""

	light = GafferSceneTest.TestLight()

	lightFilter = GafferScene.PathFilter()
	lightFilter["paths"].setValue( IECore.StringVectorData( [ "/light" ] ) )

	shaderTweaks = GafferScene.ShaderTweaks()
	shaderTweaks["in"].setInput( light["out"] )
	shaderTweaks["filter"].setInput( lightFilter["out"] )
	exposureTweak = Gaffer.TweakPlug( "exposure", 10 )
	shaderTweaks["tweaks"].addChild( exposureTweak )

	# While enabled, the tweak is the source.
	self.__assertExpectedResult(
		self.__inspect( shaderTweaks["out"], "/light", "exposure" ),
		source = exposureTweak, sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.Other,
		editable = True, edit = exposureTweak
	)

	exposureTweak["enabled"].setValue( False )

	# Once disabled, the light's own parameter becomes the source.
	self.__assertExpectedResult(
		self.__inspect( shaderTweaks["out"], "/light", "exposure" ),
		source = light["parameters"]["exposure"], sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.Other,
		editable = True, edit = light["parameters"]["exposure"]
	)
def testInspectorShaderDiscovery( self ) :
	"""The inspector should trace an assigned shader back to its parameter,
	even through a Switch and an expression-driven Switch index."""

	s = Gaffer.ScriptNode()

	s["sphere"] = GafferScene.Sphere()

	s["shader"] = GafferSceneTest.TestShader()
	s["shader"]["type"].setValue( "test:surface" )

	s["shaderAssignment"] = GafferScene.ShaderAssignment()
	s["shaderAssignment"]["shader"].setInput( s["shader"]["out"] )
	s["shaderAssignment"]["in"].setInput( s["sphere"]["out"] )

	# Direct connection.
	i = self.__inspect( s["shaderAssignment"]["out"], "/sphere", "c", attribute="test:surface" )
	self.assertTrue( i.editable() )
	self.assertEqual( i.acquireEdit(), s["shader"]["parameters"]["c"] )

	# Through a Switch.
	s["switch"]= Gaffer.Switch()
	s["switch"].setup( s["shaderAssignment"]["shader"] )
	s["switch"]["in"][0].setInput( s["shader"]["out"] )
	s["shaderAssignment"]["shader"].setInput( s["switch"]["out"] )

	i = self.__inspect( s["shaderAssignment"]["out"], "/sphere", "c", attribute="test:surface" )
	self.assertTrue( i.editable() )
	self.assertEqual( i.acquireEdit(), s["shader"]["parameters"]["c"] )

	# Through a Switch whose index is computed by an expression.
	s["expr"] = Gaffer.Expression()
	s["expr"].setExpression( 'parent["switch"]["index"] = 0', "python" )

	i = self.__inspect( s["shaderAssignment"]["out"], "/sphere", "c", attribute="test:surface" )
	self.assertTrue( i.editable() )
	self.assertEqual( i.acquireEdit(), s["shader"]["parameters"]["c"] )
def testEditScopeNesting( self ) :
	"""Edits should be acquirable in an EditScope nested inside another,
	and the outer scope's own edit should remain discoverable."""

	light = GafferSceneTest.TestLight()

	editScope1 = Gaffer.EditScope( "EditScope1" )
	editScope1.setup( light["out"] )
	editScope1["in"].setInput( light["out"] )

	i = self.__inspect( editScope1["out"], "/light", "intensity", editScope1 )
	scope1Edit = i.acquireEdit()
	scope1Edit["enabled"].setValue( True )
	self.assertEqual( scope1Edit.ancestor( Gaffer.EditScope ), editScope1 )

	# Nest a second scope inside the first.
	editScope2 = Gaffer.EditScope( "EditScope2" )
	editScope2.setup( light["out"] )
	editScope1.addChild( editScope2 )
	editScope2["in"].setInput( scope1Edit.ancestor( GafferScene.SceneProcessor )["out"] )
	editScope1["BoxOut"]["in"].setInput( editScope2["out"] )

	i = self.__inspect( editScope1["out"], "/light", "intensity", editScope2 )
	scope2Edit = i.acquireEdit()
	scope2Edit["enabled"].setValue( True )
	self.assertEqual( scope2Edit.ancestor( Gaffer.EditScope ), editScope2 )

	# Check we still find the edit in scope 1
	i = self.__inspect( editScope1["out"], "/light", "intensity", editScope1 )
	self.assertEqual( i.acquireEdit()[0].ancestor( Gaffer.EditScope ), editScope1 )
def testDownstreamSourceType( self ) :
	"""A tweak made downstream of the target EditScope should be reported
	with the Downstream source type and an edit warning."""

	light = GafferSceneTest.TestLight()

	editScope = Gaffer.EditScope()
	editScope.setup( light["out"] )
	editScope["in"].setInput( light["out"] )

	lightFilter = GafferScene.PathFilter()
	lightFilter["paths"].setValue( IECore.StringVectorData( [ "/light" ] ) )

	# ShaderTweaks sits downstream of the EditScope being targeted.
	shaderTweaks = GafferScene.ShaderTweaks()
	shaderTweaks["in"].setInput( editScope["out"] )
	shaderTweaks["filter"].setInput( lightFilter["out"] )
	exposureTweak = Gaffer.TweakPlug( "exposure", 10 )
	shaderTweaks["tweaks"].addChild( exposureTweak )

	self.__assertExpectedResult(
		self.__inspect( shaderTweaks["out"], "/light", "exposure", editScope ),
		source = exposureTweak, sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.Downstream,
		editable = True, edit = None,
		editWarning = "Parameter has edits downstream in ShaderTweaks."
	)
def testLightInsideBox( self ) :
	"""A light promoted out of a Box should still be inspectable and
	editable via its original parameter plug."""

	box = Gaffer.Box()
	box["light"] = GafferSceneTest.TestLight()
	Gaffer.PlugAlgo.promote( box["light"]["out"] )

	self.__assertExpectedResult(
		self.__inspect( box["out"], "/light", "exposure" ),
		source = box["light"]["parameters"]["exposure"], sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.Other,
		editable = True, edit = box["light"]["parameters"]["exposure"],
	)
def testDirtiedSignal( self ) :
	"""dirtiedSignal() should fire for relevant parameter changes and for
	EditScope selection changes, but not for unrelated plugs."""

	light = GafferSceneTest.TestLight()

	editScope1 = Gaffer.EditScope()
	editScope1.setup( light["out"] )
	editScope1["in"].setInput( light["out"] )

	editScope2 = Gaffer.EditScope()
	editScope2.setup( editScope1["out"] )
	editScope2["in"].setInput( editScope1["out"] )

	settings = Gaffer.Node()
	settings["editScope"] = Gaffer.Plug()

	inspector = GafferSceneUI.Private.ParameterInspector(
		editScope2["out"], settings["editScope"], "light", ( "", "exposure" )
	)

	cs = GafferTest.CapturingSlot( inspector.dirtiedSignal() )

	# Tweaking a parameter should dirty the inspector.
	light["parameters"]["exposure"].setValue( 10 )
	self.assertEqual( len( cs ), 1 )

	# But tweaking the transform should not.
	light["transform"]["translate"]["x"].setValue( 10 )
	self.assertEqual( len( cs ), 1 )

	# Changing EditScope should also dirty the inspector.
	settings["editScope"].setInput( editScope1["enabled"] )
	self.assertEqual( len( cs ), 2 )
	settings["editScope"].setInput( editScope2["enabled"] )
	self.assertEqual( len( cs ), 3 )
	settings["editScope"].setInput( None )
	self.assertEqual( len( cs ), 4 )
def testNonExistentLocation( self ) :
	"""Inspecting a location that does not exist should return None."""

	light = GafferSceneTest.TestLight()
	self.assertIsNone( self.__inspect( light["out"], "/nothingHere", "exposure" ) )

	group = GafferScene.Group()
	group["in"][0].setInput( light["out"] )
	self.assertIsNone( self.__inspect( group["out"], "/group/nothingHere", "exposure" ) )
def testNonExistentAttribute( self ) :
	"""Inspecting an attribute that does not exist should return None,
	with and without a target EditScope."""

	light = GafferSceneTest.TestLight()
	editScope = Gaffer.EditScope()
	editScope.setup( light["out"] )
	editScope["in"].setInput( light["out"] )

	self.assertIsNone( self.__inspect( light["out"], "/light", "exposure", attribute = "nothingHere" ) )
	self.assertIsNone( self.__inspect( editScope["out"], "/light", "exposure", editScope, attribute = "nothingHere" ) )
def testNonExistentParameter( self ) :
	"""Inspecting a parameter that does not exist should return None,
	with and without a target EditScope."""

	light = GafferSceneTest.TestLight()
	editScope = Gaffer.EditScope()
	editScope.setup( light["out"] )
	editScope["in"].setInput( light["out"] )

	self.assertIsNone( self.__inspect( light["out"], "/light", "nothingHere" ) )
	self.assertIsNone( self.__inspect( editScope["out"], "/light", "nothingHere", editScope ) )
def testWrongAttributeType( self ) :
	"""Inspecting a parameter on an attribute that is not a shader network
	(a plain int attribute here) should return None."""

	light = GafferSceneTest.TestLight()

	# NOTE(review): `filter` shadows the builtin of the same name; harmless
	# in this scope but consider renaming.
	filter = GafferScene.PathFilter()
	filter["paths"].setValue( IECore.StringVectorData( [ "/light" ] ) )

	attr = GafferScene.CustomAttributes()
	attr["attributes"].addChild(
		Gaffer.NameValuePlug( "test", 10, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
	)
	attr["in"].setInput( light["out"] )
	attr["filter"].setInput( filter["out"] )

	editScope = Gaffer.EditScope()
	editScope.setup( light["out"] )
	editScope["in"].setInput( attr["out"] )

	self.assertIn( "test", editScope["out"].attributes( "/light" ) )

	self.assertIsNone( self.__inspect( editScope["out"], "/light", "nothingHere", None, attribute = "test" ) )
	self.assertIsNone( self.__inspect( editScope["out"], "/light", "nothingHere", editScope, attribute = "test" ) )
def testReadOnlyMetadataSignalling( self ) :
	"""Read-only metadata changes should only dirty the inspector when they
	affect the EditScope currently being targeted."""

	light = GafferSceneTest.TestLight()

	editScope = Gaffer.EditScope()
	editScope.setup( light["out"] )
	editScope["in"].setInput( light["out"] )

	settings = Gaffer.Node()
	settings["editScope"] = Gaffer.Plug()

	inspector = GafferSceneUI.Private.ParameterInspector(
		editScope["out"], settings["editScope"], "light", ( "", "exposure" )
	)

	cs = GafferTest.CapturingSlot( inspector.dirtiedSignal() )

	Gaffer.MetadataAlgo.setReadOnly( editScope, True )
	Gaffer.MetadataAlgo.setReadOnly( editScope, False )
	self.assertEqual( len( cs ), 0 ) # Changes not relevant because we're not using the EditScope.

	settings["editScope"].setInput( editScope["enabled"] )
	self.assertEqual( len( cs ), 1 )

	Gaffer.MetadataAlgo.setReadOnly( editScope, True )
	self.assertEqual( len( cs ), 2 ) # Change affects the result of `inspect().editable()`
def testUnsupportedSourceNode( self ) :
	"""A parameter whose only source is an unsupported node (a SceneReader)
	is not editable directly, but becomes editable via an EditScope."""

	s = Gaffer.ScriptNode()

	s["sceneReader"] = GafferScene.SceneReader()
	s["sceneReader"]["fileName"].setValue( "${GAFFER_ROOT}/python/GafferSceneTest/usdFiles/sphereLight.usda" )

	s["editScope"] = Gaffer.EditScope()
	s["editScope"].setup( s["sceneReader"]["out"] )
	s["editScope"]["in"].setInput( s["sceneReader"]["out"] )

	# Without an EditScope there is nothing editable in the history.
	self.__assertExpectedResult(
		self.__inspect( s["sceneReader"]["out"], "/SpotLight23", "intensity", None ),
		source = None,
		sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.Other,
		editable = False,
		nonEditableReason = "No editable source found in history."
	)

	# With an EditScope, an edit can be acquired.
	inspection = self.__inspect( s["editScope"]["out"], "/SpotLight23", "intensity", s["editScope"] )
	edit = inspection.acquireEdit()
	self.assertIsNotNone( edit )
	self.__assertExpectedResult(
		inspection,
		source = None,
		sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.Other,
		editable = True,
		edit = edit
	)
def testReadOnlyPlug( self ) :
	"""A plug marked read-only via metadata should make the inspection
	non-editable, with the lock reported in the reason."""

	s = Gaffer.ScriptNode()

	s["light"] = GafferSceneTest.TestLight()

	SourceType = GafferSceneUI.Private.Inspector.Result.SourceType

	self.__assertExpectedResult(
		self.__inspect( s["light"]["out"], "/light", "intensity", None ),
		source = s["light"]["parameters"]["intensity"],
		sourceType = SourceType.Other,
		editable = True,
		edit = s["light"]["parameters"]["intensity"]
	)

	Gaffer.MetadataAlgo.setReadOnly( s["light"]["parameters"]["intensity"], True )

	self.__assertExpectedResult(
		self.__inspect( s["light"]["out"], "/light", "intensity", None ),
		source = s["light"]["parameters"]["intensity"],
		sourceType = SourceType.Other,
		editable = False,
		nonEditableReason = "light.parameters.intensity is locked."
	)
def testAnimatedPlugEditability( self ) :
	"""An animated parameter is editable until its animation curve is
	marked read-only."""

	s = Gaffer.ScriptNode()

	s["light"] = GafferSceneTest.TestLight()

	SourceType = GafferSceneUI.Private.Inspector.Result.SourceType

	curve = Gaffer.Animation.acquire( s["light"]["parameters"]["exposure"] )
	key = Gaffer.Animation.Key( time = 10, value = 10 )
	curve.addKey( key )
	self.assertTrue( Gaffer.Animation.isAnimated( s["light"]["parameters"]["exposure"] ) )

	# Inspect at the keyframe time.
	with Gaffer.Context() as context :
		context.setFrame( 10 )
		self.__assertExpectedResult(
			self.__inspect( s["light"]["out"], "/light", "exposure", None ),
			source = s["light"]["parameters"]["exposure"],
			sourceType = SourceType.Other,
			editable = True,
			edit = s["light"]["parameters"]["exposure"]
		)

	# Locking the curve makes the parameter non-editable.
	Gaffer.MetadataAlgo.setReadOnly( curve, True )

	with Gaffer.Context() as context :
		context.setFrame( 10 )
		self.__assertExpectedResult(
			self.__inspect( s["light"]["out"], "/light", "exposure", None ),
			source = s["light"]["parameters"]["exposure"],
			sourceType = SourceType.Other,
			editable = False,
			nonEditableReason = "Animation.curves.curve0 is locked."
		)
def testPlugWithInput( self ) :
	"""A plug driven by an expression (a non-settable input) cannot be
	edited directly, but an EditScope edit can be acquired — unless that
	edit is itself driven by an expression."""

	s = Gaffer.ScriptNode()

	s["light"] = GafferSceneTest.TestLight()

	s["scope"] = Gaffer.EditScope()
	s["scope"].setup( s["light"]["out"] )
	s["scope"]["in"].setInput( s["light"]["out"] )

	s["expression"] = Gaffer.Expression()
	s["expression"].setExpression(
		"parent[\"light\"][\"parameters\"][\"exposure\"] = 10.0",
		"python"
	)

	SourceType = GafferSceneUI.Private.Inspector.Result.SourceType

	self.assertEqual( s["light"]["parameters"]["exposure"].getValue(), 10 )

	self.__assertExpectedResult(
		self.__inspect( s["scope"]["out"], "/light", "exposure", None ),
		source = s["light"]["parameters"]["exposure"],
		sourceType = SourceType.Other,
		editable = False,
		nonEditableReason = "light.parameters.exposure has a non-settable input."
	)

	# An EditScope edit is still available.
	inspection = self.__inspect( s["scope"]["out"], "/light", "exposure", s["scope"] )
	self.assertTrue( inspection.editable() )
	edit = inspection.acquireEdit()
	edit["enabled"].setValue( True )
	edit["value"].setValue( 5 )

	self.__assertExpectedResult(
		self.__inspect( s["scope"]["out"], "/light", "exposure", s["scope"] ),
		source = edit,
		sourceType = SourceType.EditScope,
		editable = True,
		edit = s["scope"]["LightEdits"]["edits"]["row1"]["cells"]["exposure"]["value"]
	)

	# Driving the EditScope's own edit plug removes editability again.
	s["expression2"] = Gaffer.Expression()
	s["expression2"].setExpression(
		"parent[\"scope\"][\"LightEdits\"][\"edits\"][\"row1\"][\"cells\"][\"exposure\"][\"value\"][\"value\"] = 20",
		"python"
	)

	self.__assertExpectedResult(
		self.__inspect( s["scope"]["out"], "/light", "exposure", s["scope"] ),
		source = edit,
		sourceType = SourceType.EditScope,
		editable = False,
		nonEditableReason = "scope.LightEdits.edits.row1.cells.exposure.value.value has a non-settable input."
	)
def testDefaultSpreadsheetRow( self ) :
	"""A parameter fed from a spreadsheet's default row should not be
	editable, since the default row applies everywhere."""

	s = Gaffer.ScriptNode()

	s["spreadsheet"] = Gaffer.Spreadsheet()
	s["spreadsheet"]["rows"].addColumn( Gaffer.FloatPlug( "exposure" ) )
	s["spreadsheet"]["rows"]["default"]["cells"]["exposure"]["value"].setValue( 5 )

	s["light"] = GafferSceneTest.TestLight()
	s["light"]["parameters"]["exposure"].setInput( s["spreadsheet"]["out"]["exposure"] )
	self.assertEqual( s["light"]["parameters"]["exposure"].getValue(), 5 )

	self.__assertExpectedResult(
		self.__inspect( s["light"]["out"], "/light", "exposure", None ),
		source = s["spreadsheet"]["rows"]["default"]["cells"]["exposure"]["value"],
		sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.Other,
		editable = False,
		nonEditableReason = "spreadsheet.rows.default.cells.exposure.value is a spreadsheet default row."
	)
def testLightOptionalValuePlug( self ) :
	"""An OptionalValuePlug parameter should be invisible to inspection
	while disabled, and fully inspectable/editable once enabled."""

	s = Gaffer.ScriptNode()

	s["light"] = GafferSceneTest.TestLight()
	s["light"]["parameters"].addChild( Gaffer.OptionalValuePlug( "testFloat", Gaffer.FloatPlug(), False ) )

	s["editScope"] = Gaffer.EditScope()
	s["editScope"].setup( s["light"]["out"] )
	s["editScope"]["in"].setInput( s["light"]["out"] )

	# Disabled : the parameter is not inspectable at all.
	self.assertIsNone( self.__inspect( s["editScope"]["out"], "/light", "testFloat" ) )
	self.assertIsNone( self.__inspect( s["editScope"]["out"], "/light", "testFloat", s["editScope"] ) )

	s["light"]["parameters"]["testFloat"]["enabled"].setValue( True )

	SourceType = GafferSceneUI.Private.Inspector.Result.SourceType

	self.__assertExpectedResult(
		self.__inspect( s["editScope"]["out"], "/light", "testFloat" ),
		source = s["light"]["parameters"]["testFloat"],
		sourceType = SourceType.Other,
		editable = True,
		edit = s["light"]["parameters"]["testFloat"]
	)

	# An EditScope edit can be acquired and then becomes the source.
	inspection = self.__inspect( s["editScope"]["out"], "/light", "testFloat", s["editScope"] )
	self.assertIsNotNone( inspection )
	edit = inspection.acquireEdit()
	edit["enabled"].setValue( True )
	edit["value"].setValue( 5.0 )

	self.__assertExpectedResult(
		self.__inspect( s["editScope"]["out"], "/light", "testFloat", s["editScope"] ),
		source = edit,
		sourceType = SourceType.EditScope,
		editable = True,
		edit = edit
	)
if __name__ == "__main__":
	# Run the test suite when this file is executed directly.
	unittest.main()
|
3ca3778822636998a0f468e0946aac58ebf80c24
|
c80df697c0b66cd58a039c928574926bd6161a36
|
/tools/dom/scripts/databasebuilder_test.py
|
840e6dd4c6c8ed663530576a2b44362718e0e442
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-unknown"
] |
permissive
|
dart-lang/sdk
|
d4e50700dfc54b33c0a7a09fab1aa9623ebc84e5
|
b25873f11c68772408f6a4aea5f5c961f31ac9f7
|
refs/heads/master
| 2023-08-31T11:13:09.400940
| 2023-08-31T09:10:57
| 2023-08-31T09:10:57
| 35,726,310
| 10,701
| 2,079
|
BSD-3-Clause
| 2023-09-14T10:34:15
| 2015-05-16T14:14:58
|
Dart
|
UTF-8
|
Python
| false
| false
| 13,259
|
py
|
databasebuilder_test.py
|
#!/usr/bin/env python3
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import database
import idlparser
import logging.config
import os
import os.path
import shutil
import tempfile
import unittest
from databasebuilder import *
class DatabaseBuilderTestCase(unittest.TestCase):
    """Tests for DatabaseBuilder: importing IDL files into a database,
    merging multiple sources, and normalizing annotations.

    Fixes over the original:
      * tearDown now removes the whole temp working directory (previously
        only the database dir was removed, leaking the input directory
        created by tempfile.mkdtemp on every test).
      * File handles are managed with `with` so they close on error.
    """

    def _create_input(self, idl_file_name, content):
        """Write `content` to `idl_file_name` in the input dir; return its path."""
        file_name = os.path.join(self._input_dir, idl_file_name)
        # Context manager guarantees the handle closes even if write() raises.
        with open(file_name, 'w') as f:
            f.write(content)
        return file_name

    def _assert_interface_exists(self, path):
        """Assert that `path` exists inside the saved database directory."""
        file_path = os.path.join(self._database_dir, path)
        self.assertTrue(os.path.exists(file_path))

    def _assert_content_equals(self, path, expected_content):
        """Assert the database file at `path` matches `expected_content`,
        ignoring blank lines and per-line leading/trailing whitespace."""

        def clean(content):
            # Collapse to single-space-joined non-empty stripped lines so the
            # comparison is insensitive to formatting.
            return ' '.join(
                filter(len, list(map(str.strip, content.split('\n')))))

        file_path = os.path.join(self._database_dir, path)
        self.assertTrue(os.path.exists(file_path))
        with open(file_path, 'r') as f:
            actual_content = f.read()
        if clean(actual_content) != clean(expected_content):
            msg = '''
FILE: %s
EXPECTED:
%s
ACTUAL:
%s
''' % (file_path, expected_content, actual_content)
            self.fail(msg)

    def setUp(self):
        # Keep the whole working directory so tearDown can remove it;
        # tempfile.mkdtemp makes the caller responsible for cleanup.
        self._working_dir = tempfile.mkdtemp()
        self._database_dir = os.path.join(self._working_dir, 'database')
        self.assertFalse(os.path.exists(self._database_dir))
        self._input_dir = os.path.join(self._working_dir, 'inputdir')
        os.makedirs(self._input_dir)
        self._db = database.Database(self._database_dir)
        self.assertTrue(os.path.exists(self._database_dir))
        self._builder = DatabaseBuilder(self._db)

    def tearDown(self):
        # Remove the entire temp tree (database AND input dirs).
        shutil.rmtree(self._working_dir)

    def test_basic_import(self):
        """A single interface should round-trip into the database."""
        file_name = self._create_input(
            'input.idl', '''
            module M {
              interface I {
                attribute int a;
              };
            };''')
        self._builder.import_idl_file(file_name)
        self._builder.merge_imported_interfaces([])
        self._db.Save()
        self._assert_interface_exists('I.idl')

    def test_splitting(self):
        """Optional arguments should be split into separate overloads."""
        file_name = self._create_input(
            'input.idl', '''
            module M {
              interface I {
                readonly attribute int a;
                int o(in int x, in optional int y);
              };
            };''')
        self._builder.import_idl_file(file_name)
        self._builder.merge_imported_interfaces([])
        self._db.Save()
        self._assert_content_equals(
            'I.idl', '''
            interface I {
              /* Attributes */
              getter attribute int a;
              /* Operations */
              int o(in int x);
              int o(in int x, in int y);
            };''')

    def test_renames(self):
        """type_rename_map should rename types everywhere they appear."""
        file_name = self._create_input(
            'input.idl', '''
            module M {
              [Constructor(in T x)] interface I {
                T op(T x);
                readonly attribute N::T attr;
              };
            };''')
        options = DatabaseBuilderOptions(type_rename_map={'I': 'i', 'T': 't'})
        self._builder.import_idl_file(file_name, options)
        self._builder.merge_imported_interfaces([])
        self._db.Save()
        self._assert_content_equals(
            'i.idl', '''
            [Constructor(in t x)] interface i {
              /* Attributes */
              getter attribute t attr;
              /* Operations */
              t op(in t x);
            };''')

    def test_type_defs(self):
        """typedefs should be resolved to their underlying type."""
        file_name = self._create_input(
            'input.idl', '''
            module M {
              typedef T S;
              interface I : S {
                S op(S x);
                readonly attribute S attr;
              };
            };''')
        options = DatabaseBuilderOptions()
        self._builder.import_idl_file(file_name, options)
        self._builder.merge_imported_interfaces([])
        self._db.Save()
        self._assert_content_equals(
            'I.idl', '''
            interface I :
              T {
              /* Attributes */
              getter attribute T attr;
              /* Operations */
              T op(in T x);
            };''')

    def test_merge(self):
        """Members from two sources should be merged with per-source
        annotations, with conflicting declarations kept separately."""
        file_name1 = self._create_input(
            'input1.idl', '''
            module M {
              interface I {
                const int CONST_BOTH = 0;
                const int CONST_ONLY_FIRST = 0;
                const int CONST_BOTH_DIFFERENT_VALUE = 0;
                readonly attribute int attr_only_first;
                readonly attribute int attr_both;
                readonly attribute int attr_both_readonly_difference;
                readonly attribute int attr_both_int_long_difference;
                int op_only_first();
                int op_both(int a);
                int op_both_optionals_difference(int a,
                    in optional int b);
                int op_both_arg_rename(int arg);
              };
            };''')
        self._builder.import_idl_file(
            file_name1,
            DatabaseBuilderOptions(
                source='1st', idl_syntax=idlparser.FREMONTCUT_SYNTAX))
        file_name2 = self._create_input(
            'input2.idl', '''
            module M {
              interface I {
                const int CONST_BOTH = 0;
                const int CONST_ONLY_SECOND = 0;
                const int CONST_BOTH_DIFFERENT_VALUE = 1;
                readonly attribute int attr_only_second;
                readonly attribute int attr_both;
                readonly attribute long attr_both_int_long_difference;
                attribute int attr_both_readonly_difference;
                int op_only_second();
                int op_both(int a);
                int op_both_optionals_difference(int a,
                    optional boolean b);
                int op_both_arg_rename(int betterName);
              };
            };''')
        self._builder.import_idl_file(
            file_name2,
            DatabaseBuilderOptions(
                source='2nd', idl_syntax=idlparser.FREMONTCUT_SYNTAX))
        # Treat 'int' and 'long' as equivalent when matching signatures.
        self._builder.set_same_signatures({'int': 'long'})
        self._builder.merge_imported_interfaces([])
        self._db.Save()
        self._assert_content_equals(
            'I.idl', '''
            @1st(module=M) @2nd(module=M) interface I {
              /* Constants */
              @1st @2nd const int CONST_BOTH = 0;
              @1st const int CONST_BOTH_DIFFERENT_VALUE = 0;
              @2nd const int CONST_BOTH_DIFFERENT_VALUE = 1;
              @1st const int CONST_ONLY_FIRST = 0;
              @2nd const int CONST_ONLY_SECOND = 0;
              /* Attributes */
              @1st @2nd getter attribute int attr_both;
              @1st @2nd getter attribute int attr_both_int_long_difference;
              @1st @2nd getter attribute int attr_both_readonly_difference;
              @2nd setter attribute int attr_both_readonly_difference;
              @1st getter attribute int attr_only_first;
              @2nd getter attribute int attr_only_second;
              /* Operations */
              @1st @2nd int op_both(in t a);
              @1st @2nd int op_both_arg_rename(in t betterName);
              @1st @2nd int op_both_optionals_difference(in t a);
              @1st int op_both_optionals_difference(in t a, in int b);
              @2nd int op_both_optionals_difference(in t a, in boolean b);
              @1st int op_only_first();
              @2nd int op_only_second();
            };''')

    def test_mergeDartName(self):
        """Extended attributes from both sources should be merged on a member."""
        file_name1 = self._create_input(
            'input1.idl', '''
            module M {
              interface I {
                [ImplementationFunction=foo] int member(in int a);
              };
            };''')
        self._builder.import_idl_file(
            file_name1,
            DatabaseBuilderOptions(
                source='1st', idl_syntax=idlparser.FREMONTCUT_SYNTAX))
        file_name2 = self._create_input(
            'input2.idl', '''
            module M {
              interface I {
                [DartName=bar] int member(in int a);
              };
            };''')
        self._builder.import_idl_file(
            file_name2,
            DatabaseBuilderOptions(
                source='2nd', idl_syntax=idlparser.FREMONTCUT_SYNTAX))
        self._builder.merge_imported_interfaces([])
        self._db.Save()
        self._assert_content_equals(
            'I.idl', '''
            @1st(module=M) @2nd(module=M) interface I {
              /* Operations */
              @1st @2nd [DartName=bar, ImplementationFunction=foo] int member(in int a);
            };''')

    def test_supplemental(self):
        """[Supplemental] declarations should fold into the base interface."""
        file_name = self._create_input(
            'input1.idl', '''
            module M {
              interface I {
                readonly attribute int a;
              };
              [Supplemental] interface I {
                readonly attribute int b;
              };
            };''')
        self._builder.import_idl_file(file_name,
                                      DatabaseBuilderOptions(source='Src'))
        self._builder.merge_imported_interfaces([])
        self._db.Save()
        self._assert_content_equals(
            'I.idl', '''
            @Src(module=M) [Supplemental] interface I {
              /* Attributes */
              @Src getter attribute int a;
              @Src getter attribute int b;
            };''')

    def test_impl_stmt(self):
        """`X implements Y` should be recorded as a parent of X."""
        file_name = self._create_input(
            'input.idl', '''
            module M {
              interface I {};
              I implements J;
            };''')
        self._builder.import_idl_file(file_name,
                                      DatabaseBuilderOptions(source='Src'))
        self._builder.merge_imported_interfaces([])
        self._db.Save()
        self._assert_content_equals(
            'I.idl', '''
            @Src(module=M) interface I :
              @Src J {
            };''')

    def test_obsolete(self):
        """Re-importing with obsolete_old_declarations should drop members
        that no longer appear in the new input."""
        file_name1 = self._create_input(
            'input1.idl', '''
            module M {
              interface I {
                readonly attribute int keep;
                readonly attribute int obsolete; // Would be removed
              };
            };''')
        self._builder.import_idl_file(file_name1,
                                      DatabaseBuilderOptions(source='src'))
        file_name2 = self._create_input(
            'input2.idl', '''
            module M {
              interface I {
                readonly attribute int keep;
                readonly attribute int new;
              };
            };''')
        self._builder.import_idl_file(
            file_name2,
            DatabaseBuilderOptions(
                source='src', obsolete_old_declarations=True))
        self._builder.merge_imported_interfaces([])
        self._db.Save()
        self._assert_content_equals(
            'I.idl', '''
            @src(module=M) interface I {
              /* Attributes */
              @src getter attribute int keep;
              @src getter attribute int new;
            };''')

    def test_annotation_normalization(self):
        """normalize_annotations should remove annotation arguments that
        duplicate the interface-level source attributes."""
        file_name = self._create_input(
            'input.idl', '''
            module M {
              interface I : J{
                const int C = 0;
                readonly attribute int a;
                int op();
              };
            };''')
        self._builder.import_idl_file(
            file_name,
            DatabaseBuilderOptions(source='Src', source_attributes={'x': 'y'}))
        self._builder.merge_imported_interfaces([])
        interface = self._db.GetInterface('I')
        # Hand-edit some annotations so normalization has work to do.
        interface.parents[0].annotations['Src']['x'] = 'u'
        interface.constants[0].annotations['Src']['z'] = 'w'
        interface.attributes[0].annotations['Src']['x'] = 'u'
        self._db.Save()
        # Before normalization
        self._assert_content_equals(
            'I.idl', '''
            @Src(module=M, x=y)
            interface I : @Src(x=u) J {
              /* Constants */
              @Src(x=y, z=w) const int C = 0;
              /* Attributes */
              @Src(x=u) getter attribute int a;
              /* Operations */
              @Src(x=y) int op();
            };''')
        # Normalize
        self._builder.normalize_annotations(['Src'])
        self._db.Save()
        # After normalization
        self._assert_content_equals(
            'I.idl', '''
            @Src(module=M, x=y)
            interface I : @Src(x=u) J {
              /* Constants */
              @Src(z=w) const int C = 0;
              /* Attributes */
              @Src(x=u) getter attribute int a;
              /* Operations */
              @Src int op();
            };''')

    def test_fix_displacements(self):
        """fix_displacements should annotate members inherited via a parent
        in one source but declared directly in another."""
        file_name1 = self._create_input(
            'input1.idl', '''
            module M {
              interface I {};
              interface J : I {
                readonly attribute int attr;
              };
            };''')
        self._builder.import_idl_file(file_name1,
                                      DatabaseBuilderOptions(source='1st'))
        file_name2 = self._create_input(
            'input2.idl', '''
            module M {
              interface I {
                readonly attribute int attr;
              };
              interface J : I {};
            };''')
        self._builder.import_idl_file(file_name2,
                                      DatabaseBuilderOptions(source='2nd'))
        self._builder.merge_imported_interfaces([])
        self._builder.fix_displacements('2nd')
        self._db.Save()
        self._assert_content_equals(
            'J.idl', '''
            @1st(module=M) @2nd(module=M) interface J :
              @1st @2nd I {
              /* Attributes */
              @1st
              @2nd(via=I)
              getter attribute int attr;
            };''')
if __name__ == "__main__":
    # One guard instead of the original back-to-back duplicated
    # `if __name__ == "__main__"` checks: configure logging, then run.
    logging.config.fileConfig("logging.conf")
    unittest.main()
|
b39679a0b338659172bffcf3e9147ae358d9bfa8
|
b8dad2a51911f71c3f4946b98573a487d9e4c29e
|
/custom_components/ble_monitor/test/test_mikrotik.py
|
eee21b368e044986fe5cfcde2bb33f7a4e28a049
|
[
"MIT"
] |
permissive
|
custom-components/ble_monitor
|
2b9dfc99f740314130fb21a29f070d21ec0d5a37
|
f52860aed1b2791e2b4643a25f73157694d8c3a2
|
refs/heads/master
| 2023-08-23T10:48:16.567468
| 2023-07-25T17:53:34
| 2023-07-25T17:53:34
| 223,993,584
| 1,294
| 215
|
MIT
| 2023-09-11T06:54:43
| 2019-11-25T16:30:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,854
|
py
|
test_mikrotik.py
|
"""The tests for the Laica Smart Scale ble_parser."""
from ble_monitor.ble_parser import BleParser
class TestLaica:
    """Tests for the Mikrotik parser"""
    # NOTE(review): the class is named TestLaica but exercises the Mikrotik
    # parser — presumably copied from a Laica test file; consider renaming.

    def test_mikrotik_tg_bt5_in(self):
        """Test Mikrotik TG-BT5-IN parser."""
        # Raw HCI advertisement payload for a TG-BT5-IN beacon.
        data_string = "043E2202010300DD7B146E2CDC1615FF4F09010010A90000FDFF010000806BE866000062D5"
        data = bytes(bytearray.fromhex(data_string))

        # pylint: disable=unused-variable
        ble_parser = BleParser()
        sensor_msg, tracker_msg = ble_parser.parse_raw_data(data)

        assert sensor_msg["firmware"] == "Mikrotik"
        assert sensor_msg["type"] == "TG-BT5-IN"
        assert sensor_msg["mac"] == "DC2C6E147BDD"
        assert sensor_msg["packet"] == "no packet id"
        assert sensor_msg["data"]
        assert sensor_msg["version"] == 1
        assert sensor_msg["acceleration x"] == 0.0
        assert sensor_msg["acceleration y"] == 255.98828125
        assert sensor_msg["acceleration z"] == 0.00390625
        assert sensor_msg["acceleration"] == 255.9882812798037
        assert sensor_msg["uptime"] == 6744171
        assert sensor_msg["battery"] == 98
        assert sensor_msg["switch"] == 0
        assert sensor_msg["tilt"] == 0
        assert sensor_msg["dropping"] == 0
        assert sensor_msg["impact"] == 0
        assert sensor_msg["impact x"] == 0
        assert sensor_msg["impact y"] == 0
        assert sensor_msg["impact z"] == 0
        assert sensor_msg["rssi"] == -43

    def test_mikrotik_tg_bt5_out(self):
        """Test Mikrotik TG-BT5-OUT parser."""
        # Same payload shape as the IN variant, but with a temperature field
        # populated (bytes "A119" in place of "0000"), which selects the
        # TG-BT5-OUT device type.
        data_string = "043E2202010300DD7B146E2CDC1615FF4F09010010A90000FDFF0100A1196BE866000062D5"
        data = bytes(bytearray.fromhex(data_string))

        # pylint: disable=unused-variable
        ble_parser = BleParser()
        sensor_msg, tracker_msg = ble_parser.parse_raw_data(data)

        assert sensor_msg["firmware"] == "Mikrotik"
        assert sensor_msg["type"] == "TG-BT5-OUT"
        assert sensor_msg["mac"] == "DC2C6E147BDD"
        assert sensor_msg["packet"] == "no packet id"
        assert sensor_msg["data"]
        assert sensor_msg['temperature'] == 25.62890625
        assert sensor_msg["version"] == 1
        assert sensor_msg["acceleration x"] == 0.0
        assert sensor_msg["acceleration y"] == 255.98828125
        assert sensor_msg["acceleration z"] == 0.00390625
        assert sensor_msg["acceleration"] == 255.9882812798037
        assert sensor_msg["uptime"] == 6744171
        assert sensor_msg["battery"] == 98
        assert sensor_msg["switch"] == 0
        assert sensor_msg["tilt"] == 0
        assert sensor_msg["dropping"] == 0
        assert sensor_msg["impact"] == 0
        assert sensor_msg["impact x"] == 0
        assert sensor_msg["impact y"] == 0
        assert sensor_msg["impact z"] == 0
        assert sensor_msg["rssi"] == -43
|
546e21e229bbe621439ec1e4c67cdb1d6f4c199c
|
4a5681a81c8720087291bcf6caacef083cb7af16
|
/src/python/strelka/tests/test_scan_msi.py
|
702f2c5027fca4ec0015b6b8c932f314a9ccba5e
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
target/strelka
|
8d0801dbca270c46bd2ea998c0cf72f80e741cee
|
e42800362535a8fec956689e4de81235231022db
|
refs/heads/master
| 2023-08-18T15:35:01.150070
| 2023-08-15T12:05:07
| 2023-08-15T12:05:07
| 149,654,117
| 739
| 114
|
NOASSERTION
| 2023-09-12T18:18:25
| 2018-09-20T18:38:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,700
|
py
|
test_scan_msi.py
|
from pathlib import Path
from unittest import TestCase, mock
from strelka.scanners.scan_msi import ScanMsi as ScanUnderTest
from strelka.tests import run_test_scan
def test_scan_msi(mocker):
"""
Pass: Sample event matches output of scanner.
Failure: Unable to load file or sample event fails to match.
"""
test_scan_event = {
"elapsed": mock.ANY,
"flags": [],
"SourceFile": mock.ANY,
"ExifToolVersion": mock.ANY,
"FileName": mock.ANY,
"Directory": "/tmp",
"FileSize": mock.ANY,
"FileModifyDate": mock.ANY,
"FileAccessDate": mock.ANY,
"FileInodeChangeDate": mock.ANY,
"FilePermissions": mock.ANY,
"FileType": "FPX",
"FileTypeExtension": "fpx",
"MIMEType": "image/vnd.fpx",
"CodePage": "Windows Latin 1 (Western European)",
"Title": "Installation Database",
"Subject": "StrelkaMSITest",
"Author": "Target",
"Keywords": "Installer",
"Comments": "This installer database contains the logic and data required to install StrelkaMSITest.",
"Template": "Intel;1033",
"RevisionNumber": "{3F5D9FF7-E061-48CF-95B2-0AA7C9E5DE2A}",
"CreateDate": mock.ANY,
"ModifyDate": mock.ANY,
"Pages": 200,
"Words": 2,
"Software": "Windows Installer XML Toolset (3.11.2.4516)",
"Security": "Read-only recommended",
}
scanner_event = run_test_scan(
mocker=mocker,
scan_class=ScanUnderTest,
fixture_path=Path(__file__).parent / "fixtures/test.msi",
)
TestCase.maxDiff = None
TestCase().assertDictEqual(test_scan_event, scanner_event)
|
0b1060caacfb96d3b19dbacb54bb7ad0028a0ba4
|
e384f5467d8bcfd70845997bcbd68d950e874a61
|
/example/python/mesh/mesh_002_torus/torus.py
|
5385cfcac897b4b13f99ed6c0d3da07ddb925777
|
[] |
no_license
|
Rabbid76/graphics-snippets
|
ee642f1ed9ceafc6d320e467d3a084d2446d22c2
|
fa187afeabb9630bc1d988304fb5787e95a91385
|
refs/heads/master
| 2023-08-04T04:32:06.884318
| 2023-07-21T09:15:43
| 2023-07-21T09:15:43
| 109,126,544
| 177
| 12
| null | 2023-04-11T20:05:52
| 2017-11-01T12:05:56
|
C++
|
UTF-8
|
Python
| false
| false
| 3,389
|
py
|
torus.py
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir))
os.chdir(os.path.dirname(os.path.abspath(__file__)))
import math
# PyOpenGL import
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
# MyLibOGL import
from MyLibOGL.math import mat
from MyLibOGL.math import cam
from MyLibOGL.ogl import shader
from MyLibOGL.ogl import vertex
from MyLibOGL.ogl import uniform
from MyLibOGL.glut import window
class MyWindow(window.CameraWindow):
def __init__( self, cx, cy, multisample=True ):
super().__init__(cx, cy, multisample)
def _InitCamera_(self):
camera = super()._InitCamera_()
#camera.fov_y = 120
camera.pos = (0, -2.5, 0)
return camera
# draw event
def OnDraw(self):
# set up projection matrix
prjMat = self.Perspective()
# set up view matrix
viewMat = self.LookAt()
# set up attributes and shader program
glEnable( GL_DEPTH_TEST )
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )
progDraw.Use()
modelMat = mat.IdentityMat44()
modelMat = self.AutoModelMatrix()
#modelMat = mat.RotateX( modelMat, self.CalcAng( 13.0 ) )
#modelMat = mat.RotateY( modelMat, self.CalcAng( 17.0 ) )
progDraw.SetUniforms( {
b"u_projectionMat44" : self.Perspective(),
b"u_viewMat44" : self.LookAt(),
b"u_modelMat44" : modelMat,
b"u_lightDir" : [-1.0, -0.5, -2.0],
b"u_ambient" : 0.2,
b"u_diffuse" : 0.8,
b"u_specular" : 0.8,
b"u_shininess" : 10.0 } )
# draw object
torusVAO.Draw()
def AddToBuffer( buffer, data, count=1 ):
for inx_c in range(0, count):
for inx_s in range(0, len(data)): buffer.append( data[inx_s] )
# create window
wnd = MyWindow( 800, 600, True )
# define torus vertex array opject
circum_size = 32
tube_size = 32
rad_circum = 1.0
rad_tube = 0.5
torus_pts = []
torus_nv = []
torus_col = []
torus_inx = []
col = [1, 0.5, 0.0]
for i_c in range(0, circum_size):
center = [
math.cos(2 * math.pi * i_c / circum_size),
math.sin(2 * math.pi * i_c / circum_size) ]
for i_t in range(0, tube_size):
tubeX = math.cos(2 * math.pi * i_t / tube_size)
tubeY = math.sin(2 * math.pi * i_t / tube_size)
pt = [
center[0] * ( rad_circum + tubeX * rad_tube ),
center[1] * ( rad_circum + tubeX * rad_tube ),
tubeY * rad_tube ]
nv = [ pt[0] - center[0] * rad_tube, pt[1] - center[1] * rad_tube, tubeY * rad_tube ]
torus_pts.extend( pt )
torus_nv.extend( nv )
torus_col.extend( col )
i_cn = (i_c+1) % circum_size
i_tn = (i_t+1) % tube_size
i_c0 = i_c * tube_size;
i_c1 = i_cn * tube_size;
torus_inx.extend( [i_c0+i_t, i_c0+i_tn, i_c1+i_t, i_c0+i_tn, i_c1+i_t, i_c1+i_tn] )
torusVAO = vertex.VAObject( [ (3, torus_pts), (3, torus_nv), (3, torus_col) ], torus_inx )
# load, compile and link shader
progDraw = shader.ShaderProgram(
[ ('resource/shader/blinn_phong.vert', GL_VERTEX_SHADER),
('resource/shader/blinn_phong.frag', GL_FRAGMENT_SHADER) ] )
# start main loop
wnd.Run()
|
9baa2dfba778aa90fe8a0160867a9fde2cc8438e
|
d4412fbe37540e2c4cbe59ed6503d3661ccb7d9c
|
/applications/Chat/examples/community/peft/train_peft_prompts.py
|
9385e457d852ff042a42439c467bc5b02b7c6e73
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] |
permissive
|
hpcaitech/ColossalAI
|
a082ed08a3807b53c49d1f86835b9808590d9042
|
c7b60f75470f067d1342705708810a660eabd684
|
refs/heads/main
| 2023-09-01T04:13:13.834565
| 2023-08-30T15:07:21
| 2023-08-30T15:07:21
| 422,274,596
| 32,044
| 4,084
|
Apache-2.0
| 2023-09-14T15:19:54
| 2021-10-28T16:19:44
|
Python
|
UTF-8
|
Python
| false
| false
| 10,275
|
py
|
train_peft_prompts.py
|
import argparse
import pandas as pd
import torch
import torch.distributed as dist
from coati.dataset import DataCollatorForSupervisedDataset, PromptDataset, SupervisedDataset
from coati.models.bloom import BLOOMRM, BLOOMCritic
from coati.models.gpt import GPTRM, GPTActor, GPTCritic
from coati.models.llama import LlamaActor, LlamaCritic, LlamaRM
from coati.models.opt import OPTRM, OPTActor, OPTCritic
from coati.trainer import PPOTrainer
from coati.trainer.strategies import DDPStrategy, GeminiStrategy, LowLevelZeroStrategy
from easy_dataset import EasyPromptsDataset, EasySupervisedDataset
from easy_models import BLOOMActor
from peft import PeftModel
from torch.optim import Adam
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from transformers import AutoTokenizer, BloomTokenizerFast, GPT2Tokenizer, LlamaTokenizer
from colossalai.nn.optimizer import HybridAdam
def main(args):
# configure strategy
if args.strategy == 'ddp':
strategy = DDPStrategy()
elif args.strategy == 'colossalai_gemini':
strategy = GeminiStrategy(placement_policy='cpu', initial_scale=2**5)
elif args.strategy == 'colossalai_zero2':
strategy = LowLevelZeroStrategy(stage=2, placement_policy='cpu')
else:
raise ValueError(f'Unsupported strategy "{args.strategy}"')
if args.rm_path is not None:
state_dict = torch.load(args.rm_path, map_location='cpu')
# configure model
if args.model == 'bloom':
# initial_model = BLOOMActor(pretrained=args.pretrain)
print('Using peft lora to load Bloom model as initial_model')
initial_model = BLOOMActor(pretrained=args.pretrain, lora_path=args.sft_lora_path)
print('Using peft lora to load Bloom model as initial_model (Done)')
else:
raise ValueError(f'Unsupported actor model "{args.model}"')
if args.rm_model == None:
rm_model_name = args.model
else:
rm_model_name = args.rm_model
if rm_model_name == 'gpt2':
reward_model = GPTRM(pretrained=args.rm_pretrain)
elif rm_model_name == 'bloom':
print("load bloom reward model ", args.rm_pretrain)
reward_model = BLOOMRM(pretrained=args.rm_pretrain)
elif rm_model_name == 'opt':
reward_model = OPTRM(pretrained=args.rm_pretrain)
elif rm_model_name == 'llama':
reward_model = LlamaRM(pretrained=args.rm_pretrain)
else:
raise ValueError(f'Unsupported reward model "{rm_model_name}"')
if args.rm_path is not None:
print('Loading reward model from', args.rm_path)
reward_model.load_state_dict(state_dict)
if args.strategy != 'colossalai_gemini':
initial_model.to(torch.float16).to(torch.cuda.current_device())
reward_model.to(torch.float16).to(torch.cuda.current_device())
with strategy.model_init_context():
if args.model == 'bloom':
# actor = BLOOMActor(pretrained=args.pretrain, lora_rank=args.lora_rank)
print('Using peft lora to load Bloom model as Actor')
actor = BLOOMActor(pretrained=args.pretrain, lora_path=args.sft_lora_path)
print('Using peft lora to load Bloom model as Actor (Done)')
else:
raise ValueError(f'Unsupported actor model "{args.model}"')
if rm_model_name == 'gpt2':
critic = GPTCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)
elif rm_model_name == 'bloom':
print("load bloom critic ", args.rm_pretrain, " lora_rank ", args.lora_rank, " use_action_mask ", True)
critic = BLOOMCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)
print("load bloom critic (Done) ")
elif rm_model_name == 'opt':
critic = OPTCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)
elif rm_model_name == 'llama':
critic = LlamaCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)
else:
raise ValueError(f'Unsupported reward model "{rm_model_name}"')
if args.rm_path is not None:
print('Loading reward model from', args.rm_path)
critic.load_state_dict(state_dict)
del state_dict
if args.strategy != 'colossalai_gemini':
critic.to(torch.float16).to(torch.cuda.current_device())
actor.to(torch.float16).to(torch.cuda.current_device())
# configure optimizer
if args.strategy.startswith('colossalai'):
actor_optim = HybridAdam(actor.parameters(), lr=1e-7)
critic_optim = HybridAdam(critic.parameters(), lr=1e-7)
else:
actor_optim = Adam(actor.parameters(), lr=1e-7)
critic_optim = Adam(critic.parameters(), lr=1e-7)
# configure tokenizer
if args.model == 'gpt2':
tokenizer = GPT2Tokenizer.from_pretrained(args.rm_pretrain)
tokenizer.pad_token = tokenizer.eos_token
elif args.model == 'bloom':
tokenizer = BloomTokenizerFast.from_pretrained(args.rm_pretrain)
tokenizer.pad_token = tokenizer.eos_token
elif args.model == 'opt':
tokenizer = AutoTokenizer.from_pretrained(args.rm_pretrain)
tokenizer.pad_token = tokenizer.eos_token
elif args.model == 'llama':
tokenizer = LlamaTokenizer.from_pretrained(args.pretrain)
tokenizer.eos_token = '<\s>'
tokenizer.pad_token = tokenizer.unk_token
else:
raise ValueError(f'Unsupported model "{args.model}"')
data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
prompt_dataset = EasyPromptsDataset(args.prompt_path, tokenizer)
if dist.is_initialized() and dist.get_world_size() > 1:
prompt_sampler = DistributedSampler(prompt_dataset, shuffle=True, seed=42, drop_last=True)
else:
prompt_sampler = None
prompt_dataloader = DataLoader(prompt_dataset,
shuffle=(prompt_sampler is None),
sampler=prompt_sampler,
batch_size=args.train_batch_size)
pretrain_dataset = EasySupervisedDataset(args.pretrain_dataset, tokenizer)
if dist.is_initialized() and dist.get_world_size() > 1:
pretrain_sampler = DistributedSampler(pretrain_dataset, shuffle=True, seed=42, drop_last=True)
else:
pretrain_sampler = None
pretrain_dataloader = DataLoader(pretrain_dataset,
shuffle=(pretrain_sampler is None),
sampler=pretrain_sampler,
batch_size=args.ptx_batch_size,
collate_fn=data_collator)
def tokenize_fn(texts):
# MUST padding to max length to ensure inputs of all ranks have the same length
# Different length may lead to hang when using gemini, as different generation steps
batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True)
return {k: v.to(torch.cuda.current_device()) for k, v in batch.items()}
(actor, actor_optim), (critic, critic_optim) = strategy.prepare((actor, actor_optim), (critic, critic_optim))
# configure trainer
trainer = PPOTrainer(
strategy,
actor,
critic,
reward_model,
initial_model,
actor_optim,
critic_optim,
kl_coef=args.kl_coef,
ptx_coef=args.ptx_coef,
train_batch_size=args.train_batch_size,
experience_batch_size=args.experience_batch_size,
tokenizer=tokenize_fn,
max_length=512,
do_sample=True,
temperature=1.0,
top_k=50,
pad_token_id=tokenizer.pad_token_id,
eos_token_id=tokenizer.eos_token_id,
)
trainer.fit(prompt_dataloader=prompt_dataloader,
pretrain_dataloader=pretrain_dataloader,
num_episodes=args.num_episodes,
num_update_steps=args.num_update_steps,
num_collect_steps=args.num_collect_steps)
# save model checkpoint after fitting
trainer.save_model(args.save_path, only_rank0=True, tokenizer=tokenizer)
# save optimizer checkpoint on all ranks
if args.need_optim_ckpt:
strategy.save_optimizer(actor_optim,
'actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()),
only_rank0=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--prompt_path', type=str, default=None, help='path to the prompt dataset')
parser.add_argument('--pretrain_dataset', type=str, default=None, help='path to the pretrained dataset')
parser.add_argument('--strategy',
choices=['ddp', 'colossalai_gemini', 'colossalai_zero2'],
default='ddp',
help='strategy to use')
parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])
parser.add_argument('--pretrain', type=str, default=None)
parser.add_argument('--sft_lora_path', type=str, default=None)
parser.add_argument('--rm_model', default=None, choices=['gpt2', 'bloom', 'opt', 'llama'])
parser.add_argument('--rm_path', type=str, default=None)
parser.add_argument('--rm_pretrain', type=str, default=None)
parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts')
parser.add_argument('--need_optim_ckpt', type=bool, default=False)
parser.add_argument('--num_episodes', type=int, default=10)
parser.add_argument('--num_collect_steps', type=int, default=10)
parser.add_argument('--num_update_steps', type=int, default=5)
parser.add_argument('--train_batch_size', type=int, default=2)
parser.add_argument('--ptx_batch_size', type=int, default=1)
parser.add_argument('--experience_batch_size', type=int, default=8)
parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank")
parser.add_argument('--kl_coef', type=float, default=0.1)
parser.add_argument('--ptx_coef', type=float, default=0.9)
args = parser.parse_args()
main(args)
|
a956f6af550c7d0460f5acc9d6d60751e3c55cef
|
76dee2c21e55c69d62c5413a4629b3f7b4450051
|
/qmk_cli/helpers.py
|
90767fa13d87f517bd659d44b437b8babf20ee1c
|
[
"MIT"
] |
permissive
|
qmk/qmk_cli
|
865d624b58166b14d0fb3a997a2f38814089a206
|
1c72e1e564239e47634c0f1173add6b73927a2b6
|
refs/heads/master
| 2023-06-21T22:12:58.143930
| 2023-06-13T18:16:53
| 2023-06-13T18:16:53
| 194,550,935
| 113
| 42
|
MIT
| 2023-09-12T17:41:03
| 2019-06-30T19:00:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
helpers.py
|
"""Useful helper functions.
"""
import os
from functools import lru_cache
from importlib.util import find_spec
from pathlib import Path
from milc import cli
def is_qmk_firmware(qmk_firmware):
"""Returns True if the given Path() is a qmk_firmware clone.
"""
paths = [
qmk_firmware,
qmk_firmware / 'quantum',
qmk_firmware / 'requirements.txt',
qmk_firmware / 'requirements-dev.txt',
qmk_firmware / 'lib/python/qmk/cli/__init__.py'
]
for path in paths:
if not path.exists():
return False
return True
@lru_cache(maxsize=2)
def find_qmk_firmware():
"""Look for qmk_firmware in the usual places.
This function returns the path to qmk_firmware, or the default location if one does not exist.
"""
if in_qmk_firmware():
return in_qmk_firmware()
if cli.config.user.qmk_home:
return Path(cli.config.user.qmk_home).expanduser().resolve()
if 'QMK_HOME' in os.environ:
path = Path(os.environ['QMK_HOME']).expanduser()
if path.exists():
return path.resolve()
return path
return Path.home() / 'qmk_firmware'
def in_qmk_firmware():
"""Returns the path to the qmk_firmware we are currently in, or None if we are not inside qmk_firmware.
"""
cur_dir = Path.cwd()
while len(cur_dir.parents) > 0:
if is_qmk_firmware(cur_dir):
return cur_dir
# Move up a directory before the next iteration
cur_dir = cur_dir / '..'
cur_dir = cur_dir.resolve()
|
1e60f810cb4bdccac387b43d5333e0a2a0f215e3
|
67cc5db4593e2cdd109e589e13fb07074bcff5d9
|
/dace/transformation/passes/dead_state_elimination.py
|
a5ff0ba71a7c58e18b00d54cbb761db3e6e3ff24
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
spcl/dace
|
39849b1488e8f59f880fc0e2572687556c51847d
|
c5ca99ad37e7ceef6da71026c3c8bb579f64117f
|
refs/heads/master
| 2023-08-31T10:45:09.480018
| 2023-08-30T06:05:10
| 2023-08-30T06:05:10
| 172,703,996
| 402
| 114
|
BSD-3-Clause
| 2023-09-14T15:18:29
| 2019-02-26T12:05:50
|
Python
|
UTF-8
|
Python
| false
| false
| 7,062
|
py
|
dead_state_elimination.py
|
# Copyright 2019-2022 ETH Zurich and the DaCe authors. All rights reserved.
import collections
import sympy as sp
from typing import Optional, Set, Tuple, Union
from dace import SDFG, InterstateEdge, SDFGState, symbolic, properties
from dace.properties import CodeBlock
from dace.sdfg.graph import Edge
from dace.sdfg.validation import InvalidSDFGInterstateEdgeError
from dace.transformation import pass_pipeline as ppl
@properties.make_properties
class DeadStateElimination(ppl.Pass):
"""
Removes all unreachable states (e.g., due to a branch that will never be taken) from an SDFG.
"""
CATEGORY: str = 'Simplification'
def modifies(self) -> ppl.Modifies:
return ppl.Modifies.States
def should_reapply(self, modified: ppl.Modifies) -> bool:
# If connectivity or any edges were changed, some more states might be dead
return modified & (ppl.Modifies.InterstateEdges | ppl.Modifies.States)
def apply_pass(self, sdfg: SDFG, _) -> Optional[Set[Union[SDFGState, Edge[InterstateEdge]]]]:
"""
Removes unreachable states throughout an SDFG.
:param sdfg: The SDFG to modify.
:param pipeline_results: If in the context of a ``Pipeline``, a dictionary that is populated with prior Pass
results as ``{Pass subclass name: returned object from pass}``. If not run in a
pipeline, an empty dictionary is expected.
:param initial_symbols: If not None, sets values of initial symbols.
:return: A set of the removed states, or None if nothing was changed.
"""
# Mark dead states and remove them
dead_states, dead_edges, annotated = self.find_dead_states(sdfg, set_unconditional_edges=True)
for e in dead_edges:
sdfg.remove_edge(e)
sdfg.remove_nodes_from(dead_states)
result = dead_states | dead_edges
if not annotated:
return result or None
else:
return result or set() # Return an empty set if edges were annotated
def find_dead_states(
self,
sdfg: SDFG,
set_unconditional_edges: bool = True) -> Tuple[Set[SDFGState], Set[Edge[InterstateEdge]], bool]:
"""
Finds "dead" (unreachable) states in an SDFG. A state is deemed unreachable if it is:
* Unreachable from the starting state
* Conditions leading to it will always evaluate to False
* There is another unconditional (always True) inter-state edge that leads to another state
:param sdfg: The SDFG to traverse.
:param set_unconditional_edges: If True, conditions of edges evaluated as unconditional are removed.
:return: A 3-tuple of (unreachable states, unreachable edges, were edges annotated).
"""
visited: Set[SDFGState] = set()
dead_edges: Set[Edge[InterstateEdge]] = set()
edges_annotated = False
# Run a modified BFS where definitely False edges are not traversed, or if there is an
# unconditional edge the rest are not. The inverse of the visited states is the dead set.
queue = collections.deque([sdfg.start_state])
while len(queue) > 0:
node = queue.popleft()
if node in visited:
continue
visited.add(node)
# First, check for unconditional edges
unconditional = None
for e in sdfg.out_edges(node):
# If an unconditional edge is found, ignore all other outgoing edges
if self.is_definitely_taken(e.data, sdfg):
# If more than one unconditional outgoing edge exist, fail with Invalid SDFG
if unconditional is not None:
raise InvalidSDFGInterstateEdgeError('Multiple unconditional edges leave the same state', sdfg,
sdfg.edge_id(e))
unconditional = e
if set_unconditional_edges and not e.data.is_unconditional():
# Annotate edge as unconditional
e.data.condition = CodeBlock('1')
edges_annotated = True
# Continue traversal through edge
if e.dst not in visited:
queue.append(e.dst)
continue
if unconditional is not None: # Unconditional edge exists, skip traversal
# Remove other (now never taken) edges from graph
for e in sdfg.out_edges(node):
if e is not unconditional:
dead_edges.add(e)
continue
# End of unconditional check
# Check outgoing edges normally
for e in sdfg.out_edges(node):
next_node = e.dst
# Test for edges that definitely evaluate to False
if self.is_definitely_not_taken(e.data, sdfg):
dead_edges.add(e)
continue
# Continue traversal through edge
if next_node not in visited:
queue.append(next_node)
# Dead states are states that are not live (i.e., visited)
return set(sdfg.nodes()) - visited, dead_edges, edges_annotated
def report(self, pass_retval: Set[Union[SDFGState, Edge[InterstateEdge]]]) -> str:
if pass_retval is not None and not pass_retval:
return 'DeadStateElimination annotated new unconditional edges.'
states = [p for p in pass_retval if isinstance(p, SDFGState)]
return f'Eliminated {len(states)} states and {len(pass_retval) - len(states)} interstate edges.'
def is_definitely_taken(self, edge: InterstateEdge, sdfg: SDFG) -> bool:
""" Returns True iff edge condition definitely evaluates to True. """
if edge.is_unconditional():
return True
# Evaluate condition
scond = edge.condition_sympy()
if scond == True or scond == sp.Not(sp.logic.boolalg.BooleanFalse(), evaluate=False):
return True
# Evaluate non-optional arrays
scond = symbolic.evaluate_optional_arrays(scond, sdfg)
if scond == True:
return True
# Indeterminate or False condition
return False
def is_definitely_not_taken(self, edge: InterstateEdge, sdfg: SDFG) -> bool:
""" Returns True iff edge condition definitely evaluates to False. """
if edge.is_unconditional():
return False
# Evaluate condition
scond = edge.condition_sympy()
if scond == False or scond == sp.Not(sp.logic.boolalg.BooleanTrue(), evaluate=False):
return True
# Evaluate non-optional arrays
scond = symbolic.evaluate_optional_arrays(scond, sdfg)
if scond == False:
return True
# Indeterminate or True condition
return False
|
7523cdf7101e19534a18c4e1729bc0b4103f538d
|
9472c7d1608e318e46214f231773fbb3f33de0f1
|
/kats/models/ensemble/kats_ensemble.py
|
2f063131bfff14c0fe6af6db7fb919d9c5812f84
|
[
"MIT"
] |
permissive
|
facebookresearch/Kats
|
16eee984bc1c482bd709cb5d62c226d4ad85f216
|
00ab9a3db27218b4817eae2e05dc602e437f634f
|
refs/heads/main
| 2023-08-30T23:33:12.654847
| 2023-08-25T16:02:04
| 2023-08-25T16:02:04
| 342,388,745
| 4,514
| 517
|
MIT
| 2023-09-14T15:28:43
| 2021-02-25T21:51:06
|
Python
|
UTF-8
|
Python
| false
| false
| 30,751
|
py
|
kats_ensemble.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Kats ensemble model
Implementation of the Kats ensemble model. It starts from seasonality detection, if seasonality detected, it
continues to perform STL decomposition, then fit forecasting models on de-seasonalized components and aggregate;
otherwise it simiply leverage individual forecasting models and ensembling. We provided two ensembling methods,
weighted average and median ensembling.
"""
from __future__ import annotations
import logging
import math
import multiprocessing
import sys
from copy import copy
from multiprocessing import cpu_count
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Type, Union
import numpy as np
import pandas as pd
from kats.consts import Params, TimeSeriesData
# Seasonality detector
from kats.detectors.seasonality import ACFDetector
from kats.models import (
arima,
holtwinters,
linear_model,
prophet,
quadratic_model,
sarima,
theta,
)
from kats.models.ensemble.ensemble import EnsembleParams
from kats.models.model import Model
from kats.utils.backtesters import BackTesterSimple
# STL decomposition
from kats.utils.decomposition import TimeSeriesDecomposition
# from numpy.typing import ArrayLike
ArrayLike = Union[Sequence[float], np.ndarray]
# models that can fit de_seasonal component
MODELS = {
"arima": arima.ARIMAModel,
"holtwinters": holtwinters.HoltWintersModel,
"sarima": sarima.SARIMAModel,
"prophet": prophet.ProphetModel,
"linear": linear_model.LinearModel,
"quadratic": quadratic_model.QuadraticModel,
"theta": theta.ThetaModel,
}
# models that can fit seasonal time series data
SMODELS = {
"prophet": prophet.ProphetModel,
"theta": theta.ThetaModel,
# "sarima": sarima.SARIMAModel,
}
def _logged_error(msg: str) -> ValueError:
"""Log and raise an error."""
logging.error(msg)
return ValueError(msg)
# pyre-fixme[24]: Generic type `Model` expects 1 type parameter.
class KatsEnsemble(Model):
"""Decomposition based ensemble model in Kats
This is the holistic ensembling class based on decomposition when seasonality presents
"""
seasonality: bool = False
sea_data: Optional[TimeSeriesData] = None
desea_data: Optional[TimeSeriesData] = None
steps: int = -1
decomposition_method: str = ""
model_params: Optional[EnsembleParams] = None
fitted: Optional[Dict[str, Any]] = None
weights: Optional[Dict[str, float]] = None
predicted: Optional[Dict[str, pd.DataFrame]] = None
err: Optional[Dict[str, float]] = None
dates: Optional[pd.DatetimeIndex] = None
fcst_dates: Optional[ArrayLike] = None
fcst_df: Optional[pd.DataFrame] = None
errors: Optional[Dict[str, Any]] = None
def __init__(
self,
data: TimeSeriesData,
params: Dict[str, Any],
) -> None:
self.data = data
self.freq: Optional[str] = pd.infer_freq(data.time)
self.params = params
self.validate_params()
def validate_params(self) -> None:
# validate aggregation method
if self.params["aggregation"] not in ("median", "weightedavg"):
method = self.params["aggregation"]
msg = f"Only support `median` or `weightedavg` ensemble, but got {method}."
raise _logged_error(msg)
# validate decomposition method
if self.params["decomposition_method"] in ("additive", "multiplicative"):
self.decomposition_method = self.params["decomposition_method"]
else:
logging.info("Invalid decomposition method setting specified")
logging.info("Defaulting to Additive Decomposition")
self.decomposition_method = "additive"
# validate m
if (self.params["seasonality_length"] is not None) and (
self.params["seasonality_length"] > int(len(self.data.time) // 2)
):
msg = "seasonality_length value cannot be larger than"
"1/2 of the length of give time series"
raise _logged_error(msg)
# check customized forecastExecutor
if ("forecastExecutor" in self.params.keys()) and (
self.params["forecastExecutor"] is not None
):
msg = "Using customized forecastExecutor from given parameters"
logging.info(msg)
self.forecastExecutor = self.params["forecastExecutor"]
# check customized fitExecutor
if ("fitExecutor" in self.params.keys()) and (
self.params["fitExecutor"] is not None
):
msg = "Using customized fitExecutor from given parameters"
logging.info(msg)
self.fitExecutor = self.params["fitExecutor"]
@staticmethod
def seasonality_detector(data: TimeSeriesData) -> bool:
"""Detect seasonalities from given TimeSeriesData
Args:
data: :class:`kats.consts.TimeSeriesData`, the input `TimeSeriesData`
Returns:
Flag for the presence of seasonality
"""
detector = ACFDetector(data)
detector.detector()
seasonality = detector.seasonality_detected
return seasonality
@staticmethod
def deseasonalize(
data: TimeSeriesData, decomposition_method: str
) -> Tuple[TimeSeriesData, TimeSeriesData]:
"""STL decomposition to given TimeSeriesData
Static method to perform decomposition on the input data
Args:
data: :class:`kats.consts.TimeSeriesData`, input time series data
decomposition_method: the specific method for decomposition
Returns:
Tuple of seasonal data and de-seasonalized data
"""
# create decomposer for time series decomposition
decomposer = TimeSeriesDecomposition(data, decomposition_method)
decomp = decomposer.decomposer()
sea_data = copy(decomp["seasonal"])
desea_data = copy(data)
if decomposition_method == "additive":
desea_data.value = desea_data.value - decomp["seasonal"].value
else:
desea_data.value = desea_data.value / decomp["seasonal"].value
return sea_data, desea_data
@staticmethod
def reseasonalize(
sea_data: TimeSeriesData,
desea_predict: Dict[str, pd.DataFrame],
decomposition_method: str,
seasonality_length: int,
steps: int,
) -> Dict[str, pd.DataFrame]:
"""Re-seasonalize the time series data
Static method to re-seasonalize the input data
Args:
sea_data: :class:`kats.consts.TimeSeriesData`, the seasonal data from deseasonalize method
desea_predict: dict of forecasted results for the deseasonalized
data for each individual forecasting method
decomposition_method: the specific method for decomposition
seasonality_lenth: the length of seasonality
steps: the length of forecasting horizon
Returns:
Dict of re-seasonalized data for each individual forecasting model
"""
rep = math.trunc(1 + steps / seasonality_length)
seasonality_unit = sea_data.value[-seasonality_length:]
predicted = {}
for model_name, desea_pred in desea_predict.items():
if decomposition_method == "additive":
if (
"fcst_lower" in desea_pred.columns
and "fcst_upper" in desea_pred.columns
):
# check consistency of time being index
if "time" in desea_pred.columns:
msg = "Setting time column as index"
logging.info(msg)
desea_pred.set_index("time", inplace=True)
# native C.I calculated from individual model
predicted[model_name] = (
desea_pred
+ np.tile(
np.tile(seasonality_unit, rep)[:steps], [3, 1]
).transpose()
)
else:
# no C.I from individual model
tmp_fcst = desea_pred.fcst + np.tile(seasonality_unit, rep)[:steps]
predicted[model_name] = pd.DataFrame(
{
"time": desea_pred.index,
"fcst": tmp_fcst,
"fcst_lower": np.nan,
"fcst_upper": np.nan,
},
copy=False,
).set_index("time")
else:
# multiplicative, element-wise multiply
if (
"fcst_lower" in desea_pred.columns
and "fcst_upper" in desea_pred.columns
):
# check consistency of time being index
if "time" in desea_pred.columns:
msg = "Setting time column as index"
logging.info(msg)
desea_pred.set_index("time", inplace=True)
# native C.I calculated from individual model
predicted[model_name] = (
desea_pred
* np.tile(
np.tile(seasonality_unit, rep)[:steps], [3, 1]
).transpose()
)
else:
# no C.I from individual model
tmp_fcst = desea_pred.fcst * np.tile(seasonality_unit, rep)[:steps]
predicted[model_name] = pd.DataFrame(
{
"time": desea_pred.index,
"fcst": tmp_fcst,
"fcst_lower": 0,
"fcst_upper": 0,
},
copy=False,
).set_index("time")
return predicted
def fitExecutor(
self,
data: TimeSeriesData,
models: EnsembleParams,
should_auto_backtest: bool = False,
err_method: str = "mape",
) -> Tuple[Dict[str, Any], Optional[Dict[str, float]]]:
"""callable forecast executor
This is native implementation with Python's multiprocessing
fit individual model in `models` with given `data`. Services
who use KatsEnsemble need to implement their own executor for better
performance, if no executor function is given, the native version will be
used.
Attributes:
data: :class:`kats.consts.TimeSeriesData`, given TimeSeriesData, could be original or de-seasonalized
models: EnsembleParams object containing model params
in BaseModelParams
should_auto_backtest: boolean flag for additional back testing runs
Returns:
Tuple of fitted individual model and weights
"""
# Fit individual model with given data
num_process = min(len(MODELS), (cpu_count() - 1) // 2)
if num_process < 1:
num_process = 1
pool = multiprocessing.Manager().Pool(
processes=(num_process), maxtasksperchild=1000
)
fitted_models = {}
for model in models.models:
fitted_models[model.model_name] = pool.apply_async(
self._fit_single,
args=(
data,
MODELS[model.model_name.split("_")[0].lower()],
model.model_params,
),
)
pool.close()
pool.join()
fitted = {model: res.get() for model, res in fitted_models.items()}
# if auto back testing
weights = self.backTestExecutor(err_method) if should_auto_backtest else None
return fitted, weights
# pyre-fixme[15]: `fit` overrides method defined in `Model` inconsistently.
def fit(self, err_method: str = "mape") -> KatsEnsemble:
"""Fit individual forecasting models via calling fitExecutor
This is the fit methdo to fit individual forecasting model
"""
self.seasonality = KatsEnsemble.seasonality_detector(self.data)
# check if self.params["seasonality_length"] is given
if self.seasonality and self.params["seasonality_length"] is None:
msg = "The given time series contains seasonality,\
a `seasonality_length` must be given in params."
raise _logged_error(msg)
# set up auto backtesting flag
auto_backtesting = False if self.params["aggregation"] == "median" else True
# check fitExecutor
fitExecutor = self.params.get("fitExecutor")
if fitExecutor is None:
fitExecutor = self.fitExecutor
if self.seasonality:
# STL decomposition
sea_data, desea_data = KatsEnsemble.deseasonalize(
self.data, self.decomposition_method
)
self.sea_data = sea_data
self.desea_data = desea_data
# we created extra models
given_models = copy(self.params["models"].models)
for m in self.params["models"].models:
if m.model_name.lower() in SMODELS.keys():
tmp = copy(m)
tmp.model_name = m.model_name + "_smodel"
given_models.append(tmp)
self.model_params = model_params = EnsembleParams(given_models)
self.fitted, self.weights = fitExecutor(
data=desea_data,
models=model_params,
should_auto_backtest=auto_backtesting,
)
else:
# fit models on the original data
self.model_params = model_params = EnsembleParams(
self.params["models"].models
)
self.fitted, self.weights = fitExecutor(
data=self.data,
models=model_params,
should_auto_backtest=auto_backtesting,
err_method=err_method,
)
return self
    # pyre-fixme[14]: `predict` overrides method defined in `Model` inconsistently.
    # pyre-fixme[15]: `predict` overrides method defined in `Model` inconsistently.
    def predict(self, steps: int) -> KatsEnsemble:
        """Predict the future with each fitted individual model.

        Args:
            steps: number of steps ahead to forecast.

        Returns:
            self, with ``predicted`` populated (dict of model name ->
            forecast DataFrame indexed by time).

        Raises:
            Error (via _logged_error) when called before ``fit``.
        """
        fitted = self.fitted
        if fitted is None:
            raise _logged_error("fit must be called before predict.")
        self.steps = steps
        if self.seasonality:
            sea_data = self.sea_data
            assert sea_data is not None
            # Two kinds of fitted models exist: de-seasonalized ones and the
            # "_smodel" copies fitted with seasonality handled natively.
            desea_fitted = {k: v for k, v in fitted.items() if "_smodel" not in k}
            desea_predict = {
                k: v.predict(self.steps).set_index("time")
                for k, v in desea_fitted.items()
            }
            # Re-apply the seasonal component to the de-seasonalized forecasts.
            predicted = KatsEnsemble.reseasonalize(
                sea_data=sea_data,
                desea_predict=desea_predict,
                decomposition_method=self.decomposition_method,
                seasonality_length=self.params["seasonality_length"],
                steps=self.steps,
            )
            # Add the "_smodel" forecasts, which need no re-seasonalizing.
            fitted_smodel = {k: v for k, v in fitted.items() if "_smodel" in k}
            extra_predict = {
                k: v.predict(self.steps).set_index("time")
                for k, v in fitted_smodel.items()
            }
            predicted.update(extra_predict)
            self.predicted = predicted
        else:
            predicted = {
                k: v.predict(self.steps).set_index("time") for k, v in fitted.items()
            }
            # Add dummy C.I if the model doesn't have native C.I.
            # This is a hack for median ensemble; every model needs its
            # native C.I if the user chooses weighted average ensemble.
            for k, v in predicted.items():
                # If predicted df doesn't have fcst_lower and fcst_upper.
                if not {"fcst_lower", "fcst_upper"}.issubset(v.columns):
                    # Add dummy C.I (NaN bounds on a copy of the frame).
                    tmp_v = copy(v)
                    tmp_v["fcst_lower"] = np.nan
                    tmp_v["fcst_upper"] = np.nan
                    predicted[k] = tmp_v
            self.predicted = predicted
        return self
    def forecast(
        self, steps: int
    ) -> Tuple[Dict[str, pd.DataFrame], Optional[Dict[str, float]]]:
        """Holistic forecast method in Kats ensemble.

        Combines fit and predict to produce forecasted results in one call;
        especially useful for services that prefer producing final forecasts
        without saving the fitted models.

        Args:
            steps: the length of the forecasting horizon.

        Returns:
            Tuple of (dict of model name -> forecast DataFrame,
            normalized model weights or None for median aggregation).
        """
        self.steps = steps
        self.seasonality = KatsEnsemble.seasonality_detector(self.data)
        # A seasonal series requires an explicit seasonality_length.
        if (self.seasonality) and (self.params["seasonality_length"] is None):
            msg = "The given time series contains seasonality,\
                a `seasonality_length` must be given in params."
            raise _logged_error(msg)
        # Median aggregation needs no weights, so skip auto backtesting.
        auto_backtesting = False if self.params["aggregation"] == "median" else True
        if self.seasonality:
            # Decompose, then fit/predict on the de-seasonalized component.
            sea_data, desea_data = KatsEnsemble.deseasonalize(
                self.data, self.decomposition_method
            )
            self.sea_data = sea_data
            self.desea_data = desea_data
            # Fit + predict all configured models on the de-seasonalized data.
            desea_predict, desea_err = self.forecastExecutor(
                data=desea_data,
                models=self.params["models"],
                steps=steps,
                should_auto_backtest=auto_backtesting,
            )
            # Re-apply the seasonal component to the de-seasonalized forecasts.
            predicted = KatsEnsemble.reseasonalize(
                sea_data=sea_data,
                desea_predict=desea_predict,
                decomposition_method=self.decomposition_method,
                seasonality_length=self.params["seasonality_length"],
                steps=self.steps,
            )
            # Extra "_smodel" copies for seasonality-capable models, fitted
            # on the original (still seasonal) data.
            extra_models = []
            for m in self.params["models"].models:
                if m.model_name.lower() in SMODELS.keys():
                    tmp = copy(m)
                    tmp.model_name = m.model_name + "_smodel"
                    extra_models.append(tmp)
            model_params = EnsembleParams(extra_models)
            extra_predict, extra_error = self.forecastExecutor(
                data=self.data,
                models=model_params,
                steps=self.steps,
                should_auto_backtest=auto_backtesting,
            )
            # Merge the two prediction sets.
            predicted.update(extra_predict)
            self.predicted = predicted
            if self.params["aggregation"] == "weightedavg":
                # Merge backtest errors from both executor runs.
                if desea_err is None:
                    desea_err = extra_error
                elif extra_error is not None:
                    desea_err.update(extra_error)
                self.err = forecast_error = desea_err
            else:
                forecast_error = None
        else:
            # No seasonality detected: fit/predict on the original data.
            predicted, forecast_error = self.forecastExecutor(
                data=self.data,
                models=self.params["models"],
                steps=self.steps,
                should_auto_backtest=auto_backtesting,
            )
            self.err = forecast_error
        # Same as in the predict method above:
        # add dummy C.I if the model doesn't have native C.I.
        # This is a hack for median ensemble; every model needs its
        # native C.I if the user chooses weighted average ensemble.
        for k, v in predicted.items():
            # If predicted df doesn't have fcst_lower and fcst_upper.
            if "fcst_lower" not in v.columns or "fcst_upper" not in v.columns:
                # Add dummy C.I (NaN bounds on a copy of the frame).
                tmp_v = copy(v)
                tmp_v["fcst_lower"] = np.nan
                tmp_v["fcst_upper"] = np.nan
                predicted[k] = tmp_v
        self.predicted = predicted
        # Transform errors into normalized weights for weighted averaging:
        # weight ~ 1/error, normalized to sum to 1 (epsilon avoids div by 0).
        if self.params["aggregation"] == "weightedavg":
            assert forecast_error is not None
            original_weights = {
                model: 1 / (err + sys.float_info.epsilon)
                for model, err in forecast_error.items()
            }
            self.weights = {
                model: err / sum(original_weights.values())
                for model, err in original_weights.items()
            }
        else:
            self.weights = None
        return predicted, self.weights
    def forecastExecutor(
        self,
        data: TimeSeriesData,
        models: EnsembleParams,
        steps: int,
        should_auto_backtest: bool = False,
    ) -> Tuple[Dict[str, pd.DataFrame], Optional[Dict[str, float]]]:
        """Forecast executor.

        A callable execution function that:
        (1) fits each model in parallel,
        (2) predicts the given number of steps,
        (3) optionally runs backtesting.

        Args:
            data: :class:`kats.consts.TimeSeriesData` input series.
            models: the ensemble parameters as in `EnsembleParams`.
            steps: the length of the forecasting horizon.
            should_auto_backtest: flag to automatically back test, default False.

        Returns:
            Tuple of (dict of model name -> forecast DataFrame indexed by
            time, dict of backtest errors or None).
        """
        # Cap worker count at roughly half the spare CPUs, but never below 1.
        num_process = min(len(MODELS), (cpu_count() - 1) // 2)
        if num_process < 1:
            num_process = 1
        pool = multiprocessing.Manager().Pool(
            processes=(num_process), maxtasksperchild=1000
        )
        # Kick off one async fit per model; results collected after join().
        fitted_models = {}
        for model in models.models:
            fitted_models[model.model_name] = pool.apply_async(
                self._fit_single,
                args=(
                    data,
                    MODELS[model.model_name.split("_")[0].lower()],
                    model.model_params,
                ),
            )
        pool.close()
        pool.join()
        fitted = {model: res.get() for model, res in fitted_models.items()}
        # Simply predict with the given steps for every fitted model.
        predicted = {}
        for model_name, model_fitted in fitted.items():
            predicted[model_name] = model_fitted.predict(steps).set_index("time")
        # If auto back testing is requested, score the models.
        self.model_params = models  # used by _backtester_all
        if should_auto_backtest:
            _, errors = self._backtester_all()
        else:
            errors = None
        return predicted, errors
    def aggregate(self) -> pd.DataFrame:
        """Aggregate the per-model results from the predict method.

        Uses either the column-wise median ("median" aggregation) or a
        weighted average with the backtest-derived weights.

        Returns:
            Final combined forecast as a pd.DataFrame with columns
            time, fcst, fcst_lower, fcst_upper.

        Raises:
            Error (via _logged_error) when called before ``predict``, or
            when weighted averaging finds NaN confidence intervals.
        """
        predicted = self.predicted
        if predicted is None:
            raise _logged_error("predict must be called before aggregate.")
        # Create future dates, excluding the last observed timestamp.
        last_date = self.data.time.max()
        dates = pd.date_range(start=last_date, periods=self.steps + 1, freq=self.freq)
        self.dates = dates = dates[dates != last_date]
        self.fcst_dates = dates.to_pydatetime()
        # Collect fcst, fcst_lower and fcst_upper into one frame per column,
        # with one column per model.
        fcsts = {}
        for col in ["fcst", "fcst_lower", "fcst_upper"]:
            fcsts[col] = pd.concat(
                [x[col].reset_index(drop=True) for x in predicted.values()],
                axis=1,
                copy=False,
            )
            fcsts[col].columns = predicted.keys()
        if self.params["aggregation"].lower() == "median":
            # Clean up dataframes with C.I as np.nan or zero so that dummy
            # bounds do not skew the median.
            fcsts = self.clean_dummy_CI(fcsts, use_zero=False)
            self.fcst_df = fcst_df = pd.DataFrame(
                {
                    "time": dates,
                    "fcst": fcsts["fcst"].median(axis=1),
                    "fcst_lower": fcsts["fcst_lower"].median(axis=1),
                    "fcst_upper": fcsts["fcst_upper"].median(axis=1),
                },
                copy=False,
            )
        else:
            # Weighted average requires every model to have native C.I.
            if (
                fcsts["fcst_lower"].isnull().values.any()
                or fcsts["fcst_upper"].isnull().values.any()
            ):
                msg = "Conf. interval contains NaN, please check individual model."
                raise _logged_error(msg)
            weights = self.weights
            assert weights is not None
            weights = np.array(list(weights.values()))
            self.fcst_df = fcst_df = pd.DataFrame(
                {
                    "time": dates,
                    "fcst": fcsts["fcst"].dot(weights),
                    "fcst_lower": fcsts["fcst_lower"].dot(weights),
                    "fcst_upper": fcsts["fcst_upper"].dot(weights),
                },
                copy=False,
            )
        logging.debug("Return forecast data: {fcst_df}".format(fcst_df=fcst_df))
        return fcst_df
@staticmethod
def clean_dummy_CI(
fcsts: Dict[str, pd.DataFrame],
use_zero: bool = True,
) -> Dict[str, pd.DataFrame]:
"""Helper method to clean dummy prediction interval
Args:
fcsts: the dict of forecasting results from individual models
use_zero: flag to use zero to fill nan, default as True
Returns:
the cleaned results in a dict
"""
if use_zero:
fcsts["fcst_lower"] = fcsts["fcst_lower"].fillna(0)
fcsts["fcst_upper"] = fcsts["fcst_upper"].fillna(0)
else:
fcsts["fcst_lower"] = fcsts["fcst_lower"].replace(0, np.nan)
fcsts["fcst_upper"] = fcsts["fcst_upper"].replace(0, np.nan)
return fcsts
def backTestExecutor(self, err_method: str) -> Dict[str, float]:
"""wrapper for back test executor
services which use KatsEnsemble need to write their own backtest wrapper
Args:
None
Returns:
The dict of backtesting results
"""
weights, _ = self._backtester_all(err_method=err_method)
return weights
def _fit_single(
self,
data: TimeSeriesData,
# pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
model_func: Callable,
model_param: Params
# pyre-fixme[24]: Generic type `Model` expects 1 type parameter.
) -> Model:
"""Private method to fit individual model
Args:
data: the input time series data
model_func: the callable func to fit models
model_param: the corresponding model parameter class
Returns:
Fitted Kats model
"""
# get the model function call
m = model_func(params=model_param, data=data)
m.fit()
return m
def _backtester_single(
self,
params: Params,
# pyre-fixme[24]: Generic type `Model` expects 1 type parameter.
model_class: Type[Model],
train_percentage: int = 80,
test_percentage: int = 20,
err_method: str = "mape",
) -> float:
"""Private method to run single back testing process
Args:
params: Kats model parameters
model_class: Untyped. Defines type of model
train_percentage: float. Percentage of data used for training
test_percentage: float. Percentage of data used for testing
error_method: list of strings indicating which errors to calculate
we currently support "mape", "smape", "mae", "mase", "mse", "rmse"
Returns:
float, the backtesting error
"""
bt = BackTesterSimple(
[err_method],
self.data,
params,
train_percentage,
test_percentage,
model_class,
)
bt.run_backtest()
return bt.get_error_value(err_method)
    def _backtester_all(
        self,
        err_method: str = "mape",
    ) -> Tuple[Dict[str, float], Dict[str, float]]:
        """Backtest every configured model in parallel.

        Args:
            err_method: error metric to compute; supported values include
                "mape", "smape", "mae", "mase", "mse", "rmse".

        Returns:
            Tuple of (normalized weights per model, raw errors per model).

        Raises:
            Error (via _logged_error) when called before ``fit``.
        """
        model_params = self.model_params
        if model_params is None:
            raise _logged_error("fit must be called before backtesting.")
        # Cap worker count at roughly half the spare CPUs, but never below 1.
        num_process = min(len(MODELS.keys()), (cpu_count() - 1) // 2)
        if num_process < 1:
            num_process = 1
        pool = multiprocessing.Manager().Pool(
            processes=(num_process), maxtasksperchild=1000
        )
        # One async backtest per model; names may carry a "_smodel" suffix,
        # so the prefix before "_" selects the model class from MODELS.
        backtesters = {}
        for model in model_params.models:
            backtesters[model.model_name] = pool.apply_async(
                self._backtester_single,
                args=(
                    model.model_params,
                    MODELS[model.model_name.split("_")[0].lower()],
                ),
                kwds={"err_method": err_method},
            )
        pool.close()
        pool.join()
        self.errors = errors = {model: res.get() for model, res in backtesters.items()}
        # weight ~ 1/error, normalized to sum to 1 (epsilon avoids div by 0).
        original_weights = {
            model: 1 / (err + sys.float_info.epsilon) for model, err in errors.items()
        }
        weights = {
            model: err / sum(original_weights.values())
            for model, err in original_weights.items()
        }
        return weights, errors
|
ee0869ff9f07e01e58e70f23b93d2487cb19612d
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/find-critical-and-pseudo-critical-edges-in-minimum-spanning-tree.py
|
341d2f701f5033d357f190d33aae34e0e4f2c98e
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 1,673
|
py
|
find-critical-and-pseudo-critical-edges-in-minimum-spanning-tree.py
|
# Time: O(nlogn)
# Space: O(n)
class UnionFind(object):
    """Disjoint-set with path compression; tracks the component count."""

    def __init__(self, n):
        # Use a real list so path compression below can assign into it:
        # in Python 3, `range(n)` is an immutable sequence and
        # `self.set[x] = ...` would raise TypeError.
        self.set = list(range(n))
        self.count = n

    def find_set(self, x):
        """Return the representative of x's component."""
        if self.set[x] != x:
            self.set[x] = self.find_set(self.set[x])  # path compression.
        return self.set[x]

    def union_set(self, x, y):
        """Merge the components of x and y; return False if already joined."""
        x_root, y_root = map(self.find_set, (x, y))
        if x_root == y_root:
            return False
        # Deterministically keep the smaller index as the root.
        self.set[max(x_root, y_root)] = min(x_root, y_root)
        self.count -= 1
        return True
class Solution(object):
    def findCriticalAndPseudoCriticalEdges(self, n, edges):
        """
        :type n: int
        :type edges: List[List[int]]
        :rtype: List[List[int]]

        Classifies MST edges:
        - critical: excluding the edge raises the MST weight;
        - pseudo-critical: forcing the edge in keeps the MST weight.
        NOTE: mutates `edges` (appends the original index and sorts).
        """
        def MST(n, edges, unused=None, used=None):
            # Kruskal's MST weight; `unused` skips one edge, `used` forces
            # one edge in first. Returns inf when the graph doesn't connect.
            union_find = UnionFind(n)
            weight = 0
            if used is not None:
                u, v, w, _ = edges[used]
                if union_find.union_set(u, v):
                    weight += w
            for i, (u, v, w, _) in enumerate(edges):
                if i == unused:
                    continue
                if union_find.union_set(u, v):
                    weight += w
            return weight if union_find.count == 1 else float("inf")
        # Remember each edge's original index before sorting by weight.
        for i, edge in enumerate(edges):
            edge.append(i)
        edges.sort(key=lambda x: x[2])
        mst = MST(n, edges)
        result = [[], []]
        for i, edge in enumerate(edges):
            # Removing a critical edge strictly increases the MST weight.
            if mst < MST(n, edges, unused=i):
                result[0].append(edge[3])
            # Otherwise, if forcing the edge keeps the weight, it can belong
            # to some MST: pseudo-critical.
            elif mst == MST(n, edges, used=i):
                result[1].append(edge[3])
        return result
|
44e3af5599164fd723a6fc660b9ec0f410941320
|
cd8a9ac708be8f559da1e9cadc1d5ad457226364
|
/mods/Paint.py
|
cfae7a23db04e74699c2f674a87c07c80cecbbed
|
[
"Unlicense"
] |
permissive
|
Mrmaxmeier/BombSquad-Community-Mod-Manager
|
078d0c95f326979989b8a9338c89724a08bcf077
|
aeba3f6858335aa08243c02ea29d4d4c807d43cc
|
refs/heads/master
| 2023-01-25T05:38:15.945963
| 2023-01-22T11:20:19
| 2023-01-22T11:20:19
| 28,673,893
| 143
| 155
|
Unlicense
| 2020-04-10T10:43:37
| 2014-12-31T19:54:47
|
Python
|
UTF-8
|
Python
| false
| false
| 6,252
|
py
|
Paint.py
|
#Canvas
import bs
import random
def bsGetAPIVersion():
    """BombSquad mod-API version this mod targets."""
    return 4
def bsGetGames():
    """Expose the game types provided by this mod."""
    return [Paint]
def bsGetLevels():
    """Expose the co-op level(s) provided by this mod."""
    paint_level = bs.Level(
        'Paint',
        displayName='${GAME}',
        gameType=Paint,
        settings={},
        previewTexName='courtyardPreview',
    )
    return [paint_level]
class Dot(bs.Actor):
    """A colored circle drawn onto the map at a fixed position."""

    def __init__(self, position=(0, 0, 0), color=(0, 0, 0), radius=.5):
        bs.Actor.__init__(self)
        # Clamp negative radii to zero.
        self._r1 = 0 if radius < 0 else radius
        self.position = position
        self.color = color
        marker = bs.newNode(
            'locator',
            attrs={
                'shape': 'circle',
                'position': position,
                'color': self.color,
                'opacity': 1,
                'drawBeauty': True,
                'additive': True,
            })
        # Grow the dot from nothing to its full diameter over 200 ms.
        bs.animateArray(marker, 'size', 1, {0: [0.0], 200: [self._r1 * 2.0]})
        self._node = [marker]
class Artist(bs.PlayerSpaz):
    """Player spaz repurposed as a painter.

    Two modes toggled by pick-up: 'Draw' (jump paints a Dot, punch/bomb
    shrink/grow the radius) and 'Color' (punch/bomb step the hue, jump
    steps the brightness value).

    NOTE(review): `startInvincible`, `canAcceptPowerups`, `demoMode` and the
    red/blue/green flags are assigned but never read in this class — confirm
    whether they are used by callers elsewhere.
    """
    def __init__(self, color=(1,1,1), highlight=(0.5,0.5,0.5), character="Spaz", sourcePlayer=None, startInvincible=True,
                 canAcceptPowerups=True, powerupsExpire=False, demoMode=False):
        self._player = sourcePlayer
        self.mode = 'Draw'
        # Brush state: radius of the next Dot, hue as an RGB triple, and a
        # brightness multiplier applied when painting.
        self.dotRadius = .5
        self.red = True
        self.blue = True
        self.green = True
        self.value = 1
        self.color = [1.0,0.0,0.0]
        bs.PlayerSpaz.__init__(self, color, highlight, character, sourcePlayer, powerupsExpire)
    def onBombPress(self):
        # Draw mode: grow the brush. Color mode: step the hue forward.
        if self.mode == 'Draw':
            self.dotRadius += .1
            self.setScoreText("Radius: " + str(self.dotRadius), (1,1,1))
        elif self.mode == "Color":
            # Deliberate chain of plain `if`s (not elif): several branches
            # may fire in one press while walking around the RGB color wheel.
            if self.color[0] >= 1:
                if self.color[2] == 0: self.color[1] += .1
                else: self.color[2] -= .1
            if self.color[1] >= 1:
                if self.color[0] == 0: self.color[2] += .1
                else: self.color[0] -= .1
            if self.color[2] >= 1:
                if self.color[1] == 0: self.color[0] += .1
                else: self.color[1] -= .1
            # Clamp each channel into [0, 1].
            for i in range(len(self.color)):
                if self.color[i] < 0: self.color[i] = 0
                if self.color[i] > 1: self.color[i] = 1
            color = (self.color[0]*self.value, self.color[1]*self.value, self.color[2]*self.value)
            self.setScoreText("COLOR", color)
    def onPunchPress(self):
        # Draw mode: shrink the brush (floor at 0). Color mode: step the hue
        # in the opposite direction from onBombPress.
        if self.mode == 'Draw':
            self.dotRadius -= .1
            if self.dotRadius < .05: self.dotRadius = 0
            self.setScoreText("Radius: " + str(self.dotRadius), (1,1,1))
        elif self.mode == "Color":
            # Mirror of onBombPress's walk around the RGB color wheel.
            if self.color[0] >= 1:
                if self.color[1] == 0: self.color[2] += .1
                else: self.color[1] -= .1
            if self.color[1] >= 1:
                if self.color[2] == 0: self.color[0] += .1
                else: self.color[2] -= .1
            if self.color[2] >= 1:
                if self.color[0] == 0: self.color[1] += .1
                else: self.color[0] -= .1
            # Clamp each channel into [0, 1].
            for i in range(len(self.color)):
                if self.color[i] < 0: self.color[i] = 0
                if self.color[i] > 1: self.color[i] = 1
            color = (self.color[0]*self.value, self.color[1]*self.value, self.color[2]*self.value)
            self.setScoreText("COLOR", color)
    def onJumpPress(self):
        # Draw mode: paint a Dot under the player. Color mode: step the
        # brightness value, wrapping from >1 back to 0.
        if self.mode == 'Draw':
            color = (self.color[0]*self.value, self.color[1]*self.value, self.color[2]*self.value)
            # Place the dot slightly below the spaz's center.
            pos = (self.node.positionCenter[0], self.node.positionCenter[1]-2, self.node.positionCenter[2])
            dot = Dot(position=pos, color = color, radius=self.dotRadius)
        elif self.mode == "Color":
            self.value += .1
            if self.value > 1 : self.value = 0
            self.setScoreText("Value: " + str(round(self.value,2)), (self.color[0]*self.value, self.color[1]*self.value, self.color[2]*self.value))
    def onPickUpPress(self):
        # Toggle between the two brush modes.
        if self.mode == 'Draw': self.mode = 'Color'
        elif self.mode == "Color": self.mode = "Draw"
        self.setScoreText(self.mode + " Mode", (1,1,1))
class Paint(bs.CoopGameActivity):
    """Free-draw co-op activity: players spawn as Artists and paint dots."""

    @classmethod
    def getName(cls):
        """Display name of the activity."""
        return 'Paint'

    @classmethod
    def getScoreInfo(cls):
        """Scoring configuration (plain points)."""
        return {'scoreType': 'points'}

    @classmethod
    def getDescription(cls, sessionType):
        """Short description shown in the game list."""
        return 'Create a masterpiece.'

    @classmethod
    def getSupportedMaps(cls, sessionType):
        """Maps this activity can run on."""
        return ['Doom Shroom']

    @classmethod
    def supportsSessionType(cls, sessionType):
        """Only co-op sessions are supported."""
        # Simplified from `True if ... else False`; issubclass returns a bool.
        return issubclass(sessionType, bs.CoopSession)

    def __init__(self, settings):
        bs.CoopGameActivity.__init__(self, settings)
        # Credit line pinned to the bottom of the screen.
        self.info = bs.NodeActor(bs.newNode('text',
                           attrs={'vAttach': 'bottom',
                                  'hAlign': 'center',
                                  'vrDepth': 0,
                                  'color': (0, .2, 0),
                                  'shadow': 1.0,
                                  'flatness': 1.0,
                                  'position': (0, 0),
                                  'scale': 0.8,
                                  'text': "Created by MattZ45986 on Github",
                                  }))

    def onTransitionIn(self):
        bs.CoopGameActivity.onTransitionIn(self, music='ForwardMarch')

    def onBegin(self):
        bs.CoopGameActivity.onBegin(self)

    def spawnPlayerSpaz(self, player, position=(0, 5, -3), angle=None):
        """Spawn the player as an Artist and wire up their controls.

        NOTE(review): `position` and `angle` are accepted for interface
        compatibility but ignored; the stand position below is hard-coded.
        """
        # Dropped the unused `name = player.getName()` local from the
        # original; the name was never referenced.
        spaz = Artist(color=player.color,
                      highlight=player.highlight,
                      character=player.character,
                      sourcePlayer=player)
        player.setActor(spaz)
        player.actor.connectControlsToPlayer()
        spaz.handleMessage(bs.StandMessage((0, 3, 0), 90))
|
fe621391ac7f38d59256b2fc2003d677dbd116c6
|
3420cceca75758b7d0b54da3a550456234bdbe05
|
/scripts/sgdml_datasets_from_model.py
|
5fc2a1a77c6bc0f588876d5fea3ab2fe8a3cae46
|
[
"MIT"
] |
permissive
|
stefanch/sGDML
|
8663c3791bd7521c6cc7e320f493d96240bc07f3
|
b8e88de09301a8b3c35b4aee390138dec1b42521
|
refs/heads/master
| 2023-08-18T23:27:19.739089
| 2023-06-08T12:49:46
| 2023-06-08T12:49:46
| 140,593,875
| 127
| 41
|
MIT
| 2023-08-31T09:40:09
| 2018-07-11T15:20:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,388
|
py
|
sgdml_datasets_from_model.py
|
#!/usr/bin/python
# MIT License
#
# Copyright (c) 2018 Stefan Chmiela
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import argparse
import os
import sys
import numpy as np
from sgdml.utils import io, ui
# CLI: given a trained sGDML model and the dataset it references, write out
# the exact train/validation subsets (as .npz files) that built the model.
parser = argparse.ArgumentParser(
    description='Extracts the training and test data subsets from a dataset that were used to construct a model.'
)
parser.add_argument(
    'model',
    metavar='<model_file>',
    type=lambda x: io.is_file_type(x, 'model'),
    help='path to model file',
)
parser.add_argument(
    'dataset',
    metavar='<dataset_file>',
    type=lambda x: io.is_file_type(x, 'dataset'),
    help='path to dataset file referenced in model',
)
parser.add_argument(
    '-o',
    '--overwrite',
    dest='overwrite',
    action='store_true',
    help='overwrite existing files',
)
args = parser.parse_args()
# io.is_file_type returns (path, loaded contents) pairs.
model_path, model = args.model
dataset_path, dataset = args.dataset
for s in ['train', 'valid']:
    # Refuse to extract if the dataset is not the one the model was built on.
    if dataset['md5'] != model['md5_' + s]:
        sys.exit(
            ui.fail_str('[FAIL]')
            + ' Dataset fingerprint does not match the one referenced in model for \'%s\'.'
            % s
        )
    # Slice out the rows the model recorded as its train/valid indices.
    idxs = model['idxs_' + s]
    R = dataset['R'][idxs, :, :]
    E = dataset['E'][idxs]
    F = dataset['F'][idxs, :, :]
    base_vars = {
        'type': 'd',
        'name': dataset['name'].astype(str),
        'theory': dataset['theory'].astype(str),
        'z': dataset['z'],
        'R': R,
        'E': E,
        'F': F,
    }
    # Fingerprint the subset itself so downstream tools can verify it.
    base_vars['md5'] = io.dataset_md5(base_vars)
    # Output name: <dataset basename>_train.npz / _valid.npz.
    subset_file_name = '%s_%s.npz' % (
        os.path.splitext(os.path.basename(dataset_path))[0],
        s,
    )
    file_exists = os.path.isfile(subset_file_name)
    if file_exists and args.overwrite:
        # NOTE(review): message says "model file" but a dataset subset file
        # is what gets overwritten here.
        print(ui.info_str('[INFO]') + ' Overwriting existing model file.')
    if not file_exists or args.overwrite:
        np.savez_compressed(subset_file_name, **base_vars)
        ui.callback(1, disp_str='Extracted %s dataset saved to \'%s\'' % (s, subset_file_name)) # DONE
    else:
        # Existing file and no -o: warn and leave it untouched.
        print(
            ui.warn_str('[WARN]')
            + ' %s dataset \'%s\' already exists.' % (s.capitalize(), subset_file_name)
            + '\n Run \'python %s -o %s %s\' to overwrite.\n'
            % (os.path.basename(__file__), model_path, dataset_path)
        )
sys.exit()
|
33d1d1e4ecb6fa9e1427f35d1fd46bee148ee425
|
6630694f401f6f475dd81bb01ff9368db844ccff
|
/tests/test_models/test_backbones/test_tinyvit.py
|
9747b76b3a465069fe66eb41e0a51dac28f9bd5f
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmpretrain
|
98a4d6b3bb747efc3d50decebf84fc3ffa41076a
|
d2ccc44a2c8e5d49bb26187aff42f2abc90aee28
|
refs/heads/main
| 2023-08-30T19:11:24.771498
| 2023-08-23T02:45:18
| 2023-08-23T02:45:18
| 278,415,292
| 652
| 186
|
Apache-2.0
| 2023-09-08T08:01:40
| 2020-07-09T16:25:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,284
|
py
|
test_tinyvit.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmpretrain.models.backbones import TinyViT
def test_assertion():
    """Invalid TinyViT constructor arguments must be rejected."""
    # Unknown architecture names raise.
    with pytest.raises(AssertionError):
        TinyViT(arch='unknown')
    # TinyViT out_indices should be a valid depth; out-of-range values raise.
    with pytest.raises(AssertionError):
        TinyViT(out_indices=-100)
def test_tinyvit():
    """Smoke-test TinyViT forward passes across arch presets and options."""
    # Test forward: default '5m' preset yields a single pooled feature.
    model = TinyViT(arch='5m')
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 1
    assert feat[0].shape == torch.Size([1, 320])
    # Test forward with multiple outputs: one pooled feature per stage.
    model = TinyViT(arch='5m', out_indices=(0, 1, 2, 3))
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 128])
    assert feat[1].shape == torch.Size([1, 160])
    assert feat[2].shape == torch.Size([1, 320])
    assert feat[3].shape == torch.Size([1, 320])
    # Test with custom arch: per-stage widths follow the channels list.
    model = TinyViT(
        arch={
            'depths': [2, 3, 4, 5],
            'channels': [64, 128, 256, 448],
            'num_heads': [4, 4, 4, 4]
        },
        out_indices=(0, 1, 2, 3))
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 128])
    assert feat[1].shape == torch.Size([1, 256])
    assert feat[2].shape == torch.Size([1, 448])
    assert feat[3].shape == torch.Size([1, 448])
    # Test without gap before final norm: spatial maps come back instead of
    # pooled vectors.
    model = TinyViT(
        arch='21m', out_indices=(0, 1, 2, 3), gap_before_final_norm=False)
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 192, 28, 28])
    assert feat[1].shape == torch.Size([1, 384, 14, 14])
    assert feat[2].shape == torch.Size([1, 576, 7, 7])
    assert feat[3].shape == torch.Size([1, 576, 7, 7])
    # Test frozen_stages: the first `frozen_stages` stages are in eval mode.
    model = TinyViT(arch='11m', out_indices=(0, 1, 2, 3), frozen_stages=2)
    model.init_weights()
    model.train()
    for i in range(2):
        assert not model.stages[i].training
    for i in range(2, 4):
        assert model.stages[i].training
|
61e35536f99a74ffdeabcffe73d0772e33ffa0e1
|
8110781934e5ac078ace2430aa5d8d4c91837562
|
/tests/tools/txt2tikztiming.py
|
9c6cd3a19e54acf9c65a48f0b6f477d9ff3505fb
|
[
"ISC",
"MIT",
"LicenseRef-scancode-other-copyleft",
"BSD-2-Clause"
] |
permissive
|
YosysHQ/yosys
|
91acb592cb47b8298b115572faa02edfa468c1f0
|
72bec94ef4f0ce8090f22c16cd5163b816e8c698
|
refs/heads/master
| 2023-09-01T17:46:13.846954
| 2023-09-01T08:15:51
| 2023-09-01T08:15:51
| 7,454,197
| 2,389
| 769
|
ISC
| 2023-09-13T15:43:31
| 2013-01-05T10:10:48
|
C++
|
UTF-8
|
Python
| false
| false
| 3,144
|
py
|
txt2tikztiming.py
|
#!/usr/bin/env python3
import argparse
import fileinput
import sys
parser = argparse.ArgumentParser(description='Convert vcd2txt output to tikz-timing line.')
parser.add_argument('filename', metavar='FILE', help='input txt file')
parser.add_argument('signame', metavar='SIG', help='Signal name')
parser.add_argument('-s', metavar='scale', default=1.0, type=float, help='Scale all time spans with this factor')
parser.add_argument('-l', action='store_true', help='Logic signal (high/low)')
parser.add_argument('-b', action='store_true', help='Display binary value')
parser.add_argument('-x', action='store_true', help='Display hex value')
parser.add_argument('-d', action='store_true', help='Display decimal value')
args = parser.parse_args()
start_time = None
stop_time = None
time_val = { }
def value_to_logic(value):
found_x = False
for char in value:
if char == '1':
return "H"
if char == 'x':
found_x = True
return "U" if found_x else "L"
def value_to_binary(value):
return "D{%s}" % value
def value_to_hex(value):
hex_string = ""
found_def = False
while len(value) % 4 != 0:
value = "0" + value
while len(value) != 0:
bin_digits = value[0:4]
hex_digit = 0
value = value[4:]
for b in bin_digits:
if b == '0':
hex_digit = hex_digit * 2
elif b == '1':
hex_digit = hex_digit * 2 + 1
else:
hex_digit += 100
if hex_digit > 15:
hex_string += "x"
else:
found_def = True
hex_string += "0123456789abcdef"[hex_digit]
if not found_def:
return "U";
return "D{%s}" % hex_string
def value_to_decimal(value):
val = 0
found_def = False
found_undef = False
for digit in value:
if digit == 'x':
found_undef = True
else:
val = val*2 + int(digit)
found_def = True
if found_def:
if found_undef:
return "D{X}"
else:
return "D{%d}" % val
return "U"
for line in fileinput.input(args.filename):
(node, time, name, value) = line.strip().split('\t')
time = int(time)
if start_time is None or start_time > time:
start_time = time
if stop_time is None or stop_time < time:
stop_time = time
if name == args.signame:
if args.l:
time_val[+time] = value_to_logic(value)
elif args.b:
time_val[+time] = value_to_binary(value)
elif args.x:
time_val[+time] = value_to_hex(value)
elif args.d:
time_val[+time] = value_to_decimal(value)
else:
time_val[+time] = value
if start_time not in time_val:
time_val[start_time] = "S"
last_time = None
last_value = None
for t in sorted(time_val.keys()):
if last_time is not None:
print("%f%s" % ((t - last_time)*args.s, last_value), end='')
(last_time, last_value) = (t, time_val[t])
if last_time < stop_time:
print("%f%s" % ((stop_time - last_time)*args.s, last_value), end='')
print('')
|
15ce364ac740f2d322f003355b9a5414991cc548
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxe/tests/ShowLispEthernetMapCachePrefix/cli/equal/golden_output2_expected.py
|
224c39f248a399c2d61e5f9a9bb1772bba502025
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,257
|
py
|
golden_output2_expected.py
|
# Expected parsed structure for ShowLispEthernetMapCachePrefix (golden
# output 2): one map-cache entry under LISP instance 8188 with a single
# RLOC (1.1.1.10) and its state/probe bookkeeping.
expected_output = {
    "lisp_id": {
        0: {
            "instance_id": {
                8188: {
                    "eid_table": "Vlan 210",
                    "entries": 1,
                    "eid_prefix": {
                        "0017.0100.0001/48": {
                            "uptime": "01:10:28",
                            "expiry_time": "22:49:31",
                            "via": "map-reply",
                            "map_reply_state": "complete",
                            "prefix_location": "local-to-site",
                            "source_type": "map-reply",
                            "last_modified": "01:10:28,",
                            "source_ip": "1.1.1.10",
                            "prefix_state": "Active",
                            "encap": "dynamic-EID traffic",
                            "rloc_set": {
                                "1.1.1.10": {
                                    "uptime": "01:10:28",
                                    "rloc_state": "up",
                                    "priority": 10,
                                    "weight": 10,
                                    "encap_iid": "-",
                                    "last_state_change": {
                                        "time": "01:10:28",
                                        "count": 1,
                                    },
                                    "last_route_reach_change": {
                                        "time": "01:10:28",
                                        "count": 1,
                                    },
                                    "last_pri_weight_change": {
                                        "priority": "never",
                                        "weight": "never",
                                    },
                                    "rloc_probe_sent": {
                                        "time": "01:10:28",
                                        "rtt": 1,
                                        "rtt_unit": "ms",
                                    },
                                }
                            },
                        }
                    },
                }
            }
        }
    }
}
|
81ce63b46a70e72e76b10a5a40b1de97d3a494e9
|
41db6c672362ccafdd28af40ecf7df51ffa90a15
|
/guppy/heapy/test/test_gsl.py
|
b6714add5a5776ba9b4b5ab88a702b747ac76a39
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
zhuyifei1999/guppy3
|
e5649a066199a92dc3bf8e77bdd6ffb44e790449
|
1b2db87af36388e43afd6ce5774b869bcc4a9452
|
refs/heads/master
| 2023-06-24T23:03:52.612921
| 2023-06-17T01:18:06
| 2023-06-17T01:54:06
| 202,611,372
| 371
| 24
|
MIT
| 2023-06-17T01:54:09
| 2019-08-15T21:05:15
|
Python
|
UTF-8
|
Python
| false
| false
| 421
|
py
|
test_gsl.py
|
# Test the gsl subpackage
# Ideally this should be a top level test.
def test_main(debug=0):
    """Run the self-tests of the guppy.gsl subpackage modules, in order.

    NOTE(review): `debug` is accepted but unused by this body.
    """
    from guppy import Root
    gsl = Root().guppy.gsl
    gsl.Document._test_main_()
    gsl.DottedTree.test_main()
    # NOTE(review): presumably redirects file I/O for the later module
    # tests — confirm against FileIO's implementation.
    gsl.FileIO.set_test_mode()
    gsl.Filer._test_main_()
    gsl.Gsml._test_main_()
    gsl.Main._test_main_()
    gsl.SpecNodes.test_main()
    # gsl.Text.test()
if __name__ == "__main__":
    test_main()
|
830bf432012579c11a4cfadb60a58821706e30e9
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/confidentialledger/azure-confidentialledger/azure/confidentialledger/_patch.py
|
399b716b05f79f354afe8ea4498b9243013a53e9
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 5,013
|
py
|
_patch.py
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""Customize generated code here.
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
"""
import os
from typing import Any, List, Union
from azure.core.credentials import TokenCredential
from azure.core.pipeline import policies
from azure.confidentialledger._client import ConfidentialLedgerClient as GeneratedClient
from azure.confidentialledger.certificate import ConfidentialLedgerCertificateClient
__all__: List[str] = [
"ConfidentialLedgerCertificateCredential",
"ConfidentialLedgerClient",
] # Add all objects you want publicly available to users at this package level
def patch_sdk():
"""Do not remove from this file.
`patch_sdk` is a last resort escape hatch that allows you to do customizations
you can't accomplish using the techniques described in
https://aka.ms/azsdk/python/dpcodegen/python/customize
"""
class ConfidentialLedgerCertificateCredential:
"""A certificate-based credential for the ConfidentialLedgerClient.
:param certificate_path: Path to the PEM certificate to use for authentication.
:type certificate_path: Union[bytes, str, os.PathLike]
"""
def __init__(self, certificate_path: Union[bytes, str, os.PathLike]):
self._certificate_path = certificate_path
@property
def certificate_path(self) -> Union[bytes, str, os.PathLike]:
"""The path to the certificate file for this credential.
:return: The path to the certificate file for this credential.
:rtype: Union[bytes, str, os.PathLike]"""
return self._certificate_path
class ConfidentialLedgerClient(GeneratedClient):
"""The ConfidentialLedgerClient writes and retrieves ledger entries against the Confidential
Ledger service.
:param endpoint: The Confidential Ledger URL, for example
https://contoso.confidentialledger.azure.com.
:type endpoint: str
:param credential: A credential object for authenticating with the Confidential Ledger.
:type credential: Union[
~azure.confidentialledger.ConfidentialLedgerCertificateCredential,
~azure.core.credentials.TokenCredential]
:keyword ledger_certificate_path: The path to the Confidential Ledger's TLS certificate. If this
file does not exist yet, the Confidential Ledger's TLS certificate will be fetched and saved
to this file.
:paramtype ledger_certificate_path: Union[bytes, str, os.PathLike]
:keyword api_version: Api Version. Default value is "2022-05-13". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
endpoint: str,
credential: Union[ConfidentialLedgerCertificateCredential, TokenCredential],
*,
ledger_certificate_path: Union[bytes, str, os.PathLike],
**kwargs: Any,
) -> None:
# Remove some kwargs first so that there aren't unexpected kwargs passed to
# get_ledger_identity.
if isinstance(credential, ConfidentialLedgerCertificateCredential):
auth_policy = None
else:
credential_scopes = kwargs.pop("credential_scopes", ["https://confidential-ledger.azure.com/.default"])
auth_policy = kwargs.pop(
"authentication_policy",
policies.BearerTokenCredentialPolicy(credential, *credential_scopes, **kwargs),
)
if os.path.isfile(ledger_certificate_path) is False:
# We'll need to fetch the TLS certificate.
identity_service_client = ConfidentialLedgerCertificateClient(**kwargs)
# Ledger URIs are of the form https://<ledger id>.confidential-ledger.azure.com.
ledger_id = endpoint.replace("https://", "").split(".")[0]
ledger_cert = identity_service_client.get_ledger_identity(ledger_id, **kwargs)
with open(ledger_certificate_path, "w", encoding="utf-8") as outfile:
outfile.write(ledger_cert["ledgerTlsCertificate"])
# For ConfidentialLedgerCertificateCredential, pass the path to the certificate down to the
# PipelineCLient.
if isinstance(credential, ConfidentialLedgerCertificateCredential):
kwargs["connection_cert"] = kwargs.get("connection_cert", credential.certificate_path)
# The auto-generated client has authentication disabled so we can customize authentication.
# If the credential is the typical TokenCredential, then construct the authentication policy
# the normal way.
else:
kwargs["authentication_policy"] = auth_policy
# Customize the underlying client to use a self-signed TLS certificate.
kwargs["connection_verify"] = kwargs.get("connection_verify", ledger_certificate_path)
super().__init__(endpoint, **kwargs)
|
d12f2762dbdc7e90d78983c5c0c33ebb77339f75
|
d7fd46dfd8aab520c4958fa065367e168b6bfee7
|
/benchmarks/parallelization_load_test_test.py
|
35d73ad27b0fff729b0dd37e81f314a36dfa2915
|
[
"MIT"
] |
permissive
|
facebookresearch/CompilerGym
|
f04a79fbfdbaf8afd6920ec205db6f1b6003d073
|
9e0c0beb12da1e1ea82ae6ce920713ee28dda4c9
|
refs/heads/development
| 2023-08-31T09:17:48.967970
| 2023-03-10T19:29:56
| 2023-03-10T19:29:56
| 312,059,069
| 787
| 126
|
MIT
| 2023-03-10T19:29:58
| 2020-11-11T18:44:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,473
|
py
|
parallelization_load_test_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Smoke test for //benchmarks:parallelization_load_test."""
from pathlib import Path
from absl import flags
from benchmarks.parallelization_load_test import main as load_test
from compiler_gym.util.capture_output import capture_output
from tests.pytest_plugins.common import set_command_line_flags, skip_on_ci
from tests.test_main import main
FLAGS = flags.FLAGS
pytest_plugins = ["tests.pytest_plugins.llvm", "tests.pytest_plugins.common"]
@skip_on_ci
def test_load_test(env, tmpwd):
del env # Unused.
del tmpwd # Unused.
set_command_line_flags(
[
"arv0",
"--env=llvm-v0",
"--benchmark=cbench-v1/crc32",
"--max_nproc=3",
"--nproc_increment=1",
"--num_steps=2",
"--num_episodes=2",
]
)
with capture_output() as out:
load_test(["argv0"])
assert "Run 1 threaded workers in " in out.stdout
assert "Run 1 process workers in " in out.stdout
assert "Run 2 threaded workers in " in out.stdout
assert "Run 2 process workers in " in out.stdout
assert "Run 3 threaded workers in " in out.stdout
assert "Run 3 process workers in " in out.stdout
assert Path("parallelization_load_test.csv").is_file()
if __name__ == "__main__":
main()
|
4bf8228d7babb3c4ef3e48fd7bf2db4ffa3af8a6
|
9efca95a55cb4df52d895d42f1ec10331516a734
|
/tools/c7n_azure/c7n_azure/resources/cdn.py
|
373c6539df944c4269ca803a51f64d63a7662289
|
[
"Apache-2.0"
] |
permissive
|
cloud-custodian/cloud-custodian
|
519e602abe00c642786441b64cc40857ef5bc9de
|
27563cf4571040f923124e1acb2463f11e372225
|
refs/heads/main
| 2023-09-04T10:54:55.963703
| 2023-09-01T17:40:17
| 2023-09-01T17:40:17
| 52,837,350
| 3,327
| 1,096
|
Apache-2.0
| 2023-09-14T14:03:30
| 2016-03-01T01:11:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,110
|
py
|
cdn.py
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n_azure.provider import resources
from c7n_azure.resources.arm import ArmResourceManager
from c7n.filters import Filter
from c7n.utils import type_schema
@resources.register('cdnprofile')
class CdnProfile(ArmResourceManager):
"""CDN Resource
:example:
Returns all CDNs with Standard_Verizon sku
.. code-block:: yaml
policies:
- name: standard-verizon
resource: azure.cdnprofile
filters:
- type: value
key: sku
op: in
value_type: normalize
value: Standard_Verizon
"""
class resource_type(ArmResourceManager.resource_type):
doc_groups = ['Media']
service = 'azure.mgmt.cdn'
client = 'CdnManagementClient'
enum_spec = ('profiles', 'list', None)
default_report_fields = (
'name',
'location',
'resourceGroup',
'sku.name'
)
resource_type = 'Microsoft.Cdn/profiles'
@CdnProfile.filter_registry.register('waf')
class WebAppFirewallFilter(Filter):
"""Check waf enabled/disabled on cdn profiles
:example:
.. code-block:: yaml
policies:
- name: test-waf-not-enabled
resource: azure.cdnprofile
filters:
- type: waf
state: Disabled
"""
schema = type_schema('waf',required=['state'],
state={'type': 'string', 'enum': ['Enabled', 'Disabled']})
def process(self, resources, event=None):
client = self.manager.get_client()
matched = []
for profiles in resources:
policies = list(client.security_policies.list_by_profile(
profiles["resourceGroup"],profiles["name"]))
if (self.data.get('state') == 'Disabled' and not policies) or (self.data.get('state')
== 'Enabled' and policies):
matched.append(profiles)
return matched
|
69abe278aad4a93518ad7da0664237549f49bf95
|
2b74dd623e2fd02167b74c8d1df29e53a8c95ce9
|
/docs/source/examples/client_paged_search_results.py
|
8497a2a44aec9842e257044fe4cf9e6acf7836b1
|
[
"MIT"
] |
permissive
|
twisted/ldaptor
|
19d6e5d38832a804e01444a30e9b24f415b83f7a
|
4bfe2897c8b9b510d647fb1c2a5b50c88d492ab1
|
refs/heads/master
| 2023-08-31T12:29:40.539819
| 2022-02-04T11:09:23
| 2022-02-04T11:09:23
| 21,404,464
| 153
| 57
|
MIT
| 2022-07-28T18:51:00
| 2014-07-01T22:43:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,769
|
py
|
client_paged_search_results.py
|
#! /usr/bin/env python
import argparse
import sys
from twisted.internet import defer
from twisted.internet.endpoints import clientFromString, connectProtocol
from twisted.internet.task import react
from ldaptor.protocols.ldap.ldapclient import LDAPClient
from ldaptor.protocols.ldap.ldapsyntax import LDAPEntry
from ldaptor.protocols import pureber
@defer.inlineCallbacks
def onConnect(client, args):
binddn = args.bind_dn
bindpw = args.passwd_file.read().strip()
if args.start_tls:
yield client.startTLS()
try:
yield client.bind(binddn, bindpw)
except Exception as ex:
print(ex)
raise
page_size = args.page_size
cookie = ""
page = 1
count = 0
while True:
results, cookie = yield process_entry(
client, args, args.filter, page_size=page_size, cookie=cookie
)
count += len(results)
print(f"Page {page}")
display_results(results)
if len(cookie) == 0:
break
page += 1
print(f"There were {count} results returned in total.")
@defer.inlineCallbacks
def process_entry(client, args, search_filter, page_size=100, cookie=""):
basedn = args.base_dn
control_value = pureber.BERSequence(
[
pureber.BERInteger(page_size),
pureber.BEROctetString(cookie),
]
)
controls = [("1.2.840.113556.1.4.319", None, control_value)]
o = LDAPEntry(client, basedn)
results, resp_controls = yield o.search(
filterText=search_filter,
attributes=["dn"],
controls=controls,
return_controls=True,
)
cookie = get_paged_search_cookie(resp_controls)
defer.returnValue((results, cookie))
def display_results(results):
for entry in results:
print(entry.dn.getText())
def get_paged_search_cookie(controls):
"""
Input: semi-parsed controls list from LDAP response;
list of tuples (controlType, criticality, controlValue).
Parses the controlValue and returns the cookie as a byte string.
"""
control_value = controls[0][2]
ber_context = pureber.BERDecoderContext()
ber_seq, bytes_used = pureber.berDecodeObject(ber_context, control_value)
raw_cookie = ber_seq[1]
cookie = raw_cookie.value
return cookie
def onError(err):
err.printDetailedTraceback(file=sys.stderr)
def main(reactor, args):
endpoint_str = args.endpoint
e = clientFromString(reactor, endpoint_str)
d = connectProtocol(e, LDAPClient())
d.addCallback(onConnect, args)
d.addErrback(onError)
return d
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="AD LDAP demo.")
parser.add_argument(
"endpoint",
action="store",
help="The Active Directory service endpoint. See "
"https://twistedmatrix.com/documents/current/core/howto/endpoints.html#clients",
)
parser.add_argument(
"bind_dn", action="store", help="The DN to BIND to the service as."
)
parser.add_argument(
"passwd_file",
action="store",
type=argparse.FileType("r"),
help="A file containing the password used to log into the service.",
)
parser.add_argument(
"base_dn", action="store", help="The base DN to start from when searching."
)
parser.add_argument("-f", "--filter", action="store", help="LDAP filter")
parser.add_argument(
"-p",
"--page-size",
type=int,
action="store",
default=100,
help="Page size (default 100).",
)
parser.add_argument(
"--start-tls",
action="store_true",
help="Request StartTLS after connecting to the service.",
)
args = parser.parse_args()
react(main, [args])
|
9a2297cd87580f230df294bed552c5c403e14a4b
|
1bc67a91d85a7106106ca31307ef9ee93f1d1a20
|
/src/py/flwr/client/secure_aggregation/secaggplus_handler.py
|
d05fd61878a3f303acd106831d5c2fba9a344a29
|
[
"Apache-2.0"
] |
permissive
|
adap/flower
|
4915d143c674eb675504d585e1e90ed06833812f
|
55be690535e5f3feb33c888c3e4a586b7bdbf489
|
refs/heads/main
| 2023-08-17T01:18:12.168723
| 2023-08-16T17:17:48
| 2023-08-16T17:17:48
| 241,095,326
| 2,999
| 658
|
Apache-2.0
| 2023-09-14T15:43:22
| 2020-02-17T11:51:29
|
Python
|
UTF-8
|
Python
| false
| false
| 18,910
|
py
|
secaggplus_handler.py
|
# Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Message handler for the SecAgg+ protocol."""
import os
from dataclasses import dataclass, field
from logging import ERROR, INFO, WARNING
from typing import Any, Dict, List, Optional, Tuple, Union, cast
from flwr.client.client import Client
from flwr.client.numpy_client import NumPyClient
from flwr.common import (
bytes_to_ndarray,
ndarray_to_bytes,
ndarrays_to_parameters,
parameters_to_ndarrays,
)
from flwr.common.logger import log
from flwr.common.secure_aggregation.crypto.shamir import create_shares
from flwr.common.secure_aggregation.crypto.symmetric_encryption import (
bytes_to_private_key,
bytes_to_public_key,
decrypt,
encrypt,
generate_key_pairs,
generate_shared_key,
private_key_to_bytes,
public_key_to_bytes,
)
from flwr.common.secure_aggregation.ndarrays_arithmetic import (
factor_combine,
parameters_addition,
parameters_mod,
parameters_multiply,
parameters_subtraction,
)
from flwr.common.secure_aggregation.quantization import quantize
from flwr.common.secure_aggregation.secaggplus_constants import (
KEY_ACTIVE_SECURE_ID_LIST,
KEY_CIPHERTEXT_LIST,
KEY_CLIPPING_RANGE,
KEY_DEAD_SECURE_ID_LIST,
KEY_DESTINATION_LIST,
KEY_MASKED_PARAMETERS,
KEY_MOD_RANGE,
KEY_PARAMETERS,
KEY_PUBLIC_KEY_1,
KEY_PUBLIC_KEY_2,
KEY_SAMPLE_NUMBER,
KEY_SECURE_ID,
KEY_SECURE_ID_LIST,
KEY_SHARE_LIST,
KEY_SHARE_NUMBER,
KEY_SOURCE_LIST,
KEY_STAGE,
KEY_TARGET_RANGE,
KEY_THRESHOLD,
STAGE_COLLECT_MASKED_INPUT,
STAGE_SETUP,
STAGE_SHARE_KEYS,
STAGE_UNMASK,
STAGES,
)
from flwr.common.secure_aggregation.secaggplus_utils import (
pseudo_rand_gen,
share_keys_plaintext_concat,
share_keys_plaintext_separate,
)
from flwr.common.typing import FitIns, Value
from .handler import SecureAggregationHandler
@dataclass
# pylint: disable-next=too-many-instance-attributes
class SecAggPlusState:
"""State of the SecAgg+ protocol."""
sid: int = 0
sample_num: int = 0
share_num: int = 0
threshold: int = 0
clipping_range: float = 0.0
target_range: int = 0
mod_range: int = 0
# Secret key (sk) and public key (pk)
sk1: bytes = b""
pk1: bytes = b""
sk2: bytes = b""
pk2: bytes = b""
# Random seed for generating the private mask
rd_seed: bytes = b""
rd_seed_share_dict: Dict[int, bytes] = field(default_factory=dict)
sk1_share_dict: Dict[int, bytes] = field(default_factory=dict)
# The dict of the shared secrets from sk2
ss2_dict: Dict[int, bytes] = field(default_factory=dict)
public_keys_dict: Dict[int, Tuple[bytes, bytes]] = field(default_factory=dict)
client: Optional[Union[Client, NumPyClient]] = None
class SecAggPlusHandler(SecureAggregationHandler):
"""Message handler for the SecAgg+ protocol."""
_shared_state = SecAggPlusState()
_current_stage = STAGE_UNMASK
def handle_secure_aggregation(
self, named_values: Dict[str, Value]
) -> Dict[str, Value]:
"""Handle incoming message and return results, following the SecAgg+ protocol.
Parameters
----------
named_values : Dict[str, Value]
The named values retrieved from the SecureAggregation sub-message
of Task message in the server's TaskIns.
Returns
-------
Dict[str, Value]
The final/intermediate results of the SecAgg+ protocol.
"""
# Check if self is a client
if not isinstance(self, (Client, NumPyClient)):
raise TypeError(
"The subclass of SecAggPlusHandler must be "
"the subclass of Client or NumPyClient."
)
# Check the validity of the next stage
check_stage(self._current_stage, named_values)
# Update the current stage
self._current_stage = cast(str, named_values.pop(KEY_STAGE))
# Check the validity of the `named_values` based on the current stage
check_named_values(self._current_stage, named_values)
# Execute
if self._current_stage == STAGE_SETUP:
self._shared_state = SecAggPlusState(client=self)
return _setup(self._shared_state, named_values)
if self._current_stage == STAGE_SHARE_KEYS:
return _share_keys(self._shared_state, named_values)
if self._current_stage == STAGE_COLLECT_MASKED_INPUT:
return _collect_masked_input(self._shared_state, named_values)
if self._current_stage == STAGE_UNMASK:
return _unmask(self._shared_state, named_values)
raise ValueError(f"Unknown secagg stage: {self._current_stage}")
def check_stage(current_stage: str, named_values: Dict[str, Value]) -> None:
"""Check the validity of the next stage."""
# Check the existence of KEY_STAGE
if KEY_STAGE not in named_values:
raise KeyError(
f"The required key '{KEY_STAGE}' is missing from the input `named_values`."
)
# Check the value type of the KEY_STAGE
next_stage = named_values[KEY_STAGE]
if not isinstance(next_stage, str):
raise TypeError(
f"The value for the key '{KEY_STAGE}' must be of type {str}, "
f"but got {type(next_stage)} instead."
)
# Check the validity of the next stage
if next_stage == STAGE_SETUP:
if current_stage != STAGE_UNMASK:
log(WARNING, "Restart from the setup stage")
# If stage is not "setup",
# the stage from `named_values` should be the expected next stage
else:
expected_next_stage = STAGES[(STAGES.index(current_stage) + 1) % len(STAGES)]
if next_stage != expected_next_stage:
raise ValueError(
"Abort secure aggregation: "
f"expect {expected_next_stage} stage, but receive {next_stage} stage"
)
# pylint: disable-next=too-many-branches
def check_named_values(stage: str, named_values: Dict[str, Value]) -> None:
"""Check the validity of the input `named_values`."""
# Check `named_values` for the setup stage
if stage == STAGE_SETUP:
key_type_pairs = [
(KEY_SAMPLE_NUMBER, int),
(KEY_SECURE_ID, int),
(KEY_SHARE_NUMBER, int),
(KEY_THRESHOLD, int),
(KEY_CLIPPING_RANGE, float),
(KEY_TARGET_RANGE, int),
(KEY_MOD_RANGE, int),
]
for key, expected_type in key_type_pairs:
if key not in named_values:
raise KeyError(
f"Stage {STAGE_SETUP}: the required key '{key}' is "
"missing from the input `named_values`."
)
# Bool is a subclass of int in Python,
# so `isinstance(v, int)` will return True even if v is a boolean.
# pylint: disable-next=unidiomatic-typecheck
if type(named_values[key]) is not expected_type:
raise TypeError(
f"Stage {STAGE_SETUP}: The value for the key '{key}' "
f"must be of type {expected_type}, "
f"but got {type(named_values[key])} instead."
)
elif stage == STAGE_SHARE_KEYS:
for key, value in named_values.items():
if (
not isinstance(value, list)
or len(value) != 2
or not isinstance(value[0], bytes)
or not isinstance(value[1], bytes)
):
raise TypeError(
f"Stage {STAGE_SHARE_KEYS}: "
f"the value for the key '{key}' must be a list of two bytes."
)
elif stage == STAGE_COLLECT_MASKED_INPUT:
key_type_pairs = [
(KEY_CIPHERTEXT_LIST, bytes),
(KEY_SOURCE_LIST, int),
(KEY_PARAMETERS, bytes),
]
for key, expected_type in key_type_pairs:
if key not in named_values:
raise KeyError(
f"Stage {STAGE_COLLECT_MASKED_INPUT}: "
f"the required key '{key}' is "
"missing from the input `named_values`."
)
if not isinstance(named_values[key], list) or any(
elm
for elm in cast(List[Any], named_values[key])
# pylint: disable-next=unidiomatic-typecheck
if type(elm) is not expected_type
):
raise TypeError(
f"Stage {STAGE_COLLECT_MASKED_INPUT}: "
f"the value for the key '{key}' "
f"must be of type List[{expected_type.__name__}]"
)
elif stage == STAGE_UNMASK:
key_type_pairs = [
(KEY_ACTIVE_SECURE_ID_LIST, int),
(KEY_DEAD_SECURE_ID_LIST, int),
]
for key, expected_type in key_type_pairs:
if key not in named_values:
raise KeyError(
f"Stage {STAGE_UNMASK}: "
f"the required key '{key}' is "
"missing from the input `named_values`."
)
if not isinstance(named_values[key], list) or any(
elm
for elm in cast(List[Any], named_values[key])
# pylint: disable-next=unidiomatic-typecheck
if type(elm) is not expected_type
):
raise TypeError(
f"Stage {STAGE_UNMASK}: "
f"the value for the key '{key}' "
f"must be of type List[{expected_type.__name__}]"
)
else:
raise ValueError(f"Unknown secagg stage: {stage}")
def _setup(state: SecAggPlusState, named_values: Dict[str, Value]) -> Dict[str, Value]:
# Assigning parameter values to object fields
sec_agg_param_dict = named_values
state.sample_num = cast(int, sec_agg_param_dict[KEY_SAMPLE_NUMBER])
state.sid = cast(int, sec_agg_param_dict[KEY_SECURE_ID])
log(INFO, "Client %d: starting stage 0...", state.sid)
state.share_num = cast(int, sec_agg_param_dict[KEY_SHARE_NUMBER])
state.threshold = cast(int, sec_agg_param_dict[KEY_THRESHOLD])
state.clipping_range = cast(float, sec_agg_param_dict[KEY_CLIPPING_RANGE])
state.target_range = cast(int, sec_agg_param_dict[KEY_TARGET_RANGE])
state.mod_range = cast(int, sec_agg_param_dict[KEY_MOD_RANGE])
# Dictionaries containing client secure IDs as keys
# and their respective secret shares as values.
state.rd_seed_share_dict = {}
state.sk1_share_dict = {}
# Dictionary containing client secure IDs as keys
# and their respective shared secrets (with this client) as values.
state.ss2_dict = {}
# Create 2 sets private public key pairs
# One for creating pairwise masks
# One for encrypting message to distribute shares
sk1, pk1 = generate_key_pairs()
sk2, pk2 = generate_key_pairs()
state.sk1, state.pk1 = private_key_to_bytes(sk1), public_key_to_bytes(pk1)
state.sk2, state.pk2 = private_key_to_bytes(sk2), public_key_to_bytes(pk2)
log(INFO, "Client %d: stage 0 completes. uploading public keys...", state.sid)
return {KEY_PUBLIC_KEY_1: state.pk1, KEY_PUBLIC_KEY_2: state.pk2}
# pylint: disable-next=too-many-locals
def _share_keys(
state: SecAggPlusState, named_values: Dict[str, Value]
) -> Dict[str, Value]:
named_bytes_tuples = cast(Dict[str, Tuple[bytes, bytes]], named_values)
key_dict = {int(sid): (pk1, pk2) for sid, (pk1, pk2) in named_bytes_tuples.items()}
log(INFO, "Client %d: starting stage 1...", state.sid)
state.public_keys_dict = key_dict
# Check if the size is larger than threshold
if len(state.public_keys_dict) < state.threshold:
raise Exception("Available neighbours number smaller than threshold")
# Check if all public keys are unique
pk_list: List[bytes] = []
for pk1, pk2 in state.public_keys_dict.values():
pk_list.append(pk1)
pk_list.append(pk2)
if len(set(pk_list)) != len(pk_list):
raise Exception("Some public keys are identical")
# Check if public keys of this client are correct in the dictionary
if (
state.public_keys_dict[state.sid][0] != state.pk1
or state.public_keys_dict[state.sid][1] != state.pk2
):
raise Exception(
"Own public keys are displayed in dict incorrectly, should not happen!"
)
# Generate the private mask seed
state.rd_seed = os.urandom(32)
# Create shares for the private mask seed and the first private key
b_shares = create_shares(state.rd_seed, state.threshold, state.share_num)
sk1_shares = create_shares(state.sk1, state.threshold, state.share_num)
srcs, dsts, ciphertexts = [], [], []
# Distribute shares
for idx, (sid, (_, pk2)) in enumerate(state.public_keys_dict.items()):
if sid == state.sid:
state.rd_seed_share_dict[state.sid] = b_shares[idx]
state.sk1_share_dict[state.sid] = sk1_shares[idx]
else:
shared_key = generate_shared_key(
bytes_to_private_key(state.sk2),
bytes_to_public_key(pk2),
)
state.ss2_dict[sid] = shared_key
plaintext = share_keys_plaintext_concat(
state.sid, sid, b_shares[idx], sk1_shares[idx]
)
ciphertext = encrypt(shared_key, plaintext)
srcs.append(state.sid)
dsts.append(sid)
ciphertexts.append(ciphertext)
log(INFO, "Client %d: stage 1 completes. uploading key shares...", state.sid)
return {KEY_DESTINATION_LIST: dsts, KEY_CIPHERTEXT_LIST: ciphertexts}
# pylint: disable-next=too-many-locals
def _collect_masked_input(
state: SecAggPlusState, named_values: Dict[str, Value]
) -> Dict[str, Value]:
log(INFO, "Client %d: starting stage 2...", state.sid)
available_clients: List[int] = []
ciphertexts = cast(List[bytes], named_values[KEY_CIPHERTEXT_LIST])
srcs = cast(List[int], named_values[KEY_SOURCE_LIST])
if len(ciphertexts) + 1 < state.threshold:
raise Exception("Not enough available neighbour clients.")
# Decrypt ciphertexts, verify their sources, and store shares.
for src, ciphertext in zip(srcs, ciphertexts):
shared_key = state.ss2_dict[src]
plaintext = decrypt(shared_key, ciphertext)
actual_src, dst, rd_seed_share, sk1_share = share_keys_plaintext_separate(
plaintext
)
available_clients.append(src)
if src != actual_src:
raise ValueError(
f"Client {state.sid}: received ciphertext "
f"from {actual_src} instead of {src}."
)
if dst != state.sid:
ValueError(
f"Client {state.sid}: received an encrypted message"
f"for Client {dst} from Client {src}."
)
state.rd_seed_share_dict[src] = rd_seed_share
state.sk1_share_dict[src] = sk1_share
# Fit client
parameters_bytes = cast(List[bytes], named_values[KEY_PARAMETERS])
parameters = [bytes_to_ndarray(w) for w in parameters_bytes]
if isinstance(state.client, Client):
fit_res = state.client.fit(
FitIns(parameters=ndarrays_to_parameters(parameters), config={})
)
parameters_factor = fit_res.num_examples
parameters = parameters_to_ndarrays(fit_res.parameters)
elif isinstance(state.client, NumPyClient):
parameters, parameters_factor, _ = state.client.fit(parameters, {})
else:
log(ERROR, "Client %d: fit function is missing.", state.sid)
# Quantize parameter update (vector)
quantized_parameters = quantize(
parameters, state.clipping_range, state.target_range
)
quantized_parameters = parameters_multiply(quantized_parameters, parameters_factor)
quantized_parameters = factor_combine(parameters_factor, quantized_parameters)
dimensions_list: List[Tuple[int, ...]] = [a.shape for a in quantized_parameters]
# Add private mask
private_mask = pseudo_rand_gen(state.rd_seed, state.mod_range, dimensions_list)
quantized_parameters = parameters_addition(quantized_parameters, private_mask)
for client_id in available_clients:
# Add pairwise masks
shared_key = generate_shared_key(
bytes_to_private_key(state.sk1),
bytes_to_public_key(state.public_keys_dict[client_id][0]),
)
pairwise_mask = pseudo_rand_gen(shared_key, state.mod_range, dimensions_list)
if state.sid > client_id:
quantized_parameters = parameters_addition(
quantized_parameters, pairwise_mask
)
else:
quantized_parameters = parameters_subtraction(
quantized_parameters, pairwise_mask
)
# Take mod of final weight update vector and return to server
quantized_parameters = parameters_mod(quantized_parameters, state.mod_range)
log(INFO, "Client %d: stage 2 completes. uploading masked parameters...", state.sid)
return {
KEY_MASKED_PARAMETERS: [ndarray_to_bytes(arr) for arr in quantized_parameters]
}
def _unmask(state: SecAggPlusState, named_values: Dict[str, Value]) -> Dict[str, Value]:
log(INFO, "Client %d: starting stage 3...", state.sid)
active_sids = cast(List[int], named_values[KEY_ACTIVE_SECURE_ID_LIST])
dead_sids = cast(List[int], named_values[KEY_DEAD_SECURE_ID_LIST])
# Send private mask seed share for every avaliable client (including itclient)
# Send first private key share for building pairwise mask for every dropped client
if len(active_sids) < state.threshold:
raise Exception("Available neighbours number smaller than threshold")
sids, shares = [], []
sids += active_sids
shares += [state.rd_seed_share_dict[sid] for sid in active_sids]
sids += dead_sids
shares += [state.sk1_share_dict[sid] for sid in dead_sids]
log(INFO, "Client %d: stage 3 completes. uploading key shares...", state.sid)
return {KEY_SECURE_ID_LIST: sids, KEY_SHARE_LIST: shares}
|
6095df6d4bde6ebdfbd99069d0504eeee6051e35
|
932ea8ba9b39bc2f37347d04d74c618f1bd37535
|
/lm_val_fns.py
|
8b34d1b47c6fd165f57c62315884f5407eb26c90
|
[] |
no_license
|
sgugger/Adam-experiments
|
03224552defb1c8ff45e98c1e3eaff53f5535af5
|
b1ede770e6403e3d555d80280538def63e69a8da
|
refs/heads/master
| 2020-03-22T04:26:49.166001
| 2018-09-05T17:30:03
| 2018-09-05T17:30:03
| 139,499,459
| 191
| 36
| null | 2018-09-05T17:30:04
| 2018-07-02T22:03:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,763
|
py
|
lm_val_fns.py
|
from fastai.text import *
class TextReader():
""" Returns a language model iterator that iterates through batches that are of length N(bptt,5)
The first batch returned is always bptt+25; the max possible width. This is done because of they way that pytorch
allocates cuda memory in order to prevent multiple buffers from being created as the batch width grows.
"""
def __init__(self, nums, bptt, backwards=False):
self.bptt,self.backwards = bptt,backwards
self.data = self.batchify(nums)
self.i,self.iter = 0,0
self.n = len(self.data)
def __iter__(self):
self.i,self.iter = 0,0
while self.i < self.n-1 and self.iter<len(self):
res = self.get_batch(self.i, self.bptt)
self.i += self.bptt
self.iter += 1
yield res
def __len__(self): return self.n // self.bptt
def batchify(self, data):
data = np.array(data)[:,None]
if self.backwards: data=data[::-1]
return T(data)
def get_batch(self, i, seq_len):
source = self.data
seq_len = min(seq_len, len(source) - 1 - i)
return source[i:i+seq_len], source[i+1:i+1+seq_len].view(-1)
def my_validate(model, source, bptt=2000):
"""
Return the validation loss and perplexity of a model
model: model to test
source: data on which to evaluate the mdoe
bptt: bptt for this evaluation (doesn't change the result, only the speed)
"""
data_source = TextReader(source, bptt)
model.eval()
model.reset()
total_loss = 0.
for inputs, targets in tqdm(data_source):
outputs, raws, outs = model(V(inputs))
p_vocab = F.softmax(outputs,1)
for i, pv in enumerate(p_vocab):
targ_pred = pv[targets[i]]
total_loss -= torch.log(targ_pred.detach())
mean = total_loss / (bptt * len(data_source))
return mean, np.exp(mean)
def one_hot1(vec, size):
a = torch.zeros(len(vec), size)
for i,v in enumerate(vec):
a[i,v] = 1.
return V(a)
def my_cache_pointer(model, source, vocab_size, scale=1, theta = 0.662, lambd = 0.1279, window=3785, bptt=2000):
data_source = TextReader(source, bptt)
model.eval()
model.reset()
total_loss = 0.
targ_history = None
hid_history = None
for inputs, targets in tqdm(data_source):
outputs, raws, outs = model(V(inputs))
p_vocab = F.softmax(outputs * scale,1)
start = 0 if targ_history is None else targ_history.size(0)
targ_history = one_hot1(targets, vocab_size) if targ_history is None else torch.cat([targ_history, one_hot1(targets, vocab_size)])
hiddens = raws[-1].squeeze() #results of the last layer + remove the batch size.
hid_history = hiddens * scale if hid_history is None else torch.cat([hid_history, hiddens * scale])
for i, pv in enumerate(p_vocab):
#Get the cached values
p = pv
if start + i > 0:
targ_cache = targ_history[:start+i] if start + i <= window else targ_history[start+i-window:start+i]
hid_cache = hid_history[:start+i] if start + i <= window else hid_history[start+i-window:start+i]
all_dot_prods = torch.mv(theta * hid_cache, hiddens[i])
exp_dot_prods = F.softmax(all_dot_prods).unsqueeze(1)
p_cache = (exp_dot_prods.expand_as(targ_cache) * targ_cache).sum(0).squeeze()
p = (1-lambd) * pv + lambd * p_cache
targ_pred = p[targets[i]]
total_loss -= torch.log(targ_pred.detach())
targ_history = targ_history[-window:]
hid_history = hid_history[-window:]
mean = total_loss / (bptt * len(data_source))
return mean, np.exp(mean)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.