text stringlengths 957 885k |
|---|
<reponame>flatearthws/nearest-satellites<gh_stars>0
#!/usr/bin/env python
"""Find, for each sampled time step, the satellite nearest to the ISS.

Reads a TLE catalogue, then for every sample time computes the distance
from the reference body (ISS) to every other satellite and records the
nearest one.  Finally prints summary statistics over the whole run.
"""
import ephem
from datetime import datetime, timedelta
from math import cos, sqrt
from operator import itemgetter
import statistics

# TLE file
tlefile = 'tle.txt'
# ISS name in the TLE file
refbodyname = '0 ISS (ZARYA)'
# start of analysis (90 days in the past)
starttime = datetime.utcnow() - timedelta(90)
# end of analysis (180 days after starting time)
endtime = starttime + timedelta(180)
# sampling rate
resdelta = timedelta(seconds=60)
# resdelta = timedelta(days = 2)

startprocess = datetime.utcnow()

bodies = []
refbody = None
with open(tlefile) as f:
    l0 = ''
    l1 = ''
    l2 = ''
    for line in f:
        line = line.strip()
        if line.startswith('0'):
            l0 = line
        elif line.startswith('1'):
            l1 = line
        elif line.startswith('2'):
            l2 = line
            # Skip vehicles docked to the ISS, but keep reading the rest
            # of the catalogue: the original `break` here silently dropped
            # every satellite listed after the first SOYUZ/PROGRESS entry.
            if 'SOYUZ' in l0 or 'PROGRESS' in l0:
                continue
            body = ephem.readtle(l0, l1, l2)
            if l0 == refbodyname:
                refbody = body
            else:
                bodies.append(body)

if refbody is None:
    raise SystemExit('reference body %r not found in %s' % (refbodyname, tlefile))

nearests = []
curtime = starttime
while curtime <= endtime:
    curtimes = curtime.strftime('%Y/%m/%d %H:%M:%S')
    print('== processing ' + curtimes)
    observer = ephem.Observer()
    observer.lon = 0
    observer.lat = 0
    observer.elevation = -6371000  # center of the earth
    observer.date = curtimes
    # minimize refraction:
    observer.temp = -272
    observer.pressure = 0
    refbody.compute(observer)
    nearestdistance = float('inf')
    nearestname = ''
    for body in bodies:
        try:
            body.compute(observer)
            # angular separation between the two bodies, seen by observer
            angle = float(repr(ephem.separation(refbody, body)))
            a = refbody.range
            b = body.range
            # law of cosines gives the straight-line distance
            distance = sqrt(a**2 + b**2 - 2*a*b*cos(angle))
        except Exception:
            # some bodies cannot be propagated at this epoch; skip them
            continue
        if distance < nearestdistance:
            nearestdistance = distance
            nearestname = body.name
    print(nearestdistance, nearestname)
    nearests.append([nearestdistance, nearestname, curtimes])
    curtime += resdelta

nearests.sort(key=itemgetter(0), reverse=True)

# keep one entry per satellite; later (smaller-distance) entries win
uniquenearest = {}
for distance in nearests:
    uniquenearest[distance[1]] = [distance[0], distance[2]]

distances = [entry[0] for entry in nearests]
mean = statistics.mean(distances)
stdev = statistics.stdev(distances)
pstdev = statistics.pstdev(distances)
pvariance = statistics.pvariance(distances)
variance = statistics.variance(distances)

endprocess = datetime.utcnow()
print()
print('RESULTS: ')
print('start of procesing: ', startprocess)
print('end of procesing: ', endprocess)
print('start of simulation: ', starttime)
print('end of simulation: ', endtime)
print('sampling rate: ', resdelta)
print('mean: ', mean)
print('stddev: ', stdev)
print('pstddev: ', pstdev)
print('variance: ', variance)
print('pvariance: ', pvariance)
print('n: ', len(distances))
print('total satellites: ', len(bodies)-1)
print('list of nearest satellites:')
i = 0
for key, value in sorted(uniquenearest.items(), key=lambda item: (item[1][0], item[0])):
    print("%s: %s, %s" % (value[0], key, value[1]))
    i += 1
    if i > 100:
        break
|
"""
Filesystem file tree
"""
import filecmp
import hashlib
import itertools
import os
import pathlib
from datetime import datetime
from zoneinfo import ZoneInfo
from magic import Magic
from .exceptions import FilesystemError
from .patterns import match_path_patterns
from .utils import current_umask
#: Files and directories never included in tree scans
SKIPPED_PATHS = [
'.DocumentRevisions-V100',
'.Spotlight-V100',
'.TemporaryItems',
'.Trashes',
'.fseventsd',
'.metadata_never_index',
'TheVolumeSettingsFolder',
]
#: Skipped hash algoritms (do not implement common call format)
SKIPPED_CHECKSUMS = (
'shake_128',
'shake_256',
)
# Default timezone for local filesystem timestamp parsing
DEFAULT_TIMEZONE = ZoneInfo('UTC')
#: Default checksum hash algorithm
DEFAULT_CHECKSUM = 'sha256'
#: Default block size when calculating file checksums
DEFAULT_CHECKSUM_BLOCK_SIZE = 2**20
class TreeItem(pathlib.Path):
    """
    File items in a tree

    Extends pathlib.Path with some properties and utility methods
    """
    # pylint: disable=protected-access
    _flavour = pathlib._windows_flavour if os.name == 'nt' else pathlib._posix_flavour

    #: Class-wide checksum cache.  Keyed by (path, algorithm) so that two
    #: different files never share a cache entry (keying by algorithm alone
    #: made every TreeItem return the digest of whichever file was hashed
    #: first).
    __checksums__ = {}

    @property
    def gid(self):
        """
        Return st_gid
        """
        return self.lstat().st_gid

    @property
    def uid(self):
        """
        Return st_uid
        """
        return self.lstat().st_uid

    @property
    def atime(self):
        """
        Return st_atime as UTC datetime
        """
        return datetime.fromtimestamp(self.lstat().st_atime).astimezone(DEFAULT_TIMEZONE)

    @property
    def ctime(self):
        """
        Return st_ctime as UTC datetime
        """
        return datetime.fromtimestamp(self.lstat().st_ctime).astimezone(DEFAULT_TIMEZONE)

    @property
    def mtime(self):
        """
        Return st_mtime as UTC datetime
        """
        return datetime.fromtimestamp(self.lstat().st_mtime).astimezone(DEFAULT_TIMEZONE)

    @property
    def size(self):
        """
        Return st_size
        """
        return self.lstat().st_size

    @property
    def magic(self):
        """
        Return file magic string

        Raises FilesystemError when the magic lookup fails.
        """
        try:
            with Magic() as handle:
                return handle.id_filename(str(self))
        except Exception as error:
            # raise the module's own error type, not the unrelated builtin
            # FileExistsError used previously
            raise FilesystemError(f'Error reading file magic from {self}: {error}') from error

    def __get_cached_checksum__(self, algorithm):
        """
        Get cached checksum if st_mtime is not changed
        """
        key = (str(self), algorithm)
        try:
            cached_item = self.__checksums__[key]
            if cached_item['st_mtime'] != self.lstat().st_mtime:
                # file changed since the digest was cached: invalidate
                del self.__checksums__[key]
                return None
            return cached_item['hex_digest']
        except KeyError:
            return None

    def checksum(self, algorithm=DEFAULT_CHECKSUM, block_size=DEFAULT_CHECKSUM_BLOCK_SIZE):
        """
        Calculate hex digest for file with specified checksum algorithm

        Digests are cached per (path, algorithm) and invalidated when the
        file's st_mtime changes.  Raises FilesystemError for unsupported
        or unknown algorithms and for missing files.
        """
        if algorithm in SKIPPED_CHECKSUMS:
            raise FilesystemError(f'Calculating {algorithm} not supported')
        if not self.is_file() or not self.exists():
            raise FilesystemError(f'No such file: {self}')

        cached_checksum = self.__get_cached_checksum__(algorithm)
        if cached_checksum is not None:
            return cached_checksum

        try:
            hash_callback = getattr(hashlib, algorithm)()
        except AttributeError as error:
            raise FilesystemError(f'Unexpected algorithm: {algorithm}') from error

        # hash the file in blocks to bound memory use
        with self.open('rb') as filedescriptor:
            while True:
                chunk = filedescriptor.read(block_size)
                if not chunk:
                    break
                hash_callback.update(chunk)

        hex_digest = hash_callback.hexdigest()
        self.__checksums__[(str(self), algorithm)] = {
            'st_mtime': self.lstat().st_mtime,
            'hex_digest': hex_digest
        }
        return hex_digest
class Tree(pathlib.Path):
    """
    Extend pathlib.Path to use for filesystem tree processing

    Iterating a Tree walks it depth first, yielding Tree objects for
    subdirectories and file-loader items (TreeItem by default) for files.
    Items are cached by string path in self.__items__.
    """
    __directory_loader_class__ = None
    """Tree item loader for directories"""
    __file_loader_class__ = None
    """Tree item loader class for files"""
    # pylint: disable=protected-access
    _flavour = pathlib._windows_flavour if os.name == 'nt' else pathlib._posix_flavour

    # pylint: disable=redefined-builtin
    # pylint: disable=unused-argument
    def __init__(self, path, create_missing=False, sorted=True, mode=None, excluded=None):  # noqa
        # `path` is consumed by pathlib's __new__, not here
        self.excluded = self.__configure_excluded__(excluded)
        self.sorted = sorted  # noqa
        if create_missing and not self.exists():
            self.create(mode)
        # lazy iteration state; populated on first iteration
        self.__items__ = None
        self.__iter_items__ = None
        self.__iter_child__ = None
        self.__iterator__ = None
        self.reset()

    def __repr__(self):
        return str(self)

    @staticmethod
    def __configure_excluded__(excluded):
        """
        Merge excluded with skipped paths
        """
        if excluded is None:
            excluded = []
        for skipped in SKIPPED_PATHS:
            if skipped not in excluded:
                excluded.append(skipped)
        return excluded

    @property
    def __directory_loader__(self):
        """
        Get loader class for subdirectory items
        """
        if self.__directory_loader_class__ is not None:
            return self.__directory_loader_class__
        return self.__class__

    @property
    def __file_loader__(self):
        """
        Get loader class for file items
        """
        if self.__file_loader_class__ is not None:
            return self.__file_loader_class__
        return TreeItem

    def __getitem__(self, path):
        """
        Get cached path item by path

        Forces a full iteration first when the cache is empty.
        """
        if not self.__items__:
            list(self)
        if isinstance(path, pathlib.Path):
            path = str(path)
        return self.__items__[path]

    def __iter__(self):
        return self

    def __load_tree__(self, item):
        """
        Load sub directory
        """
        # pylint: disable=not-callable
        return self.__directory_loader__(item, sorted=self.sorted, excluded=self.excluded)

    def __load_file__(self, item):
        """
        Load file item
        """
        # pylint: disable=not-callable
        return self.__file_loader__(item)

    # pylint: disable=too-many-branches
    def __next__(self):
        """
        Walk tree items recursively, returning Tree or Path objects

        Tree is walked depth first. If self.sorted is set, Tree items are sorted
        before iterating.
        """
        if not self.__items__:
            # first call (or after reset): scan this directory once and
            # cache its direct children
            self.__iter_child__ = None
            self.__items__ = {}
            if self.sorted:
                try:
                    items = sorted(self.iterdir())
                except FileNotFoundError as error:
                    raise FilesystemError(f'{error}') from error
            else:
                try:
                    items = self.iterdir()
                except FileNotFoundError as error:
                    raise FilesystemError(f'{error}') from error
            self.__iter_items__ = []
            for item in items:
                if self.is_excluded(item):
                    continue
                if item.is_dir():
                    item = self.__load_tree__(item)
                else:
                    item = self.__load_file__(item)
                self.__items__[str(item)] = item
                self.__iter_items__.append(item)
            self.__iterator__ = itertools.chain(self.__iter_items__)
        try:
            if self.__iter_child__ is not None:
                # currently descending into a subdirectory: drain it first
                try:
                    item = next(self.__iter_child__)
                    if str(item) not in self.__items__:
                        if item.is_dir():
                            item = self.__load_tree__(item)
                        else:
                            item = self.__load_file__(item)
                        self.__items__[str(item)] = item
                    return item
                except StopIteration:
                    # subdirectory exhausted: fall through to own items
                    self.__iter_child__ = None
            item = next(self.__iterator__)
            if item.is_dir():
                # yield the directory now, descend into it on later calls
                item = self.__load_tree__(item)
                self.__iter_child__ = item
                self.__items__[str(self.__iter_child__)] = self.__iter_child__
            else:
                item = self.__load_file__(item)
            return item
        except StopIteration as stop:
            # rewind so the tree can be iterated again
            self.__iterator__ = itertools.chain(self.__iter_items__)
            self.__iter_child__ = None
            raise StopIteration from stop

    @property
    def is_empty(self):
        """
        Check if tree is empty
        """
        return list(self) == []

    def is_excluded(self, item):
        """
        Check if item is excluded

        Matches by exact name first, then by the exclude glob patterns.
        """
        if item.name in self.excluded:
            return True
        if match_path_patterns(self.excluded, self, item.name):
            return True
        return False

    def reset(self):
        """
        Reset cached items loaded to the tree
        """
        self.__items__ = None
        self.__iter_items__ = None
        self.__iter_child__ = None
        self.__iterator__ = None

    def resolve(self, strict=False):
        """
        Return correct type of tree from pathlib.Path.resolve() parent method
        """
        return self.__class__(path=super().resolve(strict), sorted=self.sorted, excluded=self.excluded)

    def create(self, mode=None):
        """
        Create directory

        Raises FilesystemError if path already exists or creation failed.
        """
        if self.exists():
            if self.is_dir():
                raise FilesystemError(f'Directory already exists: {self}')
            raise FilesystemError(f'File with path already exists: {self}')
        try:
            if mode is None:
                # default mode: whatever the current umask permits
                value = current_umask() ^ int('777', 8)
                mode = f'{value:04o}'
            if isinstance(mode, str):
                mode = int(mode, 8)
            if not 0 <= mode <= 4095:
                raise ValueError('Invalid mode value')
        except ValueError as error:
            raise FilesystemError(f'Error parsing filesystem mode value as octal {mode}: {error}') from error
        try:
            self.mkdir(mode)
        except OSError as error:
            raise FilesystemError(f'Error creating directory {self}: {error}') from error

    def filter(self, patterns=None, extensions=None):  # noqa
        """
        Filter specified name patterns from tree

        Patterns can be either a glob pattern or list of glob patterns
        """
        return TreeSearch(self, list(self)).filter(patterns, extensions)

    def exclude(self, patterns):
        """
        Exclude specified name patterns from tree

        Patterns can be either a glob pattern or list of glob patterns
        """
        return TreeSearch(self, list(self)).exclude(patterns)

    def remove(self, recursive=False):
        """
        Remove tree

        Refuses to remove a non-empty tree unless recursive is True.
        """
        if not recursive and not self.is_empty:
            raise FilesystemError(f'Tree is not empty: {self}')
        for item in list(self):
            if not item.exists():
                continue
            if isinstance(item, self.__directory_loader__):
                item.remove(recursive)
            else:
                item.unlink()
        self.rmdir()

    def diff(self, other):
        """
        Run simple diff using filecmp.cmp against files in other tree, returning differences in files
        and files missing from either directory

        Returns three lists with:
        - list of files with differing contents
        - files missing from this tree
        - files missing from other tree
        """
        if isinstance(other, str):
            other = Tree(other, sorted=self.sorted, excluded=self.excluded)
        missing_self = []
        missing_other = []
        different = []
        # forward pass: compare everything under self against other
        for item in self:
            path = other.joinpath(item.relative_to(self))
            if item.is_dir() or path.is_dir():
                # type mismatches (dir vs file) count as missing entries
                if item.is_dir() and path.is_file():
                    missing_self.append(path)
                if item.is_file() and path.is_dir():
                    missing_other.append(path)
            elif path.exists():
                if not filecmp.cmp(str(item), str(path), shallow=False):
                    different.append(path)
            else:
                missing_other.append(path)
        # reverse pass: find items only present in other
        for item in other:
            path = self.joinpath(item.relative_to(other))
            if item.is_dir() or path.is_dir():
                if item.is_dir() and path.is_file():
                    missing_other.append(path)
                if item.is_file() and path.is_dir():
                    missing_self.append(path)
            elif not path.exists():
                missing_self.append(path)
        return different, missing_self, missing_other
class TreeSearch(list):
    """
    Chainable tree search results

    A plain list of tree items plus a reference back to the tree they
    came from, so filter()/exclude() calls can be chained.
    """
    def __init__(self, tree, items):
        self.tree = tree
        super().__init__(items)

    def filter(self, patterns=None, extensions=None):  # noqa
        """
        Match specified patterns from matched items
        """
        extensions = extensions.split(',') if isinstance(extensions, str) else extensions
        patterns = [patterns] if isinstance(patterns, str) else patterns
        matched = []
        for entry in self:
            if extensions and entry.suffix in extensions:
                matched.append(entry)
            if patterns and match_path_patterns(patterns, self.tree, entry):
                matched.append(entry)
        return self.__class__(self.tree, matched)

    def exclude(self, patterns):
        """
        Exclude specified patterns from matched items
        """
        patterns = [patterns] if isinstance(patterns, str) else patterns
        kept = [entry for entry in self if not match_path_patterns(patterns, self.tree, entry)]
        return self.__class__(self.tree, kept)
|
<reponame>nutti/Introduction-to-Addon-Development-in-Blender-Web
import bpy
from bpy.props import FloatProperty, EnumProperty
bl_info = {
"name": "サンプル 2-3: オブジェクトを並進移動するアドオン②",
"author": "ぬっち(Nutti)",
"version": (3, 0),
"blender": (2, 80, 0),
"location": "3Dビューポート > オブジェクト",
"description": "アクティブなオブジェクトを並進移動するサンプルアドオン(移動量、移動軸 任意指定版)",
"warning": "",
"support": "TESTING",
"doc_url": "",
"tracker_url": "",
"category": "Object"
}
# Operator: translate (move) the active object
class SAMPLE23_OT_TranslateObject(bpy.types.Operator):

    bl_idname = "object.sample23_translate_object"
    bl_label = "並進移動"
    bl_description = "アクティブなオブジェクトを並進移動します"
    bl_options = {'REGISTER', 'UNDO'}

    # @include-source start [prop_translate_object]
    # Axis along which the object is moved (X, Y or Z)
    axis: EnumProperty(
        name="移動軸",
        description="移動軸を設定します",
        default='X',
        items=[
            ('X', "X軸", "X軸に沿って並進移動します"),
            ('Y', "Y軸", "Y軸に沿って並進移動します"),
            ('Z', "Z軸", "Z軸に沿って並進移動します"),
        ]
    )
    # Distance to move along the selected axis
    amount: FloatProperty(
        name="移動量",
        description="移動量を設定します",
        default=1.0,
    )
    # @include-source end [prop_translate_object]

    # Called when the menu entry is executed
    def execute(self, context):
        active_obj = context.active_object
        # @include-source start [access_to_prop]
        # location indices: 0 = X, 1 = Y, 2 = Z
        if self.axis == 'X':
            active_obj.location[0] += self.amount
        elif self.axis == 'Y':
            active_obj.location[1] += self.amount
        elif self.axis == 'Z':
            active_obj.location[2] += self.amount
        # @include-source end [access_to_prop]
        self.report({'INFO'}, "サンプル 2-3: 『{}』を{}軸方向へ {} 並進移動しました。"
                    .format(active_obj.name, self.axis, self.amount))
        print("サンプル 2-3: オペレータ『{}』が実行されました。".format(self.bl_idname))
        return {'FINISHED'}
def menu_fn(self, context):
    """Append a separator and the translate operator to the object menu."""
    layout = self.layout
    layout.separator()
    layout.operator(SAMPLE23_OT_TranslateObject.bl_idname)
classes = [
SAMPLE23_OT_TranslateObject,
]
def register():
    """Enable the add-on: register all classes, then hook the menu entry."""
    for operator_cls in classes:
        bpy.utils.register_class(operator_cls)
    bpy.types.VIEW3D_MT_object.append(menu_fn)
    print("サンプル 2-3: アドオン『サンプル 2-3』が有効化されました。")
def unregister():
    """Disable the add-on: unhook the menu entry, then unregister classes."""
    bpy.types.VIEW3D_MT_object.remove(menu_fn)
    for operator_cls in classes:
        bpy.utils.unregister_class(operator_cls)
    print("サンプル 2-3: アドオン『サンプル 2-3』が無効化されました。")


if __name__ == "__main__":
    register()
|
<filename>interactive_text_to_sql/src/utils/link_util.py
# coding: utf-8
import json
from typing import List
from src.utils.utils import lemma_token
STOP_WORD_LIST = [_.strip() for _ in open('data/common/stop_words.txt', 'r', encoding='utf-8').readlines()]
def align_two_sentences_in_token_level(token_list1, token_list2, stop_word_list=None):
    """Align the tokens of two sentences, first exactly, then by lemma.

    :param token_list1: tokens of the first sentence
    :param token_list2: tokens of the second sentence
    :param stop_word_list: tokens ignored on both sides (default: none)
    :return: list of (word1, idx1, word2, idx2) pairs; indices refer to the
        original (unfiltered) token positions
    """
    if stop_word_list is None:
        # avoid the mutable-default-argument pitfall
        stop_word_list = []
    token_list1 = [(word, idx) for idx, word in enumerate(token_list1) if word not in stop_word_list]
    token_list2 = [(word, idx) for idx, word in enumerate(token_list2) if word not in stop_word_list]

    def find_exact_match_pairs_from_two_sentences(word_list1, word_list2):
        """Greedily pair equal tokens; each right-side token is used once."""
        pairs = []
        for word1, idx1 in word_list1:
            for word2, idx2 in word_list2:
                if word1 == word2:
                    # consume the match and stop scanning: removing while
                    # continuing to iterate skips elements and could pair
                    # the same left token more than once
                    word_list2.remove((word2, idx2))
                    pairs.append((word1, idx1, word2, idx2))
                    break
        return pairs

    exact_match = find_exact_match_pairs_from_two_sentences(token_list1, token_list2)
    exact_match_tokens_idx1 = [_[1] for _ in exact_match]
    exact_match_tokens_idx2 = [_[3] for _ in exact_match]
    # second pass: lemmatize whatever did not match exactly
    sentence1_lemma = [(lemma_token(word), idx) for word, idx in token_list1 if idx not in exact_match_tokens_idx1]
    sentence2_lemma = [(lemma_token(word), idx) for word, idx in token_list2 if idx not in exact_match_tokens_idx2]
    lemma_match = find_exact_match_pairs_from_two_sentences(sentence1_lemma, sentence2_lemma)
    return exact_match + lemma_match
def find_keyword_alignment_by_rule(nl_tokens: List, keyword: str, stop_word_list: List = STOP_WORD_LIST,
                                   only_one_match: bool = False, aligned_mark: List[bool] = None):
    """Align a schema keyword/value phrase against an utterance.

    Four passes run in order: exact match, lemma match, partial (token
    subset) match, partial lemma match.

    :param nl_tokens: tokenized utterance
    :param keyword: keyword phrase, whitespace separated
    :param stop_word_list: tokens ignored on both sides
    :param only_one_match: if True, each informative utterance token is
        consumed by at most one alignment
    :param aligned_mark: per-informative-token "already aligned" flags,
        threaded between calls; created here on first use
    :return: (aligned_results, aligned_mark); each result is
        (start_pos, end_pos, match_type, keyword_tokens)
    """
    aligned_results = []
    position_pairs = set()
    # step0: eliminate stop words, but keep position info.
    # NOTE: list.remove() returns None, so the previous
    # `keyword = keyword.remove(stop_word)` destroyed the keyword list
    # whenever it contained a stop word; filter instead.
    keyword = keyword.split()
    keyword = [token for token in keyword if token not in stop_word_list]
    keyword_lemma = [lemma_token(_) for _ in keyword]
    informative_token_pairs = []
    informative_token_lemma_pairs = []
    for pos, token in enumerate(nl_tokens):
        if token not in stop_word_list:
            informative_token_pairs.append((token, pos))
            informative_token_lemma_pairs.append((lemma_token(token), pos))
    if not aligned_mark:
        aligned_mark = [False for _ in range(len(informative_token_pairs))]
    if not keyword:
        # keyword consisted entirely of stop words: nothing to align
        return aligned_results, aligned_mark
    # step1: exact match
    for i in range(len(informative_token_pairs) - len(keyword) + 1):
        if only_one_match and True in aligned_mark[i: i + len(keyword)]:
            continue
        st_position = informative_token_pairs[i][1]
        ed_position = informative_token_pairs[i + len(keyword) - 1][1]
        if [_[0] for _ in informative_token_pairs[i: i + len(keyword)]] == keyword \
                and (st_position, ed_position) not in position_pairs:
            aligned_results.append((st_position, ed_position, 'exact', keyword))
            position_pairs.add((st_position, ed_position))
            if only_one_match:
                for j in range(i, i + len(keyword)):
                    aligned_mark[j] = True
    # step2: lemma exactly match
    for i in range(len(informative_token_lemma_pairs) - len(keyword_lemma) + 1):
        if only_one_match and True in aligned_mark[i: i + len(keyword_lemma)]:
            continue
        st_position = informative_token_lemma_pairs[i][1]
        # keyword_lemma has the same length as keyword by construction
        ed_position = informative_token_lemma_pairs[i + len(keyword_lemma) - 1][1]
        if [_[0] for _ in informative_token_lemma_pairs[i: i + len(keyword_lemma)]] == keyword_lemma \
                and (st_position, ed_position) not in position_pairs:
            aligned_results.append((st_position, ed_position, 'exact lemma', keyword))
            position_pairs.add((st_position, ed_position))
            if only_one_match:
                for j in range(i, i + len(keyword_lemma)):
                    aligned_mark[j] = True

    def check_in(utterance_span, keyword_tokens):
        # span counts as a partial match when all its tokens occur in the
        # (short, <= 3 token) keyword
        return len(set(utterance_span) & set(keyword_tokens)) == len(utterance_span) and len(keyword_tokens) <= 3

    # step3: partial match
    for i in range(len(informative_token_pairs) - len(keyword) + 1):
        st_position = informative_token_pairs[i][1]
        for end_idx in reversed(range(i + 1, len(informative_token_pairs))):
            if only_one_match and True in aligned_mark[i: end_idx]:
                continue
            sub_tokens = [_[0] for _ in informative_token_pairs[i:end_idx]]
            if not sub_tokens:
                continue
            ed_position = informative_token_pairs[end_idx - 1][1]
            if check_in(sub_tokens, keyword):
                aligned_results.append((st_position, ed_position, 'partial', keyword))
                if only_one_match:
                    for j in range(i, end_idx):
                        aligned_mark[j] = True
    # step4: lemma partial match
    for i in range(len(informative_token_lemma_pairs) - len(keyword) + 1):
        for end_idx in reversed(range(i + 1, len(informative_token_lemma_pairs))):
            if only_one_match and True in aligned_mark[i: end_idx]:
                continue
            sub_tokens = [_[0] for _ in informative_token_lemma_pairs[i:end_idx]]
            if not sub_tokens:
                continue
            if check_in(sub_tokens, keyword):
                aligned_results.append((informative_token_lemma_pairs[i][1],
                                        informative_token_lemma_pairs[end_idx - 1][1],
                                        'partial lemma', keyword))
                if only_one_match:
                    for j in range(i, end_idx):
                        aligned_mark[j] = True
    return aligned_results, aligned_mark
def find_alignment_by_rule(nl_tokens: List, table_names: List, column_names: List, values: List, only_one_match=False):
    """Collect value, table and column alignments for one utterance.

    Categories are processed in this order (values, then tables, then
    columns) so that earlier matches can claim tokens first when
    only_one_match is set.
    """
    aligned_mark = None
    matches = {'value': [], 'table': [], 'column': []}
    for category, keywords in (('value', values), ('table', table_names), ('column', column_names)):
        for keyword in keywords:
            found, aligned_mark = find_keyword_alignment_by_rule(
                nl_tokens, keyword, STOP_WORD_LIST,
                only_one_match=only_one_match, aligned_mark=aligned_mark)
            matches[category] += found
    return matches
def test():
    """Smoke-test the alignment rules on a tiny handcrafted schema."""
    question = 'show me the name of all English songs and their singers'.split()
    tables = ['singer', 'song']
    columns = ['singer name', 'song name', 'age', 'year']
    values = ['English', 'Show time']
    alignment = find_alignment_by_rule(question, tables, columns, values, only_one_match=False)
    print(json.dumps(alignment, indent=4))


if __name__ == '__main__':
    test()
|
<filename>lib/bullseye.py
import copy
import math
import scipy
import scipy.spatial
import numpy as np
from skimage import measure
def mask2sectors(endo_mask, epi_mask, rv_mask, rvi_mask, num_sectors):
    """
    Split myocardium to num_sectors sectors

    Input :
        endo_mask : [RO, E1], mask for endo
        epi_mask : [RO, E1], mask for epi
        rv_mask : [RO, E1], mask for rv
        rvi_mask : [RO, E1], mask for rv insertion mask, can be None; if not None, rv_mask is not used
        num_sectors : number of angular sectors
    Output :
        sectors : [RO, E1] sector mask, sector 1 is labelled as value 1
    """
    def get_angle(a, b):
        # angle from a to b (rotate a to b)
        # positive angle for counter-clock wise
        # 0-360 degrees
        v1_theta = math.atan2(a[1], a[0])
        v2_theta = math.atan2(b[1], b[0])
        r = (v2_theta - v1_theta) * (180.0 / math.pi)
        if r < 0:
            r += 360.0
        return r

    def img_to_xy(rvi_, _, e1_):
        # image (row, col) -> cartesian (x, y) with y up
        return rvi_[1], e1_ - 1 - rvi_[0]

    img_height, img_width = endo_mask.shape

    # find lv center
    endo_pts = np.argwhere(endo_mask > 0)
    lv_center = np.mean(endo_pts, axis=0)
    lv_center2 = img_to_xy(lv_center, img_height, img_width)

    # find rv center
    if rv_mask is not None:
        rv_pts = np.argwhere(rv_mask > 0)
        rv_center = np.mean(rv_pts, axis=0)
    else:
        if rvi_mask is None:
            raise ValueError("Both rv_mask and rvi_mask are None")
        # estimate an rv center from the insertion point.
        # copy() is essential: `rv_center = lv_center` would alias the
        # array and the in-place updates below would corrupt lv_center.
        rvi_pts = np.argwhere(rvi_mask > 0)
        rvi_pt = np.mean(rvi_pts, axis=0)
        dist = np.linalg.norm(rvi_pt - lv_center)
        rv_center = lv_center.copy()
        if rvi_pt[1] < lv_center[1]:
            rv_center[1] -= 2 * dist
            rv_center[0] += dist
        else:
            rv_center[0] -= 2 * dist
            rv_center[1] -= dist
    rv_center2 = img_to_xy(rv_center, img_height, img_width)
    rv_vec = (rv_center2[0] - lv_center2[0], rv_center2[1] - lv_center2[1])

    # find rvi: either derive it from the rv blob or read it from rvi_mask
    if rvi_mask is None:
        num_rv_pts = rv_pts.shape[0]
        rvi = np.zeros((1, 2))
        max_angle = 0
        # pick the rv point with the largest (<=180 deg) angle to rv_vec
        for pt in range(num_rv_pts):
            pt2 = img_to_xy((rv_pts[pt, 0], rv_pts[pt, 1]), img_height, img_width)
            rv_pt_vec = (pt2[0] - lv_center2[0], pt2[1] - lv_center2[1])
            rv_rvi_angle = get_angle(rv_pt_vec, rv_vec)
            if 180 >= rv_rvi_angle > max_angle:
                max_angle = rv_rvi_angle
                rvi[0, 0] = rv_pts[pt, 0]
                rvi[0, 1] = rv_pts[pt, 1]
    else:
        rvi = np.argwhere(rvi_mask > 0)
    rvi2 = img_to_xy((rvi[0, 0], rvi[0, 1]), img_height, img_width)

    # split endo/epi to sectors
    rvi_vec = (rvi2[0] - lv_center2[0], rvi2[1] - lv_center2[1])
    rv_rvi_angle = get_angle(rv_vec, rvi_vec)
    delta_rvi_angle = 360 / num_sectors
    sectors = np.zeros(endo_mask.shape)
    myo_mask = epi_mask - endo_mask
    myo_pts = np.argwhere(myo_mask > 0)
    n_myo_pts = myo_pts.shape[0]
    angle_myo_pts = np.zeros(n_myo_pts)
    for n in range(n_myo_pts):
        myo_pts_xy = img_to_xy(myo_pts[n, :], img_height, img_width)
        angle_myo_pts[n] = get_angle(rvi_vec, (myo_pts_xy[0] - lv_center2[0], myo_pts_xy[1] - lv_center2[1]))
        if rv_rvi_angle >= 180:  # rotate rvi clock wise
            angle_myo_pts[n] = 360 - angle_myo_pts[n]
        sector_no = np.floor(angle_myo_pts[n] / delta_rvi_angle) + 1
        if sector_no == 1:
            sectors[myo_pts[n, 0], myo_pts[n, 1]] = sector_no
        else:
            # remaining sectors are numbered in the opposite direction
            sectors[myo_pts[n, 0], myo_pts[n, 1]] = num_sectors + 2 - sector_no
    return sectors
def smooth_contours(contour_x, contour_y, n_components=24, circularise=False, n_pts=2000):
    """Resample and low-pass filter a contour.

    Takes contour_x, contour_y (cartesian coordinates of a contour), resamples
    it uniformly by arc length, and keeps only the lowest n_components Fourier
    components, producing a smoother, more circular contour.  Returns the
    smoothed x and y coordinate arrays (length n_pts).
    """
    if n_components is None:
        n_components = 12  # slightly arbitary number, but seems to work well
    npts = n_pts + 1

    xy = np.transpose(np.stack([contour_x, contour_y]))

    if circularise:
        # keep only the convex-hull vertices so the result is near-circular
        hull = scipy.spatial.ConvexHull(xy)
        picked = hull.vertices
    else:
        picked = range(0, len(contour_x))
    # wrap around: close the curve by repeating the first sample point
    picked = np.hstack([picked, picked[0]])
    samples = xy[picked, :]

    # arc-length parameterisation: cumulative distance along the polyline,
    # normalised to [0, 1]
    steps = np.diff(samples, axis=0)
    arc = np.sqrt(steps[:, 0] ** 2 + steps[:, 1] ** 2)
    arc = np.insert(arc, 0, 0, axis=0)
    arc = np.cumsum(arc)
    arc = arc / arc[-1]

    u = np.linspace(0, 1, npts)
    contour_x = np.interp(u, arc, samples[:, 0], period=360)
    contour_y = np.interp(u, arc, samples[:, 1], period=360)
    contour_x = contour_x[:-1]
    contour_y = contour_y[:-1]

    def lowpass(sig):
        # zero out all but the lowest n_components Fourier components
        n = len(sig)
        n_filt = n - n_components - 1
        f = np.fft.fft(sig)
        f[int(n / 2 + 1 - n_filt / 2):int(n / 2 + n_filt / 2)] = 0.0
        return np.abs(np.fft.ifft(f))

    return lowpass(contour_x), lowpass(contour_y)
def extract_contours(preds, thres=0.75, smoothing=True, num_components_smoothing=24, circular=False, n_pts=2000):
    """Extract contours from segmentation mask or probability map

    Inputs:
        preds : [RO E1], input mask or probablity map
        thres : threshold to extract contours, a 2D marching cube extration is performed
        smoothing : True or False, if true, contours are smoothed
        num_components_smoothing : number of fft components kept after smoothing
        circular : True or False, if true, contours are kept to approx. circle
    Outputs:
        contours : a list of contours, every contour is a nx2 numpy array
        len_contours : point count of each (pre-smoothing) contour
    """
    contours = measure.find_contours(preds, thres)
    # record the raw point count of every contour before any smoothing
    len_contours = [contour.shape[0] for contour in contours]
    if smoothing:
        smoothed = copy.deepcopy(contours)
        for idx, contour in enumerate(contours):
            sc_x, sc_y = smooth_contours(contour[:, 0],
                                         contour[:, 1],
                                         n_components=num_components_smoothing,
                                         circularise=circular,
                                         n_pts=n_pts)
            pts = np.zeros((sc_x.shape[0], 2))
            pts[:, 0] = sc_x
            pts[:, 1] = sc_y
            smoothed[idx] = pts
        contours = copy.deepcopy(smoothed)
    return contours, len_contours
def extract_epi_contours(preds, thres=0.75, smoothing=True, num_components_smoothing=24, circular=False, n_pts=2000):
    """Extract myocardium epi contours from segmentation mask or probability map

    Inputs:
        preds : [RO E1], input mask or probablity map
        thres : threshold to extract contours, a 2D marching cube extration is performed
        smoothing : True or False, if true, contours are smoothed
        num_components_smoothing : number of fft components kept after smoothing
        circular : True or False, if true, contours are kept to approx. circle
    Outputs:
        epi : a nx2 numpy array for epi contour, or None when no contour found
    """
    contours, len_contour = extract_contours(preds, thres, smoothing, num_components_smoothing, circular, n_pts)
    num_c = len(contours)
    if num_c == 0:
        return None
    if num_c == 1:
        return contours[0]
    # several contours found: the longest one is taken to be the epicardium
    lengths = np.zeros([num_c])
    for idx in range(num_c):
        lengths[idx] = len_contour[idx]
    order = np.argsort(lengths)
    return contours[order[-1]]
def compute_bullseye_sector_mask_for_slice(endo_mask, epi_mask, rv_mask, rvi_mask, num_sectors=None):
    """
    Compute sector masks for single slice

    Input :
        endo_mask, epi_mask, rv_mask, rvi_mask : [RO, E1]
        rvi_mask can be all zeros. In this case, rv_mask is used
        num_sectors : 6, but should be for 4 apex
    Output :
        sectors : [RO, E1], sector mask. For 6 sectors, its values are 1, 2, 3, 4, 5, 6. background is 0.
        sectors_32 : [RO, E1], sector mask for endo and epi.
            For 6 EPI sectors, its values are 1-6. background is 0.
            For ENDO sectors, it is 7-12
    """
    rvi_pt = np.argwhere(rvi_mask > 0)
    # NOTE(review): has_rvi is set but never read afterwards
    has_rvi = True
    if (rvi_pt is None) or (rvi_pt.shape[0] == 0):
        print("Cannot find rvi point, image must be in CMR view ... ")
        # NOTE(review): the masks are documented as 2D [RO, E1], but these
        # transposes use a 3-axis permutation, which would fail on a 2D
        # array -- this branch looks untested; TODO confirm input rank
        endo_mask = np.transpose(endo_mask, [1, 0, 2])
        epi_mask = np.transpose(epi_mask, [1, 0, 2])
        rv_mask = np.transpose(rv_mask, [1, 0, 2])
        has_rvi = False
    img_height, img_width = endo_mask.shape
    # refine epi: binary union of epi and endo pixels
    m = np.zeros((img_height, img_width))
    m[np.where(epi_mask > 0)] = 1
    m[np.where(endo_mask > 0)] = 1
    epi_mask_2 = m
    # get contours
    contours_endo = extract_epi_contours(endo_mask,
                                         thres=0.5,
                                         smoothing=True,
                                         num_components_smoothing=36,
                                         circular=False,
                                         n_pts=2000)
    contours_epi = extract_epi_contours(epi_mask_2,
                                        thres=0.95,
                                        smoothing=True,
                                        num_components_smoothing=36,
                                        circular=False,
                                        n_pts=2000)
    # split sectors
    rvi_pt = np.argwhere(rvi_mask > 0)
    if rvi_pt is None:
        # NOTE(review): np.argwhere never returns None (an empty result has
        # shape (0, 2)), so this guard can never fire -- TODO confirm intent
        raise ValueError("Cannot find rv insertion point")
    # split 16 sectors
    sectors = mask2sectors(endo_mask, epi_mask, rv_mask, rvi_mask, num_sectors)
    # split 32 sectors
    endo_kd = scipy.spatial.KDTree(contours_endo)
    epi_kd = scipy.spatial.KDTree(contours_epi)
    myo = np.copy(sectors)
    max_myo = np.max(myo)
    pts = np.where(myo > 0)
    n_pts = pts[0].shape[0]
    pts_2 = np.zeros((n_pts, 2))
    pts_2[:, 0] = pts[0]
    pts_2[:, 1] = pts[1]
    # nearest-contour split: myocardium points nearer the endo contour than
    # the epi contour are shifted into the second label range (+max_myo)
    d_endo, i_endo = endo_kd.query(pts_2)
    d_epi, i_epi = epi_kd.query(pts_2)
    for p in range(n_pts):
        if d_epi[p] > d_endo[p]:
            myo[pts[0][p], pts[1][p]] = myo[pts[0][p], pts[1][p]] + max_myo
    sectors_32 = myo
    if (rvi_pt is None) or (rvi_pt.shape[0] == 0):
        # undo the earlier transpose (same rank caveat as above)
        sectors = np.transpose(sectors, [1, 0, 2])
        sectors_32 = np.transpose(sectors_32, [1, 0, 2])
    return sectors, sectors_32
|
config='''import os, sys, re, clr, math
try:
dll_dir='C:/Program Files/AnsysEM/AnsysEM19.3/Win64/common/IronPython/DLLs'
if not os.path.isdir(dll_dir):
raise Exception
except:
m=re.search('(.*Win64)', __file__)
dll_dir=m.group(1)+'/common/IronPython/DLLs'
finally:
sys.path.append(dll_dir)
clr.AddReference('IronPython.Wpf')
import wpf
from System.Windows import Window
os.chdir(os.path.dirname(__file__))
'''
exec(config)
oProject = oDesktop.GetActiveProject()
oDesign = oProject.GetActiveDesign()
oEditor = oDesign.SetActiveEditor("3D Modeler")
#Code Start-----------------------------------
class line():
    """A directed 3D segment from v0 to v1 that is consumed head-first."""

    def __init__(self, v0, v1):
        self.v0 = v0
        self.v1 = v1
        self.l = [v1[0] - v0[0], v1[1] - v0[1], v1[2] - v0[2]]
        self.length = math.sqrt(sum([i * i for i in self.l]))

    def _move(self, dl):
        """Advance v0 by dl along the segment; False if dl exceeds what is left."""
        if dl > self.length:
            return False
        start, end = self.v0, self.v1
        frac = dl / self.length
        start = (start[0] + frac * self.l[0], start[1] + frac * self.l[1], start[2] + frac * self.l[2])
        self.v0 = start
        self.l = [end[0] - start[0], end[1] - start[1], end[2] - start[2]]
        self.length = math.sqrt(sum([i * i for i in self.l]))
        return True

    def getLoc(self, dl, offset):
        """Sample points every dl along the segment, starting offset in.

        Returns (points, leftover) where leftover is the remaining length
        after the last sampled point (rounded to 15 decimals).
        """
        result = []
        if not self._move(offset):
            return result, round(self.length, 15)
        result.append(self.v0)
        while self._move(dl):
            result.append(self.v0)
        return result, round(self.length, 15)
def getLocations(x, dl):
    """Sample points every `dl` along the polyline through vertices `x`."""
    sampled = []
    carry = dl
    # Walk consecutive vertex pairs; carry the leftover distance from the
    # previous segment so spacing stays uniform across segment joints.
    for start, end in zip(x[:-1], x[1:]):
        segment = line(start, end)
        points, carry = segment.getLoc(dl, dl - carry)
        sampled.extend(points)
    return sampled
def polylinePoints(plines):
    """Return the vertices of the selected polylines, de-duplicated and
    translated so the first vertex becomes the origin.

    BUGFIX: the vertex coordinates are materialized as a list instead of a
    bare map() object, so the duplicate comparison and the arithmetic below
    also work on CPython 3 (IronPython 2.7's map() already returned a list,
    so behaviour there is unchanged).
    """
    points = []
    for pline in plines:
        for i in oEditor.GetVertexIDsFromObject(pline):
            u = oEditor.GetVertexPosition(i)
            px = [float(c) for c in u]
            # Skip consecutive duplicates (shared endpoints between segments).
            if not (points and px == points[-1]):
                points.append(px)
    p0 = points[0]
    return [[i[0] - p0[0], i[1] - p0[1], i[2] - p0[2]] for i in points]
class MyWindow(Window):
    """WPF dialog that duplicates the selected object along selected polylines.

    Expects the user to have selected the object first, then one or more
    path polylines, before launching the dialog.
    """
    def __init__(self):
        wpf.LoadComponent(self, 'DuplicateAlongPath.xaml')
        oDesktop.ClearMessages("", "", 2)
        try:
            # First selection is the object to copy; the rest are the path.
            selections=oEditor.GetSelections()
            self.path.Text=str(selections[1:])
            self.objects.Text=selections[0]
            self.selections=selections
        except:
            raise Exception('Please Select "object" and then "polyline"!')
    def duplicate_Click(self, sender, e):
        """Button handler: paste one copy of the object at each path location."""
        # (Removed three unused locals that shadowed the widget attributes.)
        unit=oEditor.GetModelUnits()
        points=polylinePoints(self.selections[1:])
        if not self.onVertex.IsChecked:
            # Resample the path at a fixed pitch instead of using raw vertices.
            points=getLocations(points, float(self.pitch.Text))
        # points[0] is the object's original location; copy to the rest.
        for u in points[1:]:
            oEditor.Copy(["NAME:Selections","Selections:=",self.objects.Text])
            name=oEditor.Paste()
            for j in name:
                oEditor.Move(["NAME:Selections","Selections:=",j,"NewPartsModelFlag:=","Model"],["NAME:TranslateParameters","TranslateVectorX:=","{}{}".format(u[0], unit),"TranslateVectorY:=","{}{}".format(u[1], unit),"TranslateVectorZ:=","{}{}".format(u[2], unit)])
#Code End-------------------------------------
# Launch the WPF dialog; blocks until the user closes it.
MyWindow().ShowDialog()
|
<filename>prdl/prdl.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from download import download
import eyed3
from eyed3.id3 import ID3_V2_4
from mutagen.mp3 import MP3
from mutagen.id3 import ID3, APIC, error
import hashlib
import os
from slugify import slugify
import requests
import urllib
import urllib.request
import json
import validators
from clint.textui import puts, colored
from PIL import Image
from lxml import etree
import logging
class PrDlLoggingClass(object):
    """Base class equipping subclasses with a configured `self.log` logger."""
    def __init__(self):
        self.log = logging.getLogger(__name__)
        # BUGFIX: the module-level logger is shared, so unconditionally
        # attaching a StreamHandler here added one handler per instantiation
        # (one PrDlPodcast is built per downloaded file), duplicating every
        # log line. Only attach a handler the first time.
        if not self.log.handlers:
            ch = logging.StreamHandler()
            ch.setLevel(logging.INFO)
            formatter = logging.Formatter('%(asctime)s [%(name)s] - %(levelname)s - %(message)s')
            ch.setFormatter(formatter)
            self.log.addHandler(ch)
        self.log.debug("Logger zainicjowany")
class PrDlPodcast(PrDlLoggingClass):
    """One downloadable podcast: file naming, thumbnail handling, ID3 tagging.

    Built from a `podcast` dict as produced by PrDlCrawl.getPodcasts()
    (keys: url, uid, article_url, title, description, thumb).
    """
    def __init__(self, podcast, track_number = None):
        self.url = podcast['url']
        self.uid = podcast['uid']
        self.article_url = podcast['article_url']
        self.title = podcast['title']
        self.url_hash = self._getUrlHash()
        self.description = podcast['description']
        self.file_name = self.getFileName()
        self.file_size = 0
        self.thumbnail_url = podcast['thumb']
        self.thumbnail_delete_after = False
        self.thumbnail_default_fn = self.getDefaultThumbnail()
        self.setThumbnailFileName()
        self.track_number = track_number
        super().__init__()
    def _getUrlHash(self):
        # Short stable identifier derived from the mp3 URL (first 20 hex chars).
        url_hash = hashlib.md5(self.url.encode("utf-8")).hexdigest()
        return str(url_hash[0:20])
    def getFileName(self, suffix=''):
        """Slugified mp3 name (max 100 chars) with optional numeric suffix."""
        # slugify drops Polish 'ł'/'Ł' entirely, so transliterate them first.
        file_name = slugify(self.title.replace('ł', 'l').replace('Ł', 'L'))
        if len(file_name) > 100:
            file_name = file_name[0:100]
        if len(file_name) == 0:
            # Empty/unsluggable titles fall back to the URL hash.
            file_name = self.url_hash
        if suffix:
            file_name = file_name + "_" + str(suffix)
        file_name = file_name + ".mp3"
        return file_name
    def setThumbnailFileName(self):
        """Decide the on-disk thumbnail name and whether to delete it later."""
        if self.thumbnail_url:
            # Guess the extension from the URL, stripping any query string.
            expr = self.thumbnail_url.split(".")[-1]
            # NOTE(review): str.find() returns -1 (truthy) when '?' is absent,
            # so this branch almost always runs; harmless because split('?')
            # is a no-op without a '?'.
            if expr.find("?"):
                expr = expr.split("?")[0]
            if expr == 'file':
                expr = 'jpg'
            self.thumbnail_mime = 'image/jpeg'
            self.thumbnail_delete_after = True
            self.thumbnail_file_name = self.url_hash + "." + expr
        else:
            # No remote thumbnail: use the bundled default cover and keep it.
            self.thumbnail_delete_after = False
            self.thumbnail_file_name = self.thumbnail_default_fn
    def getDefaultThumbnail(self):
        """Absolute path of the bundled default cover (also sets its MIME)."""
        self.thumbnail_mime = 'image/jpg'
        tpath = os.path.realpath(__file__).split('/')
        tpath.pop()
        tpath.append('polskieradio_logo_cover.jpg')
        tpath = '/'.join(tpath)
        return tpath
    def downloadThumbnail(self):
        """Fetch the remote thumbnail and center it onto the default cover."""
        fpath = os.getcwd() + '/' + str(self.thumbnail_file_name).strip()
        if (os.path.isfile(fpath)):
            os.remove(fpath)
        if self.thumbnail_url:
            urllib.request.urlretrieve(self.thumbnail_url, fpath)
            size = (200, 200)
            image = Image.open(fpath)
            # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
            # Image.LANCZOS there) — confirm the pinned Pillow version.
            image.thumbnail(size, Image.ANTIALIAS)
            background = Image.open(self.thumbnail_default_fn)
            background.paste(
                image, (int((size[0] - image.size[0]) / 2), int((size[1] - image.size[1]) / 2))
            )
            background.save(self.thumbnail_file_name)
    def addThumbnail(self):
        """Embed the thumbnail as an ID3 APIC cover frame in the mp3."""
        if (os.path.isfile(self.file_name)):
            try:
                audio = MP3(self.file_name, ID3=ID3)
                if (os.path.isfile(self.thumbnail_file_name)):
                    thumbf = open(self.thumbnail_file_name, 'rb')
                    audio.tags.add(
                        APIC(
                            encoding=3,  # UTF-8
                            mime=self.thumbnail_mime,
                            type=3,  # front cover
                            desc='Cover',
                            data=thumbf.read()
                        )
                    )
                    audio.save()
                    if self.thumbnail_delete_after:
                        os.remove(self.thumbnail_file_name)
            except Exception as e:
                self.log.error(e)
    def id3tag(self):
        """Write artist/album/title/comment tags; delete files eyed3 rejects."""
        if (os.path.isfile(self.file_name)):
            audiofile = eyed3.load(self.file_name)
            if audiofile:
                audiofile.tag = eyed3.id3.Tag()
                audiofile.tag.file_info = eyed3.id3.FileInfo(self.file_name)
                comments = "{}\nUrl artykułu: {}\nUrl pliku mp3: {}\n\nPobrane przy pomocy skryptu https://github.com/bohdanbobrowski/pr-dl".format(
                    self.description,
                    self.article_url,
                    self.url
                )
                audiofile.tag.comments.set(comments)
                audiofile.tag.artist = "Polskie Radio"
                audiofile.tag.album = "polskieradio.pl"
                audiofile.tag.genre = "Speech"
                audiofile.tag.title = self.title
                audiofile.tag.audio_file_url = self.url
                if self.track_number:
                    audiofile.tag.track_num = self.track_number
                audiofile.tag.save(version=eyed3.id3.ID3_DEFAULT_VERSION,encoding='utf-8')
            else:
                # eyed3 could not load it: not a valid mp3 — clean up the
                # downloaded file and its thumbnail.
                if audiofile is None:
                    os.remove(self.file_name)
                if os.path.isfile(self.thumbnail_file_name):
                    os.remove(self.thumbnail_file_name)
class PrDl(PrDlLoggingClass):
    """Base downloader: interactive save confirmation plus shared helpers."""
    def __init__(self, phrase, save_all = False, forced_search = False):
        self.phrase = phrase.lower()
        self.forced_search = forced_search
        self.save_all = save_all
        super().__init__()
    def getKey(self):
        """Read a single keypress without waiting for Enter (Windows/POSIX)."""
        if os.name == 'nt':
            from msvcrt import getch
            ch = getch()
        else:
            import sys, tty, termios
            fd = sys.stdin.fileno()
            old_settings = termios.tcgetattr(fd)
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
    def confirmSave(self, answer):
        """Return 1 to save; `answer == 1` skips the prompt, 'z' aborts."""
        if (answer == 1):
            return 1
        puts(colored.red("Czy zapisać podcast? ([t]ak / [n]ie / [z]akoncz)"))
        key = self.getKey()
        if key == 'z' or key == 'Z':
            self.log.info("Przerwano na polecenie użytkownika")
            exit()
        if key == 't' or key == 'T':
            return 1
        return 0
    def get_resource_path(self, rel_path):
        """Absolute path of a resource shipped next to this module."""
        dir_of_py_file = os.path.dirname(__file__)
        rel_path_to_resource = os.path.join(dir_of_py_file, rel_path)
        abs_path_to_resource = os.path.abspath(rel_path_to_resource)
        return abs_path_to_resource
    def downloadPodcast(self, podcast, current=0, total=0):
        """Download one podcast dict, tag it and attach its thumbnail.

        `current`/`total` only drive progress display and track numbering.
        """
        # BUGFIX: the format arguments were (current, podcast, podcast);
        # the second placeholder of "[{}/{}]: {}" is the total count.
        self.log.debug("Znaleziono podcast [{}/{}]: {}".format(current, total, podcast))
        podcast = PrDlPodcast(podcast, track_number=current)
        puts(colored.blue('[' + str(current) + '/' + str(total) + ']'))
        puts(colored.white('Tytuł: ' + podcast.title, bold=True))
        puts(colored.white('Link: ' + podcast.url))
        puts(colored.white('Plik: ' + podcast.file_name))
        if podcast.thumbnail_url:
            puts(colored.white('Miniaturka: ' + podcast.thumbnail_url))
        # Never overwrite an existing file: append a numeric suffix instead.
        x = 1
        while os.path.isfile(podcast.file_name):
            podcast.file_name = podcast.getFileName(x)
            x += 1
        if (self.confirmSave(self.save_all) == 1):
            download(podcast.url, './' + podcast.file_name)
            podcast.id3tag()
            podcast.downloadThumbnail()
            podcast.addThumbnail()
    def getWebPageContent(self, url):
        """Fetch `url` and return the response body as text."""
        response = requests.get(url)
        return response.text
class PrDlSearch(PrDl):
    """Search polskieradio.pl and download every matching podcast."""
    def getFiles(self, results):
        """Crawl each search hit and collect its audio files keyed by uid."""
        # Najpierw szukam w responseach plików dźwiekowych
        files = {}
        for r in results:
            crawl = PrDlCrawl("https://www.polskieradio.pl{}".format(r['url']), self.save_all)
            files_on_page = crawl.getPodcastsList()
            if r['image']:
                # Fall back to the search result's image for files that
                # carry no thumbnail of their own.
                default_thumb = "https:{}".format(r['image'])
                for f in files_on_page:
                    if not files_on_page[f]['thumb']:
                        files_on_page[f]['thumb'] = default_thumb
            for f in files_on_page:
                # Unless forced_search is set, keep only titles containing
                # the searched phrase.
                if not self.forced_search or self.phrase in files_on_page[f]['title'].lower():
                    files[f] = files_on_page[f]
        return files
    def _get_search_url(self, page =1):
        """Search API URL for the given result page (50 hits per page)."""
        # NOTE(review): the lone '%' before the quoted phrase looks like it
        # was meant to be '%22' (matching the trailing '%22'); left as-is to
        # preserve current behaviour — verify against the search API.
        search_url = 'https://portalsearch.polskieradio.pl/api/search?pageSize=50&page=' + str(page) + '&query=%' + urllib.parse.quote(self.phrase.replace(" ", "+")) + '%22'
        self.log.info("Pobieram: {}".format(search_url))
        return search_url
    def downloadPodcastsList(self, podcasts):
        """Download the podcasts one by one with a running [n/total] counter."""
        a = 1
        for k in podcasts:
            self.downloadPodcast(podcasts[k], a, len(podcasts))
            a += 1
    def start(self):
        """Run the search, then page through all result pages and download."""
        response = json.loads(urllib.request.urlopen(self._get_search_url()).read())
        # BUGFIX: round() undercounted pages (round(51/50) == 1 dropped the
        # partial second page); use ceiling division instead.
        pages = -(-int(response['count']) // int(response['pageSize']))
        podcasts = self.getFiles(response['results'])
        self.downloadPodcastsList(podcasts)
        if pages > 1:
            # BUGFIX: range(2, pages) skipped the final page; the upper
            # bound must be inclusive.
            for p in range(2, pages + 1):
                self.log.info("Strona {} z {}:".format(p, pages))
                response = json.loads(urllib.request.urlopen(self._get_search_url(p)).read())
                podcasts = self.getFiles(response['results'])
                self.downloadPodcastsList(podcasts)
class PrDlCrawl(PrDl):
    """Crawl a single polskieradio.pl article page and download its podcasts."""
    def __init__(self, url, save_all = False):
        self.url = url
        self.save_all = save_all
        # NOTE(review): deliberately bypasses PrDlLoggingClass.__init__, so
        # no handler is attached here; messages surface only via a handler
        # configured elsewhere — confirm this is intentional.
        self.log = logging.getLogger(__name__)
        self.log.setLevel(logging.DEBUG)
    def getFilename(self, title):
        """Build a filesystem-safe base name from a podcast title."""
        # (Removed two dead `title = title.replace(...)` statements: `fname`
        # was copied before them and `title` was never read afterwards.)
        fname = title
        # Drop decorative punctuation entirely...
        for x in ['"','„','”','…','?','(',')']:
            fname = fname.replace(x, '')
        # ...and turn separators into underscores.
        for y in [':',' ','/','"','.',',']:
            fname = fname.replace(y, '_')
        # Collapse repeated separators.
        while '__' in fname:
            fname = fname.replace('__', '_')
        while '_-_' in fname:
            fname = fname.replace('_-_', '-')
        fname = fname.strip('_')
        return fname
    def getArticles(self, html_dom):
        """ Get articles - web page parts, with attached podcasts
        """
        articles = html_dom.xpath("//article")
        articles += html_dom.xpath("//div[contains(@class, 'atarticle')]")
        return articles
    def getThumb(self, html_dom, art):
        """Best-effort thumbnail URL for an article, or None."""
        thumb = None
        try:
            thumb = "https:"+art.xpath(".//img[contains(@class, 'NoRightBtn')]")[0].get("src")
        except Exception:
            pass
        if thumb is None:
            # Fallback: some layouts carry the image as an inline
            # background-image style on a span.
            try:
                thumb = html_dom.xpath(".//span[contains(@class, 'img')]")[0].get("style")
                thumb = thumb.replace('background-image:url(', 'https:')
                thumb = thumb.replace(');', '')
            except Exception:
                pass
        if thumb and validators.url(thumb):
            return thumb
        return None
    def getPodcasts(self, html_dom, article_url = ''):
        """Extract all data-media podcasts from the page, keyed by md5(url)."""
        result = {}
        html_title = html_dom.xpath("//title")[0].text.strip()
        for art in self.getArticles(html_dom):
            podcast = art.xpath(".//*[@data-media]")
            thumb = self.getThumb(html_dom, art)
            for p in podcast:
                try:
                    pdata_media = json.loads(p.attrib.get('data-media'))
                    uid = hashlib.md5(pdata_media['file'].encode('utf-8')).hexdigest()
                    try:
                        title = art.xpath(".//h1[contains(@class, 'title')]")[0].text.strip()
                        if not title:
                            title = art.xpath(".//h1[contains(@class, 'title')]/a")[0].text.strip()
                    except Exception:
                        # No article heading: derive the title from the page
                        # title plus the media's own (URL-encoded) title.
                        title = html_title + " - " + urllib.parse.unquote(pdata_media['title']).strip()
                    try:
                        description = urllib.parse.unquote(pdata_media['desc']).strip()
                    except Exception:
                        description = title
                    result[uid] = {
                        'url': "https:" + pdata_media['file'],
                        'uid': uid,
                        'article_url': article_url,
                        'title': title,
                        'description': description,
                        'fname': self.getFilename(title),
                        'thumb': thumb
                    }
                except Exception as e:
                    self.log.error(e)
        return result
    def getPodcastsList(self):
        """Fetch self.url and return its podcasts ({} on any failure)."""
        self.log.info("Analizowany url: {}".format(self.url))
        # BUGFIX: getPodcasts() returns a dict, so the failure fallback must
        # be a dict too (callers iterate keys and index by key).
        downloads_list = {}
        try:
            html = self.getWebPageContent(self.url)
            downloads_list = self.getPodcasts(etree.HTML(html), self.url)
        except Exception as e:
            self.log.error(e)
        return downloads_list
    def start(self):
        """Download every podcast found on the page."""
        podcasts_list = self.getPodcastsList()
        a = 1
        for k in podcasts_list:
            self.downloadPodcast(podcasts_list[k], a, len(podcasts_list))
            a += 1
|
<gh_stars>0
# --------------
import pandas as pd
from sklearn import preprocessing
#path : File path (expected to be defined by the execution environment)
# Code starts here
# read the dataset
dataset = pd.read_csv(path)
# look at the first five rows
dataset.head()
# Check if there's any column which is not useful and remove it like the column id
dataset = dataset.drop('Id',axis=1)
dataset.head()
# check the statistical description
dataset.describe()
# --------------
# We will visualize all the attributes using Violin Plot - a combination of box and density plots
import seaborn as sns
from matplotlib import pyplot as plt
#names of all the attributes
cols = dataset.columns.tolist()
print(cols)
# target attribute (last column)
x = cols[-1]
print(x)
# BUGFIX: the feature list is meant to exclude the target, but the original
# slice cols[0:len(cols)] kept every column; drop the last (target) column.
y = cols[0:len(cols) - 1]
#number of attributes (exclude target)
size = len(y)
#x-axis has target attribute to distinguish between classes
#y-axis shows values of an attribute
#Plot violin for all attributes
for i in y:
    sns.violinplot(dataset[i])
    plt.show()
# --------------
import numpy
threshold = 0.5
# no. of features considered after ignoring categorical variables
num_features = 10
# create a subset of dataframe with only 'num_features'
subset_train = dataset.iloc[:,0:11]
cols = subset_train.columns.tolist()
#Calculate the pearson co-efficient for all possible combinations
data_corr = subset_train.corr()
sns.heatmap(data_corr, annot=True)
# Keep pairs above the threshold; the upper bound (< 1) masks the diagonal
# (self-correlation) to NaN.
# CONSISTENCY FIX: use the declared `threshold` instead of a hard-coded 0.5.
corr_var_list=data_corr[(data_corr.abs() > threshold) & (data_corr.abs() < 1)]
corr_var_list.dropna(how='all',inplace=True)
# NOTE(review): dropna can shrink the row index, which would desync cols[i]
# from iloc[i] below — verify all rows survive for this dataset.
print(corr_var_list)
temp_list=[]
# Collect the lower triangle only (i >= j) so each pair appears once;
# NaN entries (below-threshold or diagonal) are skipped.
for i in range(0,corr_var_list.shape[0]):
    for j in range(0,corr_var_list.shape[0]):
        if(i >= j):
            if not pd.isnull(corr_var_list.iloc[i][j]):
                temp_list.append([corr_var_list.iloc[i][j],cols[i],cols[j]])
corr_var_list=temp_list
# Sort the list showing higher (absolute) correlations first
s_corr_list=sorted(corr_var_list,key=lambda lst: -abs(lst[0]))
#Print correlations and column names
print("Correlations: ")
for corr,i,j in s_corr_list:
    print ("%s and %s = %.2f" % (j,i,corr))
# --------------
#Import libraries
# NOTE: `from sklearn import cross_validation` removed — that module was
# dropped in scikit-learn 0.20 and its only use below is commented out;
# train_test_split (imported next) covers the same need.
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# Identify the unnecessary columns and remove it
dataset.drop(columns=['Soil_Type7', 'Soil_Type15'], inplace=True)
X=dataset.drop('Cover_Type',axis=1)
Y=dataset['Cover_Type']
X_train, X_test, Y_train, Y_test = train_test_split(X,Y,random_state=0,test_size=0.2)
#X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y, train_size=0.2, random_state=0)
# Scales are not the same for all variables. Hence, rescaling and standardization may be necessary for some algorithm to be applied on it.
#Standardized
#Apply transform only for non-categorical data (first 10 columns)
scale=StandardScaler()
X_train_temp = scale.fit_transform(X_train.iloc[:,:10])
# BUGFIX: the test set must reuse the training-set statistics (transform),
# not refit on itself (fit_transform) — otherwise train/test scales differ.
X_test_temp = scale.transform(X_test.iloc[:,:10])
#Concatenate non-categorical data and categorical
X_train1=numpy.concatenate([X_train_temp,X_train.iloc[:,10:]],axis=1)
X_test1=numpy.concatenate([X_test_temp,X_test.iloc[:,10:]],axis=1)
scaled_features_train_df = pd.DataFrame(X_train1, index=X_train.index, columns=X_train.columns)
scaled_features_test_df = pd.DataFrame(X_test1, index=X_test.index, columns=X_test.columns)
# --------------
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import f_classif
# Write your solution here:
skb=SelectPercentile(score_func=f_classif,percentile=20)
predictors=skb.fit_transform(X_train1,Y_train)
scores=predictors.tolist()
#print(scores)
top_k_index=skb.get_support(indices=True)
print(top_k_index)
top_k_predictors=predictors[top_k_index]
print(top_k_predictors)
# --------------
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, precision_score
# Two one-vs-rest logistic regressions: clf for the top-k features,
# clf1 as a baseline on all features.
clf=OneVsRestClassifier(LogisticRegression())
clf1=OneVsRestClassifier(LogisticRegression())
# Baseline: fit on all (unscaled) features.
model_fit_all_features = clf1.fit(X_train,Y_train)
predictions_all_features = clf1.predict(X_test)
score_all_features = accuracy_score(Y_test,predictions_all_features)
print("score_all_features: ",score_all_features)
print("top_k_predictors: ",top_k_predictors)
print(scaled_features_train_df.columns[skb.get_support()])
print(scaled_features_train_df.loc[:,skb.get_support()])
# Restrict the scaled frames to the features kept by SelectPercentile.
X_train_top_k = scaled_features_train_df.loc[:,skb.get_support()]
X_test_top_k = scaled_features_test_df.loc[:,skb.get_support()]
model_fit_top_features = clf.fit(X_train_top_k,Y_train)
predictions_top_features= clf.predict(X_test_top_k)
score_top_features = accuracy_score(Y_test,predictions_top_features)
print(score_top_features)
|
<gh_stars>1-10
import csv
import io
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, List, Set, Any, Tuple, Iterable
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.timezone import now
from pytz import timezone
from annotation.citations import get_citations, CitationDetails
from annotation.models import CitationSource, Citation
from annotation.views import simple_citation_html
from classification.enums.classification_enums import SpecialEKeys
from classification.models import ClassificationGroups, ClassificationModification
from classification.models.evidence_key import EvidenceKeyMap
from classification.views.classification_export_utils import ExportFormatter, \
AlleleGroup, ConflictStrategy
from library.django_utils import get_url_from_view_path
@dataclass(frozen=True, eq=True)
class CitationStub:
    """Lightweight, hashable (source, id) reference to a citation.

    Ordering is by source first, then by id compared with zero-padding to
    width 10 so numeric ids sort naturally ("9" < "10").
    """
    source: str
    idx: str
    def __lt__(self, other):
        if self.source != other.source:
            return self.source < other.source
        return self.idx.rjust(10, '0') < other.idx.rjust(10, '0')
class CitationCounter:
    """Aggregates citation references across classification records,
    remembering which labs referenced each citation."""
    def __init__(self):
        # citation stub -> set of lab names that referenced it
        self.all_citations: Dict[CitationStub, Set[str]] = defaultdict(set)
    def reference_citations(self, cm: ClassificationModification):
        """Record every recognised citation in `cm`'s db_refs for its lab."""
        for db_ref in cm.db_refs:
            db = db_ref.get('db')
            # Only dbs that map to a known CitationSource code are counted.
            if source := CitationSource.CODES.get(db):
                idx = db_ref.get('idx')
                stub = CitationStub(source=source, idx=idx)
                self.all_citations[stub].add(cm.classification.lab.name)
    def ordered_references(self) -> Iterable[Tuple[CitationDetails, List[Any]]]:
        """Yield (citation details, sorted lab names) for each recorded stub."""
        citations: List[Citation] = list()
        for stub in list(self.all_citations.keys()):
            # TODO do this in bulk
            citation, _ = Citation.objects.get_or_create(citation_source=stub.source, citation_id=stub.idx)
            citations.append(citation)
        details = get_citations(citations)
        for citation_detail in details:
            # Map the detail back to its stub to recover the referencing labs.
            stub = CitationStub(CitationSource.CODES.get(citation_detail.source), citation_detail.citation_id)
            references = list(self.all_citations[stub])
            references.sort()
            yield citation_detail, references
class ExportFormatterMVL(ExportFormatter):
    """
    Formats classifications for Agilent 5.2 MVL usage
    """
    @property
    def version(self):
        # Export format version string embedded in generated files.
        return '3'
    @property
    def use_full_chgvs(self):
        return True
    def __init__(self, conflict_strategy: str, cs_override_labels: Dict[str, str], *args, **kwargs):
        """`conflict_strategy` picks the winning record when classifications
        on one transcript disagree; `cs_override_labels` remaps the default
        clinical-significance -> MVL label translation."""
        super().__init__(*args, **kwargs)
        self.conflict_strategy = conflict_strategy
        self.cs_translator = {
            'B': 'BENIGN',
            'LB': 'LIKELY_BENIGN',
            'VUS': 'VOUS',
            'LP': 'LIKELY_PATHOGENIC',
            'P': 'PATHOGENIC'
        }
        self.cs_translator.update(cs_override_labels)
        # VUS label will be used as the default for any unknown values
        # useful for VUS_A, VUS_B, VUS_C
        # slightly dodgy (but only option) for Risk Factor, Drug Response etc
        self.vous_label = self.cs_translator.get('VUS')
    # Numeric severity used to resolve conflicts (higher = more pathogenic).
    RAW_SCORE = {
        'B': 1,
        'LB': 2,
        'VUS': 3,
        'VUS_A': 3,
        'VUS_B': 3,
        'VUS_C': 3,
        'LP': 4,
        'P': 5
    }
    # Severity assigned to clinical significances missing from RAW_SCORE.
    DEFAULT_SCORE = 3
    def header(self) -> str:
        return '\t'.join(['transcript', 'c_nomen', 'classification', 'variant information', 'report abstract']) + '\n'
    def row(self, group: AlleleGroup) -> str:
        """Render the MVL TSV rows (one per transcript/c.HGVS) for an allele
        group, resolving conflicting records per the conflict strategy and
        appending warnings, grouped details and citations as HTML."""
        out = io.StringIO()
        writer = csv.writer(out, delimiter='\t')
        date_str = now().astimezone(tz=timezone(settings.TIME_ZONE)).strftime("%Y-%m-%d")
        url = get_url_from_view_path(group.target_variant.get_absolute_url()) + f'?refer=mvl&seen={date_str}'
        variant_details = f'<a href="{url}" target="_blank">Click here for up-to-date classifications on this variant.</a>'
        for c_parts, vcms_w_chgvs in group.iter_c_hgvs_versionless_transcripts():
            transcript = c_parts.transcript
            c_hgvs = c_parts.raw_c
            warnings: List[str] = []
            using_classification_score = None
            classification = ''
            different_strengths = set()
            has_special_cs = False
            discordant_count = 0
            has_diff_chgvs = False
            for vcm_w_chgvs in vcms_w_chgvs:
                vcm = vcm_w_chgvs.vcm
                vc = vcm.classification
                if self.is_discordant(vc):
                    discordant_count += 1
                raw_classification = vcm.get(SpecialEKeys.CLINICAL_SIGNIFICANCE)
                label = EvidenceKeyMap.cached_key(SpecialEKeys.CLINICAL_SIGNIFICANCE).pretty_value(raw_classification) or 'Unclassified'
                different_strengths.add(label)
                this_classification = self.cs_translator.get(raw_classification, self.vous_label)
                this_classification_score = ExportFormatterMVL.RAW_SCORE.get(raw_classification, ExportFormatterMVL.DEFAULT_SCORE)
                has_special_cs = has_special_cs or raw_classification not in ExportFormatterMVL.RAW_SCORE
                # Pick the winning classification per the conflict strategy
                # (lowest score = most benign, highest = most pathogenic).
                if using_classification_score is None or \
                        (self.conflict_strategy == ConflictStrategy.MOST_BENIGN and this_classification_score < using_classification_score) or \
                        (self.conflict_strategy == ConflictStrategy.MOST_PATHOGENIC and this_classification_score > using_classification_score):
                    using_classification_score = this_classification_score
                    classification = this_classification
                if vcm_w_chgvs.chgvs.raw_c != c_hgvs:
                    has_diff_chgvs = True
            groups = ClassificationGroups(classification_modifications=[cnchgvs.vcm for cnchgvs in vcms_w_chgvs], genome_build=self.genome_build)
            groups_html = render_to_string('classification/classification_groups_mvl.html', {"groups": groups}).replace('\n', '').strip()
            citation_counter = CitationCounter()
            # NOTE(review): this loop variable shadows the `group` parameter;
            # safe today only because the outer iterator was created before
            # the reassignment — consider renaming.
            for group in groups:
                citation_counter.reference_citations(group.most_recent)
            if has_diff_chgvs:
                warnings.append('Warning <b>c.hgvs representations are different across transcript versions</b>')
            if has_special_cs:
                warnings.append('Warning <b>Contains non-standard clinical significance</b>')
            if discordant_count:
                if discordant_count == 1:
                    warnings.append(f'Warning <b>1 record is in discordance</b>')
                else:
                    warnings.append(f'Warning <b>{discordant_count} records are in discordance</b>')
            if len(different_strengths) > 1:
                strength_list = list(different_strengths)
                strength_list.sort()
                strength_list = ', '.join(strength_list)
                warnings.append(f'Warning <b>Multiple clinical significances recorded for this transcript : {strength_list}</b>')
            warning_text = '<br>'.join(warnings)
            citations_html = "<br><b>Citations Latest</b>:<br>"
            has_citation = False
            for citation, labs in citation_counter.ordered_references():
                has_citation = True
                references = ", ".join(labs)
                citations_html += f"<p>{simple_citation_html(citation)}<br><i>Referenced by</i>: {references}</p>"
            if not has_citation:
                citations_html += "No citations provided"
            combined_data = f'{warning_text}{groups_html}<p>Data as of {date_str} <a href="{url}" target="_blank">Click here for up-to-date classifications on this variant.</a></p>{citations_html}'
            self.row_count += 1
            writer.writerow([transcript, c_hgvs, classification, combined_data, variant_details])
        return out.getvalue()
    def filename(self) -> str:
        return self.generate_filename(suffix='mvl', extension='tsv')
|
from dataclasses import dataclass
from enum import Enum
import logging
import re
import sre_constants
import sre_parse
import typing
import z3 # type: ignore
# Z3 Node Constants
# Each label map is modelled as an uninterpreted function key -> value,
# with a companion key-membership predicate key -> bool.
app_labels = z3.Function('app_labels', z3.StringSort(), z3.StringSort())
app_label_keys = z3.Function('app_label_keys', z3.StringSort(), z3.BoolSort())
node_labels = z3.Function('node_labels', z3.StringSort(), z3.StringSort())
node_label_keys = z3.Function('node_label_keys', z3.StringSort(), z3.BoolSort())
k8s_labels = z3.Function('k8s_labels', z3.StringSort(), z3.StringSort())
k8s_label_keys = z3.Function('k8s_label_keys', z3.StringSort(), z3.BoolSort())
db_labels = z3.Function('db_labels', z3.StringSort(), z3.StringSort())
db_label_keys = z3.Function('db_label_keys', z3.StringSort(), z3.BoolSort())
# Spec section name -> (labels function, label-keys predicate) pair.
entity_types = {
    'app_labels' : (app_labels, app_label_keys),
    'node_labels' : (node_labels, node_label_keys),
    'kubernetes_labels' : (k8s_labels, k8s_label_keys),
    'db_labels' : (db_labels, db_label_keys)
}
class EntityType(Enum):
    """Resource kinds a role can constrain; each member's value is the
    (labels, label_keys) Z3 function pair for that kind."""
    APP = (app_labels, app_label_keys)
    NODE = (node_labels, node_label_keys)
    K8S = (k8s_labels, k8s_label_keys)
    DB = (db_labels, db_label_keys)
def other_entity_types(entity_type : EntityType) -> list[EntityType]:
    """Return every entity type except the given one."""
    return [member for member in EntityType if member != entity_type]
# Z3 User Constants
# User trait maps are modelled as (trait_key, value) -> bool membership
# predicates, one per user provenance.
internal_traits = z3.Function(
    'internal_traits',
    z3.StringSort(),
    z3.StringSort(),
    z3.BoolSort()
)
external_traits = z3.Function(
    'external_traits',
    z3.StringSort(),
    z3.StringSort(),
    z3.BoolSort()
)
# Template prefix ('internal'/'external') -> its traits function.
template_types = {
    'internal' : internal_traits,
    'external' : external_traits
}
class UserType(Enum):
    """User provenance; each member's value is the matching Z3 traits function."""
    INTERNAL = internal_traits
    EXTERNAL = external_traits
def other_user_type(user_type : UserType) -> UserType:
    """Return the complementary user type (INTERNAL <-> EXTERNAL)."""
    if user_type == UserType.EXTERNAL:
        return UserType.INTERNAL
    if user_type == UserType.INTERNAL:
        return UserType.EXTERNAL
    raise ValueError(f'Invalid user type {user_type}')
@dataclass
class AnyValueConstraint:
    """Wildcard '*' constraint matching any value."""
    value : str
@dataclass
class StringConstraint:
    """Exact string-literal constraint."""
    value : str
@dataclass
class RegexConstraint:
    """Constraint given as a (pre-parsed) regular expression."""
    regex : sre_parse.SubPattern
@dataclass
class UserTraitConstraint:
    """{{internal.key}} / {{external.key["inner"]}} template constraint."""
    trait_type : str
    trait_key : str
    inner_trait_key : str
@dataclass
class InterpolationConstraint:
    """prefix{{type.key}}suffix interpolation constraint."""
    prefix : str
    trait_type : str
    trait_key : str
    inner_trait_key : str
    suffix : str
@dataclass
class EmailFunctionConstraint:
    """{{email.local(type.key)}} function constraint."""
    trait_type : str
    trait_key : str
    inner_trait_key : str
@dataclass
class RegexReplaceFunctionConstraint:
    """{{regexp.replace(type.key, pattern, replace)}} function constraint."""
    trait_type : str
    trait_key : str
    inner_trait_key : str
    pattern : str
    replace : str
# Attempts to parse the given value as a regex.
def try_parse_regex(value : str) -> typing.Optional[RegexConstraint]:
    """Return a RegexConstraint when `value` contains regex metacharacters;
    plain literal strings (and unparseable values) yield None."""
    try:
        parsed = sre_parse.parse(value)
        has_metacharacters = any(
            node_type != sre_constants.LITERAL
            for node_type, _ in parsed.data
        )
        if has_metacharacters:
            return RegexConstraint(parsed)
        return None
    except Exception as e:
        logging.debug(f'Cannot parse regex {value} - {e}')
        return None
# Regex pattern for {{internal.logins}} or {{external.email}} type template values.
template_value_pattern = re.compile(r'\{\{(?P<type>internal|external)\.(?P<key>[\w]+)(\["(?P<inner_key>[\w]+)"\])?\}\}')
# Attempts to parse template constraints of type {{internal.logins}}
def try_parse_template(value : str) -> typing.Optional[UserTraitConstraint]:
    """Parse a {{internal.x}} / {{external.x["y"]}} template into a
    UserTraitConstraint; return None when `value` is not a template."""
    match = template_value_pattern.match(value)
    if match is None:
        return None
    return UserTraitConstraint(
        match.group('type'),
        match.group('key'),
        match.group('inner_key'),
    )
# Regex pattern for IAM#{{internal.logins}}#user type interpolation values.
interpolation_value_pattern = re.compile(r'(?P<prefix>.*)\{\{(?P<type>internal|external)\.(?P<key>[\w]+)(\["(?P<inner_key>[\w]+)"\])?\}\}(?P<suffix>.*)')
# Attempts to parse interpolation constraints of type IAM#{external.foo}
def try_parse_interpolation(value : str) -> typing.Optional[InterpolationConstraint]:
    """Parse prefix{{type.key}}suffix interpolations into an
    InterpolationConstraint; return None when the shape doesn't match."""
    match = interpolation_value_pattern.match(value)
    if match is None:
        return None
    return InterpolationConstraint(
        match.group('prefix'),
        match.group('type'),
        match.group('key'),
        match.group('inner_key'),
        match.group('suffix'),
    )
# Regex pattern for {{email.local(external.email)}}
email_function_value_pattern = re.compile(r'\{\{email\.local\([\s]*(?P<type>internal|external)\.(?P<key>[\w]+)(\["(?P<inner_key>[\w]+)"\])?[\s]*\)\}\}')
# Attempts to parse email function constraints of type {{email.local(external.email)}}
def try_parse_email_function(value : str) -> typing.Optional[EmailFunctionConstraint]:
    """Parse an {{email.local(type.key)}} expression into an
    EmailFunctionConstraint; return None when it doesn't match."""
    match = email_function_value_pattern.match(value)
    if match is None:
        return None
    return EmailFunctionConstraint(
        match.group('type'),
        match.group('key'),
        match.group('inner_key'),
    )
# Regex pattern for {{regexp.replace(external.access["env"], "^(staging)$", "$1")}}
regex_function_value_pattern = re.compile(r'\{\{regexp\.replace\([\s]*(?P<type>internal|external)\.(?P<key>[\w]+)(\["(?P<inner_key>[\w]+)"\])?[\s]*,[\s]*"(?P<pattern>.*)"[\s]*,[\s]*"(?P<replace>.*)"[\s]*\)\}\}')
# Attempts to parse regexp replace function constraints of type {{regexp.replace(external.access, "foo", "bar")}}
def try_parse_regexp_replace_function(value : str) -> typing.Optional[RegexReplaceFunctionConstraint]:
    """Parse a {{regexp.replace(type.key, pattern, replace)}} expression into
    a RegexReplaceFunctionConstraint; return None when it doesn't match."""
    match = regex_function_value_pattern.match(value)
    if match is None:
        return None
    return RegexReplaceFunctionConstraint(
        match.group('type'),
        match.group('key'),
        match.group('inner_key'),
        match.group('pattern'),
        match.group('replace'),
    )
# Determines whether the given constraint requires user traits to specify.
def requires_user_traits(values : typing.Union[str, list[str]]) -> bool:
    """True when any value contains a template, interpolation or function
    construct that references user traits ({{internal.x}}, {{email.local(...)}},
    {{regexp.replace(...)}}), i.e. cannot be evaluated without a user.

    A single string is treated as a one-element list.
    """
    if not isinstance(values, list):
        values = [values]
    # IDIOM FIX: use `is not None` (not `!= None`) and short-circuit with
    # any() instead of building a list of booleans per value.
    return any(
        try_parse_template(value) is not None
        or try_parse_interpolation(value) is not None
        or try_parse_email_function(value) is not None
        or try_parse_regexp_replace_function(value) is not None
        for value in values
    )
# Determines the category of the constraint value and parses it appropriately.
def parse_constraint(
    value : str
) -> typing.Union[
    AnyValueConstraint,
    StringConstraint,
    RegexConstraint,
    UserTraitConstraint,
    InterpolationConstraint,
    EmailFunctionConstraint,
    RegexReplaceFunctionConstraint]:
    """Classify `value` into the most specific constraint type.

    Tries, in order: wildcard, user-trait template, interpolation, email
    function, regexp.replace function, regex; falls back to a plain string.
    """
    if '*' == value:
        return AnyValueConstraint(value)
    # Each parser returns its constraint object on success and None
    # otherwise; the first hit wins. Order matters: template-like forms must
    # be checked before the generic regex test, which would also match them.
    parsers = (
        try_parse_template,
        try_parse_interpolation,
        try_parse_email_function,
        try_parse_regexp_replace_function,
        try_parse_regex,
    )
    for parser in parsers:
        parsed = parser(value)
        if parsed is not None:
            return parsed
    return StringConstraint(value)
# The Z3 regex matching all strings accepted by re1 but not re2.
# Formatted in camelcase to mimic Z3 regex API.
def Minus(re1 : z3.ReRef, re2 : z3.ReRef) -> z3.ReRef:
    # Set difference expressed as intersection with the complement.
    return z3.Intersect(re1, z3.Complement(re2))
# The Z3 regex matching any character (currently only ASCII supported).
# Formatted in camelcase to mimic Z3 regex API.
def AnyChar() -> z3.ReRef:
    # ASCII range 0..127; switch to z3.AllChar for full Unicode support.
    return z3.Range(chr(0), chr(127))
    #return z3.AllChar(z3.StringSort())
# Defines regex categories in Z3.
def category_regex(category : sre_constants._NamedIntConstant) -> z3.ReRef:
    """Z3 regex for a digit, whitespace or word character class; other
    categories raise NotImplementedError."""
    if sre_constants.CATEGORY_DIGIT == category:
        return z3.Range('0', '9')
    elif sre_constants.CATEGORY_SPACE == category:
        return z3.Union(z3.Re(' '), z3.Re('\t'), z3.Re('\n'), z3.Re('\r'), z3.Re('\f'), z3.Re('\v'))
    elif sre_constants.CATEGORY_WORD == category:
        return z3.Union(z3.Range('a', 'z'), z3.Range('A', 'Z'), z3.Range('0', '9'), z3.Re('_'))
    else:
        raise NotImplementedError(f'ERROR: regex category {category} not yet implemented')
# Translates a specific regex construct into its Z3 equivalent.
def regex_construct_to_z3_expr(regex_construct) -> z3.ReRef:
    """Translate one sre_parse construct — a (node_type, node_value) pair —
    into the equivalent Z3 regular expression.

    Recurses via regex_to_z3_expr for nested sub-patterns. Raises
    NotImplementedError for anchors/boundaries (not expressible as Z3
    regexes here) and any construct not yet handled.
    """
    node_type, node_value = regex_construct
    if sre_constants.LITERAL == node_type: # a
        return z3.Re(chr(node_value))
    if sre_constants.NOT_LITERAL == node_type: # [^a]
        return Minus(AnyChar(), z3.Re(chr(node_value)))
    if sre_constants.SUBPATTERN == node_type:
        # node_value = (group, add_flags, del_flags, subpattern); only the
        # subpattern matters — grouping has no Z3 counterpart, flags ignored.
        _, _, _, value = node_value
        return regex_to_z3_expr(value)
    elif sre_constants.ANY == node_type: # .
        return AnyChar()
    elif sre_constants.MAX_REPEAT == node_type:
        low, high, value = node_value
        if (0, 1) == (low, high): # a?
            return z3.Option(regex_to_z3_expr(value))
        elif (0, sre_constants.MAXREPEAT) == (low, high): # a*
            return z3.Star(regex_to_z3_expr(value))
        elif (1, sre_constants.MAXREPEAT) == (low, high): # a+
            return z3.Plus(regex_to_z3_expr(value))
        else: # a{3,5}, a{3}
            return z3.Loop(regex_to_z3_expr(value), low, high)
    elif sre_constants.IN == node_type: # [abc]
        # A negated class starts with a NEGATE marker as its first member.
        first_subnode_type, _ = node_value[0]
        if sre_constants.NEGATE == first_subnode_type: # [^abc]
            return Minus(AnyChar(), z3.Union([regex_construct_to_z3_expr(value) for value in node_value[1:]]))
        else:
            return z3.Union([regex_construct_to_z3_expr(value) for value in node_value])
    elif sre_constants.BRANCH == node_type: # ab|cd
        _, value = node_value
        return z3.Union([regex_to_z3_expr(v) for v in value])
    elif sre_constants.RANGE == node_type: # [a-z]
        low, high = node_value
        return z3.Range(chr(low), chr(high))
    elif sre_constants.CATEGORY == node_type: # \d, \s, \w
        # Negated categories are encoded as AnyChar minus the category.
        if sre_constants.CATEGORY_DIGIT == node_value: # \d
            return category_regex(node_value)
        elif sre_constants.CATEGORY_NOT_DIGIT == node_value: # \D
            return Minus(AnyChar(), category_regex(sre_constants.CATEGORY_DIGIT))
        elif sre_constants.CATEGORY_SPACE == node_value: # \s
            return category_regex(node_value)
        elif sre_constants.CATEGORY_NOT_SPACE == node_value: # \S
            return Minus(AnyChar(), category_regex(sre_constants.CATEGORY_SPACE))
        elif sre_constants.CATEGORY_WORD == node_value: # \w
            return category_regex(node_value)
        elif sre_constants.CATEGORY_NOT_WORD == node_value: # \W
            return Minus(AnyChar(), category_regex(sre_constants.CATEGORY_WORD))
        else:
            raise NotImplementedError(f'ERROR: regex category {node_value} not implemented')
    elif sre_constants.AT == node_type:
        # Positional assertions match the empty string at a position, which a
        # plain regex-language encoding cannot express — all rejected.
        if node_value in {sre_constants.AT_BEGINNING, sre_constants.AT_BEGINNING_STRING}: # ^a, \A
            raise NotImplementedError(f'ERROR: regex position {node_value} not implemented')
        elif sre_constants.AT_BOUNDARY == node_value: # \b
            raise NotImplementedError(f'ERROR: regex position {node_value} not implemented')
        elif sre_constants.AT_NON_BOUNDARY == node_value: # \B
            raise NotImplementedError(f'ERROR: regex position {node_value} not implemented')
        elif node_value in {sre_constants.AT_END, sre_constants.AT_END_STRING}: # a$, \Z
            raise NotImplementedError(f'ERROR: regex position {node_value} not implemented')
        else:
            raise NotImplementedError(f'ERROR: regex position {node_value} not implemented')
    else:
        raise NotImplementedError(f'ERROR: regex construct {regex_construct} not implemented')
# Translates a parsed regex into its Z3 equivalent.
# The parsed regex is a sequence of regex constructs (literals, *, +, etc.)
def regex_to_z3_expr(regex : sre_parse.SubPattern) -> z3.ReRef:
    """Compile each construct of the parsed pattern and concatenate them."""
    constructs = regex.data
    if not constructs:
        raise ValueError('ERROR: regex is empty')
    parts = [regex_construct_to_z3_expr(construct) for construct in constructs]
    if len(parts) == 1:
        return parts[0]
    return z3.Concat(parts)
# Constructs an expression evaluating whether a specific label constraint
# is satisfied by a given node, database, or k8s cluster.
# Example value for key : value parameters:
#
# 'location' : 'us-east-[\d]+'
# 'owner' : {{external.email}}
#
def matches_value(
    labels : z3.FuncDeclRef,
    key : z3.SeqRef,
    value : str
) -> z3.BoolRef:
    """Compile one label-value constraint into a Z3 boolean expression.

    :param labels: Z3 function mapping label keys to label values.
    :param key: Z3 string expression for the label key being constrained.
    :param value: raw constraint string taken from the role spec.
    :raises NotImplementedError: for constraint forms not yet supported.

    Fix: identity comparisons with None were written ``None != x`` (PEP 8
    E711); rewritten as ``x is not None``.
    """
    constraint = parse_constraint(value)
    # 'key' : '*' — any value is acceptable.
    if isinstance(constraint, AnyValueConstraint):
        return z3.BoolVal(True)
    # 'key' : 'value' — exact string equality.
    elif isinstance(constraint, StringConstraint):
        return labels(key) == z3.StringVal(constraint.value)
    # 'key' : '(ab)*a' — regular expression membership.
    elif isinstance(constraint, RegexConstraint):
        logging.debug(f'Uncompiled regex {constraint.regex}')
        regex = regex_to_z3_expr(constraint.regex)
        logging.debug(f'Compiled regex {regex}')
        return z3.InRe(labels(key), regex)
    # 'key' : '{internal.trait_key}' — label equals some user trait value.
    elif isinstance(constraint, UserTraitConstraint):
        logging.debug(f'User trait constraint of type {constraint.trait_type} on key {constraint.trait_key}[{constraint.inner_trait_key}]')
        if constraint.inner_trait_key is not None:
            raise NotImplementedError(f'Nested trait maps are not supported: {value}')
        user_trait_type = template_types[constraint.trait_type]
        user_trait_key = z3.StringVal(constraint.trait_key)
        return user_trait_type(user_trait_key, labels(key))
    # 'key' : 'prefix#{internal.trait_key}#suffix' — label equals the trait
    # value wrapped in the literal prefix/suffix; encoded as an existential
    # over the trait value.
    elif isinstance(constraint, InterpolationConstraint):
        logging.debug(f'User interpolation constraint of type {constraint.trait_type} on key {constraint.trait_key}[{constraint.inner_trait_key}] with prefix {constraint.prefix} and suffix {constraint.suffix}')
        if constraint.inner_trait_key is not None:
            raise NotImplementedError(f'Nested trait maps are not supported: {value}')
        prefix = z3.StringVal(constraint.prefix)
        suffix = z3.StringVal(constraint.suffix)
        user_trait_type = template_types[constraint.trait_type]
        user_trait_key = z3.StringVal(constraint.trait_key)
        user_trait_value = z3.String(f'{constraint.trait_type}_{constraint.trait_key}')
        is_user_trait_value = user_trait_type(user_trait_key, user_trait_value)
        label_equals_interpolation = labels(key) == z3.Concat(prefix, user_trait_value, suffix)
        return z3.Exists(user_trait_value, z3.And(is_user_trait_value, label_equals_interpolation))
    # 'key' : '{{email.local(external.email)}}' — label equals the part of
    # the trait value before the first '@'.
    elif isinstance(constraint, EmailFunctionConstraint):
        logging.debug(f'Email function constraint of type {constraint.trait_type} on key {constraint.trait_key}[{constraint.inner_trait_key}]')
        if constraint.inner_trait_key is not None:
            raise NotImplementedError(f'Nested trait maps are not supported: {value}')
        user_trait_type = template_types[constraint.trait_type]
        user_trait_key = z3.StringVal(constraint.trait_key)
        user_trait_value = z3.String(f'{constraint.trait_type}_{constraint.trait_key}_email')
        is_user_trait_value = user_trait_type(user_trait_key, user_trait_value)
        index_end_of_local = z3.IndexOf(user_trait_value, z3.StringVal('@'))
        label_equals_email_local = labels(key) == z3.SubString(user_trait_value, z3.IntVal(0), index_end_of_local)
        return z3.Exists(user_trait_value, z3.And(is_user_trait_value, label_equals_email_local))
    # 'key' : '{{regexp.replace(external.access["env"], "^(staging)$", "$1")}}'
    elif isinstance(constraint, RegexReplaceFunctionConstraint):
        logging.debug(f'Regexp replace function constraint of type {constraint.trait_type} on key {constraint.trait_key}[{constraint.inner_trait_key}], replacing {constraint.pattern} with {constraint.replace}')
        raise NotImplementedError(f'Regexp replace function constraint not yet supported given {key} : {value}')
    else:
        raise NotImplementedError(f'Unknown constraint value type {value}; not supported.')
# Constructs an expression evaluating whether a specific label constraint
# is satisfied by a given node, database, or k8s cluster; constraint can
# take the form of a list of permissible values.
# Example value for key : value parameters:
#
# 'env' : ['test', 'prod']
#
def matches_constraint(
    labels : z3.FuncDeclRef,
    label_keys : z3.FuncDeclRef,
    key : str,
    value : typing.Union[str, list[str]]
) -> z3.BoolRef:
    """The key must be present and its label value must satisfy the
    constraint (any one of them, when a list is given)."""
    logging.debug(f'Compiling {key} : {value} constraint')
    if key == '*':
        # '*' : '*' is the only legal wildcard-key form.
        if value != '*':
            raise ValueError(f'Constraint of type \'*\' : {value} is not valid')
        return z3.BoolVal(True)
    key_expr = z3.StringVal(key)
    if isinstance(value, list):
        value_matches = z3.Or([matches_value(labels, key_expr, v) for v in value])
    else:
        value_matches = matches_value(labels, key_expr, value)
    return z3.And(label_keys(key_expr), value_matches)
# Constructs an expression evaluating to whether a given set of label
# requirements are satisfied by a given node, database, or k8s cluster.
# Example value for constraints parameter:
#
# {'env' : ['test', 'prod'], 'location' : 'us-east-[\d]+' }
#
# The constraint_fold parameter is itself a function determining how the
# sub-constraints should be combined (conjunction or disjunction).
#
def matches_constraints(
    constraint_type : str,
    labels : z3.FuncDeclRef,
    label_keys : z3.FuncDeclRef,
    constraints : dict[str, typing.Union[str, list[str]]],
    constraint_fold : typing.Callable
) -> z3.BoolRef:
    logging.debug(f'Compiling {constraint_type} constraints')
    compiled = [
        matches_constraint(labels, label_keys, key, value)
        for key, value in constraints.items()
    ]
    return constraint_fold(compiled)
# Constructs an expression evaluating to whether a given constraint group
# (either Allow or Deny) matches the labels of a given node, database, or
# k8s cluster.
# Example value for group parameter:
#
# node_labels:
#   'env' : 'test'
#   'owner' : '.*<EMAIL>'
# database_labels:
#   'contains_PII' : 'no'
#
# The constraint_fold parameter is itself a function determining how the
# sub-constraints should be combined (conjunction or disjunction).
#
def matches_constraint_group(
    group : dict[str, dict[str, typing.Union[str, list[str]]]],
    constraint_fold : typing.Callable
) -> z3.BoolRef:
    """Disjunction over all entity types: the group matches if the
    constraints it declares for any entity type match.

    Fix: the previous version put Python's ``x in group and ...`` directly
    into the z3.Or list, mixing plain bools with BoolRef and relying on
    z3py's implicit coercion; absent sections are now an explicit Z3 false.
    """
    exprs = []
    for constraint_type, (labels, label_keys) in entity_types.items():
        if constraint_type in group:
            exprs.append(matches_constraints(
                constraint_type, labels, label_keys,
                group[constraint_type], constraint_fold))
        else:
            # Entity types with no constraints in this group cannot match it.
            exprs.append(z3.BoolVal(False))
    return z3.Or(exprs)
# Constructs an expression evaluating to whether a given role
# gives access to a specific node, database, or k8s cluster.
# Example value for role parameter:
#
# spec:
#   allow:
#     node_labels:
#       'env' : 'test'
#     kubernetes_labels:
#       'service' : 'company_app'
#   deny:
#     node_labels:
#       'env' : 'prod'
#
def allows(role : typing.Any) -> z3.BoolRef:
    """Compile a role spec into a Z3 expression that holds exactly when the
    role grants access: the allow group matches (all of its constraints,
    via z3.And) and the deny group does not (any constraint denies, via
    z3.Or).

    Fix: missing allow/deny sections previously produced Python ``False``
    fed into z3.And/z3.Not via implicit coercion; they are now an explicit
    Z3 false (missing allow never grants; missing deny never blocks).
    """
    role_name = role['metadata']['name']
    logging.debug(f'Compiling role template {role_name}')
    spec = role['spec']
    logging.debug('Compiling allow constraints')
    allow_expr = (matches_constraint_group(spec['allow'], z3.And)
                  if 'allow' in spec else z3.BoolVal(False))
    logging.debug('Compiling deny constraints')
    deny_expr = (matches_constraint_group(spec['deny'], z3.Or)
                 if 'deny' in spec else z3.BoolVal(False))
    return z3.And(allow_expr, z3.Not(deny_expr))
# Determines whether the given role is a role template, filled in by user traits.
def is_role_template(role) -> bool:
    """Return True if any allow or deny label *value* in the role's spec
    references user traits (i.e. requires template expansion).

    Fix: the deny branch previously collected the label dicts themselves
    (iterating their keys) while the allow branch collected ``.values()``;
    both branches now inspect label values consistently.
    """
    spec = role['spec']
    for group_name in ('allow', 'deny'):
        if group_name not in spec:
            continue
        group = spec[group_name]
        value_groups = [
            group[constraint_type].values()
            for constraint_type in entity_types.keys()
            if constraint_type in group
        ]
        if any(requires_user_traits(value)
               for values in value_groups
               for value in values):
            return True
    return False
# Compiles the labels of a given app, node, k8s cluster, or database into a
# form understood by Z3 that can be checked against a compiled set of role
# constraints.
def labels_as_z3_map(
    concrete_labels : typing.Optional[dict[str, str]],
    entity_type : EntityType
) -> z3.BoolRef:
    """Encode an entity's concrete labels as constraints on the Z3
    label/label-key functions belonging to entity_type, and additionally
    assert that every *other* entity type's key set is empty so the model
    cannot satisfy role constraints through unrelated entity types.
    """
    logging.debug(f'Compiling labels {concrete_labels} of type {entity_type.name}')
    # Specify unused entity types have no label values
    others_unused = z3.BoolVal(True)
    for other_entity_type in other_entity_types(entity_type):
        _, other_label_keys = other_entity_type.value
        any_key = z3.String(f'{other_entity_type.name}_any_key')
        # ForAll k. not other_label_keys(k): that type has no keys at all.
        other_unused = z3.ForAll(any_key, z3.Not(other_label_keys(any_key)))
        others_unused = z3.And(others_unused, other_unused)
    labels, label_keys = entity_type.value
    # It isn't enough to specify which values are in the set, we must also
    # specify which values are *not* in the set. Otherwise Z3 finds the
    # trivial set model [else -> True] which contains all string values.
    if concrete_labels is not None and any(concrete_labels):
        # Every concrete key is present in the key set...
        included = z3.And([label_keys(z3.StringVal(key)) for key in concrete_labels.keys()])
        excluded_key = z3.String('excluded_key')
        # ...and any string differing from all concrete keys is absent.
        is_excluded_key = z3.And([excluded_key != z3.StringVal(key) for key in concrete_labels.keys()])
        excluded = z3.Implies(is_excluded_key, z3.Not(label_keys(excluded_key)))
        restrictive_key_set = z3.And(included, z3.ForAll(excluded_key, excluded))
        # Each present key maps to exactly its concrete value.
        return z3.And(others_unused, restrictive_key_set, z3.And([
            labels(z3.StringVal(key)) == z3.StringVal(value)
            for key, value in concrete_labels.items()
        ]))
    else:
        # No labels at all: this entity type's key set is empty too.
        any_key = z3.String(f'{entity_type.name}_any_key')
        this_unused = z3.ForAll(any_key, z3.Not(label_keys(any_key)))
        return z3.And(others_unused, this_unused)
# Compiles the traits of a given internal or external user into a form
# understood by Z3 that can be checked against a compiled set of role constraints.
def traits_as_z3_map(
    concrete_traits : typing.Optional[dict[str, list[str]]],
    user_type : UserType
) -> typing.Optional[z3.BoolRef]:
    """Encode a user's concrete (key -> list of values) traits as
    constraints on the Z3 trait relation for user_type, while asserting the
    other user type's relation is completely empty.
    """
    logging.debug(f'Compiling user traits {concrete_traits} of type {user_type.name}')
    # Specify unused user type has no trait values
    other_traits = other_user_type(user_type)
    any_key = z3.String(f'{other_traits.name}_any_key')
    any_value = z3.String(f'{other_traits.name}_any_value')
    # ForAll k, v. not other(k, v): the other user type holds no traits.
    other_is_unused = z3.ForAll([any_key, any_value], z3.Not(other_traits.value(any_key, any_value)))
    traits = user_type.value
    # It isn't enough to specify which values are in the set, we must also
    # specify which values are *not* in the set. Otherwise Z3 finds the
    # trivial set model [else -> True] which contains all string values.
    if concrete_traits is not None and any(concrete_traits):
        # Every (key, value) pair from the concrete traits is in the relation.
        included = z3.And([
            traits(z3.StringVal(key), (z3.StringVal(value)))
            for key, values in concrete_traits.items() for value in values
        ])
        excluded_key = z3.String('excluded_key')
        any_value = z3.String('any_value')
        # Keys not among the concrete keys map to no value at all.
        is_excluded_key = z3.And([excluded_key != z3.StringVal(key) for key in concrete_traits.keys()])
        excluded_keys_excluded = z3.Implies(is_excluded_key, z3.Not(traits(excluded_key, any_value)))
        exclude_excluded_keys = z3.ForAll([excluded_key, any_value], excluded_keys_excluded)
        included_key = z3.String('included_key')
        excluded_value = z3.String('excluded_value')
        # For keys that *are* present, values outside that key's concrete
        # value list are not in the relation either.
        is_included_key = z3.Or([included_key == z3.StringVal(key) for key in concrete_traits.keys()])
        is_excluded_value = z3.And([
            z3.Implies(included_key == z3.StringVal(key), excluded_value != z3.StringVal(value))
            for key, values in concrete_traits.items() for value in values
        ])
        excluded_values_excluded = z3.Implies(z3.And(is_included_key, is_excluded_value), z3.Not(traits(included_key, excluded_value)))
        exclude_excluded_values = z3.ForAll([included_key, excluded_value], excluded_values_excluded)
        return z3.And(other_is_unused, included, exclude_excluded_keys, exclude_excluded_values)
    else: # User does not have any traits.
        any_key = z3.String(f'{user_type.name}_any_key')
        any_value = z3.String(f'{user_type.name}_any_value')
        this_is_unused = z3.ForAll([any_key, any_value], z3.Not(traits(any_key, any_value)))
        return z3.And(other_is_unused, this_is_unused)
# Determines whether the given role provides the user access to the entity.
# Does not check whether the user actually possesses that role.
def role_allows_user_access_to_entity(
    role : typing.Any,
    user_traits : typing.Optional[dict[str, list[str]]],
    user_type : UserType,
    entity_labels : dict[str, str],
    entity_type : EntityType,
    solver : typing.Optional[z3.Solver] = None
) -> bool:
    """Check satisfiability of (user traits + entity labels + role spec).

    Fix: the default was ``solver = z3.Solver()``, a mutable default
    evaluated once at definition time — constraints from every previous
    call accumulated in the shared solver, corrupting later results. A
    fresh solver is now created per call unless one is supplied.

    :param solver: optional pre-configured solver; a new one is created
        when omitted.
    :returns: True iff the assembled constraints are satisfiable.
    """
    if solver is None:
        solver = z3.Solver()
    solver.add(traits_as_z3_map(user_traits, user_type))
    solver.add(labels_as_z3_map(entity_labels, entity_type))
    solver.add(allows(role))
    return z3.sat == solver.check()
|
<reponame>teresa-ho/stx-nova<gh_stars>0
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import os
import six
from nova import utils
from nova.virt.libvirt.volume import mount
from nova.virt.libvirt.volume import volume as libvirt_volume
@six.add_metaclass(abc.ABCMeta)
class LibvirtBaseFileSystemVolumeDriver(
        libvirt_volume.LibvirtBaseVolumeDriver):
    """Common base for volume drivers backed by a file system share."""

    def __init__(self, host):
        # File-system-backed volumes are never block devices.
        super(LibvirtBaseFileSystemVolumeDriver,
              self).__init__(host, is_block_dev=False)

    @abc.abstractmethod
    def _get_mount_point_base(self):
        """Return the mount point path prefix.

        Device paths are built underneath this prefix.

        :returns: The mount point path prefix.
        """
        raise NotImplementedError('_get_mount_point_base')

    def _normalize_export(self, export):
        """Normalize the export (share) if necessary.

        The default implementation is the identity; subclasses with
        non-standard export values (e.g. URLs) should override it.

        :param export: The export (share) value to normalize.
        :returns: The normalized export value.
        """
        return export

    def _get_mount_path(self, connection_info):
        """Return the mount path for the share in connection_info.

        The path is the mount point base joined with a hash of the
        normalized export, i.e. one mount directory per share.

        :param connection_info: dict whose ``data.export`` entry holds the
            file system share.
        :returns: The mount path prefix.
        """
        raw_export = connection_info['data']['export']
        share = self._normalize_export(raw_export)
        base = self._get_mount_point_base()
        return os.path.join(base, utils.get_hash_str(share))

    def _get_device_path(self, connection_info):
        """Return the hashed path to the device.

        :param connection_info: dict whose ``data`` entry holds both the
            ``export`` (file system share) and the device ``name``.
        :returns: The full path to the device.
        """
        device_name = connection_info['data']['name']
        return os.path.join(self._get_mount_path(connection_info),
                            device_name)
@six.add_metaclass(abc.ABCMeta)
class LibvirtMountedFileSystemVolumeDriver(LibvirtBaseFileSystemVolumeDriver):
    # NOTE(mdbooth): Hopefully we'll get to the point where everything which
    # previously subclassed LibvirtBaseFileSystemVolumeDriver now subclasses
    # LibvirtMountedFileSystemVolumeDriver. If we get there, we should fold
    # this class into the base class.

    def __init__(self, host, fstype):
        super(LibvirtMountedFileSystemVolumeDriver, self).__init__(host)
        # File system type handed to the mount helper (e.g. 'nfs').
        self.fstype = fstype

    def connect_volume(self, connection_info, disk_info, instance):
        """Connect the volume: mount the share and record the device path."""
        data = connection_info['data']
        mountpoint = self._get_mount_path(connection_info)
        mount.mount(self.fstype, data['export'], data['name'], mountpoint,
                    instance, self._mount_options(connection_info))
        data['device_path'] = self._get_device_path(connection_info)

    def disconnect_volume(self, connection_info, disk_dev, instance):
        """Disconnect the volume by unmounting its share."""
        mountpoint = self._get_mount_path(connection_info)
        mount.umount(connection_info['data']['name'], mountpoint, instance)

    @abc.abstractmethod
    def _mount_options(self, connection_info):
        """Return a list of additional arguments to pass to the mount command.
        """
        pass
|
<reponame>MikeAT/visualizer
# Copyright 2021 Internet Corporation for Assigned Names and Numbers.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at https://mozilla.org/MPL/2.0/.
#
# Developed by Sinodun IT (sinodun.com)
import argparse
import configparser
import copy
import logging
import logging.config
import os
import pwd
import sys
import tempfile
import unittest
# Baseline configuration used by DSVTestCase.setUp; a deep copy is taken
# per test and the path/lockfile/dbdir/user entries are overridden with
# per-test temporary values.
_testconfig = {
    # Gearman job server used by the worker queues.
    'gearman': {
        'host': 'localhost',
        'port': 4730
    },
    # On-disk datastore layout and worker queue configuration.
    'datastore': {
        'path': '/srv/cbor/',
        'cdns_file_pattern': '*.cbor.xz',
        'tsv_file_pattern': '*.tsv*',
        'lockfile': '/run/lock/dns-stats-visualization/{}.lock',
        'user': 'dsv',
        # NOTE: The order is important. Priority of worker processing
        # INCREASES the later in the list the queue name is.
        'queues': 'cdns-to-pcap,cdns-to-tsv,import-tsv'
    },
    # PCAP generation options.
    'pcap': {
        'compress': 'Y',
        'compression-level': 2,
        'query-only': '',
        'pseudo-anonymise': '',
        'pseudo-anonymisation-key': '',
        'pseudo-anonymisation-passphrase': '',
        'replace': 'Y'
    },
    # PostgreSQL DSN pieces; 'connection' is interpolated from the others.
    'postgres': {
        'connection': 'postgresql://%(user)s:%(password)s@%(host)s/%(database)s',
        'database': 'dsv',
        'user': 'dsv',
        'password': '<PASSWORD>',
        'host': 'postgres'
    },
    # ClickHouse cluster endpoints and table names.
    'clickhouse': {
        'servers': 'dsv-clickhouse1,' \
                   'dsv-clickhouse2,' \
                   'dsv-clickhouse3,' \
                   'dsv-clickhouse4,',
        'dbdir': '/src/clickhouse',
        'import-server': 'dsv1',
        'node-shard-default': 'auto',
        'database': 'dsv',
        'user': 'dsv',
        'password': '<PASSWORD>',
        'querytable': 'QueryResponse',
        'packetcountstable': 'PacketCounts',
    },
    # RSSAC report generation settings, including zone transfer details.
    'rssac': {
        'outdir': '.',
        'grafana-url': 'https://localhost',
        'server': 'Z-Root',
        'zone': '.',
        'tsig': {
            'name': '',
            'key': '',
            'algo': ''
        },
        'xfr-server': ''
    },
}
class DSVTestCase(unittest.TestCase):
    """Base test case for DSV tests.

    setUp redirects the standard streams to temp files, builds a scratch
    copy of ``_testconfig`` pointing at per-test temporary directories, and
    routes all logging to a temp file; tearDown undoes everything.

    Fix: removed the redundant ``__init__`` override, which only delegated
    to ``super().__init__`` with an identical signature.
    """

    def setUp(self):
        # Capture std streams so tests can inspect what was read/written.
        self._orig_stdin = sys.stdin
        self._orig_stdout = sys.stdout
        self._orig_stderr = sys.stderr
        self._stdin = tempfile.NamedTemporaryFile(mode='w+', prefix='stdin_')
        self._stdout = tempfile.NamedTemporaryFile(mode='w+', prefix='stdout_')
        self._stderr = tempfile.NamedTemporaryFile(mode='w+', prefix='stderr_')
        sys.stdin = self._stdin
        sys.stdout = self._stdout
        sys.stderr = self._stderr
        # Deep copy so per-test mutations never leak into _testconfig.
        testcfg = copy.deepcopy(_testconfig)
        # Set up directories
        self._datastore_path = tempfile.TemporaryDirectory(prefix='dpath_')
        self._datastore_lockdir = tempfile.TemporaryDirectory(prefix='dlockdir_')
        self._clickhouse_dbdir = tempfile.TemporaryDirectory(prefix='cdbdir_')
        testcfg['datastore']['path'] = self._datastore_path.name
        testcfg['datastore']['lockfile'] = self._datastore_lockdir.name + '/{}.lock'
        testcfg['clickhouse']['dbdir'] = self._clickhouse_dbdir.name
        # Run as the current user so file ownership checks pass in tests.
        testcfg['datastore']['user'] = pwd.getpwuid(os.getuid()).pw_name
        # Set up test config object
        self._config = configparser.ConfigParser()
        self._config.read_dict(testcfg)
        # Send all log output to a temp file the test can read back.
        self._log = tempfile.NamedTemporaryFile(mode='w+', prefix='log_')
        _log_cfg = {
            'version': 1,
            'disable_existing_loggers': True,
            'formatters': {
                'simple': {
                    'format': '%(name)s:%(levelname)s:%(message)s'
                },
            },
            'handlers': {
                'stream': {
                    'level': 'DEBUG',
                    'class': 'logging.StreamHandler',
                    'stream': self._log,
                    'formatter': 'simple'
                },
            },
            'root': {
                'handlers': ['stream'],
                'level': 'DEBUG',
                'propagate': True
            }
        }
        logging.config.dictConfig(_log_cfg)

    def tearDown(self):
        # Release resources in reverse order of acquisition.
        self._log.close()
        self._clickhouse_dbdir.cleanup()
        self._datastore_lockdir.cleanup()
        self._datastore_path.cleanup()
        sys.stderr = self._orig_stderr
        sys.stdout = self._orig_stdout
        sys.stdin = self._orig_stdin
        self._stderr.close()
        self._stdout.close()
        self._stdin.close()
def get_args(module, argv=None):
    """Build a parser for *module* and parse *argv*.

    :param module: object exposing ``description`` and ``add_args(parser)``.
    :param argv: list of argument strings; defaults to an empty list (not
        ``sys.argv``) so callers stay hermetic.
    :returns: the parsed ``argparse.Namespace``.

    Fix: the default was the mutable ``argv=[]``; replaced with the
    ``None`` sentinel idiom.
    """
    # NOTE(review): ``module.description`` is passed positionally, so it
    # actually becomes ArgumentParser's ``prog``, not its description —
    # confirm intent before changing.
    parser = argparse.ArgumentParser(module.description)
    module.add_args(parser)
    return parser.parse_args([] if argv is None else argv)
|
from .providers import esi
from .models import Fleet, FleetInformation
from esi.models import Token
from celery import shared_task
from django.utils import timezone
from concurrent.futures import ThreadPoolExecutor, as_completed
import logging
logger = logging.getLogger(__name__)
@shared_task
def open_fleet(character_id, motd, free_move, name, groups):
    """Register a Fleet advert for the fleet currently commanded by the
    given character and push the MOTD/free-move settings to ESI.

    No-op unless the character is actually the fleet commander.

    Fix: replaced ``== None`` comparisons with ``is None``; dropped the
    ``role == None`` test, which was subsumed by the role inequality check.
    """
    required_scopes = ["esi-fleets.read_fleet.v1", "esi-fleets.write_fleet.v1"]
    c = esi.client
    token = Token.get_token(character_id, required_scopes)
    fleet_result = c.Fleets.get_characters_character_id_fleet(
        character_id=token.character_id, token=token.valid_access_token()
    ).result()
    fleet_id = fleet_result.pop("fleet_id")
    fleet_role = fleet_result.pop("role")
    if fleet_id is None or fleet_role != "fleet_commander":
        return
    fleet = Fleet(
        fleet_id=fleet_id,
        created_at=timezone.now(),
        motd=motd,
        is_free_move=free_move,
        fleet_commander_id=token.character_id,
        name=name,
    )
    fleet.save()
    fleet.groups.set(groups)
    # Mirror the stored settings onto the live ESI fleet.
    esiFleet = {"is_free_move": free_move, "motd": motd}
    c.Fleets.put_fleets_fleet_id(
        fleet_id=fleet_id, token=token.valid_access_token(), new_settings=esiFleet
    ).result()
@shared_task
def send_fleet_invitation(character_ids, fleet_id):
    """Fan out ESI fleet invitations to all given characters in parallel.

    Fixes: loop variable typo ``_chracter_id``; removed the unused local
    ``c = esi.client``.
    """
    required_scopes = ["esi-fleets.write_fleet.v1"]
    fleet = Fleet.objects.get(fleet_id=fleet_id)
    fleet_commander_token = Token.get_token(fleet.fleet_commander_id, required_scopes)
    _processes = []
    with ThreadPoolExecutor(max_workers=50) as ex:
        for _character_id in character_ids:
            _processes.append(
                ex.submit(
                    send_invitation,
                    character_id=_character_id,
                    fleet_commander_token=fleet_commander_token,
                    fleet_id=fleet_id,
                )
            )
        for item in as_completed(_processes):
            # result() re-raises any exception from the worker thread.
            _ = item.result()
@shared_task
def send_invitation(character_id, fleet_commander_token, fleet_id):
    """Invite one character to the fleet as a squad member via ESI."""
    client = esi.client
    payload = {"character_id": character_id, "role": "squad_member"}
    client.Fleets.post_fleets_fleet_id_members(
        fleet_id=fleet_id,
        token=fleet_commander_token.valid_access_token(),
        invitation=payload,
    ).result()
@shared_task
def check_fleet_adverts():
    """Delete Fleet adverts whose in-game fleet no longer matches.

    Fix: the handler read ``e.status_code`` on a bare ``Exception`` — any
    exception without that attribute raised AttributeError *inside* the
    handler, and other statuses were silently swallowed. Uses getattr and
    logs unexpected errors instead.
    """
    required_scopes = ["esi-fleets.read_fleet.v1", "esi-fleets.write_fleet.v1"]
    c = esi.client
    fleets = Fleet.objects.all()
    for fleet in fleets:
        token = Token.get_token(fleet.fleet_commander_id, required_scopes)
        try:
            fleet_result = c.Fleets.get_characters_character_id_fleet(
                character_id=token.character_id, token=token.valid_access_token()
            ).result()
            fleet_id = fleet_result["fleet_id"]
            if fleet_id != fleet.fleet_id:
                # Commander moved to a different fleet; advert is stale.
                fleet.delete()
        except Exception as e:
            if getattr(e, "status_code", None) == 404:  # 404 means the character is not in a fleet
                fleet.delete()
                logger.info("Character is not in a fleet - fleet advert removed")
            else:
                logger.exception("Unexpected error while checking fleet advert")
@shared_task
def get_fleet_composition(fleet_id):
    """Snapshot the fleet's members, resolve their ids to names, persist
    per-ship-type counts and return a FleetViewAggregate.

    Fix: id-to-name resolution previously rescanned the response list with
    ``.index`` once per member and per field (O(n*m), three loops); a
    single dict lookup table is built once instead.
    """
    required_scopes = ["esi-fleets.read_fleet.v1", "esi-fleets.write_fleet.v1"]
    c = esi.client
    fleet = Fleet.objects.get(fleet_id=fleet_id)
    token = Token.get_token(fleet.fleet_commander_id, required_scopes)
    fleet_infos = c.Fleets.get_fleets_fleet_id_members(
        fleet_id=fleet_id, token=token.valid_access_token()
    ).result()
    # Collect the unique ids (characters, systems, ship types) so one bulk
    # name-resolution call suffices.
    ids = set()
    for member in fleet_infos:
        ids.add(member["character_id"])
        ids.add(member["solar_system_id"])
        ids.add(member["ship_type_id"])
    ids_to_name = c.Universe.post_universe_names(ids=list(ids)).result()
    names_by_id = {entry["id"]: entry["name"] for entry in ids_to_name}
    for member in fleet_infos:
        member["character_name"] = names_by_id[member["character_id"]]
        member["solar_system_name"] = names_by_id[member["solar_system_id"]]
        member["ship_type_name"] = names_by_id[member["ship_type_id"]]
    aggregate = get_fleet_aggregate(fleet_infos)
    # Per ship type: difference against the latest stored snapshot, then
    # persist the new count.
    differential = dict()
    for key, value in aggregate.items():
        fleet_info_agg = FleetInformation.objects.filter(
            fleet__fleet_id=fleet_id, ship_type_name=key
        )
        if fleet_info_agg.count() > 0:
            differential[key] = value - fleet_info_agg.latest("date").count
        else:
            differential[key] = value
        FleetInformation.objects.create(fleet=fleet, ship_type_name=key, count=value)
    return FleetViewAggregate(fleet_infos, aggregate, differential)
@shared_task
def get_fleet_aggregate(fleet_infos):
    """Count fleet members per ship type name.

    Members lacking a ``ship_type_name`` key are grouped under ``None``.
    """
    counts = dict()
    for member in fleet_infos:
        ship_name = member.get("ship_type_name")
        counts[ship_name] = counts.get(ship_name, 0) + 1
    return counts
class FleetViewAggregate(object):
    """Value object bundling raw fleet member data with the aggregated
    ship-type counts and their per-type differential."""

    def __init__(self, fleet, aggregate, differential):
        self.fleet = fleet                # list of member info dicts
        self.aggregate = aggregate        # ship type name -> count
        self.differential = differential  # ship type name -> count delta
|
<reponame>zhupangithub/WEBERP
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
from openerp.osv.orm import setup_modifiers
from openerp.tools.translate import _
class account_common_report(osv.osv_memory):
_name = "account.common.report"
_description = "Account Common Report"
def onchange_chart_id(self, cr, uid, ids, chart_account_id=False, context=None):
res = {}
if chart_account_id:
company_id = self.pool.get('account.account').browse(cr, uid, chart_account_id, context=context).company_id.id
now = time.strftime('%Y-%m-%d')
domain = [('company_id', '=', company_id), ('date_start', '<=', now), ('date_stop', '>=', now)]
fiscalyears = self.pool.get('account.fiscalyear').search(cr, uid, domain, limit=1)
res['value'] = {'company_id': company_id, 'fiscalyear_id': fiscalyears and fiscalyears[0] or False}
return res
_columns = {
'chart_account_id': fields.many2one('account.account', 'Chart of Account', help='Select Charts of Accounts', required=True, domain = [('parent_id','=',False)]),
'company_id': fields.related('chart_account_id', 'company_id', type='many2one', relation='res.company', string='Company', readonly=True),
'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', help='Keep empty for all open fiscal year'),
'filter': fields.selection([('filter_no', 'No Filters'), ('filter_date', 'Date'), ('filter_period', 'Periods')], "Filter by", required=True),
'period_from': fields.many2one('account.period', 'Start Period'),
'period_to': fields.many2one('account.period', 'End Period'),
'journal_ids': fields.many2many('account.journal', string='Journals', required=True),
'date_from': fields.date("Start Date"),
'date_to': fields.date("End Date"),
'target_move': fields.selection([('posted', 'All Posted Entries'),
('all', 'All Entries'),
], 'Target Moves', required=True),
}
def _check_company_id(self, cr, uid, ids, context=None):
for wiz in self.browse(cr, uid, ids, context=context):
company_id = wiz.company_id.id
if wiz.fiscalyear_id and company_id != wiz.fiscalyear_id.company_id.id:
return False
if wiz.period_from and company_id != wiz.period_from.company_id.id:
return False
if wiz.period_to and company_id != wiz.period_to.company_id.id:
return False
return True
_constraints = [
(_check_company_id, 'The fiscalyear, periods or chart of account chosen have to belong to the same company.', ['chart_account_id','fiscalyear_id','period_from','period_to']),
]
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context is None:context = {}
res = super(account_common_report, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
if context.get('active_model', False) == 'account.account':
doc = etree.XML(res['arch'])
nodes = doc.xpath("//field[@name='chart_account_id']")
for node in nodes:
node.set('readonly', '1')
node.set('help', 'If you print the report from Account list/form view it will not consider Charts of account')
setup_modifiers(node, res['fields']['chart_account_id'])
res['arch'] = etree.tostring(doc)
return res
    def onchange_filter(self, cr, uid, ids, filter='filter_no', fiscalyear_id=False, context=None):
        """Reset the date/period bounds when the filter mode changes.

        - 'filter_no': clear everything.
        - 'filter_date': default to Jan 1st of the current year .. today.
        - 'filter_period': pick the first non-special period of the fiscal
          year and the latest already-started one (via the SQL below).
        """
        res = {'value': {}}
        if filter == 'filter_no':
            res['value'] = {'period_from': False, 'period_to': False, 'date_from': False ,'date_to': False}
        if filter == 'filter_date':
            res['value'] = {'period_from': False, 'period_to': False, 'date_from': time.strftime('%Y-01-01'), 'date_to': time.strftime('%Y-%m-%d')}
        if filter == 'filter_period' and fiscalyear_id:
            start_period = end_period = False
            # First SELECT: earliest regular (non-opening) period of the year.
            # Second SELECT: latest regular period that has already started.
            cr.execute('''
                SELECT * FROM (SELECT p.id
                               FROM account_period p
                               LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
                               WHERE f.id = %s
                               AND p.special = false
                               ORDER BY p.date_start ASC, p.special ASC
                               LIMIT 1) AS period_start
                UNION ALL
                SELECT * FROM (SELECT p.id
                               FROM account_period p
                               LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
                               WHERE f.id = %s
                               AND p.date_start < NOW()
                               AND p.special = false
                               ORDER BY p.date_stop DESC
                               LIMIT 1) AS period_stop''', (fiscalyear_id, fiscalyear_id))
            periods = [i[0] for i in cr.fetchall()]
            # Both rows must exist before the defaults are proposed.
            if periods and len(periods) > 1:
                start_period = periods[0]
                end_period = periods[1]
            res['value'] = {'period_from': start_period, 'period_to': end_period, 'date_from': False, 'date_to': False}
        return res
def _get_account(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
accounts = self.pool.get('account.account').search(cr, uid, [('parent_id', '=', False), ('company_id', '=', user.company_id.id)], limit=1)
return accounts and accounts[0] or False
    def _get_fiscalyear(self, cr, uid, context=None):
        """Default fiscal year: the one covering today's date for the
        relevant company (the active account's company when launched from
        an account record, otherwise the current user's company)."""
        if context is None:
            context = {}
        now = time.strftime('%Y-%m-%d')
        company_id = False
        ids = context.get('active_ids', [])
        if ids and context.get('active_model') == 'account.account':
            company_id = self.pool.get('account.account').browse(cr, uid, ids[0], context=context).company_id.id
        else: # use current company id
            company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        domain = [('company_id', '=', company_id), ('date_start', '<=', now), ('date_stop', '>=', now)]
        fiscalyears = self.pool.get('account.fiscalyear').search(cr, uid, domain, limit=1)
        return fiscalyears and fiscalyears[0] or False
def _get_all_journal(self, cr, uid, context=None):
return self.pool.get('account.journal').search(cr, uid ,[])
_defaults = {
'fiscalyear_id': _get_fiscalyear,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.common.report',context=c),
'journal_ids': _get_all_journal,
'filter': 'filter_no',
'chart_account_id': _get_account,
'target_move': 'posted',
}
    def _build_contexts(self, cr, uid, ids, data, context=None):
        """Translate the wizard form values (``data['form']``) into the
        context dict consumed by the report engine.

        Raises an error dialog when period filtering is selected but one
        of the period bounds is missing.
        """
        if context is None:
            context = {}
        result = {}
        result['fiscalyear'] = 'fiscalyear_id' in data['form'] and data['form']['fiscalyear_id'] or False
        result['journal_ids'] = 'journal_ids' in data['form'] and data['form']['journal_ids'] or False
        result['chart_account_id'] = 'chart_account_id' in data['form'] and data['form']['chart_account_id'] or False
        result['state'] = 'target_move' in data['form'] and data['form']['target_move'] or ''
        if data['form']['filter'] == 'filter_date':
            result['date_from'] = data['form']['date_from']
            result['date_to'] = data['form']['date_to']
        elif data['form']['filter'] == 'filter_period':
            if not data['form']['period_from'] or not data['form']['period_to']:
                raise osv.except_osv(_('Error!'),_('Select a starting and an ending period.'))
            result['period_from'] = data['form']['period_from']
            result['period_to'] = data['form']['period_to']
        return result
def _print_report(self, cr, uid, ids, data, context=None):
raise (_('Error!'), _('Not implemented.'))
    def check_report(self, cr, uid, ids, context=None):
        """Collect the wizard form values, build the report context, and
        dispatch to the subclass's _print_report implementation."""
        if context is None:
            context = {}
        data = {}
        data['ids'] = context.get('active_ids', [])
        data['model'] = context.get('active_model', 'ir.ui.menu')
        data['form'] = self.read(cr, uid, ids, ['date_from', 'date_to', 'fiscalyear_id', 'journal_ids', 'period_from', 'period_to', 'filter', 'chart_account_id', 'target_move'], context=context)[0]
        # read() returns many2one fields as (id, name) tuples; keep only ids.
        for field in ['fiscalyear_id', 'chart_account_id', 'period_from', 'period_to']:
            if isinstance(data['form'][field], tuple):
                data['form'][field] = data['form'][field][0]
        used_context = self._build_contexts(cr, uid, ids, data, context=context)
        data['form']['periods'] = used_context.get('periods', False) and used_context['periods'] or []
        data['form']['used_context'] = dict(used_context, lang=context.get('lang', 'en_US'))
        return self._print_report(cr, uid, ids, data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
import os.path
import numpy as np
import collections
import matplotlib.pyplot as plt
from matplotlib import style
from tqdm import tqdm
from .autograd import Variable
def timer(func):
    '''
    Decorator that prints the execution time of the wrapped function.

    Args:
        func (callable): function to time.

    Returns:
        callable: wrapper forwarding all arguments and the return value of
        ``func`` while printing how long the call took.
    '''
    import time
    import functools

    @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        # Bug fix: the original printed start - end, a negative duration.
        print('[*] The function \'{}\' took {} sec to excecute.'.format(func.__name__, end - start))
        return result
    return wrapper
def trainer(model, criterion, optimizer, dataloader, epochs, minibatch, filename, load_weights=False):
    '''
    This trainer will ease the process of training a model.

    Args:
        model (Module): model to train
        criterion (Regression|Classification): loss function to use
        optimizer (Optimizer): optimizer to use
        dataloader (DataLoader): dataloader to use
        epochs (int): number of epochs
        minibatch (int): number of batch to use for training
        filename (string): specify the filename as well as the saving path without the file extension. (ex) path/to/filename
        load_weights (bool): load pre-trained weights if available. Default:False
    '''
    best_acc = 0
    # NOTE(review): weights are checked as <filename>.hdf5 but loaded via
    # model.load(filename) — presumably model.load appends the extension;
    # confirm against the Module implementation.
    if os.path.exists(filename+'.hdf5') and load_weights:
        model.load(filename)
        print('[*] weights loaded.')
    if criterion.live_plot:
        fig = plt.gcf()
        fig.show()
        fig.canvas.draw()
    print('[*] training...')
    dataloader.set_batch(minibatch)
    for epoch in tqdm(range(epochs)):
        if minibatch > 1:
            dataloader.train_dataset.shuffle()
        for data, label in tqdm(dataloader.train_dataset, ncols=150, desc='epoch {}, acc {}'.format(epoch+1, best_acc)):
            output = model(data)
            loss = criterion(output, label)
            model.zero_grad()
            loss.backward()
            optimizer.step()
            if criterion.live_plot:
                # Bug fix: the original did `for j in len(criterion.losses)`,
                # iterating over an int, which raises TypeError on the first
                # live-plot update. Plot loss history against its index.
                plt.plot(range(len(criterion.losses)), criterion.losses)
                plt.pause(0.001)
                fig.canvas.draw()
        # Evaluate on the test split after each epoch; checkpoint only when
        # accuracy improves.
        tmp_acc = 0
        num = 0
        for data, label in dataloader.test_dataset:
            output = model(data)
            tmp_acc += criterion.get_acc(output, label)
            num += 1
        tmp_acc /= num
        if tmp_acc > best_acc:
            best_acc = tmp_acc
            model.save(filename)
    print('[*] training completed.')
    print('best accuracy: {}'.format(best_acc))
def tester(model, criterion, dataloader, minibatch, filename, show_img=False):
    '''
    This tester will ease the evaluation process of a model

    Args:
        model (Module): model to train
        criterion (Regression|Classification): loss function to use
        dataloader (DataLoader): dataloader to use
        minibatch (int): number of batch to use for training
        filename (string): specify the filename as well as the saving path without the file extension. (ex) path/to/filename
        show_img (bool): when True, display each batch with labels and predictions. Default:False
    '''
    model.load(filename)
    acc = 0
    num = 0
    print('[*] testing...')
    dataloader.set_batch(minibatch)
    # Average per-batch accuracy over the whole test split.
    for data, label in dataloader.test_dataset:
        output = model(data)
        if show_img:
            imgs_show(data, label, output)
        acc += criterion.get_acc(output, label)
        num += 1
    print('accuracy: {}'.format(acc/num))
def imgs_show(data, label, pred, size=(28,28), col=4):
    '''
    Draws images along with their labels and the predictions of the model.

    Args:
        data (Variable): images to visualize
        label (Variable): the labels for the data
        pred (Variable): predicted value by the network
        size (tuple of int): image size
        col (int): number of images to display in a row
    '''
    data = data.data
    # Collapse one-hot labels/predictions back to class indices.
    if label.shape[1] != 1:
        label = np.argmax(label.data, axis=1).reshape(-1,1)
    else:
        label = label.data
    if pred.shape[1] != 1:
        pred = np.argmax(pred.data, axis=1).reshape(-1,1)
    else:
        pred = pred.data
    # Bug fix: the original computed rows as int(data.shape[0])/col+1 — a
    # float — which plt.subplot rejects. Use integer division, hoisted out
    # of the loop.
    rows = data.shape[0] // col + 1
    for i in range(data.shape[0]):
        plt.subplot(rows, col, i+1)
        plt.imshow(data[i].reshape(size), cmap='gray', interpolation='nearest')
        plt.title('Ans:{}, Pred:{}'.format(label[i], pred[i]), fontsize=7)
        plt.axis('off')
    plt.tight_layout()
    plt.show()
class DataSet(object):
    '''Dataset\n
    Attributes:
        batch (int): batch size for sampling
        attributes (str): attributes of the dataset, default is None
        data (ndarray): features of the dataset, default is None
        label (ndarray): labels of the dataset, default is None
        corr (dict): maps class label -> class index (built when string
            labels are assigned, or by to_one_hot)
        idx (int): minibatch cursor used while iterating
    '''
    def __init__(self):
        self.batch = 1
        self.attributes = None
        self.data = None
        self.label = None
        self.corr = None
        self.idx = 0

    def __add__(self, a):
        '''Concatenate two datasets; encoded labels are mapped back to their
        raw labels before concatenation so the result is re-encoded
        consistently by __setattr__.'''
        # Bug fix: type-check before touching a.data, otherwise adding a
        # non-DataSet raised AttributeError instead of TypeError.
        if not isinstance(a, DataSet):
            raise TypeError
        if self.data is None or a.data is None:
            raise ValueError
        dataset = DataSet()
        if self.attributes == a.attributes:
            dataset.attributes = self.attributes
        else:
            dataset.attributes = self.attributes + '\n' + a.attributes
        dataset.data = np.concatenate((self.data, a.data), axis=0)
        if self.label is not None and a.label is not None:
            if self.corr is not None:
                # Decode integer labels back to the original class labels.
                inv_corr1 = {v: k for k, v in self.corr.items()}
                label1 = np.array(list(map(lambda x: inv_corr1[x], self.label)))
            else:
                label1 = self.label
            if a.corr is not None:
                # Bug fix: this dict was assigned to a typo name `inc_corr2`
                # while the next line read `inv_corr2`, raising NameError.
                inv_corr2 = {v: k for k, v in a.corr.items()}
                label2 = np.array(list(map(lambda x: inv_corr2[x], a.label)))
            else:
                label2 = a.label
            dataset.label = np.concatenate((label1, label2), axis=0)
        return dataset

    def __setattr__(self, key, value):
        # Assigning string labels transparently encodes them as int32 class
        # indices and records the mapping in self.corr.
        if key == 'label' and value is not None:
            if np.issubdtype(value.dtype, np.string_):
                labels = collections.Counter(value)
                corr = {_key:i for i, _key in enumerate(sorted(labels.keys()))}
                object.__setattr__(self, 'corr', corr)
                tmp = list(map(lambda x:self.corr[x],value))
                tmp_label = np.zeros(len(tmp),dtype=np.int32)
                for i in range(len(tmp_label)):
                    tmp_label[i] = tmp[i]
                object.__setattr__(self, 'label', tmp_label)
            else:
                object.__setattr__(self, 'label', value)
        else:
            object.__setattr__(self, key, value)

    def __len__(self):
        # Number of samples, not number of batches.
        if self.data is None:
            return 0
        else:
            return self.data.shape[0]

    def __repr__(self):
        return self.attributes

    def __iter__(self):
        return self

    def __next__(self):
        # Stop once the last (possibly partial) batch has been served, then
        # reshuffle so the next epoch sees a fresh ordering.
        if (self.idx > len(self)//self.batch and len(self) % self.batch != 0) or (self.idx >= len(self)//self.batch and len(self) % self.batch == 0):
            self.idx = 0
            self.shuffle()
            raise StopIteration()
        features = self.data[self.idx*self.batch:(self.idx+1)*self.batch]
        if self.label is not None:
            target = self.label[self.idx*self.batch:(self.idx+1)*self.batch]
            self.idx += 1
            return Variable(features, requires_grad=False), Variable(target, requires_grad=False)
        self.idx += 1
        return Variable(features, requires_grad=False)

    def shuffle(self):
        '''Apply one random permutation to data and labels in lockstep.'''
        idx = np.random.permutation(len(self.data))
        self.data = self.data[idx]
        if self.label is not None:
            object.__setattr__(self, 'label', self.label[idx])

    def to_one_hot(self):
        '''Convert integer class labels to a one-hot int32 matrix in place.'''
        if self.label is None:
            raise ValueError
        if np.issubdtype(self.label.dtype, np.float64):
            raise TypeError
        if self.corr is None:
            labels = collections.Counter(self.label)
            corr = {key:i for i, key in enumerate(sorted(labels.keys()))}
            object.__setattr__(self, 'corr', corr)
        if np.issubdtype(self.label.dtype, np.string_):
            tmp = list(map(lambda x:self.corr[x],self.label))
        else:
            tmp = self.label
        # Bug fix: the original indexed np.eye with self.label, discarding
        # the `tmp` mapping computed just above for string labels.
        object.__setattr__(self, 'label', np.eye(len(self.corr),dtype=np.int32)[tmp])

    def to_vector(self):
        raise NotImplementedError
class DataLoader(object):
    '''DataLoader\n
    Attributes:
        train_dataset (DataSet): training dataset that consists of numpy array for features, labels and its size
        valid_dataset (DataSet): validation dataset that consists of numpy array for features, labels and its size
        test_dataset (DataSet): testing dataset that consists of numpy array for features, labels and its size
        batch (int): batch size for sampling
    '''
    def __init__(self):
        self.train_dataset = DataSet()
        self.valid_dataset = DataSet()
        self.test_dataset = DataSet()

    def set_batch(self, batch):
        # Propagate one batch size to every split.
        for split in (self.train_dataset, self.valid_dataset, self.test_dataset):
            split.batch = batch
class ImageLoader(DataLoader):
    '''ImageLoader\n
    DataLoader class that carries data augmentation functions for images.
    NOTE(review): every augmentation method below is an unimplemented stub.
    '''
    def __init__(self):
        super().__init__()
    def crop(self, num):
        ''' Random Crop (stub, not implemented)
        '''
        pass
    def scale(self, num):
        ''' Scale Augmentation (stub, not implemented)
        '''
        pass
    def rotate(self, num):
        # stub, not implemented
        pass
    def flip(self, num):
        # stub, not implemented
        pass
    def noise(self, num):
        # stub, not implemented
        pass
|
<reponame>obroomhall/AutoGIF<filename>clipsnip/gif_extractor.py
import os
import re
import subprocess
import syllables
from pysubs2 import SSAEvent, SSAFile
from scenedetect.detectors import ContentDetector
from scenedetect.frame_timecode import FrameTimecode
from scenedetect.scene_manager import SceneManager
from scenedetect.video_manager import VideoManager
from clipsnip.config import tmp_dir
class GifExtractor:
    """Cuts subtitle-aligned clips out of a video file.

    For each subtitle group the rough bounds are padded, snapped to scene
    cuts, per-word subtitle effects are rendered to a temporary .ass file,
    and ffmpeg writes the trimmed clip next to the source video.
    """
    def __init__(self, padding_seconds=1.5):
        # Internally everything is in milliseconds (pysubs2 convention).
        self.padding_milliseconds = padding_seconds*1000
        self.output_format = '.mp4'
    def extract_gif(self, source, subtitles_list):
        """Render one clip per subtitles object in *subtitles_list*."""
        for subtitles_obj in subtitles_list:
            # gets an estimated start/end time from padding subtitles times
            [start_time_padded, end_time_padded] = self.get_padded_trim_times(subtitles_obj)
            subtitles = subtitles_obj.subs
            # gets frame accurate start/end times for scene cuts
            [trim_start, trim_end] = find_trim_times(source, subtitles, start_time_padded/1000, end_time_padded/1000)
            # re-base subtitle timestamps so they line up with the trimmed clip
            subtitles.shift(s=-trim_start.get_seconds())
            subtitles = add_effects(subtitles)
            # random temp name avoids collisions between concurrent runs
            ass_filename = os.path.join(tmp_dir, os.urandom(24).hex() + '.ass')
            subtitles.save(ass_filename)
            output_filename = get_output_name(source, subtitles, self.output_format)
            trim(source, ass_filename, output_filename, trim_start, trim_end)
            os.remove(ass_filename)
    def get_padded_trim_times(self, subtitles):
        """Pad the subtitle span, clamped so it never overlaps the
        neighbouring subtitle groups."""
        start_time_padded = subtitles.subs[0].start - self.padding_milliseconds
        if subtitles.previous_end_time and start_time_padded < subtitles.previous_end_time:
            start_time_padded = subtitles.previous_end_time
        end_time_padded = subtitles.subs[-1].end + self.padding_milliseconds
        if subtitles.next_start_time and end_time_padded > subtitles.next_start_time:
            end_time_padded = subtitles.next_start_time
        return [start_time_padded, end_time_padded]
def add_effects(subtitles):
    '''Rebuild *subtitles* as word-by-word reveal events.

    Each line's duration is split proportionally to the estimated syllable
    count per word; the not-yet-"spoken" suffix of the line is hidden with
    an ASS alpha override tag, producing a karaoke-style reveal.
    '''
    effected_subs = SSAFile()
    for sub in subtitles:
        content = sub.plaintext.strip().replace('\n', ' ')
        # milliseconds per estimated syllable across the whole line
        time_per_syllable = (sub.end-sub.start)/syllables.estimate(content)
        current_time = sub.start
        current_index = 0
        for word in content.split(' '):
            sylls = syllables.estimate(word)
            sub_end_time = current_time + time_per_syllable*sylls
            # advance past this word (+1 for the separating space after the first)
            current_index += len(word) if current_index == 0 else len(word) + 1
            text = content[:current_index] + '{\\alpha&HFF}' + content[current_index:] # adds transparency
            effected_subs.append(SSAEvent(start=current_time, end=sub_end_time, text=text))
            current_time = sub_end_time
    return effected_subs
def trim(source, subs_filename, output, start, end):
    '''Invoke ffmpeg to cut [start, end) out of *source*, burning in the
    .ass subtitles, and write it to *output* (never overwriting: -n).

    start/end are FrameTimecode-like objects whose str()/subtraction ffmpeg
    can consume; raises CalledProcessError on ffmpeg failure (check=True).
    '''
    subprocess.run([
        'ffmpeg',
        '-ss', str(start),
        '-i', source,
        '-vf', 'ass=' + subs_filename,
        '-t', str(end - start),
        '-c:v', 'libx264',
        # '-async', '1',
        # '-an', # Removes audio
        output,
        '-n'
    ], check=True)
def find_trim_times(source, subtitles, min_start_time, max_end_time):
    '''Snap the clip bounds to scene cuts.

    Detects scene changes inside [min_start_time, max_end_time] (seconds)
    and returns the latest cut at-or-before the first subtitle and the
    earliest cut at-or-after the last subtitle, as FrameTimecodes.
    '''
    video_manager = VideoManager([source])
    scene_manager = SceneManager()
    scene_manager.add_detector(ContentDetector())
    base_timecode = video_manager.get_base_timecode()
    try:
        # Set downscale factor to improve processing speed (no args means default).
        video_manager.set_downscale_factor()
        x = FrameTimecode(timecode=min_start_time, fps=video_manager.get_framerate())
        y = FrameTimecode(timecode=max_end_time, fps=video_manager.get_framerate())
        video_manager.set_duration(start_time=x, end_time=y)
        video_manager.start()
        scene_manager.detect_scenes(frame_source=video_manager)
        scene_list = scene_manager.get_scene_list(base_timecode)
        # NOTE(review): pysubs2 times are ints (ms) — passing str(ms) as a
        # timecode presumably parses as a frame/time string; confirm against
        # FrameTimecode's accepted formats.
        subs_start = FrameTimecode(timecode=str(subtitles[0].start), fps=video_manager.get_framerate())
        subs_end = FrameTimecode(timecode=str(subtitles[-1].end), fps=video_manager.get_framerate())
        trim_start = x
        trim_end = y
        # Each scene is a (start, end) timecode pair; tighten the bounds
        # toward the subtitles without crossing them.
        for scene in enumerate(scene_list):
            timecodes = scene[1]
            start_frame = timecodes[0].get_frames()
            end_frame = timecodes[1].get_frames()
            if subs_start.get_frames() >= start_frame > trim_start.get_frames():
                trim_start = timecodes[0]
            if trim_end.get_frames() > end_frame >= subs_end.get_frames():
                trim_end = timecodes[1]
        # back off one frame so the cut's first frame of the next scene is excluded
        trim_end = FrameTimecode(timecode=trim_end.get_frames() - 1, fps=video_manager.get_framerate())
    finally:
        video_manager.release()
    return [trim_start, trim_end]
def get_output_name(source, subtitles, output_format):
    '''Build a unique output path next to *source*.

    The first subtitle's text (lowercased, truncated to 30 chars) is
    slugified to dashes and suffixed with 8 random hex characters plus
    *output_format*.
    '''
    out_path = os.path.dirname(os.path.abspath(source))
    no_new_lines = subtitles[0].plaintext.strip('\n').lower()[:30]
    # Bug fix: the original class [^0-9A-z] also spans the punctuation
    # between 'Z' and 'a' ([ \ ] ^ _ `), letting those characters leak into
    # the filename. Use an explicit alphanumeric class.
    with_dashes = re.sub('[^0-9a-zA-Z]+', '-', no_new_lines).strip('-')
    unique_output = with_dashes + '-' + os.urandom(4).hex() + output_format
    return os.path.join(out_path, unique_output)
|
<filename>backend/marche/settings.py<gh_stars>1-10
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
root = environ.Path(__file__) - 3
env = environ.Env(
    DEBUG=(bool, False)
)
# NOTE(review): env_file is computed but not passed to read_env(), which
# therefore uses django-environ's default .env discovery — confirm intended.
env_file = os.path.join(BASE_DIR, ".env")
environ.Env.read_env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG')
SITE_ROOT = root()
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str('SECRET_KEY')
# NOTE(review): wildcard host list — acceptable behind a proxy, otherwise tighten.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    # third-party packages
    'rest_framework',
    'rest_framework.authtoken',
    'rest_framework_swagger',
    'rest_auth',
    'corsheaders',
    'storages',
    # developer apps
    'apps.users',
    'apps.adverts'
]
AUTH_USER_MODEL = 'users.CustomUser'
SITE_ID = 1
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'marche.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.request',
            ],
        },
    },
]
WSGI_APPLICATION = 'marche.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
REST_FRAMEWORK = {
    "DEFAULT_SCHEMA_CLASS": "rest_framework.schemas.coreapi.AutoSchema",
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework.authentication.TokenAuthentication',
        'rest_framework.authentication.SessionAuthentication'
    ],
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 15
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
CORS_ALLOW_ALL_ORIGINS=True
# Heroku-style database configuration: DATABASE_URL overrides sqlite.
if 'DATABASE_URL' in os.environ:
    import dj_database_url
    DATABASES['default'] = dj_database_url.config(conn_max_age=600, ssl_require=True)
# Local static/media in DEBUG; S3-backed storage otherwise.
if DEBUG:
    MEDIA_URL = '/media/'
    STATIC_URL = '/static/'
    MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
    STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
else:
    STATICFILES_DIRS = [
        os.path.join(BASE_DIR, 'static'),
    ]
    AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY')
    AWS_STORAGE_BUCKET_NAME = env('AWS_STORAGE_BUCKET_NAME')
    AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
    # NOTE(review): empty-string ACL — django-storages usually expects None
    # or a canned ACL name; confirm this is deliberate.
    AWS_DEFAULT_ACL = ''
    AWS_S3_SIGNATURE_VERSION = "s3v4"
    AWS_S3_OBJECT_PARAMETERS = {
        'CacheControl': 'max-age=86400',
    }
    AWS_LOCATION = 'static'
    STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
    STATIC_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, AWS_LOCATION)
    # s3 public media settings
    MEDIA_LOCATION = 'media'
    MEDIA_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, MEDIA_LOCATION)
    DEFAULT_FILE_STORAGE = 'marche.storage_backends.MediaStorage'
|
import requests
import io
import os
KATSU_URL = os.getenv("KATSU_URL")
def post_data(url, data):
    '''POST *data* as JSON to *url*; print and return the decoded response.

    Improvement: the original called response.json() twice, parsing the
    body twice; parse once and reuse the result.
    '''
    response = requests.post(url, json=data)
    body = response.json()
    print(body)
    return body
def get_project_id(project_title):
    '''
    Return the identifier of the Katsu project named *project_title*.

    First GETs /api/projects to reuse an existing project with that title;
    otherwise POSTs to /api/projects to create it and returns the new id.
    '''
    projects = requests.get(f"{KATSU_URL}/api/projects").json()["results"]
    for project in projects:
        if project["title"] == project_title:
            return project["identifier"]
    result = post_data(f"{KATSU_URL}/api/projects", {"title": project_title, "description": "Project for tests"})
    return result["identifier"]
def get_dataset_id(dataset_title, project_id):
    '''
    Return the identifier of the dataset titled *dataset_title* inside the
    project *project_id*.

    Reuses an existing dataset with that title when the project already has
    one; otherwise POSTs to /api/datasets (with a fixed GRU/GSO/COL/PUB
    data-use block) and returns the new id.
    '''
    datasets = requests.get(f"{KATSU_URL}/api/projects/{project_id}").json()["datasets"]
    for dataset in datasets:
        if dataset["title"] == dataset_title:
            return dataset["identifier"]
    # Template payload; the placeholder title/project are overwritten below.
    dataset = {
        "title": "DATASET_TITLE",
        "data_use": {
            "consent_code": {
                "primary_category": {
                    "code": "GRU"
                },
                "secondary_categories": [
                    {
                        "code": "GSO"
                    }
                ]
            },
            "data_use_requirements": [
                {
                    "code": "COL"
                },
                {
                    "code": "PUB"
                }
            ]
        },
        "project": "PROJECT_ID"
    }
    dataset["title"] = dataset_title
    dataset["project"] = project_id
    return post_data(f"{KATSU_URL}/api/datasets", dataset)["identifier"]
def create_table_ownership(table_id, dataset_id):
    '''
    POST to /api/table_ownership: link the table *table_id* to the dataset
    *dataset_id* (service_id is a fixed placeholder).
    '''
    payload = {
        "table_id": table_id,
        "service_id": "service",
        "dataset": dataset_id,
    }
    post_data(f"{KATSU_URL}/api/table_ownership", payload)
def create_table(table_id):
    '''
    POST to /api/tables: create a phenopacket table whose ownership record
    is *table_id* and whose name is derived from it.
    '''
    payload = {
        "ownership_record": table_id,
        "name": f"table{table_id}",
        "data_type": "phenopacket",
    }
    post_data(f"{KATSU_URL}/api/tables", payload)
def get_meta_data_id():
    '''
    POST to /api/metadata: create a metadata object and return its id.
    (Metadata is required by phenopackets and interpretations below.)
    '''
    return post_data(f"{KATSU_URL}/api/metadata", {"created_by": "test"})["id"]
def create_htsfile(dataset_name, htsfile_description):
    '''
    POST to /api/htsfiles: create an htsfile with the given description and
    the uri http://{dataset_name}.com.
    '''
    payload = {
        "uri": f"http://{dataset_name}.com",
        "hts_format": "UNKNOWN",
        "genome_assembly": "genome_assembly",
        "description": htsfile_description,
    }
    post_data(f"{KATSU_URL}/api/htsfiles", payload)
def get_biosample(biosample_id, procedure_code_id):
    '''
    Return a sample biosample payload with id *biosample_id*.

    The procedure code uses *procedure_code_id*; the sampled-tissue id
    mirrors the biosample id, and labels are fixed placeholders.
    '''
    return {
        "id": biosample_id,
        "procedure": {
            "code": {
                "id": procedure_code_id,
                "label": "procedure",
            }
        },
        "sampled_tissue": {
            "id": biosample_id,
            "label": "label",
        },
    }
def get_disease(disease_term_id):
    '''
    Return a sample disease payload whose term id is *disease_term_id*
    (the term label is a fixed placeholder).
    '''
    return {
        "term": {
            "id": disease_term_id,
            "label": "label",
        }
    }
def get_gene(gene_id):
    '''
    Return a sample gene payload with id *gene_id* and a placeholder symbol.
    '''
    return {
        "id": gene_id,
        "symbol": "symbol",
    }
def get_variant(variant_allele_hgvs):
    '''
    Return a sample variant payload of allele type hgvsAllele whose
    allele["hgvs"] field is *variant_allele_hgvs*.
    '''
    return {
        "allele_type": "hgvsAllele",
        "allele": {
            "hgvs": variant_allele_hgvs,
        },
    }
def get_genomic_interpretation(extra_properties_description, gene_id, variant_id):
    '''
    Return a sample genomic-interpretation payload (status UNKNOWN)
    referencing the given gene and variant, with *extra_properties_description*
    as its extra-properties description.
    '''
    return {
        "status": "UNKNOWN",
        "gene": gene_id,
        "variant": variant_id,
        "extra_properties": {
            "description": extra_properties_description,
        },
    }
def get_diagnosis(extra_properties_description, disease_id, genomic_interpretation_id):
    '''
    Return a sample diagnosis payload referencing the given disease and a
    single genomic interpretation, with *extra_properties_description* as
    its extra-properties description.
    '''
    return {
        "disease": disease_id,
        "genomic_interpretations": [genomic_interpretation_id],
        "extra_properties": {
            "description": extra_properties_description,
        },
    }
def get_phenotypicfeature(type_id, phenopacket_id, biosample_id):
    '''
    Return a sample phenotypic-feature payload whose type id is *type_id*,
    linked to the given phenopacket and biosample.
    '''
    return {
        "type": {
            "id": type_id,
            "label": "phenotypicfeature label",
        },
        "phenopacket": phenopacket_id,
        "biosample": biosample_id,
    }
def get_phenopacket(phenopacket_id, individual_id, meta_data_id, table_id, biosample_id, gene_id, variant_id, disease_id):
    '''
    Return a sample phenopacket payload with id *phenopacket_id*.

    The phenopacket references the given individual (subject), metadata,
    table, and single-element lists for biosample, gene, variant and
    disease ids.
    '''
    return {
        "id": phenopacket_id,
        "subject": individual_id,
        "meta_data": meta_data_id,
        "table": table_id,
        "biosamples": [biosample_id],
        "genes": [gene_id],
        "variants": [variant_id],
        "diseases": [disease_id],
    }
def get_interpretation(interpretation_id, phenopacket_id, meta_data_id, diagnosis_id):
    '''
    Return a sample interpretation payload with id *interpretation_id*,
    referencing the given phenopacket, metadata, and a single diagnosis.
    '''
    return {
        "id": interpretation_id,
        "phenopacket": phenopacket_id,
        "meta_data": meta_data_id,
        "diagnosis": [diagnosis_id],
    }
def create_sample_data(dataset_name, meta_data_id, individual_id, table_id):
    '''
    Create one of each sample record type under the dataset named
    *dataset_name* (biosample, disease, gene, variant, genomic
    interpretation, diagnosis, phenopacket, phenotypic feature,
    interpretation), chaining the ids returned by the API where needed.
    Ids for string-keyed records reuse *dataset_name* itself.
    '''
    biosample_id = dataset_name
    biosample = get_biosample(biosample_id, biosample_id)
    post_data(f"{KATSU_URL}/api/biosamples", biosample)
    disease = get_disease(dataset_name)
    disease_id = post_data(f"{KATSU_URL}/api/diseases", disease)["id"]
    gene = get_gene(dataset_name)
    gene_id = dataset_name
    post_data(f"{KATSU_URL}/api/genes", gene)
    variant = get_variant(dataset_name)
    variant_id = post_data(f"{KATSU_URL}/api/variants", variant)["id"]
    genomic_interpretation = get_genomic_interpretation(
        dataset_name, gene_id, variant_id)
    genomic_interpretation_id = post_data(
        f"{KATSU_URL}/api/genomicinterpretations", genomic_interpretation)["id"]
    diagnosis = get_diagnosis(dataset_name, disease_id,
                              genomic_interpretation_id)
    diagnosis_id = post_data(f"{KATSU_URL}/api/diagnoses", diagnosis)["id"]
    phenopacket_id = dataset_name
    phenopacket = get_phenopacket(dataset_name, individual_id, meta_data_id,
                                  table_id, biosample_id, gene_id, variant_id, disease_id)
    post_data(f"{KATSU_URL}/api/phenopackets", phenopacket)
    phenotypicfeature = get_phenotypicfeature(
        dataset_name, phenopacket_id, biosample_id)
    post_data(f"{KATSU_URL}/api/phenotypicfeatures", phenotypicfeature)
    interpretation_id = dataset_name
    interpretation = get_interpretation(
        interpretation_id, phenopacket_id, meta_data_id, diagnosis_id)
    post_data(f"{KATSU_URL}/api/interpretations", interpretation)
def create_project_table_meta_data(project_title):
    '''
    Seed a Katsu instance for access-level testing.

    Creates (or reuses) the project named *project_title*, one shared
    metadata object and one shared individual, then for each of six
    datasets (two open, one registered, three controlled) creates the
    dataset, its table ownership, its table, and a full set of sample
    records.
    '''
    project_id = get_project_id(project_title)
    meta_data_id = get_meta_data_id()
    post_data(f"{KATSU_URL}/api/individuals", {"id": "test_individual"})
    for t in ["open1", "open2", "registered3", "controlled4", "controlled5", "controlled6"]:
        dataset_id = get_dataset_id(t, project_id)
        create_table_ownership(t, dataset_id)
        create_table(t)
        create_sample_data(t, meta_data_id,
                           "test_individual", t)
# Script entry point: seed the Katsu instance with the "test" project and
# its sample datasets. NOTE(review): runs on import as well — consider a
# __main__ guard if this module is ever imported.
title = "test"
create_project_table_meta_data(title)
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `bq_to_vcf` module."""
import collections
import unittest
from apache_beam.io import filesystems
from apache_beam.io.gcp.internal.clients import bigquery
from gcp_variant_transforms import bq_to_vcf
from gcp_variant_transforms.libs import bigquery_util
from gcp_variant_transforms.testing import bigquery_schema_util
from gcp_variant_transforms.testing import temp_dir
class BqToVcfTest(unittest.TestCase):
  """Test cases for the `bq_to_vcf` module."""

  def _create_mock_args(self, **args):
    # Emulate parsed CLI args: a namedtuple whose fields and values come
    # straight from the keyword arguments supplied by the test.
    return collections.namedtuple(
        'MockArgs', list(args.keys()))(*list(args.values()))

  def test_write_vcf_data_header(self):
    """Sample names are appended (tab-separated) after the header columns."""
    lines = [
        '##fileformat=VCFv4.2\n',
        '##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n',
        '##INFO=<ID=AF,Number=A,Type=Float,Description="Allele Frequency">\n',
        '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n',
        '##FORMAT=<ID=GQ,Number=1,Type=Integer,Description="GQ">\n',
        '#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT \n']
    with temp_dir.TempDir() as tempdir:
      representative_header = tempdir.create_temp_file(lines=lines)
      file_path = filesystems.FileSystems.join(tempdir.get_path(),
                                               'data_header')
      bq_to_vcf._write_vcf_header_with_sample_names(
          ['Sample 1', 'Sample 2'],
          ['#CHROM', 'POS', 'ID', 'REF', 'ALT'],
          representative_header,
          file_path)
      # The meta-information lines are copied through verbatim; only the
      # final #CHROM line is rewritten with the sample columns.
      expected_content = [
          '##fileformat=VCFv4.2\n',
          '##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n',
          '##INFO=<ID=AF,Number=A,Type=Float,Description="Allele Frequency">\n',
          '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n',
          '##FORMAT=<ID=GQ,Number=1,Type=Integer,Description="GQ">\n',
          '#CHROM\tPOS\tID\tREF\tALT\tSample 1\tSample 2\n'
      ]
      with filesystems.FileSystems.open(file_path) as f:
        content = [line.decode('utf-8') for line in f.readlines()]
        self.assertEqual(content, expected_content)

  def test_get_variant_query_no_region(self):
    """Without genomic regions the query has no WHERE clause."""
    args = self._create_mock_args(
        input_table='my_bucket:my_dataset.my_table',
        genomic_regions=None)
    schema = bigquery.TableSchema()
    schema.fields.append(bigquery.TableFieldSchema(
        name=bigquery_util.ColumnKeyConstants.REFERENCE_NAME,
        type=bigquery_util.TableFieldConstants.TYPE_STRING,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='Reference name.'))
    self.assertEqual(bq_to_vcf._get_variant_query(args, schema),
                     'SELECT reference_name FROM '
                     '`my_bucket.my_dataset.my_table`')

  def test_get_variant_query_with_regions(self):
    """Each region becomes a reference/start/end predicate, OR-joined."""
    args_1 = self._create_mock_args(
        input_table='my_bucket:my_dataset.my_table',
        genomic_regions=['c1:1,000-2,000', 'c2'])
    schema = bigquery.TableSchema()
    schema.fields.append(bigquery.TableFieldSchema(
        name=bigquery_util.ColumnKeyConstants.REFERENCE_NAME,
        type=bigquery_util.TableFieldConstants.TYPE_STRING,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='Reference name.'))
    schema.fields.append(bigquery.TableFieldSchema(
        name=bigquery_util.ColumnKeyConstants.START_POSITION,
        type=bigquery_util.TableFieldConstants.TYPE_INTEGER,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description=('Start position (0-based). Corresponds to the first base '
                     'of the string of reference bases.')))
    # 'c2' has no explicit range, so it spans [0, max int64]; commas in the
    # 'c1' range are stripped.
    expected_query = (
        'SELECT reference_name, start_position FROM '
        '`my_bucket.my_dataset.my_table` WHERE '
        '(reference_name="c1" AND start_position>=1000 AND end_position<=2000) '
        'OR (reference_name="c2" AND start_position>=0 AND '
        'end_position<=9223372036854775807)'
    )
    self.assertEqual(bq_to_vcf._get_variant_query(args_1, schema),
                     expected_query)

  def test_get_query_columns(self):
    """Date-typed partitioning pseudo-columns are excluded from the query."""
    schema = bigquery.TableSchema()
    schema.fields.append(bigquery.TableFieldSchema(
        name=bigquery_util.ColumnKeyConstants.REFERENCE_NAME,
        type=bigquery_util.TableFieldConstants.TYPE_STRING,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='Reference name.'))
    schema.fields.append(bigquery.TableFieldSchema(
        name='partition_date_please_ignore',
        type='Date',
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='Column required by BigQuery partitioning logic.'))
    expected_columns = [bigquery_util.ColumnKeyConstants.REFERENCE_NAME]
    self.assertEqual(bq_to_vcf._get_query_columns(schema), expected_columns)

  def test_get_annotation_names(self):
    """Annotation field names are extracted per annotation record (e.g. CSQ)."""
    schema_with_annotations = bigquery_schema_util.get_sample_table_schema(
        with_annotation_fields=True)
    self.assertEqual(
        bq_to_vcf._extract_annotation_names(schema_with_annotations),
        {'CSQ': ['Consequence', 'IMPACT']})
    schema_with_no_annotation = bigquery_schema_util.get_sample_table_schema()
    self.assertEqual(
        bq_to_vcf._extract_annotation_names(schema_with_no_annotation),
        {})

  def test_get_annotation_names_multiple_annotations(self):
    """Multiple annotation records under alternate_bases are all extracted."""
    schema = bigquery.TableSchema()
    alternate_bases_record = bigquery.TableFieldSchema(
        name=bigquery_util.ColumnKeyConstants.ALTERNATE_BASES,
        type=bigquery_util.TableFieldConstants.TYPE_RECORD,
        mode=bigquery_util.TableFieldConstants.MODE_REPEATED,
        description='One record for each alternate base (if any).')
    annotation_record_1 = bigquery.TableFieldSchema(
        name='CSQ_1',
        type=bigquery_util.TableFieldConstants.TYPE_RECORD,
        mode=bigquery_util.TableFieldConstants.MODE_REPEATED,
        description='desc')
    annotation_record_1.fields.append(bigquery.TableFieldSchema(
        name='allele',
        type=bigquery_util.TableFieldConstants.TYPE_STRING,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='desc.'))
    annotation_record_1.fields.append(bigquery.TableFieldSchema(
        name='Consequence',
        type=bigquery_util.TableFieldConstants.TYPE_STRING,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='desc.'))
    alternate_bases_record.fields.append(annotation_record_1)
    annotation_record_2 = bigquery.TableFieldSchema(
        name='CSQ_2',
        type=bigquery_util.TableFieldConstants.TYPE_RECORD,
        mode=bigquery_util.TableFieldConstants.MODE_REPEATED,
        description='desc')
    annotation_record_2.fields.append(bigquery.TableFieldSchema(
        name='allele',
        type=bigquery_util.TableFieldConstants.TYPE_STRING,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='desc.'))
    annotation_record_2.fields.append(bigquery.TableFieldSchema(
        name='IMPACT',
        type=bigquery_util.TableFieldConstants.TYPE_STRING,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='desc.'))
    alternate_bases_record.fields.append(annotation_record_2)
    schema.fields.append(alternate_bases_record)
    self.assertEqual(
        bq_to_vcf._extract_annotation_names(schema),
        {'CSQ_1': ['allele', 'Consequence'], 'CSQ_2': ['allele', 'IMPACT']})
|
# -*- coding: utf-8 -*-
""" The Exp3 randomized index policy.
Reference: [Regret Analysis of Stochastic and Nonstochastic Multi-armed Bandit Problems, S.Bubeck & N.Cesa-Bianchi, §3.1](http://research.microsoft.com/en-us/um/people/sebubeck/SurveyBCB12.pdf)
See also [Evaluation and Analysis of the Performance of the EXP3 Algorithm in Stochastic Environments, <NAME> & <NAME> & <NAME> & <NAME>, 2012](http://proceedings.mlr.press/v24/seldin12a/seldin12a.pdf).
"""
from __future__ import division, print_function # Python 2 compatibility
__author__ = "<NAME>"
__version__ = "0.6"
import numpy as np
import numpy.random as rn
try:
from .BasePolicy import BasePolicy
except ImportError:
from BasePolicy import BasePolicy
#: self.unbiased is a flag to know if the rewards are used as biased estimator,
#: i.e., just :math:`r_t`, or unbiased estimators, :math:`r_t / trusts_t`.
UNBIASED = False
# NOTE: the line below deliberately overrides the assignment above; the
# effective default is True. Flip which assignment comes last to change it.
UNBIASED = True
#: Default :math:`\gamma` parameter.
GAMMA = 0.01
class Exp3(BasePolicy):
    """ The Exp3 randomized index policy.

    Reference: [Regret Analysis of Stochastic and Nonstochastic Multi-armed Bandit Problems, S.Bubeck & N.Cesa-Bianchi, §3.1](http://research.microsoft.com/en-us/um/people/sebubeck/SurveyBCB12.pdf)

    See also [Evaluation and Analysis of the Performance of the EXP3 Algorithm in Stochastic Environments, <NAME> & <NAME> & <NAME> & <NAME>, 2012](http://proceedings.mlr.press/v24/seldin12a/seldin12a.pdf).
    """

    def __init__(self, nbArms, gamma=GAMMA,
                 unbiased=UNBIASED, lower=0., amplitude=1.):
        """Create an Exp3 policy for nbArms arms with mixing parameter gamma in (0, 1]."""
        super(Exp3, self).__init__(nbArms, lower=lower, amplitude=amplitude)
        if gamma is None:  # Use a default value for the gamma parameter
            gamma = np.sqrt(np.log(nbArms) / nbArms)
        assert 0 < gamma <= 1, "Error: the 'gamma' parameter for Exp3 class has to be in (0, 1]."  # DEBUG
        self._gamma = gamma
        self.unbiased = unbiased  #: Unbiased estimators ?
        # Internal memory
        self.weights = np.full(nbArms, 1. / nbArms)  #: Weights on the arms
        # trying to randomize the order of the initial visit to each arm; as this determinism breaks its habitility to play efficiently in multi-players games
        # XXX do even more randomized, take a random permutation of the arm ?
        self._initial_exploration = rn.permutation(nbArms)
        # The proba that another player has the same is nbPlayers / factorial(nbArms) : should be SMALL !

    def startGame(self):
        """Start with uniform weights."""
        super(Exp3, self).startGame()
        self.weights.fill(1. / self.nbArms)

    def __str__(self):
        return r"Exp3($\gamma: {:.3g}$)".format(self.gamma)

    # This decorator @property makes this method an attribute, cf. https://docs.python.org/3/library/functions.html#property
    @property
    def gamma(self):
        r"""Constant :math:`\gamma_t = \gamma`."""
        return self._gamma

    @property
    def trusts(self):
        r"""Update the trusts probabilities according to Exp3 formula, and the parameter :math:`\gamma_t`.

        .. math::

           \mathrm{trusts}'_k(t+1) &= (1 - \gamma_t) w_k(t) + \gamma_t \frac{1}{K}, \\
           \mathrm{trusts}(t+1) &= \mathrm{trusts}'(t+1) / \sum_{k=1}^{K} \mathrm{trusts}'_k(t+1).

        If :math:`w_k(t)` is the current weight from arm k.
        """
        # Mixture between the weights and the uniform distribution
        trusts = ((1 - self.gamma) * self.weights) + (self.gamma / self.nbArms)
        # XXX Handle weird cases, slow down everything but safer!
        if not np.all(np.isfinite(trusts)):
            trusts[~np.isfinite(trusts)] = 0  # set bad values to 0
        # Bad case, where the sum is so small that it's only rounding errors
        # or where all values where bad and forced to 0, start with trusts=[1/K...]
        if np.isclose(np.sum(trusts), 0):
            trusts[:] = 1.0 / self.nbArms
        # Normalize it and return it
        return trusts / np.sum(trusts)

    def getReward(self, arm, reward):
        r"""Give a reward: accumulate rewards on that arm k, then update the weight :math:`w_k(t)` and renormalize the weights.

        - With unbiased estimators, divide by the trust on that arm k, i.e., the probability of observing arm k: :math:`\tilde{r}_k(t) = \frac{r_k(t)}{\mathrm{trusts}_k(t)}`.
        - But with a biased estimators, :math:`\tilde{r}_k(t) = r_k(t)`.

        .. math::

           w'_k(t+1) &= w_k(t) \times \exp\left( \frac{\gamma_t \tilde{r}_k(t)}{K} \right) \\
           w(t+1) &= w'(t+1) / \sum_{k=1}^{K} w'_k(t+1).

        (The exponent above matches the code below, ``reward * (gamma / nbArms)``;
        an earlier version of this docstring stated
        :math:`\tilde{r}_k(t) / (\gamma_t N_k(t))`, which disagreed with the
        implementation.)
        """
        super(Exp3, self).getReward(arm, reward)  # XXX Call to BasePolicy
        # Update weight of THIS arm, with this biased or unbiased reward
        if self.unbiased:
            reward = reward / self.trusts[arm]
        # Multiplicative weights
        self.weights[arm] *= np.exp(reward * (self.gamma / self.nbArms))
        # Renormalize weights at each step
        self.weights /= np.sum(self.weights)

    # --- Choice methods

    def choice(self):
        """One random selection, with probabilities = trusts, thank to :func:`numpy.random.choice`."""
        # Force to first visit each arm once in the first steps
        if self.t < self.nbArms:
            # DONE we could use a random permutation instead of deterministic order!
            return self._initial_exploration[self.t]
        else:
            return rn.choice(self.nbArms, p=self.trusts)

    def choiceWithRank(self, rank=1):
        """Multiple (rank >= 1) random selection, with probabilities = trusts, thank to :func:`numpy.random.choice`, and select the last one (less probable).

        - Note that if not enough entries in the trust vector are non-zero, then :func:`choice` is called instead (rank is ignored).
        """
        if (self.t < self.nbArms) or (rank == 1) or np.sum(~np.isclose(self.trusts, 0)) < rank:
            return self.choice()
        else:
            return rn.choice(self.nbArms, size=rank, replace=False, p=self.trusts)[rank - 1]

    def choiceFromSubSet(self, availableArms='all'):
        """One random selection, from availableArms, with probabilities = trusts, thank to :func:`numpy.random.choice`."""
        if (self.t < self.nbArms) or (availableArms == 'all') or (len(availableArms) == self.nbArms):
            return self.choice()
        else:
            # NOTE(review): trusts[availableArms] is not renormalized here, so
            # the ``p=`` argument may not sum to 1 and numpy.random.choice
            # would raise — confirm whether proper subsets are ever passed.
            return rn.choice(availableArms, p=self.trusts[availableArms])

    def choiceMultiple(self, nb=1):
        """Multiple (nb >= 1) random selection, with probabilities = trusts, thank to :func:`numpy.random.choice`."""
        return rn.choice(self.nbArms, size=nb, replace=False, p=self.trusts)

    # --- Other methods

    def estimatedOrder(self):
        """ Return the estimate order of the arms, as a permutation on [0..K-1] that would order the arms by increasing trust probabilities."""
        return np.argsort(self.trusts)

    def estimatedBestArms(self, M=1):
        """ Return a (non-necessarily sorted) list of the indexes of the M-best arms. Identify the set M-best."""
        assert 1 <= M <= self.nbArms, "Error: the parameter 'M' has to be between 1 and K = {}, but it was {} ...".format(self.nbArms, M)  # DEBUG
        order = self.estimatedOrder()
        return order[-M:]
# --- Three special cases
class Exp3WithHorizon(Exp3):
    r""" Exp3 with fixed gamma, :math:`\gamma_t = \gamma_0`, chosen with a knowledge of the horizon."""

    def __init__(self, nbArms, horizon, unbiased=UNBIASED, lower=0., amplitude=1.):
        """Create an Exp3 policy tuned for a known horizon ``T = horizon``."""
        super(Exp3WithHorizon, self).__init__(nbArms, unbiased=unbiased, lower=lower, amplitude=amplitude)
        # BUGFIX: the assertion message used to say "SoftmaxWithHorizon"
        # (copy-paste from the Softmax policy); it now names this class.
        assert horizon > 0, "Error: the 'horizon' parameter for Exp3WithHorizon class has to be > 0."
        self.horizon = int(horizon)  #: Parameter :math:`T` = known horizon of the experiment.

    def __str__(self):
        return r"Exp3($T={}$)".format(self.horizon)

    # This decorator @property makes this method an attribute, cf. https://docs.python.org/3/library/functions.html#property
    @property
    def gamma(self):
        r""" Fixed temperature, small, knowing the horizon: :math:`\gamma_t = \sqrt(\frac{2 \log(K)}{T K})` (*heuristic*).

        - Cf. Theorem 3.1 case #1 of [Bubeck & Cesa-Bianchi, 2012](http://sbubeck.com/SurveyBCB12.pdf).
        """
        return np.sqrt(2 * np.log(self.nbArms) / (self.horizon * self.nbArms))
class Exp3Decreasing(Exp3):
    r""" Exp3 with decreasing parameter :math:`\gamma_t`."""

    def __str__(self):
        return "Exp3(decreasing)"

    # This decorator @property makes this method an attribute, cf. https://docs.python.org/3/library/functions.html#property
    @property
    def gamma(self):
        r""" Decreasing gamma with the time: :math:`\gamma_t = \min(\frac{1}{K}, \sqrt(\frac{\log(K)}{t K}))` (*heuristic*).

        - Cf. Theorem 3.1 case #2 of [Bubeck & Cesa-Bianchi, 2012](http://sbubeck.com/SurveyBCB12.pdf).
        """
        nb_arms = self.nbArms
        uniform_rate = 1. / nb_arms
        adaptive_rate = np.sqrt(np.log(nb_arms) / (self.t * nb_arms))
        return min(uniform_rate, adaptive_rate)
class Exp3SoftMix(Exp3):
    r""" Another Exp3 with decreasing parameter :math:`\gamma_t`."""

    def __str__(self):
        return "Exp3(SoftMix)"

    # This decorator @property makes this method an attribute, cf. https://docs.python.org/3/library/functions.html#property
    @property
    def gamma(self):
        r""" Decreasing gamma parameter with the time: :math:`\gamma_t = c \frac{\log(t)}{t}` (*heuristic*).

        - Cf. [Cesa-Bianchi & Fisher, 1998](http://dl.acm.org/citation.cfm?id=657473).
        - Default value for is :math:`c = \sqrt(\frac{\log(K)}{K})`.
        """
        scale = np.sqrt(np.log(self.nbArms) / self.nbArms)
        return scale * np.log(self.t) / self.t
# --- Other variants

#: Default value for the confidence parameter :math:`\delta`, used by :class:`Exp3ELM` below.
DELTA = 0.01
class Exp3ELM(Exp3):
    r""" A variant of Exp3, apparently designed to work better in stochastic environments.

    - Reference: [Evaluation and Analysis of the Performance of the EXP3 Algorithm in Stochastic Environments, <NAME> & <NAME> & <NAME> & <NAME>, 2012](http://proceedings.mlr.press/v24/seldin12a/seldin12a.pdf).
    """

    def __init__(self, nbArms, delta=DELTA, unbiased=True, lower=0., amplitude=1.):
        """Create an Exp3ELM policy with confidence parameter delta (arm elimination enabled)."""
        super(Exp3ELM, self).__init__(nbArms, unbiased=unbiased, lower=lower, amplitude=amplitude)
        assert delta > 0, "Error: the 'delta' parameter for Exp3ELM class has to be > 0."
        self.delta = delta  #: Confidence parameter, given in input
        # NOTE(review): ``np.exp(2) - 2.`` computes e**2 - 2, but the formula
        # in the trailing comment says 4 (e - 2); one of the two is wrong —
        # confirm against Seldin et al. 2012 (e would be ``np.exp(1)``).
        self.B = 4 * (np.exp(2) - 2.) * (2 * np.log(nbArms) + np.log(2. / delta))  #: Constant B given by :math:`B = 4 (e - 2) (2 \log K + \log(2 / \delta))`.
        self.availableArms = np.arange(nbArms)  #: Set of available arms, starting from all arms, and it can get reduced at each step.
        self.varianceTerm = np.zeros(nbArms)  #: Estimated variance term, for each arm.

    def __str__(self):
        return r"Exp3ELM($\delta={:.3g}$)".format(self.delta)

    def choice(self):
        """ Choose among the remaining arms."""
        # Force to first visit each arm once in the first steps
        if self.t < self.nbArms:
            return self._initial_exploration[self.t]
        else:
            # NOTE(review): the ``trusts`` property below already returns a
            # vector covering only availableArms, so indexing it *again* with
            # availableArms looks like double-indexing — correct only while no
            # arm has been removed (availableArms == [0..K-1]). TODO confirm.
            p = self.trusts[self.availableArms]
            return rn.choice(self.availableArms, p=p / np.sum(p))

    def getReward(self, arm, reward):
        r""" Get reward and update the weights, as in Exp3, but also update the variance term :math:`V_k(t)` for all arms, and the set of available arms :math:`\mathcal{A}(t)`, by removing arms whose empirical accumulated reward and variance term satisfy a certain inequality.

        .. math::

           a^*(t+1) &= \arg\max_a \hat{R}_{a}(t+1), \\
           V_k(t+1) &= V_k(t) + \frac{1}{\mathrm{trusts}_k(t+1)}, \\
           \mathcal{A}(t+1) &= \mathcal{A}(t) \setminus \left\{ a : \hat{R}_{a^*(t+1)}(t+1) - \hat{R}_{a}(t+1) > \sqrt{B (V_{a^*(t+1)}(t+1) + V_{a}(t+1))} \right\}.
        """
        assert arm in self.availableArms, "Error: at time {}, the arm {} was played by Exp3ELM but it is not in the set of remaining arms {}...".format(self.t, arm, self.availableArms)  # DEBUG
        # First, use the reward to update the weights
        self.t += 1
        self.pulls[arm] += 1
        reward = (reward - self.lower) / self.amplitude
        # Update weight of THIS arm, with this biased or unbiased reward
        if self.unbiased:
            # NOTE(review): ``trusts`` is positional over availableArms while
            # ``arm`` is a global arm id; they diverge once any arm has been
            # removed. TODO confirm.
            reward = reward / self.trusts[arm]
        self.rewards[arm] += reward
        # Multiplicative weights
        self.weights[arm] *= np.exp(reward * self.gamma)
        # Renormalize weights at each step
        self.weights[self.availableArms] /= np.sum(self.weights[self.availableArms])
        # Then update the variance
        self.varianceTerm[self.availableArms] += 1. / self.trusts[self.availableArms]
        # And update the set of available arms
        # NOTE(review): argmax over the subarray returns a *position* within
        # availableArms, but a_star is then used as a global arm index below;
        # likewise badArms holds positions from np.where, yet is set-subtracted
        # from global arm ids. Correct only while availableArms == [0..K-1].
        a_star = np.argmax(self.rewards[self.availableArms])
        # print("- Exp3ELM identified the arm of best accumulated rewards to be {}, at time {} ...".format(a_star, self.t))  # DEBUG
        test = (self.rewards[a_star] - self.rewards[self.availableArms]) > np.sqrt(self.B * (self.varianceTerm[a_star] + self.varianceTerm[self.availableArms]))
        badArms = np.where(test)[0]
        # Do we have bad arms ? If yes, remove them
        if len(badArms) > 0:
            print("- Exp3ELM identified these arms to be bad at time {} : {}, removing them from the set of available arms ...".format(self.t, badArms))  # DEBUG
            self.availableArms = np.setdiff1d(self.availableArms, badArms)
        # # DEBUG
        # print("- Exp3ELM at time {} as this internal memory:\n - B = {} and delta = {}\n - Pulls {}\n - Rewards {}\n - Weights {}\n - Variance {}\n - Trusts {}\n - a_star {}\n - Left part of test {}\n - Right part of test {}\n - test {}\n - Bad arms {}\n - Available arms {}".format(self.t, self.B, self.delta, self.pulls, self.rewards, self.weights, self.varianceTerm, self.trusts, a_star, (self.rewards[a_star] - self.rewards[self.availableArms]), np.sqrt(self.B * (self.varianceTerm[a_star] + self.varianceTerm[self.availableArms])), test, badArms, self.availableArms))  # DEBUG
        # print(input("[Enter to keep going on]"))  # DEBUG

    # --- Trusts and gamma coefficient

    @property
    def trusts(self):
        r""" Update the trusts probabilities according to Exp3ELM formula, and the parameter :math:`\gamma_t`.

        .. math::

           \mathrm{trusts}'_k(t+1) &= (1 - |\mathcal{A}_t| \gamma_t) w_k(t) + \gamma_t, \\
           \mathrm{trusts}(t+1) &= \mathrm{trusts}'(t+1) / \sum_{k=1}^{K} \mathrm{trusts}'_k(t+1).

        If :math:`w_k(t)` is the current weight from arm k.
        """
        # Mixture between the weights and the uniform distribution
        trusts = ((1 - self.gamma * len(self.availableArms)) * self.weights[self.availableArms]) + self.gamma
        # XXX Handle weird cases, slow down everything but safer!
        if not np.all(np.isfinite(trusts)):
            # XXX some value has non-finite trust, probably on the first steps
            # 1st case: all values are non-finite (nan): set trusts to 1/N uniform choice
            if np.all(~np.isfinite(trusts)):
                trusts = np.full(len(self.availableArms), 1. / len(self.availableArms))
            # 2nd case: only few values are non-finite: set them to 0
            else:
                trusts[~np.isfinite(trusts)] = 0
        # Bad case, where the sum is so small that it's only rounding errors
        if np.isclose(np.sum(trusts), 0):
            trusts = np.full(len(self.availableArms), 1. / len(self.availableArms))
        # Normalize it and return it
        # NOTE(review): ``trusts`` has length len(availableArms), so indexing
        # it with availableArms (global arm ids) in the denominator is only
        # valid while no arm has been removed. TODO confirm.
        # return trusts
        return trusts / np.sum(trusts[self.availableArms])

    # This decorator @property makes this method an attribute, cf. https://docs.python.org/3/library/functions.html#property
    @property
    def gamma(self):
        r""" Decreasing gamma with the time: :math:`\gamma_t = \min(\frac{1}{K}, \sqrt(\frac{\log(K)}{t K}))` (*heuristic*).

        - Cf. Theorem 3.1 case #2 of [Bubeck & Cesa-Bianchi, 2012](http://sbubeck.com/SurveyBCB12.pdf).
        """
        return min(1. / self.nbArms, np.sqrt(np.log(self.nbArms) / (self.t * self.nbArms)))
|
"""Module for DataArray accessor classes."""
__all__ = ["add_accessors"]
# standard library
from collections import defaultdict
from functools import lru_cache
from itertools import chain
from inspect import getsource, signature
from re import sub
from textwrap import dedent
from types import FunctionType
from typing import Any, Callable, List, Optional
from uuid import uuid4
# dependencies
from xarray import DataArray, register_dataarray_accessor
# main features
def add_accessors(cls: type, name: Optional[str] = None) -> type:
    """Add unique and common accessors to a DataArray class.

    Args:
        cls: DataArray class to which accessors are added.
        name: Name of a common accessor. If not specified,
            only an unique accessor is added to the class.

    Returns:
        The same DataArray class as the input.

    """
    # Creating the subclasses is what does the work: each base's
    # ``__init_subclass__`` hook performs the accessor registration, so the
    # throwaway classes themselves never need to be referenced again.
    type("UniqueAccessor", (UniqueAccessorBase,), {"_dataarrayclass": cls})
    type(
        "CommonAccessor",
        (CommonAccessorBase,),
        {"_dataarrayclass": cls, "_name": name},
    )
    return cls
# helper features
class CommonAccessorBase:
    """Base class for common accessors of DataArray classes."""

    # Registry mapping an accessor name to the DataArray classes sharing it
    # (most recently registered class first; shared across all subclasses).
    _dataarrayclasses = defaultdict(list)
    _dataarrayclass: type
    _name: str

    def __init_subclass__(cls):
        """Initialize a subclass with a bound DataArray class."""
        if not cls._name:
            return
        # Register the accessor with xarray only once per name; later
        # subclasses just join the existing registry entry.
        if cls._name not in cls._dataarrayclasses:
            register_dataarray_accessor(cls._name)(cls)
        cls._dataarrayclasses[cls._name].insert(0, cls._dataarrayclass)

    def __init__(self, dataarray: DataArray) -> None:
        """Initialize an instance with a DataArray to be accessed."""
        self._dataarray = dataarray

    def __getattr__(self, name: str) -> Any:
        """Get a method or an attribute of the DataArray class."""
        # Search registered classes newest-first; the first one whose unique
        # accessor exposes the attribute wins.
        for dataarrayclass in self._dataarrayclasses[self._name]:
            bound = dataarrayclass._accessor(self._dataarray)
            if hasattr(bound, name):
                return getattr(bound, name)
        raise AttributeError(f"Any DataArray class has no attribute {name!r}")

    def __dir__(self) -> List[str]:
        """List names in the union namespace of DataArray classes."""
        dirs = map(dir, self._dataarrayclasses[self._name])
        return list(set(chain.from_iterable(dirs)))
class UniqueAccessorBase:
    """Base class for unique accessors of DataArray classes."""

    _dataarrayclass: type
    _name: str

    def __init_subclass__(cls) -> None:
        """Initialize a subclass with a bound DataArray class."""
        cls._dataarrayclass._accessor = cls
        # Random, effectively-unique accessor name so every DataArray class
        # gets its own private entry point on DataArray instances.
        cls._name = "_accessor_" + uuid4().hex[:16]
        register_dataarray_accessor(cls._name)(cls)

    def __init__(self, dataarray: DataArray) -> None:
        """Initialize an instance with a DataArray to be accessed."""
        self._dataarray = dataarray

    @lru_cache(None)
    def __bind_function(self, func: Callable) -> Callable:
        """Convert a function to a method of an instance."""
        # Rewrite the function's source so every reference through its first
        # parameter (e.g. ``self.foo``) is routed via this accessor
        # (``self.<accessor name>.foo``), then re-exec the patched source and
        # bind the resulting function to the wrapped DataArray.
        # NOTE(review): lru_cache on an instance method keys on ``self`` and
        # keeps accessor instances alive for the cache's lifetime — confirm
        # this is intended here.
        first_arg = list(signature(func).parameters)[0]
        pattern = rf"(?<!\w){first_arg}\."
        repl = rf"{first_arg}.{self._name}."
        source = dedent(getsource(func))
        exec(sub(pattern, repl, source), func.__globals__, locals())
        return locals()[func.__name__].__get__(self._dataarray)

    def __getattr__(self, name: str) -> Any:
        """Get a method or an attribute of the DataArray class."""
        # Prefer attributes of the underlying DataArray itself; fall back to
        # the DataArray class, binding plain functions and property getters
        # through __bind_function so their ``self.`` references resolve.
        try:
            return getattr(self._dataarray, name)
        except AttributeError:
            obj = getattr(self._dataarrayclass, name)
            if isinstance(obj, FunctionType):
                return self.__bind_function(obj)
            if isinstance(obj, property):
                return self.__bind_function(obj.fget)
            return obj

    def __dir__(self) -> List[str]:
        """List names in the namespace of the DataArray class."""
        return dir(self._dataarrayclass)
|
<gh_stars>0
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Object versioning in swift is implemented by setting a flag on the container
to tell swift to version all objects in the container. The value of the flag is
the container where the versions are stored (commonly referred to as the
"archive container"). The flag itself is one of two headers, which determines
how object ``DELETE`` requests are handled:
* ``X-History-Location``
On ``DELETE``, copy the current version of the object to the archive
container, write a zero-byte "delete marker" object that notes when the
delete took place, and delete the object from the versioned container. The
object will no longer appear in container listings for the versioned
container and future requests there will return ``404 Not Found``. However,
the content will still be recoverable from the archive container.
* ``X-Versions-Location``
On ``DELETE``, only remove the current version of the object. If any
previous versions exist in the archive container, the most recent one is
copied over the current version, and the copy in the archive container is
deleted. As a result, if you have 5 total versions of the object, you must
delete the object 5 times for that object name to start responding with
``404 Not Found``.
Either header may be used for the various containers within an account, but
only one may be set for any given container. Attempting to set both
simultaneously will result in a ``400 Bad Request`` response.
.. note::
It is recommended to use a different archive container for
each container that is being versioned.
.. note::
Enabling versioning on an archive container is not recommended.
When data is ``PUT`` into a versioned container (a container with the
versioning flag turned on), the existing data in the file is redirected to a
new object in the archive container and the data in the ``PUT`` request is
saved as the data for the versioned object. The new object name (for the
previous version) is ``<archive_container>/<length><object_name>/<timestamp>``,
where ``length`` is the 3-character zero-padded hexadecimal length of the
``<object_name>`` and ``<timestamp>`` is the timestamp of when the previous
version was created.
A ``GET`` to a versioned object will return the current version of the object
without having to do any request redirects or metadata lookups.
A ``POST`` to a versioned object will update the object metadata as normal,
but will not create a new version of the object. In other words, new versions
are only created when the content of the object changes.
A ``DELETE`` to a versioned object will be handled in one of two ways,
as described above.
To restore a previous version of an object, find the desired version in the
archive container then issue a ``COPY`` with a ``Destination`` header
indicating the original location. This will archive the current version similar
to a ``PUT`` over the versioned object. If the client additionally wishes
to permanently delete what was the current version, it must find the
newly-created archive in the archive container and issue a separate ``DELETE``
to it.
--------------------------------------------------
How to Enable Object Versioning in a Swift Cluster
--------------------------------------------------
This middleware was written as an effort to refactor parts of the proxy server,
so this functionality was already available in previous releases and every
attempt was made to maintain backwards compatibility. To allow operators to
perform a seamless upgrade, it is not required to add the middleware to the
proxy pipeline and the flag ``allow_versions`` in the container server
configuration files are still valid, but only when using
``X-Versions-Location``. In future releases, ``allow_versions`` will be
deprecated in favor of adding this middleware to the pipeline to enable or
disable the feature.
In case the middleware is added to the proxy pipeline, you must also
set ``allow_versioned_writes`` to ``True`` in the middleware options
to enable the information about this middleware to be returned in a /info
request.
.. note::
You need to add the middleware to the proxy pipeline and set
``allow_versioned_writes = True`` to use ``X-History-Location``. Setting
``allow_versions = True`` in the container server is not sufficient to
enable the use of ``X-History-Location``.
Upgrade considerations:
+++++++++++++++++++++++
If ``allow_versioned_writes`` is set in the filter configuration, you can leave
the ``allow_versions`` flag in the container server configuration files
untouched. If you decide to disable or remove the ``allow_versions`` flag, you
must re-set any existing containers that had the ``X-Versions-Location`` flag
configured so that it can now be tracked by the versioned_writes middleware.
Clients should not use the ``X-History-Location`` header until all proxies in
the cluster have been upgraded to a version of Swift that supports it.
Attempting to use ``X-History-Location`` during a rolling upgrade may result
in some requests being served by proxies running old code, leading to data
loss.
----------------------------------------------------
Examples Using ``curl`` with ``X-Versions-Location``
----------------------------------------------------
First, create a container with the ``X-Versions-Location`` header or add the
header to an existing container. Also make sure the container referenced by
the ``X-Versions-Location`` exists. In this example, the name of that
container is "versions"::
curl -i -XPUT -H "X-Auth-Token: <token>" \
-H "X-Versions-Location: versions" http://<storage_url>/container
curl -i -XPUT -H "X-Auth-Token: <token>" http://<storage_url>/versions
Create an object (the first version)::
curl -i -XPUT --data-binary 1 -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
Now create a new version of that object::
curl -i -XPUT --data-binary 2 -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
See a listing of the older versions of the object::
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/versions?prefix=008myobject/
Now delete the current version of the object and see that the older version is
gone from 'versions' container and back in 'container' container::
curl -i -XDELETE -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/versions?prefix=008myobject/
curl -i -XGET -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
---------------------------------------------------
Examples Using ``curl`` with ``X-History-Location``
---------------------------------------------------
As above, create a container with the ``X-History-Location`` header and ensure
that the container referenced by the ``X-History-Location`` exists. In this
example, the name of that container is "versions"::
curl -i -XPUT -H "X-Auth-Token: <token>" \
-H "X-History-Location: versions" http://<storage_url>/container
curl -i -XPUT -H "X-Auth-Token: <token>" http://<storage_url>/versions
Create an object (the first version)::
curl -i -XPUT --data-binary 1 -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
Now create a new version of that object::
curl -i -XPUT --data-binary 2 -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
Now delete the current version of the object. Subsequent requests will 404::
curl -i -XDELETE -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
A listing of the older versions of the object will include both the first and
second versions of the object, as well as a "delete marker" object::
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/versions?prefix=008myobject/
To restore a previous version, simply ``COPY`` it from the archive container::
curl -i -XCOPY -H "X-Auth-Token: <token>" \
http://<storage_url>/versions/008myobject/<timestamp> \
-H "Destination: container/myobject"
Note that the archive container still has all previous versions of the object,
including the source for the restore::
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/versions?prefix=008myobject/
To permanently delete a previous version, ``DELETE`` it from the archive
container::
curl -i -XDELETE -H "X-Auth-Token: <token>" \
http://<storage_url>/versions/008myobject/<timestamp>
---------------------------------------------------
How to Disable Object Versioning in a Swift Cluster
---------------------------------------------------
If you want to disable all functionality, set ``allow_versioned_writes`` to
``False`` in the middleware options.
Disable versioning from a container (x is any value except empty)::
curl -i -XPOST -H "X-Auth-Token: <token>" \
-H "X-Remove-Versions-Location: x" http://<storage_url>/container
"""
import calendar
import json
from six.moves.urllib.parse import quote, unquote
import time
from swift.common.utils import get_logger, Timestamp, \
register_swift_info, config_true_value, close_if_possible, FileLikeIter
from swift.common.request_helpers import get_sys_meta_prefix, \
copy_header_subset
from swift.common.wsgi import WSGIContext, make_pre_authed_request
from swift.common.swob import (
Request, HTTPException, HTTPRequestEntityTooLarge)
from swift.common.constraints import check_container_format, MAX_FILE_SIZE
from swift.proxy.controllers.base import get_container_info
from swift.common.http import (
is_success, is_client_error, HTTP_NOT_FOUND)
from swift.common.swob import HTTPPreconditionFailed, HTTPServiceUnavailable, \
HTTPServerError, HTTPBadRequest
from swift.common.exceptions import (
ListingIterNotFound, ListingIterError)
# Content type of the zero-byte "delete marker" objects written into the
# archive container when an object is deleted in history mode.
DELETE_MARKER_CONTENT_TYPE = 'application/x-deleted;swift_versions_deleted=1'
# Client-visible headers that select stack mode vs. history mode.
CLIENT_VERSIONS_LOC = 'x-versions-location'
CLIENT_HISTORY_LOC = 'x-history-location'
# Container sysmeta is the authoritative store for the archive container
# name and the versioning mode; the client headers are translated to these.
SYSMETA_VERSIONS_LOC = get_sys_meta_prefix('container') + 'versions-location'
SYSMETA_VERSIONS_MODE = get_sys_meta_prefix('container') + 'versions-mode'
class VersionedWritesContext(WSGIContext):
    """Per-request context implementing the versioned-writes operations.

    Responsibilities: archiving the current object before a write, writing
    and popping versions on DELETE (history vs. stack mode), and translating
    versioning sysmeta in container responses back to client headers.
    """

    def __init__(self, wsgi_app, logger):
        WSGIContext.__init__(self, wsgi_app)
        self.logger = logger

    def _listing_iter(self, account_name, lcontainer, lprefix, req):
        # Flatten the paged archive-container listing into a single item
        # iterator, mapping internal listing errors onto client-facing
        # HTTP errors bound to ``req``.
        try:
            for page in self._listing_pages_iter(account_name, lcontainer,
                                                 lprefix, req.environ):
                for item in page:
                    yield item
        except ListingIterNotFound:
            # no archive container -- nothing to yield
            pass
        except HTTPPreconditionFailed:
            raise HTTPPreconditionFailed(request=req)
        except ListingIterError:
            raise HTTPServerError(request=req)

    def _in_proxy_reverse_listing(self, account_name, lcontainer, lprefix,
                                  env, failed_marker, failed_listing):
        '''Get the complete prefix listing and reverse it on the proxy.

        This is only necessary if we encounter a response from a
        container-server that does not respect the ``reverse`` param
        included by default in ``_listing_pages_iter``. This may happen
        during rolling upgrades from pre-2.6.0 swift.

        :param failed_marker: the marker that was used when we encountered
                              the non-reversed listing
        :param failed_listing: the non-reversed listing that was encountered.
                               If ``failed_marker`` is blank, we can use this
                               to save ourselves a request
        :returns: an iterator over all objects starting with ``lprefix`` (up
                  to but not including the failed marker) in reverse order
        '''
        complete_listing = []
        if not failed_marker:
            # We've never gotten a reversed listing. So save a request and
            # use the failed listing.
            complete_listing.extend(failed_listing)
            marker = complete_listing[-1]['name'].encode('utf8')
        else:
            # We've gotten at least one reversed listing. Have to start at
            # the beginning.
            marker = ''

        # First, take the *entire* prefix listing into memory
        try:
            for page in self._listing_pages_iter(
                    account_name, lcontainer, lprefix,
                    env, marker, end_marker=failed_marker, reverse=False):
                complete_listing.extend(page)
        except ListingIterNotFound:
            pass

        # Now that we've got everything, return the whole listing as one giant
        # reversed page
        return reversed(complete_listing)

    def _listing_pages_iter(self, account_name, lcontainer, lprefix,
                            env, marker='', end_marker='', reverse=True):
        '''Get "pages" worth of objects that start with a prefix.

        The optional keyword arguments ``marker``, ``end_marker``, and
        ``reverse`` are used similar to how they are for containers. We're
        either coming:

           - directly from ``_listing_iter``, in which case none of the
             optional args are specified, or

           - from ``_in_proxy_reverse_listing``, in which case ``reverse``
             is ``False`` and both ``marker`` and ``end_marker`` are specified
             (although they may still be blank).
        '''
        while True:
            # pre-authed: the user may have write access without read access
            lreq = make_pre_authed_request(
                env, method='GET', swift_source='VW',
                path='/v1/%s/%s' % (account_name, lcontainer))
            lreq.environ['QUERY_STRING'] = \
                'format=json&prefix=%s&marker=%s' % (
                    quote(lprefix), quote(marker))
            if end_marker:
                lreq.environ['QUERY_STRING'] += '&end_marker=%s' % (
                    quote(end_marker))
            if reverse:
                lreq.environ['QUERY_STRING'] += '&reverse=on'
            lresp = lreq.get_response(self.app)
            if not is_success(lresp.status_int):
                if lresp.status_int == HTTP_NOT_FOUND:
                    raise ListingIterNotFound()
                elif is_client_error(lresp.status_int):
                    raise HTTPPreconditionFailed()
                else:
                    raise ListingIterError()

            if not lresp.body:
                break

            sublisting = json.loads(lresp.body)
            if not sublisting:
                break

            # When using the ``reverse`` param, check that the listing is
            # actually reversed
            first_item = sublisting[0]['name'].encode('utf-8')
            last_item = sublisting[-1]['name'].encode('utf-8')
            page_is_after_marker = marker and first_item > marker
            if reverse and (first_item < last_item or page_is_after_marker):
                # Apparently there's at least one pre-2.6.0 container server
                yield self._in_proxy_reverse_listing(
                    account_name, lcontainer, lprefix,
                    env, marker, sublisting)
                return

            marker = last_item
            yield sublisting

    def _get_source_object(self, req, path_info):
        """GET the newest copy of the object at ``path_info``.

        :returns: the GET response, or an ``HTTPRequestEntityTooLarge``
                  response object when the source has no content length or
                  is too large to copy.
        """
        # make a pre_auth request in case the user has write access
        # to container, but not READ. This was allowed in previous version
        # (i.e., before middleware) so keeping the same behavior here
        get_req = make_pre_authed_request(
            req.environ, path=path_info,
            headers={'X-Newest': 'True'}, method='GET', swift_source='VW')
        source_resp = get_req.get_response(self.app)

        if source_resp.content_length is None or \
                source_resp.content_length > MAX_FILE_SIZE:
            return HTTPRequestEntityTooLarge(request=req)

        return source_resp

    def _put_versioned_obj(self, req, put_path_info, source_resp):
        """PUT the body/headers of ``source_resp`` to ``put_path_info``.

        :returns: the response to the pre-authed PUT.
        """
        # Create a new Request object to PUT to the versions container, copying
        # all headers from the source object apart from x-timestamp.
        put_req = make_pre_authed_request(
            req.environ, path=put_path_info, method='PUT',
            swift_source='VW')
        copy_header_subset(source_resp, put_req,
                           lambda k: k.lower() != 'x-timestamp')
        put_req.environ['wsgi.input'] = FileLikeIter(source_resp.app_iter)
        return put_req.get_response(self.app)

    def _check_response_error(self, req, resp):
        """
        Raise Error Response in case of error
        """
        if is_success(resp.status_int):
            return
        if is_client_error(resp.status_int):
            # missing container or bad permissions
            raise HTTPPreconditionFailed(request=req)
        # could not version the data, bail
        raise HTTPServiceUnavailable(request=req)

    def _build_versions_object_prefix(self, object_name):
        # Archived copies are named "<3-hex-digit length><name>/<timestamp>";
        # this builds everything up to and including the '/'.
        return '%03x%s/' % (
            len(object_name),
            object_name)

    def _build_versions_object_name(self, object_name, ts):
        # Append the internal-format timestamp to the length-prefixed name.
        return ''.join((
            self._build_versions_object_prefix(object_name),
            Timestamp(ts).internal))

    def _copy_current(self, req, versions_cont, api_version, account_name,
                      object_name):
        """Archive the current version of the object before a write.

        Returns early (without archiving) when there is nothing to version:
        the source is a DLO manifest, or the object does not exist.
        """
        # validate the write access to the versioned container before
        # making any backend requests
        if 'swift.authorize' in req.environ:
            container_info = get_container_info(
                req.environ, self.app)
            req.acl = container_info.get('write_acl')
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                raise aresp

        get_resp = self._get_source_object(req, req.path_info)

        if 'X-Object-Manifest' in get_resp.headers:
            # do not version DLO manifest, proceed with original request
            close_if_possible(get_resp.app_iter)
            return
        if get_resp.status_int == HTTP_NOT_FOUND:
            # nothing to version, proceed with original request
            close_if_possible(get_resp.app_iter)
            return

        # check for any other errors
        self._check_response_error(req, get_resp)

        # if there's an existing object, then copy it to
        # X-Versions-Location
        # (fall back to parsing Last-Modified when X-Timestamp is absent)
        ts_source = get_resp.headers.get(
            'x-timestamp',
            calendar.timegm(time.strptime(
                get_resp.headers['last-modified'],
                '%a, %d %b %Y %H:%M:%S GMT')))
        vers_obj_name = self._build_versions_object_name(
            object_name, ts_source)

        put_path_info = "/%s/%s/%s/%s" % (
            api_version, account_name, versions_cont, vers_obj_name)
        put_resp = self._put_versioned_obj(req, put_path_info, get_resp)

        self._check_response_error(req, put_resp)

    def handle_obj_versions_put(self, req, versions_cont, api_version,
                                account_name, object_name):
        """
        Copy current version of object to versions_container before proceeding
        with original request.

        :param req: original request.
        :param versions_cont: container where previous versions of the object
                              are stored.
        :param api_version: api version.
        :param account_name: account name.
        :param object_name: name of object of original request
        """
        if 'X-Object-Manifest' in req.headers:
            # do not version DLO manifest, proceed with original request
            return self.app

        self._copy_current(req, versions_cont, api_version, account_name,
                           object_name)
        return self.app

    def handle_obj_versions_delete_push(self, req, versions_cont, api_version,
                                        account_name, container_name,
                                        object_name):
        """
        Handle DELETE requests when in history mode.

        Copy current version of object to versions_container and write a
        delete marker before proceeding with original request.

        :param req: original request.
        :param versions_cont: container where previous versions of the object
                              are stored.
        :param api_version: api version.
        :param account_name: account name.
        :param object_name: name of object of original request
        """
        self._copy_current(req, versions_cont, api_version, account_name,
                           object_name)

        marker_path = "/%s/%s/%s/%s" % (
            api_version, account_name, versions_cont,
            self._build_versions_object_name(object_name, time.time()))
        marker_headers = {
            # Definitive source of truth is Content-Type, and since we add
            # a swift_* param, we know users haven't set it themselves.
            # This is still open to users POSTing to update the content-type
            # but they're just shooting themselves in the foot then.
            'content-type': DELETE_MARKER_CONTENT_TYPE,
            'content-length': '0',
            'x-auth-token': req.headers.get('x-auth-token')}
        marker_req = make_pre_authed_request(
            req.environ, path=marker_path,
            headers=marker_headers, method='PUT', swift_source='VW')
        marker_req.environ['swift.content_type_overridden'] = True
        marker_resp = marker_req.get_response(self.app)
        self._check_response_error(req, marker_resp)

        # successfully copied and created delete marker; safe to delete
        return self.app

    def _restore_data(self, req, versions_cont, api_version, account_name,
                      container_name, object_name, prev_obj_name):
        """Copy the archived ``prev_obj_name`` back over the live object.

        :returns: the archive path of the restored version on success, or
                  ``False`` when that version has vanished (caller should
                  try the next-older one).
        """
        get_path = "/%s/%s/%s/%s" % (
            api_version, account_name, versions_cont, prev_obj_name)

        get_resp = self._get_source_object(req, get_path)

        # if the version isn't there, keep trying with previous version
        if get_resp.status_int == HTTP_NOT_FOUND:
            return False

        self._check_response_error(req, get_resp)

        put_path_info = "/%s/%s/%s/%s" % (
            api_version, account_name, container_name, object_name)
        put_resp = self._put_versioned_obj(
            req, put_path_info, get_resp)

        self._check_response_error(req, put_resp)
        return get_path

    def handle_obj_versions_delete_pop(self, req, versions_cont, api_version,
                                       account_name, container_name,
                                       object_name):
        """
        Handle DELETE requests when in stack mode.

        Delete current version of object and pop previous version in its
        place.

        :param req: original request.
        :param versions_cont: container where previous versions of the object
                              are stored.
        :param api_version: api version.
        :param account_name: account name.
        :param container_name: container name.
        :param object_name: object name.
        """
        listing_prefix = self._build_versions_object_prefix(object_name)
        item_iter = self._listing_iter(account_name, versions_cont,
                                       listing_prefix, req)

        auth_token_header = {'X-Auth-Token': req.headers.get('X-Auth-Token')}
        authed = False
        for previous_version in item_iter:
            if not authed:
                # validate the write access to the versioned container before
                # making any backend requests
                if 'swift.authorize' in req.environ:
                    container_info = get_container_info(
                        req.environ, self.app)
                    req.acl = container_info.get('write_acl')
                    aresp = req.environ['swift.authorize'](req)
                    if aresp:
                        return aresp
                authed = True

            if previous_version['content_type'] == DELETE_MARKER_CONTENT_TYPE:
                # check whether we have data in the versioned container
                obj_head_headers = {'X-Newest': 'True'}
                obj_head_headers.update(auth_token_header)
                head_req = make_pre_authed_request(
                    req.environ, path=req.path_info, method='HEAD',
                    headers=obj_head_headers, swift_source='VW')
                hresp = head_req.get_response(self.app)
                if hresp.status_int != HTTP_NOT_FOUND:
                    self._check_response_error(req, hresp)
                    # if there's an existing object, then just let the delete
                    # through (i.e., restore to the delete-marker state):
                    break

                # no data currently in the container (delete marker is current)
                for version_to_restore in item_iter:
                    if version_to_restore['content_type'] == \
                            DELETE_MARKER_CONTENT_TYPE:
                        # Nothing to restore
                        break
                    prev_obj_name = version_to_restore['name'].encode('utf-8')
                    restored_path = self._restore_data(
                        req, versions_cont, api_version, account_name,
                        container_name, object_name, prev_obj_name)
                    if not restored_path:
                        continue

                    old_del_req = make_pre_authed_request(
                        req.environ, path=restored_path, method='DELETE',
                        headers=auth_token_header, swift_source='VW')
                    del_resp = old_del_req.get_response(self.app)
                    if del_resp.status_int != HTTP_NOT_FOUND:
                        self._check_response_error(req, del_resp)
                        # else, well, it existed long enough to do the
                        # copy; we won't worry too much
                    break
                marker_path = "/%s/%s/%s/%s" % (
                    api_version, account_name, versions_cont,
                    previous_version['name'].encode('utf-8'))
                # done restoring, redirect the delete to the marker
                req = make_pre_authed_request(
                    req.environ, path=marker_path, method='DELETE',
                    headers=auth_token_header, swift_source='VW')
            else:
                # there are older versions so copy the previous version to the
                # current object and delete the previous version
                prev_obj_name = previous_version['name'].encode('utf-8')
                restored_path = self._restore_data(
                    req, versions_cont, api_version, account_name,
                    container_name, object_name, prev_obj_name)
                if not restored_path:
                    continue
                # redirect the original DELETE to the source of the reinstated
                # version object - we already auth'd original req so make a
                # pre-authed request
                req = make_pre_authed_request(
                    req.environ, path=restored_path, method='DELETE',
                    headers=auth_token_header, swift_source='VW')

            # remove 'X-If-Delete-At', since it is not for the older copy
            if 'X-If-Delete-At' in req.headers:
                del req.headers['X-If-Delete-At']
            break

        # handle DELETE request here in case it was modified
        return req.get_response(self.app)

    def handle_container_request(self, env, start_response):
        """Pass a container request through and translate versioning sysmeta
        in the response into the matching client-visible location header."""
        app_resp = self._app_call(env)
        if self._response_headers is None:
            self._response_headers = []
        mode = location = ''
        for key, val in self._response_headers:
            if key.lower() == SYSMETA_VERSIONS_LOC:
                location = val
            elif key.lower() == SYSMETA_VERSIONS_MODE:
                mode = val

        if location:
            if mode == 'history':
                self._response_headers.extend([
                    (CLIENT_HISTORY_LOC.title(), location)])
            else:
                # default (and legacy) mode is stack
                self._response_headers.extend([
                    (CLIENT_VERSIONS_LOC.title(), location)])

        start_response(self._response_status,
                       self._response_headers,
                       self._response_exc_info)
        return app_resp
class VersionedWritesMiddleware(object):
    """WSGI middleware implementing object versioning (stack and history
    modes); see the module docstring for the user-facing behavior."""

    def __init__(self, app, conf):
        self.app = app
        self.conf = conf
        self.logger = get_logger(conf, log_route='versioned_writes')

    def container_request(self, req, start_response, enabled):
        """Handle a container request that may set/remove versioning headers.

        Validates the client headers, translates them to sysmeta, and raises
        ``HTTPBadRequest`` / ``HTTPPreconditionFailed`` on invalid input.
        """
        # Stack and history modes are mutually exclusive; an empty value for
        # one of the two headers just defers to the other.
        if CLIENT_VERSIONS_LOC in req.headers and \
                CLIENT_HISTORY_LOC in req.headers:
            if not req.headers[CLIENT_HISTORY_LOC]:
                # defer to versions location entirely
                del req.headers[CLIENT_HISTORY_LOC]
            elif req.headers[CLIENT_VERSIONS_LOC]:
                raise HTTPBadRequest(
                    request=req, content_type='text/plain',
                    body='Only one of %s or %s may be specified' % (
                        CLIENT_VERSIONS_LOC, CLIENT_HISTORY_LOC))
            else:
                # history location is present and versions location is
                # present but empty -- clean it up
                del req.headers[CLIENT_VERSIONS_LOC]

        if CLIENT_VERSIONS_LOC in req.headers or \
                CLIENT_HISTORY_LOC in req.headers:
            if CLIENT_VERSIONS_LOC in req.headers:
                val = req.headers[CLIENT_VERSIONS_LOC]
                mode = 'stack'
            else:
                val = req.headers[CLIENT_HISTORY_LOC]
                mode = 'history'

            if not val:
                # empty value is the same as X-Remove-Versions-Location
                req.headers['X-Remove-Versions-Location'] = 'x'
            elif not config_true_value(enabled) and \
                    req.method in ('PUT', 'POST'):
                # differently from previous version, we are actually
                # returning an error if user tries to set versions location
                # while feature is explicitly disabled.
                raise HTTPPreconditionFailed(
                    request=req, content_type='text/plain',
                    body='Versioned Writes is disabled')
            else:
                # OK, we received a value, have versioning enabled, and aren't
                # trying to set two modes at once. Validate the value and
                # translate to sysmeta.
                location = check_container_format(req, val)
                req.headers[SYSMETA_VERSIONS_LOC] = location
                req.headers[SYSMETA_VERSIONS_MODE] = mode

                # reset original header on container server to maintain sanity
                # now only sysmeta is source of Versions Location
                req.headers[CLIENT_VERSIONS_LOC] = ''

                # if both add and remove headers are in the same request
                # adding location takes precedence over removing
                for header in ['X-Remove-Versions-Location',
                               'X-Remove-History-Location']:
                    if header in req.headers:
                        del req.headers[header]

        if any(req.headers.get(header) for header in [
                'X-Remove-Versions-Location',
                'X-Remove-History-Location']):
            # blank out both client header and sysmeta; the container server
            # treats empty sysmeta values as removal
            req.headers.update({CLIENT_VERSIONS_LOC: '',
                                SYSMETA_VERSIONS_LOC: '',
                                SYSMETA_VERSIONS_MODE: ''})
            for header in ['X-Remove-Versions-Location',
                           'X-Remove-History-Location']:
                if header in req.headers:
                    del req.headers[header]

        # send request and translate sysmeta headers from response
        vw_ctx = VersionedWritesContext(self.app, self.logger)
        return vw_ctx.handle_container_request(req.environ, start_response)

    def object_request(self, req, api_version, account, container, obj,
                       allow_versioned_writes):
        """Dispatch an object PUT/DELETE to the matching versioning handler.

        :returns: a WSGI application (either a specific response or
                  ``self.app`` to continue the original request).
        """
        account_name = unquote(account)
        container_name = unquote(container)
        object_name = unquote(obj)
        resp = None
        is_enabled = config_true_value(allow_versioned_writes)
        container_info = get_container_info(
            req.environ, self.app)

        # To maintain backwards compatibility, container version
        # location could be stored as sysmeta or not, need to check both.
        # If stored as sysmeta, check if middleware is enabled. If sysmeta
        # is not set, but versions property is set in container_info, then
        # for backwards compatibility feature is enabled.
        versions_cont = container_info.get(
            'sysmeta', {}).get('versions-location')
        versioning_mode = container_info.get(
            'sysmeta', {}).get('versions-mode', 'stack')
        if not versions_cont:
            versions_cont = container_info.get('versions')
            # if allow_versioned_writes is not set in the configuration files
            # but 'versions' is configured, enable feature to maintain
            # backwards compatibility
            if not allow_versioned_writes and versions_cont:
                is_enabled = True

        if is_enabled and versions_cont:
            versions_cont = unquote(versions_cont).split('/')[0]
            vw_ctx = VersionedWritesContext(self.app, self.logger)
            if req.method == 'PUT':
                resp = vw_ctx.handle_obj_versions_put(
                    req, versions_cont, api_version, account_name,
                    object_name)
            # handle DELETE
            elif versioning_mode == 'history':
                resp = vw_ctx.handle_obj_versions_delete_push(
                    req, versions_cont, api_version, account_name,
                    container_name, object_name)
            else:
                resp = vw_ctx.handle_obj_versions_delete_pop(
                    req, versions_cont, api_version, account_name,
                    container_name, object_name)

        if resp:
            return resp
        else:
            return self.app

    def __call__(self, env, start_response):
        # WSGI entry point: route container vs. object requests.
        req = Request(env)
        try:
            (api_version, account, container, obj) = req.split_path(3, 4, True)
        except ValueError:
            return self.app(env, start_response)

        # In case allow_versioned_writes is set in the filter configuration,
        # the middleware becomes the authority on whether object
        # versioning is enabled or not. In case it is not set, then
        # the option in the container configuration is still checked
        # for backwards compatibility

        # For a container request, first just check if option is set,
        # can be either true or false.
        # If set, check if enabled when actually trying to set container
        # header. If not set, let request be handled by container server
        # for backwards compatibility.
        # For an object request, also check if option is set (either T or F).
        # If set, check if enabled when checking versions container in
        # sysmeta property. If it is not set check 'versions' property in
        # container_info
        allow_versioned_writes = self.conf.get('allow_versioned_writes')
        if allow_versioned_writes and container and not obj:
            try:
                return self.container_request(req, start_response,
                                              allow_versioned_writes)
            except HTTPException as error_response:
                return error_response(env, start_response)
        elif (obj and req.method in ('PUT', 'DELETE') and
                not req.environ.get('swift.post_as_copy')):
            try:
                return self.object_request(
                    req, api_version, account, container, obj,
                    allow_versioned_writes)(env, start_response)
            except HTTPException as error_response:
                return error_response(env, start_response)
        else:
            return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
    """Paste-deploy entry point for the versioned_writes filter.

    Merges local options over the global ones and, when the feature is
    enabled, registers its capability flags for the /info endpoint.
    """
    conf = dict(global_conf)
    conf.update(local_conf)
    if config_true_value(conf.get('allow_versioned_writes')):
        register_swift_info(
            'versioned_writes',
            allowed_flags=(CLIENT_VERSIONS_LOC, CLIENT_HISTORY_LOC))

    def obj_versions_filter(app):
        return VersionedWritesMiddleware(app, conf)

    return obj_versions_filter
|
<gh_stars>0
from django.db import models
from enum import Enum
# Create your models here.
class Student(models.Model):
    """A student roster record.

    first_name -- the student's first name
    last_name  -- the student's last name
    code       -- the student code/ID
    sex        -- gender
    """
    class SexChoiceEnum(Enum):
        # DB-stored values: '1' = male, '2' = female.
        # NOTE(review): "fremale" looks like a typo for "female"; renaming the
        # member changes the human-readable choice label stored in migrations,
        # so confirm before fixing.
        male = '1'
        fremale = '2'

        @classmethod
        def choices(cls):
            # (value, label) pairs in the format Django's `choices` expects
            return tuple((i.value, i.name) for i in cls)

    first_name = models.CharField(max_length=200,blank=True, null=True)
    last_name = models.CharField(max_length=200,blank=True, null=True)
    code = models.CharField(max_length=200,blank=True, null=True)
    sex = models.CharField(max_length=1, choices=SexChoiceEnum.choices(),blank=True, null=True)

    def __str__(self):
        return r"%s %s" %(self.first_name, self.last_name)
class Room (models.Model):
    """A classroom / class group.

    name        -- classroom name
    description -- details
    """
    name = models.CharField(max_length=200,blank=True, null=True)
    description = models.CharField(max_length=200,blank=True, null=True)

    def __str__(self):
        return r"%s" %(self.name)
class Teacher(models.Model):
    """A teacher record.

    (The original docstring said "student list" -- a copy/paste slip; the
    fields clearly describe a teacher.)

    name        -- teacher's name
    description -- details
    telephone   -- phone number
    """
    name = models.CharField(max_length=200,blank=True, null=True)
    description = models.CharField(max_length=200,blank=True, null=True)
    telephone = models.CharField(max_length=200,blank=True, null=True)

    def __str__(self):
        return r"%s" %(self.name)
class Absent(models.Model):
    """An attendance status (present/absent) and whether to notify.

    name        -- name of the attendance status
    description -- details
    send_alert  -- if True, push a notification to LINE message;
                   if False, do not send
    """
    name = models.CharField(max_length=200,blank=True, null=True)
    description = models.CharField(max_length=200,blank=True, null=True)
    send_alert = models.BooleanField(default=False,blank=True, null=True)

    def __str__(self):
        return r"%s" %(self.name)
class Subject(models.Model):
    """A subject being taught.

    name        -- subject name
    description -- details
    """
    name = models.CharField(max_length=200,blank=True, null=True)
    description = models.CharField(max_length=200,blank=True, null=True)

    def __str__(self):
        return r"%s" %(self.name)
class StudentInRoom(models.Model):
    """Assignment of a student to a classroom for a given year.

    room         -- the classroom
    student      -- the student
    current_year -- academic year (Buddhist calendar; default 2561)
    """
    room = models.ForeignKey(Room, on_delete=models.CASCADE,blank=True, null=True )
    student = models.ForeignKey(Student, on_delete=models.CASCADE,blank=True, null=True )
    current_year = models.IntegerField(blank=True, null=True,default=2561)

    def __str__(self):
        return r"%s (%s)" %(self.room, self.student)
class TeacherInRoom(models.Model):
    """A teaching session: date, period, subject, teacher and room.

    Used to identify which students were in which class session.

    teach_date -- the date taught
    time       -- the class period
    subject    -- the subject taught
    teacher    -- the teacher
    room       -- the room taught in
    """
    teach_date = models.DateField(blank=True, null=True)
    time = models.CharField(max_length=200,blank=True, null=True)
    subject = models.ForeignKey(Subject, on_delete=models.CASCADE, blank=True, null=True)
    teacher = models.ForeignKey(Teacher, on_delete=models.CASCADE, blank=True, null=True)
    room = models.ForeignKey(Room, on_delete=models.CASCADE,blank=True, null=True )

    def __str__(self):
        return r"%s %s (%s)" % (self.teach_date, self.subject, self.teacher)
class StudentAbsent(models.Model):
    """A student's attendance for one teaching session.

    teacherinroom -- the session (date, period, teacher, subject, room)
    student       -- the student
    absent        -- the attendance status
    """
    teacherinroom = models.ForeignKey(TeacherInRoom, on_delete=models.CASCADE, blank=True, null=True)
    student = models.ForeignKey(Student, on_delete=models.CASCADE, blank=True, null=True)
    absent = models.ForeignKey(Absent, on_delete=models.CASCADE, blank=True, null=True)

    def __str__(self):
        return r"%s %s" % (self.student, self.absent)
|
<gh_stars>0
# noinspection PyUnusedLocal
# skus = unicode string
def checkout(skus):
    """Compute the total price of a basket of SKUs with special offers.

    :param skus: string of single-letter SKUs, e.g. ``'AABCD'``.
    :returns: total price in whole units, or ``-1`` if any SKU is invalid.

    Offer types:
      * bundle prices for the same item (e.g. 3A for 130, 5A for 200)
      * "buy N, get one of the same free" (F, U)
      * "buy N, get one *other* item free" (E->B, N->M, R->Q)
      * group discount: any 3 of (S, T, X, Y, Z) for 45
    """
    product_dict = {
        'A': 50, 'B': 30, 'C': 20, 'D': 15, 'E': 40, 'F': 10, 'G': 20,
        'H': 10, 'I': 35, 'J': 60, 'K': 70, 'L': 90, 'M': 15, 'N': 40,
        'O': 10, 'P': 50, 'Q': 30, 'R': 50, 'S': 20, 'T': 20, 'U': 40,
        'V': 50, 'W': 20, 'X': 17, 'Y': 20, 'Z': 21,
    }
    # bundle offers, sorted ascending by quantity: (quantity, bundle_price)
    special_offers1 = {
        'A': ((3, 130), (5, 200)),
        'B': ((2, 45),),
        'H': ((5, 45), (10, 80)),
        'K': ((2, 120),),
        'P': ((5, 200),),
        'Q': ((3, 80),),
        'V': ((2, 90), (3, 130)),
    }
    # (N, 1): buy N, get 1 of the same item free
    special_offers2 = {
        'F': (2, 1),
        'U': (3, 1),
    }
    # (N, other): every N of this item makes one `other` free
    special_offers3 = {
        'E': (2, 'B'),
        'N': (3, 'M'),
        'R': (3, 'Q'),
    }

    cost = 0
    product_amounts = {letter: 0 for letter in product_dict}
    for sku in skus:
        if sku not in product_dict:
            return -1  # invalid SKU invalidates the whole basket
        product_amounts[sku] += 1

    # Price every product except the group-discount items (S, T, X, Y, Z),
    # which are handled together below.  E, N and R must be processed
    # *before* B, M and Q so their "get one X free" offers reduce the
    # counts before those items are priced.
    for key in 'ENRABCDFGHIJKLMOPQUVW':
        if key in special_offers1:
            val = product_amounts[key]
            # a count can be negative after a special_offers3 reduction;
            # negative counts contribute nothing
            if val >= 0:
                offers = special_offers1[key]
                # apply the largest bundle first
                for i in range(len(offers) - 1, -1, -1):
                    cost += (val // offers[i][0]) * offers[i][1]
                    val = val % offers[i][0]
                cost += val * product_dict[key]
        elif key in special_offers2:
            # Bug fix: `val` was tested *before* being assigned, reading a
            # stale value from a previous loop iteration.
            val = product_amounts[key]
            if val >= 0:
                offers = special_offers2[key]
                # groups of (N + 1) items cost only N units
                cost += (val // (offers[0] + 1)) * offers[0] * product_dict[key]
                val = val % (offers[0] + 1)
                cost += val * product_dict[key]
        elif key in special_offers3:
            # reduce the other item's count (it may go negative; negative
            # counts are priced as zero by the branches above/below)
            key_to_reduce = special_offers3[key][1]
            multiple = special_offers3[key][0]
            product_amounts[key_to_reduce] -= product_amounts[key] // multiple
            cost += product_amounts[key] * product_dict[key]
        else:
            val = product_amounts[key]
            if val >= 0:
                cost += val * product_dict[key]

    # Group offer: any 3 of (S, T, X, Y, Z) for 45.
    special_offer4_num = sum(product_amounts[key] for key in 'STXYZ')
    cost += (special_offer4_num // 3) * 45
    rest = special_offer4_num % 3
    # Price the leftover items individually, cheapest first, so the
    # 45-groups absorb the most expensive items (customer-favourable).
    for key in 'XSTYZ':
        curr = product_amounts[key]
        if rest <= curr:
            cost += product_dict[key] * rest
            break
        else:
            cost += product_dict[key] * curr
            rest -= curr
    return cost
|
<reponame>elephanting/minimal-hand
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import numpy as np
import cv2
def plot3d(joints_,ax, title=None):
    """Plot a 21-joint hand skeleton in 3D on *ax*.

    :param joints_: (21, 3) array of joint coordinates -- assumed layout is
        wrist at index 0 and four joints per finger; TODO confirm the joint
        ordering against the upstream dataset convention.
    :param ax: an ``Axes3D`` instance to draw on.
    :param title: unused; kept for interface compatibility.
    :returns: the same ``ax``.
    """
    # copy so the caller's array is never mutated
    joints = joints_.copy()
    # all keypoints as yellow dots
    ax.plot(joints[:, 0], joints[:, 1], joints[:, 2], 'yo', label='keypoint')

    # one polyline per finger, from the wrist (joint 0) to the fingertip
    ax.plot(joints[:5, 0], joints[:5, 1],
            joints[:5, 2],
            'r',
            label='thumb')
    ax.plot(joints[[0, 5, 6, 7, 8, ], 0], joints[[0, 5, 6, 7, 8, ], 1],
            joints[[0, 5, 6, 7, 8, ], 2],
            'm',
            label='index')
    ax.plot(joints[[0, 9, 10, 11, 12, ], 0], joints[[0, 9, 10, 11, 12], 1],
            joints[[0, 9, 10, 11, 12], 2],
            'b',
            label='middle')
    ax.plot(joints[[0, 13, 14, 15, 16], 0], joints[[0, 13, 14, 15, 16], 1],
            joints[[0, 13, 14, 15, 16], 2],
            'c',
            label='ring')
    ax.plot(joints[[0, 17, 18, 19, 20], 0], joints[[0, 17, 18, 19, 20], 1],
            joints[[0, 17, 18, 19, 20], 2],
            'g',
            label='pinky')

    # snap convention
    # highlight the fingertips (thumb tip as a diamond, others as circles)
    ax.plot(joints[4][0], joints[4][1], joints[4][2], 'rD', label='thumb')
    ax.plot(joints[8][0], joints[8][1], joints[8][2], 'ro', label='index')
    ax.plot(joints[12][0], joints[12][1], joints[12][2], 'ro', label='middle')
    ax.plot(joints[16][0], joints[16][1], joints[16][2], 'ro', label='ring')
    ax.plot(joints[20][0], joints[20][1], joints[20][2], 'ro', label='pinky')
    # plt.plot(joints [1:, 0], joints [1:, 1], joints [1:, 2], 'o')

    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    #ax.set_xlim(xmin=-1.0,xmax=1.0)
    #ax.set_ylim(ymin=-1.0,ymax=1.0)
    #ax.set_zlim(zmin=-1.0,zmax=1.0)
    # plt.legend()
    # ax.view_init(330, 110)
    # front-facing view (camera looking along -z)
    ax.view_init(-90, -90)
    return ax
def visualize_2d(ax,
                 hand_joints=None,
                 box=None,
                 links=None):
    """Draw 2D hand joints and an optional bounding box on *ax*.

    :param ax: matplotlib Axes to draw on.
    :param hand_joints: (21, 2) array of joint coordinates, or None.
    :param box: optional bounding box.  Shape ``(4,)`` is plain
        ``(x1, y1, x2, y2)`` drawn in blue; shape ``(5,)`` is
        ``(hand_side, x1, y1, x2, y2)`` where hand_side 0 draws red
        (right hand) and anything else blue (left hand).
    :param links: finger joint chains; defaults to the standard 21-joint
        hand skeleton.
    :returns: the same ``ax``.
    """
    # Bug fix: `links` was a mutable default argument (a shared list);
    # use the None-sentinel idiom instead.
    if links is None:
        links = [(0, 1, 2, 3, 4), (0, 5, 6, 7, 8), (0, 9, 10, 11, 12),
                 (0, 13, 14, 15, 16), (0, 17, 18, 19, 20)]

    def _draw_box(x1, y1, x2, y2, color):
        # Draw the four edges of an axis-aligned box on `ax`.
        # Bug fix: the (4,)-shape branch previously used plt.plot, which
        # targets the *current* pyplot axes rather than the `ax` we were
        # handed; both branches now draw on `ax`.
        for (xa, ya), (xb, yb) in (((x1, y1), (x2, y1)),   # top
                                   ((x1, y1), (x1, y2)),   # left
                                   ((x2, y1), (x2, y2)),   # right
                                   ((x1, y2), (x2, y2))):  # bottom
            ax.plot([int(xa), int(xb)], [int(ya), int(yb)], color)

    ax.axis('off')
    if hand_joints is not None:
        visualize_joints_2d(ax, hand_joints, joint_idxs=False, links=links)
    if box is not None:
        if box.shape == (4,):
            _draw_box(box[0], box[1], box[2], box[3], 'b')
        else:  # shape == (5,): first entry encodes the hand side
            color = 'r' if box[0] == 0 else 'b'
            _draw_box(box[1], box[2], box[3], box[4], color)
    return ax
def visualize_joints_2d(ax,
                        joints,
                        joint_idxs=True,
                        links=None,
                        alpha=1,
                        scatter=True,
                        linewidth=2):
    """Draw 2D hand joints and skeleton links on *ax*.

    :param ax: matplotlib Axes to draw on.
    :param joints: (N, 2) array of joint coordinates.
    :param joint_idxs: when True, label each joint with its index.
    :param links: finger joint chains; defaults to the 21-joint hand layout.
    :param alpha: line transparency.
    :param scatter: when True, also scatter the joint positions in red.
    :param linewidth: skeleton line width.
    """
    if links is None:
        links = [(0, 1, 2, 3, 4), (0, 5, 6, 7, 8), (0, 9, 10, 11, 12),
                 (0, 13, 14, 15, 16), (0, 17, 18, 19, 20)]
    # Scatter hand joints on image
    x = joints[:, 0]
    y = joints[:, 1]
    if scatter:
        ax.scatter(x, y, 1, 'r')

    # Add idx labels to joints.  Bug fix: use ax.annotate -- plt.annotate
    # targets the *current* pyplot axes, which may not be `ax`.  The
    # joint_idxs check is also hoisted out of the loop.
    if joint_idxs:
        for row_idx, row in enumerate(joints):
            ax.annotate(str(row_idx), (row[0], row[1]))
    _draw2djoints(ax, joints, links, alpha=alpha, linewidth=linewidth)
    ax.axis('equal')
def _draw2djoints(ax, annots, links, alpha=1, linewidth=1):
    """Draw every finger chain in *links* on *ax*, one colour per finger."""
    palette = ['r', 'm', 'b', 'c', 'g']
    for finger_idx, chain in enumerate(links):
        colour = palette[finger_idx]
        # draw each consecutive pair of joints in the chain
        for start, end in zip(chain[:-1], chain[1:]):
            _draw2dseg(
                ax,
                annots,
                start,
                end,
                c=colour,
                alpha=alpha,
                linewidth=linewidth)
def _draw2dseg(ax, annot, idx1, idx2, c='r', alpha=1, linewidth=1):
ax.plot([annot[idx1, 0], annot[idx2, 0]], [annot[idx1, 1], annot[idx2, 1]],
c=c,
alpha=alpha,
linewidth=linewidth)
def CVplot2D(img, kp2d, box=None):
    """Draw 2D hand keypoints, skeletons and optional boxes on *img* (OpenCV).

    :param img: BGR image; drawn on in place and returned.
    :param kp2d: array of hands; each entry is 21 (x, y) keypoints.
    :param box: optional iterable of detection rows.  Each row is indexed as
        (x1, y1, x2, y2, ?, left_flag) -- assumes column 5 is the left-hand
        flag; TODO confirm against the detector's output format.
    """
    # per-finger BGR colours: thumb, index, middle, ring, pinky
    color = [(255, 0, 0), (199,21,133), (0, 0, 255), (0, 255, 255), (0, 255, 0)]
    kp2d = kp2d.astype(int)
    for kp in kp2d:
        # cv2 wants plain (x, y) tuples, not array rows
        kp = tuple(map(tuple, kp))
        # plot joints
        for i in range(21):
            cv2.circle(img, kp[i], 1, (0, 0, 255), -1)
        # plot skeletons: joint 0 is the wrist; each finger is joints
        # i*4+1 .. i*4+4
        for i in range(5):
            cv2.line(img, kp[0], kp[i*4+1], color[i], 1)
            cv2.line(img, kp[i*4+1], kp[i*4+2], color[i], 1)
            cv2.line(img, kp[i*4+2], kp[i*4+3], color[i], 1)
            cv2.line(img, kp[i*4+3], kp[i*4+4], color[i], 1)
    if box is not None:
        for b in box:
            left = b[5]
            b = b.astype(int)
            # corners as ((x1, y1), (x2, y2))
            b = tuple(map(tuple, b[:4].reshape(2, 2)))
            if left:
                # BGR red for left hand
                cv2.rectangle(img, b[0], b[1], (0, 0, 255), 1)
            else:
                # BGR blue otherwise
                cv2.rectangle(img, b[0], b[1], (255, 0, 0), 1)
    return img
|
<filename>Source/Tools/TrainList_CityScape.py
# -*- coding: utf-8 -*-
import os
import glob
def OutputData(outputFile, data):
    """Append ``data`` as one text line to ``outputFile`` and flush immediately."""
    line = '%s\n' % (data,)
    outputFile.write(line)
    # Flush so partial lists survive an interrupted run.
    outputFile.flush()
# Output list files (train + validation); stale copies are removed below so
# each run regenerates the lists from scratch.
TrainListPath = './Dataset/trainlist_CityScape.txt'
CLSLabelListPath = './Dataset/labellist_cls_CityScape.txt'
DispLabelListPath = './Dataset/labellist_disp_CityScape.txt'
ValTrainListPath = './Dataset/val_trainlist_CityScape.txt'
ValDispLabelListPath = './Dataset/val_label_disp_list_CityScape.txt'
ValClsLabelListPath = './Dataset/val_label_cls_list_CityScape.txt'
# Root folder of the local Cityscapes dataset copy.
RootPath = '/home2/Documents/CityScape/'
# The four modalities, indexed 0..3 in the loops below:
# [left image, right image, coarse class labels, disparity].
cls_folder_list = ['leftImg8bit/', 'rightImg8bit/', 'gtCoarse/', 'disparity/']
# Per-city sub-folders of the 'train' and 'train_extra' splits.
train_folder_list = ['train/aachen', 'train/bochum', 'train/bremen', 'train/cologne',
                     'train/darmstadt', 'train/dusseldorf', 'train/erfurt', 'train/hamburg',
                     'train/hanover', 'train/jena', 'train/krefeld', 'train/monchengladbach',
                     'train/strasbourg', 'train/stuttgart', 'train/tubingen', 'train/ulm',
                     'train/weimar', 'train/zurich',
                     'train_extra/augsburg', 'train_extra/bad-honnef', 'train_extra/bamberg',
                     'train_extra/bayreuth', 'train_extra/dortmund', 'train_extra/dresden',
                     'train_extra/duisburg', 'train_extra/erlangen', 'train_extra/freiburg',
                     'train_extra/heidelberg', 'train_extra/heilbronn', 'train_extra/karlsruhe',
                     'train_extra/konigswinter', 'train_extra/konstanz', 'train_extra/mannheim',
                     'train_extra/muhlheim-ruhr', 'train_extra/nuremberg', 'train_extra/oberhausen',
                     'train_extra/saarbrucken', 'train_extra/schweinfurt', 'train_extra/troisdorf',
                     'train_extra/wuppertal', 'train_extra/wurzburg']
# Per-city sub-folders of the 'val' split.
val_folder_list = ['val/frankfurt', 'val/lindau', 'val/munster']
# Delete any previous list files (they are opened in append mode below,
# so leftovers would otherwise accumulate).
if os.path.exists(TrainListPath):
    os.remove(TrainListPath)
if os.path.exists(CLSLabelListPath):
    os.remove(CLSLabelListPath)
if os.path.exists(DispLabelListPath):
    os.remove(DispLabelListPath)
if os.path.exists(ValTrainListPath):
    os.remove(ValTrainListPath)
if os.path.exists(ValDispLabelListPath):
    os.remove(ValDispLabelListPath)
if os.path.exists(ValClsLabelListPath):
    os.remove(ValClsLabelListPath)
# Open every list file in append mode; entries are written incrementally.
fd_train_list = open(TrainListPath, 'a')
fd_cls_label_list = open(CLSLabelListPath, 'a')
fd_disp_label_list = open(DispLabelListPath, 'a')
fd_val_train_list = open(ValTrainListPath, 'a')
fd_val_cls_label_list = open(ValClsLabelListPath, 'a')
fd_val_disp_label_list = open(ValDispLabelListPath, 'a')
# Training split: for every left image found, derive the matching right
# image, coarse class-label image and disparity image from its file-name
# stem, verify that all four exist, and append their paths to the lists.
for i in range(len(train_folder_list)):
    path = RootPath + cls_folder_list[0] + train_folder_list[i]
    files = glob.glob(path + '/*.png')
    for j in range(len(files)):
        filename = files[j]
        pos = filename.find(train_folder_list[i])
        # Keep only the per-frame stem: drop the folder prefix and the
        # trailing 'leftImg8bit.png' suffix (15 characters).
        filename = filename[pos + len(train_folder_list[i])+1:-15]
        # print filename
        # break
        path_0 = RootPath + cls_folder_list[0] + \
            train_folder_list[i] + '/' + filename + 'leftImg8bit.png'
        path_1 = RootPath + cls_folder_list[1] + \
            train_folder_list[i] + '/' + filename + 'rightImg8bit.png'
        path_2 = RootPath + cls_folder_list[2] + \
            train_folder_list[i] + '/' + filename + 'gtCoarse_labelIds.png'
        path_3 = RootPath + cls_folder_list[3] + \
            train_folder_list[i] + '/' + filename + 'disparity.png'
        exist_0 = os.path.exists(path_0)
        exist_1 = os.path.exists(path_1)
        exist_2 = os.path.exists(path_2)
        exist_3 = os.path.exists(path_3)
        if (not exist_0) or \
                (not exist_1) or \
                (not exist_2) or \
                (not exist_3):
            print "'" + path_0 + "' : is not existed!"
            print "'" + path_1 + "' : is not existed!"
            print "'" + path_2 + "' : is not existed!"
            print "'" + path_3 + "' : is not existed!"
            print '***************'
            # NOTE(review): this 'break' abandons the REMAINING files of the
            # current city after a single missing file; 'continue' may have
            # been intended -- confirm before relying on the generated lists.
            break
        OutputData(fd_train_list, path_0)
        OutputData(fd_train_list, path_1)
        OutputData(fd_cls_label_list, path_2)
        OutputData(fd_disp_label_list, path_3)
    print "Finish: " + train_folder_list[i]
# Validation split: identical procedure over the 'val' cities, writing to
# the val_* list files instead.
for i in range(len(val_folder_list)):
    path = RootPath + cls_folder_list[0] + val_folder_list[i]
    files = glob.glob(path + '/*.png')
    for j in range(len(files)):
        filename = files[j]
        pos = filename.find(val_folder_list[i])
        # Same stem extraction as in the training loop above.
        filename = filename[pos + len(val_folder_list[i])+1:-15]
        # print filename
        # break
        path_0 = RootPath + cls_folder_list[0] + \
            val_folder_list[i] + '/' + filename + 'leftImg8bit.png'
        path_1 = RootPath + cls_folder_list[1] + \
            val_folder_list[i] + '/' + filename + 'rightImg8bit.png'
        path_2 = RootPath + cls_folder_list[2] + \
            val_folder_list[i] + '/' + filename + 'gtCoarse_labelIds.png'
        path_3 = RootPath + cls_folder_list[3] + \
            val_folder_list[i] + '/' + filename + 'disparity.png'
        exist_0 = os.path.exists(path_0)
        exist_1 = os.path.exists(path_1)
        exist_2 = os.path.exists(path_2)
        exist_3 = os.path.exists(path_3)
        if (not exist_0) or \
                (not exist_1) or \
                (not exist_2) or \
                (not exist_3):
            print "'" + path_0 + "' : is not existed!"
            print "'" + path_1 + "' : is not existed!"
            print "'" + path_2 + "' : is not existed!"
            print "'" + path_3 + "' : is not existed!"
            print '***************'
            # NOTE(review): same early-'break' behavior as the training loop.
            break
        OutputData(fd_val_train_list, path_0)
        OutputData(fd_val_train_list, path_1)
        OutputData(fd_val_cls_label_list, path_2)
        OutputData(fd_val_disp_label_list, path_3)
    print "Finish: " + val_folder_list[i]
# if __name__ == '__main__':
|
from pathlib import Path
from typing import NamedTuple, Optional, List, Dict
from logzero import logger
from arbitrageur.request import request_cached_pages
class ItemUpgrade(NamedTuple):
    """A single entry of an item's ``upgrades_into``/``upgrades_from`` list
    (see how ``retrieve_items`` builds these from the API payload)."""
    upgrade: str    # upgrade description string taken from the API entry
    item_id: int    # id of the item on the other end of the upgrade link
class Item(NamedTuple):
    """One item record parsed from the ``items`` API payload
    (field mapping is done in ``retrieve_items`` below)."""
    id: int                    # unique item id; also the key in the items map
    chat_link: str
    name: str
    type_name: str             # API field "type" (renamed: "type" shadows the builtin)
    rarity: str
    level: int
    vendor_value: int          # raw vendor value; vendor_price() multiplies it by 8
    flags: List[str]           # e.g. "AccountBound", "SoulbindOnAcquire" (see is_restricted)
    restrictions: List[str]
    upgrades_into: Optional[List[ItemUpgrade]]   # None when the API omits the field
    upgrades_from: Optional[List[ItemUpgrade]]   # None when the API omits the field
# Crafting materials that vendors sell at the standard price of
# `vendor_value * 8` (membership tested in O(1) instead of scanning a list).
_STANDARD_VENDOR_ITEMS = frozenset([
    "Spool of Jute Thread",
    "Spool of Wool Thread",
    "Spool of Cotton Thread",
    "Spool of Linen Thread",
    "Spool of Silk Thread",
    "Spool of Gossamer Thread",
    "Lump of Tin",
    "Lump of Coal",
    "Lump of Primordium",
    "Jar of Vinegar",
    "Packet of Baking Powder",
    "Jar of Vegetable Oil",
    "Packet of Salt",
    "Bag of Sugar",
    "Jug of Water",
    "Bag of Starch",
    "Bag of Flour",
    "Bottle of Soy Sauce",
    "Milling Basin",
    "Crystalline Bottle",
    "Bag of Mortar",
    "Essence of Elegance",
])

# Items whose vendor price is a fixed copper amount, not derived from
# `vendor_value`.
_FIXED_VENDOR_PRICES = {
    "Thermocatalytic Reagent": 150,
    "Pile of Compost Starter": 150,
    "Pile of Powdered Gelatin Mix": 200,
    "Smell-Enhancing Culture": 40000,
}


def vendor_price(item: "Item") -> Optional[int]:
    """Return the price a vendor charges for ``item``, if it is vendor-sold.

    Args:
        item: the item record; only ``name`` and ``vendor_value`` are read
            (plus whatever ``is_common_ascended_material`` inspects).

    Returns:
        The vendor price, ``0`` for common ascended materials, or ``None``
        when the item is not known to be purchasable from a vendor.
    """
    name = item.name
    # Non-Supreme "... Rune of Holding" items are vendor-sold like the
    # standard materials above.
    is_rune_of_holding = (name.endswith("Rune of Holding")
                          and not name.startswith("Supreme"))
    if name in _STANDARD_VENDOR_ITEMS or is_rune_of_holding:
        if item.vendor_value > 0:
            # standard vendor sell price is generally buy price * 8, see:
            # https://forum-en.gw2archive.eu/forum/community/api/How-to-get-the-vendor-sell-price
            return item.vendor_value * 8
        return None
    if name in _FIXED_VENDOR_PRICES:
        return _FIXED_VENDOR_PRICES[name]
    if is_common_ascended_material(item):
        # Valued at 0 by this module.
        return 0
    return None
def is_restricted(item: Item) -> bool:
    """Return True when ``item`` cannot be traded."""
    if item.id in (24749, 76363):
        # 24749: legacy Major Rune of the Air; 76363: legacy catapult schematic.
        return True
    # Bound items are restricted as well.
    return any(flag in ("AccountBound", "SoulbindOnAcquire")
               for flag in item.flags)
def is_common_ascended_material(item: Item) -> bool:
    """Return True for the three common ascended crafting materials."""
    return item.name in ("Empyreal Fragment",
                         "Dragonite Ore",
                         "Pile of Bloodstone Dust")
async def retrieve_items(items_path: Path) -> Dict[int, Item]:
    """Load the raw item pages (cache-backed) and parse them into an id map.

    Args:
        items_path: cache location passed through to ``request_cached_pages``.

    Returns:
        Mapping of item id to parsed ``Item``.
    """

    def parse_upgrades(raw, key):
        # The API omits the field entirely when there are no upgrades;
        # preserve that as None rather than an empty list.
        if key not in raw:
            return None
        return [ItemUpgrade(item_id=entry["item_id"], upgrade=entry["upgrade"])
                for entry in raw[key]]

    logger.info("Loading items")
    items = await request_cached_pages(items_path, "items")
    logger.info(f"""Loaded {len(items)} items""")
    logger.info("Parsing items data")
    items_map: Dict[int, Item] = {}
    for raw in items:
        items_map[raw["id"]] = Item(id=raw["id"],
                                    chat_link=raw["chat_link"],
                                    name=raw["name"],
                                    type_name=raw["type"],
                                    rarity=raw["rarity"],
                                    level=raw["level"],
                                    vendor_value=raw["vendor_value"],
                                    flags=raw["flags"],
                                    restrictions=raw["restrictions"],
                                    upgrades_into=parse_upgrades(raw, "upgrades_into"),
                                    upgrades_from=parse_upgrades(raw, "upgrades_from"))
    return items_map
|
<reponame>laurentlb/tensorflow
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.losses import losses_impl
from tensorflow.python.platform import test
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
# Functional losses exercised by the shape tests below; each is called as
# loss(y_true, y_pred).
ALL_LOSSES = [keras.losses.mean_squared_error,
              keras.losses.mean_absolute_error,
              keras.losses.mean_absolute_percentage_error,
              keras.losses.mean_squared_logarithmic_error,
              keras.losses.squared_hinge,
              keras.losses.hinge,
              keras.losses.categorical_crossentropy,
              keras.losses.binary_crossentropy,
              keras.losses.kullback_leibler_divergence,
              keras.losses.poisson,
              keras.losses.cosine_proximity,
              keras.losses.logcosh,
              keras.losses.categorical_hinge]
class _MSEMAELoss(object):
  """Stateful loss blending MSE and MAE, used to exercise serialization."""

  def __init__(self, mse_fraction):
    # Fraction of the loss taken from MSE; the remainder comes from MAE.
    self.mse_fraction = mse_fraction

  def __call__(self, y_true, y_pred):
    frac = self.mse_fraction
    mse = keras.losses.mse(y_true, y_pred)
    mae = keras.losses.mae(y_true, y_pred)
    return frac * mse + (1 - frac) * mae

  def get_config(self):
    # Exposes the internal state so serialize/deserialize can round-trip it.
    return {'mse_fraction': self.mse_fraction}
class KerasLossesTest(test.TestCase):
  """Tests for the functional Keras losses (`keras.losses.*` callables)."""

  def test_objective_shapes_3d(self):
    # Every loss in ALL_LOSSES reduces only the trailing axis:
    # (5, 6, 7) -> (5, 6).
    with self.cached_session():
      y_a = keras.backend.variable(np.random.random((5, 6, 7)))
      y_b = keras.backend.variable(np.random.random((5, 6, 7)))
      for obj in ALL_LOSSES:
        objective_output = obj(y_a, y_b)
        self.assertListEqual(objective_output.get_shape().as_list(), [5, 6])

  def test_objective_shapes_2d(self):
    # Same contract on 2-D inputs: (6, 7) -> (6,).
    with self.cached_session():
      y_a = keras.backend.variable(np.random.random((6, 7)))
      y_b = keras.backend.variable(np.random.random((6, 7)))
      for obj in ALL_LOSSES:
        objective_output = obj(y_a, y_b)
        self.assertListEqual(objective_output.get_shape().as_list(), [6,])

  def test_cce_one_hot(self):
    # Sparse CCE takes integer class ids (not one-hot) for y_true; the
    # output drops only the class axis of y_pred.
    with self.cached_session():
      y_a = keras.backend.variable(np.random.randint(0, 7, (5, 6)))
      y_b = keras.backend.variable(np.random.random((5, 6, 7)))
      objective_output = keras.losses.sparse_categorical_crossentropy(y_a, y_b)
      assert keras.backend.eval(objective_output).shape == (5, 6)
      y_a = keras.backend.variable(np.random.randint(0, 7, (6,)))
      y_b = keras.backend.variable(np.random.random((6, 7)))
      objective_output = keras.losses.sparse_categorical_crossentropy(y_a, y_b)
      assert keras.backend.eval(objective_output).shape == (6,)

  @test_util.run_in_graph_and_eager_modes
  def test_categorical_crossentropy_loss(self):
    # from_logits=True on raw logits must match the default path applied to
    # the softmax of the same logits.
    target = keras.backend.variable(np.random.randint(0, 1, (5, 1)))
    logits = keras.backend.variable(np.random.random((5, 1)))
    softmax_output = keras.backend.softmax(logits)
    output_from_logit = keras.losses.categorical_crossentropy(
        target, logits, from_logits=True)
    output_from_softmax = keras.losses.categorical_crossentropy(
        target, softmax_output)
    np.testing.assert_allclose(
        keras.backend.eval(output_from_logit),
        keras.backend.eval(output_from_softmax), atol=1e-5)

  @test_util.run_in_graph_and_eager_modes
  def test_sparse_categorical_crossentropy_loss(self):
    # Same logits-vs-softmax equivalence for the sparse variant.
    target = keras.backend.variable(np.random.randint(0, 1, (5, 1)))
    logits = keras.backend.variable(np.random.random((5, 1)))
    softmax_output = keras.backend.softmax(logits)
    output_from_logit = keras.losses.sparse_categorical_crossentropy(
        target, logits, from_logits=True)
    output_from_softmax = keras.losses.sparse_categorical_crossentropy(
        target, softmax_output)
    np.testing.assert_allclose(
        keras.backend.eval(output_from_logit),
        keras.backend.eval(output_from_softmax), atol=1e-5)

  @test_util.run_in_graph_and_eager_modes
  def test_binary_crossentropy_loss(self):
    # Binary variant: logits path vs. explicit sigmoid path.
    target = keras.backend.variable(np.random.randint(0, 1, (5, 1)))
    logits = keras.backend.variable(np.random.random((5, 1)))
    sigmoid_output = keras.backend.sigmoid(logits)
    output_from_logit = keras.losses.binary_crossentropy(
        target, logits, from_logits=True)
    output_from_sigmoid = keras.losses.binary_crossentropy(
        target, sigmoid_output)
    np.testing.assert_allclose(
        keras.backend.eval(output_from_logit),
        keras.backend.eval(output_from_sigmoid), atol=1e-5)

  def test_serialization(self):
    # serialize/deserialize round-trip resolves back to the same function.
    fn = keras.losses.get('mse')
    config = keras.losses.serialize(fn)
    new_fn = keras.losses.deserialize(config)
    self.assertEqual(fn, new_fn)

  def test_categorical_hinge(self):
    y_pred = keras.backend.variable(np.array([[0.3, 0.2, 0.1],
                                              [0.1, 0.2, 0.7]]))
    y_true = keras.backend.variable(np.array([[0, 1, 0], [1, 0, 0]]))
    # Per sample: (max wrong-class score) - (true-class score) + 1,
    # averaged over the batch.
    expected_loss = ((0.3 - 0.2 + 1) + (0.7 - 0.1 + 1)) / 2.0
    loss = keras.backend.eval(keras.losses.categorical_hinge(y_true, y_pred))
    self.assertAllClose(expected_loss, np.mean(loss))

  def test_serializing_loss_class(self):
    # A custom loss class round-trips through serialize/deserialize when
    # registered in a custom object scope, preserving its state.
    orig_loss_class = _MSEMAELoss(0.3)
    with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
      serialized = keras.losses.serialize(orig_loss_class)
    with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
      deserialized = keras.losses.deserialize(serialized)
    assert isinstance(deserialized, _MSEMAELoss)
    assert deserialized.mse_fraction == 0.3

  def test_serializing_model_with_loss_class(self):
    # End-to-end: compile/fit/save/load a model whose loss is a custom class.
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir)
    model_filename = os.path.join(tmpdir, 'custom_loss.h5')
    with self.cached_session():
      with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
        loss = _MSEMAELoss(0.3)
        inputs = keras.layers.Input((2,))
        outputs = keras.layers.Dense(1, name='model_output')(inputs)
        model = keras.models.Model(inputs, outputs)
        model.compile(optimizer='sgd', loss={'model_output': loss})
        model.fit(np.random.rand(256, 2), np.random.rand(256, 1))
      if h5py is None:
        # Saving to HDF5 requires h5py; skip the save/load half if absent.
        return
      model.save(model_filename)
      with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
        loaded_model = keras.models.load_model(model_filename)
        loaded_model.predict(np.random.rand(128, 2))
@test_util.run_all_in_graph_and_eager_modes
class MeanSquaredErrorTest(test.TestCase):
  """Tests for the `keras.losses.MeanSquaredError` loss class."""

  def _y_true_y_pred(self, shape=(2, 3)):
    """Shared fixture: integer targets and float32 predictions."""
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=shape)
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=shape,
                                  dtype=dtypes.float32)
    return y_true, y_pred

  def test_config(self):
    obj = keras.losses.MeanSquaredError(
        reduction=losses_impl.ReductionV2.SUM, name='mse_1')
    self.assertEqual(obj.name, 'mse_1')
    self.assertEqual(obj.reduction, losses_impl.ReductionV2.SUM)

  def test_all_correct_unweighted(self):
    obj = keras.losses.MeanSquaredError()
    y_true = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
    # Identical targets and predictions -> zero loss.
    self.assertAlmostEqual(self.evaluate(obj(y_true, y_true)), 0.0, 3)

  def test_unweighted(self):
    obj = keras.losses.MeanSquaredError()
    y_true, y_pred = self._y_true_y_pred()
    self.assertAlmostEqual(self.evaluate(obj(y_true, y_pred)), 49.5, 3)

  def test_scalar_weighted(self):
    obj = keras.losses.MeanSquaredError()
    y_true, y_pred = self._y_true_y_pred()
    loss = obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 113.85, 3)

  def test_sample_weighted(self):
    obj = keras.losses.MeanSquaredError()
    y_true, y_pred = self._y_true_y_pred()
    weights = constant_op.constant([1.2, 3.4], shape=(2, 1))
    loss = obj(y_true, y_pred, sample_weight=weights)
    self.assertAlmostEqual(self.evaluate(loss), 767.8 / 6, 3)

  def test_timestep_weighted(self):
    obj = keras.losses.MeanSquaredError()
    y_true, y_pred = self._y_true_y_pred(shape=(2, 3, 1))
    weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
    loss = obj(y_true, y_pred, sample_weight=weights)
    self.assertAlmostEqual(self.evaluate(loss), 587 / 6, 3)

  def test_zero_weighted(self):
    obj = keras.losses.MeanSquaredError()
    y_true, y_pred = self._y_true_y_pred()
    loss = obj(y_true, y_pred, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_invalid_sample_weight(self):
    obj = keras.losses.MeanSquaredError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
    weights = constant_op.constant([3, 6, 5, 0], shape=(2, 2))
    with self.assertRaisesRegexp(
        ValueError, r'Shapes \(2, 2\) and \(2, 3\) are incompatible'):
      obj(y_true, y_pred, sample_weight=weights)

  def test_no_reduction(self):
    obj = keras.losses.MeanSquaredError(
        reduction=losses_impl.ReductionV2.NONE)
    y_true, y_pred = self._y_true_y_pred()
    loss = self.evaluate(obj(y_true, y_pred, sample_weight=2.3))
    self.assertArrayNear(loss, [84.3333, 143.3666], 1e-3)

  def test_sum_reduction(self):
    obj = keras.losses.MeanSquaredError(
        reduction=losses_impl.ReductionV2.SUM)
    y_true, y_pred = self._y_true_y_pred()
    loss = obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 227.69998, 3)
@test_util.run_all_in_graph_and_eager_modes
class MeanAbsoluteErrorTest(test.TestCase):
  """Tests for the `keras.losses.MeanAbsoluteError` loss class."""

  def _y_true_y_pred(self, shape=(2, 3)):
    """Shared fixture: integer targets and float32 predictions."""
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=shape)
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=shape,
                                  dtype=dtypes.float32)
    return y_true, y_pred

  def test_config(self):
    obj = keras.losses.MeanAbsoluteError(
        reduction=losses_impl.ReductionV2.SUM, name='mae_1')
    self.assertEqual(obj.name, 'mae_1')
    self.assertEqual(obj.reduction, losses_impl.ReductionV2.SUM)

  def test_all_correct_unweighted(self):
    obj = keras.losses.MeanAbsoluteError()
    y_true = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
    # Identical targets and predictions -> zero loss.
    self.assertAlmostEqual(self.evaluate(obj(y_true, y_true)), 0.0, 3)

  def test_unweighted(self):
    obj = keras.losses.MeanAbsoluteError()
    y_true, y_pred = self._y_true_y_pred()
    self.assertAlmostEqual(self.evaluate(obj(y_true, y_pred)), 5.5, 3)

  def test_scalar_weighted(self):
    obj = keras.losses.MeanAbsoluteError()
    y_true, y_pred = self._y_true_y_pred()
    loss = obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 12.65, 3)

  def test_sample_weighted(self):
    obj = keras.losses.MeanAbsoluteError()
    y_true, y_pred = self._y_true_y_pred()
    weights = constant_op.constant([1.2, 3.4], shape=(2, 1))
    loss = obj(y_true, y_pred, sample_weight=weights)
    self.assertAlmostEqual(self.evaluate(loss), 81.4 / 6, 3)

  def test_timestep_weighted(self):
    obj = keras.losses.MeanAbsoluteError()
    y_true, y_pred = self._y_true_y_pred(shape=(2, 3, 1))
    weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
    loss = obj(y_true, y_pred, sample_weight=weights)
    self.assertAlmostEqual(self.evaluate(loss), 83 / 6, 3)

  def test_zero_weighted(self):
    obj = keras.losses.MeanAbsoluteError()
    y_true, y_pred = self._y_true_y_pred()
    loss = obj(y_true, y_pred, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_invalid_sample_weight(self):
    obj = keras.losses.MeanAbsoluteError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
    weights = constant_op.constant([3, 6, 5, 0], shape=(2, 2))
    with self.assertRaisesRegexp(
        ValueError, r'Shapes \(2, 2\) and \(2, 3\) are incompatible'):
      obj(y_true, y_pred, sample_weight=weights)

  def test_no_reduction(self):
    obj = keras.losses.MeanAbsoluteError(
        reduction=losses_impl.ReductionV2.NONE)
    y_true, y_pred = self._y_true_y_pred()
    loss = self.evaluate(obj(y_true, y_pred, sample_weight=2.3))
    self.assertArrayNear(loss, [10.7333, 14.5666], 1e-3)

  def test_sum_reduction(self):
    obj = keras.losses.MeanAbsoluteError(
        reduction=losses_impl.ReductionV2.SUM)
    y_true, y_pred = self._y_true_y_pred()
    loss = obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 25.29999, 3)
@test_util.run_all_in_graph_and_eager_modes
class MeanAbsolutePercentageErrorTest(test.TestCase):
  """Tests for the `keras.losses.MeanAbsolutePercentageError` loss class."""

  def _y_true_y_pred(self, shape=(2, 3)):
    """Shared fixture: integer targets and float32 predictions."""
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=shape)
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=shape,
                                  dtype=dtypes.float32)
    return y_true, y_pred

  def test_config(self):
    obj = keras.losses.MeanAbsolutePercentageError(
        reduction=losses_impl.ReductionV2.SUM, name='mape_1')
    self.assertEqual(obj.name, 'mape_1')
    self.assertEqual(obj.reduction, losses_impl.ReductionV2.SUM)

  def test_unweighted(self):
    obj = keras.losses.MeanAbsolutePercentageError()
    y_true, y_pred = self._y_true_y_pred()
    self.assertAlmostEqual(self.evaluate(obj(y_true, y_pred)), 211.8518, 3)

  def test_scalar_weighted(self):
    obj = keras.losses.MeanAbsolutePercentageError()
    y_true, y_pred = self._y_true_y_pred()
    loss = obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 487.259, 3)

  def test_sample_weighted(self):
    obj = keras.losses.MeanAbsolutePercentageError()
    y_true, y_pred = self._y_true_y_pred()
    weights = constant_op.constant([1.2, 3.4], shape=(2, 1))
    loss = obj(y_true, y_pred, sample_weight=weights)
    self.assertAlmostEqual(self.evaluate(loss), 422.8888, 3)

  def test_timestep_weighted(self):
    obj = keras.losses.MeanAbsolutePercentageError()
    y_true, y_pred = self._y_true_y_pred(shape=(2, 3, 1))
    weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
    loss = obj(y_true, y_pred, sample_weight=weights)
    self.assertAlmostEqual(self.evaluate(loss), 694.4445, 3)

  def test_zero_weighted(self):
    obj = keras.losses.MeanAbsolutePercentageError()
    y_true, y_pred = self._y_true_y_pred()
    loss = obj(y_true, y_pred, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
@test_util.run_all_in_graph_and_eager_modes
class MeanSquaredLogarithmicErrorTest(test.TestCase):
  """Tests for the `keras.losses.MeanSquaredLogarithmicError` loss class."""

  def _y_true_y_pred(self, shape=(2, 3)):
    """Shared fixture: integer targets and float32 predictions."""
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=shape)
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=shape,
                                  dtype=dtypes.float32)
    return y_true, y_pred

  def test_config(self):
    # NOTE(review): the 'mape_1' name mirrors the original test and looks
    # like a copy-paste from the MAPE test above -- confirm intent.
    obj = keras.losses.MeanSquaredLogarithmicError(
        reduction=losses_impl.ReductionV2.SUM, name='mape_1')
    self.assertEqual(obj.name, 'mape_1')
    self.assertEqual(obj.reduction, losses_impl.ReductionV2.SUM)

  def test_unweighted(self):
    obj = keras.losses.MeanSquaredLogarithmicError()
    y_true, y_pred = self._y_true_y_pred()
    self.assertAlmostEqual(self.evaluate(obj(y_true, y_pred)), 1.4370, 3)

  def test_scalar_weighted(self):
    obj = keras.losses.MeanSquaredLogarithmicError()
    y_true, y_pred = self._y_true_y_pred()
    loss = obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 3.3051, 3)

  def test_sample_weighted(self):
    obj = keras.losses.MeanSquaredLogarithmicError()
    y_true, y_pred = self._y_true_y_pred()
    weights = constant_op.constant([1.2, 3.4], shape=(2, 1))
    loss = obj(y_true, y_pred, sample_weight=weights)
    self.assertAlmostEqual(self.evaluate(loss), 3.7856, 3)

  def test_timestep_weighted(self):
    obj = keras.losses.MeanSquaredLogarithmicError()
    y_true, y_pred = self._y_true_y_pred(shape=(2, 3, 1))
    weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
    loss = obj(y_true, y_pred, sample_weight=weights)
    self.assertAlmostEqual(self.evaluate(loss), 2.6473, 3)

  def test_zero_weighted(self):
    obj = keras.losses.MeanSquaredLogarithmicError()
    y_true, y_pred = self._y_true_y_pred()
    loss = obj(y_true, y_pred, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
@test_util.run_all_in_graph_and_eager_modes
class CosineProximityTest(test.TestCase):
  """Tests for the `keras.losses.CosineProximity` loss class."""

  def _y_true_y_pred(self, shape=(2, 3)):
    """Shared fixture: integer targets and float32 predictions."""
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=shape)
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=shape,
                                  dtype=dtypes.float32)
    return y_true, y_pred

  def test_config(self):
    obj = keras.losses.CosineProximity(
        reduction=losses_impl.ReductionV2.SUM, name='cosine_loss')
    self.assertEqual(obj.name, 'cosine_loss')
    self.assertEqual(obj.reduction, losses_impl.ReductionV2.SUM)

  def test_unweighted(self):
    obj = keras.losses.CosineProximity()
    y_true, y_pred = self._y_true_y_pred()
    self.assertAlmostEqual(self.evaluate(obj(y_true, y_pred)), -0.18722, 3)

  def test_scalar_weighted(self):
    obj = keras.losses.CosineProximity()
    y_true, y_pred = self._y_true_y_pred()
    loss = obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), -0.43060, 3)

  def test_sample_weighted(self):
    obj = keras.losses.CosineProximity()
    y_true, y_pred = self._y_true_y_pred()
    weights = constant_op.constant([1.2, 3.4], shape=(2, 1))
    loss = obj(y_true, y_pred, sample_weight=weights)
    self.assertAlmostEqual(self.evaluate(loss), 0.15599, 3)

  def test_timestep_weighted(self):
    obj = keras.losses.CosineProximity()
    y_true, y_pred = self._y_true_y_pred(shape=(2, 3, 1))
    weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
    loss = obj(y_true, y_pred, sample_weight=weights)
    self.assertAlmostEqual(self.evaluate(loss), -2.0000, 3)

  def test_zero_weighted(self):
    obj = keras.losses.CosineProximity()
    y_true, y_pred = self._y_true_y_pred()
    loss = obj(y_true, y_pred, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(loss), 0., 3)
@test_util.run_all_in_graph_and_eager_modes
class BinaryCrossentropyTest(test.TestCase):
  """Tests for the `keras.losses.BinaryCrossentropy` loss class."""

  def test_config(self):
    bce_obj = keras.losses.BinaryCrossentropy(
        reduction=losses_impl.ReductionV2.SUM, name='bce_1')
    self.assertEqual(bce_obj.name, 'bce_1')
    self.assertEqual(bce_obj.reduction, losses_impl.ReductionV2.SUM)

  def test_all_correct_unweighted(self):
    # Perfect predictions give (near) zero loss on both the probability and
    # the saturated-logit paths.
    y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                                  dtype=dtypes.float32)
    bce_obj = keras.losses.BinaryCrossentropy()
    loss = bce_obj(y_true, y_true)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
    # Test with logits.
    logits = constant_op.constant([[100.0, -100.0, -100.0],
                                   [-100.0, 100.0, -100.0],
                                   [-100.0, -100.0, 100.0]])
    bce_obj = keras.losses.BinaryCrossentropy(from_logits=True)
    loss = bce_obj(y_true, logits)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_unweighted(self):
    bce_obj = keras.losses.BinaryCrossentropy()
    y_true = constant_op.constant([1, 0, 1, 0, 0, 1], shape=(2, 3))
    y_pred = constant_op.constant([1, 1, 1, 0, 1, 0],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = bce_obj(y_true, y_pred)
    self.assertAlmostEqual(self.evaluate(loss), 8.0004, 3)
    # Test with logits.
    logits = constant_op.constant([10., 10., 10., -10., 10, -10],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    bce_obj = keras.losses.BinaryCrossentropy(from_logits=True)
    loss = bce_obj(y_true, logits)
    self.assertAlmostEqual(self.evaluate(loss), 5., 3)

  def test_scalar_weighted(self):
    # A scalar sample_weight scales the unweighted loss uniformly.
    bce_obj = keras.losses.BinaryCrossentropy()
    y_true = constant_op.constant([1, 0, 1, 0, 0, 1], shape=(2, 3))
    y_pred = constant_op.constant([1, 1, 1, 0, 1, 0],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = bce_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 18.4010, 3)
    # Test with logits.
    y_true = array_ops.ones((32, 1))
    logits = array_ops.ones((32, 1), dtype=dtypes.float32)
    bce_obj = keras.losses.BinaryCrossentropy(from_logits=True)
    loss = bce_obj(y_true, logits, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 0.7205, 3)

  def test_sample_weighted(self):
    # Per-sample weights of shape (batch, 1) rescale each row's loss.
    bce_obj = keras.losses.BinaryCrossentropy()
    y_true = constant_op.constant([1, 0, 1, 0, 0, 1], shape=(2, 3))
    y_pred = constant_op.constant([1, 1, 1, 0, 1, 0],
                                  shape=(2, 3),
                                  dtype=dtypes.float64)
    sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
    loss = bce_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 21.4907, 3)
    # Test with logits.
    y_true = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
    logits = constant_op.constant(
        [[100.0, -100.0, -100.0], [-100.0, 100.0, -100.0],
         [-100.0, -100.0, 100.0]],
        dtype=dtypes.float64)
    weights = constant_op.constant([3, 2, 8])
    bce_obj = keras.losses.BinaryCrossentropy(from_logits=True)
    loss = bce_obj(y_true, logits, sample_weight=weights)
    self.assertAlmostEqual(self.evaluate(loss), 288.8888, 3)

  def test_no_reduction(self):
    # With Reduction.NONE, the per-sample losses are returned unreduced.
    y_true = constant_op.constant(((1, 0, 1), (1, 1, 0), (0, 1, 1)))
    logits = constant_op.constant(((100.0, -100.0, 100.0),
                                   (100.0, -100.0, 100.0),
                                   (100.0, 100.0, -100.0)))
    bce_obj = keras.losses.BinaryCrossentropy(
        from_logits=True, reduction=losses_impl.ReductionV2.NONE)
    loss = bce_obj(y_true, logits)
    self.assertAllClose((0., 66.6666, 66.6666), self.evaluate(loss), 3)

  def test_label_smoothing(self):
    logits = constant_op.constant([[100.0, -100.0, -100.0]])
    y_true = constant_op.constant([[1, 0, 1]])
    label_smoothing = 0.1
    # Loss: max(x, 0) - x * z + log(1 + exp(-abs(x)))
    # Label smoothing: z' = z * (1 - L) + 0.5L
    #                  1 = 1 - 0.5L
    #                  0 = 0.5L
    # Applying the above two fns to the given input:
    # (100 - 100 * (1 - 0.5 L)  + 0 +
    #  0   + 100 * (0.5 L)      + 0 +
    #  0   + 100 * (1 - 0.5 L)  + 0) * (1/3)
    # = (100 + 50L) * 1/3
    bce_obj = keras.losses.BinaryCrossentropy(
        from_logits=True, label_smoothing=label_smoothing)
    loss = bce_obj(y_true, logits)
    expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
    self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)
@test_util.run_all_in_graph_and_eager_modes
class CategoricalCrossentropyTest(test.TestCase):
  """Tests for keras.losses.CategoricalCrossentropy (dense and sparse labels)."""

  def test_config(self):
    # Constructor arguments must round-trip to the public attributes.
    # FIX: the name used to be 'bce_1', copy/pasted from the
    # BinaryCrossentropy tests; use a CCE-specific name.
    cce_obj = keras.losses.CategoricalCrossentropy(
        reduction=losses_impl.ReductionV2.SUM, name='cce_1')
    self.assertEqual(cce_obj.name, 'cce_1')
    self.assertEqual(cce_obj.reduction, losses_impl.ReductionV2.SUM)

  def test_all_correct_unweighted(self):
    # One-hot targets identical to the predictions give zero loss.
    y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                                  dtype=dtypes.int64)
    y_pred = constant_op.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
                                  dtype=dtypes.float32)
    cce_obj = keras.losses.CategoricalCrossentropy()
    loss = cce_obj(y_true, y_pred)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
    # Test with logits.
    logits = constant_op.constant([[10., 0., 0.], [0., 10., 0.], [0., 0., 10.]])
    cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
    loss = cce_obj(y_true, logits)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_unweighted(self):
    cce_obj = keras.losses.CategoricalCrossentropy()
    y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    y_pred = constant_op.constant(
        [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
    loss = cce_obj(y_true, y_pred)
    self.assertAlmostEqual(self.evaluate(loss), .3239, 3)
    # Test with logits.
    logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
    loss = cce_obj(y_true, logits)
    self.assertAlmostEqual(self.evaluate(loss), .0573, 3)

  def test_scalar_weighted(self):
    # A scalar weight multiplies the reduced loss uniformly.
    cce_obj = keras.losses.CategoricalCrossentropy()
    y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    y_pred = constant_op.constant(
        [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
    loss = cce_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), .7449, 3)
    # Test with logits.
    logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
    loss = cce_obj(y_true, logits, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), .1317, 3)

  def test_sample_weighted(self):
    # Per-row weights scale each sample's contribution.
    cce_obj = keras.losses.CategoricalCrossentropy()
    y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    y_pred = constant_op.constant(
        [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
    sample_weight = constant_op.constant([[1.2], [3.4], [5.6]], shape=(3, 1))
    loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 1.0696, 3)
    # Test with logits.
    logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
    loss = cce_obj(y_true, logits, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 0.31829, 3)

  def test_no_reduction(self):
    # ReductionV2.NONE must return the unreduced per-sample losses.
    y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    cce_obj = keras.losses.CategoricalCrossentropy(
        from_logits=True, reduction=losses_impl.ReductionV2.NONE)
    loss = cce_obj(y_true, logits)
    self.assertAllClose((0.001822, 0.000459, 0.169846), self.evaluate(loss), 3)

  def test_label_smoothing(self):
    logits = constant_op.constant([[100.0, -100.0, -100.0]])
    y_true = constant_op.constant([[1, 0, 0]])
    label_smoothing = 0.1
    # Softmax Cross Entropy Loss: -\sum_i p_i \log q_i
    # where for a softmax activation
    # \log q_i = x_i - \log \sum_j \exp x_j
    #          = x_i - x_max - \log \sum_j \exp (x_j - x_max)
    # For our activations, [100, -100, -100]
    # \log ( exp(0) + exp(-200) + exp(-200) ) = 0
    # so our log softmaxes become: [0, -200, -200]
    # Label smoothing: z' = z * (1 - L) + L/n
    #                  1 = 1 - L + L/n
    #                  0 = L/n
    # Applying the above two fns to the given input:
    # -0 * (1 - L + L/n) + 200 * L/n + 200 * L/n = 400 L/n
    cce_obj = keras.losses.CategoricalCrossentropy(
        from_logits=True, label_smoothing=label_smoothing)
    loss = cce_obj(y_true, logits)
    expected_value = 400.0 * label_smoothing / 3.0
    self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)

  def test_all_correct_unweighted_sparse(self):
    # Sparse (index) labels; correct predictions give zero loss.
    y_true = constant_op.constant([[0], [1], [2]], dtype=dtypes.int64)
    y_pred = constant_op.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
                                  dtype=dtypes.float32)
    cce_obj = keras.losses.CategoricalCrossentropy()
    loss = cce_obj(y_true, y_pred)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
    # Test with logits.
    logits = constant_op.constant([[10., 0., 0.], [0., 10., 0.], [0., 0., 10.]])
    cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
    loss = cce_obj(y_true, logits)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_unweighted_sparse(self):
    # Sparse labels must produce the same values as the one-hot cases above.
    cce_obj = keras.losses.CategoricalCrossentropy()
    y_true = constant_op.constant([0, 1, 2])
    y_pred = constant_op.constant(
        [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
    loss = cce_obj(y_true, y_pred)
    self.assertAlmostEqual(self.evaluate(loss), .3239, 3)
    # Test with logits.
    logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
    loss = cce_obj(y_true, logits)
    self.assertAlmostEqual(self.evaluate(loss), .0573, 3)

  def test_scalar_weighted_sparse(self):
    cce_obj = keras.losses.CategoricalCrossentropy()
    y_true = constant_op.constant([[0], [1], [2]])
    y_pred = constant_op.constant(
        [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
    loss = cce_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), .7449, 3)
    # Test with logits.
    logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
    loss = cce_obj(y_true, logits, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), .1317, 3)

  def test_sample_weighted_sparse(self):
    cce_obj = keras.losses.CategoricalCrossentropy()
    y_true = constant_op.constant([[0], [1], [2]])
    y_pred = constant_op.constant(
        [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
    sample_weight = constant_op.constant([[1.2], [3.4], [5.6]], shape=(3, 1))
    loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 1.0696, 3)
    # Test with logits.
    logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
    loss = cce_obj(y_true, logits, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 0.31829, 3)

  def test_no_reduction_sparse(self):
    y_true = constant_op.constant([[0], [1], [2]])
    logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    cce_obj = keras.losses.CategoricalCrossentropy(
        from_logits=True, reduction=losses_impl.ReductionV2.NONE)
    loss = cce_obj(y_true, logits)
    self.assertAllClose((0.001822, 0.000459, 0.169846), self.evaluate(loss), 3)
@test_util.run_all_in_graph_and_eager_modes
class HingeTest(test.TestCase):
  """Tests for the keras.losses.Hinge loss class."""

  def test_config(self):
    loss_fn = keras.losses.Hinge(
        reduction=losses_impl.ReductionV2.SUM, name='hinge_loss')
    self.assertEqual(loss_fn.name, 'hinge_loss')
    self.assertEqual(loss_fn.reduction, losses_impl.ReductionV2.SUM)

  def test_unweighted(self):
    loss_fn = keras.losses.Hinge()
    labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    preds = constant_op.constant([4, 8, 12, 8, 1, 3],
                                 shape=(2, 3),
                                 dtype=dtypes.float32)
    result = loss_fn(labels, preds)
    self.assertAlmostEqual(self.evaluate(result), 7.3333, 3)

  def test_scalar_weighted(self):
    loss_fn = keras.losses.Hinge()
    labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    preds = constant_op.constant([4, 8, 12, 8, 1, 3],
                                 shape=(2, 3),
                                 dtype=dtypes.float32)
    result = loss_fn(labels, preds, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(result), 16.8666, 3)
    # Calling again with identical inputs must be deterministic.
    repeated = loss_fn(labels, preds, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(result), self.evaluate(repeated), 3)

  def test_sample_weighted(self):
    loss_fn = keras.losses.Hinge()
    labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    preds = constant_op.constant([4, 8, 12, 8, 1, 3],
                                 shape=(2, 3),
                                 dtype=dtypes.float32)
    row_weights = constant_op.constant([1.2, 3.4], shape=(2, 1))
    result = loss_fn(labels, preds, sample_weight=row_weights)
    self.assertAlmostEqual(self.evaluate(result), 24.9333, 3)

  def test_timestep_weighted(self):
    # 3-D inputs with a per-timestep weight tensor.
    loss_fn = keras.losses.Hinge()
    labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
    preds = constant_op.constant([4, 8, 12, 8, 1, 3],
                                 shape=(2, 3, 1),
                                 dtype=dtypes.float32)
    step_weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
    result = loss_fn(labels, preds, sample_weight=step_weights)
    self.assertAlmostEqual(self.evaluate(result), 2.0, 3)

  def test_zero_weighted(self):
    # A zero weight must zero out the loss entirely.
    loss_fn = keras.losses.Hinge()
    labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    preds = constant_op.constant([4, 8, 12, 8, 1, 3],
                                 shape=(2, 3),
                                 dtype=dtypes.float32)
    result = loss_fn(labels, preds, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(result), 0., 3)
@test_util.run_all_in_graph_and_eager_modes
class SquaredHingeTest(test.TestCase):
  """Tests for the keras.losses.SquaredHinge loss class."""

  def test_config(self):
    loss_fn = keras.losses.SquaredHinge(
        reduction=losses_impl.ReductionV2.SUM, name='sq_hinge_loss')
    self.assertEqual(loss_fn.name, 'sq_hinge_loss')
    self.assertEqual(loss_fn.reduction, losses_impl.ReductionV2.SUM)

  def test_unweighted(self):
    loss_fn = keras.losses.SquaredHinge()
    labels = constant_op.constant([1, 9, 2, -5], shape=(2, 2))
    preds = constant_op.constant([4, 8, 12, 8],
                                 shape=(2, 2),
                                 dtype=dtypes.float32)
    # squared_hinge = mean(square(max(1 - y_true * y_pred, 0)), axis=-1)
    # 1 - y_true * y_pred = [[1-4, 1-72], [1-24, 1+40]]
    # clamped and squared = [[0, 0], [0, 41^2]] = [[0, 0], [0, 1681]]
    # row means = [0, 840.5]; reduced loss = (0 + 840.5) / 2 = 420.25
    result = loss_fn(labels, preds)
    self.assertAlmostEqual(self.evaluate(result), 420.25, 3)

  def test_scalar_weighted(self):
    loss_fn = keras.losses.SquaredHinge()
    labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    preds = constant_op.constant([4, 8, 12, 8, 1, 3],
                                 shape=(2, 3),
                                 dtype=dtypes.float32)
    result = loss_fn(labels, preds, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(result), 647.833, 3)
    # Calling again with identical inputs must be deterministic.
    repeated = loss_fn(labels, preds, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(result), self.evaluate(repeated), 3)

  def test_sample_weighted(self):
    loss_fn = keras.losses.SquaredHinge()
    labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    preds = constant_op.constant([4, 8, 12, 8, 1, 3],
                                 shape=(2, 3),
                                 dtype=dtypes.float32)
    row_weights = constant_op.constant([1.2, 3.4], shape=(2, 1))
    result = loss_fn(labels, preds, sample_weight=row_weights)
    self.assertAlmostEqual(self.evaluate(result), 957.667, 3)

  def test_timestep_weighted(self):
    # 3-D inputs with a per-timestep weight tensor.
    loss_fn = keras.losses.SquaredHinge()
    labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
    preds = constant_op.constant([4, 8, 12, 8, 1, 3],
                                 shape=(2, 3, 1),
                                 dtype=dtypes.float32)
    step_weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
    result = loss_fn(labels, preds, sample_weight=step_weights)
    self.assertAlmostEqual(self.evaluate(result), 6.0, 3)

  def test_zero_weighted(self):
    # A zero weight must zero out the loss entirely.
    loss_fn = keras.losses.SquaredHinge()
    labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    preds = constant_op.constant([4, 8, 12, 8, 1, 3],
                                 shape=(2, 3),
                                 dtype=dtypes.float32)
    result = loss_fn(labels, preds, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(result), 0., 3)
@test_util.run_all_in_graph_and_eager_modes
class CategoricalHingeTest(test.TestCase):
  """Tests for the keras.losses.CategoricalHinge loss class."""

  def test_config(self):
    loss_fn = keras.losses.CategoricalHinge(
        reduction=losses_impl.ReductionV2.SUM, name='cat_hinge_loss')
    self.assertEqual(loss_fn.name, 'cat_hinge_loss')
    self.assertEqual(loss_fn.reduction, losses_impl.ReductionV2.SUM)

  def test_unweighted(self):
    loss_fn = keras.losses.CategoricalHinge()
    labels = constant_op.constant([1, 9, 2, -5], shape=(2, 2))
    preds = constant_op.constant([4, 8, 12, 8],
                                 shape=(2, 2),
                                 dtype=dtypes.float32)
    result = loss_fn(labels, preds)
    # pos = reduce_sum(y_true * y_pred) = [1*4 + 9*8, 2*12 + (-5)*8]
    #     = [76, -16]
    # neg = reduce_max((1 - y_true) * y_pred) = max([[0, -64], [-12, 48]])
    #     = [0, 48]
    # cat_hinge = max(0, neg - pos + 1) = [0, 65]
    # reduced loss = (0 + 65) / 2 = 32.5
    self.assertAlmostEqual(self.evaluate(result), 32.5, 3)

  def test_scalar_weighted(self):
    loss_fn = keras.losses.CategoricalHinge()
    labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    preds = constant_op.constant([4, 8, 12, 8, 1, 3],
                                 shape=(2, 3),
                                 dtype=dtypes.float32)
    result = loss_fn(labels, preds, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(result), 83.95, 3)
    # Calling again with identical inputs must be deterministic.
    repeated = loss_fn(labels, preds, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(result), self.evaluate(repeated), 3)

  def test_sample_weighted(self):
    loss_fn = keras.losses.CategoricalHinge()
    labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    preds = constant_op.constant([4, 8, 12, 8, 1, 3],
                                 shape=(2, 3),
                                 dtype=dtypes.float32)
    row_weights = constant_op.constant([1.2, 3.4], shape=(2, 1))
    result = loss_fn(labels, preds, sample_weight=row_weights)
    self.assertAlmostEqual(self.evaluate(result), 124.1, 3)

  def test_timestep_weighted(self):
    # 3-D inputs with a per-timestep weight tensor.
    loss_fn = keras.losses.CategoricalHinge()
    labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
    preds = constant_op.constant([4, 8, 12, 8, 1, 3],
                                 shape=(2, 3, 1),
                                 dtype=dtypes.float32)
    step_weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
    result = loss_fn(labels, preds, sample_weight=step_weights)
    self.assertAlmostEqual(self.evaluate(result), 4.0, 3)

  def test_zero_weighted(self):
    # A zero weight must zero out the loss entirely.
    loss_fn = keras.losses.CategoricalHinge()
    labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    preds = constant_op.constant([4, 8, 12, 8, 1, 3],
                                 shape=(2, 3),
                                 dtype=dtypes.float32)
    result = loss_fn(labels, preds, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(result), 0., 3)
@test_util.run_all_in_graph_and_eager_modes
class LogLossTest(test.TestCase):
  """Tests for the keras.losses.LogLoss class."""

  def setup(self):
    # TODO(psv): Change to setUp() after b/122319309 is fixed.
    # NOTE: because this is not the unittest setUp() hook, every test that
    # needs these shared fixtures must call self.setup() explicitly.
    y_pred = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
    y_true = np.asarray([1., 0., 1., 1., 0., 0.]).reshape((2, 3))
    epsilon = 1e-7  # to avoid log 0
    self.batch_size = 6
    # Elementwise log loss: -(y*log(p + eps) + (1-y)*log(1-p + eps)).
    self.expected_losses = np.multiply(y_true, np.log(y_pred + epsilon))
    self.expected_losses += np.multiply(1 - y_true,
                                        np.log(1 - y_pred + epsilon))
    self.expected_losses = -self.expected_losses
    self.y_pred = constant_op.constant(y_pred)
    self.y_true = constant_op.constant(y_true)

  def test_config(self):
    # Constructor arguments must round-trip to the public attributes.
    log_loss_obj = keras.losses.LogLoss(
        reduction=losses_impl.ReductionV2.SUM, name='log')
    self.assertEqual(log_loss_obj.name, 'log')
    self.assertEqual(log_loss_obj.reduction, losses_impl.ReductionV2.SUM)

  def test_all_correct(self):
    # Perfect predictions (y_pred == y_true) give (near-)zero loss.
    self.setup()
    log_loss_obj = keras.losses.LogLoss()
    loss = log_loss_obj(self.y_true, self.y_true)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_unweighted(self):
    self.setup()
    log_loss_obj = keras.losses.LogLoss()
    loss = log_loss_obj(self.y_true, self.y_pred)
    # Reduction: sum over all elements divided by the batch size.
    actual_loss = np.sum(self.expected_losses) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)

  def test_scalar_weighted(self):
    # A scalar weight multiplies the reduced loss uniformly.
    self.setup()
    log_loss_obj = keras.losses.LogLoss()
    sample_weight = 2.3
    loss = log_loss_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    actual_loss = sample_weight * np.sum(self.expected_losses) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
    # Verify we get the same output when the same input is given
    loss_2 = log_loss_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)

  def test_sample_weighted(self):
    self.setup()
    log_loss_obj = keras.losses.LogLoss()
    sample_weight = constant_op.constant((1.2, 3.4), shape=(2, 1))
    loss = log_loss_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    # Per-row weights broadcast across the 3 columns of each row.
    actual_loss = np.multiply(
        self.expected_losses,
        np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
    actual_loss = np.sum(actual_loss) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)

  def test_timestep_weighted(self):
    # Builds its own 3-D fixtures locally; intentionally does not call
    # self.setup().
    log_loss_obj = keras.losses.LogLoss()
    y_pred = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3, 1))
    y_true = np.asarray([1., 0., 1., 1., 0., 0.]).reshape((2, 3, 1))
    epsilon = 1e-7  # to avoid log 0
    batch_size = 6
    # NOTE: unlike setup(), expected_losses here keeps the positive sign;
    # it is negated below when computing actual_loss.
    expected_losses = np.multiply(y_true, np.log(y_pred + epsilon))
    expected_losses += np.multiply(1 - y_true, np.log(1 - y_pred + epsilon))
    y_pred = constant_op.constant(y_pred)
    y_true = constant_op.constant(y_true)
    sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))
    loss = log_loss_obj(
        y_true,
        y_pred,
        sample_weight=constant_op.constant(sample_weight, shape=(2, 3)))
    actual_loss = np.multiply(-expected_losses, sample_weight)
    actual_loss = np.sum(actual_loss) / batch_size
    self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)

  def test_zero_weighted(self):
    # A zero weight must zero out the loss entirely.
    self.setup()
    log_loss_obj = keras.losses.LogLoss()
    sample_weight = 0
    loss = log_loss_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 0., 3)
# Standard test entry point: discover and run all test cases in this module.
if __name__ == '__main__':
  test.main()
# ===========================================================================
# File boundary (dataset artifact): manila/tests/scheduler/test_scheduler.py
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler
"""
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from manila import context
from manila import db
from manila import exception
from manila.scheduler import driver
from manila.scheduler import manager
from manila.scheduler import simple
from manila.share import rpcapi as share_rpcapi
from manila import test
from manila.tests import db_utils
from manila import utils
CONF = cfg.CONF
class SchedulerManagerTestCase(test.TestCase):
    """Test case for scheduler manager."""

    manager_cls = manager.SchedulerManager
    driver_cls = driver.Scheduler
    driver_cls_name = 'manila.scheduler.driver.Scheduler'

    def setUp(self):
        super(SchedulerManagerTestCase, self).setUp()
        self.flags(scheduler_driver=self.driver_cls_name)
        self.manager = self.manager_cls()
        self.context = context.RequestContext('fake_user', 'fake_project')
        self.topic = 'fake_topic'
        self.fake_args = (1, 2, 3)
        self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}

    def test_1_correct_init(self):
        # Correct scheduler driver.
        # FIX: use assertIsInstance for a clearer failure message, and a
        # local name that does not shadow the imported ``manager`` module.
        mgr = self.manager
        self.assertIsInstance(mgr.driver, self.driver_cls)

    def test_update_service_capabilities(self):
        service_name = 'fake_service'
        host = 'fake_host'
        # Without explicit capabilities, an empty dict is forwarded.
        with mock.patch.object(self.manager.driver,
                               'update_service_capabilities', mock.Mock()):
            self.manager.update_service_capabilities(
                self.context, service_name=service_name, host=host)
            self.manager.driver.update_service_capabilities.\
                assert_called_once_with(service_name, host, {})
        # Explicit capabilities are forwarded unchanged.
        with mock.patch.object(self.manager.driver,
                               'update_service_capabilities', mock.Mock()):
            capabilities = {'fake_capability': 'fake_value'}
            self.manager.update_service_capabilities(
                self.context, service_name=service_name, host=host,
                capabilities=capabilities)
            self.manager.driver.update_service_capabilities.\
                assert_called_once_with(service_name, host, capabilities)

    @mock.patch.object(db, 'share_update', mock.Mock())
    def test_create_share_exception_puts_share_in_error_state(self):
        """Test that a NoValidHost exception for create_share.

        Puts the share in 'error' state and eats the exception.
        """
        def raise_no_valid_host(*args, **kwargs):
            raise exception.NoValidHost(reason="")

        fake_share_id = 1
        request_spec = {'share_id': fake_share_id}
        with mock.patch.object(self.manager.driver,
                               'schedule_create_share',
                               mock.Mock(side_effect=raise_no_valid_host)):
            self.mock_object(manager.LOG, 'error')
            # Must not raise despite the driver failing.
            self.manager.create_share_instance(
                self.context, request_spec=request_spec, filter_properties={})
            db.share_update.assert_called_once_with(
                self.context, fake_share_id, {'status': 'error'})
            self.manager.driver.schedule_create_share.assert_called_once_with(
                self.context, request_spec, {})
            manager.LOG.error.assert_called_once_with(mock.ANY, mock.ANY)

    def test_get_pools(self):
        """Ensure get_pools exists and calls driver.get_pools."""
        mock_get_pools = self.mock_object(self.manager.driver, 'get_pools',
                                          mock.Mock(return_value='fake_pools'))
        result = self.manager.get_pools(self.context, filters='fake_filters')
        mock_get_pools.assert_called_once_with(self.context, 'fake_filters')
        self.assertEqual('fake_pools', result)

    @mock.patch.object(db, 'consistency_group_update', mock.Mock())
    def test_create_cg_no_valid_host_puts_cg_in_error_state(self):
        """Test that NoValidHost is raised for create_consistency_group.

        Puts the share in 'error' state and eats the exception.
        """
        def raise_no_valid_host(*args, **kwargs):
            raise exception.NoValidHost(reason="")

        fake_cg_id = 1
        cg_id = fake_cg_id
        request_spec = {"consistency_group_id": cg_id}
        with mock.patch.object(self.manager.driver,
                               'schedule_create_consistency_group',
                               mock.Mock(side_effect=raise_no_valid_host)):
            self.manager.create_consistency_group(self.context,
                                                  fake_cg_id,
                                                  request_spec=request_spec,
                                                  filter_properties={})
            db.consistency_group_update.assert_called_once_with(
                self.context, fake_cg_id, {'status': 'error'})
            self.manager.driver.schedule_create_consistency_group\
                .assert_called_once_with(self.context, cg_id,
                                         request_spec, {})

    @mock.patch.object(db, 'consistency_group_update', mock.Mock())
    def test_create_cg_exception_puts_cg_in_error_state(self):
        """Test that exceptions for create_consistency_group.

        Puts the share in 'error' state and raises the exception.
        """
        fake_cg_id = 1
        cg_id = fake_cg_id
        request_spec = {"consistency_group_id": cg_id}
        with mock.patch.object(self.manager.driver,
                               'schedule_create_consistency_group',
                               mock.Mock(side_effect=exception.NotFound)):
            # Unlike NoValidHost, other exceptions must propagate.
            self.assertRaises(exception.NotFound,
                              self.manager.create_consistency_group,
                              self.context, fake_cg_id,
                              request_spec=request_spec,
                              filter_properties={})

    def test_migrate_share_to_host(self):
        share = db_utils.create_share()
        host = 'fake@backend#pool'
        self.mock_object(db, 'share_get', mock.Mock(return_value=share))
        self.mock_object(share_rpcapi.ShareAPI, 'migrate_share')
        self.mock_object(driver.Scheduler, 'host_passes_filters',
                         mock.Mock(return_value=host))
        self.manager.migrate_share_to_host(self.context, share['id'], host,
                                           False, {}, None)

    def test_migrate_share_to_host_no_valid_host(self):
        # A NoValidHost from the filter check must be swallowed.
        share = db_utils.create_share()
        host = 'fake@backend#pool'
        self.mock_object(
            driver.Scheduler, 'host_passes_filters',
            mock.Mock(side_effect=[exception.NoValidHost('fake')]))
        self.manager.migrate_share_to_host(self.context, share['id'], host,
                                           False, {}, None)
class SchedulerTestCase(test.TestCase):
    """Test case for base scheduler driver class."""

    # So we can subclass this test and re-use tests if we need.
    driver_cls = driver.Scheduler

    def setUp(self):
        super(SchedulerTestCase, self).setUp()
        self.driver = self.driver_cls()
        self.context = context.RequestContext('fake_user', 'fake_project')
        self.topic = 'fake_topic'

    def test_update_service_capabilities(self):
        service_name = 'fake_service'
        host = 'fake_host'
        capabilities = {'fake_capability': 'fake_value'}
        patcher = mock.patch.object(self.driver.host_manager,
                                    'update_service_capabilities',
                                    mock.Mock())
        with patcher:
            # The driver must forward the call verbatim to its host manager.
            self.driver.update_service_capabilities(
                service_name, host, capabilities)
            self.driver.host_manager.update_service_capabilities.\
                assert_called_once_with(service_name, host, capabilities)

    def test_hosts_up(self):
        services = [{'host': 'host1'}, {'host': 'host2'}]

        def fake_service_is_up(*args, **kwargs):
            # Report only 'host1' as down.
            return args[0]['host'] != 'host1'

        with mock.patch.object(db, 'service_get_all_by_topic',
                               mock.Mock(return_value=services)):
            with mock.patch.object(utils, 'service_is_up',
                                   mock.Mock(side_effect=fake_service_is_up)):
                result = self.driver.hosts_up(self.context, self.topic)
                self.assertEqual(result, ['host2'])
                db.service_get_all_by_topic.assert_called_once_with(
                    self.context, self.topic)
class SchedulerDriverBaseTestCase(SchedulerTestCase):
    """Test cases for base scheduler driver class methods.

    These can't fail if the driver is changed.
    """

    def test_unimplemented_schedule(self):
        # The abstract base scheduler must refuse to schedule anything.
        args = (1, 2, 3)
        kwargs = {'cat': 'meow'}
        self.assertRaises(NotImplementedError, self.driver.schedule,
                          self.context, self.topic, 'schedule_something',
                          *args, **kwargs)
class SchedulerDriverModuleTestCase(test.TestCase):
    """Test case for scheduler driver module methods."""

    def setUp(self):
        super(SchedulerDriverModuleTestCase, self).setUp()
        self.context = context.RequestContext('fake_user', 'fake_project')

    @mock.patch.object(db, 'share_update', mock.Mock())
    def test_share_host_update_db(self):
        # Freeze the clock so the scheduled_at value is predictable.
        frozen_now = 'fake-now'
        with mock.patch.object(timeutils, 'utcnow',
                               mock.Mock(return_value=frozen_now)):
            driver.share_update_db(self.context, 31337, 'fake_host')
            db.share_update.assert_called_once_with(
                self.context, 31337,
                {'host': 'fake_host', 'scheduled_at': frozen_now})
class SimpleSchedulerSharesTestCase(test.TestCase):
    """Test case for simple scheduler create share method."""

    def setUp(self):
        super(SimpleSchedulerSharesTestCase, self).setUp()
        # Stub out the share RPC API so no messages are actually sent.
        self.mock_object(share_rpcapi, 'ShareAPI')
        self.driver = simple.SimpleScheduler()
        self.context = context.RequestContext('fake_user', 'fake_project')
        self.admin_context = context.RequestContext('fake_admin_user',
                                                    'fake_project')
        self.admin_context.is_admin = True

    @mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True))
    def test_create_share_if_two_services_up(self):
        # Two enabled hosts available: the scheduler must pick the first one
        # returned by the sorted service query ('fake_host1').
        share_id = 'fake'
        fake_share = {'id': share_id, 'size': 1}
        fake_service_1 = {'disabled': False, 'host': 'fake_host1'}
        fake_service_2 = {'disabled': False, 'host': 'fake_host2'}
        fake_result = [(fake_service_1, 2), (fake_service_2, 1)]
        fake_request_spec = {
            'share_id': share_id,
            'share_properties': fake_share,
        }
        self.mock_object(db, 'service_get_all_share_sorted',
                         mock.Mock(return_value=fake_result))
        self.mock_object(driver, 'share_update_db',
                         mock.Mock(return_value=db_utils.create_share()))
        self.driver.schedule_create_share(self.context,
                                          fake_request_spec, {})
        # Only the first (winning) service should be liveness-checked.
        utils.service_is_up.assert_called_once_with(utils.IsAMatcher(dict))
        db.service_get_all_share_sorted.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext))
        driver.share_update_db.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), share_id, 'fake_host1')

    def test_create_share_if_services_not_available(self):
        # No services at all -> scheduling must fail with NoValidHost.
        share_id = 'fake'
        fake_share = {'id': share_id, 'size': 1}
        fake_result = []
        fake_request_spec = {
            'share_id': share_id,
            'share_properties': fake_share,
        }
        with mock.patch.object(db, 'service_get_all_share_sorted',
                               mock.Mock(return_value=fake_result)):
            self.assertRaises(exception.NoValidHost,
                              self.driver.schedule_create_share,
                              self.context, fake_request_spec, {})
            db.service_get_all_share_sorted.assert_called_once_with(
                utils.IsAMatcher(context.RequestContext))

    def test_create_share_if_max_gigabytes_exceeded(self):
        # Share size (10001) exceeds capacity on every host -> NoValidHost.
        share_id = 'fake'
        fake_share = {'id': share_id, 'size': 10001}
        fake_service_1 = {'disabled': False, 'host': 'fake_host1'}
        fake_service_2 = {'disabled': False, 'host': 'fake_host2'}
        fake_result = [(fake_service_1, 5), (fake_service_2, 7)]
        fake_request_spec = {
            'share_id': share_id,
            'share_properties': fake_share,
        }
        with mock.patch.object(db, 'service_get_all_share_sorted',
                               mock.Mock(return_value=fake_result)):
            self.assertRaises(exception.NoValidHost,
                              self.driver.schedule_create_share,
                              self.context, fake_request_spec, {})
            db.service_get_all_share_sorted.assert_called_once_with(
                utils.IsAMatcher(context.RequestContext))

    @mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True))
    def test_create_share_availability_zone(self):
        # Only the service whose availability_zone_id matches the share
        # instance ('fake') is eligible; 'super_fake' must be skipped.
        share_id = 'fake'
        fake_share = {
            'id': share_id,
            'size': 1,
        }
        fake_instance = {
            'availability_zone_id': 'fake',
        }
        fake_service_1 = {
            'disabled': False, 'host': 'fake_host1',
            'availability_zone_id': 'fake',
        }
        fake_service_2 = {
            'disabled': False, 'host': 'fake_host2',
            'availability_zone_id': 'super_fake',
        }
        fake_result = [(fake_service_1, 0), (fake_service_2, 1)]
        fake_request_spec = {
            'share_id': share_id,
            'share_properties': fake_share,
            'share_instance_properties': fake_instance,
        }
        self.mock_object(db, 'service_get_all_share_sorted',
                         mock.Mock(return_value=fake_result))
        self.mock_object(driver, 'share_update_db',
                         mock.Mock(return_value=db_utils.create_share()))
        self.driver.schedule_create_share(self.context,
                                          fake_request_spec, {})
        utils.service_is_up.assert_called_once_with(fake_service_1)
        driver.share_update_db.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), share_id,
            fake_service_1['host'])
        db.service_get_all_share_sorted.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext))

    @mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True))
    def test_create_share_availability_zone_on_host(self):
        # An admin may force a host with the 'zone:host' syntax in the
        # share's availability_zone; the scheduler must use that host.
        share_id = 'fake'
        fake_share = {
            'id': share_id,
            'availability_zone': 'fake:fake',
            'size': 1,
        }
        fake_service = {'disabled': False, 'host': 'fake'}
        fake_request_spec = {
            'share_id': share_id,
            'share_properties': fake_share,
        }
        self.mock_object(db, 'service_get_all_share_sorted',
                         mock.Mock(return_value=[(fake_service, 1)]))
        self.mock_object(driver, 'share_update_db',
                         mock.Mock(return_value=db_utils.create_share()))
        self.driver.schedule_create_share(self.admin_context,
                                          fake_request_spec, {})
        utils.service_is_up.assert_called_once_with(fake_service)
        db.service_get_all_share_sorted.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext))
        driver.share_update_db.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), share_id, 'fake')
# ===========================================================================
# File boundary (dataset artifact): repository musterchef/OpenMoBu
#
# Shaders Graph Exporter
#
# Sergey <Neill3d> Solokhin 2018
#
# function to bake - BakeShadersGraphEdits(objNS, xmlname)
import os
import time
import subprocess
from pyfbsdk import *
from xml.dom import minidom
import FbxShadersGraphMisc as misc
######################################### Global Variables

# MotionBuilder application/system singletons shared by this module.
lApp = FBApplication()
lSystem = FBSystem()
######################################### Functions
def StoreProperty(newdoc, top_element, prop, namespaceToRemove):
    """Serialize one FBProperty as a <Property> element under top_element.

    Hidden properties, reference properties and the internal
    RefFileName/RefOrigShader entries are skipped.

    Returns 1 when an element was written, 0 when the property was skipped,
    so callers can count exported properties.
    """
    if prop.GetPropertyFlag(FBPropertyFlag.kFBPropertyFlagHideProperty):
        return 0
    if prop.IsReferenceProperty():
        return 0
    propname = prop.Name
    if propname in ('RefFileName', 'RefOrigShader'):
        return 0

    elem = newdoc.createElement('Property')
    elem.setAttribute('Name', propname)
    elem.setAttribute('Type', str(prop.GetPropertyTypeName()))

    value = ""
    try:
        data = prop.Data
        if type(data) is FBTime:
            value = data.GetTimeString()
        elif type(data) is FBVector2d:
            # BUGFIX: FBVector2d only has two components. The original code
            # also wrote a "Z" attribute from data[2], which raised
            # IndexError and left Value as "undefined".
            elem.setAttribute("X", str(data[0]))
            elem.setAttribute("Y", str(data[1]))
            value = str(data)
        elif type(data) in [FBVector3d, FBColor]:
            elem.setAttribute("X", str(data[0]))
            elem.setAttribute("Y", str(data[1]))
            elem.setAttribute("Z", str(data[2]))
            value = str(data)
        elif type(data) in [FBVector4d, FBColorAndAlpha]:
            elem.setAttribute("X", str(data[0]))
            elem.setAttribute("Y", str(data[1]))
            elem.setAttribute("Z", str(data[2]))
            elem.setAttribute("W", str(data[3]))
            value = str(data)
        else:
            value = str(data)
    except Exception:
        # Some property types do not expose .Data (NotImplementedError) or
        # fail to convert; export them with a sentinel value.
        value = "undefined"
    elem.setAttribute("Value", value)
    top_element.appendChild(elem)

    # Write property connectors (models, textures, etc., like
    # FBPropertyObjectList).
    srcCount = prop.GetSrcCount()
    if srcCount > 0:
        conns_elem = newdoc.createElement('Connections')
        conns_elem.setAttribute('Count', str(srcCount))
        elem.appendChild(conns_elem)
        for i in range(srcCount):
            comp = prop.GetSrc(i)
            src_elem = newdoc.createElement('Source')
            conns_elem.appendChild(src_elem)
            longname = comp.LongName
            longname = misc.RemoveFirstNamespace(longname, namespaceToRemove)
            src_elem.setAttribute('ClassName', comp.ClassName())
            src_elem.setAttribute('Name', comp.Name)
            src_elem.setAttribute('LongName', longname)
    return 1
#
# StoreShader
def StoreShader(newdoc, top_element, shader, namespaceToRemove):
    """Serialize *shader* into a <Shader> XML element under *top_element*.

    The element records the shader identity, every non-scene destination
    component the shader is attached to, and every exportable property.
    """
    node = newdoc.createElement('Shader')
    node.setAttribute('ClassName', misc.MBGetShaderTypeName(shader))
    node.setAttribute('Name', shader.Name)
    # NOTE(review): an extra 'S' is appended to the namespace for shader
    # long names — mirrors the original behavior; confirm against misc.
    node.setAttribute('LongName', misc.RemoveFirstNamespace(shader.LongName, namespaceToRemove + 'S'))
    node.setAttribute('System', str(shader.HasObjectFlags(FBObjectFlag.kFBFlagSystem)))
    top_element.appendChild(node)

    # Attachments: every destination component except the scene itself.
    attachments = []
    for idx in range(shader.GetDstCount()):
        dst = shader.GetDst(idx)
        if not isinstance(dst, FBScene):
            attachments.append(dst)

    atts_elem = newdoc.createElement('Attachments')
    atts_elem.setAttribute('Count', str(len(attachments)))
    node.appendChild(atts_elem)
    for dst in attachments:
        dst_elem = newdoc.createElement('Dst')
        atts_elem.appendChild(dst_elem)
        dst_elem.setAttribute('ClassName', dst.ClassName())
        dst_elem.setAttribute('Name', dst.Name)
        dst_elem.setAttribute('LongName', misc.RemoveFirstNamespace(dst.LongName, namespaceToRemove))

    # Properties: StoreProperty returns 1 for each property it exports.
    props_elem = newdoc.createElement('Properties')
    node.appendChild(props_elem)
    exported = sum(StoreProperty(newdoc, props_elem, prop, namespaceToRemove)
                   for prop in shader.PropertyList)
    props_elem.setAttribute('Count', str(exported))
#
# StoreModel
def StoreModel(newdoc, top_element, model, namespaceToRemove):
    """Append a <Model> element describing *model* to *top_element*."""
    stripped = misc.RemoveFirstNamespace(model.LongName, namespaceToRemove)
    node = newdoc.createElement('Model')
    # Attribute insertion order matches the original exporter output.
    for attr, val in (
            ('ClassName', model.ClassName()),
            ('Name', model.Name),
            ('LongName', stripped),
            ('Visibility', str(model.Visibility)),
            ('Show', str(model.Show))):
        node.setAttribute(attr, val)
    top_element.appendChild(node)
#
# Save Shader Graph
def SaveShaderGraph(filename, FBXFileName, shadersList, modelsList, namespaceToRemove):
    """Write an XML shader-graph description to *filename*.

    The document records the source FBX file identity (name, size and
    last-write time, so stale descriptions can be detected), every shader
    in *shadersList* and every model in *modelsList*, with *namespaceToRemove*
    stripped from long names.
    """
    impl = minidom.getDOMImplementation()
    newdoc = impl.createDocument(None, 'ShadersGraph', None)
    top_element = newdoc.documentElement
    top_element.setAttribute('FileName', FBXFileName)
    # File size and last-write time let consumers detect out-of-date graphs.
    top_element.setAttribute("FileSize", str(os.path.getsize(FBXFileName)))
    top_element.setAttribute("LastWrite", str(misc.GetFileLastWrite(FBXFileName)))

    shaders_elem = newdoc.createElement('Shaders')
    shaders_elem.setAttribute('Count', str(len(shadersList)))
    top_element.appendChild(shaders_elem)
    for shader in shadersList:
        StoreShader(newdoc, shaders_elem, shader, namespaceToRemove)

    models_elem = newdoc.createElement('Models')
    models_elem.setAttribute('Count', str(len(modelsList)))
    top_element.appendChild(models_elem)
    for comp in modelsList:
        StoreModel(newdoc, models_elem, comp, namespaceToRemove)

    # BUGFIX: the old code opened the file without a context manager, leaking
    # the handle if toprettyxml() or the write raised; writelines() on a
    # single string was also misleading — write() is the intended call.
    with open(filename, 'w') as res:
        res.write(newdoc.toprettyxml())
#
# BakeShadersGraphEdits
def BakeShadersGraphEdits(objNS, xmlname):
    """Bake shader-graph edits of reference namespace *objNS* into its file.

    Collects the models of the reference namespace and the shaders attached
    to them, writes a fresh shader-graph description to *xmlname*, bakes the
    edits back into the referenced FBX file, then stores a new description
    holder so the baked state becomes the initial one.

    Returns True on success; False when *objNS* is None or *xmlname* does
    not exist.
    """
    if objNS is None:
        return False
    if not os.path.isfile(xmlname):
        # Soft failure by design: a missing description means "nothing to
        # bake" rather than a hard error.
        print('Shaders Graph description is not found!')
        return False

    modelsList = []
    misc.CollectReferenceModels(objNS, modelsList)

    # Collect each attached shader once, preserving first-seen order.
    shadersList = []
    for model in modelsList:
        for shader in model.Shaders:
            if shader not in shadersList:
                shadersList.append(shader)

    # Write a new description of the current (edited) shader graph.
    SaveShaderGraph(xmlname, objNS.ReferenceFilePath, shadersList, modelsList, objNS.LongName)
    # 1. Bake updates into the referenced file.
    misc.RunCmdBake(objNS.ReferenceFilePath, xmlname, objNS.ReferenceFilePath)
    # 2. Store a new description holder - make the baked state initial.
    misc.DescriptionStore(objNS)
    return True
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mediapipe/calculators/util/landmarks_to_render_data_calculator.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mediapipe.framework import calculator_pb2 as mediapipe_dot_framework_dot_calculator__pb2
try:
mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe_dot_framework_dot_calculator__options__pb2
except AttributeError:
mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe.framework.calculator_options_pb2
from mediapipe.util import color_pb2 as mediapipe_dot_util_dot_color__pb2
# NOTE: protoc-generated module state ("DO NOT EDIT" above applies); the
# comments in this section are the only additions and will be lost if the
# module is regenerated from the .proto source.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='mediapipe/calculators/util/landmarks_to_render_data_calculator.proto',
  package='mediapipe',
  syntax='proto2',
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\nDmediapipe/calculators/util/landmarks_to_render_data_calculator.proto\x12\tmediapipe\x1a$mediapipe/framework/calculator.proto\x1a\x1amediapipe/util/color.proto\"\xff\x02\n&LandmarksToRenderDataCalculatorOptions\x12\x1c\n\x14landmark_connections\x18\x01 \x03(\x05\x12(\n\x0elandmark_color\x18\x02 \x01(\x0b\x32\x10.mediapipe.Color\x12*\n\x10\x63onnection_color\x18\x03 \x01(\x0b\x32\x10.mediapipe.Color\x12\x14\n\tthickness\x18\x04 \x01(\x01:\x01\x31\x12&\n\x18visualize_landmark_depth\x18\x05 \x01(\x08:\x04true\x12!\n\x12utilize_visibility\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x14visibility_threshold\x18\x07 \x01(\x01:\x01\x30\x32_\n\x03\x65xt\x12\x1c.mediapipe.CalculatorOptions\x18\xbd\xd2\x9d{ \x01(\x0b\x32\x31.mediapipe.LandmarksToRenderDataCalculatorOptions'
  ,
  dependencies=[mediapipe_dot_framework_dot_calculator__pb2.DESCRIPTOR,mediapipe_dot_util_dot_color__pb2.DESCRIPTOR,])

# Message descriptor for LandmarksToRenderDataCalculatorOptions (proto2).
_LANDMARKSTORENDERDATACALCULATOROPTIONS = _descriptor.Descriptor(
  name='LandmarksToRenderDataCalculatorOptions',
  full_name='mediapipe.LandmarksToRenderDataCalculatorOptions',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='landmark_connections', full_name='mediapipe.LandmarksToRenderDataCalculatorOptions.landmark_connections', index=0,
      number=1, type=5, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='landmark_color', full_name='mediapipe.LandmarksToRenderDataCalculatorOptions.landmark_color', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='connection_color', full_name='mediapipe.LandmarksToRenderDataCalculatorOptions.connection_color', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='thickness', full_name='mediapipe.LandmarksToRenderDataCalculatorOptions.thickness', index=3,
      number=4, type=1, cpp_type=5, label=1,
      has_default_value=True, default_value=float(1),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='visualize_landmark_depth', full_name='mediapipe.LandmarksToRenderDataCalculatorOptions.visualize_landmark_depth', index=4,
      number=5, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=True,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='utilize_visibility', full_name='mediapipe.LandmarksToRenderDataCalculatorOptions.utilize_visibility', index=5,
      number=6, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='visibility_threshold', full_name='mediapipe.LandmarksToRenderDataCalculatorOptions.visibility_threshold', index=6,
      number=7, type=1, cpp_type=5, label=1,
      has_default_value=True, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
    # Extension of mediapipe.CalculatorOptions so these options can be set
    # inline in a CalculatorGraphConfig.
    _descriptor.FieldDescriptor(
      name='ext', full_name='mediapipe.LandmarksToRenderDataCalculatorOptions.ext', index=0,
      number=258435389, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=True, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=150,
  serialized_end=533,
)
# Resolve cross-file message references and register everything with the
# default symbol database.
_LANDMARKSTORENDERDATACALCULATOROPTIONS.fields_by_name['landmark_color'].message_type = mediapipe_dot_util_dot_color__pb2._COLOR
_LANDMARKSTORENDERDATACALCULATOROPTIONS.fields_by_name['connection_color'].message_type = mediapipe_dot_util_dot_color__pb2._COLOR
DESCRIPTOR.message_types_by_name['LandmarksToRenderDataCalculatorOptions'] = _LANDMARKSTORENDERDATACALCULATOROPTIONS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete Python message class generated from the descriptor above.
LandmarksToRenderDataCalculatorOptions = _reflection.GeneratedProtocolMessageType('LandmarksToRenderDataCalculatorOptions', (_message.Message,), {
  'DESCRIPTOR' : _LANDMARKSTORENDERDATACALCULATOROPTIONS,
  '__module__' : 'mediapipe.calculators.util.landmarks_to_render_data_calculator_pb2'
  # @@protoc_insertion_point(class_scope:mediapipe.LandmarksToRenderDataCalculatorOptions)
  })
_sym_db.RegisterMessage(LandmarksToRenderDataCalculatorOptions)
_LANDMARKSTORENDERDATACALCULATOROPTIONS.extensions_by_name['ext'].message_type = _LANDMARKSTORENDERDATACALCULATOROPTIONS
mediapipe_dot_framework_dot_calculator__options__pb2.CalculatorOptions.RegisterExtension(_LANDMARKSTORENDERDATACALCULATOROPTIONS.extensions_by_name['ext'])
# @@protoc_insertion_point(module_scope)
|
# lambda/api/addChannel/addChannel.py
# API - null
# 1. Create new Medialive Input
# 2. Create MediaPackage Channel
# 3. Create MediaPackage Distribution
# 4. Create new Medialive Channel
# 5. Save Channel Detail to DDB
import boto3
import json
import uuid
import os
# BOTO3
# AWS clients/resources are created once per Lambda container and reused
# across invocations.
medialive = boto3.client('medialive')
mediapackage = boto3.client('mediapackage')
dynamodb = boto3.resource('dynamodb')
# ENV VAR
# Deployment configuration injected via Lambda environment variables.
medialive_sg = os.environ['medialive_sg']  # MediaLive input security group id
archive_s3 = os.environ['archive_s3']  # S3 bucket receiving the VOD archive
medialive_role_arn = os.environ['medialive_role_arn']  # IAM role assumed by MediaLive
ddb_channel = dynamodb.Table(os.environ['ddb_channel'])  # channel metadata table
def lambda_handler(event, context):
    """Create a complete live-streaming channel and persist its details.

    Steps:
      1. Create a MediaLive RTMP push input.
      2. Create a MediaPackage channel.
      3. Create a MediaPackage HLS origin endpoint (distribution).
      4. Create a MediaLive channel feeding both S3 (VOD archive) and
         MediaPackage.
      5. Save the channel details to DynamoDB.

    Returns an API-Gateway-style dict with status 200 and the new channel
    item in the JSON body.
    """
    channeluuid = str(uuid.uuid1())
    # BUGFIX: log message previously read "channeli is".
    print(f'channel is {channeluuid}')

    # 1. Create new MediaLive input
    medialive_create_input = medialive.create_input(
        Destinations=[{'StreamName': f'liveinput/{channeluuid}'}, ],
        InputSecurityGroups=[medialive_sg],
        Name=f'input{channeluuid}',
        Type='RTMP_PUSH'
    )
    input_arn = medialive_create_input['Input']['Arn']
    input_endpoint = medialive_create_input['Input']['Destinations'][0]['Url']
    input_name = medialive_create_input['Input']['Name']
    input_id = medialive_create_input['Input']['Id']
    print(f'input_arn = {input_arn}')
    print(f'input_endpoint = {input_endpoint}')
    print(f'input_name = {input_name}')
    print(f'input_id = {input_id}')

    # 2. Create MediaPackage channel (the channel id IS the uuid; the
    # response is intentionally unused).
    mediapackage.create_channel(
        Description=f'mediapackage channel {channeluuid}',
        Id=channeluuid,
    )

    # 3. Create MediaPackage distribution (HLS origin endpoint).
    mediapackage_create_origin_endpoint = mediapackage.create_origin_endpoint(
        # Authorization={
        #     'CdnIdentifierSecret': 'string',
        #     'SecretsRoleArn': 'string'
        # },
        ChannelId=channeluuid,
        # BUGFIX: description previously read "distibution".
        Description=f'mediapackage HLS distribution endpoint channel {channeluuid}',
        HlsPackage={
            'AdMarkers': 'NONE',
            'IncludeIframeOnlyStream': False,
            'PlaylistType': 'EVENT',
            'PlaylistWindowSeconds': 60,
            'SegmentDurationSeconds': 6,
            'StreamSelection': {
                'StreamOrder': 'ORIGINAL'
            }
        },
        Id=channeluuid,
        Origination='ALLOW',
        StartoverWindowSeconds=300,
        TimeDelaySeconds=0
    )
    print(mediapackage_create_origin_endpoint)
    mediapackage_endpoint = mediapackage_create_origin_endpoint['Url']

    # 4. Create new MediaLive channel with two output groups:
    #    - S3VOD: segmented HLS archive written straight to S3.
    #    - MobilePackage: live feed handed to MediaPackage.
    medialive_destination_s3 = str(uuid.uuid4())
    medialive_destination_mediapackage = str(uuid.uuid4())
    medialive_create_channel = medialive.create_channel(
        ChannelClass='SINGLE_PIPELINE',
        Destinations=[{
            'Id': medialive_destination_s3,
            'Settings': [{
                'Url': f's3ssl://{archive_s3}/delivery/{channeluuid}/index'
            }],
            'MediaPackageSettings': []
        },
        {
            'Id': medialive_destination_mediapackage,
            'Settings': [],
            'MediaPackageSettings': [{
                'ChannelId': channeluuid
            }]
        }],
        EncoderSettings={
            'AudioDescriptions': [{
                'AudioTypeControl': 'FOLLOW_INPUT',
                'LanguageCodeControl': 'FOLLOW_INPUT',
                'AudioSelectorName': 'audio_1',
                'Name': 'audio_1'
            },
            {
                'AudioTypeControl': 'FOLLOW_INPUT',
                'LanguageCodeControl': 'FOLLOW_INPUT',
                'AudioSelectorName': 'audio_2',
                'Name': 'audio_2'
            }
            ],
            'CaptionDescriptions': [],
            'OutputGroups': [
                {
                    'OutputGroupSettings': {
                        'HlsGroupSettings': {
                            'AdMarkers': [],
                            'CaptionLanguageSetting': 'OMIT',
                            'CaptionLanguageMappings': [],
                            'HlsCdnSettings': {
                                'HlsBasicPutSettings': {
                                    'NumRetries': 10,
                                    'ConnectionRetryInterval': 1,
                                    'RestartDelay': 15,
                                    'FilecacheDuration': 300
                                }
                            },
                            'InputLossAction': 'EMIT_OUTPUT',
                            'ManifestCompression': 'NONE',
                            'Destination': {
                                'DestinationRefId': medialive_destination_s3
                            },
                            'IvInManifest': 'INCLUDE',
                            'IvSource': 'FOLLOWS_SEGMENT_NUMBER',
                            'ClientCache': 'ENABLED',
                            'TsFileMode': 'SEGMENTED_FILES',
                            'ManifestDurationFormat': 'FLOATING_POINT',
                            'SegmentationMode': 'USE_SEGMENT_DURATION',
                            'RedundantManifest': 'DISABLED',
                            'OutputSelection': 'MANIFESTS_AND_SEGMENTS',
                            'StreamInfResolution': 'INCLUDE',
                            'IFrameOnlyPlaylists': 'DISABLED',
                            'IndexNSegments': 10,
                            'ProgramDateTime': 'EXCLUDE',
                            'ProgramDateTimePeriod': 600,
                            'KeepSegments': 21,
                            'SegmentLength': 10,
                            'TimedMetadataId3Frame': 'PRIV',
                            'TimedMetadataId3Period': 10,
                            'HlsId3SegmentTagging': 'DISABLED',
                            'CodecSpecification': 'RFC_4281',
                            'DirectoryStructure': 'SINGLE_DIRECTORY',
                            'SegmentsPerSubdirectory': 10000,
                            'Mode': 'VOD'
                        }
                    },
                    'Name': 'S3VOD',
                    'Outputs': [{
                        'OutputSettings': {
                            'HlsOutputSettings': {
                                'NameModifier': 'vod',
                                'HlsSettings': {
                                    'StandardHlsSettings': {
                                        'M3u8Settings': {
                                            'AudioFramesPerPes': 4,
                                            'AudioPids': '492-498',
                                            'NielsenId3Behavior': 'NO_PASSTHROUGH',
                                            'PcrControl': 'PCR_EVERY_PES_PACKET',
                                            'PmtPid': '480',
                                            'ProgramNum': 1,
                                            'Scte35Pid': '500',
                                            'Scte35Behavior': 'NO_PASSTHROUGH',
                                            'TimedMetadataPid': '502',
                                            'TimedMetadataBehavior': 'NO_PASSTHROUGH',
                                            'VideoPid': '481'
                                        },
                                        'AudioRenditionSets': 'program_audio'
                                    }
                                },
                                'H265PackagingType': 'HVC1'
                            }
                        },
                        'OutputName': 'S3VOD_1',
                        'VideoDescriptionName': 'video_6er6o',
                        'AudioDescriptionNames': [
                            'audio_1'
                        ],
                        'CaptionDescriptionNames': []
                    }]
                },
                {
                    'OutputGroupSettings': {
                        'MediaPackageGroupSettings': {
                            'Destination': {
                                'DestinationRefId': medialive_destination_mediapackage
                            }
                        }
                    },
                    'Name': 'MobilePackage',
                    'Outputs': [{
                        'OutputSettings': {
                            'MediaPackageOutputSettings': {}
                        },
                        'OutputName': 'MobilePackage_1',
                        'VideoDescriptionName': 'video_wz2iqp',
                        'AudioDescriptionNames': [
                            'audio_2'
                        ],
                        'CaptionDescriptionNames': []
                    }]
                }
            ],
            'TimecodeConfig': {
                'Source': 'EMBEDDED'
            },
            'VideoDescriptions': [
                {
                    'CodecSettings': {
                        'H264Settings': {
                            'AfdSignaling': 'NONE',
                            'ColorMetadata': 'INSERT',
                            'AdaptiveQuantization': 'MEDIUM',
                            'Bitrate': 8000000,
                            'EntropyEncoding': 'CABAC',
                            'FlickerAq': 'ENABLED',
                            'FramerateControl': 'SPECIFIED',
                            'FramerateNumerator': 30,
                            'FramerateDenominator': 1,
                            'GopBReference': 'DISABLED',
                            'GopClosedCadence': 1,
                            'GopNumBFrames': 2,
                            'GopSize': 90,
                            'GopSizeUnits': 'FRAMES',
                            'SubgopLength': 'FIXED',
                            'ScanType': 'PROGRESSIVE',
                            'Level': 'H264_LEVEL_AUTO',
                            'LookAheadRateControl': 'MEDIUM',
                            'NumRefFrames': 1,
                            'ParControl': 'SPECIFIED',
                            'ParNumerator': 1,
                            'ParDenominator': 1,
                            'Profile': 'MAIN',
                            'RateControlMode': 'CBR',
                            'Syntax': 'DEFAULT',
                            'SceneChangeDetect': 'ENABLED',
                            'SpatialAq': 'ENABLED',
                            'TemporalAq': 'ENABLED',
                            'TimecodeInsertion': 'DISABLED'
                        }
                    },
                    'Height': 1080,
                    'Name': 'video_wz2iqp',
                    'RespondToAfd': 'NONE',
                    'Sharpness': 50,
                    'ScalingBehavior': 'DEFAULT',
                    'Width': 1920
                },
                {
                    # Follows the source resolution/codec defaults.
                    'Name': 'video_6er6o',
                    'RespondToAfd': 'NONE',
                    'Sharpness': 50,
                    'ScalingBehavior': 'DEFAULT'
                }
            ]
        },
        InputAttachments=[
            {
                'InputAttachmentName': input_name,
                'InputId': input_id,
                'InputSettings': {
                    'SourceEndBehavior': 'CONTINUE',
                    'InputFilter': 'AUTO',
                    'FilterStrength': 1,
                    'DeblockFilter': 'DISABLED',
                    'DenoiseFilter': 'DISABLED',
                    'AudioSelectors': [],
                    'CaptionSelectors': []
                }
            },
        ],
        InputSpecification={
            'Codec': 'HEVC',
            'Resolution': 'HD',
            'MaximumBitrate': 'MAX_50_MBPS'
        },
        LogLevel='DISABLED',
        Name=f'Channel-{channeluuid}',
        RoleArn=medialive_role_arn,
    )
    ChannelId = medialive_create_channel['Channel']['Id']

    # 5. Save channel detail to DynamoDB.
    # TODO: error handling / retry if any of the AWS calls above fail.
    ChannelItem = {
        'ChannelId': ChannelId,
        'Streamer': None,
        'State': 'IDLE',
        'RTMPEndpoint': input_endpoint,
        'MediaPackageHLSEndpoint': mediapackage_endpoint,
        'VoDS3key': f'delivery/{channeluuid}/'
    }
    # put_item response intentionally unused.
    ddb_channel.put_item(
        Item=ChannelItem
    )

    response = {
        'message': f'added new Channel {ChannelId}',
        'channelitem': ChannelItem
    }
    return {
        'statusCode': 200,
        'headers': {
            'Content-Type': 'application/json',
            'Access-Control-Allow-Origin': '*'
        },
        'body': json.dumps(response)
    }
|
import collections
import functools
import statistics
from . import base
from . import precision
from . import recall
# Public names exported by ``from creme.metrics.f1 import *``.
__all__ = [
    'F1Score',
    'MacroF1Score',
    'MicroF1Score',
    'RollingF1Score',
    'RollingMacroF1Score',
    'RollingMicroF1Score'
]
class BaseF1Score:
    """Traits shared by every F1 metric variant."""

    @property
    def bigger_is_better(self):
        # Larger F1 values indicate better performance.
        return True

    @property
    def requires_labels(self):
        # F1 is computed from hard label predictions, not probabilities.
        return True
class F1Score(BaseF1Score, base.BinaryClassificationMetric):
    """Binary F1 score.

    The F1 score is the harmonic mean of the precision and the recall.

    Example:

        ::

            >>> from creme import metrics
            >>> from sklearn.metrics import f1_score

            >>> y_true = [True, False, True, True, True]
            >>> y_pred = [True, True, False, True, True]

            >>> metric = metrics.F1Score()
            >>> for i, (y_t, y_p) in enumerate(zip(y_true, y_pred)):
            ...     metric = metric.update(y_t, y_p)
            ...     assert metric.get() == f1_score(y_true[:i+1], y_pred[:i+1])

            >>> metric
            F1Score: 0.75

    """

    def __init__(self):
        super().__init__()
        # The two components of the harmonic mean, tracked incrementally.
        self.precision = precision.Precision()
        self.recall = recall.Recall()

    def update(self, y_true, y_pred):
        """Feed one (truth, prediction) pair; returns ``self`` for chaining."""
        self.recall.update(y_true, y_pred)
        self.precision.update(y_true, y_pred)
        return self

    def get(self):
        """Return the current F1 value."""
        p = self.precision.get()
        r = self.recall.get()
        # harmonic_mean returns 0 whenever either component is 0.
        return statistics.harmonic_mean((p, r))
class RollingF1Score(F1Score):
    """Binary F1 score over a sliding window of recent observations.

    Same harmonic mean of precision and recall as `F1Score`, but only the
    last ``window_size`` observations contribute.

    Example:

        ::

            >>> from creme import metrics
            >>> from sklearn.metrics import f1_score

            >>> y_true = [True, False, True, True, True]
            >>> y_pred = [True, True, False, True, True]

            >>> metric = metrics.RollingF1Score(window_size=3)
            >>> for y_t, y_p in zip(y_true, y_pred):
            ...     print(metric.update(y_t, y_p).get())
            1.0
            0.666666...
            0.5
            0.666666...
            0.666666...

    """

    def __init__(self, window_size):
        super().__init__()
        # Swap in windowed precision/recall components.
        self.precision = precision.RollingPrecision(window_size=window_size)
        self.recall = recall.RollingRecall(window_size=window_size)
class MacroF1Score(BaseF1Score, base.MultiClassificationMetric):
    """Macro-average F1 score.

    The arithmetic mean of the per-class binary F1 scores, where each class
    is treated as a one-vs-rest binary problem.

    Example:

        ::

            >>> from creme import metrics
            >>> from sklearn.metrics import f1_score

            >>> y_true = [0, 1, 2, 2, 2]
            >>> y_pred = [0, 0, 2, 2, 1]

            >>> metric = metrics.MacroF1Score()
            >>> for i, (y_t, y_p) in enumerate(zip(y_true, y_pred)):
            ...     metric = metric.update(y_t, y_p)
            ...     print(metric.get(), f1_score(y_true[:i+1], y_pred[:i+1], average='macro'))
            1.0 1.0
            0.333333... 0.333333...
            0.555555... 0.555555...
            0.555555... 0.555555...
            0.488888... 0.488888...

            >>> metric
            MacroF1Score: 0.488889

    """

    def __init__(self):
        # One binary F1 tracker per class seen so far.
        self.f1_scores = collections.defaultdict(F1Score)
        self.classes = set()

    def update(self, y_true, y_pred):
        self.classes.update((y_true, y_pred))
        # Every known class is updated as a one-vs-rest binary outcome.
        for label in self.classes:
            self.f1_scores[label].update(y_true == label, y_pred == label)
        return self

    def get(self):
        total = sum(score.get() for score in self.f1_scores.values())
        if not total:
            return 0.
        return total / len(self.f1_scores)
class RollingMacroF1Score(MacroF1Score):
    """Macro-average F1 score over a sliding window.

    Arithmetic mean of per-class binary F1 scores, each computed over the
    last ``window_size`` observations.

    Example:

        ::

            >>> from creme import metrics
            >>> from sklearn.metrics import f1_score

            >>> y_true = [0, 1, 2, 2, 2]
            >>> y_pred = [0, 0, 2, 2, 1]

            >>> metric = metrics.RollingMacroF1Score(window_size=3)
            >>> for y_t, y_p in zip(y_true, y_pred):
            ...     print(metric.update(y_t, y_p).get())
            1.0
            0.333333...
            0.555555...
            0.555555...
            0.488888...

            >>> metric
            RollingMacroF1Score: 0.488889

    """

    def __init__(self, window_size):
        # Each class gets its own windowed binary F1 tracker; partial binds
        # the window size so defaultdict can construct them on demand.
        self.f1_scores = collections.defaultdict(functools.partial(RollingF1Score, window_size))
        self.classes = set()
class MicroF1Score(precision.MicroPrecision):
    """Micro-average F1 score.

    Micro-averaging pools every individual decision, which makes the micro
    F1 score numerically identical to both micro precision and micro recall
    — hence this class simply inherits the micro precision implementation.

    Example:

        ::

            >>> from creme import metrics
            >>> from sklearn.metrics import f1_score

            >>> y_true = [0, 1, 2, 2, 2]
            >>> y_pred = [0, 0, 2, 2, 1]

            >>> metric = metrics.MicroF1Score()
            >>> for i, (y_t, y_p) in enumerate(zip(y_true, y_pred)):
            ...     metric = metric.update(y_t, y_p)
            ...     print(metric.get(), f1_score(y_true[:i+1], y_pred[:i+1], average='micro'))
            1.0 1.0
            0.5 0.5
            0.666666... 0.666666...
            0.75 0.75
            0.6 0.6

            >>> metric
            MicroF1Score: 0.6

    References:
        1. `Why are precision, recall and F1 score equal when using micro averaging in a multi-class problem? <https://simonhessner.de/why-are-precision-recall-and-f1-score-equal-when-using-micro-averaging-in-a-multi-class-problem/>`_

    """
class RollingMicroF1Score(precision.RollingMicroPrecision):
    """Micro-average F1 score over a sliding window.

    As with `MicroF1Score`, micro-averaging makes the F1 value identical to
    micro precision and micro recall, so the windowed micro precision
    implementation is inherited unchanged.

    Example:

        ::

            >>> from creme import metrics
            >>> from sklearn.metrics import f1_score

            >>> y_true = [0, 1, 2, 2, 2]
            >>> y_pred = [0, 0, 2, 2, 1]

            >>> metric = metrics.RollingMicroF1Score(window_size=3)
            >>> for y_t, y_p in zip(y_true, y_pred):
            ...     print(metric.update(y_t, y_p).get())
            1.0
            0.5
            0.666666...
            0.666666...
            0.666666...

            >>> metric
            RollingMicroF1Score: 0.666667

    References:
        1. `Why are precision, recall and F1 score equal when using micro averaging in a multi-class problem? <https://simonhessner.de/why-are-precision-recall-and-f1-score-equal-when-using-micro-averaging-in-a-multi-class-problem/>`_

    """
|
from . import engine as css_engine
from .constants import (
ALIGN_CONTENT_CHOICES, ALIGN_ITEMS_CHOICES, ALIGN_SELF_CHOICES, AUTO,
BORDER_COLOR_CHOICES, BORDER_STYLE_CHOICES, BORDER_WIDTH_CHOICES,
BOX_OFFSET_CHOICES, CLEAR_CHOICES, DIRECTION_CHOICES, DISPLAY_CHOICES,
FLEX_BASIS_CHOICES, FLEX_DIRECTION_CHOICES, FLEX_GROW_CHOICES,
FLEX_SHRINK_CHOICES, FLEX_START, FLEX_WRAP_CHOICES, FLOAT_CHOICES,
GRID_AUTO_CHOICES, GRID_AUTO_FLOW_CHOICES, GRID_GAP_CHOICES,
GRID_PLACEMENT_CHOICES, GRID_TEMPLATE_AREA_CHOICES, GRID_TEMPLATE_CHOICES,
INLINE, JUSTIFY_CONTENT_CHOICES, LTR, MARGIN_CHOICES, MAX_SIZE_CHOICES,
MIN_SIZE_CHOICES, NORMAL, NOWRAP, PADDING_CHOICES, POSITION_CHOICES, ROW,
SIZE_CHOICES, STATIC, STRETCH, TRANSPARENT, UNICODE_BIDI_CHOICES,
Z_INDEX_CHOICES, ORDER_CHOICES
)
# Registry of every CSS property name created by the factory helpers below.
_CSS_PROPERTIES = set()


def unvalidated_property(name, choices, initial):
    """Define a CSS property attribute whose assigned values are stored as-is.

    Only *initial* is validated against *choices*; values assigned later are
    accepted without validation. Changing the value flags the node dirty.
    """
    initial = choices.validate(initial)
    attr = '_%s' % name

    def getter(self):
        return getattr(self, attr, initial)

    def setter(self, value):
        # Only a genuine change marks the node dirty.
        if value != getattr(self, attr, initial):
            setattr(self, attr, value)
            self.dirty = True

    def deleter(self):
        try:
            delattr(self, attr)
            self.dirty = True
        except AttributeError:
            # Property was never set; deleting is a no-op.
            pass

    _CSS_PROPERTIES.add(name)
    return property(getter, setter, deleter)
def validated_property(name, choices, initial):
    """Define a CSS property attribute that validates every assigned value.

    Assignments are normalized through ``choices.validate``; invalid values
    raise ``ValueError``. Changing the value flags the node dirty.
    """
    initial = choices.validate(initial)
    attr = '_%s' % name

    def getter(self):
        return getattr(self, attr, initial)

    def setter(self, value):
        try:
            value = choices.validate(value)
        except ValueError:
            # Re-raise with the property name and the accepted choices.
            raise ValueError("Invalid value '%s' for CSS property '%s'; Valid values are: %s" % (
                value, name, choices
            ))
        if value != getattr(self, attr, initial):
            setattr(self, attr, value)
            self.dirty = True

    def deleter(self):
        try:
            delattr(self, attr)
            self.dirty = True
        except AttributeError:
            # Property was never set; deleting is a no-op.
            pass

    _CSS_PROPERTIES.add(name)
    return property(getter, setter, deleter)
def directional_property(name, initial):
    """Define a property that proxies the top/right/bottom/left variants.

    *name* is a template such as ``'margin%s'``; the proxy reads and writes
    the ``_top``/``_right``/``_bottom``/``_left`` substituted attributes.

    Assignment follows the CSS shorthand rules:
      * 4-tuple: (top, right, bottom, left)
      * 3-tuple: (top, right-and-left, bottom)
      * 2-tuple: (top-and-bottom, right-and-left)
      * 1-tuple or scalar: all four sides
    """
    def getter(self):
        return (
            getattr(self, name % '_top', initial),
            getattr(self, name % '_right', initial),
            getattr(self, name % '_bottom', initial),
            getattr(self, name % '_left', initial),
        )

    def setter(self, value):
        if isinstance(value, tuple):
            if len(value) == 4:
                setattr(self, name % '_top', value[0])
                setattr(self, name % '_right', value[1])
                setattr(self, name % '_bottom', value[2])
                setattr(self, name % '_left', value[3])
            elif len(value) == 3:
                # CSS 3-value shorthand: the right value also applies to left.
                setattr(self, name % '_top', value[0])
                setattr(self, name % '_right', value[1])
                setattr(self, name % '_bottom', value[2])
                setattr(self, name % '_left', value[1])
            elif len(value) == 2:
                setattr(self, name % '_top', value[0])
                setattr(self, name % '_right', value[1])
                setattr(self, name % '_bottom', value[0])
                setattr(self, name % '_left', value[1])
            elif len(value) == 1:
                setattr(self, name % '_top', value[0])
                setattr(self, name % '_right', value[0])
                setattr(self, name % '_bottom', value[0])
                setattr(self, name % '_left', value[0])
            else:
                # BUGFIX: corrected grammar of the message ("an number" -> "a number").
                raise ValueError("Invalid value for '%s'; value must be a number, or a 1-4 tuple." % (name % ''))
        else:
            # A bare scalar applies to all four sides.
            setattr(self, name % '_top', value)
            setattr(self, name % '_right', value)
            setattr(self, name % '_bottom', value)
            setattr(self, name % '_left', value)

    def deleter(self):
        delattr(self, name % '_top')
        delattr(self, name % '_right')
        delattr(self, name % '_bottom')
        delattr(self, name % '_left')

    _CSS_PROPERTIES.add(name % '')
    _CSS_PROPERTIES.add(name % '_top')
    _CSS_PROPERTIES.add(name % '_right')
    _CSS_PROPERTIES.add(name % '_bottom')
    _CSS_PROPERTIES.add(name % '_left')
    return property(getter, setter, deleter)
class CSS:
def __init__(self, **style):
self._node = None
self.set(**style)
######################################################################
# Style properties
######################################################################
# 8. Box model #######################################################
# 8.3 Margin properties
margin_top = validated_property('margin_top', choices=MARGIN_CHOICES, initial=0)
margin_right = validated_property('margin_right', choices=MARGIN_CHOICES, initial=0)
margin_bottom = validated_property('margin_bottom', choices=MARGIN_CHOICES, initial=0)
margin_left = validated_property('margin_left', choices=MARGIN_CHOICES, initial=0)
margin = directional_property('margin%s', initial=0)
# 8.4 Padding properties
padding_top = validated_property('padding_top', choices=PADDING_CHOICES, initial=0)
padding_right = validated_property('padding_right', choices=PADDING_CHOICES, initial=0)
padding_bottom = validated_property('padding_bottom', choices=PADDING_CHOICES, initial=0)
padding_left = validated_property('padding_left', choices=PADDING_CHOICES, initial=0)
padding = directional_property('padding%s', initial=0)
# 8.5 Border properties
# 8.5.1 Border width
border_top_width = validated_property('border_top_width', choices=BORDER_WIDTH_CHOICES, initial=0)
border_right_width = validated_property('border_right_width', choices=BORDER_WIDTH_CHOICES, initial=0)
# (continuation of the CSS 2.1 property declarations for this style class)
border_bottom_width = validated_property('border_bottom_width', choices=BORDER_WIDTH_CHOICES, initial=0)
border_left_width = validated_property('border_left_width', choices=BORDER_WIDTH_CHOICES, initial=0)
border_width = directional_property('border%s_width', initial=0)
# 8.5.2 Border color
border_top_color = validated_property('border_top_color', choices=BORDER_COLOR_CHOICES, initial=TRANSPARENT)
border_right_color = validated_property('border_right_color', choices=BORDER_COLOR_CHOICES, initial=TRANSPARENT)
border_bottom_color = validated_property('border_bottom_color', choices=BORDER_COLOR_CHOICES, initial=TRANSPARENT)
border_left_color = validated_property('border_left_color', choices=BORDER_COLOR_CHOICES, initial=TRANSPARENT)
# NOTE(review): the four per-side colors default to TRANSPARENT while the
# shorthand below passes initial=0 -- confirm directional_property ignores
# `initial`, otherwise these defaults are inconsistent.
border_color = directional_property('border%s_color', initial=0)
# 8.5.3 Border style
border_top_style = validated_property('border_top_style', choices=BORDER_STYLE_CHOICES, initial=None)
border_right_style = validated_property('border_right_style', choices=BORDER_STYLE_CHOICES, initial=None)
border_bottom_style = validated_property('border_bottom_style', choices=BORDER_STYLE_CHOICES, initial=None)
border_left_style = validated_property('border_left_style', choices=BORDER_STYLE_CHOICES, initial=None)
border_style = directional_property('border%s_style', initial=None)
# 8.5.4 Border shorthand properties
# border_top
# border_right
# border_bottom
# border_left
# border
# 9. Visual formatting model #########################################
# 9.2.4 The display property
display = validated_property('display', choices=DISPLAY_CHOICES, initial=INLINE)
# 9.3 Positioning schemes
position = validated_property('position', choices=POSITION_CHOICES, initial=STATIC)
# 9.3.2 Box offsets
top = validated_property('top', choices=BOX_OFFSET_CHOICES, initial=AUTO)
bottom = validated_property('bottom', choices=BOX_OFFSET_CHOICES, initial=AUTO)
left = validated_property('left', choices=BOX_OFFSET_CHOICES, initial=AUTO)
right = validated_property('right', choices=BOX_OFFSET_CHOICES, initial=AUTO)
# 9.5.1 Positioning the float
# (`float` is the CSS property name; shadowing the builtin is intentional here)
float = validated_property('float', choices=FLOAT_CHOICES, initial=None)
# 9.5.2 Controlling flow next to floats
clear = validated_property('clear', choices=CLEAR_CHOICES, initial=None)
# 9.9 Layered Presentation
z_index = validated_property('z_index', choices=Z_INDEX_CHOICES, initial=AUTO)
# 9.10 Text Direction
direction = validated_property('direction', choices=DIRECTION_CHOICES, initial=LTR)
unicode_bidi = validated_property('unicode_bidi', choices=UNICODE_BIDI_CHOICES, initial=NORMAL)
# 10. Visual formatting model details ################################
# 10.2 Content width
width = validated_property('width', choices=SIZE_CHOICES, initial=AUTO)
# 10.4 Minimum and maximum width
# Initial value updated by Flexbox 4.5
min_width = validated_property('min_width', choices=MIN_SIZE_CHOICES, initial=AUTO)
max_width = validated_property('max_width', choices=MAX_SIZE_CHOICES, initial=None)
# 10.5 Content height
height = validated_property('height', choices=SIZE_CHOICES, initial=AUTO)
# 10.7 Minimum and maximum heights
# Initial value updated by Flexbox 4.5
min_height = validated_property('min_height', choices=MIN_SIZE_CHOICES, initial=AUTO)
max_height = validated_property('max_height', choices=MAX_SIZE_CHOICES, initial=None)
# 10.8 Leading and half-leading
# line_height
# vertical_align
# 11. Visual effects #################################################
# 11.1.1 Overflow
# overflow
# 11.1.2 Clip
# clip
# 11.2 Visibility
# visibility
# 12. Visual effects #################################################
# 12.2 The content property
# content
# 12.3 Quotation marks
# quotes
# 12.4 Automatic counters and numbering
# counter-reset
# counter-increment
# 12.5 Lists
# list_style_type
# list_style_image
# list_style_position
# list_style
# 13. Paged media ####################################################
# 13.3.1 Page break properties
# page_break_before
# page_break_after
# page_break_inside
# 13.3.2 Breaks inside elements
# orphans
# widows
# 14. Colors and backgrounds #########################################
# 14.1 Foreground color
# color
# 14.2.1 Background properties
# background_color
# background_image
# background_repeat
# background_attachment
# background_position
# background
# 15. Fonts ##########################################################
# 15.3 Font family
# font_family
# 15.4 Font Styling
# font_style
# 15.5 Small-caps
# font_variant
# 15.6 Font boldness
# font_weight
# 15.7 Font size
# font_size
# 15.8 Shorthand font property
# font
# 16. Text ###########################################################
# 16.1 Indentation
# text_indent
# 16.2 Alignment
# text_align
# 16.3 Decoration
# text_decoration
# 16.4 Letter and word spacing
# letter_spacing
# word_spacing
# 16.5 Capitalization
# text_transform
# 16.6 White space
# white_space
# 17. Tables #########################################################
# 17.4.1 Caption position and alignment
# caption_side
# 17.5.2 Table width algorithms
# table_layout
# 17.6 Borders
# border_collapse
# border_spacing
# empty_cells
# 18. User interface #################################################
# 18.1 Cursors
# cursor
# 18.4 Dynamic outlines
# outline_width
# outline_style
# outline_color
# outline
######################################################################
# Flexbox properties
######################################################################
# 5. Ordering and orientation ########################################
# 5.1 Flex flow direction
# flex_direction = validated_property('flex_direction', choices=FLEX_DIRECTION_CHOICES, initial=ROW)
# 5.2 Flex line wrapping
# flex_wrap = validated_property('flex_wrap', choices=FLEX_WRAP_CHOICES, initial=NOWRAP)
# 5.3 Flex direction and wrap
# flex_flow =
# 5.4 Display order
# order = validated_property('order', choices=ORDER_CHOICES, initial=0)
# 7. Flexibility #####################################################
# 7.2 Components of flexibility
# flex_grow = validated_property('flex_grow', choices=FLEX_GROW_CHOICES, initial=0)
# flex_shrink = validated_property('flex_shrink', choices=FLEX_SHRINK_CHOICES, initial=1)
# flex_basis = validated_property('flex_basis', choices=FLEX_BASIS_CHOICES, initial=AUTO)
# 7.1 The 'flex' shorthand
# flex =
# 8. Alignment #######################################################
# 8.2 Axis alignment
# justify_content = validated_property('justify_content', choices=JUSTIFY_CONTENT_CHOICES, initial=FLEX_START)
# 8.3 Cross-axis alignment
# align_items = validated_property('align_items', choices=ALIGN_ITEMS_CHOICES, initial=STRETCH)
# align_self = validated_property('align_self', choices=ALIGN_SELF_CHOICES, initial=AUTO)
# 8.4 Packing flex lines
# align_content = validated_property('align_content', choices=ALIGN_CONTENT_CHOICES, initial=STRETCH)
######################################################################
# Grid properties
######################################################################
# 7. Defining the grid ###############################################
# 7.2 Explicit track sizing
# grid_template_columns = validated_property('grid_template_columns', choices=GRID_TEMPLATE_CHOICES, initial=None)
# grid_template_rows = validated_property('grid_template_rows', choices=GRID_TEMPLATE_CHOICES, initial=None)
# 7.3 Named Areas
# grid_template_areas = validated_property('grid_template_areas', choices=GRID_TEMPLATE_AREA_CHOICES, initial=None)
# 7.4 Explicit grid shorthand
# grid_template =
# 7.6 Implicit track sizing
# grid_auto_columns = validated_property('grid_auto_columns', choices=GRID_AUTO_CHOICES, initial=AUTO)
# grid_auto_rows = validated_property('grid_auto_rows', choices=GRID_AUTO_CHOICES, initial=AUTO)
# 7.7 Automatic placement
# grid_auto_flow = validated_property('grid_auto_flow', choices=GRID_AUTO_FLOW_CHOICES, initial=ROW)
# 7.8 Grid definition shorthand
# grid =
# 8. Placing grid items ##############################################
# 8.3 Line-based placement
# grid_row_start = validated_property('grid_row_start', choices=GRID_PLACEMENT_CHOICES, initial=AUTO)
# grid_column_start = validated_property('grid_column_start', choices=GRID_PLACEMENT_CHOICES, initial=AUTO)
# grid_row_end = validated_property('grid_row_end', choices=GRID_PLACEMENT_CHOICES, initial=AUTO)
# grid_column_end = validated_property('grid_column_end', choices=GRID_PLACEMENT_CHOICES, initial=AUTO)
# 8.4 Placement shorthands
# grid_row =
# grid_column =
# grid_area =
# 10. Alignment and spacing ##########################################
# 10.1 Gutters
# grid_row_gap = validated_property('grid_row_gap', choices=GRID_GAP_CHOICES, initial=0)
# grid_column_gap = validated_property('grid_column_gap', choices=GRID_GAP_CHOICES, initial=0)
# grid_gap =
######################################################################
# Proxy the dirtiness state of layout calculations
######################################################################
@property
def dirty(self):
    # Proxy for the owning node's layout dirtiness; implicitly returns
    # None when this declaration is not attached to a node.
    if self._node:
        return self._node.layout.dirty
@dirty.setter
def dirty(self, value):
    # Silently ignored when not attached to a node.
    if self._node:
        self._node.layout.dirty = value
######################################################################
# Obtain the layout module
######################################################################
def engine(self):
    """Return the layout engine module used to apply this style."""
    return css_engine
######################################################################
# Style manipulation
######################################################################
def set(self, **styles):
    """Apply several style properties at once.

    A value of ``None`` removes the property; an unknown property
    name raises ``NameError``.
    """
    for prop, new_value in styles.items():
        if not hasattr(self, prop):
            raise NameError("Unknown CSS style '%s'" % prop)
        if new_value is None:
            delattr(self, prop)
            continue
        setattr(self, prop, new_value)
def copy(self, node=None):
    """Return a new CSS declaration carrying the same explicitly-set
    styles, optionally attached to *node*."""
    duplicate = CSS()
    duplicate._node = node
    missing = object()
    for prop in _CSS_PROPERTIES:
        # only properties that were explicitly set have a '_<name>' slot
        value = getattr(self, '_%s' % prop, missing)
        if value is not missing:
            setattr(duplicate, prop, value)
    return duplicate
######################################################################
# Get the rendered form of the style declaration
######################################################################
def __str__(self):
    """Render the declaration as CSS text (``name: value; ...``),
    listing only explicitly-set properties, sorted by name."""
    missing = object()
    rendered = []
    for prop in _CSS_PROPERTIES:
        value = getattr(self, '_%s' % prop, missing)
        if value is not missing:
            rendered.append((prop.replace('_', '-'), value))
    return "; ".join("%s: %s" % pair for pair in sorted(rendered))
|
import csv
import pdb
import pickle
import sys
from collections import defaultdict
from operator import itemgetter
import numpy as np
np.seterr(all='raise')
from bs4 import BeautifulSoup as BS
from nltk.corpus import wordnet as wn
from numpy.linalg import norm
from scipy import sparse as sp
from ALaCarte.compute import *
from ALaCarte.cooc import *
from text_embedding.features import *
from text_embedding.vectors import *
# Paths and hyperparameters for the WSD experiment.
BROWNROOT = '/n/fs/nlpdatasets/ALaCache/Brown' # output of running ALaCarte/cooc.py on Brown Corpus (can get corpus from http://nlp.cs.princeton.edu/ALaCarte/corpora)
SYNSETROOT = '/n/fs/nlpdatasets/ALaCache/Brown_Synset' # output of running ALaCarte/cooc.py on synset features using Brown Corpus vocabulary (can get vocab from http://nlp.cs.princeton.edu/ALaCarte/corpora)
FILEDIR = os.path.dirname(os.path.realpath(__file__)) + '/'
# maps a task POS tag's first letter to the WordNet POS argument ('as' = adjective + satellite)
POSMAP = {'j': 'as', 'n': 'n', 'r': 'r', 'v': 'v'}
DIM = 300  # word-vector dimensionality
VECTORFILE = '/n/fs/nlpdatasets/NLPrinceton/enwiki-20161001_glove_300.txt' # GloVe trained on Wikipedia (can get vectors from http://nlp.cs.princeton.edu/ALaCarte/vectors)
def SemEval2013Task12():
    """Load the SemEval-2013 Task 12 English all-words WSD test set.

    Returns
    -------
    data : list of (instance id, lemma, POS tag, context word list)
    gold : list of (instance id, set of gold WordNet sense-key suffixes)
    """
    with open(FILEDIR+'data-SemEval2013_Task12/test/keys/gold/wordnet/wordnet.en.key', 'r') as f:
        # keep only the lemma-independent part of each sense key (text after '%')
        gold = [(split[1], set(key.split('%')[1] for key in split[2:])) for split in (line.split() for line in f)]
    ids = {entry[0] for entry in gold}
    with open(FILEDIR+'data-SemEval2013_Task12/test/data/multilingual-all-words.en.xml', 'r') as f:
        soup = BS(f, 'lxml')
    # context = all sibling tokens of the instance in its sentence, lowercased,
    # underscores expanded, then split on punctuation
    data = [(inst['id'], inst['lemma'], inst['pos'], list(split_on_punctuation(' '.join(child.text for child in sent.children if not child == inst and not child == '\n').lower().replace('_', ' ')))) for text in soup('text') for sent in text('sentence') for inst in sent('instance') if inst['id'] in ids]
    return data, gold
def SemEval2015Task13():
    """Load the SemEval-2015 Task 13 English test set (WordNet-keyed only).

    Returns
    -------
    data : list of (instance id, lemma, POS tag, context word list)
    gold : list of (instance id, set of gold WordNet sense-key suffixes),
        aligned with ``data`` (keys for the same id are merged)
    """
    with open(FILEDIR+'data-SemEval2015_Task13/test/keys/gold_keys/EN/semeval-2015-task-13-en.key', 'r') as f:
        # only keep instances annotated with WordNet ('wn:') keys
        gold = [(split[1], set(key.split('%')[1] for key in split[2:] if key[:3] == 'wn:')) for split in (line.split() for line in f if '\twn:' in line)]
    ids = {entry[0] for entry in gold}
    with open(FILEDIR+'data-SemEval2015_Task13/test/data/semeval-2015-task-13-en.xml','r') as f:
        soup = BS(f, 'lxml')
    # keep only target words whose POS maps to WordNet and that have synsets
    data = [(wf['id'], wf['lemma'], wf['pos'], list(split_on_punctuation(' '.join(child.text for child in sent.children if not child == wf and not child == '\n')))) for text in soup('text') for sent in text('sentence') for wf in sent('wf') if wf['id'] in ids and wf['pos'][0].lower() in POSMAP and wn.synsets(wf['lemma'], POSMAP[wf['pos'][0].lower()])]
    # merge gold keys per instance id, then re-order gold to match data
    id2keys = defaultdict(lambda: set())
    for entry, keys in gold:
        id2keys[entry] = id2keys[entry].union(keys)
    gold = [(entry, id2keys[entry]) for entry, _, _, _ in data]
    return data, gold
def evaluate(retrieved, truth):
    """Score retrieved sense keys against gold key sets.

    Returns (precision, recall, F1): per-instance precision is a 0/1 hit,
    recall credits a hit with 1/|gold keys|, and F1 averages the
    harmonic-mean contribution of each hit over all instances.
    """
    hits = []
    partial = []
    for key, entry in zip(retrieved, truth):
        gold_keys = entry[1]
        correct = key in gold_keys
        hits.append(correct)
        partial.append(correct / len(gold_keys))
    hits = np.array(hits)
    partial = np.array(partial)
    # only hit positions contribute to F1 (denominator is >= 1 there)
    f1_terms = (hits * partial)[hits] / (hits + partial)[hits]
    return np.mean(hits), np.mean(partial), 2.0 * sum(f1_terms) / len(truth)
def cossim(u, v):
    """Cosine similarity of two vectors; 0.0 if either has zero norm."""
    norm_u = norm(u)
    if not norm_u:
        return 0.0
    norm_v = norm(v)
    if not norm_v:
        return 0.0
    return np.inner(u, v) / norm_u / norm_v
def wordnet():
    """Train an a la carte context transform on Brown-corpus co-occurrences,
    build WordNet synset embeddings with it, and evaluate WSD on SemEval-2013
    Task 12 and SemEval-2015 Task 13 using four sense-selection strategies:
    most-frequent sense, context ("a la carte"), gloss-only, and combined.
    """
    write('Training Context Transform\n')
    cooc, words, counts = alacache(BROWNROOT)
    wordset = set(words)
    # NOTE(review): every entry of `words` is in `wordset` by construction, so
    # this mask is trivially all-True; a real vocabulary filter may have been
    # intended here -- confirm against the original experiment setup.
    select = np.array([word in wordset for word in words])
    C = cooc[select][:,select]
    words = [word for sel, word in zip(select, words) if sel]
    counts = counts[select]
    X = vocab2mat(words, vectorfile=VECTORFILE, dimension=DIM, unit=False)
    # fit the linear context -> embedding map, weighted by log word counts
    A = linear_transform(C, X, counts, weights=np.log(counts), fit_intercept=False, n_jobs=-1)
    write('Constructing Synset Embeddings\n')
    cooc, _, _, synsets, synsetcounts = alacache(SYNSETROOT, 'synset')
    # synset embedding = A applied to the synset's average context vector
    s2v = dict(zip(synsets, A.predict(cooc[:,select].dot(X)/synsetcounts[:,None])))
    output = {'counts': dict(zip(synsets, synsetcounts))}
    for task, load in [('SemEval2013 Task 12', SemEval2013Task12), ('SemEval2015 Task 13', SemEval2015Task13)]:
        alldata, allgold = load()
        write('Evaluating WSD on '+task+'\n')
        mfs = []    # most-frequent-sense predictions
        alc = []    # a la carte (context-vector) predictions
        gls = []    # gloss-only predictions
        cmb = []    # combined predictions
        truth = []
        output[task] = {}
        for pos in sorted(POSMAP.values()):
            try:
                # restrict to instances with this part of speech; zip(*())
                # raises ValueError when there are none
                data, gold = zip(*((dentry, gentry) for dentry, gentry in zip(alldata, allgold) if POSMAP[dentry[2][0].lower()] == pos))
                output[task][pos] = {}
            except ValueError:
                continue
            write('\tPOS: '+pos+'\n')
            truth.extend(gold)
            s2c = {synset: count for synset, count in zip(synsets, synsetcounts)}
            # baseline: pick the sense whose synset is most frequent in the corpus
            keys = [max(((synset.lemmas()[0].key().split('%')[1], s2c.get(synset.name(), 0)) for synset in wn.synsets(entry[1], POSMAP[entry[2][0].lower()])), key=itemgetter(1))[0] for entry in data]
            pr, re, f1 = evaluate(keys, gold)
            write('\t\tMF Sense : P='+str(pr)+', R='+str(re)+', F1='+str(f1)+'\n')
            mfs.extend(keys)
            w2v = vocab2vecs(wordset.union({word for entry in data for word in entry[-1]}), vectorfile=VECTORFILE, dimension=DIM, unit=False)
            z = np.zeros(DIM)
            # context vector = transformed sum of the instance's context words
            convecs = [A.coef_.dot(sum((w2v.get(word, z) for word in entry[-1]), z)) for entry in data]
            keys = [max(((synset.lemmas()[0].key().split('%')[1], cossim(s2v.get(synset.name(), z), convec)) for synset in wn.synsets(entry[1], POSMAP[entry[2][0].lower()])), key=itemgetter(1))[0] for entry, convec in zip(data, convecs)]
            pr, re, f1 = evaluate(keys, gold)
            write('\t\tA La Carte: P='+str(pr)+', R='+str(re)+', F1='+str(f1)+'\n')
            alc.extend(keys)
            # per-synset tallies of [incorrect, correct] context predictions
            for d, g, k in zip(data, gold, keys):
                correct = int(k in g[1])
                for synset in wn.synsets(d[1], POSMAP[d[2][0].lower()]):
                    if synset.lemmas()[0].key().split('%')[1] in g[1]:
                        name = synset.name()
                        output[task][pos].setdefault(name, [0, 0])
                        output[task][pos][name][correct] += 1
            tasksyns = {synset for entry in data for synset in wn.synsets(entry[1], POSMAP[entry[2][0].lower()])}
            w2v = vocab2vecs({word for synset in tasksyns for sent in [synset.definition()]+synset.examples() for word in split_on_punctuation(sent.lower())}, vectorfile=VECTORFILE, dimension=DIM, unit=False)
            # gloss embedding = transformed sum of definition+example words,
            # excluding the synset's own lemma words
            glsvecs = {}
            for synset in tasksyns:
                glsvecs[synset] = np.zeros(DIM)
                lemmas = set.union(*(set(lemma.name().lower().split('_')) for lemma in synset.lemmas()))
                for sent in [synset.definition()]+synset.examples():
                    for word in split_on_punctuation(sent.lower()):
                        if not word in lemmas:
                            glsvecs[synset] += w2v.get(word, z)
                glsvecs[synset] = A.coef_.dot(glsvecs[synset])
            keys = [max(((synset.lemmas()[0].key().split('%')[1], cossim(glsvecs.get(synset, z), convec)) for synset in wn.synsets(entry[1], POSMAP[entry[2][0].lower()])), key=itemgetter(1))[0] for entry, convec in zip(data, convecs)]
            pr, re, f1 = evaluate(keys, gold)
            write('\t\tGloss-Only: P='+str(pr)+', R='+str(re)+', F1='+str(f1)+'\n')
            gls.extend(keys)
            # combined: prefer the corpus synset vector, falling back to gloss
            keys = [max(((synset.lemmas()[0].key().split('%')[1], cossim(s2v.get(synset.name(), glsvecs.get(synset, z)), convec)) for synset in wn.synsets(entry[1], POSMAP[entry[2][0].lower()])), key=itemgetter(1))[0] for entry, convec in zip(data, convecs)]
            pr, re, f1 = evaluate(keys, gold)
            write('\t\tCombined : P='+str(pr)+', R='+str(re)+', F1='+str(f1)+'\n')
            cmb.extend(keys)
        write('\tAll POS \n')
        for meth, keys in [('MF Sense ', mfs), ('A La Carte', alc), ('Gloss-Only', gls), ('Combined ', cmb)]:
            pr, re, f1 = evaluate(keys, truth)
            write('\t\t'+meth+': P='+str(pr)+', R='+str(re)+', F1='+str(f1)+'\n')
if __name__ == '__main__':
    # Run the full WSD evaluation when invoked as a script.
    wordnet()
|
from django.shortcuts import render, render_to_response
from django.views.generic import TemplateView
from Proyecto.models import *
import json
from django.shortcuts import render, redirect
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.http import HttpResponseRedirect, JsonResponse
from django.urls import path, reverse
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.core.paginator import Paginator
from .forms import CreateUserForm, UserForm, AlgoForm
from .models import *
from .models import Hotel
from django.views.generic import TemplateView
from django.db.models import Q
# Create your views here.
def Pruebas(request):
    """Hotel listing view with optional name/address search, 10 per page."""
    term = request.GET.get("buscar")
    hoteles = Hotel.objects.all()
    if term:
        hoteles = Hotel.objects.filter(
            Q(nombre__icontains=term) |
            Q(direccion__icontains=term)
        ).distinct()
    page_obj = Paginator(hoteles, 10).get_page(request.GET.get('page'))
    return render(request, "Proyecto/home2.html", {
        'hoteles': page_obj,
        'hoteles2': hoteles,
    })
def LoginView(request):
    """Authenticate a user from POSTed credentials.

    Redirects home on success; re-renders the login page (with an error
    message) on failure or on GET.
    """
    if request.method == 'POST':
        username = request.POST.get('username')
        user = authenticate(
            request,
            username=username,
            password=request.POST.get('password'),
        )
        if user is not None:
            messages.success(request, "Bienvenido " + username)
            login(request, user)
            return redirect('home')
        messages.error(request, "Username OR password is incorrect ")
    return render(request, 'Proyecto/login.html', {})
def RegisterView(request):
    """Sign-up view: creates the auth user plus a linked Cliente profile."""
    form = CreateUserForm(request.POST) if request.method == "POST" else CreateUserForm()
    if request.method == "POST" and form.is_valid():
        form.save()
        cleaned = form.cleaned_data
        # mirror the new auth user into the Cliente profile table
        Cliente(
            nombre=cleaned.get('first_name'),
            apellido=cleaned.get('last_name'),
            mail=cleaned.get('email'),
            username=cleaned.get('username'),
        ).save()
        messages.success(request, "Account was created for " + cleaned.get('username'))
        return redirect('login')
    return render(request, 'Proyecto/register.html', {'form': form})
def LogoutUser(request):
    """End the current session and send the user home with a goodbye note."""
    logout(request)
    # message is added after logout so it lives in the fresh session
    messages.success(request, "Has salido de tu sesion, gracias por elegirnos")
    return redirect('home')
def HotelesView2(request, Hotel):
    """Detail page for one hotel: its rooms (paginated, 9 per page) plus
    total and occupied room counts."""
    from .models import Hotel as hotel  # the URL parameter shadows the model name
    selected = hotel.objects.get(pk=Hotel)
    rooms = selected.habitaciones.all()
    occupied = rooms.filter(estado="Ocupado").count()
    total = selected.habitaciones.count()
    page_obj = Paginator(rooms, 9).get_page(request.GET.get('page'))
    context = {
        'hoteles': selected,
        'habitaciones': page_obj,
        'h': rooms,
        'hab': total,
        'hab_o': occupied,
    }
    return render(request, 'Proyecto/hoteles2.html', context)
def HabitacionView(request, Habitacion):
    """Room detail page; the hotel list can be narrowed by a 'search' query.

    Fix: dropped a no-op ``str(search_post)`` whose result was discarded,
    and the redundant ``else: pass`` branch.
    """
    from .models import Habitacion as habitacion  # URL parameter shadows the model name
    habitaciones = habitacion.objects.get(pk=Habitacion)
    hotel = Hotel.objects.all()
    search_post = request.GET.get('search')
    if search_post:
        hotel = Hotel.objects.filter(Q(nombre__icontains=search_post))
    context = {
        'habitacion': habitaciones,
        'hotel': hotel,
    }
    return render(request, 'Proyecto/habitaciones.html', context)
def hacerReserva(request, Habitacion):
    """Book a room for the logged-in user and mark it occupied.

    Fix: the room was fetched twice (``get(pk=...)`` and again
    ``get(id=...)``); reuse the object from the first query.
    """
    from .models import Habitacion as habitacion  # URL parameter shadows the model name
    habi = habitacion.objects.get(pk=Habitacion)
    if request.user.is_authenticated:
        cliente = Cliente.objects.get(username=request.user.username)
        cliente.reservas.add(habi)
        cliente.save()
        habi.estado = "Ocupado"
        habi.save()
        messages.success(request, "Tu reserva ha sido realizada con exito")
        return redirect('home')
    messages.error(request, "Tienes que estar logeado para hacer una reserva")
    return redirect('login')
def deshacerReserva(request):
    """List the current user's reservations so one can be cancelled."""
    if request.user.is_authenticated:
        if request.user.username == 'admin':
            # the admin account has no Cliente profile/reservations
            messages.error(request,"no puedes acceder a esta pagina")
            return redirect('home')
        else:
            cliente = Cliente.objects.get(username=request.user.username)
            habitaciones = cliente.reservas.all()
            hab_count = habitaciones.count()
            paginator = Paginator(habitaciones, 10)
            page_number = request.GET.get('page')
            page_obj = paginator.get_page(page_number)
            context = {
                'cliente': cliente,
                'reserva':page_obj,
                'hab_count': hab_count,
            }
            return render(request, "Proyecto/eliminarReserva.html", context)
    else:
        # NOTE(review): anonymous users are shown the Cliente with id=1 --
        # looks like a placeholder/debug fallback; confirm this is intended
        # rather than redirecting to the login page.
        cliente = Cliente.objects.get(id=1)
        context = {
            'cliente': cliente
        }
        return render(request, "Proyecto/eliminarReserva.html", context)
def eliminarReserva(request, Habitacion):
    """Cancel a reservation for the logged-in user and free the room.

    Fixes: removed an unreachable trailing ``return redirect('home')``
    (both branches already return), and the room was fetched twice
    (``get(pk=...)`` then ``get(id=...)``) -- reuse the first object.
    """
    from .models import Habitacion as habitacion  # URL parameter shadows the model name
    habi = habitacion.objects.get(pk=Habitacion)
    if request.user.is_authenticated:
        cliente = Cliente.objects.get(username=request.user.username)
        cliente.reservas.remove(habi)
        cliente.save()
        habi.estado = "Disponible"
        habi.save()
        messages.success(request, "Tu reserva ha sido cancelada con exito")
        return redirect('home')
    messages.error(request, "Tienes que estar logeado para hacer una reserva")
    return redirect('login')
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
from skbio.util._decorator import experimental
from ._cutils import center_distance_matrix_cy
@experimental(as_of="0.4.0")
def mean_and_std(a, axis=None, weights=None, with_mean=True, with_std=True,
                 ddof=0):
    """Compute the weighted average and standard deviation along the
    specified axis.

    Parameters
    ----------
    a : array_like
        Calculate average and standard deviation of these values.
    axis : int, optional
        Axis along which the statistics are computed. The default is
        to compute them on the flattened array.
    weights : array_like, optional
        An array of weights associated with the values in `a`. Each
        value in `a` contributes to the average according to its
        associated weight. The weights array can either be 1-D (in
        which case its length must be the size of `a` along the given
        axis) or of the same shape as `a`. If `weights=None`, then all
        data in `a` are assumed to have a weight equal to one.
    with_mean : bool, optional, defaults to True
        Compute average if True.
    with_std : bool, optional, defaults to True
        Compute standard deviation if True.
    ddof : int, optional, defaults to 0
        It means delta degrees of freedom. Variance is calculated by
        dividing by `n - ddof` (where `n` is the number of
        elements). By default it computes the maximum likelihood
        estimator.

    Returns
    -------
    average, std
        Return the average and standard deviation along the specified
        axis. If any of them was not required, returns `None` instead
    """
    if not (with_mean or with_std):
        raise ValueError("Either the mean or standard deviation need to be"
                         " computed.")
    a = np.asarray(a)
    if weights is None:
        # unweighted case: defer entirely to numpy
        avg = a.mean(axis=axis) if with_mean else None
        std = a.std(axis=axis, ddof=ddof) if with_std else None
    else:
        avg = np.average(a, axis=axis, weights=weights)
        if with_std:
            if axis is None:
                variance = np.average((a - avg)**2, weights=weights)
            else:
                # Make sure that the subtraction to compute variance works for
                # multidimensional arrays
                a_rolled = np.rollaxis(a, axis)
                # Numpy doesn't have a weighted std implementation, but this is
                # stable and fast
                variance = np.average((a_rolled - avg)**2, axis=0,
                                      weights=weights)
            if ddof != 0:  # Don't waste time if variance doesn't need scaling
                if axis is None:
                    variance *= a.size / (a.size - ddof)
                else:
                    variance *= a.shape[axis] / (a.shape[axis] - ddof)
            std = np.sqrt(variance)
        else:
            std = None
        # weighted average was needed to get std; drop it if not requested
        avg = avg if with_mean else None
    return avg, std
@experimental(as_of="0.4.0")
def scale(a, weights=None, with_mean=True, with_std=True, ddof=0, copy=True):
    """Scale array by columns to have weighted average 0 and standard
    deviation 1.

    Parameters
    ----------
    a : array_like
        2D array whose columns are standardized according to the
        weights.
    weights : array_like, optional
        Array of weights associated with the columns of `a`. By
        default, the scaling is unweighted.
    with_mean : bool, optional, defaults to True
        Center columns to have 0 weighted mean.
    with_std : bool, optional, defaults to True
        Scale columns to have unit weighted std.
    ddof : int, optional, defaults to 0
        If with_std is True, variance is calculated by dividing by `n
        - ddof` (where `n` is the number of elements). By default it
        computes the maximum likelihood estimator.
    copy : bool, optional, defaults to True
        Whether to perform the standardization in place, or return a
        new copy of `a`.

    Returns
    -------
    2D ndarray
        Scaled array.

    Notes
    -----
    Wherever std equals 0, it is replaced by 1 in order to avoid
    division by zero.
    """
    # Fix: convert to a float64 ndarray *before* copying. The previous order
    # (copy, then asarray) allocated twice for array input and, for nested
    # lists, made a useless shallow list copy before conversion.
    a = np.asarray(a, dtype=np.float64)
    if copy:
        a = a.copy()
    avg, std = mean_and_std(a, axis=0, weights=weights, with_mean=with_mean,
                            with_std=with_std, ddof=ddof)
    if with_mean:
        a -= avg
    if with_std:
        std[std == 0] = 1.0  # avoid division by zero for constant columns
        a /= std
    return a
@experimental(as_of="0.4.0")
def svd_rank(M_shape, S, tol=None):
    """Matrix rank of `M` given its singular values `S`.

    See `np.linalg.matrix_rank` for a rationale on the tolerance
    (we're not using that function because it doesn't let us reuse a
    precomputed SVD)."""
    if tol is None:
        # same default as matrix_rank: sigma_max * max(m, n) * eps
        tol = S.max() * max(M_shape) * np.finfo(S.dtype).eps
    return (S > tol).sum()
@experimental(as_of="0.4.0")
def corr(x, y=None):
    """Computes correlation between columns of `x`, or `x` and `y`.

    Correlation is the covariance of columnwise-standardized matrices:
    each matrix is centered and scaled to unit variance, then their
    covariance is computed.

    Parameters
    ----------
    x : 2D array_like
        Matrix of shape (n, p). Correlation between its columns will
        be computed.
    y : 2D array_like, optional
        Matrix of shape (n, q). If provided, the correlation is
        computed between the columns of `x` and the columns of
        `y`. Else, it's computed between the columns of `x`.

    Returns
    -------
    correlation
        Matrix of computed correlations. Has shape (p, p) if `y` is
        not provided, else has shape (p, q).
    """
    x = np.asarray(x)
    if y is None:
        x = scale(x)
        y = x
    else:
        y = np.asarray(y)
        if y.shape[0] != x.shape[0]:
            raise ValueError("Both matrices must have the same number of rows")
        x, y = scale(x), scale(y)
    # scale() standardized with ddof=0 (dividing by n), so dividing by n
    # here yields the correlation directly.
    return x.T.dot(y) / x.shape[0]
@experimental(as_of="0.4.0")
def e_matrix(distance_matrix):
    """Compute E matrix from a distance matrix.

    Squares the input elementwise and multiplies by -1/2. Eq. 9.20 in
    Legendre & Legendre 1998."""
    return distance_matrix * distance_matrix * -0.5
def f_matrix(E_matrix):
    """Compute F matrix from E matrix by double (Gower) centering.

    For each element the corresponding row and column means are
    subtracted and the grand mean is added back. Eq. 9.21 in
    Legendre & Legendre 1998."""
    grand_mean = E_matrix.mean()
    centered = E_matrix - E_matrix.mean(axis=1, keepdims=True)
    centered -= E_matrix.mean(axis=0, keepdims=True)
    return centered + grand_mean
def center_distance_matrix(distance_matrix, inplace=False):
    """
    Centers a distance matrix.

    Note: If the used distance was euclidean, pairwise distances
    needn't be computed from the data table Y because F_matrix =
    Y.dot(Y.T) (if Y has been centered).
    But since we're expecting distance_matrix to be non-euclidian,
    we do the following computation as per
    Numerical Ecology (Legendre & Legendre 1998).

    Parameters
    ----------
    distance_matrix : 2D array_like
        Distance matrix.
    inplace : bool, optional
        Whether or not to center the given distance matrix in-place, which
        is more efficient in terms of memory and computation.
    """
    if not distance_matrix.flags.c_contiguous:
        # the Cython kernel requires C-contiguous storage; re-lay the data out
        distance_matrix = np.asarray(distance_matrix, order='C')
    if inplace:
        out = distance_matrix
    else:
        out = np.empty(distance_matrix.shape, distance_matrix.dtype)
    center_distance_matrix_cy(distance_matrix, out)
    return out
def _e_matrix_inplace(distance_matrix):
    """
    Compute E matrix from a distance matrix, row by row.

    Squares each element and divides by -2. Eq. 9.20 in
    Legendre & Legendre 1998.

    Modified from :func:`skbio.stats.ordination.e_matrix`, operating one
    row at a time to avoid allocating a full temporary matrix.

    Parameters
    ----------
    distance_matrix : 2D array_like
        Distance matrix.
    """
    dm = distance_matrix.astype(float)
    for row in dm:
        # rows are views into dm, so these updates happen in place
        row *= row
        row /= -2.0
    return dm
def _f_matrix_inplace(e_matrix):
    """
    Compute F matrix from E matrix, row by row.

    Centering step: for each element the corresponding row and column
    means are subtracted and the grand mean is added. Eq. 9.21 in
    Legendre & Legendre 1998.

    Modified from :func:`skbio.stats.ordination.f_matrix`, operating one
    row at a time to avoid allocating full temporary matrices.

    Parameters
    ----------
    e_matrix : 2D array_like
        A matrix representing the "E matrix" as described above.
    """
    em = e_matrix.astype(float)
    n = len(em)
    row_means = np.zeros(n, dtype=float)
    col_means = np.zeros(n, dtype=float)
    grand_total = 0.0
    # first pass: accumulate row sums, column sums and the grand total
    for i, row in enumerate(em):
        row_means[i] = row.mean()
        grand_total += row.sum()
        col_means += row
    grand_mean = grand_total / n ** 2
    col_means /= n
    # second pass: center each row in place (rows are views into em)
    for i, row in enumerate(em):
        row -= row_means[i]
        row -= col_means
        row += grand_mean
    return em
|
import unittest, sys, os
python2 = sys.version_info < (3, 0, 0)
if python2:
from StringIO import StringIO
else:
from io import StringIO
from bibtex_merger.core import *
from bibtex_merger.extension import *
class test_core(unittest.TestCase):
###########
# __init__
###########
def test_base1(self):
    """Core accepts a single Extension instance."""
    Core(ext=Extension(ext="none"))
def test_base2(self):
    """Core accepts a list of Extension instances."""
    Core(ext=[Extension(ext="none"), Extension(ext="none")])
def test_base_bad1(self):
    """A non-Extension ext value is rejected."""
    self.assertRaises(ValueError, Core, ext="")
def test_base_bad2(self):
    """A list with no Extension instances is rejected."""
    self.assertRaises(ValueError, Core, ext=["", 1234])
def test_base_bad3(self):
    """A list mixing Extensions with other values is rejected."""
    self.assertRaises(ValueError, Core, ext=[Extension(ext="none"), 1234])
def test_prefFile_good(self):
    """prefFile accepts omitted, None, and string values."""
    Core(ext=Extension(ext="none"))
    Core(ext=Extension(ext="none"), prefFile=None)
    Core(ext=Extension(ext="none"), prefFile="pref.cfg")
def test_prefFile_bad(self):
    """Non-string prefFile values are rejected."""
    self.assertRaises(ValueError, Core, ext=Extension(ext="none"), prefFile=12345)
    self.assertRaises(ValueError, Core, ext=Extension(ext="none"), prefFile=Extension(ext="none"))
def test_out(self):
    """out accepts omitted, stdout, and file-like (StringIO) values."""
    Core(ext=Extension(ext="none"))
    Core(ext=Extension(ext="none"), out=sys.stdout)
    Core(ext=Extension(ext="none"), out=StringIO())
def test_out_bad(self):
    """A non-stream out value is rejected."""
    self.assertRaises(ValueError, Core, ext=Extension(ext="none"), out="invalid")
def test_killLevel(self):
    """killLevel defaults to 'warning'; the constructor arg selects others."""
    c = Core(ext=Extension(ext="none"))
    self.assertEqual(c.killLevel, c.killLevels['warning'])
    c = Core(ext=Extension(ext="none"), killLevel='warning')
    self.assertEqual(c.killLevel, c.killLevels['warning'])
    c = Core(ext=Extension(ext="none"), killLevel='error')
    self.assertEqual(c.killLevel, c.killLevels['error'])
def test_killLevel_bad(self):
    """Unknown or non-string killLevel values are rejected."""
    self.assertRaises(ValueError, Core, ext=Extension(ext="none"), killLevel="invalid")
    self.assertRaises(ValueError, Core, ext=Extension(ext="none"), killLevel=12345)
###########
# Properties
###########
def attemptChange_killLevel(self):
    """Helper: rebinding the read-only killLevel property (should raise)."""
    m = Core(ext=Extension(ext="none"))
    m.killLevel = "bad"
def attemptChange_out(self):
    """Helper: rebinding the read-only out property (should raise)."""
    m = Core(ext=Extension(ext="none"))
    m.out = "bad"
def attemptChange_extensionObjects(self):
    """Helper: rebinding the read-only extensionObjects property (should raise)."""
    m = Core(ext=Extension(ext="none"))
    m.extensionObjects = "bad"
def attemptChange_extensionRegexs(self):
    """Helper: rebinding the read-only extensionRegexs property (should raise)."""
    m = Core(ext=Extension(ext="none"))
    m.extensionRegexs = "bad"
def attemptChange_extensionPatterns(self):
    """Helper: rebinding the read-only extensionPatterns property (should raise)."""
    m = Core(ext=Extension(ext="none"))
    m.extensionPatterns = "bad"
def attemptChange_preferences(self):
m = Core(ext=Extension(ext="none"))
m.preferences = "bad"
def attemptChange_preferencesFile(self):
m = Core(ext=Extension(ext="none"))
m.preferencesFile = "bad"
def test_properties(self):
m = Core(ext=Extension(ext="none"))
m.killLevel
m.out
m.extensionObjects
m.extensionRegexs
m.extensionPatterns
m.preferences
m.preferencesFile
def test_properties_bad(self):
self.assertRaises(AttributeError, self.attemptChange_killLevel)
self.assertRaises(AttributeError, self.attemptChange_out)
self.assertRaises(AttributeError, self.attemptChange_extensionObjects)
self.assertRaises(AttributeError, self.attemptChange_extensionRegexs)
self.assertRaises(AttributeError, self.attemptChange_extensionPatterns)
self.assertRaises(AttributeError, self.attemptChange_preferences)
self.assertRaises(AttributeError, self.attemptChange_preferencesFile)
###########
# __title__
###########
def test_title1(self):
    """A short title is upper-cased and wrapped in an 80-column banner."""
    out = StringIO()
    c = Core(ext=Extension(ext="none"), out=out)
    c.__title__(title="Random Title")
    output = out.getvalue().strip()
    self.assertEqual(output, "################################################################################\n" +
                     "# RANDOM TITLE #\n" +
                     "################################################################################")

def test_title2(self):
    """An over-long (100-char) title is truncated to fit the banner width."""
    out = StringIO()
    c = Core(ext=Extension(ext="none"), out=out)
    c.__title__(title="a" * 100)
    output = out.getvalue().strip()
    self.assertEqual(output, "################################################################################\n" +
                     "# AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA #\n" +
                     "################################################################################")

def test_title3(self):
    """A non-string title must raise ValueError."""
    out = StringIO()
    c = Core(ext=Extension(ext="none"), out=out)
    self.assertRaises(ValueError, c.__title__, title=123)

###########
# __subtitle__
###########
def test_subtitle1(self):
    """A short subtitle is upper-cased and underlined."""
    out = StringIO()
    c = Core(ext=Extension(ext="none"), out=out)
    c.__subtitle__(title="Random Subtitle")
    output = out.getvalue().strip()
    self.assertEqual(output, "|| RANDOM SUBTITLE ||\n" +
                     "================================================================================")

def test_subtitle2(self):
    """An over-long subtitle is truncated to the 80-column layout."""
    out = StringIO()
    c = Core(ext=Extension(ext="none"), out=out)
    c.__subtitle__(title="a" * 100)
    output = out.getvalue().strip()
    self.assertEqual(output, "|| AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA ||\n" +
                     "================================================================================")

def test_subtitle3(self):
    """A non-string subtitle must raise ValueError."""
    out = StringIO()
    c = Core(ext=Extension(ext="none"), out=out)
    self.assertRaises(ValueError, c.__subtitle__, title=123)
###########
# extensionNames
###########
def test_extensionNames1(self):
    """A single extension is compiled into one anchored regex pattern."""
    c = Core(ext=Extension(ext="test"), prefFile=None)
    self.assertEqual(c.extensionPatterns, [r"\.test$"])

def test_extensionNames2(self):
    """Multiple extensions keep their input order in extensionPatterns."""
    c = Core(ext=[Extension(ext="test1"),
                  Extension(ext="test2"),
                  Extension(ext="test3")], prefFile=None)
    self.assertEqual(c.extensionPatterns, [r"\.test1$", r"\.test2$", r"\.test3$"])

###########
# extensionObjects
###########
def test_extensionObjects1(self):
    """extensionObjects holds Extension instances with anchored patterns."""
    c = Core(ext=Extension(ext="test"), prefFile=None)
    self.assertEqual(all(isinstance(x, Extension) for x in c.extensionObjects), True)
    self.assertEqual(c.extensionObjects[0].extension, r"\.test$")

def test_extensionObjects2(self):
    """The input list's order is preserved in extensionObjects."""
    c = Core(ext=[Extension(ext="test1"),
                  Extension(ext="test2"),
                  Extension(ext="test3")], prefFile=None)
    self.assertEqual(all(isinstance(x, Extension) for x in c.extensionObjects), True)
    self.assertEqual(c.extensionObjects[0].extension, r"\.test1$")
    self.assertEqual(c.extensionObjects[1].extension, r"\.test2$")
    self.assertEqual(c.extensionObjects[2].extension, r"\.test3$")
###########
# __read__
###########
# Directory holding the fixture files used by the read/write tests below.
dataDir = "bibtex_merger/tests/data"

def tRead(self, filename):
    """Trivial reader handed to Extension; returns the file's full text."""
    with open(filename, "r") as f:
        return f.read()
    # NOTE(review): unreachable after the return above — kept byte-identical
    # from the original; presumably a tripwire, confirm before removing.
    raise Exception

def test_read(self):
    """__read__ dispatches to the registered reader for a known extension."""
    c = Core(ext=Extension(ext="txt", reader=self.tRead))
    self.assertEqual(c.__read__("{}/sample.txt".format(self.dataDir)), "Sample file with text")

def test_read_bad_ext(self):
    """Reading a file with no registered extension must raise CoreError."""
    c = Core(ext=Extension(ext="txt", reader=self.tRead))
    self.assertRaises(CoreError, c.__read__, "{}/sample.random".format(self.dataDir))

###########
# __write__
###########
def tWrite(self, filename, content):
    """Trivial writer handed to Extension; writes and returns the count."""
    with open(filename, "w") as f:
        return f.write(content)
    # NOTE(review): unreachable after the return above — see tRead.
    raise Exception

def test_writeread(self):
    """Round-trip: what __write__ stores, __read__ must return."""
    c = Core(ext=Extension(ext="txt", reader=self.tRead, writer=self.tWrite))
    f = "sample2.txt"
    t = "Some random text to insert"
    c.__write__("{}/{}".format(self.dataDir, f), t)
    self.assertEqual(c.__read__("{}/{}".format(self.dataDir, f)), t)
    os.remove("{}/{}".format(self.dataDir, f))

def test_write_bad_ext(self):
    """Writing a file with no registered extension must raise CoreError."""
    c = Core(ext=Extension(ext="txt", reader=self.tRead, writer=self.tWrite))
    f = "sample2.random"
    t = "Some random text to insert"
    self.assertRaises(CoreError, c.__write__, "{}/{}".format(self.dataDir, f), t)
###########
# preferences
###########
def test_no_preferences_attemptreadwrite(self):
    """With prefFile=None, reading or writing preferences must fail."""
    c = Core(ext=Extension(ext="none"), prefFile=None)
    self.assertRaises(CoreError, c.__preferencesRead__)
    self.assertRaises(CoreError, c.__preferencesWrite__)

###########
# __preferencesRead__
###########
# Expected section name and items of the sample.cfg fixture.
samplePrefSect = "Preferences"
samplePrefItems = [('value1', 'Foo'), ('value2', 'Bar'), ('value3', '2015')]

def test_preferencesRead(self):
    """__preferencesRead__ parses the fixture into a config object."""
    c = Core(ext=Extension(ext="none"), prefFile="{}/sample.cfg".format(self.dataDir))
    pref = c.__preferencesRead__()
    self.assertEqual(pref.sections(), [self.samplePrefSect])
    self.assertEqual(pref.items(self.samplePrefSect), self.samplePrefItems)

###########
# __preferencesWrite__
###########
def test_preferencesWrite1(self):
    """Sections added in memory survive a write/read round-trip."""
    f = "sample2.cfg"
    sect = "Random1"
    items = [("n1", "Foo"), ("n2", "Bar"), ("n3", "Baz")]
    c = Core(ext=Extension(ext="none"), prefFile="{}/{}".format(self.dataDir, f))
    pref = c.__preferencesRead__()
    pref.add_section(sect)
    for o, v in items:
        pref.set(sect, o, v)
    pref = c.__preferencesWrite__()
    pref = c.__preferencesRead__()
    self.assertEqual(pref.sections(), [sect])
    self.assertEqual(pref.items(sect), items)
    os.remove("{}/{}".format(self.dataDir, f))

def test_preferencesWrite2(self):
    """Writing an unmodified config leaves the fixture contents intact."""
    f = "sample.cfg"
    c = Core(ext=Extension(ext="none"), prefFile="{}/{}".format(self.dataDir, f))
    c.__preferencesWrite__()
    pref = c.__preferencesRead__()
    self.assertEqual(pref.sections(), [self.samplePrefSect])
    self.assertEqual(pref.items(self.samplePrefSect), self.samplePrefItems)
###########
# __info__
###########
def test_info(self):
    """__info__ writes the message verbatim to the configured stream."""
    out = StringIO()
    c = Core(ext=Extension(ext="none"), out=out)
    c.__info__("just a message")
    output = out.getvalue().strip()
    self.assertEqual(output, "just a message")

def test_info_bad(self):
    """A non-string message must raise ValueError."""
    c = Core(ext=Extension(ext="none"))
    self.assertRaises(ValueError, c.__info__, 12345)

###########
# __warning__
###########
def test_warning1(self):
    """With killLevel='error', a warning is only logged, not raised."""
    out = StringIO()
    c = Core(ext=Extension(ext="none"), out=out, killLevel='error')
    c.__warning__(ValueError("just a message"))
    output = out.getvalue().strip()
    self.assertEqual(output, "WARNING: ValueError: just a message")

def test_warning2(self):
    """With killLevel='warning', a warning re-raises the given exception."""
    out = StringIO()
    c = Core(ext=Extension(ext="none"), out=out, killLevel='warning')
    self.assertRaises(ValueError, c.__warning__, ValueError("just a message"))

def test_warning_bad(self):
    """A non-exception argument must raise ValueError."""
    c = Core(ext=Extension(ext="none"))
    self.assertRaises(ValueError, c.__warning__, 12345)

###########
# __error__
###########
def test_error1(self):
    """Errors raise regardless of killLevel ('warning' here)."""
    out = StringIO()
    c = Core(ext=Extension(ext="none"), out=out, killLevel='warning')
    self.assertRaises(ValueError, c.__error__, ValueError("just a message"))

def test_error2(self):
    """Errors raise regardless of killLevel ('error' here)."""
    out = StringIO()
    c = Core(ext=Extension(ext="none"), out=out, killLevel='error')
    self.assertRaises(ValueError, c.__error__, ValueError("just a message"))

def test_error_bad(self):
    """A non-exception argument must raise ValueError."""
    c = Core(ext=Extension(ext="none"))
    self.assertRaises(ValueError, c.__error__, 12345)
class test_core_error(unittest.TestCase):
    """Unit tests for the CoreError exception type."""

    ###########
    # Helpers
    ###########
    def coreError(self, msg=-1):
        """Raise a CoreError; the -1 default is a sentinel meaning 'no argument'."""
        if msg == -1:
            raise CoreError()
        else:
            raise CoreError(msg)

    def coreErrorAttemptMsgChange(self):
        """Try to mutate the read-only `msg` attribute of a CoreError."""
        ce = CoreError(msg="red")
        ce.msg = "blue"
        # FIX: the original statement was `raise ee`, an undefined name — had
        # the assignment above ever succeeded, this helper would have masked
        # the regression with a NameError instead of re-raising the exception.
        raise ce

    ###########
    # __init__
    ###########
    def test_CoreError_base(self):
        """CoreError accepts a string message, None, or no argument at all."""
        self.assertRaises(CoreError, self.coreError, msg="red")
        self.assertRaises(CoreError, self.coreError, msg=None)
        self.assertRaises(CoreError, self.coreError)

    def test_CoreError_bad_msg(self):
        """Non-string messages are rejected with ValueError."""
        self.assertRaises(ValueError, self.coreError, msg=12345)

    def test_CoreError_catching(self):
        """str() of a caught CoreError equals its message."""
        msg = "test"
        try:
            raise CoreError(msg=msg)
        except CoreError as e:
            self.assertEqual(str(e), msg)

    ###########
    # msg
    ###########
    def test_CoreError_msg_change(self):
        """CoreError.msg must be read-only."""
        self.assertRaises(AttributeError, self.coreErrorAttemptMsgChange)
if __name__ == '__main__':
unittest.main() |
<filename>plugins/modules/oci_devops_repository_commit_facts.py<gh_stars>100-1000
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_devops_repository_commit_facts
short_description: Fetches details about one or multiple RepositoryCommit resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple RepositoryCommit resources in Oracle Cloud Infrastructure
- Returns a list of commits.
- If I(commit_id) is specified, the details of a single RepositoryCommit will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
repository_id:
description:
- Unique repository identifier.
type: str
required: true
commit_id:
description:
- A filter to return only resources that match the given commit ID.
- Required to get a specific repository_commit.
type: str
aliases: ["id"]
ref_name:
description:
- A filter to return only resources that match the given reference name.
type: str
exclude_ref_name:
description:
- A filter to exclude commits that match the given reference name.
type: str
file_path:
description:
- A filter to return only commits that affect any of the specified paths.
type: str
timestamp_greater_than_or_equal_to:
description:
- A filter to return commits only created after the specified timestamp value.
type: str
timestamp_less_than_or_equal_to:
description:
- A filter to return commits only created before the specified timestamp value.
type: str
commit_message:
description:
- A filter to return any commits that contains the given message.
type: str
author_name:
description:
- A filter to return any commits that are pushed by the requested author.
type: str
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific repository_commit
oci_devops_repository_commit_facts:
# required
repository_id: "ocid1.repository.oc1..xxxxxxEXAMPLExxxxxx"
commit_id: "ocid1.commit.oc1..xxxxxxEXAMPLExxxxxx"
- name: List repository_commits
oci_devops_repository_commit_facts:
# required
repository_id: "ocid1.repository.oc1..xxxxxxEXAMPLExxxxxx"
# optional
ref_name: ref_name_example
exclude_ref_name: exclude_ref_name_example
file_path: file_path_example
timestamp_greater_than_or_equal_to: 2013-10-20T19:20:30+01:00
timestamp_less_than_or_equal_to: 2013-10-20T19:20:30+01:00
commit_message: commit_message_example
author_name: author_name_example
"""
RETURN = """
repository_commits:
description:
- List of RepositoryCommit resources
returned: on success
type: complex
contains:
commit_id:
description:
- Commit hash pointed to by reference name.
returned: on success
type: str
sample: "ocid1.commit.oc1..xxxxxxEXAMPLExxxxxx"
commit_message:
description:
- The commit message.
returned: on success
type: str
sample: commit_message_example
author_name:
description:
- Name of the author of the repository.
returned: on success
type: str
sample: author_name_example
author_email:
description:
- Email of the author of the repository.
returned: on success
type: str
sample: author_email_example
committer_name:
description:
- Name of who creates the commit.
returned: on success
type: str
sample: committer_name_example
committer_email:
description:
- Email of who creates the commit.
returned: on success
type: str
sample: committer_email_example
parent_commit_ids:
description:
- An array of parent commit IDs of created commit.
returned: on success
type: list
sample: []
time_created:
description:
- The time at which commit was created.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
tree_id:
description:
- Tree information for the specified commit.
returned: on success
type: str
sample: "ocid1.tree.oc1..xxxxxxEXAMPLExxxxxx"
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). Example: `{\\"bar-key\\": \\"value\\"}`"
- Returned for list operation
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace. See L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
- Returned for list operation
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
sample: [{
"commit_id": "ocid1.commit.oc1..xxxxxxEXAMPLExxxxxx",
"commit_message": "commit_message_example",
"author_name": "author_name_example",
"author_email": "author_email_example",
"committer_name": "committer_name_example",
"committer_email": "committer_email_example",
"parent_commit_ids": [],
"time_created": "2013-10-20T19:20:30+01:00",
"tree_id": "ocid1.tree.oc1..xxxxxxEXAMPLExxxxxx",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}}
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
    from oci.devops import DevopsClient

    # Records whether the optional OCI Python SDK is importable; main()
    # fails the Ansible module cleanly when it is missing.
    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False
class RepositoryCommitFactsHelperGen(OCIResourceFactsHelperBase):
    """Supported operations: get, list"""

    def get_required_params_for_get(self):
        # A single commit lookup needs both the repository and the commit id.
        return ["repository_id", "commit_id"]

    def get_required_params_for_list(self):
        # Listing commits only requires the repository.
        return ["repository_id"]

    def get_resource(self):
        """Fetch one commit, retrying with backoff on transient failures."""
        params = self.module.params
        return oci_common_utils.call_with_backoff(
            self.client.get_commit,
            repository_id=params.get("repository_id"),
            commit_id=params.get("commit_id"),
        )

    def list_resources(self):
        """List commits, forwarding only the optional filters the user set."""
        optional_param_names = (
            "ref_name",
            "exclude_ref_name",
            "file_path",
            "timestamp_greater_than_or_equal_to",
            "timestamp_less_than_or_equal_to",
            "commit_message",
            "author_name",
        )
        # Only pass filters that were actually provided on the module call.
        filters = {
            name: self.module.params[name]
            for name in optional_param_names
            if self.module.params.get(name) is not None
        }
        return oci_common_utils.list_all_resources(
            self.client.list_commits,
            repository_id=self.module.params.get("repository_id"),
            **filters
        )
# Look up a user-supplied customization class by name; the helper returns a
# no-op class when no customization is registered for this module.
RepositoryCommitFactsHelperCustom = get_custom_class(
    "RepositoryCommitFactsHelperCustom"
)


class ResourceFactsHelper(
    RepositoryCommitFactsHelperCustom, RepositoryCommitFactsHelperGen
):
    # MRO places the custom class first so it can override generated behavior.
    pass
def main():
    """Module entry point: build the arg spec, dispatch get/list, exit with facts."""
    spec = oci_common_utils.get_common_arg_spec()
    spec.update(
        dict(
            repository_id=dict(type="str", required=True),
            commit_id=dict(aliases=["id"], type="str"),
            ref_name=dict(type="str"),
            exclude_ref_name=dict(type="str"),
            file_path=dict(type="str"),
            timestamp_greater_than_or_equal_to=dict(type="str"),
            timestamp_less_than_or_equal_to=dict(type="str"),
            commit_message=dict(type="str"),
            author_name=dict(type="str"),
        )
    )

    module = AnsibleModule(argument_spec=spec)

    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    facts_helper = ResourceFactsHelper(
        module=module,
        resource_type="repository_commit",
        service_client_class=DevopsClient,
        namespace="devops",
    )

    # Dispatch on the parameters provided: a commit_id means "get one",
    # otherwise list; anything else is a usage error.
    if facts_helper.is_get():
        result = [facts_helper.get()]
    elif facts_helper.is_list():
        result = facts_helper.list()
    else:
        result = []
        facts_helper.fail()

    module.exit_json(repository_commits=result)
# Standard Ansible module entry point.
if __name__ == "__main__":
    main()
|
from regress import *
from util import *
from dateutil import parser as dateparser
def calc_hl_daily(full_df, horizon):
    """Compute the daily high/low signal (hl0), its winsorized and
    industry/date-demeaned form (hl0_B_ma), and its first `horizon - 1`
    lags, then merge everything back onto `full_df`.

    Assumes `full_df` is indexed by ('date', 'gvkey') and carries 'close',
    'high', 'low', 'ind1' and 'sector' columns — TODO confirm at call site.
    """
    print("Caculating daily hl...")
    result_df = full_df.reset_index()
    # result_df = filter_expandable(result_df)
    result_df = result_df[['close', 'high', 'low', 'date', 'ind1', 'gvkey']]

    print("Calculating hl0...")
    # Raw signal: close relative to the geometric mean of the day's high/low.
    result_df['hl0'] = result_df['close'] / np.sqrt(result_df['high'] * result_df['low'])
    # Clip outliers before demeaning (winsorize is a project helper).
    result_df['hl0_B'] = winsorize(result_df['hl0'])
    result_df = result_df.dropna()

    # Demean within each (date, industry) group to strip cross-sectional bias.
    demean = lambda x: (x - x.mean())
    indgroups = result_df[['hl0_B', 'date', 'ind1']].groupby(['date', 'ind1'], sort=False).transform(demean)
    result_df['hl0_B_ma'] = indgroups['hl0_B']

    result_df.set_index(['date', 'gvkey'], inplace=True)
    # 3-day-lagged raw signal; unstack/shift/stack shifts per-security by date.
    result_df['hl3'] = result_df['hl0'].unstack().shift(3).stack()  # new

    print("Calulating lags...")
    for lag in range(1, horizon):
        shift_df = result_df.unstack().shift(lag).stack()
        result_df['hl' + str(lag) + '_B_ma'] = shift_df['hl0_B_ma']

    # Left-merge back onto the full panel; '_dead' marks duplicate columns
    # which remove_dup_cols (project helper) then strips.
    result_df = pd.merge(full_df, result_df, how='left', on=['date', 'gvkey'], sort=False,
                         suffixes=['', '_dead'])  # new
    result_df = remove_dup_cols(result_df)
    return result_df
def hl_fits(daily_df, full_df, horizon, name, reg_st, reg_ed, out_dir):
    """Regress forward returns on hl0_B_ma for each lag 1..horizon, plot the
    fits, and write per-lag forecast coefficients (and the combined 'hl'
    forecast) into `full_df` for the rows covered by `daily_df`.

    `name` tags the output plot ("in"/"ex" sector); reg_st/reg_ed bound the
    regression window. Returns the augmented `full_df`.
    """
    fits_df = pd.DataFrame(columns=['horizon', 'coef', 'indep', 'tstat', 'nobs', 'stderr'])
    # NOTE: DataFrame.append is deprecated (removed in pandas 2.0); kept as-is
    # because the return type of regress_alpha() is not visible from this file.
    for lag in range(1, horizon + 1):
        fits_df = fits_df.append(regress_alpha(daily_df, 'hl0_B_ma', lag, median=True, start=reg_st, end=reg_ed),
                                 ignore_index=True)
    plot_fit(fits_df, out_dir + "/" + "hl_daily_" + name + "_" + df_dates(daily_df))
    fits_df.set_index(keys=['indep', 'horizon'], inplace=True)

    # FIX: `.ix` was deprecated in pandas 0.20 and removed in 1.0; a single
    # MultiIndex .loc lookup is the equivalent label-based access and works
    # on both old and current pandas.
    coef0 = fits_df.loc[('hl0_B_ma', horizon), 'coef']

    if 'hl' not in full_df.columns:
        print("Creating forecast columns...")
        full_df['hl'] = 0
        full_df['hlC_B_ma_coef'] = np.nan
        for lag in range(1, horizon + 1):
            full_df['hl' + str(lag) + '_B_ma_coef'] = np.nan

    # Per-lag decay coefficient: horizon-coef minus the lag's own coef.
    for lag in range(1, horizon + 1):
        full_df.loc[daily_df.index, 'hl' + str(lag) + '_B_ma_coef'] = coef0 - fits_df.loc[('hl0_B_ma', lag), 'coef']
    # Combine lagged signals into the 'hl' forecast.
    for lag in range(1, horizon):
        full_df.loc[daily_df.index, 'hl'] += full_df['hl' + str(lag) + '_B_ma'] * full_df[
            'hl' + str(lag) + '_B_ma_coef']

    return full_df
def calc_hl_forecast(daily_df, horizon, reg_st, reg_ed, output_dir):
    """Build hl forecasts, fitting the regression separately for securities
    inside ("in") and outside ("ex") sector code 10 (Energy).

    Returns (full_df with forecasts, DataFrame of the per-lag coefficients).
    """
    daily_df = calc_hl_daily(daily_df, horizon)
    sector = 10  # 'Energy'

    print("Running hl for sector code %d" % (sector))
    sector_df = daily_df[daily_df['sector'] == sector]
    full_df = hl_fits(sector_df, daily_df, horizon, "in", reg_st, reg_ed, output_dir)

    # NOTE(review): this pass runs on everything EXCLUDING the sector, but the
    # log line is identical to the in-sector message above — confirm intent.
    print("Running hl for sector code %d" % (sector))
    sector_df = daily_df[daily_df['sector'] != sector]
    full_df = hl_fits(sector_df, daily_df, horizon, "ex", reg_st, reg_ed, output_dir)

    # Collect the per-lag coefficient columns for the caller.
    coefs = []
    for lag in range(1, horizon + 1):
        coefs.append('hl' + str(lag) + '_B_ma_coef')
    coef_df = full_df[coefs]

    # dump_alpha(full_df, 'hl')
    return full_df, coef_df
def six_months_before(date_s):
    """Step a YYYYMMDD half-year boundary string back by six months.

    'YYYY0101' maps to the previous year's 'YYYY0630'; any other value maps
    to the same year's 'YYYY0101'.
    """
    year, month_day = date_s[:4], date_s[-4:]
    if month_day == '0101':
        return str(int(year) - 1) + '0630'
    return year + '0101'
def get_hl(start_s, end_s, dir):
    """Load three half-year raw-data folders ending at `end_s`, build the hl
    forecast over [start_s, end_s], persist it to HDF5 under `dir`, and
    return the coefficient DataFrame.

    NOTE(review): `dir` shadows the builtin of the same name; renaming would
    change the public signature, so it is left as-is.
    """
    lookback = 30  # NOTE(review): unused in this function — confirm before removing
    horizon = 3  # new
    d2 = end_s
    dfs = []
    # Walk backwards from end_s through three half-year folders.
    for i in range(3):
        print("Loading raw data folder %s..." % d2)
        barra_df = pd.read_csv("%s/data/raw/%s/barra_df.csv" % (dir, d2), header=0, sep='|', dtype={'gvkey': str},
                               parse_dates=[0])
        uni_df = pd.read_csv("%s/data/raw/%s/uni_df.csv" % (dir, d2), header=0, sep='|', dtype={'gvkey': str},
                             parse_dates=[0])
        price_df = pd.read_csv("%s/data/raw/%s/price_df.csv" % (dir, d2), header=0, sep='|', dtype={'gvkey': str},
                               parse_dates=[0])
        price_df.set_index(['date', 'gvkey'], inplace=True)
        uni_df.set_index('gvkey', inplace=True)
        barra_df.set_index(['date', 'gvkey'], inplace=True)

        daily_df = merge_barra_data(price_df, barra_df)
        result_df = calc_forward_returns(daily_df, horizon)
        daily_df = daily_df.merge(result_df, on=['date', 'gvkey'])
        # Inner join on the universe keeps only covered securities.
        daily_df = daily_df.join(uni_df[['sedol']], on='gvkey', how='inner')
        daily_df.index.names = ['date', 'gvkey']
        # intra_df = merge_intra_data(daily_df, daybar_df)
        dfs.append(daily_df)
        d2 = six_months_before(d2)

    # Regression window: from the earliest loaded folder up to start_s.
    reg_st = d2
    reg_ed = start_s
    daily_df = pd.concat(dfs).sort_index()
    graphs_dir = dir + "/data/all_graphs"
    full_df, coef_df = calc_hl_forecast(daily_df, horizon, reg_st, reg_ed, graphs_dir)
    # Keep only the requested date window before persisting.
    full_df = full_df.truncate(before=dateparser.parse(start_s), after=dateparser.parse(end_s))
    output_dir = dir + "/data/all"
    full_df.to_hdf('%s/all.%s-%s.h5' % (output_dir, start_s, end_s), 'full_df', mode='w')
    return coef_df
|
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: SMA.py
@time: 2019-10-01 03:56
Fast Moving Average: 短期移动均线,因为窗口越小,均线对价格的灵敏度越高,改变越fast
本质:趋势交易,利用均线的迟滞降低扰动
优点:降低扰动,可以过滤掉噪音,从而显著降低交易频率
主要缺点:
1、由于均线的迟滞,会错过初期的那部分涨幅。
但这是趋势交易不可以避免的结果,否则就是逆势抄底了。。。
2、股票走势一般呈现一种缓涨急跌的走势,这时均线的迟滞就会造成大的回撤
改进措施:
可以通过仓位管理来降低回撤:
如买入时,全仓买入。
等到股价第一次回踩fast_ma时short 1/3的仓位
等到股价第二次回踩fast_ma时short 1/3的仓位
然后装死,直到fast_ma下穿slow_ma时清仓
这里有一些细节,如可以预判,股价即将上穿时 就买入半仓,等到完全上穿后买入剩下的半仓,
没有如期上穿,就择时清仓
卖出时也可以预判,当fast_ma即将下穿slow_ma清仓
买入时点的选择:尽量在14点以后,
改进:
增加交易手续费
开盘价买入
控制回撤
仓位管理
"""
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import ffn
import matplotlib.pyplot as plt
import sys
sys.path.append('/Users/luoyonggui/PycharmProjects/mayiutils_n1/mayiutils/db')
from pymongo_wrapper import PyMongoWrapper
class SMA:
    """Simple moving-average trend strategies backed by MongoDB price data."""

    @classmethod
    def get_stocks_dict(cls):
        """Return (name -> ts_code, ts_code -> name) mappings for all stocks."""
        mongo = PyMongoWrapper()
        table = mongo.getCollection('finance_n', 'stocks_info')
        df = mongo.findAll(table, fieldlist=['ts_code', 'name'], returnFmt='df')
        stock_names = df.name.tolist()
        stock_ts_codes = df.ts_code.tolist()
        return dict(zip(stock_names, stock_ts_codes)), dict(zip(stock_ts_codes, stock_names))

    @classmethod
    def get_stocks_dict_top_n(cls, n, t_date):
        """Return the names of the n best-ranked stocks on trade date t_date."""
        mongo = PyMongoWrapper()
        table = mongo.getCollection('finance_n', 'stock_daily_basic')
        df = mongo.findAll(table, {'trade_date': pd.to_datetime(t_date)}, fieldlist=['ts_code'],
                           sort=[('rank', 1)], limit=n, returnFmt='df')
        stock_reverse_dict = cls.get_stocks_dict()[1]
        return df.ts_code.map(lambda c: stock_reverse_dict[c]).tolist()

    @classmethod
    def get_hist(cls, name=None, ts_code=None, count=365):
        """Load the last `count` daily bars for a stock (by name or ts_code)
        and return a DataFrame with a compounded 'net' value column."""
        if not ts_code:
            stocks_dict = cls.get_stocks_dict()[0]
            ts_code = stocks_dict[name]
        # start_date = datetime.now() - timedelta(days=count)
        mongo = PyMongoWrapper()
        table = mongo.getCollection('finance_n', 'stocks_daily')
        # df = mongo.findAll(table, {'trade_date': {'$gte': start_date}},
        #                    fieldlist=['trade_date', 'pct_chg'], returnFmt='df')
        df = mongo.findAll(table, {'ts_code': ts_code}, fieldlist=['trade_date', 'pct_chg'],
                           sort=[('trade_date', -1)], limit=count, returnFmt='df')
        df.set_index('trade_date', inplace=True)
        df.sort_index(inplace=True)
        # Compound the daily percent changes into a net-value curve.
        df['net'] = (1 + df['pct_chg'] / 100).cumprod()
        del df['pct_chg']
        # print(df.head())
        # print(df.tail())
        return df

    @classmethod
    def sma_base(cls, name=None, ts_code=None, count=365, fast_ma=8, slow_ma=60):
        """Dual moving-average crossover backtest: long while fast_ma is above
        slow_ma, flat otherwise; plots the curves and prints the stats."""
        df = cls.get_hist(name, ts_code, count)
        symbol = 'net'
        data = df
        data['fast_ma'] = data[symbol].rolling(fast_ma).mean()
        data['slow_ma'] = data[symbol].rolling(slow_ma).mean()
        data.dropna(inplace=True)
        data['position'] = np.where(data['fast_ma'] > data['slow_ma'], 1, 0)
        data['position'] = data['position'].shift(1)  # lag the signal: the day the signal appears, its return cannot be captured
        data.plot(secondary_y='position', figsize=(14, 6))
        plt.show()
        data['returns'] = np.log(data[symbol] / data[symbol].shift(1))
        data['strat'] = data['position'] * data['returns']
        data.dropna(inplace=True)
        data['hold_earnings'] = data[['returns']].cumsum().apply(np.exp)
        data['strat_earnings'] = data[['strat']].cumsum().apply(np.exp)
        ax = data[['hold_earnings', 'strat_earnings']].plot(figsize=(14, 6))
        data['position'].plot(ax=ax, secondary_y='position', style='--', figsize=(14, 6))
        plt.show()
        print(np.exp(data[['returns', 'strat']].sum()))
        print(data[['returns', 'strat']].std())
        # compute maximum drawdown
        hold_max_drawdown = ffn.calc_max_drawdown(data.hold_earnings)
        strat_max_drawdown = ffn.calc_max_drawdown(data.strat_earnings)
        print(f'hold_max_drawdown: {hold_max_drawdown}')
        print(f'strat_max_drawdown: {strat_max_drawdown}')

    @classmethod
    def sma_v1(cls, name=None, ts_code=None, count=365, fast_ma=8):
        """Buy when the price is above fast_ma, sell when it falls below."""
        df = cls.get_hist(name, ts_code, count)
        symbol = 'net'
        data = df
        data['fast_ma'] = data[symbol].rolling(fast_ma).mean()
        data.dropna(inplace=True)
        data['position'] = np.where(data.net > data['fast_ma'], 1, 0)
        data['position'] = data['position'].shift(1)  # lag the signal: the day the signal appears, its return cannot be captured
        data.plot(secondary_y='position', figsize=(14, 6))
        plt.show()
        data['returns'] = np.log(data[symbol] / data[symbol].shift(1))
        data['strat'] = data['position'] * data['returns']
        data.dropna(inplace=True)
        data['hold_earnings'] = data[['returns']].cumsum().apply(np.exp)
        data['strat_earnings'] = data[['strat']].cumsum().apply(np.exp)
        ax = data[['hold_earnings', 'strat_earnings']].plot(figsize=(14, 6))
        data['position'].plot(ax=ax, secondary_y='position', style='--', figsize=(14, 6))
        plt.show()
        print(np.exp(data[['returns', 'strat']].sum()))
        print(data[['returns', 'strat']].std())
        # compute maximum drawdown
        hold_max_drawdown = ffn.calc_max_drawdown(data.hold_earnings)
        strat_max_drawdown = ffn.calc_max_drawdown(data.strat_earnings)
        print(f'hold_max_drawdown: {hold_max_drawdown}')
        print(f'strat_max_drawdown: {strat_max_drawdown}')

    @classmethod
    def stock_select(cls, name=None, ts_code=None, fast_ma=8, slow_ma=60):
        """Screen: True when the latest close sits just above (within 1% of)
        the slow moving average — a candidate breakout setup."""
        data = cls.get_hist(name, ts_code, slow_ma)
        symbol = 'net'
        # data['fast_ma'] = data[symbol].rolling(fast_ma).mean()
        data['slow_ma'] = data[symbol].rolling(slow_ma).mean()
        print(data.tail())
        return all([
            # data.iloc[-1].fast_ma < data.iloc[-1].slow_ma,
            data.iloc[-1].slow_ma < data.iloc[-1].net,
            abs(data.iloc[-1].slow_ma - data.iloc[-1].net) < (data.iloc[-1].net * 0.01),
            # abs(data.iloc[-1].slow_ma - data.iloc[-1].net) < (data.iloc[-1].net * 0.01),
            # (data.iloc[-1].slow_ma - data.iloc[-1].fast_ma) < abs(data.iloc[-1].net * 0.01),
            # data.iloc[-1].fast_ma < data.iloc[-2].fast_ma,  # fast MA still trending down
            # data.iloc[-1].net > data.iloc[-1].slow_ma,
            # data.iloc[-1].fast_ma > data.iloc[-2].fast_ma,  # fast MA trending up
            # data.iloc[-1].slow_ma > data.iloc[-2].slow_ma,  # slow MA trending up
        ])

    @classmethod
    def stocks_select(cls, names='all', fast_ma=8, slow_ma=60):
        """Run stock_select over `names` (or the whole universe when 'all')
        and return the list of names that pass the screen."""
        if names == 'all':
            names = cls.get_stocks_dict()[0].keys()
        rs = []
        print(datetime.now())
        print(f'股票池容量:{len(names)}')
        count = 0
        # import multiprocessing
        # pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
        for name in names:
            print(f'{count}, {name}')
            count += 1
            try:
                if cls.stock_select(name, fast_ma=fast_ma, slow_ma=slow_ma):
                    rs.append(name)
            except Exception as e:
                # Best-effort scan: log and skip names whose data lookup fails.
                print(e, name)
        print(datetime.now())
        print(f'符合条件的stock num: {len(rs)}')
        return rs
|
<filename>decode_beam.py
import operator
import torch
import torch.nn as nn
import torch.nn.functional as F
# from Queue import PriorityQueue
from queue import PriorityQueue
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
SOS_token = 0
EOS_token = 1
MAX_LENGTH = 50
class DecoderRNN(nn.Module):
    """Illustrative single-layer GRU decoder producing log-probabilities.

    forward() takes a [B, 1] token batch and a [1, B, H] hidden state and
    returns ([B, output_size] log-probs, updated hidden state).
    """

    def __init__(self, embedding_size, hidden_size, output_size, cell_type, dropout=0.1):
        '''
        Illustrative decoder
        '''
        super(DecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.cell_type = cell_type
        self.embedding = nn.Embedding(num_embeddings=output_size,
                                      embedding_dim=embedding_size,
                                      )
        # FIX: the GRU was bidirectional, which makes its output feature size
        # 2*hidden_size and breaks the (hidden_size -> output_size) projection
        # below, and also breaks the [1, B, H] hidden-state contract used by
        # beam_decode/greedy_decode. A generative decoder must run left-to-
        # right, so it is made unidirectional. (Note: `dropout` only applies
        # between stacked RNN layers, so it is a no-op on this 1-layer GRU.)
        self.rnn = nn.GRU(embedding_size, hidden_size, bidirectional=False, dropout=dropout, batch_first=False)
        self.dropout_rate = dropout
        self.out = nn.Linear(hidden_size, output_size)

    def forward(self, input, hidden, not_used):
        """One decoding step; `not_used` keeps the attention-style signature."""
        embedded = self.embedding(input).transpose(0, 1)  # [B, 1] -> [1, B, D]
        embedded = F.dropout(embedded, self.dropout_rate)
        output, hidden = self.rnn(embedded, hidden)
        out = self.out(output.squeeze(0))
        output = F.log_softmax(out, dim=1)
        return output, hidden
class BeamSearchNode(object):
    """One node of the beam-search tree: a partial decoding hypothesis."""

    def __init__(self, hiddenstate, previousNode, wordId, logProb, length):
        '''
        :param hiddenstate: decoder hidden state after emitting this word
        :param previousNode: parent node (None at the root)
        :param wordId: token emitted at this node
        :param logProb: cumulative log-probability of the hypothesis
        :param length: number of tokens generated so far
        '''
        self.h = hiddenstate
        self.prevNode = previousNode
        self.wordid = wordId
        self.logp = logProb
        self.leng = length

    def eval(self, alpha=1.0):
        """Return the length-normalized log-probability of this hypothesis."""
        reward = 0
        # Add here a function for shaping a reward
        # Normalize by (length - 1); the epsilon avoids division by zero at
        # the root node.
        denominator = float(self.leng - 1 + 1e-6)
        return self.logp / denominator + alpha * reward
# NOTE(review): DecoderRNN.__init__ requires embedding_size, hidden_size,
# output_size and cell_type, so this bare call raises TypeError at import
# time. Real model dimensions must be supplied before this module is usable.
decoder = DecoderRNN()
def beam_decode(target_tensor, decoder_hiddens, encoder_outputs=None):
    '''
    :param target_tensor: target indexes tensor of shape [B, T] where B is the batch size and T is the maximum length of the output sentence
    :param decoder_hidden: input tensor of shape [1, B, H] for start of the decoding
    :param encoder_outputs: if you are using attention mechanism you can pass encoder outputs, [T, B, H] where T is the maximum length of input sentence
    :return: decoded_batch
    '''
    beam_width = 10
    topk = 1  # how many sentence do you want to generate
    decoded_batch = []

    # decoding goes sentence by sentence
    for idx in range(target_tensor.size(0)):
        # Slice out this sentence's initial hidden state (a tuple for LSTM).
        if isinstance(decoder_hiddens, tuple):  # LSTM case
            decoder_hidden = (decoder_hiddens[0][:, idx, :].unsqueeze(0), decoder_hiddens[1][:, idx, :].unsqueeze(0))
        else:
            decoder_hidden = decoder_hiddens[:, idx, :].unsqueeze(0)
        encoder_output = encoder_outputs[:, idx, :].unsqueeze(1)

        # Start with the start of the sentence token
        # NOTE(review): torch.LongTensor does not take a `device` keyword —
        # recent PyTorch raises here; torch.tensor([[SOS_token]], device=device)
        # is the supported spelling. Left unchanged pending confirmation.
        decoder_input = torch.LongTensor([[SOS_token]], device=device)

        # Number of sentence to generate
        endnodes = []
        number_required = min((topk + 1), topk - len(endnodes))

        # starting node - hidden vector, previous node, word id, logp, length
        node = BeamSearchNode(decoder_hidden, None, decoder_input, 0, 1)
        nodes = PriorityQueue()

        # start the queue; the key is the negated score so the best
        # (highest-score) hypothesis is popped first.
        # NOTE(review): on score ties, tuple comparison falls through to
        # comparing BeamSearchNode objects, which raises TypeError on
        # Python 3 — a monotonic tie-breaker in the tuple would avoid this.
        nodes.put((-node.eval(), node))
        qsize = 1

        # start beam search
        while True:
            # give up when decoding takes too long
            if qsize > 2000: break

            # fetch the best node
            score, n = nodes.get()
            decoder_input = n.wordid
            decoder_hidden = n.h

            # A finished hypothesis ends in EOS (but EOS as the very first
            # token is not accepted).
            if n.wordid.item() == EOS_token and n.prevNode != None:
                endnodes.append((score, n))
                # if we reached maximum # of sentences required
                if len(endnodes) >= number_required:
                    break
                else:
                    continue

            # decode for one step using decoder
            decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden, encoder_output)

            # PUT HERE REAL BEAM SEARCH OF TOP
            log_prob, indexes = torch.topk(decoder_output, beam_width)
            nextnodes = []

            # Expand the node with its beam_width best continuations.
            for new_k in range(beam_width):
                decoded_t = indexes[0][new_k].view(1, -1)
                log_p = log_prob[0][new_k].item()

                node = BeamSearchNode(decoder_hidden, n, decoded_t, n.logp + log_p, n.leng + 1)
                score = -node.eval()
                nextnodes.append((score, node))

            # put them into queue
            for i in range(len(nextnodes)):
                score, nn = nextnodes[i]
                nodes.put((score, nn))
            # increase qsize
            qsize += len(nextnodes) - 1

        # choose nbest paths, back trace them
        # (queue entries are (score, node) tuples, matching endnodes' shape)
        if len(endnodes) == 0:
            endnodes = [nodes.get() for _ in range(topk)]

        utterances = []
        for score, n in sorted(endnodes, key=operator.itemgetter(0)):
            utterance = []
            utterance.append(n.wordid)
            # back trace from the end node to the root
            while n.prevNode != None:
                n = n.prevNode
                utterance.append(n.wordid)

            # reverse so the utterance reads start-to-end
            utterance = utterance[::-1]
            utterances.append(utterance)

        decoded_batch.append(utterances)

    return decoded_batch
def greedy_decode(decoder_hidden, encoder_outputs, target_tensor):
    '''
    Decode greedily: at every step emit the single highest-probability token.
    :param target_tensor: target indexes tensor of shape [B, T] where B is the batch size and T is the maximum length of the output sentence
    :param decoder_hidden: input tensor of shape [1, B, H] for start of the decoding
    :param encoder_outputs: if you are using attention mechanism you can pass encoder outputs, [T, B, H] where T is the maximum length of input sentence
    :return: decoded_batch, a CPU tensor of shape [B, MAX_LENGTH] of token indices
    '''
    batch_size, seq_len = target_tensor.size()
    decoded_batch = torch.zeros((batch_size, MAX_LENGTH))
    # Bug fix: the legacy ``torch.LongTensor(data, device=...)`` constructor
    # rejects non-CPU devices; build the SOS column with torch.full instead.
    decoder_input = torch.full((batch_size, 1), SOS_token, dtype=torch.long, device=device)
    for t in range(MAX_LENGTH):
        decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden, encoder_outputs)
        topv, topi = decoder_output.data.topk(1)  # best candidate per batch element
        topi = topi.view(-1)
        # decoded_batch lives on the CPU; move the indices there before storing
        # (a no-op when running on CPU, required when topi is on a GPU).
        decoded_batch[:, t] = topi.detach().cpu()
        # The chosen tokens become the next step's input, shaped [B, 1].
        decoder_input = topi.detach().view(-1, 1)
    return decoded_batch
|
import subprocess
import tempfile
import uuid
from typing import List, Optional, Iterable
from envparse import env
VERSION = 2
JOB_CONFIG_TEMPLATE = """
labels:
type: "{label_type}"
owner: "{label_owner}"
version: "{version}"
trainingInput:
scaleTier: CUSTOM
masterType: n1-standard-4
args:{model_dirs}{files_for_inference}
- "--data-dir"
- {remote_data_dir}
- "--eval-batch-size"
- '16'
region: us-central1
masterConfig:
acceleratorConfig:
count: '1'
type: NVIDIA_TESLA_P100
imageUri: "{docker_image_uri}"
"""
def __fmt_yaml_list(key, values: List[str], nspaces=0):
    """Render ``values`` as a YAML list fragment preceded by a ``--{key}`` flag.

    Spacing matters since the output is spliced into a YAML file: every line
    is indented by ``nspaces`` spaces and the fragment starts with a newline
    so it can be appended directly after a YAML key. Returns an empty string
    when ``values`` is not a non-empty list.
    """
    if not isinstance(values, list) or not values:
        return ""
    pad = " " * nspaces
    lines = [f'{pad}- "--{key}"'] + [f"{pad}- {v}" for v in values]
    return "\n" + "\n".join(lines)
def build_job_config(
    model_dirs: List[str],
    files_for_inference: Optional[List[str]] = None,
    docker_image_uri: Optional[str] = None,
    label_type: str = "production",
    label_owner: str = "alchemy",
    version=VERSION,
):
    """Render the YAML config for a Google AI Platform training job.
    Inputs:
        model_dirs: The list of gs:// location of the models (Also known as the
            "version_dir" elsewhere in the codebase).
        files_for_inference: A list of datasets to run inference on, can
            either be the dataset names OR their gs:// urls.
        docker_image_uri: The docker image URI. If None, will default to the
            env var GOOGLE_AI_PLATFORM_DOCKER_IMAGE_URI.
        label_type: Label for type.
        label_owner: Label for who ran the model.
        version: Config schema label (defaults to module-level VERSION).
            Note: the previous ``: str`` annotation was wrong — the default
            is the int ``2``; both int and str format identically.
    Returns:
        The rendered YAML config as a string.
    """
    if not docker_image_uri:
        docker_image_uri = env("GOOGLE_AI_PLATFORM_DOCKER_IMAGE_URI")
    # Format lists into proper yaml fragments (4-space indent matches the
    # "args:" block of JOB_CONFIG_TEMPLATE).
    formatted_model_dirs = __fmt_yaml_list("dirs", model_dirs, nspaces=4)
    formatted_files_for_inference = __fmt_yaml_list(
        "infer", files_for_inference, nspaces=4
    )
    return JOB_CONFIG_TEMPLATE.format(
        model_dirs=formatted_model_dirs,
        files_for_inference=formatted_files_for_inference,
        remote_data_dir="gs://alchemy-gp/data",  # gs_url.build_raw_data_dir()
        docker_image_uri=docker_image_uri,
        label_type=label_type,
        label_owner=label_owner,
        version=version,
    )
def submit_ai_platform_job(job_id, config_file):
    """Submit a training job to Google AI Platform via the gcloud CLI.
    Inputs:
        job_id: The id of the new job.
        config_file: Path to a config file on disk.
    """
    command = [
        "gcloud", "ai-platform", "jobs", "submit", "training",
        job_id,
        "--config", config_file,
    ]
    run_cmd(command)
def __generate_job_id():
    """Return a fresh, AI-Platform-safe job id.

    A valid job_id only contains letters, numbers and underscores, AND must
    start with a letter — hence the "t_" prefix and the dash-to-underscore
    translation of the random UUID.
    """
    return "t_{}".format(uuid.uuid4()).replace("-", "_")
def run_cmd(cmd: Iterable[str]):
    """Run a command line command.
    Inputs:
        cmd: A command line command, as an iterable of argument strings.
    Returns:
        The :class:`subprocess.CompletedProcess` on success.
    Raises:
        subprocess.CalledProcessError: if the command exits non-zero
            (its captured stdout/stderr are printed before re-raising).
    """
    # Materialize exactly once: ``cmd`` may be a generator, and the previous
    # code exhausted it in ``" ".join(cmd)``, leaving an empty argument list
    # for subprocess.run().
    args = list(cmd)
    # print the command for easier debugging
    print(" ".join(args))
    # check=True makes this function raise an Exception if the command fails.
    try:
        output = subprocess.run(args, check=True, capture_output=True)
        print("stdout:", str(output.stdout, 'utf-8'))
        print("stderr:", str(output.stderr, 'utf-8'))
        return output
    except subprocess.CalledProcessError as e:
        print("stdout:", str(e.stdout, 'utf-8'))
        print("stderr:", str(e.stderr, 'utf-8'))
        raise
if __name__ == "__main__":
    # Manual smoke test: builds a config and submits a real AI Platform job.
    job_id = __generate_job_id()
    print(f"Submit job: {job_id}")
    gcs_model_dirs = [
        "gs://alchemy-gp/tasks/95f8fcad08680ac3167d20f315c09b987595d04f15feb16f34a38456/models/5"
    ]
    datasets_for_inference = ["spring_jan_2020_small.jsonl"]
    docker_image_uri = "gcr.io/nlp-flywheel/alchemy-exp@sha256:e21902f05d649512c2fd08d0f10f91d48334f012f34dee676fcd41bb4611eff5"
    model_config = build_job_config(
        model_dirs=gcs_model_dirs,
        files_for_inference=datasets_for_inference,
        docker_image_uri=docker_image_uri,
        label_owner="test",
        label_type="experimental",
    )
    print(model_config)
    # The config file only needs to exist until gcloud reads it, so a
    # NamedTemporaryFile (flushed before use) is sufficient.
    with tempfile.NamedTemporaryFile(mode="w") as fp:
        fp.write(model_config)
        fp.flush()
        # Removed the ``submit_job_fn`` indirection: it was always None and
        # therefore always resolved to submit_ai_platform_job.
        submit_ai_platform_job(job_id, fp.name)
    print(job_id)
|
<filename>journalism/table.py
#!/usr/bin/env python
"""
This module contains the Table object.
"""
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
from ordereddict import OrderedDict
from journalism.columns import ColumnMapping, NumberType
from journalism.exceptions import ColumnDoesNotExistError, UnsupportedOperationError
from journalism.rows import RowSequence, Row
class Table(object):
    """
    A dataset consisting of rows and columns.
    :param rows: The data as a sequence of any sequences: tuples, lists, etc.
    :param column_types: A sequence of instances of :class:`.ColumnType`,
        one per column of data.
    :param column_names: A sequence of strings that are names for the columns.
    :var columns: A :class:`.ColumnMapping` for accessing the columns in this
        table.
    :var rows: A :class:`.RowSequence` for accessing the rows in this table.
    """
    def __init__(self, rows, column_types, column_names):
        len_column_types = len(column_types)
        len_column_names = len(column_names)
        if len_column_types != len_column_names:
            raise ValueError('column_types and column_names must be the same length.')
        if len(set(column_names)) != len_column_names:
            raise ValueError('Duplicate column names are not allowed.')
        self._column_types = tuple(column_types)
        self._column_names = tuple(column_names)
        # Lazily-populated caches of Column/Row wrapper objects, keyed by index.
        self._cached_columns = {}
        self._cached_rows = {}
        self.columns = ColumnMapping(self)
        self.rows = RowSequence(self)
        cast_data = []
        cast_funcs = [c.cast for c in self._column_types]
        for i, row in enumerate(rows):
            if len(row) != len_column_types:
                raise ValueError('Row %i has length %i, but Table only has %i columns.' % (i, len(row), len_column_types))
            # Forked tables can share data (because they are immutable)
            # but original data should be buffered so it can't be changed
            if isinstance(row, Row):
                cast_data.append(row)
                continue
            # NOTE: the comprehension's ``i`` deliberately shadows the loop
            # index; inside it, ``i`` is the column index selecting a cast func.
            cast_data.append(tuple(cast_funcs[i](d) for i, d in enumerate(row)))
        self._data = tuple(cast_data)
    def _get_column(self, i):
        """
        Get a Column of data, caching a copy for next request.
        """
        if i not in self._cached_columns:
            column_type = self._column_types[i]
            self._cached_columns[i] = column_type.create_column(self, i)
        return self._cached_columns[i]
    def _get_row(self, i):
        """
        Get a Row of data, caching a copy for the next request.
        """
        if i not in self._cached_rows:
            # If rows are from a fork, they are safe to access directly
            if isinstance(self._data[i], Row):
                self._cached_rows[i] = self._data[i]
            else:
                self._cached_rows[i] = Row(self, i)
        return self._cached_rows[i]
    def _fork(self, rows, column_types=[], column_names=[]):
        """
        Create a new table using the metadata from this one.
        Used internally by functions like :meth:`order_by`.
        """
        # NOTE(review): the mutable defaults are safe here because they are
        # only tested for truthiness, never mutated.
        if not column_types:
            column_types = self._column_types
        if not column_names:
            column_names = self._column_names
        return Table(rows, column_types, column_names)
    def get_column_types(self):
        """
        Get an ordered list of this table's column types.
        :returns: A :class:`tuple` of :class:`.Column` instances.
        """
        return self._column_types
    def get_column_names(self):
        """
        Get an ordered list of this table's column names.
        :returns: A :class:`tuple` of strings.
        """
        return self._column_names
    def select(self, column_names):
        """
        Reduce this table to only the specified columns.
        :param column_names: A sequence of names of columns to include in the new table.
        :returns: A new :class:`Table`.
        """
        column_indices = tuple(self._column_names.index(n) for n in column_names)
        column_types = tuple(self._column_types[i] for i in column_indices)
        new_rows = []
        for row in self.rows:
            new_rows.append(tuple(row[i] for i in column_indices))
        return self._fork(new_rows, column_types, column_names)
    def where(self, test):
        """
        Filter this table to only those rows where the row passes a truth test.
        :param test: A function that takes a :class:`.Row` and returns
            :code:`True` if it should be included.
        :type test: :class:`function`
        :returns: A new :class:`Table`.
        """
        rows = [row for row in self.rows if test(row)]
        return self._fork(rows)
    def find(self, test):
        """
        Find the first row that passes a truth test.
        :param test: A function that takes a :class:`.Row` and returns
            :code:`True` if it matches.
        :type test: :class:`function`
        :returns: A single :class:`.Row` or :code:`None` if not found.
        """
        for row in self.rows:
            if test(row):
                return row
        return None
    def stdev_outliers(self, column_name, deviations=3, reject=False):
        """
        A wrapper around :meth:`where` that filters the dataset to
        rows where the value of the column are more than some number
        of standard deviations from the mean.
        This method makes no attempt to validate that the distribution
        of your data is normal.
        There are well-known cases in which this algorithm will
        fail to identify outliers. For a more robust measure see
        :meth:`mad_outliers`.
        :param column_name: The name of the column to compute outliers on.
        :param deviations: The number of deviations from the mean a data point
            must be to qualify as an outlier.
        :param reject: If :code:`True` then the new :class:`Table` will contain
            everything *except* the outliers.
        :returns: A new :class:`Table`.
        """
        mean = self.columns[column_name].mean()
        sd = self.columns[column_name].stdev()
        lower_bound = mean - (sd * deviations)
        upper_bound = mean + (sd * deviations)
        if reject:
            # Keep only the outliers (outside the band).
            f = lambda row: row[column_name] < lower_bound or row[column_name] > upper_bound
        else:
            # Keep only the non-outliers (inside the band, inclusive).
            f = lambda row: lower_bound <= row[column_name] <= upper_bound
        return self.where(f)
    def mad_outliers(self, column_name, deviations=3, reject=False):
        """
        A wrapper around :meth:`where` that filters the dataset to
        rows where the value of the column are more than some number of
        `median absolute deviations <http://en.wikipedia.org/wiki/Median_absolute_deviation>`_
        from the median.
        This method makes no attempt to validate that the distribution
        of your data is normal.
        :param column_name: The name of the column to compute outliers on.
        :param deviations: The number of deviations from the median a data point
            must be to qualify as an outlier.
        :param reject: If :code:`True` then the new :class:`Table` will contain
            everything *except* the outliers.
        :returns: A new :class:`Table`.
        """
        median = self.columns[column_name].median()
        mad = self.columns[column_name].mad()
        lower_bound = median - (mad * deviations)
        upper_bound = median + (mad * deviations)
        if reject:
            f = lambda row: row[column_name] < lower_bound or row[column_name] > upper_bound
        else:
            f = lambda row: lower_bound <= row[column_name] <= upper_bound
        return self.where(f)
    def order_by(self, key, reverse=False):
        """
        Sort this table by the :code:`key`. This can be either a
        column_name or callable that returns a value to sort by.
        :param key: Either the name of a column to sort by or a :class:`function`
            that takes a row and returns a value to sort by.
        :param reverse: If :code:`True` then sort in reverse (typically,
            descending) order.
        :returns: A new :class:`Table`.
        """
        key_is_row_function = hasattr(key, '__call__')
        def null_handler(row):
            # Substitute NullOrder for None so rows with nulls sort last
            # instead of raising a comparison TypeError.
            if key_is_row_function:
                k = key(row)
            else:
                k = row[key]
            if k is None:
                return NullOrder()
            return k
        rows = sorted(self.rows, key=null_handler, reverse=reverse)
        return self._fork(rows)
    def limit(self, start_or_stop=None, stop=None, step=None):
        """
        Filter data to a subset of all rows.
        See also: Python's :func:`slice`.
        :param start_or_stop: If the only argument, then how many rows to
            include, otherwise, the index of the first row to include.
        :param stop: The index of the last row to include.
        :param step: The size of the jump between rows to include.
            (*step=2* will return every other row.)
        :returns: A new :class:`Table`.
        """
        # NOTE(review): a falsy ``stop`` (e.g. 0) is treated as "not given"
        # and falls through to the single-argument form.
        if stop or step:
            return self._fork(self.rows[slice(start_or_stop, stop, step)])
        return self._fork(self.rows[:start_or_stop])
    def distinct(self, key=None):
        """
        Filter data to only rows that are unique.
        :param key: Either 1) the name of a column to use to identify
            unique rows or 2) a :class:`function` that takes a row and
            returns a value to identify unique rows or 3) :code:`None`,
            in which case the entire row will be checked for uniqueness.
        :returns: A new :class:`Table`.
        """
        key_is_row_function = hasattr(key, '__call__')
        # ``uniques`` is a list (not a set) so unhashable keys still work;
        # membership tests are therefore O(n) per row.
        uniques = []
        rows = []
        for row in self.rows:
            if key_is_row_function:
                k = key(row)
            elif key is None:
                k = tuple(row)
            else:
                k = row[key]
            if k not in uniques:
                uniques.append(k)
                rows.append(row)
        return self._fork(rows)
    def inner_join(self, left_key, table, right_key):
        """
        Performs an "inner join", combining columns from this table
        and from :code:`table` anywhere that the output of :code:`left_key`
        and :code:`right_key` are equivalent.
        :param left_key: Either the name of a column from this table
            to join on, or a :class:`function` that takes a row and returns
            a value to join on.
        :param table: The "right" table to join to.
        :param right_key: Either the name of a column from :code:`table`
            to join on, or a :class:`function` that takes a row and returns
            a value to join on.
        :returns: A new :class:`Table`.
        """
        left_key_is_row_function = hasattr(left_key, '__call__')
        right_key_is_row_function = hasattr(right_key, '__call__')
        left = []
        right = []
        if left_key_is_row_function:
            left = [left_key(row) for row in self.rows]
        else:
            c = self._column_names.index(left_key)
            left = self._get_column(c)
        if right_key_is_row_function:
            right = [right_key(row) for row in table.rows]
        else:
            c = table._column_names.index(right_key)
            right = table._get_column(c)
        # Nested-loop join: O(len(left) * len(right)).
        rows = []
        for i, l in enumerate(left):
            for j, r in enumerate(right):
                if l == r:
                    rows.append(tuple(self.rows[i]) + tuple(table.rows[j]))
        column_types = self._column_types + table._column_types
        column_names = self._column_names + table._column_names
        return self._fork(rows, column_types, column_names)
    def left_outer_join(self, left_key, table, right_key):
        """
        Performs a "left outer join", combining columns from this table
        and from :code:`table` anywhere that the output of :code:`left_key`
        and :code:`right_key` are equivalent.
        Where there is no match for :code:`left_key` the left columns will
        be included with the right columns set to :code:`None`.
        :param left_key: Either the name of a column from this table
            to join on, or a :class:`function` that takes a row and returns
            a value to join on.
        :param table: The "right" table to join to.
        :param right_key: Either the name of a column from :code:`table`
            to join on, or a :class:`function` that takes a row and returns
            a value to join on.
        :returns: A new :class:`Table`.
        """
        left_key_is_row_function = hasattr(left_key, '__call__')
        right_key_is_row_function = hasattr(right_key, '__call__')
        left = []
        right = []
        if left_key_is_row_function:
            left = [left_key(row) for row in self.rows]
        else:
            c = self._column_names.index(left_key)
            left = self._get_column(c)
        if right_key_is_row_function:
            right = [right_key(row) for row in table.rows]
        else:
            c = table._column_names.index(right_key)
            right = table._get_column(c)
        rows = []
        for i, l in enumerate(left):
            if l in right:
                for j, r in enumerate(right):
                    if l == r:
                        rows.append(tuple(list(self.rows[i]) + list(table.rows[j])))
            else:
                # No match: pad the right-hand columns with None.
                rows.append(tuple(list(self.rows[i]) + [None] * len(table.columns)))
        column_types = self._column_types + table._column_types
        column_names = self._column_names + table._column_names
        return self._fork(rows, column_types, column_names)
    def group_by(self, group_by):
        """
        Create a new :class:`Table` for **each** unique value in the
        :code:`group_by` column and return them as a dict.
        :param group_by: The name of a column to group by.
        :returns: A :class:`dict` where the keys are unique values from
            the :code:`group_by` column and the values are new :class:`Table`
            instances.
        :raises: :exc:`.ColumnDoesNotExistError`
        """
        try:
            i = self._column_names.index(group_by)
        except ValueError:
            raise ColumnDoesNotExistError(group_by)
        # OrderedDict preserves first-seen order of the group values.
        groups = OrderedDict()
        for row in self._data:
            group_name = row[i]
            if group_name not in groups:
                groups[group_name] = []
            groups[group_name].append(row)
        output = {}
        for group, rows in groups.items():
            output[group] = self._fork(rows)
        return output
    def aggregate(self, group_by, operations):
        """
        Aggregate data by grouping values together and performing some
        set of column operations on the groups.
        The columns of the output table (except for the :code:`group_by`
        column) will be named :code:`originalname_operation`. For instance
        :code:`salaries_median`.
        A :code:`group_by_count` column will always be added to the output.
        The order of the output columns will be :code:`('group_by',
        'group_by_count', 'column_one_operation', ...)`.
        :param group_by: The name of a column to group by.
        :param operations: A sequence of :code:`(column_name, operation)`
            pairs, where operation is the name of a :class:`.Column` method,
            such as "sum" or "max_length". (The body unpacks each element as
            a 2-tuple, so a plain dict of names would fail here.)
        :returns: A new :class:`Table`.
        :raises: :exc:`.ColumnDoesNotExistError`, :exc:`.UnsupportedOperationError`
        """
        try:
            i = self._column_names.index(group_by)
        except ValueError:
            raise ColumnDoesNotExistError(group_by)
        groups = OrderedDict()
        for row in self._data:
            group_name = row[i]
            if group_name not in groups:
                groups[group_name] = []
            groups[group_name].append(row)
        output = []
        # Output schema: group value, group count, then one column per op.
        column_types = [self._column_types[i], NumberType()]
        column_names = [group_by, '%s_count' % group_by]
        for op_column, operation in operations:
            try:
                j = self._column_names.index(op_column)
            except ValueError:
                raise ColumnDoesNotExistError(op_column)
            column_type = self._column_types[j]
            column_types.append(column_type)
            column_names.append('%s_%s' % (op_column, operation))
        for name, group_rows in groups.items():
            # Build a throwaway Table per group so Column methods can be used.
            group_table = Table(group_rows, self._column_types, self._column_names)
            new_row = [name, len(group_table.rows)]
            for op_column, operation in operations:
                c = group_table.columns[op_column]
                try:
                    op = getattr(c, operation)
                except AttributeError:
                    raise UnsupportedOperationError(operation, c)
                new_row.append(op())
            output.append(tuple(new_row))
        return self._fork(output, column_types, column_names)
    def compute(self, column_name, column_type, func):
        """
        Compute a new column by passing each row to a function.
        :param column_name: A name of the new column.
        :param column_type: An instance of :class:`.ColumnType`.
        :param func: A :class:`function` that will be passed a :class:`.Row`
            and should return the computed value for the new column.
        :returns: A new :class:`Table`.
        """
        column_types = self._column_types + (column_type,)
        column_names = self._column_names + (column_name,)
        new_rows = []
        for row in self.rows:
            new_rows.append(tuple(row) + (func(row),))
        return self._fork(new_rows, column_types, column_names)
    def percent_change(self, before_column_name, after_column_name, new_column_name):
        """
        A wrapper around :meth:`compute` for quickly computing
        percent change between two columns.
        :param before_column_name: The name of the column containing the
            *before* values.
        :param after_column_name: The name of the column containing the
            *after* values.
        :param new_column_name: The name of the resulting column.
        :returns: A new :class:`Table`.
        """
        def calc(row):
            return (row[after_column_name] - row[before_column_name]) / row[before_column_name] * 100
        return self.compute(new_column_name, NumberType(), calc)
    def rank(self, key, new_column_name):
        """
        Creates a new column that is the rank order of the values
        returned by the row function.
        :param key: Either the name of a column to rank by or a
            :class:`function` that takes a row and returns a value to rank by.
        :param new_column_name: The name of the resulting column.
        :returns: A new :class:`Table`.
        """
        key_is_row_function = hasattr(key, '__call__')
        def null_handler(k):
            # Sort None values last via NullOrder (see class below).
            if k is None:
                return NullOrder()
            return k
        if key_is_row_function:
            values = [key(row) for row in self.rows]
            compute_func = lambda row: rank_column.index(key(row)) + 1
        else:
            values = [row[key] for row in self.rows]
            compute_func = lambda row: rank_column.index(row[key]) + 1
        # Ranks are 1-based positions in the sorted value list; ties share
        # the rank of their first occurrence (list.index returns the first).
        rank_column = sorted(values, key=null_handler)
        return self.compute(new_column_name, NumberType(), compute_func)
class NullOrder(object):
    """
    Sort-key stand-in used in place of None.
    Compares as greater than every non-null value, so nulls sink to the
    end of an ascending sort (and tie with each other via reflection).
    """
    def __lt__(self, other):
        # A null is never less than anything.
        return False
    def __gt__(self, other):
        # Greater than everything except None itself.
        return other is not None
|
<filename>pyemvue/__main__.py
import datetime
import json
import sys

import dateutil
import dateutil.tz  # needed explicitly: ``import dateutil`` alone does not load the ``tz`` submodule used below

# Our files
from pyemvue.enums import Scale, Unit
from pyemvue.customer import Customer
from pyemvue.device import VueDevice, VueDeviceChannel, VueDeviceChannelUsage
from pyemvue.pyemvue import PyEmVue
def main():
    """CLI entry point: log in to Emporia Vue and print usage reports.
    Expects ``sys.argv[1]`` to be the path of a JSON file containing either
    an "email"/"password" pair or an "idToken"/"accessToken"/"refreshToken"
    triple. Exits with status 1 when credentials are missing.
    """
    errorMsg = 'Please pass a file containing the "email" and "password" as json.'
    if len(sys.argv) == 1:
        print(errorMsg)
        sys.exit(1)
    filepath = sys.argv[1]
    data = {}
    email = None
    passw = None
    idToken = None
    accessToken = None
    refreshToken = None
    try:
        with open(filepath) as f:
            data = json.load(f)
    except (OSError, ValueError):
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # pass through untouched; ValueError covers json.JSONDecodeError.
        print('Error opening file.', errorMsg)
        raise
    if ('email' not in data or 'password' not in data) and ('idToken' not in data or 'accessToken' not in data or 'refreshToken' not in data):
        print(errorMsg)
        sys.exit(1)
    canLogIn = False
    if 'email' in data:
        email = data['email']
    if 'password' in data:
        passw = data['password']
        canLogIn = True
    if 'idToken' in data and 'accessToken' in data and 'refreshToken' in data:
        idToken = data['idToken']
        accessToken = data['accessToken']
        refreshToken = data['refreshToken']
        canLogIn = True
    if not canLogIn:
        print('Not enough details to log in.', errorMsg)
        sys.exit(1)
    vue = PyEmVue()
    vue.login(email, passw, idToken, accessToken, refreshToken, token_storage_file='keys.json')
    print('Logged in. Authtoken follows:')
    print(vue.cognito.id_token)
    print()
    devices = vue.get_devices()
    deviceGids = []
    for device in devices:
        deviceGids.append(device.device_gid)
        print(device.device_gid, device.manufacturer_id, device.model, device.firmware)
        vue.populate_device_properties(device)
        for chan in device.channels:
            print('\t', chan.device_gid, chan.name, chan.channel_num, chan.channel_multiplier)
    monthly, start = vue.get_chart_usage(devices[0].channels[0], None, None, Scale.MONTH.value)
    print(monthly[0], 'kwh used since', start.isoformat())
    now = datetime.datetime.utcnow()
    # Local midnight in the device's time zone, converted to UTC, then made
    # naive — presumably the API expects naive-UTC datetimes; confirm.
    midnight = (datetime.datetime
        .now(dateutil.tz.gettz(devices[0].time_zone))
        .replace(hour=0, minute=0, second=0, microsecond=0)
        .astimezone(dateutil.tz.tzutc()))
    yesterday = midnight - datetime.timedelta(days=1)
    yesterday = yesterday.replace(tzinfo=None)
    print('Total usage for today in kwh: ')
    use = vue.get_devices_usage(deviceGids, now, Scale.DAY.value)
    for chan in use:
        print(f'{chan.device_gid} ({chan.channel_num}): {chan.usage} kwh')
    print('Total usage for yesterday in kwh: ')
    # Per-channel chart queries covering yesterday's 24-hour window.
    for chan in use:
        usage = vue.get_chart_usage(chan, yesterday, yesterday+datetime.timedelta(hours=23, minutes=59), Scale.DAY.value)
        if usage and usage[0]:
            print(f'{chan.device_gid} ({chan.channel_num}): {usage[0][0]} kwh')
    print('Average usage over the last minute in watts: ')
    use = vue.get_devices_usage(deviceGids, None, Scale.MINUTE.value)
    for chan in use:
        # kWh per minute -> watts: * 1000 (kW->W) * 60 (per-minute->per-hour).
        print(f'{chan.device_gid} ({chan.channel_num}): {chan.usage*1000*60} W')
    usage_over_time, start_time = vue.get_chart_usage(devices[0].channels[0], datetime.datetime.utcnow()-datetime.timedelta(days=7), datetime.datetime.utcnow(), scale=Scale.DAY.value, unit=Unit.KWH.value)
    print('Usage for the last seven days starting', start_time.isoformat())
    for usage in usage_over_time:
        print(usage, 'kwh')
# Support direct execution (``python -m pyemvue`` / running the file).
if __name__ == '__main__':
    main()
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from torch.nn import functional as F
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import polyak_update, D4rlReplayBuffer
from stable_baselines3.edac.policies import EDACPolicy
class EDAC(OffPolicyAlgorithm):
"""
Ensemble diversified actor critic (EDAC)
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: learning rate for adam optimizer,
the same learning rate will be used for all networks (Q-Values, Actor and Value function)
it can be a function of the current progress remaining (from 1 to 0)
:param buffer_size: size of the replay buffer
:param learning_starts: how many steps of the model to collect transitions for before learning starts
:param batch_size: Minibatch size for each gradient update
:param tau: the soft update coefficient ("Polyak update", between 0 and 1)
:param gamma: the discount factor
:param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
like ``(5, "step")`` or ``(2, "episode")``.
:param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
Set to ``-1`` means to do as many gradient steps as steps done in the environment
during the rollout.
:param action_noise: the action noise type (None by default), this can help
for hard exploration problem. Cf common.noise for the different action noise type.
:param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).
If ``None``, it will be automatically selected.
:param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
at a cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
:param ent_coef: Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off.
Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value)
:param target_update_interval: update the target network every ``target_network_update_freq``
gradient steps.
:param target_entropy: target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``)
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling
during the warm up phase (before learning starts)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
    def __init__(
        self,
        policy: Union[str, Type[EDACPolicy]],
        env: Union[GymEnv, str],
        learning_rate: Union[float, Schedule] = 3e-4,
        buffer_size: int = 1_000_000,  # 1e6
        learning_starts: int = 100,
        batch_size: int = 256,
        tau: float = 0.005,
        gamma: float = 0.99,
        train_freq: Union[int, Tuple[int, str]] = 1,
        gradient_steps: int = 1,
        action_noise: Optional[ActionNoise] = None,
        replay_buffer_class: Optional[ReplayBuffer] = None,
        replay_buffer_kwargs: Optional[Dict[str, Any]] = None,
        optimize_memory_usage: bool = False,
        ent_coef: Union[str, float] = "auto",
        target_update_interval: int = 1,
        target_entropy: Union[str, float] = "auto",
        use_sde: bool = False,
        sde_sample_freq: int = -1,
        use_sde_at_warmup: bool = False,
        tensorboard_log: Optional[str] = None,
        create_eval_env: bool = False,
        policy_kwargs: Optional[Dict[str, Any]] = None,
        verbose: int = 0,
        seed: Optional[int] = None,
        device: Union[th.device, str] = "auto",
        _init_setup_model: bool = True,
        dataset: Optional[Dict] = None,
        without_exploration: bool = False,
        gumbel_ensemble: bool = False,
        gumbel_temperature: float = 0.5,
        eta: float = -1.0,
    ):
        # NOTE(review): EDACPolicy is passed as the third positional argument,
        # which presumably fills a ``policy_base`` parameter in this fork's
        # OffPolicyAlgorithm (as do ``without_exploration`` / the gumbel
        # kwargs, which are not upstream SB3 parameters) — confirm against
        # the forked base class.
        super(EDAC, self).__init__(
            policy,
            env,
            EDACPolicy,
            learning_rate,
            buffer_size,
            learning_starts,
            batch_size,
            tau,
            gamma,
            train_freq,
            gradient_steps,
            action_noise,
            replay_buffer_class=replay_buffer_class,
            replay_buffer_kwargs=replay_buffer_kwargs,
            policy_kwargs=policy_kwargs,
            tensorboard_log=tensorboard_log,
            verbose=verbose,
            device=device,
            create_eval_env=create_eval_env,
            seed=seed,
            use_sde=use_sde,
            sde_sample_freq=sde_sample_freq,
            use_sde_at_warmup=use_sde_at_warmup,
            optimize_memory_usage=optimize_memory_usage,
            # NOTE(review): (gym.spaces.Box) is the bare class, not a 1-tuple;
            # isinstance() accepts either form, but (gym.spaces.Box,) would be
            # clearer.
            supported_action_spaces=(gym.spaces.Box),
            without_exploration=without_exploration,
            gumbel_ensemble=gumbel_ensemble,
            gumbel_temperature=gumbel_temperature
        )
        self.target_entropy = target_entropy
        self.log_ent_coef = None  # type: Optional[th.Tensor]
        # Entropy coefficient / Entropy temperature
        # Inverse of the reward scale
        self.ent_coef = ent_coef
        self.target_update_interval = target_update_interval
        self.ent_coef_optimizer = None
        # Offline dataset (D4RL-style); wrapped into a buffer in _setup_model.
        self.dataset = dataset
        self.eta = eta  # Add for EDAC: critic-diversity coefficient weight
        if _init_setup_model:
            self._setup_model()
def _setup_model(self) -> None:
super(EDAC, self)._setup_model()
self._create_aliases()
# Target entropy is used when learning the entropy coefficient
if self.target_entropy == "auto":
# automatically set target entropy if needed
self.target_entropy = -np.prod(self.env.action_space.shape).astype(np.float32)
else:
# Force conversion
# this will also throw an error for unexpected string
self.target_entropy = float(self.target_entropy)
# The entropy coefficient or entropy can be learned automatically
# see Automating Entropy Adjustment for Maximum Entropy RL section
# of https://arxiv.org/abs/1812.05905
if isinstance(self.ent_coef, str) and self.ent_coef.startswith("auto"):
# Default initial value of ent_coef when learned
init_value = 1.0
if "_" in self.ent_coef:
init_value = float(self.ent_coef.split("_")[1])
assert init_value > 0.0, "The initial value of ent_coef must be greater than 0"
# Note: we optimize the log of the entropy coeff which is slightly different from the paper
# as discussed in https://github.com/rail-berkeley/softlearning/issues/37
self.log_ent_coef = th.log(th.ones(1, device=self.device) * init_value).requires_grad_(True)
self.ent_coef_optimizer = th.optim.Adam([self.log_ent_coef], lr=self.lr_schedule(1))
else:
# Force conversion to float
# this will throw an error if a malformed string (different from 'auto')
# is passed
self.ent_coef_tensor = th.tensor(float(self.ent_coef)).to(self.device)
# Deal with offline reinforcement learning, as original object of CQL.
if self.dataset is not None:
self.offline_buffer = D4rlReplayBuffer(self.dataset, self.device)
def _create_aliases(self) -> None:
self.actor = self.policy.actor
self.critic = self.policy.critic
self.critic_target = self.policy.critic_target
    def train(self, gradient_steps: int, batch_size: int = 64) -> None:
        """Run EDAC gradient updates.

        Each step: sample a replay batch, update the (optionally learned)
        entropy coefficient, update the critic ensemble (TD MSE plus the
        EDAC gradient-diversity penalty when ``eta > 0``), update the actor,
        and polyak-average the target critics.

        Args:
            gradient_steps: Number of optimization steps to perform.
            batch_size: Minibatch size sampled from the replay buffer.
        """
        # Switch to train mode (this affects batch norm / dropout)
        self.policy.set_training_mode(True)
        # Update optimizers learning rate
        optimizers = [self.actor.optimizer, self.critic.optimizer]
        if self.ent_coef_optimizer is not None:
            optimizers += [self.ent_coef_optimizer]
        # Update learning rate according to lr schedule
        self._update_learning_rate(optimizers)
        ent_coef_losses, ent_coefs = [], []
        actor_losses, critic_losses = [], []
        edac_additional_losses = []
        for gradient_step in range(gradient_steps):
            # Sample replay buffer
            replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
            # We need to sample because `log_std` may have changed between two gradient steps
            if self.use_sde:
                self.actor.reset_noise()
            # Action by the current actor for the sampled state
            actions_pi, log_prob = self.actor.action_log_prob(replay_data.observations)
            log_prob = log_prob.reshape(-1, 1)
            ent_coef_loss = None
            if self.ent_coef_optimizer is not None:
                # Important: detach the variable from the graph
                # so we don't change it with other losses
                # see https://github.com/rail-berkeley/softlearning/issues/60
                ent_coef = th.exp(self.log_ent_coef.detach())
                ent_coef_loss = -(self.log_ent_coef * (log_prob + self.target_entropy).detach()).mean()
                ent_coef_losses.append(ent_coef_loss.item())
            else:
                ent_coef = self.ent_coef_tensor
            ent_coefs.append(ent_coef.item())
            # Optimize entropy coefficient, also called
            # entropy temperature or alpha in the paper
            if ent_coef_loss is not None:
                self.ent_coef_optimizer.zero_grad()
                ent_coef_loss.backward()
                self.ent_coef_optimizer.step()
            with th.no_grad():
                # Select action according to policy
                next_actions, next_log_prob = self.actor.action_log_prob(replay_data.next_observations)
                # Compute the next Q values: min over all critics targets
                next_q_values = th.cat(self.critic_target(replay_data.next_observations, next_actions), dim=1)
                if self.gumbel_ensemble:
                    # NOTE(review): appears to weight ensemble members by Gumbel
                    # coefficients instead of taking the min -- semantics of
                    # get_gumbel_coefs are defined elsewhere; confirm there.
                    gumbel_coefs = self.get_gumbel_coefs(next_q_values, inverse_proportion=True)
                    next_q_values = th.sum(next_q_values * gumbel_coefs, dim=1, keepdim=True)
                else:
                    next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True)
                # add entropy term
                next_q_values = next_q_values - ent_coef * next_log_prob.reshape(-1, 1)
                # td error + entropy term
                target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
            # Get current Q-values estimates for each critic network
            # using action from the replay buffer
            current_q_values = self.critic(replay_data.observations, replay_data.actions)
            # Compute critic loss
            critic_loss = 0.5 * sum([F.mse_loss(current_q, target_q_values) for current_q in current_q_values])
            if self.eta > 0: # Add for EDAC. EDAC error should be added for usual critic loss
                n_qs = len(current_q_values)
                action_dim = next_actions.size(1)
                sample_size = min(n_qs, action_dim)
                indices = th.randperm(n_qs, device=self.device)[:sample_size] # Useful for calculating only for "square" matrix.
                observation_tiles = replay_data.observations.repeat(n_qs, 1, 1)
                action_tiles = replay_data.actions.repeat(n_qs, 1, 1).requires_grad_(True) # To compute the gradient we should repeat.
                q_vals = self.critic.edac_forward(observation_tiles, action_tiles)
                q_vals = th.stack([current_q_value for current_q_value in q_vals], dim=0) # [n_qs, batch, 1]
                q_vals_grad, = th.autograd.grad(q_vals.sum(), action_tiles) # [n_qs, batch_size, action_dim]: Derivative w.r.t action_tiles.
                q_vals_grad = th.index_select(q_vals_grad, dim=0, index=indices).transpose(1, 0) # [batch_size, sample_size, action_dim]
                inn_prod_grads = th.einsum("bik, bjk -> bij", q_vals_grad, q_vals_grad) # [batch_size, sample_size, sample_size]
                # Additional loss w.r.t "different" index.
                # Off-diagonal inner products penalize aligned critic gradients,
                # encouraging ensemble diversity; diagonal terms are masked out.
                mask = th.eye(sample_size, sample_size, device=self.device).repeat(batch_size, 1, 1) # [batch_size, sample_size, sample_size]
                edac_loss = ((1 - mask) * inn_prod_grads / (sample_size - 1)).mean()
                edac_additional_losses.append(edac_loss.item())
                critic_loss += self.eta * edac_loss
            critic_losses.append(critic_loss.item())
            # Optimize the critic
            self.critic.optimizer.zero_grad()
            critic_loss.backward()
            self.critic.optimizer.step()
            # Compute actor loss
            # Alternative: actor_loss = th.mean(log_prob - qf1_pi)
            # Mean over all critic networks
            q_values_pi = th.cat(self.critic.forward(replay_data.observations, actions_pi), dim=1)
            min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True)
            actor_loss = (ent_coef * log_prob - min_qf_pi).mean()
            actor_losses.append(actor_loss.item())
            # Optimize the actor
            self.actor.optimizer.zero_grad()
            actor_loss.backward()
            self.actor.optimizer.step()
            # Update target networks
            if gradient_step % self.target_update_interval == 0:
                polyak_update(self.critic.parameters(), self.critic_target.parameters(), self.tau)
        self._n_updates += gradient_steps
        self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
        self.logger.record("train/ent_coef", np.mean(ent_coefs))
        self.logger.record("train/actor_loss", np.mean(actor_losses))
        self.logger.record("train/critic_loss", np.mean(critic_losses))
        if len(ent_coef_losses) > 0:
            self.logger.record("train/ent_coef_loss", np.mean(ent_coef_losses))
        if len(edac_additional_losses) > 0:
            self.logger.record("train/edac_loss", np.mean(edac_additional_losses))
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 4,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "EDAC",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> OffPolicyAlgorithm:
return super(EDAC, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
def _excluded_save_params(self) -> List[str]:
return super(EDAC, self)._excluded_save_params() + ["actor", "critic", "critic_target"]
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
state_dicts = ["policy", "actor.optimizer", "critic.optimizer"]
if self.ent_coef_optimizer is not None:
saved_pytorch_variables = ["log_ent_coef"]
state_dicts.append("ent_coef_optimizer")
else:
saved_pytorch_variables = ["ent_coef_tensor"]
return state_dicts, saved_pytorch_variables
|
<reponame>khromiumos/chromiumos-chromite
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run lint checks on the specified files."""
from __future__ import print_function
import errno
import functools
import json
import multiprocessing
import os
import re
import sys
from six.moves import urllib
from chromite.lib import constants
from chromite.cli import command
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import git
from chromite.lib import osutils
from chromite.lib import parallel
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
# Extract a script's shebang.
# group(1) is the interpreter path; group(3) is the optional first argument
# (e.g. for '#!/usr/bin/env python', group(3) is b'python').
SHEBANG_RE = re.compile(br'^#!\s*([^\s]+)(\s+([^\s]+))?')
def _GetProjectPath(path):
  """Find the absolute path of the git checkout that contains |path|."""
  if git.FindRepoCheckoutRoot(path):
    # Inside a repo checkout: ask the manifest which project owns |path|.
    manifest = git.ManifestCheckout.Cached(path)
    return manifest.FindCheckoutFromPath(path).GetPath(absolute=True)
  else:
    # Maybe they're running on a file outside of a checkout.
    # e.g. cros lint ~/foo.py /tmp/test.py
    return os.path.dirname(path)
def _GetPylintrc(path):
  """Locate pylintrc or .pylintrc file that applies to |path|.

  Walks up from |path|'s directory to the root of its checkout looking for
  a config file.  If not found - use the default chromite pylintrc.
  """
  path = os.path.realpath(path)
  project_path = _GetProjectPath(path)
  parent = os.path.dirname(path)
  while project_path and parent.startswith(project_path):
    pylintrc = os.path.join(parent, 'pylintrc')
    dotpylintrc = os.path.join(parent, '.pylintrc')
    # Only allow one of these to exist to avoid confusing which one is used.
    if os.path.isfile(pylintrc) and os.path.isfile(dotpylintrc):
      cros_build_lib.Die('%s: Only one of "pylintrc" or ".pylintrc" is allowed',
                         parent)
    if os.path.isfile(pylintrc):
      return pylintrc
    if os.path.isfile(dotpylintrc):
      return dotpylintrc
    parent = os.path.dirname(parent)
  # Fall back to chromite's default config.
  return os.path.join(constants.SOURCE_ROOT, 'chromite', 'pylintrc')
def _GetPylintGroups(paths):
  """Group |paths| by the pylintrc file that governs each of them.

  Returns:
    A dict mapping a pylintrc path to the list of files it applies to.
  """
  groups = {}
  for lint_path in paths:
    rcfile = _GetPylintrc(lint_path)
    if not rcfile:
      continue
    groups.setdefault(rcfile, []).append(lint_path)
  return groups
def _GetPythonPath(paths):
  """Return the set of Python library paths to use.

  Args:
    paths: The files being linted; their directories are appended so each
      script can import from its own directory.

  Returns:
    A list of directories suitable for $PYTHONPATH.
  """
  # Carry through custom PYTHONPATH that the host env has set.
  return os.environ.get('PYTHONPATH', '').split(os.pathsep) + [
      # Add the Portage installation inside the chroot to the Python path.
      # This ensures that scripts that need to import portage can do so.
      os.path.join(constants.SOURCE_ROOT, 'chroot', 'usr', 'lib', 'portage',
                   'pym'),
      # Allow platform projects to be imported by name (e.g. crostestutils).
      os.path.join(constants.SOURCE_ROOT, 'src', 'platform'),
      # Ideally we'd modify meta_path in pylint to handle our virtual chromite
      # module, but that's not possible currently. We'll have to deal with
      # that at some point if we want `cros lint` to work when the dir is not
      # named 'chromite'.
      constants.SOURCE_ROOT,
      # Also allow scripts to import from their current directory.
  ] + list(set(os.path.dirname(x) for x in paths))
# The mapping between the "cros lint" --output-format flag and cpplint.py
# --output flag.
# Keys match LintCommand.OUTPUT_FORMATS (minus 'default').
CPPLINT_OUTPUT_FORMAT_MAP = {
    'colorized': 'emacs',
    'msvs': 'vs7',
    'parseable': 'emacs',
}
# The mapping between the "cros lint" --output-format flag and shellcheck
# flags.
# Note that the msvs mapping here isn't quite VS format, but it's closer than
# the default output.
SHLINT_OUTPUT_FORMAT_MAP = {
    'colorized': ['--color=always'],
    'msvs': ['--format=gcc'],
    'parseable': ['--format=gcc'],
}
def _LinterRunCommand(cmd, debug, **kwargs):
  """Run the linter with common run args set as higher levels expect.

  Args:
    cmd: Argv list for the linter process.
    debug: Whether to echo the command being run.
    **kwargs: Extra arguments forwarded to cros_build_lib.run.

  Returns:
    A CommandResult; check=False means a non-zero exit is reported via
    returncode rather than raising.
  """
  return cros_build_lib.run(cmd, check=False, print_cmd=debug,
                            debug_level=logging.NOTICE, **kwargs)
def _WhiteSpaceLintData(path, data):
"""Run basic whitespace checks on |data|.
Args:
path: The name of the file (for diagnostics).
data: The file content to lint.
Returns:
True if everything passed.
"""
ret = True
# Make sure files all have a trailing newline.
if not data.endswith('\n'):
ret = False
logging.warning('%s: file needs a trailing newline', path)
# Disallow leading & trailing blank lines.
if data.startswith('\n'):
ret = False
logging.warning('%s: delete leading blank lines', path)
if data.endswith('\n\n'):
ret = False
logging.warning('%s: delete trailing blank lines', path)
for i, line in enumerate(data.splitlines(), start=1):
if line.rstrip() != line:
ret = False
logging.warning('%s:%i: trim trailing whitespace: %s', path, i, line)
return ret
def _CpplintFile(path, output_format, debug):
  """Returns result of running cpplint on |path|."""
  cmd = [os.path.join(constants.DEPOT_TOOLS_DIR, 'cpplint.py')]
  if output_format != 'default':
    # Translate our flag value into cpplint's --output vocabulary.
    cmd += ['--output=%s' % CPPLINT_OUTPUT_FORMAT_MAP[output_format]]
  cmd += [path]
  return _LinterRunCommand(cmd, debug)
def _PylintFile(path, output_format, debug, interp):
  """Returns result of running pylint on |path|.

  Args:
    path: The Python file to lint.
    output_format: pylint --output-format value, or 'default' for none.
    debug: Whether to print out the linter command.
    interp: Interpreter name ('python2' or 'python3') used to pick the
      vpython wrapper and pylint version.

  Returns:
    A CommandResult object.
  """
  pylint = os.path.join(constants.DEPOT_TOOLS_DIR, 'pylint-1.9')
  # vpython3 isn't actually Python 3. But maybe it will be someday.
  if interp != 'python3':
    vpython = os.path.join(constants.DEPOT_TOOLS_DIR, 'vpython')
  else:
    vpython = os.path.join(constants.DEPOT_TOOLS_DIR, 'vpython3')
    # Python 3 linting uses the pylint-2 wrapper shipped with chromite.
    pylint = os.path.join(constants.CHROMITE_DIR, 'cli', 'cros', 'pylint-2')
  pylintrc = _GetPylintrc(path)
  cmd = [vpython, pylint, '--rcfile=%s' % pylintrc]
  if interp == 'python3':
    cmd += ['--disable=old-division']
  if output_format != 'default':
    cmd.append('--output-format=%s' % output_format)
  cmd.append(path)
  extra_env = {
      # When inside the SDK, Gentoo's python wrappers (i.e. `python`, `python2`,
      # and `python3`) will select a version based on $EPYTHON first. Make sure
      # we run through the right Python version when switching.
      # We can drop this once we are Python 3-only.
      'EPYTHON': interp,
      'PYTHONPATH': ':'.join(_GetPythonPath([path])),
  }
  return _LinterRunCommand(cmd, debug, extra_env=extra_env)
def _Pylint2File(path, output_format, debug):
  """Returns result of running pylint via python2 on |path|."""
  return _PylintFile(path, output_format, debug, interp='python2')
def _Pylint3File(path, output_format, debug):
  """Returns result of running pylint via python3 on |path|."""
  return _PylintFile(path, output_format, debug, interp='python3')
def _Pylint23File(path, output_format, debug):
  """Returns result of running pylint via python2 & python3 on |path|."""
  # Run both so the user sees all findings from each interpreter.
  result2 = _Pylint2File(path, output_format, debug)
  result3 = _Pylint3File(path, output_format, debug)
  # Caller only checks returncode atm.
  if result2.returncode:
    return result2
  return result3
def _PylintProbeFile(path, output_format, debug):
  """Returns result of running pylint based on the interp.

  Sniffs the file's shebang (first 128 bytes) to pick the right Python
  version; falls back to Python 2 when nothing can be determined.
  """
  try:
    with open(path, 'rb') as fp:
      data = fp.read(128)
      if data.startswith(b'#!'):
        if b'python3' in data:
          return _Pylint3File(path, output_format, debug)
        elif b'python2' in data:
          return _Pylint2File(path, output_format, debug)
        elif b'python' in data:
          # Ambiguous 'python': lint under both interpreters.
          return _Pylint23File(path, output_format, debug)
  except IOError as e:
    # A missing file falls through to the default below; anything else
    # is a real error worth surfacing.
    if e.errno != errno.ENOENT:
      raise
  # TODO(vapier): Change the unknown default to Python 2+3 compat.
  return _Pylint2File(path, output_format, debug)
def _GolintFile(path, _, debug):
  """Returns result of running golint on |path|.

  Args:
    path: The Go file to lint.
    _: Unused output-format argument (golint has no such flag).
    debug: Whether to print out the linter command.

  Returns:
    A CommandResult object; a successful no-op result when golint is not
    installed.
  """
  # Try using golint if it exists.
  try:
    cmd = ['golint', '-set_exit_status', path]
    return _LinterRunCommand(cmd, debug)
  except cros_build_lib.RunCommandError:
    logging.notice('Install golint for additional go linting.')
    # Label the synthetic result with the tool we would have run (golint,
    # not gofmt) so diagnostics point at the right command.
    return cros_build_lib.CommandResult('golint "%s"' % path,
                                        returncode=0)
def _JsonLintFile(path, _output_format, _debug):
  """Returns result of running json lint checks on |path|.

  Validates that the file parses as JSON (after stripping '#' comment
  lines) and passes the basic whitespace checks.
  """
  result = cros_build_lib.CommandResult('python -mjson.tool "%s"' % path,
                                        returncode=0)
  data = osutils.ReadFile(path)
  # Strip off leading UTF-8 BOM if it exists.
  if data.startswith(u'\ufeff'):
    data = data[1:]
  # Strip out comments for JSON parsing.
  stripped_data = re.sub(r'^\s*#.*', '', data, flags=re.M)
  # See if it validates.
  try:
    json.loads(stripped_data)
  except ValueError as e:
    result.returncode = 1
    logging.notice('%s: %s', path, e)
  # Check whitespace.
  if not _WhiteSpaceLintData(path, data):
    result.returncode = 1
  return result
def _MarkdownLintFile(path, _output_format, _debug):
  """Returns result of running lint checks on |path|.

  Currently only the generic whitespace checks are applied to markdown.
  """
  data = osutils.ReadFile(path)
  # Check whitespace.
  passed = _WhiteSpaceLintData(path, data)
  return cros_build_lib.CommandResult('mdlint(internal) "%s"' % path,
                                      returncode=0 if passed else 1)
def _ShellLintFile(path, output_format, debug, gentoo_format=False):
  """Returns result of running lint checks on |path|.

  Args:
    path: The path to the script on which to run the linter.
    output_format: The format of the output that the linter should emit. See
      |SHLINT_OUTPUT_FORMAT_MAP|.
    debug: Whether to print out the linter command.
    gentoo_format: Whether to treat this file as an ebuild style script.

  Returns:
    A CommandResult object.
  """
  # TODO: Try using `checkbashisms`.
  # First do a plain bash syntax check; don't bother with shellcheck if
  # the script doesn't even parse.
  syntax_check = _LinterRunCommand(['bash', '-n', path], debug)
  if syntax_check.returncode != 0:
    return syntax_check
  # Try using shellcheck if it exists, with a preference towards finding it
  # inside the chroot. This is OK as it is statically linked.
  shellcheck = (
      osutils.Which('shellcheck', path='/usr/bin',
                    root=os.path.join(constants.SOURCE_ROOT, 'chroot'))
      or osutils.Which('shellcheck'))
  if not shellcheck:
    logging.notice('Install shellcheck for additional shell linting.')
    return syntax_check
  # Instruct shellcheck to run itself from the shell script's dir. Note that
  # 'SCRIPTDIR' is a special string that shellcheck rewrites to the dirname of
  # the given path.
  extra_checks = [
      'avoid-nullary-conditions',     # SC2244
      'check-unassigned-uppercase',   # Include uppercase in SC2154
      'require-variable-braces',      # SC2250
  ]
  if not gentoo_format:
    extra_checks.append('quote-safe-variables')  # SC2248
  cmd = [shellcheck, '--source-path=SCRIPTDIR',
         '--enable=%s' % ','.join(extra_checks)]
  if output_format != 'default':
    cmd.extend(SHLINT_OUTPUT_FORMAT_MAP[output_format])
  cmd.append('-x')
  if gentoo_format:
    # ebuilds don't explicitly export variables or contain a shebang.
    cmd.append('--exclude=SC2148')
    # ebuilds always use bash.
    cmd.append('--shell=bash')
  cmd.append(path)
  lint_result = _LinterRunCommand(cmd, debug)
  # During testing, we don't want to fail the linter for shellcheck errors,
  # so override the return code.
  if lint_result.returncode != 0:
    bug_url = (
        'https://bugs.chromium.org/p/chromium/issues/entry?' +
        urllib.parse.urlencode({
            'template':
                'Defect report from Developer',
            'summary':
                'Bad shellcheck warnings for %s' % os.path.basename(path),
            'components':
                'Infra>Client>ChromeOS>Build,',
            'cc':
                '<EMAIL>,<EMAIL>',
            'comment':
                'Shellcheck output from file:\n%s\n\n<paste output here>\n\n'
                "What is wrong with shellcheck's findings?\n" % path,
        }))
    logging.warning('Shellcheck found problems. These will eventually become '
                    'errors. If the shellcheck findings are not useful, '
                    'please file a bug at:\n%s', bug_url)
    lint_result.returncode = 0
  return lint_result
def _GentooShellLintFile(path, output_format, debug):
  """Run shell checks with Gentoo (ebuild/eclass) rules.

  Args:
    path: The ebuild-style script to lint.
    output_format: Output format understood by |SHLINT_OUTPUT_FORMAT_MAP|.
    debug: Whether to print out the linter command.

  Returns:
    A CommandResult object.
  """
  return _ShellLintFile(path, output_format, debug, gentoo_format=True)
def _BreakoutDataByLinter(map_to_return, path):
  """Maps a linter method to the content of the |path|.

  Args:
    map_to_return: Dict of linter function -> list of paths; updated in place.
    path: File to sniff for a shebang-based linter match.
  """
  # Detect by content of the file itself.
  try:
    with open(path, 'rb') as fp:
      # We read 128 bytes because that's the Linux kernel's current limit.
      # Look for BINPRM_BUF_SIZE in fs/binfmt_script.c.
      data = fp.read(128)
      if not data.startswith(b'#!'):
        # If the file doesn't have a shebang, nothing to do.
        return
      m = SHEBANG_RE.match(data)
      if m:
        prog = m.group(1)
        if prog == b'/usr/bin/env':
          # For env shebangs, the real interpreter is the first argument.
          prog = m.group(3)
        basename = os.path.basename(prog)
        if basename.startswith(b'python3'):
          pylint_list = map_to_return.setdefault(_Pylint3File, [])
          pylint_list.append(path)
        elif basename.startswith(b'python2'):
          pylint_list = map_to_return.setdefault(_Pylint2File, [])
          pylint_list.append(path)
        elif basename.startswith(b'python'):
          # Ambiguous 'python': register under both Python 2 and Python 3.
          pylint_list = map_to_return.setdefault(_Pylint2File, [])
          pylint_list.append(path)
          pylint_list = map_to_return.setdefault(_Pylint3File, [])
          pylint_list.append(path)
        elif basename in (b'sh', b'dash', b'bash'):
          shlint_list = map_to_return.setdefault(_ShellLintFile, [])
          shlint_list.append(path)
  except IOError as e:
    logging.debug('%s: reading initial data failed: %s', path, e)
# Map file extensions to a linter function.
# NOTE: LintCommand.Run may replace the '.py' entry at runtime based on the
# --py2/--py3/--py23 flags.
_EXT_TO_LINTER_MAP = {
    # Note these are defined to keep in line with cpplint.py. Technically, we
    # could include additional ones, but cpplint.py would just filter them out.
    frozenset({'.cc', '.cpp', '.h'}): _CpplintFile,
    frozenset({'.json'}): _JsonLintFile,
    frozenset({'.py'}): _PylintProbeFile,
    frozenset({'.go'}): _GolintFile,
    frozenset({'.sh'}): _ShellLintFile,
    frozenset({'.ebuild', '.eclass', '.bashrc'}): _GentooShellLintFile,
    frozenset({'.md'}): _MarkdownLintFile,
}
def _BreakoutFilesByLinter(files):
  """Maps a linter method to the list of files to lint."""
  map_to_return = {}
  for path in files:
    ext = os.path.splitext(path)[1]
    # First matching extension set wins.
    linter = next(
        (fn for exts, fn in _EXT_TO_LINTER_MAP.items() if ext in exts), None)
    if linter is not None:
      map_to_return.setdefault(linter, []).append(path)
    elif os.path.isfile(path):
      # No extension match; sniff the file's shebang instead.
      _BreakoutDataByLinter(map_to_return, path)
  return map_to_return
def _Dispatcher(errors, output_format, debug, linter, path):
"""Call |linter| on |path| and take care of coalescing exit codes/output."""
result = linter(path, output_format, debug)
if result.returncode:
with errors.get_lock():
errors.value += 1
@command.CommandDecorator('lint')
class LintCommand(command.CliCommand):
  """Run lint checks on the specified files."""

  EPILOG = """
Right now, only supports cpplint and pylint. We may also in the future
run other checks (e.g. pyflakes, etc.)
"""
  # The output formats supported by cros lint.
  OUTPUT_FORMATS = ('default', 'colorized', 'msvs', 'parseable')

  @classmethod
  def AddParser(cls, parser):
    """Register the --py*, --output and positional file arguments."""
    super(LintCommand, cls).AddParser(parser)
    parser.add_argument('--py2', dest='pyver', action='store_const',
                        const='py2',
                        help='Assume Python files are Python 2')
    parser.add_argument('--py3', dest='pyver', action='store_const',
                        const='py3',
                        help='Assume Python files are Python 3')
    parser.add_argument('--py23', dest='pyver', action='store_const',
                        const='py23',
                        help='Assume Python files are Python 2 & 3 compatible')
    parser.add_argument('files', help='Files to lint', nargs='*')
    parser.add_argument('--output', default='default',
                        choices=LintCommand.OUTPUT_FORMATS,
                        help='Output format to pass to the linters. Supported '
                        'formats are: default (no option is passed to the '
                        'linter), colorized, msvs (Visual Studio) and '
                        'parseable.')

  def Run(self):
    """Lint the requested files; exits 1 when any linter reports errors."""
    files = self.options.files
    if not files:
      # Running with no arguments is allowed to make the repo upload hook
      # simple, but print a warning so that if someone runs this manually
      # they are aware that nothing was linted.
      logging.warning('No files provided to lint. Doing nothing.')
    # NOTE: mutates the module-level map so _BreakoutFilesByLinter sees the
    # Python-version override requested on the command line.
    if self.options.pyver == 'py2':
      _EXT_TO_LINTER_MAP[frozenset({'.py'})] = _Pylint2File
    elif self.options.pyver == 'py3':
      _EXT_TO_LINTER_MAP[frozenset({'.py'})] = _Pylint3File
    elif self.options.pyver == 'py23':
      _EXT_TO_LINTER_MAP[frozenset({'.py'})] = _Pylint23File
    errors = multiprocessing.Value('i')
    linter_map = _BreakoutFilesByLinter(files)
    dispatcher = functools.partial(_Dispatcher, errors,
                                   self.options.output, self.options.debug)
    # Special case one file as it's common -- faster to avoid parallel startup.
    if not linter_map:
      return 0
    elif sum(len(x) for x in linter_map.values()) == 1:
      linter, files = next(iter(linter_map.items()))
      dispatcher(linter, files[0])
    else:
      # Run the linter in parallel on the files.
      with parallel.BackgroundTaskRunner(dispatcher) as q:
        for linter, files in linter_map.items():
          for path in files:
            q.put([linter, path])
    if errors.value:
      logging.error('Found lint errors in %i files.', errors.value)
      sys.exit(1)
|
import numpy as np
from gpsearch import recommend, custom_KDE, funmin
def mll(m_list, inputs, pts=None, y_list=None, t_list=None):
    """Mean log loss as defined in (23) of Merchant and Ramos, ICRA 2014.

    Parameters
    ----------
    m_list : list
        A list of GPy models generated by `OptimalDesign`.
    inputs : instance of `Inputs`
        The input space (unused here; kept for a uniform metric signature).
    pts : array_like
        Sampled points at which the models are evaluated.
    y_list : list
        Output values of the true map at `pts` for each recorded time.
    t_list : list
        Time instants corresponding to each model in `m_list`.

    Returns
    -------
    res : numpy array
        The MLL value for each model in `m_list`.
    """
    res = np.zeros(len(m_list))
    for ii, model in enumerate(m_list):
        # Augment the spatial points with the model's time stamp.
        stamp = t_list[ii] * np.ones((pts.shape[0], 1))
        mu, var = model.predict(np.hstack((pts, stamp)))
        mu, var = mu.flatten(), var.flatten()
        yy = y_list[ii].flatten()
        # Gaussian negative log-likelihood, averaged over points.
        res[ii] = 0.5 * np.mean(np.log(2 * np.pi * var) + (mu - yy) ** 2 / var)
    return res
def rmse(m_list, inputs, pts=None, y_list=None, t_list=None):
    """Root-mean-square error between GP model and objective function.

    Parameters
    ----------
    m_list : list
        A list of GPy models generated by `OptimalDesign`.
    inputs : instance of `Inputs`
        The input space (unused here; kept for a uniform metric signature).
    pts : array_like
        Sampled points used for RMSE computation.
    y_list : list
        Output values of the true map at `pts` for each recorded time.
    t_list : list
        Time instants corresponding to each model in `m_list`.

    Returns
    -------
    res : numpy array
        The RMSE value for each model in `m_list`.
    """
    res = np.zeros(len(m_list))
    for ii, model in enumerate(m_list):
        # Augment the spatial points with the model's time stamp.
        stamp = t_list[ii] * np.ones((pts.shape[0], 1))
        pred = model.predict(np.hstack((pts, stamp)))[0].flatten()
        err = pred - y_list[ii].flatten()
        res[ii] = np.sqrt(np.mean(err ** 2))
    return res
def log_pdf(m_list, inputs, pts=None, pt_list=None, clip=True, t_list=None):
    r"""Log-error between estimated pdf and true pdf.

    Parameters
    ----------
    m_list : list
        A list of GPy models generated by `OptimalDesign`.
    inputs : instance of `Inputs`
        The input space.
    pts : array_like
        Randomly sampled points used for KDE of the GP model.
    pt_list : list of instances of `FFTKDE`
        The true pdf for time instants corresponding to `record_time`.
    clip : boolean, optional
        Whether or not to clip the pdf values below machine-precision.
    t_list : list
        Time instants corresponding to each model in `m_list`.

    Returns
    -------
    res : list
        A list containing the values of the log-error for each model
        in `m_list`. The log-error is defined as
            e = \int | log(pdf_{GP}) - log(pdf_{true}) | dy
    """
    res = np.zeros(len(m_list))
    for ii, model in enumerate(m_list):
        time = t_list[ii]
        # Evaluate the model at `pts` augmented with this time stamp.
        aug_pts = np.hstack((pts, time*np.ones((pts.shape[0],1))))
        mu = model.predict(aug_pts)[0].flatten()
        # Weighted KDE of the predicted outputs (weights from input density).
        ww = inputs.pdf(pts)
        pb = custom_KDE(mu, weights=ww)
        pt = pt_list[ii]
        # Common evaluation grid spanning both supports, padded by 1%.
        x_min = min( pb.data.min(), pt.data.min() )
        x_max = max( pb.data.max(), pt.data.max() )
        rang = x_max-x_min
        x_eva = np.linspace(x_min - 0.01*rang,
                            x_max + 0.01*rang, 1024)
        yb, yt = pb.evaluate(x_eva), pt.evaluate(x_eva)
        log_yb, log_yt = np.log(yb), np.log(yt)
        if clip: # Clip to machine-precision
            np.clip(log_yb, -14, None, out=log_yb)
            np.clip(log_yt, -14, None, out=log_yt)
        log_diff = np.abs(log_yb-log_yt)
        # Integrate only over finite values (log of zero density is -inf).
        noInf = np.isfinite(log_diff)
        res[ii] = np.trapz(log_diff[noInf], x_eva[noInf])
    return res
def regret_tmap(m_list, inputs, true_ymin_list=None, tmap=None, t_list=None):
    """Immediate regret using objective function.

    Parameters
    ----------
    m_list : list
        A list of GPy models generated by `OptimalDesign`.
    inputs : instance of `Inputs`
        The input space.
    true_ymin_list : list
        The minimum values of the objective function arranged
        in a list for each time instant in `record_time`.
    tmap : instance of `BlackBox`
        The black box.
    t_list : list
        Time instants corresponding to each model in `m_list`.

    Returns
    -------
    res : list
        A list containing the values of the immediate regret for each
        model in `m_list` using the black-box objective function:
            $r(n) = f(x_n) - y_{true}$
        where f is the black box, x_n the algorithm recommendation at
        iteration n, and y_{true} the minimum of the objective function.
    """
    res = np.zeros(len(m_list))
    for ii, model in enumerate(m_list):
        time = t_list[ii]
        x_min = recommend(model, inputs, time)
        # Evaluate the black box at this time, without observation noise.
        tmap.kwargs["time"] = time
        y_min = tmap.evaluate(x_min, include_noise=False)
        res[ii] = y_min - true_ymin_list[ii]
    return res
def distmin_model(m_list, inputs, true_xmin_list=None, t_list=None):
    r"""Distance to minimum using surrogate GP model.

    Parameters
    ----------
    m_list : list
        A list of GPy models generated by `OptimalDesign`.
    inputs : instance of `Inputs`
        The input space.
    true_xmin_list : list
        For each time instant in `record_time`, the locations of the
        global minima of the objective function.
    t_list : list
        Time instants corresponding to each model in `m_list`.

    Returns
    -------
    res : numpy array
        For each model, $\ell(n) = \min_j \Vert x_n - x_{true,j} \Vert$,
        where x_n is the algorithm recommendation at iteration n. When
        more than one global minimum exists, the distance to each is
        computed and the smallest value reported.
    """
    res = np.zeros(len(m_list))
    for ii, model in enumerate(m_list):
        x_rec = recommend(model, inputs, t_list[ii])
        res[ii] = min(np.linalg.norm(x_rec - xm)
                      for xm in true_xmin_list[ii])
    return res
def recommend(model, inputs, time, num_restarts=10, parallel_restarts=False):
    """Compute recommendation for where minimum is located.

    NOTE(review): this definition shadows the `recommend` imported from
    gpsearch at the top of this module -- confirm the shadowing is
    intentional.

    Parameters
    ----------
    model : instance of `GPRegression`
        A GPy model.
    inputs : instance of `Inputs`
        The input space.
    time : float
        Time stamp appended to each candidate point before evaluating
        the model's posterior mean.
    num_restarts : int, optional
        Number of restarts for the optimizer.
    parallel_restarts : boolean, optional
        Whether or not to solve the optimization problems in parallel.

    Returns
    -------
    x_min : array
        The recommendation for where the GP model believes the global
        minimum is located.
    """
    if parallel_restarts:
        # NOTE(review): `set_worker_env` is not defined or imported anywhere
        # visible in this module -- this branch looks like it would raise
        # NameError; confirm where set_worker_env is supposed to come from.
        set_worker_env()
    x_min = funmin(compute_mean,
                   compute_mean_jac,
                   inputs,
                   args=(model, time),
                   num_restarts=num_restarts,
                   parallel_restarts=parallel_restarts,
                   init_method="sample_fun")
    return x_min
def compute_mean(x, model, time):
    """Posterior mean of `model` at points `x` augmented with `time`."""
    pts = np.atleast_2d(x)
    time_col = time * np.ones((pts.shape[0], 1))
    mu, _ = model.predict(np.hstack((pts, time_col)))
    return mu.flatten()
def compute_mean_jac(x, model, time):
    """Gradient of the posterior mean w.r.t. the first two coordinates of `x`."""
    pts = np.atleast_2d(x)
    time_col = time * np.ones((pts.shape[0], 1))
    mu_jac, _ = model.predictive_gradients(np.hstack((pts, time_col)))
    # Keep only derivative components 0:2 (drops the appended time column).
    return mu_jac[:, 0:2, 0]
|
<filename>tools/validators/instance_validator/validate/handler.py<gh_stars>0
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validation Helper."""
from __future__ import print_function

from datetime import datetime
import sys
from typing import Callable, Dict, List, Optional, Tuple

from validate import entity_instance
from validate import generate_universe
from validate import instance_parser
from validate import subscriber
from validate import telemetry_validator
from yamlformat.validator import presubmit_validate_types_lib as pvt
def Deserialize(
    yaml_files: List[str]
) -> Tuple[Dict[str, entity_instance.EntityInstance],
           instance_parser.ConfigMode]:
  """Parses a yaml configuration file and deserializes it.

  Args:
    yaml_files: list of building configuration files.

  Returns:
    A tuple of (map of entity name to EntityInstance, the parsed files'
    config mode).  Note: the previous annotation claimed only the entity
    map was returned; callers actually unpack both values.
  """
  print('Validating syntax please wait ...')
  parser = instance_parser.InstanceParser()
  for yaml_file in yaml_files:
    print('Opening file: {0}, please wait ...'.format(yaml_file))
    parser.AddFile(yaml_file)
  parser.Finalize()
  # In UPDATE mode, entities without an explicit operation are updates.
  default_entity_operation = instance_parser.EntityOperation.ADD
  if parser.GetConfigMode() == instance_parser.ConfigMode.UPDATE:
    default_entity_operation = instance_parser.EntityOperation.UPDATE
  entities = {}
  for entity_name, entity_yaml in parser.GetEntities().items():
    entities[entity_name] = entity_instance.EntityInstance.FromYaml(
        entity_yaml, default_entity_operation)
  return entities, parser.GetConfigMode()
def _ValidateConfig(
    filenames: List[str],
    universe: pvt.ConfigUniverse) -> List[entity_instance.EntityInstance]:
  """Runs all config validation checks.

  Args:
    filenames: building configuration files to validate.
    universe: the ontology universe to validate the entities against.

  Returns:
    The result of EntityHelper.Validate (EntityHelper is defined later in
    this module -- presumably the validated entities; confirm against
    the class definition).
  """
  print('\nLoading config files...\n')
  entities, config_mode = Deserialize(filenames)
  print('\nStarting config validation...\n')
  helper = EntityHelper(universe)
  return helper.Validate(entities, config_mode)
def _ValidateTelemetry(subscription: str, service_account: str,
                       entities: Dict[str, entity_instance.EntityInstance],
                       report_filename: str, timeout: int) -> None:
  """Runs all telemetry validation checks."""
  TelemetryHelper(subscription, service_account).Validate(
      entities, report_filename, timeout)
def RunValidation(filenames: List[str],
                  modified_types_filepath: str = None,
                  subscription: str = None,
                  service_account: str = None,
                  report_filename: str = None,
                  timeout: int = 60) -> None:
  """Master runner for all validations.

  Args:
    filenames: building configuration files to validate.
    modified_types_filepath: optional path to a modified ontology.
    subscription: Pub/Sub subscription for telemetry validation; requires
      service_account, and vice versa.
    service_account: path to a service-account credentials file.
    report_filename: where to write the telemetry validation report.
    timeout: seconds to wait for telemetry messages.
  """
  if bool(subscription) != bool(service_account):
    print('Subscription and a service account file are '
          'both needed for the telemetry validation!')
    # NOTE(review): exits with status 0 on a usage error, which reads as
    # success to shells/CI -- confirm whether a non-zero code was intended.
    sys.exit(0)
  print('\nStarting validator...\n')
  print('\nStarting universe generation...\n')
  universe = generate_universe.BuildUniverse(modified_types_filepath)
  if not universe:
    print('\nError generating universe')
    # NOTE(review): same concern -- exit code 0 on a failure path.
    sys.exit(0)
  print('\nStarting config validation...\n')
  entities = _ValidateConfig(filenames, universe)
  if subscription:
    print('\nStarting telemetry validation...\n')
    _ValidateTelemetry(subscription, service_account, entities, report_filename,
                       timeout)
class TelemetryHelper(object):
  """A validation helper to encapsulate telemetry validation.

  Attributes:
    subscription: resource string referencing the subscription to check
    service_account_file: path to file with service account information
  """

  def __init__(self, subscription, service_account_file):
    super().__init__()
    self.subscription = subscription
    self.service_account_file = service_account_file

  def Validate(self, entities: Dict[str, entity_instance.EntityInstance],
               report_filename: str, timeout: int) -> None:
    """Validates telemetry payload received from the subscription.

    Blocks listening on the subscription until the validator's timer or
    completion callback fires (the callback calls sys.exit).

    Args:
      entities: EntityInstance dictionary keyed by entity name
      report_filename: path to write results to
      timeout: number of seconds to wait for telemetry
    """
    print('Connecting to pubsub subscription: ', self.subscription)
    sub = subscriber.Subscriber(self.subscription, self.service_account_file)
    validator = telemetry_validator.TelemetryValidator(
        entities, timeout,
        self.BuildTelemetryValidationCallback(report_filename))
    validator.StartTimer()
    sub.Listen(validator.ValidateMessage)

  def BuildTelemetryValidationCallback(
      self,
      report_filename: Optional[str] = None
  ) -> Callable[[telemetry_validator.TelemetryValidator], None]:
    """Returns a callback to be called when a telemetry message is received.

    Args:
      report_filename: path to write results to; when None the report is
        printed to stdout instead.
    """

    def TelemetryValidationCallback(
        validator: telemetry_validator.TelemetryValidator) -> None:
      """Callback when the telemetry validator finishes.

      This could be called due to a timeout or because telemetry messages were
      received and validated for every expected entity.

      Args:
        validator: the telemetry validator that triggered the callback.
      """
      print('Generating validation report ...')
      current_time = datetime.now()
      timestamp = current_time.strftime('%d-%b-%Y (%H:%M:%S)')
      report = '\nReport Generated at: {0}\n'.format(timestamp)
      if not validator.AllEntitiesValidated():
        report += ('No telemetry message was received for the following '
                   'entities:')
        report += '\n'
        for entity_name in validator.GetUnvalidatedEntityNames():
          report += ' {0}\n'.format(entity_name)
      report += '\nTelemetry validation errors:\n'
      for error in validator.GetErrors():
        report += error.GetPrintableMessage()
      report += '\nTelemetry validation warnings:\n'
      for warnings in validator.GetWarnings():
        report += warnings.GetPrintableMessage()
      if report_filename:
        # Bug fix: report_filename is a closure variable, not an instance
        # attribute -- `self.report_filename` raised AttributeError whenever a
        # report file was requested.  The `with` block also closes the file, so
        # the explicit close() was redundant and has been dropped.
        with open(report_filename, 'w') as f:
          f.write(report)
      else:
        print('\n')
        print(report)
      print('Report Generated')
      # Terminate the process once the report is out; Listen() blocks forever.
      sys.exit(0)

    return TelemetryValidationCallback
class EntityHelper(object):
  """A validation helper to coordinate the various steps of the validation.

  Attributes:
    universe: ConfigUniverse to validate against
  """

  def __init__(self, universe: pvt.ConfigUniverse):
    super().__init__()
    self.universe = universe

  def Validate(
      self, entities: Dict[str, entity_instance.EntityInstance],
      config_mode: instance_parser.ConfigMode
  ) -> Dict[str, entity_instance.EntityInstance]:
    """Validates entity instances that are already deserialized.

    Args:
      entities: entity instances keyed by entity name
      config_mode: processing mode of the configuration

    Returns:
      A dictionary containing valid entities by name

    Raises:
      SyntaxError: If no building is found in the config
    """
    print('Validating entities ...')
    building_found = False
    valid_entities = {}
    validator = entity_instance.CombinationValidator(self.universe, config_mode,
                                                     entities)
    for entity_name, current_entity in entities.items():
      if (current_entity.operation is not instance_parser.EntityOperation.DELETE
          and current_entity.type_name.lower() == 'building'):
        building_found = True
      if not validator.Validate(current_entity):
        print(entity_name, 'is not a valid instance')
        continue
      # Bug fix: previously stored the `entity_instance` *module* instead of
      # the validated instance, so the returned dict was useless to callers.
      valid_entities[entity_name] = current_entity
    if not building_found:
      print('Config must contain a non-deleted entity with a building type')
      raise SyntaxError('Building Config must contain an '
                        'entity with a building type')
    print('All entities validated')
    return valid_entities
|
<reponame>pulsar-chem/BPModule
#!/usr/bin/env python3
import os
import sys
import traceback
import array
# Add the pulsar path
thispath = os.path.dirname(os.path.realpath(__file__))
psrpath = os.path.join(os.path.dirname(thispath), "../", "modules")
parent = os.path.join(os.path.dirname(thispath))
sys.path.insert(0, psrpath)
sys.path.insert(0, parent)
from pulsar.testing import *
from pulsar.system import *
from pulsar.datastore import *
from pulsar.output import *
from helper.TestAtoms import water
def CompareList(lst1, lst2, tol):
    """Return True when both lists have equal length and agree element-wise
    within the absolute tolerance `tol`."""
    if len(lst1) != len(lst2):
        return False
    return all(abs(a - b) <= tol for a, b in zip(lst1, lst2))
def Run(mm):
    """Smoke-test the LibERD ERI module on an STO-3G water system.

    Loads the module through the ModuleAdministrator `mm`, computes a few
    two-electron integral batches, and compares them against hard-coded
    reference values.
    """
    try:
        out = get_global_output()

        # Load the python modules
        #             supermodule   module name   key
        mm.load_module("LibERD", "LibERD_ERI", "ERI")
        mm.print(out)
        mm.sanity_check()

        atoms = list(water)
        u = AtomSetUniverse()
        for a in atoms:
            u.insert(a)
        s = System(u, True)
        s = apply_single_basis("Primary", "sto-3g", s)

        eri = mm.get_module("ERI", 0)
        eri.enable_debug(True)
        iwfn = Wavefunction()
        iwfn.system = s
        eri.SetInitialWfn(iwfn)
        eri.SetBases("Primary", "Primary", "Primary", "Primary")

        tester = Tester("Testing basic ERI")
        tester.print_header()

        # Reference integrals, labelled (bra | ket) by shell components.
        ref_0000 = [ 4.78506540470550323e+00 ]
        ref_1000 = [ 7.41380351973407792e-01, # ( s s | s s )
                     0.00000000000000000e+00, # ( px s | s s )
                     0.00000000000000000e+00, # ( py s | s s )
                     8.20213813474668284e-18 ] # ( pz s | s s )
        ref_1010 = [ 1.36873385354388311e-01, # ( s s | s s )
                     0.00000000000000000e+00, # ( s s | px s )
                     0.00000000000000000e+00, # ( s s | py s )
                     5.83811792070188069e-19, # ( s s | pz s )
                     0.00000000000000000e+00, # ( px s | s s )
                     0.00000000000000000e+00, # ( py s | s s )
                     5.83811792070188069e-19, # ( pz s | s s )
                     2.44774122580992542e-02, # ( px s | px s )
                     0.00000000000000000e+00, # ( px s | py s )
                     0.00000000000000000e+00, # ( px s | pz s )
                     0.00000000000000000e+00, # ( py s | px s )
                     2.44774122580992542e-02, # ( py s | py s )
                     0.00000000000000000e+00, # ( py s | pz s )
                     0.00000000000000000e+00, # ( pz s | px s )
                     0.00000000000000000e+00, # ( pz s | py s )
                     2.44774122580992542e-02 ] # ( pz s | pz s )

        # Multi will calculate 0000, 0010, 1000, 1010
        # we take advantage of some permutational symmetry: ref_1000 = ref_0010
        ref_multi = list(ref_0000) + list(ref_1000) + list(ref_1000) + list(ref_1010)

        outbuf = array.array('d', [0]*1024)
        n = eri.calculate(0, 0, 0, 0, 0, outbuf)
        tester.test_value("Number of integrals for 0000", 1, n)
        tester.test_value("Values for 0000 integrals", True, CompareList(list(outbuf[:n]), ref_0000, 1e-15))
        n = eri.calculate(0, 1, 0, 0, 0, outbuf)
        tester.test_value("Number of integrals for 1000", 4, n)
        tester.test_value("Values for 1000 integrals", True, CompareList(list(outbuf[:n]), ref_1000, 1e-15))
        n = eri.calculate(0, 1, 0, 1, 0, outbuf)
        tester.test_value("Number of integrals for 1010", 16, n)
        tester.test_value("Values for 1010 integrals", True, CompareList(list(outbuf[:n]), ref_1010, 1e-15))
        n = eri.calculateMulti(0, [0, 1], [ 0 ], [0, 1], [ 0 ], outbuf)
        tester.test_value("Number of integrals for calculate multi", 25, n)
        tester.test_value("Values from calculateMulti", True, CompareList(list(outbuf[:n]), ref_multi, 1e-15))
        tester.print_results()

    except Exception:
        # The exception object itself was bound but unused; traceback prints
        # the full details, so the name binding has been removed.
        print_global_output("Caught exception in main handler\n")
        traceback.print_exc()
# Script entry: bring up pulsar, run the test through a ModuleAdministrator,
# then shut down.
# NOTE(review): `psr` is not imported by name in this file; it presumably
# comes from one of the wildcard `from pulsar.* import *` imports -- confirm.
psr.initialize(sys.argv, color = True, debug = True)
with psr.ModuleAdministrator() as mm:
    Run(mm)
psr.finalize()
|
<filename>pipeline/core/data/context.py
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from copy import deepcopy
from pprint import pformat
from pipeline.exceptions import ReferenceNotExistError, InvalidOperationException
class Context(object):
    """Global variable scope shared by the activities of one pipeline run.

    Tracks: the variable pool itself, the mapping from activity outputs to
    global keys (act_outputs), the set of keys changed since the last sync,
    and the keys that must be written to the pipeline's own outputs.
    """

    def __init__(self, act_outputs, output_key=None, scope=None):
        # variables: global variable pool; seeded from `scope` when given.
        self.variables = scope or {}
        # act_outputs: {activity_id: {activity output key: global key}}
        self.act_outputs = act_outputs
        self._output_key = set(output_key or [])
        self._change_keys = set()
        self._raw_variables = None

    def extract_output(self, activity):
        """Pulls a finished activity's outputs into the global pool."""
        self.extract_output_from_data(activity.id, activity.data)

    def extract_output_from_data(self, activity_id, data):
        """Maps `data`'s outputs into global variables per act_outputs."""
        if activity_id in self.act_outputs:
            global_outputs = self.act_outputs[activity_id]
            output = data.get_outputs()
            for key in global_outputs:
                # set value to key if can not find
                # e.g. key: result
                # e.g. global_outputs[key]: result_5hoi2
                self.variables[global_outputs[key]] = output.get(key, global_outputs[key])
                self.change_keys.add(global_outputs[key])

    def get(self, key):
        """Returns the variable for `key`; raises ReferenceNotExistError."""
        try:
            return self.variables[key]
        except KeyError:
            raise ReferenceNotExistError('reference "%s" does not exist.' % key)

    def set_global_var(self, key, val):
        # Record the change so sync_change() can propagate it.
        self.variables[key] = val
        self.change_keys.add(key)

    def update_global_var(self, var_dict):
        self.variables.update(var_dict)
        self.change_keys.update(var_dict.keys())

    def mark_as_output(self, key):
        self._output_key.add(key)

    def write_output(self, pipeline):
        """Resolves each marked output key and writes it to pipeline data."""
        from pipeline.core.data import var
        data = pipeline.data
        for key in self._output_key:
            value = self.get(key)
            # Resolve lazy Variable wrappers to their concrete value.
            if issubclass(value.__class__, var.Variable):
                value = value.get()
                # break circle
            data.set_outputs(key, value)

    def duplicate_variables(self):
        """Snapshots current variables for later recover_variable()."""
        self._raw_variables = deepcopy(self.variables)

    def clear(self):
        self.variables.clear()
        if self.raw_variables:
            self.raw_variables.clear()

    def recover_variable(self):
        """Restores snapshotted variables whose keys are not activity outputs.

        Raises:
            InvalidOperationException: when no snapshot was taken first.
        """
        if self.raw_variables is None:
            raise InvalidOperationException('make sure duplicate_variables() is called before do recover')
        # collect all act output key
        act_outputs_keys = set()
        for global_outputs in self.act_outputs.values():
            for output_key in global_outputs.values():
                act_outputs_keys.add(output_key)
        # recover to Variable for which key not in act output
        for key, var in self.raw_variables.items():
            if key not in act_outputs_keys:
                self.variables[key] = deepcopy(var)

    def clear_change_keys(self):
        # hasattr guard mirrors the lazy property below.
        if hasattr(self, '_change_keys'):
            self.change_keys.clear()

    def sync_change(self, context):
        """Copies another context's changed variables into this one."""
        for k in context.change_keys:
            self.set_global_var(k, context.get(k))

    def __repr__(self):
        return 'variables:{}\nact_outputs:{}\n_output_key:{}'.format(
            pformat(self.variables),
            pformat(self.act_outputs),
            pformat(self._output_key)
        )

    def __str__(self):
        return self.__repr__()

    def __unicode__(self):
        # Python 2 compatibility.
        return self.__repr__()

    @property
    def change_keys(self):
        # Lazily created -- presumably to tolerate instances deserialized
        # from older versions that lack the attribute; TODO confirm.
        if not hasattr(self, '_change_keys'):
            self._change_keys = set()
        return self._change_keys

    @property
    def raw_variables(self):
        # Same lazy-attribute guard as change_keys above.
        if not hasattr(self, '_raw_variables'):
            self._raw_variables = None
        return self._raw_variables
class OutputRef(object):
    """Lazy reference to a value stored in a Context.

    The value is looked up from the context at access time, so the ref always
    reflects the context's current state.
    """

    def __init__(self, key, context):
        self.key = key
        self.context = context

    @property
    def value(self):
        """The current value of `key` in the backing context."""
        return self.context.get(self.key)

    def __deepcopy__(self, memodict=None):
        # Bug fix: the default was the mutable `memodict={}` (shared across
        # calls).  The memo is unused, but `None` is the safe default.
        # Refs are deliberately shared, never copied, so deep copies of
        # containers still resolve against the same live context.
        return self
|
import numpy as np
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
from gridworld.algorithms.models import layer_init
class MLP2(nn.Module):
    """Fully-connected trunk with one or more linear output heads.

    forward() flattens the input to (batch, -1), applies two ReLU layers,
    and returns a list of per-head tensors of shape (batch, 1, feature_dim).
    """
    def __init__(self, input_dim, hidden_dim=64, feature_dim=64, num_outputs=1):
        super().__init__()
        self.fc_1 = layer_init(nn.Linear(input_dim, hidden_dim))
        self.fc_2 = layer_init(nn.Linear(hidden_dim, hidden_dim))
        # NOTE(review): fc_3 and fc_4 are registered (and add parameters) but
        # are never used by forward() -- fc_3's application is commented out
        # below.  Kept to preserve the state_dict layout of old checkpoints.
        self.fc_3 = layer_init(nn.Linear(hidden_dim, hidden_dim))
        self.fc_4 = layer_init(nn.Linear(hidden_dim, hidden_dim))
        self.outfc0 = layer_init(nn.Linear(hidden_dim, feature_dim))
        self.out_fcs = [self.outfc0]
        if num_outputs >1:
            for i in range(1, num_outputs):
                fc = layer_init(nn.Linear(hidden_dim, feature_dim))
                # __setattr__ makes nn.Module register the extra head's
                # parameters under the name "outfc<i>".
                self.__setattr__("outfc{}".format(i), fc)
                self.out_fcs.append(fc)
    def forward(self, x):
        batch_size = x.shape[0]
        y = torch.reshape(x, (batch_size, -1))
        y = F.relu(self.fc_1(y))
        y = F.relu(self.fc_2(y))
        #y = F.relu(self.fc_3(y))
        outputs = []
        for fc in self.out_fcs:
            # unsqueeze(1) gives each head shape (batch, 1, feature_dim).
            h = fc(y).unsqueeze(1)
            outputs.append(h)
        return outputs
class CNN2(nn.Module):
    """Convolutional encoder producing a flat feature vector.

    forward() applies the convs in self.convs (conv1..conv3) with ReLU in
    between, flattens, and projects to feature_dim through fc_1.
    """
    def __init__(self, in_channels, out_channels=4, feature_dim=64, agent_centric = False):
        super().__init__()
        self.feature_dim = feature_dim
        #self.fc_out = out_channels*11*10#*22*20
        # NOTE(review): this if/else is dead code -- fc_out is unconditionally
        # overwritten by out_channels*3*3 a few lines below.  Left as-is.
        if agent_centric:
            self.fc_out = out_channels*22*20#*22*20
        else:
            self.fc_out = out_channels*21*20#*22*20
        self.fc_out = out_channels*3*3
        self.conv1 = layer_init(nn.Conv2d(in_channels, 16, kernel_size=3, stride=3))
        self.conv2 = layer_init(nn.Conv2d(16, 32, kernel_size=5, stride=2,padding=2))
        self.conv3 = layer_init(nn.Conv2d(32, 64, kernel_size=3, stride=2,padding=1))
        # NOTE(review): conv4 is constructed (parameters are registered) but
        # excluded from self.convs, so forward() never uses it.
        self.conv4 = layer_init(nn.Conv2d(64, out_channels, kernel_size=3, stride=1,padding=1))
        #self.conv5 = layer_init(nn.Conv2d(128, 256, kernel_size=3, stride=1,padding=1))
        #self.conv6 = layer_init(nn.Conv2d(256, 128, kernel_size=3, stride=1,padding=1))
        #self.conv7 = layer_init(nn.Conv2d(128, out_channels, kernel_size=3, stride=1,padding=1))
        # fc_1 expects a flattened conv3 output of out_channels*3*3 values;
        # this only lines up when out_channels == 64 (conv3 emits 64 channels)
        # and the input resolution yields a 3x3 spatial map -- TODO confirm.
        self.fc_1 = layer_init(nn.Linear(self.fc_out, feature_dim))
        self.convs = [self.conv1, self.conv2, self.conv3]#, self.conv4, self.conv7]
    def forward(self, x):
        batch_size = x.shape[0]
        y = self.conv1(x)
        for k in range(1,len(self.convs)):
            y = F.relu(y)
            y = self.convs[k](y)
            #print(y.shape)
        y = torch.reshape(y, (batch_size, -1))
        #print(y.shape)
        y = F.relu(y)
        y = self.fc_1(y)
        #print(y.shape)
        return y
class CompositeDotModelV3(nn.Module):
    """ [f(s_0, a), (g(s*_0, s*_T) -g(s_0, s_t))]"""
    def __init__(self, device=None,task_embedding_dim=256, relu=False):
        super().__init__()
        # g(.,.): encodes a (pre, post) image pair stacked on the channel dim.
        self.goal_cnn = CNN2(6, out_channels=64, feature_dim=task_embedding_dim)
        # f(.): encodes a single RGB image.
        self.image_cnn = CNN2(3, out_channels= 64, feature_dim=task_embedding_dim)
        self.mlp = MLP2(input_dim=task_embedding_dim*2, hidden_dim=256, feature_dim=6, num_outputs=6)
        self.relu = relu
    def get_goal_feat(self, pre_image, post_image):
        """Embedding of the change from pre_image to post_image."""
        goal = torch.cat((pre_image, post_image), dim=1)
        goal_feat = self.goal_cnn(goal)
        if self.relu:
            goal_feat = F.relu(goal_feat)
        return goal_feat
    def forward(self, first_image,image, pre_image=None, post_image=None, final_image = None, return_delta=False, goal_feat=None):
        # goal_feat may be supplied by the caller to skip the goal encoder;
        # otherwise pre_image/post_image must be given.
        if goal_feat is None:
            goal_feat = self.get_goal_feat(pre_image, post_image)
        back_goal = self.get_goal_feat(first_image, image)
        img_feat = self.image_cnn(image)
        # Condition on the *remaining* task: goal embedding minus progress.
        obs = torch.cat((img_feat, goal_feat-back_goal), dim=1)
        # Only the first MLP head is used; squeeze to (batch, feature_dim).
        actions = self.mlp(obs)[0].squeeze(1)
        if final_image is not None:
            final_features = self.get_goal_feat(first_image, final_image)
            return actions, goal_feat, final_features, back_goal
        if return_delta:
            return actions, goal_feat-back_goal
        return actions
class TaskEmbeddingModel(nn.Module):
    """ [f(s_0, a), (g(s*_0, s*_T) -g(s_0, s_t))]"""
    # NOTE(review): near-duplicate of CompositeDotModelV3; differs only in the
    # MLP input (img_feat, first_image_feat, goal_feat concatenated instead of
    # img_feat and the goal delta).
    def __init__(self, device=None,task_embedding_dim=256, relu=False):
        super().__init__()
        # g(.,.): encodes a (pre, post) image pair stacked on the channel dim.
        self.goal_cnn = CNN2(6, out_channels=64, feature_dim=task_embedding_dim)
        # f(.): encodes a single RGB image.
        self.image_cnn = CNN2(3, out_channels= 64, feature_dim=task_embedding_dim)
        self.mlp = MLP2(input_dim=task_embedding_dim*3, hidden_dim=256, feature_dim=6, num_outputs=6)
        self.relu = relu
    def get_goal_feat(self, pre_image, post_image):
        """Embedding of the change from pre_image to post_image."""
        goal = torch.cat((pre_image, post_image), dim=1)
        goal_feat = self.goal_cnn(goal)
        if self.relu:
            goal_feat = F.relu(goal_feat)
        return goal_feat
    def forward(self, first_image,image, pre_image=None, post_image=None, final_image = None, return_delta=False, goal_feat=None):
        # goal_feat may be supplied by the caller to skip the goal encoder.
        if goal_feat is None:
            goal_feat = self.get_goal_feat(pre_image, post_image)
        back_goal = self.get_goal_feat(first_image, image)
        img_feat = self.image_cnn(image)
        first_image_feat = self.image_cnn(first_image)
        obs = torch.cat((img_feat, first_image_feat, goal_feat), dim=1)
        # Only the first MLP head is used; squeeze to (batch, feature_dim).
        actions = self.mlp(obs)[0].squeeze(1)
        if final_image is not None:
            final_features = self.get_goal_feat(first_image, final_image)
            return actions, goal_feat, final_features, back_goal
        if return_delta:
            return actions, goal_feat-back_goal
        return actions
class NaiveModel(nn.Module):
    """ [f(s_0, a), (g(s*_0, s*_T) -g(s_0, s_t))]"""
    # Baseline: all four images are stacked on the channel dim (4 x RGB = 12
    # channels) and encoded by a single CNN; no separate goal embedding.
    def __init__(self, device=None,task_embedding_dim=256, relu=False):
        super().__init__()
        self.image_cnn = CNN2(12, out_channels= 64, feature_dim=task_embedding_dim)
        self.mlp = MLP2(input_dim=task_embedding_dim, hidden_dim=256, feature_dim=6, num_outputs=6)
    def forward(self, first_image,image, pre_image, post_image):
        obs = torch.cat((pre_image, post_image, first_image, image), dim=1)
        obs = self.image_cnn(obs)
        # Only the first MLP head is used; squeeze to (batch, feature_dim).
        actions = self.mlp(obs)[0].squeeze(1)
        return actions
|
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email <EMAIL> or <EMAIL>.
#
"""trace class accepts a stream of incoming records (represented by
the record class) and produces the output in the form of an SVG image,
describing the stream.
Some assumptions are made about the input stream:
- all records are from the same process,
- the time-stamps of incoming records are monotonically
increasing. m0addb2dump output does not necessarily conform to this
restriction. This can always be fixed by doing
m0addb2dump -f | sort -k2.1,2.29 -s | m0addb2dump -d
- if a trace does not contain all records produced by a Motr process from
start-up to shutdown, certain corner cases (like re-use of the same
memory address for a new fom), can result in an incorrect
interpretation of a final portion of the trace.
Output layout.
The entirety of the output image is divided into vertical stripes,
corresponding to the localities, divided by 10px-wide vertical lines . The
number of localities is specified by the "loc_nr" parameter.
Vertical axis corresponds to time (from top to bottom). Horizontal dashed
lines mark time-steps with the granularity specified by the "step"
parameter.
In the area, corresponding to a locality, the foms, executed in this
locality are represented. Each fom is represented as a rectangle with fixed
width and with the height corresponding to the fom life-time. The left
border of this rectangle is marked by a black line, other borders are
transparent.
The interior of a fom rectangle is divided into 3 vertical "lanes". The
first lane is used for labels. When a fom is created, its type and address
are written to the label lane. When the fom experiences a phase transition,
the name of the new phase is written to the label lane.
The second lane, represents phases, marked by different colours.
The third lane contains states, marked by different colours.
The line at the left border of fom area can be missing if the final state
transition for the fom is missing from the log.
If fom phase state machine doesn't have transition descriptions, the phase
lane will be empty and phase labels will be missing.
By specifying "starttime" and "duration" parameters, view area can be
narrowed to produce more manageable images. When view area is narrowed, the
entire trace is still processed, but the SVG elements that would fall outside
of the visible image are omitted.
"""
import datetime
import svgwrite
class trace(object):
def __init__(self, width, height, loc_nr, duration, starttime = None,
step = 100, outname = "out.svg", maxfom = 20, verbosity = 0,
label = True):
self.timeformat = "%Y-%m-%d-%H:%M:%S.%f"
if starttime != None:
self.start = datetime.datetime.strptime(starttime, self.timeformat)
else:
self.start = None
self.prep = False
self.label = label
self.width = width
self.height = height
self.loc_nr = loc_nr
self.usec = duration * 1000000
self.step = step
self.verb = verbosity
self.maxfom = maxfom
self.out = svgwrite.Drawing(outname, profile='full', \
size = (str(width) + "px",
str(height) + "px"))
self.lmargin = width * 0.01
self.reqhwidth = width * 0.87
self.lockwidth = width * 0.01
self.netwidth = width * 0.05
self.iowidth = width * 0.05
self.rmargin = width * 0.01
assert (self.lmargin + self.reqhwidth + self.lockwidth + self.netwidth +
self.iowidth + self.rmargin == self.width)
self.reqhstart = self.lmargin
self.lockstart = self.reqhstart + self.reqhwidth
self.netstart = self.lockstart + self.lockwidth
self.iostart = self.netstart + self.netwidth
self.loc_width = self.reqhwidth / loc_nr
self.loc_margin = self.loc_width * 0.02
self.fom_width = (self.loc_width - 2*self.loc_margin) / self.maxfom
self.maxlane = 4
self.lane_margin = self.fom_width * 0.10
self.lane_width = (self.fom_width - 2*self.lane_margin) / self.maxlane
self.axis = svgwrite.rgb(0, 0, 0, '%')
self.locality = []
self.iomax = 128
self.iolane = (self.iowidth - 300) / self.iomax
self.iolane0 = self.iostart + 300
self.iolast = []
self.netmax = 128
self.netlane = (self.netwidth - 400) / self.netmax
self.netlane0 = self.netstart + 400
self.netlast = []
self.lockmax = 32
self.locklane = (self.lockwidth - 300) / self.lockmax
self.locklane0 = self.lockstart + 300
self.locks = {}
self.processed = 0
self.reported = 0
self.textstep = 15
self.scribbles = set()
self.foms = {}
self.dash = {
"stroke" : self.axis,
"stroke_width" : 1,
"stroke_dasharray" :"1,1"
}
self.warnedlabel = False
self.warnednet = False
self.warnedlock = False
self.warnedio = False
self.warnedfom = 0
for i in range(loc_nr):
x = self.getloc(i)
self.locality.append(locality(self, i))
self.line((x, 0), (x, height), stroke = self.axis, stroke_width = 10)
self.text("locality " + str(i), insert = (x + 10, 20))
for _ in range(self.iomax):
self.iolast.append(datetime.datetime(1970, 01, 01))
for _ in range(self.netmax):
self.netlast.append(datetime.datetime(1970, 01, 01))
self.line((self.lockstart - 10, 0), (self.lockstart - 10, height),
stroke = self.axis, stroke_width = 10)
self.text("lock", insert = (self.lockstart + 10, 20))
self.line((self.netstart - 10, 0), (self.netstart - 10, height),
stroke = self.axis, stroke_width = 10)
self.text("net", insert = (self.netstart + 10, 20))
self.line((self.iostart - 10, 0), (self.iostart - 10, height),
stroke = self.axis, stroke_width = 10)
self.text("io", insert = (self.iostart + 10, 20))
def done(self):
self.out.save()
def fomadd(self, fom):
self.locality[fom.getloc()].fomadd(fom)
def fomdel(self, fom):
self.locality[fom.getloc()].fomdel(fom)
def getloc(self, idx):
return self.reqhstart + self.loc_width * idx
def getlane(self, fom, lane):
assert 0 <= lane and lane < self.maxlane
return self.getloc(fom.loc.idx) + self.loc_margin + \
self.fom_width * fom.loc_idx + self.lane_margin + \
self.lane_width * lane
def getpos(self, stamp):
interval = stamp - self.start
usec = interval.microseconds + (interval.seconds +
interval.days * 24 * 3600) * 1000000
return self.height * usec / self.usec
def fomfind(self, rec):
addr = rec.get("fom")
f = self.foms.get(addr)
if f == None:
f = fom()
f.time = self.start
f.params = [None, None, None, None, None, rec.ctx["fom"][1]]
f.ctx = rec.ctx
f.done(self)
return f
@staticmethod
def getcolour(self, str):
seed = str + "^" + str
red = hash(seed + "r") % 90
green = hash(seed + "g") % 90
blue = hash(seed + "b") % 90
return svgwrite.rgb(red, green, blue, '%')
def fomcolour(self, fom):
return self.getcolour(fom.phase)
def fomrect(self, fom, lane, start, end):
start = self.getpos(start)
height = self.getpos(end) - start
lane = self.getlane(fom, lane)
return { "insert": (lane, start), "size": (self.lane_width, height) }
@staticmethod
def statecolour(self, fom):
state = fom.state
if state == "Init":
return svgwrite.rgb(100, 100, 0, '%')
elif state == "Ready":
return svgwrite.rgb(100, 0, 0, '%')
elif state == "Running":
return svgwrite.rgb(0, 100, 0, '%')
elif state == "Waiting":
return svgwrite.rgb(0, 0, 100, '%')
else:
return svgwrite.rgb(10, 10, 10, '%')
def rect(self, **kw):
y = kw["insert"][1]
h = kw["size"][1]
if y + h >= 0 and y < self.height:
self.out.add(self.out.rect(**kw))
def line(self, start, end, **kw):
if end[1] >= 0 and start[1] < self.height:
self.out.add(self.out.line(start, end, **kw))
def tline(self, start, end, **kw):
if self.label:
self.line(start, end, **kw)
def text(self, text, connect = False, force = False, **kw):
x = int(kw["insert"][0])
y0 = y = int(kw["insert"][1]) // self.textstep * self.textstep
if not self.label and not force:
return (x, y)
if y >= 0 and y < self.height:
i = 0
while (x, y // self.textstep) in self.scribbles:
y += self.textstep
if i > 30:
if not self.warnedlabel:
print "Labels are overcrowded. Increase image height."
self.warnedlabel = True
break
i += 1
kw["insert"] = (x + 10, y)
kw["font_family"] = "Courier"
self.out.add(self.out.text(text, **kw))
if connect:
self.line((x, y0), (x + 10, y - 4), **self.dash)
self.scribbles.add((x, y // self.textstep))
return x + 10 + len(text) * 9.5, y - 4 # magic. Depends on the font.
def fomtext(self, fom, text, time):
return self.text(text, insert = (self.getlane(fom, 0),
self.getpos(time)))
def prepare(self, time):
if self.start == None:
self.start = time
self.lastreport = self.start
duration = datetime.timedelta(microseconds = self.usec)
self.end = self.start + duration
delta = datetime.timedelta(milliseconds = self.step)
n = 0
while n*delta <= duration:
t = self.start + n * delta
y = self.getpos(t)
label = t.strftime(self.timeformat)
self.line((0, y), (self.width, y), stroke = self.axis,
stroke_width = 1, stroke_dasharray = "20,10,5,5,5,10")
self.text(label, insert = (0, y - 10), force = True)
n = n + 1
self.prep = True
def ioadd(self, time, fid, seconds):
duration = datetime.timedelta(microseconds = float(seconds) * 1000000)
start = time - duration
y0 = self.getpos(start)
y1 = self.getpos(time)
l0 = self.text("L " + fid, insert = (self.iostart, y0))
l1 = self.text("E " + fid, insert = (self.iostart, y1))
slot = next((i for i in range(len(self.iolast)) if
self.iolast[i] < start), None)
if slot != None:
x = self.iolane0 + self.iolane * slot
self.rect(insert = (x, y0), size = (self.iolane * 3/4, y1 - y0),
fill = self.getcolour(str(slot) + str(start)))
self.iolast[slot] = time
self.tline(l0, (x, y0), **self.dash)
self.tline(l1, (x, y1), **self.dash)
elif not self.warnedio:
self.warnedio = True
print "Too many concurrent IO-s. Increase iomax."
def netbufadd(self, time, buf, qtype, seconds, stime, status, length):
qname = [
"msg-recv",
"msg-send",
"p-bulk-recv",
"p-bulk-send",
"a-bulk-recv",
"a-bulk-send",
]
if qtype == 0:
return # skip receives
start = parsetime(stime)
duration = datetime.timedelta(microseconds = float(seconds) * 1000000)
dequeue = start + duration
assert start <= dequeue and dequeue <= time
y0 = self.getpos(start)
y1 = self.getpos(dequeue)
y2 = self.getpos(time)
l0 = self.text("Q " + buf + " " + qname[qtype] + " " + str(length),
insert = (self.netstart, y0))
l2 = self.text("C " + buf, insert = (self.netstart, y2))
slot = next((i for i in range(len(self.netlast)) if
self.netlast[i] < start), None)
if slot != None:
x = self.netlane0 + self.netlane * slot
self.rect(insert = (x, y0), size = (self.netlane * 3/4, y1 - y0),
fill = self.getcolour(qname[qtype]))
self.rect(insert = (x, y1), size = (self.netlane * 1/4, y2 - y1),
fill = self.getcolour("cb"))
self.netlast[slot] = time
self.tline(l0, (x, y0), **self.dash)
self.tline(l2, (x, y2), **self.dash)
elif not self.warnednet:
self.warnednet = True
print "Too many concurrent netbufs. Increase netmax."
def mutex(self, mname, label, time, seconds, addr):
duration = datetime.timedelta(microseconds = float(seconds) * 1000000)
start = time - duration
y0 = self.getpos(start)
y1 = self.getpos(time)
exists = addr in self.locks
if not exists:
if len(self.locks) >= self.lockmax:
if not self.warnedlock:
self.warnedlock = True
print "Too many locks. Increase lockmax."
return
self.locks[addr] = len(self.locks)
lane = self.locks[addr]
x = self.locklane0 + self.locklane * lane
if not exists:
ly = max(y0, 40)
self.tline((x, 0), (x, self.height), **self.dash)
l = self.text(mname + " " + str(addr), insert = (self.lockstart, ly))
self.tline(l, (x, ly), **self.dash)
self.rect(insert = (x, y0), size = (self.locklane * 3/4, y1 - y0),
fill = self.getcolour(label), stroke = self.axis)
class locality(object):
    """Bookkeeping for the foms currently executing in one locality.

    Maintains a slot index per live fom so each fom gets a stable horizontal
    position inside the locality stripe.
    """
    def __init__(self, trace, idx):
        self.trace = trace
        self.foms = {}
        self.idx = idx

    def fomadd(self, fom):
        trace = self.trace
        # Lowest free slot, or a brand-new slot past the current end.
        slot = next((i for i in range(len(self.foms)) if i not in self.foms),
                    len(self.foms))
        if slot > trace.maxfom:
            if trace.warnedfom < slot:
                print ("{}: too many concurrent foms, "
                       "increase maxfom to {}".format(fom.time, slot))
                trace.warnedfom = slot
        self.foms[slot] = fom
        fom.loc_idx = slot
        fom.loc = self

    def fomdel(self, fom):
        assert self.foms[fom.loc_idx] == fom
        self.foms.pop(fom.loc_idx)
def keep(word):
    # True when the word names a record tag we know how to render
    # (see the "tags" dispatch table at the bottom of the file).
    return word in tags
def parsetime(stamp):
    #
    # Hand-rolled fixed-width parser: strptime() is too expensive for the
    # volume of records processed.  Nanoseconds are truncated to microseconds.
    #
    # Layout: 2016-03-24-09:18:46.359427942
    #         01234567890123456789012345678
    year, month, day = stamp[0:4], stamp[5:7], stamp[8:10]
    hour, minute, second = stamp[11:13], stamp[14:16], stamp[17:19]
    usec = stamp[20:26]
    return datetime.datetime(int(year), int(month), int(day),
                             int(hour), int(minute), int(second), int(usec))
def parse(trace, words):
    """Turn a split log line into a record object, or None for unknown tags.

    The first word is the time-stamp, the second the record tag; the rest
    become the record's parameters.  The first parsed record also fixes the
    trace's time origin via trace.prepare().
    """
    stamp, tag = words[0], words[1]
    ctor = tags.get(tag)
    if ctor is None:
        return None
    obj = ctor()
    obj.ctx = {}
    obj.time = parsetime(stamp)
    obj.params = words[2:]
    obj.trace = trace
    if not trace.prep:
        trace.prepare(obj.time)
    return obj
class record(object):
    """Base class for one parsed addb2 record.

    Sub-classes interpret specific tags (see the "tags" table at the bottom
    of the file).  done() is invoked once the record and all its context
    labels have been read.
    """
    def add(self, words):
        # Attach a context label (e.g. "fom", "locality") to this record.
        key = words[0]
        val = words[1:]
        assert key not in self.ctx
        self.ctx[key] = val
        # NOTE(review): resets trace on every label; presumably to mark the
        # record as not-yet-done until done() runs -- confirm.
        self.trace = None
    def done(self, trace):
        # Record complete: count it and emit periodic progress when verbose.
        self.trace = trace
        trace.processed = trace.processed + 1
        if (trace.verb > 0 and
            self.time - trace.lastreport > datetime.timedelta(seconds = 1)):
            print self.time, trace.processed - trace.reported, trace.processed
            trace.lastreport = self.time
            trace.reported = trace.processed
    def get(self, label):
        # First value of a context label.
        return self.ctx[label][0]
    def fomexists(self):
        return "fom" in self.ctx
    def __str__(self):
        return str(self.time)
    def getloc(self):
        # Locality index of this record; forced to 0 for single-locality runs.
        loc = int(self.get("locality"))
        if self.trace.loc_nr == 1:
            loc = 0
        assert 0 <= loc and loc < self.trace.loc_nr
        return loc
class fstate(record):
    """Fom state transition record ("fom-state")."""
    def done(self, trace):
        state = self.params[2]
        super(fstate, self).done(trace)
        if self.fomexists():
            fom = trace.fomfind(self)
            # Close the previous state interval in the state lane (lane 3).
            trace.rect(fill = trace.statecolour(fom),
                       **trace.fomrect(fom, 3, fom.state_time, self.time))
            fom.state_time = self.time
            fom.state = state
            if state == "Finished":
                # Draw the fom's left border (life-line) and forget the fom so
                # its memory address can be re-used by a new fom.
                start = trace.getpos(fom.time)
                end = trace.getpos(self.time)
                lane = trace.getlane(fom, 0) - 5
                trace.line((lane, start), (lane, end), stroke = trace.axis,
                           stroke_width = 3)
                self.trace.fomdel(fom)
                del trace.foms[self.get("fom")]
class fphase(record):
    """Fom phase transition record ("fom-phase")."""
    def done(self, trace):
        super(fphase, self).done(trace)
        # Records without transition descriptions have a different param
        # count and are skipped (phase lane stays empty).
        if (len(self.params) in (2, 3) and self.fomexists()):
            fom = trace.fomfind(self)
            # Close the previous phase interval in the phase lane (lane 2).
            trace.rect(fill = trace.fomcolour(fom),
                       **trace.fomrect(fom, 2, fom.phase_time, self.time))
            l = trace.fomtext(fom, fom.phase, fom.phase_time)
            x = trace.getlane(fom, 1)
            if l[0] < x:
                # Dashed connector from the label to the phase lane.
                trace.tline(l, (x, l[1]), **trace.dash)
                trace.tline((x, l[1]),
                            (trace.getlane(fom, 2), trace.getpos(fom.phase_time)),
                            **trace.dash)
            fom.phase_time = self.time
            fom.phase = self.params[-1]
class fom(record):
    """Fom creation record ("fom-descr")."""
    def done(self, trace):
        addr = self.get("fom")
        assert "locality" in self.ctx
        # assert addr not in trace.foms
        # (commented out: the same address may be re-used by a new fom when
        # the final records of the previous one are missing from the trace)
        trace.foms[addr] = self
        super(fom, self).done(trace)
        self.loc_idx = -1
        self.trace.fomadd(self)
        self.state = "Ready"
        self.phase = "init"
        self.state_time = self.time
        self.phase_time = self.time
        # Label lane: fom type, address and locality.
        trace.fomtext(self, self.params[5] + str(addr) +
                      "[" + self.get("locality") + "]", self.time)
    def __str__(self):
        return str(self.time) + " " + self.get("fom")
class forq(record):
    """loc-forq-duration record: draws how long a locality's fork-queue item
    waited before being executed, as a vertical bar ending at this record's
    time."""
    def done(self, trace):
        if "locality" not in self.ctx:
            return # ast in 0-locality
        super(forq, self).done(trace)
        loc_id = self.getloc()
        nanoseconds = float(self.params[0][:-1]) # Cut final comma.
        # timedelta has no nanosecond resolution; convert ns -> us.
        duration = datetime.timedelta(microseconds = nanoseconds / 1000)
        # Bar from (now - duration) to now inside the locality's column.
        x = self.trace.getloc(loc_id) + 10
        y = self.trace.getpos(self.time - duration)
        trace.tline((x, y), (x, self.trace.getpos(self.time)),
                    stroke = svgwrite.rgb(80, 10, 10, '%'), stroke_width = 5)
        trace.text(self.params[1], connect = True, insert = (x + 10, y))
class ioend(record):
    """stob-io-end record: registers a finished storage I/O with the trace."""
    def done(self, trace):
        super(ioend, self).done(trace)
        # params[0] and params[2] carry a trailing comma that must be stripped.
        first = self.params[0][:-1]
        third = self.params[2][:-1]
        trace.ioadd(self.time, first, third)
class netbuf(record):
    """net-buf record: parses the alternating "label:" / value parameter list
    and registers a network-buffer event with the trace."""
    def done(self, trace):
        super(netbuf, self).done(trace)
        # Verify the expected label sequence so a format change is caught
        # early rather than silently mis-parsed.
        assert (self.params[0] == "buf:" and self.params[2] == "qtype:" and
                self.params[4] == "time:" and self.params[6] == "duration:"
                and self.params[8] == "status:" and self.params[10] == "len:")
        # Each value except the last ends with a comma that must be stripped.
        trace.netbufadd(self.time,
                        buf = self.params[1][:-1],
                        qtype = int(self.params[3][:-1]),
                        stime = self.params[5][:-1],
                        seconds = float(self.params[7][:-1]),
                        status = int(self.params[9][:-1]),
                        length = int(self.params[11])) # no comma: last one
class mutex(record):
    """Base class for mutex contention records; subclasses pick the mutex
    name and the interval kind ("wait"/"hold") via setname()."""
    def setname(self, mname, label):
        """Remember which mutex and which interval kind this record is for."""
        self.mname = mname
        self.label = label
    def done(self, trace):
        super(mutex, self).done(trace)
        # params[0] carries a trailing comma that must be stripped.
        amount = float(self.params[0][:-1])
        trace.mutex(self.mname, self.label, self.time, amount, self.params[1])
class rpcmachwait(mutex):
    """rpc-mach-wait record: interval waiting to acquire the rpc-mach mutex."""
    def done(self, trace):
        # setname must run before the base done(), which reads mname/label.
        self.setname("rpc-mach", "wait")
        super(rpcmachwait, self).done(trace)
class rpcmachhold(mutex):
    """rpc-mach-hold record: interval holding the rpc-mach mutex."""
    def done(self, trace):
        # setname must run before the base done(), which reads mname/label.
        self.setname("rpc-mach", "hold")
        super(rpcmachhold, self).done(trace)
# Maps a trace-line tag to the record subclass that parses and renders it.
tags = {
    "fom-descr" : fom,
    "fom-state" : fstate,
    "fom-phase" : fphase,
    "loc-forq-duration" : forq,
    "stob-io-end" : ioend,
    "net-buf" : netbuf,
    "rpc-mach-wait" : rpcmachwait,
    "rpc-mach-hold" : rpcmachhold
}
|
import logging
import json
import paho.mqtt.client as mqttc
from ioctlgw import version
from ioctlgw.componentstate import ComponentState
LOG = logging.getLogger(__name__)
class MqttConnector(object):
    """Bridges an IO-controller service to an MQTT broker.

    Maintains the broker connection, republishes board events and periodic
    service status, and forwards incoming "command" messages to the matching
    IO controller.
    """

    def __init__(self, service):
        self.service = service
        self.config = self.service.config
        self.mqtt_config = self.config["mqtt"]
        self.mqtt = mqttc.Client()
        self.mqtt_base_topic = self.mqtt_config["topic"]
        self.mqtt.on_connect = self.mqtt_on_connect
        self.mqtt.on_disconnect = self.mqtt_on_disconnect
        self.mqtt.on_message = self.mqtt_on_message
        self.mqtt.on_subscribe = self.mqtt_on_subscribe
        # MQTT status jobs: publish once immediately, then every ~10s.
        self.service.scheduler.add_job(self.publish_status)
        self.service.scheduler.add_job(self.publish_status, 'interval', seconds=10, jitter=5)

    def start(self):
        """Connect to the broker, subscribe to command topics and start the
        paho network loop in a background thread."""
        LOG.info("MQTT Starting")
        if "user" in self.mqtt_config and "pass" in self.mqtt_config:
            self.mqtt.username_pw_set(self.mqtt_config["user"], self.mqtt_config["pass"])
        mqtt_host = self.mqtt_config["host"]
        mqtt_port = self.mqtt_config["port"]
        LOG.info("MQTT Connecting to %s:%s", mqtt_host, mqtt_port)
        self.mqtt.connect(mqtt_host, mqtt_port, 60)
        # Subscribe to interesting MQTT topics
        topics = [
            "/boards/+/digitaloutput/+/command"
        ]
        for topic_suffix in topics:
            self.mqtt.subscribe(f"{self.mqtt_base_topic}{topic_suffix}")
        self.mqtt.loop_start()

    def mqtt_on_connect(self, client, data, flags, rc):
        LOG.info("MQTT Connected %s", rc)

    def mqtt_on_disconnect(self, client, userdata, rc):
        # rc == 0 means the client disconnected deliberately; any other code
        # is an unexpected drop (paho's loop will auto-reconnect).
        # BUG FIX: the original tested rc == 0 and logged "Unexpected" in
        # both branches.
        if rc == 0:
            LOG.info("MQTT disconnected")
        else:
            LOG.warning("Unexpected MQTT disconnection. Will auto-reconnect")

    def mqtt_on_subscribe(self, client, userdata, mid, gqos):
        LOG.info("MQTT Subscribed %s", mid)

    def mqtt_on_message(self, client, userdata, msg):
        """Validate an incoming command message and forward it to the
        matching IO controller."""
        LOG.info("MQTT Message %s %s", msg.topic, str(msg.payload))
        if msg.topic.startswith(self.mqtt_base_topic):
            # Strip "<base>/" prefix; remaining topic looks like
            # boards/<name>/digitaloutput/<num>/command
            topic = msg.topic[len(self.mqtt_base_topic) + 1:]
            parts = topic.split("/")
            # TODO: check number of parts
            controller_name = parts[1]
            component = parts[2]
            num = int(parts[3])
            # BUG FIX: check membership before indexing; the original indexed
            # the dict first and raised KeyError for unknown controllers.
            if controller_name not in self.service.controllers.keys():
                LOG.warning("Message for unknown iocontroller '%s'", controller_name)
                return
            iocontroller = self.service.controllers[controller_name]
            if component not in ["digitaloutput"]:
                LOG.warning("Message for unknown component '%s'", component)
                return
            if num > iocontroller.num_digital_outputs:
                LOG.warning("Output too high for this board: %s", num)
                return
            action = msg.payload.decode('utf-8').strip().upper()
            if action not in ["OFF", "ON"]:
                LOG.warning("Unsupported action '%s'", action)
                return
            LOG.debug("Requesting %s %s %s %s %s", iocontroller, controller_name, component, num, action)
            iocontroller.request_digitaloutput(ComponentState(component="digitaloutput", num=num, status=action))

    def mqtt_publish_message(self, suffix, payload, qos=0):
        """Publish *payload* under "<base topic>/<suffix>"."""
        topic = "%s/%s" % (self.mqtt_base_topic, suffix)
        # BUG FIX: honour the caller-supplied qos (the original hard-coded 0).
        self.mqtt.publish(topic=topic, payload=payload, qos=qos)
        LOG.info("%s %s", topic, payload)

    def board_connection_event(self, name, event):
        self.mqtt_publish_message(suffix=f"boards/{name}/connection", payload=event)

    def board_io_event(self, name, state):
        self.mqtt_publish_message(suffix=f"boards/{name}/{state.component}/{state.num}/status", payload=state.status)

    def board_status(self, name, raw_msg):
        # Placeholder: raw board status is not republished yet.
        assert True

    def publish_status(self):
        """Publish service version and uptime as JSON status messages."""
        status = {
            "version": version()
        }
        self.mqtt_publish_message(suffix="status", payload=json.dumps(status))
        uptime = {
            "minutes": self.service.uptime,
            "started": self.service.startup.isoformat()
        }
        self.mqtt_publish_message(suffix="uptime", payload=json.dumps(uptime))
|
import codecs
import game
from hacktools import common
def writeLine(out, pos, byte, line, functions):
    """Write one disassembled line to *out*.

    pos is rebased by the 16-byte header offset; if a function pointer
    targets the rebased position, its label is appended and consumed
    from *functions*.
    """
    pos -= 16
    function = functions.pop(pos) + " " if pos in functions else ""
    out.write(str(pos).zfill(5) + " 0x" + common.toHex(byte) + ": " + line + " " + function + "\n")
def run(filename, processed=False):
    """Disassemble an SPC script file into a human-readable analysis listing.

    filename:  script file name inside the SPC.NFP folder.
    processed: read from the work folder (True) or the extract folder
               (False); also switches 0x10 string decoding to the custom
               bigram table instead of Shift-JIS.
    Writes the listing to data/analyze_spc.txt.
    """
    infolder = "data/work_NFP/SPC.NFP/" if processed else "data/extract_NFP/SPC.NFP/"
    outfile = "data/analyze_spc.txt"
    tablefile = "data/table.txt"
    common.loadTable(tablefile)
    functions = {}
    # Invert the bigram table so character codes map back to bigrams.
    inversetable = {}
    for bigram, code in common.table.items():
        inversetable[code] = bigram
    common.logMessage("Analyzing", filename, "...")
    with codecs.open(outfile, "w", "utf-8") as out:
        out.write(filename + "\n")
        with common.Stream(infolder + filename, "rb") as f:
            f.seek(12) # "SCRP" + filesize + "CODE"
            codesize = f.readUInt()
            if codesize > 10:
                # The function table lives right after the code section.
                f.seek(16 + codesize + 8)
                while True:
                    function = f.readNullString()
                    if function == "":
                        break
                    # Read the pointers until we find 0
                    i = 0
                    while True:
                        pointer = f.readUInt()
                        if pointer == 0:
                            break
                        else:
                            # Several functions may target the same pointer;
                            # accumulate their labels comma-separated.
                            if pointer in functions:
                                functions[pointer] += "," + function + "#" + str(i)
                            else:
                                functions[pointer] = function + "#" + str(i)
                            i += 1
            # Walk the code section opcode by opcode.
            f.seek(16 + 6)
            while f.tell() < 16 + codesize - 2:
                pos = f.tell()
                byte = f.readByte()
                if byte == 0x10:
                    # 0x10: string opcode. Peek 2 bytes, then try (in order)
                    # table decoding, Shift-JIS, plain ASCII.
                    line = f.readBytes(2)
                    f.seek(-2, 1)
                    convert = ""
                    if processed:
                        sjislen = f.readUShort()
                        try:
                            i = 0
                            while i < sjislen - 1:
                                strbyte = f.readByte()
                                if strbyte in game.codes:
                                    # Control code: emit as <XX>.
                                    convert += "<" + common.toHex(strbyte) + ">"
                                    i += 1
                                else:
                                    # Two-byte character: look up the bigram.
                                    f.seek(-1, 1)
                                    char = common.toHex(f.readByte()) + common.toHex(f.readByte())
                                    convert += inversetable[char]
                                    i += 2
                        except KeyError:
                            # Unknown character code: fall back to SJIS/ASCII.
                            convert = ""
                    if convert != "":
                        line += "\"" + convert + "\" "
                    else:
                        f.seek(pos + 1)
                        sjis = game.readShiftJIS(f)
                        if sjis != "":
                            line += "\"" + sjis + "\" "
                        else:
                            # Not SJIS either: treat as length-prefixed ASCII.
                            f.seek(pos + 1)
                            asciilen = f.readUShort()
                            asciistr = f.read(asciilen - 1)
                            line += "\"" + asciistr.decode("ascii").replace("\r", "").replace("\n", "") + "\" "
                    line += f.readBytes(9)
                    writeLine(out, pos, byte, line, functions)
                elif byte == 0x15:
                    # 0x15: variable-length opcode; second byte is the count
                    # of 8-byte entries that follow.
                    line = f.readBytes(2)
                    f.seek(-1, 1)
                    bytelen = f.readByte()
                    for i in range(bytelen):
                        line += f.readBytes(8)
                    writeLine(out, pos, byte, line, functions)
                elif byte in game.spccodes:
                    # Known opcode with a fixed operand size.
                    writeLine(out, pos, byte, f.readBytes(game.spccodes[byte]), functions)
                else:
                    writeLine(out, pos, byte, "Unknown!", functions)
        # Any labels not consumed by writeLine point outside the listing.
        for k, v in functions.items():
            out.write("Missing function pointer " + str(k) + ": " + str(v) + "\n")
    common.logMessage("Done! Open", outfile)
|
#!/usr/bin/env python
# coding: utf-8
import torch.nn as nn
def init_weights(m):
    """Initialize the weights of a fully connected layer.

    Xavier-uniform for the weight matrix, constant 0.01 for the bias.
    Intended for use with ``nn.Module.apply``; non-Linear modules are
    left untouched.
    """
    # BUG FIX: isinstance() instead of type() == (also covers subclasses),
    # and guard against Linear layers built with bias=False, where m.bias
    # is None and the original crashed.
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            m.bias.data.fill_(0.01)
# autoencoder with hidden units 20, latent, 20
# Encoder
class Encoder_20(nn.Module):
    """Encoder half of the 20-unit autoencoder: num_inputs -> 20 -> code_dim."""
    def __init__(self, num_inputs, code_dim):
        super(Encoder_20, self).__init__()
        layers = [
            nn.Linear(num_inputs, 20),
            nn.ReLU(),
            nn.Linear(20, code_dim),
        ]
        self.encoder = nn.Sequential(*layers)
        self.encoder.apply(init_weights)
    def forward(self, x):
        """Map an input batch to latent codes."""
        return self.encoder(x)
# Decoder
class Decoder_20(nn.Module):
    """Decoder half of the 20-unit autoencoder: code_dim -> 20 -> num_inputs,
    with a sigmoid squashing the reconstruction into (0, 1)."""
    def __init__(self, num_inputs, code_dim):
        super(Decoder_20, self).__init__()
        layers = [
            nn.Linear(code_dim, 20),
            nn.ReLU(),
            nn.Linear(20, num_inputs),
            nn.Sigmoid(),
        ]
        self.decoder = nn.Sequential(*layers)
        self.decoder.apply(init_weights)
    def forward(self, x):
        """Reconstruct inputs from latent codes."""
        return self.decoder(x)
# Autoencoder
class autoencoder_20(nn.Module):
    """Autoencoder pairing Encoder_20 and Decoder_20; forward returns the
    tuple (latent code, reconstruction)."""
    def __init__(self, num_inputs, code_dim):
        super(autoencoder_20, self).__init__()
        self.encoder = Encoder_20(num_inputs, code_dim)
        self.decoder = Decoder_20(num_inputs, code_dim)
    def forward(self, x):
        """Encode x, decode the code, return both."""
        latent = self.encoder(x)
        reconstruction = self.decoder(latent)
        return latent, reconstruction
# autoencoder with hidden units 100, latent, 100
# Encoder
class Encoder_100(nn.Module):
    """Encoder half of the 100-unit autoencoder: num_inputs -> 100 -> code_dim."""
    def __init__(self, num_inputs, code_dim):
        super(Encoder_100, self).__init__()
        layers = [
            nn.Linear(num_inputs, 100),
            nn.ReLU(),
            nn.Linear(100, code_dim),
        ]
        self.encoder = nn.Sequential(*layers)
        self.encoder.apply(init_weights)
    def forward(self, x):
        """Map an input batch to latent codes."""
        return self.encoder(x)
# Decoder
class Decoder_100(nn.Module):
    """Decoder half of the 100-unit autoencoder: code_dim -> 100 -> num_inputs,
    with a sigmoid squashing the reconstruction into (0, 1)."""
    def __init__(self, num_inputs, code_dim):
        super(Decoder_100, self).__init__()
        layers = [
            nn.Linear(code_dim, 100),
            nn.ReLU(),
            nn.Linear(100, num_inputs),
            nn.Sigmoid(),
        ]
        self.decoder = nn.Sequential(*layers)
        self.decoder.apply(init_weights)
    def forward(self, x):
        """Reconstruct inputs from latent codes."""
        return self.decoder(x)
# Autoencoder
class autoencoder_100(nn.Module):
    """Autoencoder pairing Encoder_100 and Decoder_100; forward returns the
    tuple (latent code, reconstruction)."""
    def __init__(self, num_inputs, code_dim):
        super(autoencoder_100, self).__init__()
        self.encoder = Encoder_100(num_inputs, code_dim)
        self.decoder = Decoder_100(num_inputs, code_dim)
    def forward(self, x):
        """Encode x, decode the code, return both."""
        latent = self.encoder(x)
        reconstruction = self.decoder(latent)
        return latent, reconstruction
# autoencoder with hidden units 20, 10, latent, 10, 20
class Encoder_3(nn.Module):
    """Three-layer encoder: num_inputs -> 20 -> 10 -> code_dim."""
    def __init__(self, num_inputs, code_dim):
        super(Encoder_3, self).__init__()
        layers = [
            nn.Linear(num_inputs, 20),
            nn.ReLU(),
            nn.Linear(20, 10),
            nn.ReLU(),
            nn.Linear(10, code_dim),
        ]
        self.encoder = nn.Sequential(*layers)
        self.encoder.apply(init_weights)
    def forward(self, x):
        """Map an input batch to latent codes."""
        return self.encoder(x)
# Decoder
class Decoder_3(nn.Module):
    """Three-layer decoder: code_dim -> 10 -> 20 -> num_inputs, with a
    sigmoid squashing the reconstruction into (0, 1)."""
    def __init__(self, num_inputs, code_dim):
        super(Decoder_3, self).__init__()
        layers = [
            nn.Linear(code_dim, 10),
            nn.ReLU(),
            nn.Linear(10, 20),
            nn.ReLU(),
            nn.Linear(20, num_inputs),
            nn.Sigmoid(),
        ]
        self.decoder = nn.Sequential(*layers)
        self.decoder.apply(init_weights)
    def forward(self, x):
        """Reconstruct inputs from latent codes."""
        return self.decoder(x)
# Autoencoder
class autoencoder_3(nn.Module):
    """Autoencoder pairing Encoder_3 and Decoder_3; forward returns the
    tuple (latent code, reconstruction)."""
    def __init__(self, num_inputs, code_dim):
        super(autoencoder_3, self).__init__()
        self.encoder = Encoder_3(num_inputs, code_dim)
        self.decoder = Decoder_3(num_inputs, code_dim)
    def forward(self, x):
        """Encode x, decode the code, return both."""
        latent = self.encoder(x)
        reconstruction = self.decoder(latent)
        return latent, reconstruction
|
<reponame>TescaF/point_cloud_io
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion
import numpy as np
import math
def publish():
    """Publish a hard-coded ground-truth PoseStamped on 'pose_truth' at 10 Hz."""
    pub = rospy.Publisher('pose_truth', PoseStamped, queue_size=10)
    rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(10) # 10hz
    #pt = [0.21,-0.011,0.4,0.3,-0.6,-0.01]
    # Scissors_01_28 pt = [0.21,-0.011,0.4,0.3,-0.6,-0.01]
    #Shears_02_01 pt = [0.189,-0.015,0.4,-0.4,-0.6,-0.01]
    pt = [0.188,-0.015,0.4,-0.45,-0.6,-0.01]
    # Scissors_08_01 pt = [0.2,-0.012,0.4,0,-1,0]
    # Candidate point estimates: [label, [x, y, z]].
    ests = [['scissors_01_00000027', [0.024235617160797116,-0.011359463453292846,0.019534289836883545]],
            ['scissors_01_00000060', [0.0011834951639175398,-0.013148486614227295,-0.005846852660179138]],
            ['scissors_01_00000003', [0.024251672744750975,-0.011589790105819703,0.0003066921234130859]],
            ['shears_01_00000009', [-0.009251792550086976,-0.017923964738845825,0.010005302429199218]],
            ['shears_01_00000033', [-0.027354883074760434,-0.012586298942565919,0.031511585712432864]],
            ['shears_01_00000090', [-0.03358910477161407,-0.013879684925079346,-0.014482853412628173]]]
    # NOTE(review): this overwrites pt above — the earlier assignment is dead
    # code kept for reference. Direction is fixed to +z here.
    pt = ests[0][1] + [0,0,1]
    #pt[2] += 0.05
    pos = pose_from_vec(pt)
    pose = PoseStamped()
    pose.pose = pos
    pose.header.frame_id = "base_link"
    while not rospy.is_shutdown():
        pub.publish(pose)
        rate.sleep()
def pose_from_vec(waypoint):
    """Build a Pose at waypoint[0:3] whose x-axis points along waypoint[3:6].

    The orientation quaternion rotates the unit x-axis onto the (normalised)
    direction vector, using the half-vector construction; the two degenerate
    cases (parallel and anti-parallel) are handled explicitly.
    """
    pose = Pose()
    pose.position.x = waypoint[0]
    pose.position.y = waypoint[1]
    pose.position.z = waypoint[2]
    x_axis = [1, 0, 0]
    direction = np.array(waypoint[3:])
    direction = direction / np.linalg.norm(direction)
    if np.array_equal(x_axis, direction):
        # Already aligned: identity rotation.
        qw, qx, qy, qz = 1, 0, 0, 0
    elif np.array_equal(x_axis, np.negative(direction)):
        # Opposite direction: 180-degree turn about z.
        qw, qx, qy, qz = 0, 0, 0, 1
    else:
        halfway = [x_axis[0] + direction[0],
                   x_axis[1] + direction[1],
                   x_axis[2] + direction[2]]
        qw = np.dot(x_axis, halfway)
        qx, qy, qz = np.cross(x_axis, halfway)
    # Normalise the quaternion; guard against a zero norm.
    length = math.sqrt(qx * qx + qy * qy + qz * qz + qw * qw)
    if length == 0:
        length = 1
    pose.orientation.x = qx / length
    pose.orientation.y = qy / length
    pose.orientation.z = qz / length
    pose.orientation.w = qw / length
    return pose
if __name__ == '__main__':
    try:
        publish()
    except rospy.ROSInterruptException:
        # Raised by rospy when the node is shut down (e.g. Ctrl-C); exit quietly.
        pass
|
<reponame>steelee/minnow_max_maker
# Copyright (c) 2014 Intel Corporation, All Rights Reserved
# Author: <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numbers
import time
import numpy as np
import Image
import ImageDraw
import Adafruit_GPIO.Platform as Platform
import Adafruit_GPIO as GPIO
import Adafruit_GPIO.SPI as SPI
# Controller variants.
HX8357D=0xD
HX8357B=0xB
# Panel dimensions in pixels.
HX8357_TFTWIDTH=320
HX8357_TFTHEIGHT=480
# Command opcodes; HX8357B_-prefixed names are for the B variant only.
HX8357_NOP=0x00
HX8357_SWRESET=0x01
HX8357_RDDID=0x04
HX8357_RDDST=0x09
HX8357_RDPOWMODE=0x0A
HX8357_RDMADCTL=0x0B
HX8357_RDCOLMOD=0x0C
HX8357_RDDIM=0x0D
HX8357_RDDSDR=0x0F
HX8357_SLPIN=0x10
HX8357_SLPOUT=0x11
HX8357B_PTLON=0x12
HX8357B_NORON=0x13
HX8357_INVOFF=0x20
HX8357_INVON=0x21
HX8357_DISPOFF=0x28
HX8357_DISPON=0x29
HX8357_CASET=0x2A
HX8357_PASET=0x2B
HX8357_RAMWR=0x2C
HX8357_RAMRD=0x2E
HX8357B_PTLAR=0x30
HX8357_TEON=0x35
HX8357_TEARLINE=0x44
HX8357_MADCTL=0x36
HX8357_COLMOD=0x3A
HX8357_SETOSC=0xB0
HX8357_SETPWR1=0xB1
HX8357B_SETDISPLAY=0xB2
HX8357_SETRGB=0xB3
HX8357D_SETCOM=0xB6
HX8357B_SETDISPMODE=0xB4
HX8357D_SETCYC=0xB4
HX8357B_SETOTP=0xB7
HX8357D_SETC=0xB9
HX8357B_SET_PANEL_DRIVING=0xC0
HX8357D_SETSTBA=0xC0
HX8357B_SETDGC=0xC1
HX8357B_SETID=0xC3
HX8357B_SETDDB=0xC4
HX8357B_SETDISPLAYFRAME=0xC5
HX8357B_GAMMMASET=0xC8
HX8357B_SETCABC=0xC9
HX8357_SETPANEL=0xCC
HX8357B_SETPOWER=0xD0
HX8357B_SETVCOM=0xD1
HX8357B_SETPWRNORMAL=0xD2
HX8357B_RDID1=0xDA
HX8357B_RDID2=0xDB
HX8357B_RDID3=0xDC
HX8357B_RDID4=0xDD
HX8357D_SETGAMMA=0xE0
HX8357B_SETGAMMA=0xC8
HX8357B_SETPANELRELATED=0xE9
# Colors (16-bit 565 RGB):
HX8357_BLACK=0x0000
HX8357_BLUE=0x001F
HX8357_RED=0xF800
HX8357_GREEN=0x07E0
HX8357_CYAN=0x07FF
HX8357_MAGENTA=0xF81F
HX8357_YELLOW=0xFFE0
HX8357_WHITE=0xFFFF
def color565(r, g, b):
    """Pack 8-bit red, green and blue components into one 16-bit 565 RGB
    value. Components should be values 0 to 255.
    """
    red = (r & 0xF8) << 8    # top 5 bits of red
    green = (g & 0xFC) << 3  # top 6 bits of green
    blue = b >> 3            # top 5 bits of blue
    return red | green | blue
def image_to_data(image):
    """Convert a PIL image to a flat list of 16-bit 565 RGB bytes
    (high byte first for each pixel)."""
    # NumPy is much faster at doing this. NumPy code provided by:
    # Keith (https://www.blogger.com/profile/02555547344016007163)
    pixels = np.array(image.convert('RGB')).astype('uint16')
    packed = ((pixels[:,:,0] & 0xF8) << 8) | ((pixels[:,:,1] & 0xFC) << 3) | (pixels[:,:,2] >> 3)
    high = (packed >> 8) & 0xFF
    low = packed & 0xFF
    return np.dstack((high, low)).flatten().tolist()
class HX8357(object):
    """Representation of an HX8357 TFT LCD driven over SPI (Python 2)."""
    def __init__(self, dc, spi, rst=None, gpio=None, width=HX8357_TFTWIDTH,
                 height=HX8357_TFTHEIGHT):
        """Create an instance of the display using SPI communication.  Must
        provide the GPIO pin number for the D/C pin and the SPI driver.  Can
        optionally provide the GPIO pin number for the reset pin as the rst
        parameter.
        """
        self._dc = dc
        self._rst = rst
        self._spi = spi
        self._gpio = gpio
        self.width = width
        self.height = height
        if self._gpio is None:
            self._gpio = GPIO.get_platform_gpio()
        # Set DC as output.
        self._gpio.setup(dc, GPIO.OUT)
        # Setup reset as output (if provided).
        if rst is not None:
            self._gpio.setup(rst, GPIO.OUT)
        # Set SPI to mode 0, MSB first.
        spi.set_mode(0)
        spi.set_bit_order(SPI.MSBFIRST)
        # need to nerf the clock for Minnow
        if(Platform.platform_detect() == 3):
            spi.set_clock_hz(16000000)
            print 'Rate: MAX'
        else:
            spi.set_clock_hz(64000000)
            print 'Rate: 64hz'
        # Create an image buffer.
        self.buffer = Image.new('RGB', (width, height))
    def send(self, data, is_data=True, chunk_size=4096):
        """Write a byte or array of bytes to the display. Is_data parameter
        controls if byte should be interpreted as display data (True) or command
        data (False). Chunk_size is an optional size of bytes to write in a
        single SPI transaction, with a default of 4096.
        """
        # Set DC low for command, high for data.
        self._gpio.output(self._dc, is_data)
        # Convert scalar argument to list so either can be passed as parameter.
        if isinstance(data, numbers.Number):
            data = [data & 0xFF]
        # Write data a chunk at a time.
        for start in range(0, len(data), chunk_size):
            end = min(start+chunk_size, len(data))
            self._spi.write(data[start:end])
    def command(self, data):
        """Write a byte or array of bytes to the display as command data."""
        self.send(data, False)
    def data(self, data):
        """Write a byte or array of bytes to the display as display data."""
        self.send(data, True)
    def reset(self):
        """Reset the display, if reset pin is connected."""
        # Pulse reset low between two highs, per the panel's reset timing.
        if self._rst is not None:
            self._gpio.set_high(self._rst)
            time.sleep(0.005)
            self._gpio.set_low(self._rst)
            time.sleep(0.02)
            self._gpio.set_high(self._rst)
            time.sleep(0.150)
    def _init(self):
        """Send the HX8357D power-on initialization sequence
        (software reset, panel setup, gamma tables, then display on)."""
        self.command(HX8357_SWRESET)
        # Enable extended commands (magic bytes from the datasheet).
        self.command(HX8357D_SETC)
        self.data(0xFF)
        self.data(0x83)
        self.data(0x57)
        time.sleep(0.300)
        self.command(HX8357_SETRGB)
        self.data(0x80)
        self.data(0x0)
        self.data(0x06)
        self.data(0x06)
        self.command(HX8357D_SETCOM)
        self.data(0x25)  ## -1.52V
        self.command(HX8357_SETOSC)
        self.data(0x68)  ## Normal mode 70Hz, Idle mode 55 Hz
        self.command(HX8357_SETPANEL)  ## Set Panel
        self.data(0x05)  ## BGR, Gate direction swapped
        self.command(HX8357_SETPWR1)
        self.data(0x00)  ## Not deep standby
        self.data(0x15)  ## BT
        self.data(0x1C)  ## VSPR
        self.data(0x1C)  ## VSNR
        self.data(0x83)  ## AP
        self.data(0xAA)  ## FS
        self.command(HX8357D_SETSTBA)
        self.data(0x50)  ## OPON normal
        self.data(0x50)  ## OPON idle
        self.data(0x01)  ## STBA
        self.data(0x3C)  ## STBA
        self.data(0x1E)  ## STBA
        self.data(0x08)  ## GEN
        self.command(HX8357D_SETCYC)
        self.data(0x02)  ## NW 0x02
        self.data(0x40)  ## RTN
        self.data(0x00)  ## DIV
        self.data(0x2A)  ## DUM
        self.data(0x2A)  ## DUM
        self.data(0x0D)  ## GDON
        self.data(0x78)  ## GDOFF
        # Gamma curve: 16 values for the positive polarity, 16 for the
        # negative, then 2 trailing control bytes.
        self.command(HX8357D_SETGAMMA)
        self.data(0x02)
        self.data(0x0A)
        self.data(0x11)
        self.data(0x1d)
        self.data(0x23)
        self.data(0x35)
        self.data(0x41)
        self.data(0x4b)
        self.data(0x4b)
        self.data(0x42)
        self.data(0x3A)
        self.data(0x27)
        self.data(0x1B)
        self.data(0x08)
        self.data(0x09)
        self.data(0x03)
        self.data(0x02)
        self.data(0x0A)
        self.data(0x11)
        self.data(0x1d)
        self.data(0x23)
        self.data(0x35)
        self.data(0x41)
        self.data(0x4b)
        self.data(0x4b)
        self.data(0x42)
        self.data(0x3A)
        self.data(0x27)
        self.data(0x1B)
        self.data(0x08)
        self.data(0x09)
        self.data(0x03)
        self.data(0x00)
        self.data(0x01)
        self.command(HX8357_COLMOD)
        self.data(0x55)  #/ 16 bit
        self.command(HX8357_MADCTL)
        self.data(0xC0)
        self.command(HX8357_TEON)  #/ TE off
        self.data(0x00)
        self.command(HX8357_TEARLINE)  #/ tear line
        self.data(0x00)
        self.data(0x02)
        self.command(HX8357_SLPOUT)  #/ Exit Sleep
        time.sleep(0.150)
        self.command(HX8357_DISPON)  #/ display on
        time.sleep(0.50)
    def begin(self):
        """Initialize the display. Should be called once before other calls that
        interact with the display are called.
        """
        self.reset()
        self._init()
    def set_window(self, x0=0, y0=0, x1=None, y1=None):
        """Set the pixel address window for proceeding drawing commands. x0 and
        x1 should define the minimum and maximum x pixel bounds. y0 and y1
        should define the minimum and maximum y pixel bound. If no parameters
        are specified the default will be to update the entire display from
        0,0 to width-1,height-1 (319,479 with the default size).
        """
        if x1 is None:
            x1 = self.width-1
        if y1 is None:
            y1 = self.height-1
        # Each coordinate is sent as two bytes, high byte first.
        self.command(HX8357_CASET) # Column addr set
        self.data(x0 >> 8)
        self.data(x0) # XSTART
        self.data(x1 >> 8)
        self.data(x1) # XEND
        self.command(HX8357_PASET) # Row addr set
        self.data(y0 >> 8)
        self.data(y0) # YSTART
        self.data(y1 >> 8)
        self.data(y1) # YEND
        self.command(HX8357_RAMWR) # write to RAM
    def display(self, image=None):
        """Write the display buffer or provided image to the hardware.  If no
        image parameter is provided the display buffer will be written to the
        hardware.  If an image is provided, it should be RGB format and the
        same dimensions as the display hardware.
        """
        # By default write the internal buffer to the display.
        if image is None:
            image = self.buffer
        # Set address bounds to entire display.
        self.set_window()
        # Convert image to array of 16bit 565 RGB data bytes.
        # Unfortunate that this copy has to occur, but the SPI byte writing
        # function needs to take an array of bytes and PIL doesn't natively
        # store images in 16-bit 565 RGB format.
        pixelbytes = list(image_to_data(image))
        # Write data to hardware.
        self.data(pixelbytes)
    def clear(self, color=(0,0,0)):
        """Clear the image buffer to the specified RGB color (default black)."""
        width, height = self.buffer.size
        self.buffer.putdata([color]*(width*height))
    def draw(self):
        """Return a PIL ImageDraw instance for 2D drawing on the image buffer."""
        return ImageDraw.Draw(self.buffer)
|
<reponame>ckolumbus/WikidPad.svn<gh_stars>1-10
import re
import wx, wx.xrc
from wxHelper import *
from . import SystemInfo
from .StringOps import uniToGui, guiToUni, colorDescToRgbTuple,\
rgbToHtmlColor, strToBool, splitIndent, escapeForIni, unescapeForIni
from .AdditionalDialogs import DateformatDialog, FontFaceDialog
from . import Localization
from . import OsAbstract
from . import WikiHtmlView
class DefaultOptionsPanel(wx.Panel):
    """No-op base class for options panels: accepts everything, changes nothing."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
    def setVisible(self, vis):
        # Called when the panel's visibility changes; True allows the change.
        return True
    def checkOk(self):
        # Validate panel contents before OK; the default panel is always valid.
        return True
    def handleOk(self):
        # Apply settings on OK; nothing to do in the default panel.
        pass
class ResourceOptionsPanel(DefaultOptionsPanel):
    """
    GUI of panel is defined by a resource (XRC).
    """
    def __init__(self, parent, resName):
        # Two-phase creation: build a pre-panel, then load the XRC resource
        # named resName onto it (wx.Panel.__init__ is intentionally skipped).
        p = wx.PrePanel()
        self.PostCreate(p)
#         self.optionsDlg = optionsDlg
        res = wx.xrc.XmlResource.Get()
        res.LoadOnPanel(self, parent, resName)
    def setVisible(self, vis):
        return True
    def checkOk(self):
        return True
    def handleOk(self):
        pass
class PluginOptionsPanel(DefaultOptionsPanel):
    """
    Options panel whose entries are registered dynamically (by plugins).
    Each registered entry couples a configuration option with a GUI control
    and a type tag that drives the transfer/validation logic below.
    """
    def __init__(self, parent, optionsDlg):
        DefaultOptionsPanel.__init__(self, parent)
        # Maps control (and "..." button) ids back to their option entry.
        self.idToOptionEntryMap = {}
        # Previous config values, recorded by transferSingleOptionToDialog().
        self.oldSettings = {}
        # Registered entries: tuples (option, control, type, *params).
        self.optionToControl = []
        self.mainControl = optionsDlg.getMainControl()
    def addOptionEntry(self, opt, ctl, typ, *params):
        """Register one option <-> control mapping; typ selects the handling."""
        self.optionToControl.append((opt, ctl, typ) + params)
    def transferOptionsToDialog(self, config=None):
        """Fill every registered control from the configuration."""
        # List of tuples (<configuration file entry>, <gui control name>, <type>)
        # Supported types:
        # b: boolean checkbox
        # i0+: nonnegative integer
        # t: text
        # tes: text with escaped spaces, using StringOps.escapeForIni
        # tre: regular expression
        # ttdf: time/date format
        # f0+: nonegative float
        # seli: integer position of a selection in dropdown list
        # selt: Chosen text in dropdown list
        # color0: HTML color code or empty
        # spin: Numeric SpinCtrl
        #
        # guilang: special choice for GUI language
        # ttdf and color0 entries have a 4th item with the name
        # of the "..." button to call a dialog to set.
        # selt entries have a list with the internal config names (unicode) of the
        # possible choices as 4th item.
        # Transfer options to dialog
        if config is None:
            config = self.mainControl.getConfig()
        # NOTE(review): the loop variable "oct" shadows the builtin oct();
        # kept unchanged here for byte-compatibility.
        for oct in self.optionToControl:
            self.transferSingleOptionToDialog(config, oct)
    def transferSingleOptionToDialog(self, config, oct):
        """Fill one control from its config option according to its type tag."""
        o, ctl, t = oct[:3]
        self.idToOptionEntryMap[ctl.GetId()] = oct
        # Remember the previous value so callers could detect changes later.
        self.oldSettings[o] = config.get("main", o)
        if t == "b": # boolean field = checkbox
            ctl.SetValue(
                    config.getboolean("main", o))
        elif t == "b3": # boolean field = checkbox
            # Three-state checkbox: "Gray" maps to the undetermined state.
            value = config.get("main", o)
            if value == "Gray":
                ctl.Set3StateValue(wx.CHK_UNDETERMINED)
            else:
                if strToBool(value):
                    ctl.Set3StateValue(wx.CHK_CHECKED)
                else:
                    ctl.Set3StateValue(wx.CHK_UNCHECKED)
#             ctl.SetValue(
#                     config.getboolean("main", o))
        elif t in ("t", "tre", "ttdf", "i0+", "f0+", "color0"): # text field or regular expression field
            ctl.SetValue( uniToGui(config.get("main", o)) )
        elif t == "tes": # Text escaped
            ctl.SetValue( unescapeForIni(uniToGui(config.get("main", o))) )
        elif t == "seli": # Selection -> transfer index
            ctl.SetSelection(config.getint("main", o))
        elif t == "selt": # Selection -> transfer content string
            try:
                idx = oct[3].index(config.get("main", o))
                ctl.SetSelection(idx)
            except (IndexError, ValueError):
                # Config value not among the internal names: fall back to
                # matching by the displayed string.
                ctl.SetStringSelection(uniToGui(config.get("main", o)) )
        elif t == "spin": # Numeric SpinCtrl -> transfer number
            ctl.SetValue(config.getint("main", o))
        elif t == "guilang": # GUI language choice
            # First fill choice with options
            ctl.Append(_(u"Default"))
            for ls, lt in Localization.getLangList():
                ctl.Append(lt)
            # Then select previous setting
            optValue = config.get("main", o)
            ctl.SetSelection(Localization.findLangListIndex(optValue) + 1)
        # Register events for "..." buttons
        if t in ("color0", "ttdf"):
            params = oct[3:]
            if len(params) > 0:
                # params[0] is the "..." button after the text field
                dottedButtonId = params[0].GetId()
                self.idToOptionEntryMap[dottedButtonId] = oct
                wx.EVT_BUTTON(self, dottedButtonId,
                        self.OnDottedButtonPressed)
    def checkOk(self):
        """
        Called when "OK" is pressed in dialog. The plugin should check here if
        all input values are valid. If not, it should return False, then the
        Options dialog automatically shows this panel.
        There should be a visual indication about what is wrong (e.g. red
        background in text field). Be sure to reset the visual indication
        if field is valid again.
        """
        fieldsValid = True
        # First check validity of field contents
        for oct in self.optionToControl:
            if not self.checkSingleOptionOk(oct):
                fieldsValid = False
        return fieldsValid
    def checkSingleOptionOk(self, oct):
        """Validate one control's content; flag invalid fields with a red
        background and return False for them."""
        o, ctl, t = oct[:3]
        fieldsValid = True
        if t == "tre":
            # Regular expression field, test if re is valid
            try:
                rexp = guiToUni(ctl.GetValue())
                re.compile(rexp, re.DOTALL | re.UNICODE | re.MULTILINE)
                ctl.SetBackgroundColour(wx.WHITE)
            except:   # TODO Specific exception
                fieldsValid = False
                ctl.SetBackgroundColour(wx.RED)
        elif t == "i0+":
            # Nonnegative integer field
            try:
                val = int(guiToUni(ctl.GetValue()))
                if val < 0:
                    raise ValueError
                ctl.SetBackgroundColour(wx.WHITE)
            except ValueError:
                fieldsValid = False
                ctl.SetBackgroundColour(wx.RED)
        elif t == "f0+":
            # Nonnegative float field
            try:
                val = float(guiToUni(ctl.GetValue()))
                if val < 0:
                    raise ValueError
                ctl.SetBackgroundColour(wx.WHITE)
            except ValueError:
                fieldsValid = False
                ctl.SetBackgroundColour(wx.RED)
        elif t == "color0":
            # HTML Color field or empty field
            val = guiToUni(ctl.GetValue())
            rgb = colorDescToRgbTuple(val)
            if val != "" and rgb is None:
                ctl.SetBackgroundColour(wx.RED)
                fieldsValid = False
            else:
                ctl.SetBackgroundColour(wx.WHITE)
        elif t == "spin":
            # SpinCtrl
            try:
                val = ctl.GetValue()
                if val < ctl.GetMin() or \
                        val > ctl.GetMax():
                    raise ValueError
                ctl.SetBackgroundColour(wx.WHITE)
            except ValueError:
                fieldsValid = False
                ctl.SetBackgroundColour(wx.RED)
        return fieldsValid
    def transferDialogToOptions(self, config=None):
        """Write every registered control's value back to the configuration."""
        if config is None:
            config = self.mainControl.getConfig()
        for oct in self.optionToControl:
            self.transferDialogToSingleOption(config, oct)
    def transferDialogToSingleOption(self, config, oct):
        """
        Transfer option from dialog to config object
        """
        o, ctl, t = oct[:3]
        # TODO Handle unicode text controls
        if t == "b":
            config.set("main", o, repr(ctl.GetValue()))
        elif t == "b3":
            value = ctl.Get3StateValue()
            if value == wx.CHK_UNDETERMINED:
                config.set("main", o, "Gray")
            elif value == wx.CHK_CHECKED:
                config.set("main", o, "True")
            elif value == wx.CHK_UNCHECKED:
                config.set("main", o, "False")
        elif t in ("t", "tre", "ttdf", "i0+", "f0+", "color0"):
            config.set("main", o, guiToUni(ctl.GetValue()) )
        elif t == "tes":
            config.set( "main", o, guiToUni(escapeForIni(ctl.GetValue(),
                    toEscape=u" ")) )
        elif t == "seli": # Selection -> transfer index
            config.set(
                    "main", o, unicode(ctl.GetSelection()) )
        elif t == "selt": # Selection -> transfer content string
            try:
                config.set("main", o,
                        oct[3][ctl.GetSelection()])
            except IndexError:
                config.set("main", o,
                        guiToUni(ctl.GetStringSelection()))
        elif t == "spin": # Numeric SpinCtrl -> transfer number
            config.set(
                    "main", o, unicode(ctl.GetValue()) )
        elif t == "guilang": # GUI language choice
            # Index 0 is the "Default" entry, stored as the empty string.
            idx = ctl.GetSelection()
            if idx < 1:
                config.set("main", o, u"")
            else:
                config.set("main", o,
                        Localization.getLangList()[idx - 1][0])
    def OnDottedButtonPressed(self, evt):
        """
        Called when a "..." button is pressed (for some of them) to show
        an alternative way to specify the input, e.g. showing a color selector
        for color entries instead of using the bare text field
        """
        oct = self.idToOptionEntryMap[evt.GetId()]
        o, ctl, t = oct[:3]
        params = oct[3:]
        if t == "color0":
            self.selectColor(ctl)
        elif t == "ttdf":  # Date/time format
            self.selectDateTimeFormat(ctl)
    def selectColor(self, tfield):
        """Open a colour dialog and write the chosen colour back to *tfield*
        as an HTML colour code."""
        rgb = colorDescToRgbTuple(tfield.GetValue())
        if rgb is None:
            rgb = (0, 0, 0)
        color = wx.Colour(*rgb)
        colordata = wx.ColourData()
        colordata.SetColour(color)
        dlg = wx.ColourDialog(self, colordata)
        try:
            if dlg.ShowModal() == wx.ID_OK:
                color = dlg.GetColourData().GetColour()
                if color.Ok():
                    tfield.SetValue(
                            rgbToHtmlColor(color.Red(), color.Green(),
                            color.Blue()))
        finally:
            dlg.Destroy()
    def selectDateTimeFormat(self, tfield):
        """Open the date-format dialog, pre-set with *tfield*'s current
        format, and write the result back."""
        dlg = DateformatDialog(self, -1, self.mainControl,
                deffmt=tfield.GetValue())
        try:
            if dlg.ShowModal() == wx.ID_OK:
                tfield.SetValue(dlg.GetValue())
        finally:
            dlg.Destroy()
# class KeyDefField(wx.TextCtrl):
# def __init__(self, parent, ID=-1):
# wx.TextCtrl.__init__(self, parent, ID)
# self.mods = None
# self.vkCode = None
# EVT_
# def
class OptionsDialog(wx.Dialog):
    """
    Dialog to edit the application-wide and (if a wiki is loaded) the
    wiki-bound configuration options. The OPTION_TO_CONTROL_* tables below
    map configuration file entries to the GUI controls (by XRC name) that
    edit them; the third tuple item selects how the value is transferred
    between config and control.
    """
    # List of tuples (<configuration file entry>, <gui control name>, <type>)
    # Supported types:
    # b: boolean checkbox
    # b3: tri-state checkbox, stored as "True"/"False"/"Gray"
    # i0+: nonnegative integer
    # t: text
    # tes: text with escaped spaces, using StringOps.escapeForIni
    # tre: regular expression
    # ttdf: time/date format
    # tdir: directory path text field (with "..." button to browse)
    # tfont0: native font description or empty (with "..." button)
    # f0+: nonnegative float
    # seli: integer position of a selection in dropdown list
    # selt: Chosen text in dropdown list
    # color0: HTML color code or empty
    # spin: Numeric SpinCtrl
    #
    # guilang: special choice for GUI language
    # wikilang: special choice for wiki language
    # ttdf and color0 entries have a 4th item with the name
    # of the "..." button to call for a dialog to set.
    # selt entries have a list with the internal config names (unicode) of the
    # possible choices as 4th item.
    # Name of the panel shown when the dialog was last closed; class-wide
    # so reopening the dialog returns to the same page
    _lastShownPanelName = None
OPTION_TO_CONTROL_GLOBAL = (
# application-wide options
("single_process", "cbSingleProcess", "b"),
("zombieCheck", "cbZombieCheck", "b"),
("wikiPathes_relative", "cbWikiPathesRelative", "b"),
("wikiOpenNew_defaultDir", "tfWikiOpenNewDefaultDir",
"t"),
("collation_order", "chCollationOrder", "selt",
[u"Default", u"C"]),
("collation_uppercaseFirst", "cbCollationUppercaseFirst", "b"),
("wikiWord_renameDefault_modifyWikiLinks",
"cbRenameDefaultModifyLinks", "b"),
("wikiWord_renameDefault_renameSubPages",
"cbRenameDefaultRenameSubPages", "b"),
("hotKey_showHide_byApp_isActive", "cbHotKeyShowHideByAppIsActive",
"b"),
("hotKey_showHide_byApp", "tfHotKeyShowHideByApp", "t"),
("tempHandling_preferMemory", "cbTempHandlingPreferMemory", "b"),
("tempHandling_tempMode", "chTempHandlingTempMode", "selt",
[u"system", u"config", u"given"]),
("tempHandling_tempDir", "tfTempHandlingTempDir", "tdir",
"btnSelectTempHandlingTempDir"),
("showontray", "cbShowOnTray", "b"),
("minimize_on_closeButton", "cbMinimizeOnCloseButton", "b"),
("mainTabs_switchMruOrder", "cbMainTabsSwitchMruOrder", "b"),
("startup_splashScreen_show", "cbStartupSplashScreenShow", "b"),
("openWordDialog_askForCreateWhenNonexistingWord",
"cbOpenWordDialogAskForCreateWhenNonexistingWord", "b"),
("pagestatus_timeformat", "tfPageStatusTimeFormat", "ttdf",
"btnSelectPageStatusTimeFormat"),
("gui_language", "chGuiLanguage", "guilang"),
("recentWikisList_length", "scRecentWikisListLength", "spin"),
("option/user/log_window_autoshow", "cbLogWindowAutoShowUser", "b"),
("log_window_autohide", "cbLogWindowAutoHide", "b"),
("docStructure_position", "chDocStructurePosition", "seli"),
("docStructure_depth", "scDocStructureDepth", "spin"),
("docStructure_autohide", "cbDocStructureAutoHide", "b"),
("docStructure_autofollow", "cbDocStructureAutoFollow", "b"),
("process_autogenerated_areas", "cbProcessAutoGenerated", "b"),
("insertions_allow_eval", "cbInsertionsAllowEval", "b"),
# ("tempFiles_inWikiDir", "cbTempFilesInWikiDir", "b"),
("script_security_level", "chScriptSecurityLevel", "seli"),
("script_search_reverse", "cbScriptSearchReverse", "b"),
("mainTree_position", "chMainTreePosition", "seli"),
("viewsTree_position", "chViewsTreePosition", "seli"),
("tree_auto_follow", "cbTreeAutoFollow", "b"),
("tree_update_after_save", "cbTreeUpdateAfterSave", "b"),
("hideundefined", "cbHideUndefinedWords", "b"),
("tree_no_cycles", "cbTreeNoCycles", "b"),
("tree_autohide", "cbTreeAutoHide", "b"),
("tree_bg_color", "tfTreeBgColor", "color0",
"btnSelectTreeBgColor"),
("tree_font_nativeDesc", "tfTreeFontNativeDesc", "tfont0",
"btnSelectTreeFont"),
("tree_updateGenerator_minDelay", "tfTreeUpdateGeneratorMinDelay",
"f0+"),
("start_browser_after_export", "cbStartBrowserAfterExport", "b"),
("facename_html_preview", "tfFacenameHtmlPreview", "t"),
("html_preview_proppattern_is_excluding",
"cbHtmlPreviewProppatternIsExcluding", "b"),
("html_preview_proppattern", "tfHtmlPreviewProppattern", "tre"),
("html_export_proppattern_is_excluding",
"cbHtmlExportProppatternIsExcluding", "b"),
("html_export_proppattern", "tfHtmlExportProppattern", "tre"),
("html_preview_pics_as_links", "cbHtmlPreviewPicsAsLinks", "b"),
("html_export_pics_as_links", "cbHtmlExportPicsAsLinks", "b"),
("export_table_of_contents", "chTableOfContents", "seli"),
("html_toc_title", "tfHtmlTocTitle", "t"),
("html_export_singlePage_sepLineCount",
"tfHtmlExportSinglePageSepLineCount", "i0+"),
("html_preview_renderer", "chHtmlPreviewRenderer", "seli"),
("html_preview_ieShowIframes", "cbHtmlPreviewIeShowIframes", "b"),
("html_preview_webkitViKeys", "cbHtmlPreviewWebkitViKeys", "b"),
("html_body_link", "tfHtmlLinkColor", "color0",
"btnSelectHtmlLinkColor"),
("html_body_alink", "tfHtmlALinkColor", "color0",
"btnSelectHtmlALinkColor"),
("html_body_vlink", "tfHtmlVLinkColor", "color0",
"btnSelectHtmlVLinkColor"),
("html_body_text", "tfHtmlTextColor", "color0",
"btnSelectHtmlTextColor"),
("html_body_bgcolor", "tfHtmlBgColor", "color0",
"btnSelectHtmlBgColor"),
("html_body_background", "tfHtmlBgImage", "t"),
("html_header_doctype", "tfHtmlDocType", "t"),
("sync_highlight_byte_limit", "tfSyncHighlightingByteLimit", "i0+"),
("async_highlight_delay", "tfAsyncHighlightingDelay", "f0+"),
("editor_shortHint_delay", "tfEditorShortHintDelay", "i0+"),
("editor_autoUnbullets", "cbAutoUnbullets", "b"),
("editor_autoComplete_closingBracket",
"cbAutoCompleteClosingBracket", "b"),
("editor_sync_byPreviewSelection", "cbEditorSyncByPreviewSelection",
"b"),
("editor_colorizeSearchFragments", "cbEditorColorizeSearchFragments", "b"),
("attributeDefault_global.wrap_type",
"chAttributeDefaultGlobalWrapType", "selt",
[
u"word",
u"char"
]),
("editor_tabWidth", "scEditorTabWidth", "spin"),
("editor_imageTooltips_localUrls", "cbEditorImageTooltipsLocalUrls",
"b"),
("editor_imageTooltips_maxWidth", "scEditorImageTooltipsMaxWidth",
"spin"),
("editor_imageTooltips_maxHeight", "scEditorImageTooltipsMaxHeight",
"spin"),
("editor_imagePaste_filenamePrefix", "tfEditorImagePasteFilenamePrefix", "t"),
("editor_imagePaste_fileType", "chEditorImagePasteFileType", "seli"),
("editor_imagePaste_quality", "tfEditorImagePasteQuality", "i0+"),
("editor_imagePaste_askOnEachPaste", "cbEditorImagePasteAskOnEachPaste", "b"),
("editor_filePaste_prefix", "tfEditorFilePastePrefix", "tes"),
("editor_filePaste_middle", "tfEditorFilePasteMiddle", "tes"),
("editor_filePaste_suffix", "tfEditorFilePasteSuffix", "tes"),
("editor_filePaste_bracketedUrl", "cbEditorFilePasteBracketedUrl", "b"),
("editor_plaintext_color", "tfEditorPlaintextColor", "color0",
"btnSelectEditorPlaintextColor"),
("editor_link_color", "tfEditorLinkColor", "color0",
"btnSelectEditorLinkColor"),
("editor_attribute_color", "tfEditorAttributeColor", "color0",
"btnSelectEditorAttributeColor"),
("editor_bg_color", "tfEditorBgColor", "color0",
"btnSelectEditorBgColor"),
("editor_selection_fg_color", "tfEditorSelectionFgColor", "color0",
"btnSelectEditorSelectionFgColor"),
("editor_selection_bg_color", "tfEditorSelectionBgColor", "color0",
"btnSelectEditorSelectionBgColor"),
("editor_margin_bg_color", "tfEditorMarginBgColor", "color0",
"btnSelectEditorMarginBgColor"),
("editor_caret_color", "tfEditorCaretColor", "color0",
"btnSelectEditorCaretColor"),
("mouse_middleButton_withoutCtrl", "chMouseMiddleButtonWithoutCtrl", "seli"),
("mouse_middleButton_withCtrl", "chMouseMiddleButtonWithCtrl", "seli"),
("userEvent_mouse/leftdoubleclick/preview/body", "chMouseDblClickPreviewBody", "selt",
[
u"action/none",
u"action/presenter/this/subcontrol/textedit",
u"action/presenter/new/foreground/end/page/this/subcontrol/textedit"
]),
("userEvent_mouse/middleclick/pagetab", "chMouseMdlClickPageTab", "selt",
[
u"action/none",
u"action/presenter/this/close",
u"action/presenter/this/clone"
]),
("userEvent_mouse/leftdrop/editor/files", "chMouseLeftDropEditor", "selt",
[
u"action/none",
u"action/editor/this/paste/files/insert/url/absolute",
u"action/editor/this/paste/files/insert/url/relative",
u"action/editor/this/paste/files/insert/url/tostorage",
u"action/editor/this/paste/files/insert/url/movetostorage",
u"action/editor/this/paste/files/insert/url/ask"
]),
("userEvent_mouse/leftdrop/editor/files/modkeys/shift", "chMouseLeftDropEditorShift", "selt",
[
u"action/none",
u"action/editor/this/paste/files/insert/url/absolute",
u"action/editor/this/paste/files/insert/url/relative",
u"action/editor/this/paste/files/insert/url/tostorage",
u"action/editor/this/paste/files/insert/url/movetostorage",
u"action/editor/this/paste/files/insert/url/ask"
]),
("userEvent_mouse/leftdrop/editor/files/modkeys/ctrl", "chMouseLeftDropEditorCtrl", "selt",
[
u"action/none",
u"action/editor/this/paste/files/insert/url/absolute",
u"action/editor/this/paste/files/insert/url/relative",
u"action/editor/this/paste/files/insert/url/tostorage",
u"action/editor/this/paste/files/insert/url/movetostorage",
u"action/editor/this/paste/files/insert/url/ask"
]),
("timeView_position", "chTimeViewPosition", "seli"),
("timeView_dateFormat", "tfTimeViewDateFormat", "ttdf",
"btnSelectTimeViewDateFormat"),
("timeView_autohide", "cbTimeViewAutoHide", "b"),
("timeView_showWordListOnHovering",
"cbTimeViewShowWordListOnHovering", "b"),
("timeView_showWordListOnSelect",
"cbTimeViewShowWordListOnSelect", "b"),
("timeline_showEmptyDays", "cbTimelineShowEmptyDays", "b"),
("timeline_sortDateAscending", "cbTimelineSortDateAscending", "b"),
("versioning_dateFormat", "tfVersioningDateFormat", "ttdf",
"btnSelectVersioningDateFormat"),
("wikiWideHistory_dateFormat", "tfWikiWideHistoryDateFormat", "ttdf",
"btnSelectWikiWideHistoryDateFormat"),
("newWikiDefault_editor_text_mode",
"cbNewWikiDefaultEditorForceTextMode", "b"),
("newWikiDefault_wikiPageFiles_asciiOnly",
"cbNewWikiDefaultWikiPageFilesAsciiOnly", "b"),
("search_stripSpaces", "cbSearchStripSpaces", "b"),
("search_wiki_searchType", "rboxWwSearchSearchType", "seli"),
("search_wiki_caseSensitive", "cbWwSearchCaseSensitive", "b"),
("search_wiki_wholeWord", "cbWwSearchWholeWord", "b"),
("search_wiki_context_before", "tfWwSearchContextBefore", "i0+"),
("search_wiki_context_after", "tfWwSearchContextAfter", "i0+"),
("search_wiki_count_occurrences", "cbWwSearchCountOccurrences", "b"),
("search_wiki_max_count_occurrences",
"tfWwSearchMaxCountOccurrences", "i0+"),
("incSearch_autoOffDelay", "tfIncSearchAutoOffDelay", "i0+"),
("fastSearch_searchType", "rboxFastSearchSearchType", "seli"),
("fastSearch_caseSensitive", "cbFastSearchCaseSensitive", "b"),
("fastSearch_wholeWord", "cbFastSearchWholeWord", "b"),
("wikiLockFile_ignore", "cbWikiLockFileIgnore", "b"),
("wikiLockFile_create", "cbWikiLockFileCreate", "b"),
("editor_useImeWorkaround", "cbEditorUseImeWorkaround", "b"),
("menu_accels_kbdTranslate", "cbMenuAccelsKbdTranslate", "b"),
("search_dontAllowCancel", "cbSearchDontAllowCancel", "b"),
("editor_compatibility_ViKeys", "cbEditorCompatibilityViKeys", "b"),
("mouse_scrollUnderPointer", "cbMouseScrollUnderPointer", "b"),
("html_preview_reduceUpdateHandling",
"cbHtmlPreviewReduceUpdateHandling", "b"),
("auto_save", "cbAutoSave", "b"),
("auto_save_delay_key_pressed", "tfAutoSaveDelayKeyPressed", "i0+"),
("auto_save_delay_dirty", "tfAutoSaveDelayDirty", "i0+"),
)
# wiki-specific options
# "wiki_wikiLanguage"
# ("footnotes_as_wikiwords", "cbFootnotesAsWws", "b"),
OPTION_TO_CONTROL_WIKI = (
("export_default_dir", "tfExportDefaultDir", "t"),
("tree_expandedNodes_rememberDuration",
"chTreeExpandedNodesRememberDuration", "seli"),
("indexSearch_enabled", "cbIndexSearchEnabled", "b"),
("tabs_maxCharacters", "tfMaxCharactersOnTab", "i0+"),
("template_pageNamesRE", "tfTemplatePageNamesRE", "tre"),
("tree_force_scratchpad_visibility",
"cbTreeForceScratchpadVisibility", "b"),
("option/wiki/log_window_autoshow", "cbLogWindowAutoShowWiki", "b3"),
# The following three need special handling on dialog construction
("wikiPageFiles_asciiOnly", "cbWikiPageFilesAsciiOnly", "b"),
("wikiPageFiles_maxNameLength", "tfWikiPageFilesMaxNameLength", "i0+"),
("wikiPageFiles_gracefulOutsideAddAndRemove",
"cbWikiPageFilesGracefulOutsideAddAndRemove", "b"),
("wiki_icon", "tfWikiIcon", "t"),
("hotKey_showHide_byWiki", "tfHotKeyShowHideByWiki", "t"),
("trashcan_maxNoOfBags", "tfTrashcanMaxNoOfBags", "i0+"),
("trashcan_askOnDelete", "cbTrashcanAskOnDelete", "b"),
("trashcan_storageLocation", "chTrashcanStorageLocation", "seli"),
("first_wiki_word", "tfFirstWikiWord", "t"),
("wiki_onOpen_rebuild", "chWikiOnOpenRebuild", "seli"),
("wiki_onOpen_tabsSubCtrl", "chWikiOnOpenTabsSubCtrl", "selt",
[
u"",
u"preview",
u"textedit"
]),
("wikiPageTitlePrefix", "tfWikiPageTitlePrefix", "t"),
("wikiPageTitle_headingLevel", "scWikiPageTitleHeadingLevel" , "spin"),
("wikiPageTitle_creationMode", "chWikiPageTitleCreationMode", "seli"),
("wikiPageTitle_fromLinkTitle", "cbWikiPageTitleFromLinkTitle", "b"),
("headingsAsAliases_depth", "scHeadingsAsAliasesDepth", "spin"),
("versioning_storageLocation", "chVersioningStorageLocation", "seli"),
("versioning_completeSteps", "tfVersioningCompleteSteps", "i0+"),
("tabHistory_maxEntries", "tfTabHistoryMaxEntries", "i0+"),
("wikiWideHistory_maxEntries", "tfWikiWideHistoryMaxEntries", "i0+"),
("wiki_wikiLanguage", "chWikiWikiLanguage", "wikilang"),
("fileStorage_identity_modDateMustMatch", "cbFsModDateMustMatch", "b"),
("fileStorage_identity_filenameMustMatch", "cbFsFilenameMustMatch", "b"),
("fileStorage_identity_modDateIsEnough", "cbFsModDateIsEnough", "b"),
("fileSignature_timeCoarsening", "tfFileSignatureTimeCoarsening",
"f0+"),
("editor_text_mode", "cbEditorForceTextMode", "b"),
)
# Windows specific options
OPTION_TO_CONTROL_CLIPBOARD_CATCHER = (
("clipboardCatcher_prefix", "tfClipboardCatcherPrefix", "t"),
("clipboardCatcher_suffix", "tfClipboardCatcherSuffix", "t"),
("clipboardCatcher_filterDouble", "cbClipboardCatcherFilterDouble",
"b"),
("clipboardCatcher_userNotification", "chClipCatchUserNotification", "seli"),
("clipboardCatcher_soundFile", "tfClipCatchSoundFile", "t")
)
# Non-Windows specific options
OPTION_TO_CONTROL_NON_WINDOWS_ONLY = (
("fileLauncher_path", "tfFileLauncherPath", "t"),
)
DEFAULT_PANEL_LIST = (
("OptionsPageApplication", N_(u"Application")),
("OptionsPageUserInterface", 2 * u" " + N_(u"User interface")),
("OptionsPageSecurity", 2 * u" " + N_(u"Security")),
("OptionsPageTree", 2 * u" " + N_(u"Tree")),
("OptionsPageHtml", 2 * u" " + N_(u"HTML preview/export")),
("OptionsPageHtmlHeader", 4 * u" " + N_(u"HTML header")),
("OptionsPageEditor", 2 * u" " + N_(u"Editor")),
("OptionsPageEditorColors", 4 * u" " + N_(u"Editor Colors")),
("OptionsPageClipboardCatcher", 4 * u" " + N_(u"Clipboard Catcher")),
("OptionsPageFileLauncher", 2 * u" " + N_(u"File Launcher")),
("OptionsPageMouse", 2 * u" " + N_(u"Mouse")),
("OptionsPageChronView", 2 * u" " + N_(u"Chron. view")),
("OptionsPageSearching", 2 * u" " + N_(u"Searching")),
("OptionsPageNewWikiDefaults", 2 * u" " + N_(u"New wiki defaults")),
("OptionsPageAdvanced", 2 * u" " + N_(u"Advanced")),
("OptionsPageAdvTiming", 4 * u" " + N_(u"Timing")),
("OptionsPageAutosave", 4 * u" " + N_(u"Autosave")),
("??switch mark/current wiki/begin", u""),
("OptionsPageCurrentWiki", N_(u"Current Wiki")),
("OptionsPageCwOnOpen", 2 * u" " + N_(u"On Open")),
("OptionsPageCwHeadings", 2 * u" " + N_(u"Headings")),
("OptionsPageCwChronological", 2 * u" " + N_(u"Chronological")),
("OptionsPageCwWikiLanguage", 2 * u" " + N_(u"Wiki language")),
("??insert mark/current wiki/wiki lang", u""),
("OptionsPageCwAdvanced", 2 * u" " + N_(u"Advanced")),
("??insert mark/current wiki", u""),
("??switch mark/current wiki/end", u"")
)
    def __init__(self, pWiki, ID, startPanelName=None, title="Options",
            pos=wx.DefaultPosition, size=wx.DefaultSize,
            style=wx.NO_3D):
        """
        pWiki -- main application object (provides config, wiki document, ...)
        startPanelName -- name of options panel to show first, or None to
                restore the panel shown when the dialog was closed last time
        """
        # Two-step creation as required for XRC-loaded dialogs
        d = wx.PreDialog()
        self.PostCreate(d)
        self.pWiki = pWiki
        # Option values before editing; passed to config.informChanged() on OK
        self.oldSettings = {}
        res = wx.xrc.XmlResource.Get()
        res.LoadOnDialog(self, self.pWiki, "OptionsDialog")
        # Build combined option list: global options plus (if a wiki is open)
        # the wiki-bound options
        self.combinedOptionToControl = self.OPTION_TO_CONTROL_GLOBAL
        if self.pWiki.isWikiLoaded():
            self.combinedOptionToControl += self.OPTION_TO_CONTROL_WIKI
        # Hold own copy, it may need modification
        self.combinedPanelList = wx.GetApp().getOptionsDlgPanelList()[:]
        # Maps ids of the GUI controls named in self.combinedOptionToControl
        # to the entries (the appropriate tuple) there
        self.idToOptionEntryMap = {}
        # Add additional option depending on OS and environment
        if OsAbstract.supportsClipboardInterceptor():
            self.combinedOptionToControl += self.OPTION_TO_CONTROL_CLIPBOARD_CATCHER
        if not SystemInfo.isWindows():
            self.combinedOptionToControl += self.OPTION_TO_CONTROL_NON_WINDOWS_ONLY
        if not self.pWiki.isWikiLoaded():
            # Remove wiki-bound setting pages (everything between the
            # begin/end switch marks; the end mark itself is filtered below)
            try:
                del self.combinedPanelList[self.combinedPanelList.index(
                        ("??switch mark/current wiki/begin", u"")) :
                        self.combinedPanelList.index(
                        ("??switch mark/current wiki/end", u""))]
            except ValueError:
                pass
        # Rewrite panel list depending on OS and environment
        newPL = []
        for e in self.combinedPanelList:
            if isinstance(e[0], basestring):
                if e[0] == "OptionsPageFileLauncher" and SystemInfo.isWindows():
                    # For Windows the OS-function is used, for other systems
                    # we need the path to an external script
                    continue
                elif e[0] == "OptionsPageClipboardCatcher" and \
                        not OsAbstract.supportsClipboardInterceptor():
                    continue
                elif e[0].startswith("??"):
                    # Entry is only a mark for inserting of panels from plugins so skip it
                    continue
            newPL.append(e)
        self.combinedPanelList = newPL
#         if SystemInfo.isWindows():
#             self.combinedOptionToControl += self.OPTION_TO_CONTROL_CLIPBOARD_CATCHER
#
#             newPL = []
#             for e in self.combinedPanelList:
#                 if isinstance(e[0], basestring):
#                     if e[0] == "OptionsPageFileLauncher":
#                         continue
#                     if e[0].startswith("??"):
#                         # Entry is only a mark for insert operations so skip it
#                         continue
#
#                 newPL.append(e)
#
#             self.combinedPanelList = newPL
#         else:
#             self.combinedOptionToControl += self.OPTION_TO_CONTROL_NON_WINDOWS_ONLY
#
#             newPL = []
#             for i, e in enumerate(self.combinedPanelList):
#                 if isinstance(e[0], basestring):
#                     if e[0] == "OptionsPageClipboardCatcher":
#                         continue
#                     if e[0].startswith("??"):
#                         # Entry is only a mark for insert operations so skip it
#                         continue
#
#                 newPL.append(e)
#
#             self.combinedPanelList = newPL
        self.ctrls = XrcControls(self)
        self.emptyPanel = None
        # Instantiate all option panels and fill the page listbox
        self.panelList = []
        self.ctrls.lbPages.Clear()
        mainsizer = LayerSizer() # wx.BoxSizer(wx.VERTICAL)
        for pn, pt in self.combinedPanelList:
            # Retranslate the page title, preserving its leading indentation
            indPt, textPt = splitIndent(pt)
            pt = indPt + _(textPt)
            if isinstance(pn, basestring):
                if pn != "":
                    panel = ResourceOptionsPanel(self.ctrls.panelPages, pn)
                else:
                    if self.emptyPanel is None:
                        # Necessary to avoid a crash
                        self.emptyPanel = DefaultOptionsPanel(self.ctrls.panelPages)
                    panel = self.emptyPanel
            else:
                # Factory function or class
                panel = pn(self.ctrls.panelPages, self, self.pWiki)
            self.panelList.append(panel)
            self.ctrls.lbPages.Append(pt)
            mainsizer.Add(panel)
        self.ctrls.panelPages.SetSizer(mainsizer)
        self.ctrls.panelPages.SetMinSize(mainsizer.GetMinSize())
        self.ctrls.panelPages.Fit()
        self.Fit()
        self.ctrls.btnOk.SetId(wx.ID_OK)
        self.ctrls.btnCancel.SetId(wx.ID_CANCEL)
        # Special options to be prepared before transferring to dialog.
        # optionsDialog_clientData records the config value belonging to
        # each renderer choice actually added to the dropdown
        self.ctrls.chHtmlPreviewRenderer.optionsDialog_clientData = [0]
        if WikiHtmlView.WikiHtmlViewIE is not None:
            self.ctrls.chHtmlPreviewRenderer.Append(_(u"IE"))
            self.ctrls.chHtmlPreviewRenderer.optionsDialog_clientData.append(1)
            self.ctrls.chHtmlPreviewRenderer.Append(_(u"Mozilla"))
            self.ctrls.chHtmlPreviewRenderer.optionsDialog_clientData.append(2)
        if WikiHtmlView.WikiHtmlViewWK is not None:
            self.ctrls.chHtmlPreviewRenderer.Append(_(u"Webkit"))
            self.ctrls.chHtmlPreviewRenderer.optionsDialog_clientData.append(3)
        self.ctrls.chHtmlPreviewRenderer.Enable(
                len(self.ctrls.chHtmlPreviewRenderer.optionsDialog_clientData) > 1)
        # Transfer options to dialog
        for oct in self.combinedOptionToControl:
            o, c, t = oct[:3]
            self.idToOptionEntryMap[self.ctrls[c].GetId()] = oct
            self.oldSettings[o] = self.pWiki.getConfig().get("main", o)
            if t == "b": # boolean field = checkbox
                self.ctrls[c].SetValue(
                        self.pWiki.getConfig().getboolean("main", o))
            elif t == "b3": # boolean field = checkbox
                value = self.pWiki.getConfig().get("main", o)
                if value == "Gray":
                    self.ctrls[c].Set3StateValue(wx.CHK_UNDETERMINED)
                else:
                    if strToBool(value):
                        self.ctrls[c].Set3StateValue(wx.CHK_CHECKED)
                    else:
                        self.ctrls[c].Set3StateValue(wx.CHK_UNCHECKED)
#                 self.ctrls[c].SetValue(
#                         self.pWiki.getConfig().getboolean("main", o))
            elif t in ("t", "tre", "ttdf", "tfont0", "tdir", "i0+", "f0+",
                    "color0"): # text field or regular expression field
                self.ctrls[c].SetValue(
                        uniToGui(self.pWiki.getConfig().get("main", o)) )
            elif t == "tes": # Text escaped
                self.ctrls[c].SetValue(
                        unescapeForIni(uniToGui(self.pWiki.getConfig().get(
                        "main", o))) )
            elif t == "seli": # Selection -> transfer index
                sel = self.pWiki.getConfig().getint("main", o)
                if hasattr(self.ctrls[c], "optionsDialog_clientData"):
                    # There is client data to take instead of real selection
                    try:
                        sel = self.ctrls[c].optionsDialog_clientData.index(sel)
                    except (IndexError, ValueError):
                        sel = 0
                self.ctrls[c].SetSelection(sel)
            elif t == "selt": # Selection -> transfer content string
                try:
                    idx = oct[3].index(self.pWiki.getConfig().get("main", o))
                    self.ctrls[c].SetSelection(idx)
                except (IndexError, ValueError):
                    # Config value not in the choice list -> select by text
                    self.ctrls[c].SetStringSelection(
                            uniToGui(self.pWiki.getConfig().get("main", o)) )
            elif t == "spin": # Numeric SpinCtrl -> transfer number
                self.ctrls[c].SetValue(
                        self.pWiki.getConfig().getint("main", o))
            elif t == "guilang": # GUI language choice
                # First fill choice with options
                self.ctrls[c].Append(_(u"Default"))
                for ls, lt in Localization.getLangList():
                    self.ctrls[c].Append(lt)
                # Then select previous setting
                optValue = self.pWiki.getConfig().get("main", o)
                self.ctrls[c].SetSelection(
                        Localization.findLangListIndex(optValue) + 1)
            elif t == "wikilang": # wiki language choice
                # Fill choice with options and find previous selection
                optValue = self.pWiki.getConfig().get("main", o)
                sel = -1
                for i, ld in enumerate(
                        wx.GetApp().listWikiLanguageDescriptions()):
                    self.ctrls[c].Append(ld[1])
                    if ld[0] == optValue:
                        sel = i
                if sel > -1:
                    # Then select previous setting
                    self.ctrls[c].SetSelection(sel)
            # Register events for "..." buttons
            if t in ("color0", "ttdf", "tfont0", "tdir"):
                params = oct[3:]
                if len(params) > 0:
                    # params[0] is name of the "..." button after the text field
                    dottedButtonId = self.ctrls[params[0]].GetId()
                    self.idToOptionEntryMap[dottedButtonId] = oct
                    wx.EVT_BUTTON(self, dottedButtonId,
                            self.OnDottedButtonPressed)
        # Options with special treatment
        self.ctrls.cbNewWindowWikiUrl.SetValue(
                self.pWiki.getConfig().getint("main",
                "new_window_on_follow_wiki_url") != 0)
        wikiDocument = self.pWiki.getWikiDocument()
        if wikiDocument is not None:
            self.ctrls.cbWikiReadOnly.SetValue(
                    wikiDocument.getWriteAccessDeniedByConfig())
            # The following controls only make sense if the wiki backend
            # has the "filePerPage" capability
            fppCap = wikiDocument.getWikiData().checkCapability("filePerPage")
            self.ctrls.cbWikiPageFilesAsciiOnly.Enable(fppCap is not None)
            self.ctrls.tfWikiPageFilesMaxNameLength.Enable(fppCap is not None)
            self.ctrls.cbWikiPageFilesGracefulOutsideAddAndRemove.Enable(
                    fppCap is not None)
            self.ctrls.chTrashcanStorageLocation.Enable(
                    fppCap is not None)
            self.ctrls.chVersioningStorageLocation.Enable(
                    fppCap is not None)
            self.ctrls.cbEditorForceTextMode.Enable(
                    fppCap is not None)
        # Set initial enabled/disabled state of dependent controls
        self.OnUpdateUiAfterChange(None)
        # Now show the right panel
        self.activePageIndex = -1
        for panel in self.panelList:
            panel.Show(False)
            panel.Enable(False)
        if startPanelName is None:
            startPanelName = OptionsDialog._lastShownPanelName
        if not self.selectPanelByName(startPanelName):
            self.ctrls.lbPages.SetSelection(0)
            self._refreshForPage()
        # Fixes focus bug under Linux
        self.SetFocus()
        wx.EVT_LISTBOX(self, GUI_ID.lbPages, self.OnLbPages)
        wx.EVT_BUTTON(self, wx.ID_OK, self.OnOk)
        wx.EVT_BUTTON(self, GUI_ID.btnSelectFaceHtmlPrev, self.OnSelectFaceHtmlPrev)
        wx.EVT_BUTTON(self, GUI_ID.btnSelectClipCatchSoundFile,
                lambda evt: self.selectFile(self.ctrls.tfClipCatchSoundFile,
                _(u"Wave files (*.wav)|*.wav")))
        wx.EVT_BUTTON(self, GUI_ID.btnSelectExportDefaultDir,
                lambda evt: self.selectDirectory(self.ctrls.tfExportDefaultDir))
        wx.EVT_BUTTON(self, GUI_ID.btnSelectWikiOpenNewDefaultDir,
                lambda evt: self.selectDirectory(
                self.ctrls.tfWikiOpenNewDefaultDir))
        wx.EVT_BUTTON(self, GUI_ID.btnSelectFileLauncherPath,
                lambda evt: self.selectFile(self.ctrls.tfFileLauncherPath,
                _(u"All files (*.*)|*")))
        # These controls influence the enabled/disabled state of others
        wx.EVT_CHOICE(self, GUI_ID.chTempHandlingTempMode,
                self.OnUpdateUiAfterChange)
        wx.EVT_CHECKBOX(self, GUI_ID.cbEditorImageTooltipsLocalUrls,
                self.OnUpdateUiAfterChange)
        wx.EVT_CHOICE(self, GUI_ID.chEditorImagePasteFileType,
                self.OnUpdateUiAfterChange)
        wx.EVT_CHOICE(self, GUI_ID.chHtmlPreviewRenderer,
                self.OnUpdateUiAfterChange)
        wx.EVT_CHECKBOX(self, GUI_ID.cbWwSearchCountOccurrences,
                self.OnUpdateUiAfterChange)
        wx.EVT_CHECKBOX(self, GUI_ID.cbSingleProcess,
                self.OnUpdateUiAfterChange)
    def _refreshForPage(self):
        """
        Hide the currently shown options panel (if any) and show/enable
        the panel belonging to the current listbox selection instead.
        """
        if self.activePageIndex > -1:
            panel = self.panelList[self.activePageIndex]
            # Ask the current panel if it may be hidden (it can veto, e.g.
            # if it holds invalid data); if it refuses, restore its
            # listbox selection and stay on it
            if not panel.setVisible(False):
                self.ctrls.lbPages.SetSelection(self.activePageIndex)
                return
            panel.Show(False)
            panel.Enable(False)
        self.activePageIndex = self.ctrls.lbPages.GetSelection()
        panel = self.panelList[self.activePageIndex]
        panel.setVisible(True)  # Not checking return value here
        # Enable appropriate addit. options panel
        panel.Enable(True)
        panel.Show(True)
def selectPanelByName(self, panelName):
if panelName is None:
return False
for i, e in enumerate(self.combinedPanelList):
if e[0] == panelName:
self.ctrls.lbPages.SetSelection(i)
self._refreshForPage()
return True
return False
    def getMainControl(self):
        """Return the application object (pWiki) this dialog was created for."""
        return self.pWiki
    def OnLbPages(self, evt):
        """Page listbox selection changed -> show the corresponding panel."""
        self._refreshForPage()
        evt.Skip()
def OnOk(self, evt):
fieldsValid = True
# First check validity of field contents
for oct in self.combinedOptionToControl:
o, c, t = oct[:3]
if t == "tre":
# Regular expression field, test if re is valid
try:
rexp = guiToUni(self.ctrls[c].GetValue())
re.compile(rexp, re.DOTALL | re.UNICODE | re.MULTILINE)
self.ctrls[c].SetBackgroundColour(wx.WHITE)
except: # TODO Specific exception
fieldsValid = False
self.ctrls[c].SetBackgroundColour(wx.RED)
elif t == "i0+":
# Nonnegative integer field
try:
val = int(guiToUni(self.ctrls[c].GetValue()))
if val < 0:
raise ValueError
self.ctrls[c].SetBackgroundColour(wx.WHITE)
except ValueError:
fieldsValid = False
self.ctrls[c].SetBackgroundColour(wx.RED)
elif t == "f0+":
# Nonnegative float field
try:
val = float(guiToUni(self.ctrls[c].GetValue()))
if val < 0:
raise ValueError
self.ctrls[c].SetBackgroundColour(wx.WHITE)
except ValueError:
fieldsValid = False
self.ctrls[c].SetBackgroundColour(wx.RED)
elif t == "color0":
# HTML Color field or empty field
val = guiToUni(self.ctrls[c].GetValue())
rgb = colorDescToRgbTuple(val)
if val != "" and rgb is None:
self.ctrls[c].SetBackgroundColour(wx.RED)
fieldsValid = False
else:
self.ctrls[c].SetBackgroundColour(wx.WHITE)
elif t == "spin":
# SpinCtrl
try:
val = self.ctrls[c].GetValue()
if val < self.ctrls[c].GetMin() or \
val > self.ctrls[c].GetMax():
raise ValueError
self.ctrls[c].SetBackgroundColour(wx.WHITE)
except ValueError:
fieldsValid = False
self.ctrls[c].SetBackgroundColour(wx.RED)
if not fieldsValid:
self.Refresh()
return
# Check each panel
for i, panel in enumerate(self.panelList):
if not panel.checkOk():
# One panel has a problem (probably invalid data)
self.ctrls.lbPages.SetSelection(i)
self._refreshForPage()
return
# Options with special treatment (before standard handling)
wikiDocument = self.pWiki.getWikiDocument()
if wikiDocument is not None and not self.ctrls.cbWikiReadOnly.GetValue():
wikiDocument.setWriteAccessDeniedByConfig(False)
config = self.pWiki.getConfig()
# Then transfer options from dialog to config object
for oct in self.combinedOptionToControl:
o, c, t = oct[:3]
# TODO Handle unicode text controls
if t == "b":
config.set("main", o, repr(self.ctrls[c].GetValue()))
elif t == "b3":
value = self.ctrls[c].Get3StateValue()
if value == wx.CHK_UNDETERMINED:
config.set("main", o, "Gray")
elif value == wx.CHK_CHECKED:
config.set("main", o, "True")
elif value == wx.CHK_UNCHECKED:
config.set("main", o, "False")
elif t in ("t", "tre", "ttdf", "tfont0", "tdir", "i0+", "f0+", "color0"):
config.set( "main", o, guiToUni(self.ctrls[c].GetValue()) )
elif t == "tes":
config.set( "main", o, guiToUni(
escapeForIni(self.ctrls[c].GetValue(), toEscape=u" ")) )
elif t == "seli": # Selection -> transfer index
sel = self.ctrls[c].GetSelection()
if hasattr(self.ctrls[c], "optionsDialog_clientData"):
# There is client data to take instead of real selection
sel = self.ctrls[c].optionsDialog_clientData[sel]
config.set("main", o, unicode(sel))
elif t == "selt": # Selection -> transfer content string
try:
config.set("main", o, oct[3][self.ctrls[c].GetSelection()])
except IndexError:
config.set("main", o,
guiToUni(self.ctrls[c].GetStringSelection()))
elif t == "spin": # Numeric SpinCtrl -> transfer number
config.set( "main", o, unicode(self.ctrls[c].GetValue()) )
elif t == "guilang": # GUI language choice
idx = self.ctrls[c].GetSelection()
if idx < 1:
config.set("main", o, u"")
else:
config.set("main", o,
Localization.getLangList()[idx - 1][0])
elif t == "wikilang": # GUI language choice
idx = self.ctrls[c].GetSelection()
config.set("main", o,
wx.GetApp().listWikiLanguageDescriptions()[idx][0])
# Options with special treatment (after standard handling)
if self.ctrls.cbNewWindowWikiUrl.GetValue():
config.set("main", "new_window_on_follow_wiki_url", "1")
else:
config.set("main", "new_window_on_follow_wiki_url", "0")
if wikiDocument is not None and self.ctrls.cbWikiReadOnly.GetValue():
wikiDocument.setWriteAccessDeniedByConfig(True)
# Ok for each panel
for panel in self.panelList:
panel.handleOk()
config.informChanged(self.oldSettings)
if self.activePageIndex > -1:
OptionsDialog._lastShownPanelName = self.combinedPanelList[
self.activePageIndex][0]
evt.Skip()
    def getOldSettings(self):
        """Return mapping of option name -> value before the dialog edited it."""
        return self.oldSettings
def OnSelectFaceHtmlPrev(self, evt):
dlg = FontFaceDialog(self, -1, self.pWiki,
self.ctrls.tfFacenameHtmlPreview.GetValue())
if dlg.ShowModal() == wx.ID_OK:
self.ctrls.tfFacenameHtmlPreview.SetValue(dlg.GetValue())
dlg.Destroy()
# def OnSelectPageStatusTimeFormat(self, evt):
# dlg = DateformatDialog(self, -1, self.pWiki,
# deffmt=self.ctrls.tfPageStatusTimeFormat.GetValue())
# if dlg.ShowModal() == wx.ID_OK:
# self.ctrls.tfPageStatusTimeFormat.SetValue(dlg.GetValue())
# dlg.Destroy()
def OnUpdateUiAfterChange(self, evt):
"""
Some controls must be updated (esp. dis-/enabled) after a change.
"""
# If temp. handling is set to "given" directory, field to enter
# directory must be enabled
enabled = self.ctrls.chTempHandlingTempMode.GetSelection() == 2
self.ctrls.tfTempHandlingTempDir.Enable(enabled)
self.ctrls.btnSelectTempHandlingTempDir.Enable(enabled)
# Dimensions of image preview tooltips can only be set if tooltips are
# enabled
enabled = self.ctrls.cbEditorImageTooltipsLocalUrls.GetValue()
self.ctrls.scEditorImageTooltipsMaxWidth.Enable(enabled)
self.ctrls.scEditorImageTooltipsMaxHeight.Enable(enabled)
# If image should be pasted as JPEG, quality can be set
enabled = self.ctrls.chEditorImagePasteFileType.GetSelection() == 2
self.ctrls.tfEditorImagePasteQuality.Enable(enabled)
# If HTML preview is not internal one, allow to set if iframes should
# be shown inside the preview
self.ctrls.cbHtmlPreviewIeShowIframes.Enable(
self.ctrls.chHtmlPreviewRenderer.GetSelection() > 0)
# If occurrences of search terms are counted, allow to set maximum
# number to count up to
self.ctrls.tfWwSearchMaxCountOccurrences.Enable(
self.ctrls.cbWwSearchCountOccurrences.GetValue())
# If single process mode checked, allow to check for other
# WikidPad processes already running
self.ctrls.cbZombieCheck.Enable(self.ctrls.cbSingleProcess.GetValue())
def OnDottedButtonPressed(self, evt):
"""
Called when a "..." button is pressed (for some of them) to show
an alternative way to specify the input, e.g. showing a color selector
for color entries instead of using the bare text field
"""
oct = self.idToOptionEntryMap[evt.GetId()]
o, c, t = oct[:3]
params = oct[3:]
if t == "color0":
self.selectColor(self.ctrls[c])
elif t == "ttdf": # Date/time format
self.selectDateTimeFormat(self.ctrls[c])
elif t == "tfont0": # Font or empty
self.selectFont(self.ctrls[c])
elif t == "tdir":
self.selectDirectory(self.ctrls[c])
def selectColor(self, tfield):
rgb = colorDescToRgbTuple(tfield.GetValue())
if rgb is None:
rgb = 0, 0, 0
color = wx.Colour(*rgb)
colordata = wx.ColourData()
colordata.SetColour(color)
dlg = wx.ColourDialog(self, colordata)
try:
if dlg.ShowModal() == wx.ID_OK:
color = dlg.GetColourData().GetColour()
if color.Ok():
tfield.SetValue(
rgbToHtmlColor(color.Red(), color.Green(),
color.Blue()))
finally:
dlg.Destroy()
def selectDirectory(self, tfield):
seldir = wx.DirSelector(_(u"Select Directory"),
tfield.GetValue(),
style=wx.DD_DEFAULT_STYLE|wx.DD_NEW_DIR_BUTTON, parent=self)
if seldir:
tfield.SetValue(seldir)
def selectFile(self, tfield, wildcard=u""):
selfile = wx.FileSelector(_(u"Select File"),
tfield.GetValue(), wildcard = wildcard + u"|" + \
_(u"All files (*.*)|*"),
flags=wx.OPEN, parent=self)
if selfile:
tfield.SetValue(selfile)
def selectDateTimeFormat(self, tfield):
dlg = DateformatDialog(self, -1, self.pWiki,
deffmt=tfield.GetValue())
try:
if dlg.ShowModal() == wx.ID_OK:
tfield.SetValue(dlg.GetValue())
finally:
dlg.Destroy()
def selectFont(self, tfield):
fontDesc = tfield.GetValue()
# if fontDesc != u"":
font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
# wx.Font() # 1, wx.FONTFAMILY_DEFAULT,
font.SetNativeFontInfoUserDesc(fontDesc)
newFont = wx.GetFontFromUser(self, font) # , const wxString& caption = wxEmptyString)
if newFont is not None and newFont.IsOk():
tfield.SetValue(newFont.GetNativeFontInfoUserDesc())
# GetNativeFontInfoUserDesc
# SetNativeFontInfoUserDesc
|
<reponame>xiaoxiae/Vimvaldi<filename>vimvaldi/components.py
"""The module containing all of the components logic."""
from __future__ import annotations
import curses
import logging # DEBUG; TO BE REMOVED
import os
import sys
from abc import ABC, abstractmethod
from typing import *
from signal import signal, SIGINT
# for suppressing Abjad messages
sys.stdout = open(os.devnull, "w")
import abjad
from vimvaldi.commands import *
# for debug
logging.basicConfig(filename="vimvaldi.log", level=logging.DEBUG)
print = logging.info
# catch SIGINT and prevent it from terminating the script
signal(SIGINT, lambda _, __: None)
class Changeable:
    """Mixin for objects that track whether they changed since last reset."""

    # new objects count as changed so they are processed at least once
    changed = True

    def has_changed(self) -> bool:
        """Report whether this object is currently marked as changed."""
        return self.changed

    def set_changed(self, value: bool):
        """Mark (value=True) or unmark (value=False) this object as changed."""
        self.changed = value
class Component(ABC, Changeable):
    """Abstract base class of all UI components."""

    def handle_keypress(self, key: str) -> List[Command]:
        """Handle a single keypress via _handle_keypress, normalizing a
        None result to an empty command list."""
        result = self._handle_keypress(key)
        return [] if not result else result

    @abstractmethod
    def _handle_keypress(self, key) -> Optional[List[Command]]:
        """Component-specific keypress logic."""

    def handle_command(self, command: Command) -> List[Command]:
        """Handle the given command via _handle_command, normalizing a
        None result to an empty command list."""
        result = self._handle_command(command)
        return [] if not result else result

    @abstractmethod
    def _handle_command(self, command: Command) -> Optional[List[Command]]:
        """Component-specific command logic."""
@dataclass
class MenuItem:
    """A class for representing an item of a menu."""

    label: str  # the text shown for this item in the menu
    commands: List[Command]  # which command to send in response to pressing this item
    tooltip: str  # description shown in the status line while selected
class Menu(Component):
    """A component implementing a navigable menu of MenuItems (None entries
    act as non-selectable spacers)."""

    def __init__(self, items: Sequence[Optional[MenuItem]]):
        self.index = 0  # currently highlighted item
        self.items = items

    def __move_index(self, delta):
        """Shift the selection by delta positions, clamping to the menu
        bounds and stepping over None spacers in the direction of travel."""
        last = len(self.items) - 1
        self.index = min(max(self.index + delta, 0), last)
        step = 1 if delta > 0 else -1
        while self.items[self.index] is None:
            self.index = min(max(self.index + step, 0), last)
        self.set_changed(True)

    def next(self):
        """Select the following item."""
        self.__move_index(1)

    def previous(self):
        """Select the preceding item."""
        self.__move_index(-1)

    def get_tooltip(self) -> str:
        """Return the tooltip of the currently highlighted item."""
        return self.items[self.index].tooltip

    def get_selected(self) -> MenuItem:
        """Return the currently highlighted MenuItem."""
        return self.items[self.index]

    def update_status_line(self) -> List[Command]:
        """Build the commands that refresh the status line with the
        highlighted item's tooltip."""
        return [
            ClearStatusLineCommand(),
            SetStatusLineTextCommand(self.get_selected().tooltip, Position.CENTER),
        ]

    def _handle_command(self, command: Command) -> Optional[List[Command]]:
        """React to a Quit command by quitting."""
        if isinstance(command, QuitCommand):
            quit()

    def _handle_keypress(self, key) -> Optional[List[Command]]:
        # j/down and k/up move one item; ^D and ^U jump by three
        movement = {"j": 1, 258: 1, "k": -1, 259: -1, chr(4): 3, chr(21): -3}
        if key in movement:
            self.__move_index(movement[key])
            return self.update_status_line()
        # enter / l / right activates the highlighted item
        if key in (curses.KEY_ENTER, "\n", "\r", "l", 261):
            return self.get_selected().commands
        # ":" hands focus to the status line in normal (command) mode
        if key == ":":
            return [
                ToggleFocusCommand(),
                SetStatusLineStateCommand(State.NORMAL),
            ]
class LogoDisplay(Component):
    """A very simple component for displaying the logo."""

    def __init__(self, text: str):
        self.text = text  # the logo text to display

    def _handle_keypress(self, key) -> Optional[List[Command]]:
        """Dismiss the logo when enter is pressed."""
        if key not in (curses.KEY_ENTER, "\n", "\r"):
            return None
        return [PopComponentCommand()]

    def _handle_command(self, command: Command) -> Optional[List[Command]]:
        """React to a Quit command by quitting."""
        if isinstance(command, QuitCommand):
            quit()
class TextDisplay(Component):
    """A component for displaying scrollable text."""

    def __init__(self, text: str):
        self.text = text
        self.line_offset = 0  # vertical scroll position, in lines

    def _handle_command(self, command: Command) -> Optional[List[Command]]:
        """React to a Quit command by quitting."""
        if isinstance(command, QuitCommand):
            quit()

    def _handle_keypress(self, key) -> Optional[List[Command]]:
        # q closes the display; j/enter scroll down, k scrolls up
        if key == "q":
            return [PopComponentCommand()]
        if key in ("j", curses.KEY_ENTER, "\n", "\r"):
            self.line_offset += 1
            self.set_changed(True)
        elif key == "k":
            self.line_offset -= 1
            self.set_changed(True)
class StatusLine(Component):
    """A class for inputting/displaying information for the app.

    Holds three text slots (left, center, right). The left slot doubles as
    the editable line (":" commands or insert text) with its own cursor.
    """
    # the current state of the status line; NORMAL parses ":" commands on
    # enter, INSERT forwards the typed text as an InsertCommand
    current_state = State.NORMAL
    def __init__(self):
        self.text = ["", "", ""] # left, center, right text
        # current position of the cursor within the left (editable) text
        self.cursor_offset = 0
    def set_text(self, position: Position, text: str):
        """Change text at the specified position (left/center/right). Also, if the
        position is left, move the cursor to the very end (done when adding a partial
        command, for example)."""
        self.text[position.value] = text
        self.set_changed(True)
        if position == Position.LEFT:
            self.cursor_offset = len(self.text[position.value])
    def clear_text(self, position: Position):
        """Clear the given status line position."""
        self.text[position.value] = ""
        # also reset the cursor if the left (editable) position is cleared
        if position.value == 0:
            self.cursor_offset = 0
        self.set_changed(True)
    def clear(self):
        """Clear all text from the StatusLine."""
        for pos in Position:
            self.clear_text(pos)
    def _handle_command(self, command: Command) -> Optional[List[Command]]:
        """Apply set/clear/state commands addressed to the status line."""
        if isinstance(command, SetStatusLineTextCommand):
            self.set_text(command.position, command.text)
        elif isinstance(command, ClearStatusLineCommand):
            self.clear()
        elif isinstance(command, SetStatusLineStateCommand):
            self.current_state = command.state
    def _handle_keypress(self, key) -> Optional[List[Command]]:
        """Edit the left text slot: cursor movement, character editing, and
        on enter either forward the text (INSERT) or parse it as a ":"
        command (NORMAL)."""
        self.set_changed(True)
        # shorthand for the current cursor position
        pos = self.cursor_offset
        # backspace: delete previous character
        if key in (curses.KEY_BACKSPACE, "\b", chr(127)):
            # delete when it's not in the first position
            if pos > 0:
                self.text[0] = self.text[0][: pos - 1] + self.text[0][pos:]
                self.cursor_offset -= 1
            # if there is no text left, transfer focus back
            else:
                if len(self.text[0]) == 0:
                    self.clear_text(Position.LEFT)
                    return [ToggleFocusCommand()]
        # delete: delete next character
        elif key == curses.KEY_DC:
            self.text[0] = self.text[0][:pos] + self.text[0][pos + 1 :]
        # escape: clear and transfer focus
        elif isinstance(key, str) and ord(key) == 27: # esc
            self.clear()
            return [ToggleFocusCommand()]
        # left: move cursor to the left
        # NOTE(review): clamps at 1, so the left arrow never reaches column
        # 0 (Home does) -- confirm whether this is intentional
        elif key == curses.KEY_LEFT: # move cursor left
            self.cursor_offset = max(1, pos - 1)
        # right: move cursor to the right
        elif key == curses.KEY_RIGHT: # move cursor right
            self.cursor_offset = min(len(self.text[0]), pos + 1)
        # keycode 553: jump to the start of the previous word (backwards
        # search). NOTE(review): originally labeled "ctrl + right" although
        # it searches backwards -- the 553/568 labels look swapped; confirm
        # against the terminal's actual keycodes
        elif key == 553:
            space_pos = self.text[0].rfind(" ", 0, self.cursor_offset - 1)
            self.cursor_offset = space_pos + 1 if space_pos != -1 else 1
        # keycode 568: jump past the next word (forward search); see the
        # NOTE above about the possibly swapped labels
        elif key == 568:
            space_pos = self.text[0].find(" ", self.cursor_offset + 1)
            self.cursor_offset = space_pos if space_pos != -1 else len(self.text[0])
        # home: move to position 0
        elif key == curses.KEY_HOME:
            self.cursor_offset = 0
        # end: move to the last position
        elif key == curses.KEY_END:
            self.cursor_offset = len(self.text[0])
        # execute the command on enter
        elif key in (curses.KEY_ENTER, "\n", "\r"):
            # always toggle focus
            commands = [ToggleFocusCommand()]
            # get and clear the text
            text = self.text[0]
            self.clear_text(Position.LEFT)
            # send an insert command if the mode is insert
            if self.current_state is State.INSERT:
                commands.append(InsertCommand(text))
            # else parse the various : commands
            elif self.current_state is State.NORMAL:
                command = text.strip()
                command_parts = command.split()
                # return if there isn't anything in the command line
                if len(command_parts) == 0:
                    return commands
                # set command (two syntaxes: "set a=b" and "set a b")
                if command_parts[0] == "set":
                    rest = command[len(command_parts[0]) :].strip()
                    # set a=b
                    if rest.count("=") == 1:
                        commands.append(SetCommand(*rest.split("=")))
                    # set a b
                    elif len(command_parts) >= 3:
                        option = command_parts[1]
                        value = rest[len(command_parts[1]) :].strip()
                        commands.append(SetCommand(option, value))
                    else:
                        commands[0].suppress_clear = True
                        return commands + [
                            SetStatusLineTextCommand(
                                "Invalid 'set' format.", Position.CENTER
                            )
                        ]
                # help and info screens from anywhere
                if command in ("help", "info"):
                    commands.append(PushComponentCommand(command))
                if command in ("q", "quit"):
                    commands += [QuitCommand()]
                if command in ("q!", "quit!"):
                    commands += [QuitCommand(forced=True)]
                # everything after the command word, used as a file path
                possible_path = command[len(command_parts[0]) :].strip()
                if command_parts[0] in ("n", "new"):
                    commands += [NewCommand()]
                if command_parts[0] in ("n!", "new!"):
                    commands += [NewCommand(forced=True)]
                if command_parts[0] in ("w", "write"):
                    commands += [SaveCommand(path=possible_path)]
                if command_parts[0] in ("w!", "write!"):
                    commands += [SaveCommand(forced=True, path=possible_path)]
                if command_parts[0] in ("o", "open"):
                    commands += [OpenCommand(path=possible_path)]
                if command_parts[0] in ("o!", "open!"):
                    commands += [OpenCommand(forced=True, path=possible_path)]
                if command_parts[0] == "wq":
                    commands += [SaveCommand(path=possible_path), QuitCommand()]
                if command_parts[0] == "wq!":
                    commands += [
                        SaveCommand(forced=True, path=possible_path),
                        QuitCommand(),
                    ]
            return commands
        else:
            # any other key: insert the character at the cursor position
            self.text[0] = self.text[0][:pos] + str(key) + self.text[0][pos:]
            self.cursor_offset += len(str(key))
class Editor(Component):
    """A class for working with the notesheet (the abjad score being
    edited), including insert/delete/paste, save/open and quit handling."""
    def __init__(self):
        self.__initialize_score()
    def __initialize_score(self):
        """Initialize a default score."""
        # internal note representation (with some defaults)
        self.score = abjad.Score(simultaneous=False)
        self.key = abjad.KeySignature("c", "major")
        self.clef = abjad.Clef("treble")
        self.time = abjad.TimeSignature((4, 4))
        self.position = 0 # position within the container
        self.current_file_path = None # the file to which to save
        self.changed_since_saving = False
        self.previous_repeatable_command = None # the previous command (to repeat on .)
        self.deleted_items = [] # last deleted items (to be possibly pasted back)
    def get_score(self) -> abjad.Container:
        """Return the abjad container that stores the notes."""
        return self.score
    def _handle_keypress(self, key) -> Optional[List[Command]]:
        """Vim-like editor keys: ':'/'i' hand focus to the status line,
        '.' repeats, h/l move, x deletes, p pastes."""
        if key == ":":
            return [
                ToggleFocusCommand(),
                SetStatusLineStateCommand(State.NORMAL),
            ]
        if key == "i":
            return [
                ToggleFocusCommand(),
                SetStatusLineStateCommand(State.INSERT),
            ]
        if key == ".":
            # repeat the last repeatable command; if none was recorded yet
            # previous_repeatable_command is None and _handle_command falls
            # through all isinstance checks, doing nothing
            self.set_changed(True)
            return self._handle_command(self.previous_repeatable_command)
        if key in ("l", 261):
            # move right, clamped to one past the last item
            self.set_changed(True)
            self.position = min(len(self.score), self.position + 1)
        if key in ("h", 260):
            # move left, clamped to the start
            self.set_changed(True)
            self.position = max(0, self.position - 1)
        if key == "x":
            # delete the item under the cursor (unless past the end)
            if self.position != len(self.score):
                self.deleted_items = [self.score.pop(self.position)]
                self.set_changed(True)
        if key == "p":
            # paste copies of the last deleted items at the cursor;
            # type(item)(item) re-constructs each item so the score does
            # not share objects with deleted_items
            for item in self.deleted_items:
                self.score.insert(self.position, type(item)(item))
                self.position += 1
                self.set_changed(True)
    def __save_path_valid(self, path: str) -> List[Command]:
        """Checks, whether we can save to this path -- if it either doesn't exist or
        it matches the self.current_file_path path. Returns the appropriate commands if it
        doesn't."""
        return (
            [SetStatusLineTextCommand("The file already exists.", Position.CENTER)]
            if os.path.isfile(path) and path != self.current_file_path
            else []
        )
    def _handle_command(self, command: Command) -> Optional[List[Command]]:
        """Dispatch each supported command type to its private handler."""
        if isinstance(command, InsertCommand):
            return self.__handle_insert_command(command)
        elif isinstance(command, SaveCommand):
            return self.__handle_save_command(command)
        elif isinstance(command, QuitCommand):
            return self.__handle_quit_command(command)
        elif isinstance(command, OpenCommand):
            return self.__handle_open_command(command)
        elif isinstance(command, NewCommand):
            return self.__handle_new_command(command)
        elif isinstance(command, SetCommand):
            return self.__handle_set_command(command)
    def __handle_set_command(self, command: SetCommand) -> List[Command]:
        """Handle set commands (clef/time/key); reports a status line
        message on an unknown option or an unparsable value."""
        try:
            if command.option == "clef":
                self.clef = abjad.Clef(command.value)
            elif command.option == "time":
                # accept both "3/4" and "3 4"
                pair = command.value.split("/" if "/" in command.value else " ")
                self.time = abjad.TimeSignature(tuple(map(int, pair)))
            elif command.option == "key":
                self.key = abjad.KeySignature(*command.value.split(" "))
            else:
                return [
                    SetStatusLineTextCommand(
                        f"Invalid option '{command.option}'.", Position.CENTER
                    )
                ]
        except Exception as e:
            return [
                SetStatusLineTextCommand(
                    f"Could not parse '{command.value}' as '{command.option}'",
                    Position.CENTER,
                )
            ]
        return [SetStatusLineTextCommand(f"'{command.option}' set.", Position.CENTER,)]
    def __handle_insert_command(self, command: InsertCommand) -> List[Command]:
        """Attempt to parse whatever the InsertCommand contains. Return either [] if
        successful or a command that sets the status line text to what happened."""
        text = command.text
        if len(text) == 0:
            return
        try:
            # parse all items first so nothing is inserted on failure;
            # leading 'r' = rest, leading '<' = chord, anything else = note
            objects = []
            for item in text.split(";"):
                item = item.strip()
                if item[0] == "r":
                    obj = abjad.Rest(item)
                elif item[0] == "<":
                    obj = abjad.Chord(item)
                else:
                    obj = abjad.Note(item)
                objects.append(obj)
            for obj in objects:
                self.score.insert(self.position, obj)
                self.position += 1
            self.changed_since_saving = True
            # remember for the '.' repeat key
            self.previous_repeatable_command = command
        except Exception as e:
            return [
                SetStatusLineTextCommand(
                    "The string could not be parsed.", Position.CENTER
                )
            ]
    def __handle_save_command(self, command: SaveCommand) -> List[Command]:
        """Save the score to command.path (or the current file), refusing
        to overwrite a foreign existing file unless forced."""
        path = command.path # the path to save file to
        previous_save_file = self.current_file_path
        if path is None:
            # if there isn't a file currently open, warn
            if self.current_file_path is None:
                return [self.__get_empty_name_warning()]
            # else set the command file to the current file
            else:
                path = self.current_file_path
        file_status = self.__save_path_valid(path)
        if len(file_status) != 0 and not command.forced:
            return file_status
        self.current_file_path = path
        # attempt to write the score to the file
        try:
            with open(self.current_file_path, "w") as f:
                # abjad.f prints to stdout, so stdout is pointed at the file
                # NOTE(review): sys.stdout is never restored afterwards; the
                # module header already redirects stdout to /dev/null, but
                # after a save it stays pointed at the (closed) file
                sys.stdout = f # abjad prints to stdout and we don't want that
                abjad.f(self.score)
            self.changed_since_saving = False
        except Exception as e:
            # restore the previous file name if something went amiss (we didn't save...)
            self.current_file_path = previous_save_file
            # TODO: BETTER EXCEPTIONS
            return [SetStatusLineTextCommand("Error writing to file.", Position.CENTER)]
        # if everything went fine, let the user know
        return [
            self.get_file_name_command(),
            SetStatusLineTextCommand("Saved.", Position.CENTER),
        ]
    def __handle_new_command(self, command: NewCommand) -> List[Command]:
        """Discard current work in favour of a new file."""
        if self.changed_since_saving and not command.forced:
            return [self.__get_unsaved_changes_warning()]
        self.__initialize_score()
    def __handle_open_command(self, command: OpenCommand) -> List[Command]:
        """Attempt to open the specified file."""
        path = command.path
        if self.changed_since_saving and not command.forced:
            return [self.__get_unsaved_changes_warning()]
        if path is None:
            return [self.__get_empty_name_warning()]
        # attempt to read the score from the file
        try:
            with open(path, "r") as f:
                self.score = abjad.Score(f.read())[0]
            self.changed_since_saving = False
            self.current_file_path = path
        except Exception as e:
            return [
                SetStatusLineTextCommand("Error reading the file.", Position.CENTER,)
            ]
        return [
            self.get_file_name_command(),
            SetStatusLineTextCommand("Opened.", Position.CENTER,),
        ]
    def __handle_quit_command(self, command: QuitCommand) -> List[Command]:
        """Quit (if there are either no unsaved changes or the command is forced), else
        warn about there being unsaved changes."""
        if not self.changed_since_saving or command.forced:
            quit()
        return [self.__get_unsaved_changes_warning()]
    def __get_unsaved_changes_warning(self) -> Command:
        """Return the warning command issued when there are unsaved changes."""
        text = "Unsaved changes (maybe append '!'?)."
        return SetStatusLineTextCommand(text, Position.CENTER)
    def __get_empty_name_warning(self) -> Command:
        """Return the warning command issued when no file name is given."""
        return SetStatusLineTextCommand("No file name.", Position.CENTER)
    def get_file_name_command(self) -> Command:
        """Return the appropriate command for changing the label of the status line to
        the currently opened file."""
        return (
            SetStatusLineTextCommand("[no file]", Position.RIGHT)
            if self.current_file_path is None
            else SetStatusLineTextCommand(f"[{self.current_file_path}]", Position.RIGHT)
        )
|
import enum
from typing import List
from sqlalchemy.orm.session import Session
from newsbot.core.constant import SourceName, SourceType
from newsbot.core.sql import database
from newsbot.core.sql import tables
from newsbot.core.sql.tables import ITables, Sources
from newsbot.core.sql.exceptions import FailedToAddToDatabase
class SourcesTable(ITables):
    """Data-access helper for the ``Sources`` table.

    Read methods are best-effort: database errors are swallowed and an
    empty/default result is returned instead of raising.

    Note: the original class defined ``findAllBySource`` twice; the first
    (enum-converting) definition was dead code because the second shadowed
    it, so only the surviving (non-converting) behaviour is kept.
    """

    def __init__(self, session: Session) -> None:
        self.setSession(session)

    def setSession(self, session: Session) -> None:
        """Attach the SQLAlchemy session used by all following calls."""
        self.s = session

    def __convertFromEnum__(self, item: Sources) -> Sources:
        """Replace enum members on *item* with their raw string values."""
        if item.source.__class__ == SourceName:
            item.source = item.source.value
        try:
            item.type = item.type.value
        except Exception:
            # item.type carried no .value (not an enum member)
            item.type = ''
        return item

    def __convertToEnum__(self, item: Sources) -> Sources:
        """Replace raw string values on *item* with their enum members."""
        item.source = SourceName.fromString(item.source)
        item.type = SourceType.fromString(item.type)
        return item

    def __len__(self) -> int:
        """Return the number of rows in the table (0 on query failure)."""
        count = 0
        try:
            for _ in self.s.query(Sources):
                count += 1
        except Exception:
            pass
        return count

    def clone(self, item: Sources) -> Sources:
        """
        Takes the given object and makes a new object without the Session info.
        """
        return Sources(
            id=item.id,
            name=item.name,
            source=item.source,
            type=item.type,
            value=item.value,
            enabled=item.enabled,
            url=item.url,
            tags=item.tags,
            fromEnv=item.fromEnv
        )

    def add(self, item: Sources, session: Session = '') -> None:
        """Insert *item*; commits and closes the session afterwards."""
        if session != '':
            self.s = session
        try:
            self.s.add(item)
            self.s.commit()
            self.s.close()
        except FailedToAddToDatabase as e:
            print(f"Failed to add {item.name} to Source table! {e}")

    def update(self, item: Sources) -> None:
        """Update the row matching *item*'s name and source, or insert it
        as a new row when no match is found."""
        try:
            exists = self.findByNameandSource(name=item.name, source=item.source)
            if exists.source != "":
                exists.name = item.name
                exists.source = item.source
                exists.type = item.type
                exists.value = item.value
                exists.enabled = item.enabled
                exists.url = item.url
                exists.tags = item.tags
                self.add(exists)
            else:
                self.add(item)
        except Exception as e:
            print(e)

    def updateId(self, id: str) -> None:
        """Not supported. (The re-insert logic that followed the raise in
        the original was unreachable and has been removed.)"""
        raise NotImplementedError

    def findAllBySource(self, source: str) -> List[Sources]:
        """Return all rows whose source contains *source*."""
        hooks: List[Sources] = []
        try:
            for res in self.s.query(Sources).filter(Sources.source.contains(source)):
                hooks.append(res)
        except Exception:
            pass
        return hooks

    def findAll(self) -> List[Sources]:
        """Return every row in the table."""
        hooks: List[Sources] = []
        try:
            for res in self.s.query(Sources):
                hooks.append(res)
        except Exception:
            pass
        return hooks

    def findAllByName(self, name: str) -> List[Sources]:
        """Return all rows whose name contains *name*."""
        hooks: List[Sources] = []
        try:
            for res in self.s.query(Sources).filter(Sources.name.contains(name)):
                hooks.append(res)
        except Exception:
            pass
        return hooks

    def findById(self, id: str) -> Sources:
        """Return the first row whose id contains *id*, or None."""
        try:
            for res in self.s.query(Sources).filter(Sources.id.contains(id)):
                return res
        except Exception:
            pass
        return None

    def findByName(self, name: str) -> Sources:
        """Return the first row whose name contains *name*, or an empty
        Sources object."""
        try:
            for res in self.s.query(Sources).filter(Sources.name.contains(name)):
                return res
        except Exception:
            pass
        return Sources()

    def findByNameandSource(self, name: str, source: str) -> Sources:
        """Return the first row matching both *name* and *source*, or an
        empty Sources object."""
        try:
            for d in self.s.query(Sources).filter(
                Sources.name.contains(name),
                Sources.source.contains(source)
            ):
                return d
        except Exception as e:
            print(f'SQL Warning - Sources {e}')
        return Sources()

    def findBySourceNameType(self, source: str, name: str, type: str) -> Sources:
        """Return the first row matching *source*, *name* and *type*, or an
        empty Sources object."""
        try:
            for res in self.s.query(Sources).filter(
                Sources.source.contains(source),
                Sources.name.contains(name),
                Sources.type.contains(type)
            ):
                return res
        except Exception:
            pass
        return Sources()

    def findBySourceAndName(self, source: str, name: str) -> Sources:
        """Return the first row matching *source* and *name*, or an empty
        Sources object."""
        try:
            for res in self.s.query(Sources).filter(
                Sources.source.contains(source),
                Sources.name.contains(name)
            ):
                return res
        except Exception:
            pass
        return Sources()

    def findByNameSourceType(self, name: str, source: str, type: str) -> Sources:
        """Return the first row matching *name*, *source* and *type*, or an
        empty Sources object."""
        try:
            for d in self.s.query(Sources).filter(
                Sources.name.contains(name),
                Sources.source.contains(source),
                Sources.type.contains(type)
            ):
                return d
        except Exception:
            pass
        return Sources()

    def clearTable(self) -> None:
        """Delete every row, committing once at the end."""
        try:
            for d in self.s.query(Sources):
                self.s.delete(d)
            self.s.commit()
        except Exception as e:
            print(f"{e}")

    def clearSingle(self, id: str) -> bool:
        """
        This will remove a single entry from the table by its ID value.
        """
        result: bool = False
        try:
            for i in self.s.query(Sources).filter(Sources.id == id):
                self.s.delete(i)
                self.s.commit()
                result = True
        except Exception as e:
            print(e)
        return result

    def toListDict(self, items: List[Sources]) -> List[dict]:
        """Convert a list of rows into a list of plain dicts."""
        return [self.toDict(i) for i in items]

    def toDict(self, item: Sources) -> dict:
        """Convert a single row into a plain dict."""
        return {
            'id': item.id,
            'name': item.name,
            'source': item.source,
            'type': item.type,
            'value': item.value,
            'enabled': item.enabled,
            'url': item.url,
            'tags': item.tags,
            "fromEnv": item.fromEnv
        }
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.decoder.tf_decoder import TFGraph, TFGraphNode
from x2paddle.core.program import PaddleGraph
from x2paddle.core.op_mapper import OpMapper
from x2paddle.core.util import *
import traceback
import math
import inspect
import numpy
import sys
# maps a base name to how many extra times it has been requested
name_counter = dict()


def gen_name(op_name, var_name):
    """Return a unique name of the form "<op_name>_<var_name>"; repeated
    requests for the same pair get a numeric suffix ("..._1", "..._2")."""
    base = "{}_{}".format(op_name, var_name)
    if base in name_counter:
        name_counter[base] += 1
        return base + '_' + str(name_counter[base])
    name_counter[base] = 0
    return base
# compute padding size for SAME mode
def get_same_padding(in_size, kernel_size, stride):
new_size = int(math.ceil(in_size * 1.0 / stride))
pad_size = (new_size - 1) * stride + kernel_size - in_size
if pad_size < 0:
pad_size = 0
pad0 = int(pad_size / 2)
pad1 = pad_size - pad0
return [pad0, pad1]
class TFOpMapper(OpMapper):
directly_map_ops = {
'Relu': ['paddle.nn.ReLU'],
'Relu6': ['paddle.nn.ReLU6'],
'Abs': ['paddle.abs'],
'Sigmoid': ['paddle.nn.Sigmoid'],
'Exp': ['paddle.exp'],
'Rsqrt': ['paddle.rsqrt'],
'Sqrt': ['paddle.sqrt'],
'swish_f32': ['paddle.nn.Swish'],
'Tanh': ['paddle.nn.Tanh'],
'Softplus': ['paddle.nn.Softplus'],
'LeakyRelu': ['paddle.nn.LeakyReLU', dict(alpha='negative_slope')],
'Softmax': ['paddle.nn.Softmax'],
'Floor': ['paddle.floor'],
'Erf': ['paddle.erf'],
'Square': ['paddle.square']
}
elementwise_ops = {
'Add': 'paddle.add',
'AddV2': 'paddle.add',
'RealDiv': 'paddle.divide',
'DivNoNan': 'paddle.divide',
'Sub': 'paddle.subtract',
'Maximum': 'paddle.maximum',
'Minimum': 'paddle.minimum',
'Mul': 'paddle.multiply',
'FloorDiv': 'paddle.floor_divide',
'FloorMod': 'paddle.floor_mod',
'LogicalAnd': 'logical_and',
}
bool_ops = {
'LessEqual': 'paddle.less_equal',
'GreaterEqual': 'paddle.greater_equal',
'Greater': 'paddle.greater_than',
'NotEqual': 'paddle.not_equal',
'Equal': 'paddle.equal',
}
    def __init__(self, decoder):
        """Build a PaddleGraph from the decoder's TF graph by converting
        every node in topological order.

        Raises a generic Exception when op_checker finds unsupported ops.
        """
        super(TFOpMapper, self).__init__()
        self.decoder = decoder
        self.graph = decoder.tf_graph
        # refuse early if any op has no conversion rule
        if not self.op_checker():
            raise Exception("Model is not supported yet.")
        self.params = dict()  # tensor-name -> weight value
        self.nn_name2id = dict()  # per-layer-kind counters for unique names
        self.input_index = 0  # next generated input name suffix ("x<i>")
        self.inputs_info = dict()  # generated input name -> [shape, dtype]
        self.paddle_graph = PaddleGraph(
            parent_layer=None, graph_type="dygraph", source_type="tf")
        self.paddle_graph.outputs = self.graph.output_nodes
        # drop input nodes that are not actual feed points (only
        # Placeholder / OneShotIterator / IteratorV2 are kept)
        not_placeholder = list()
        for name in self.graph.input_nodes:
            if self.graph.get_node(
                name).layer_type != "Placeholder" and self.graph.get_node(
                name
            ).layer_type != "OneShotIterator" and self.graph.get_node(
                name).layer_type != "IteratorV2":
                not_placeholder.append(name)
        for name in not_placeholder:
            idx = self.graph.input_nodes.index(name)
            del self.graph.input_nodes[idx]
        print("Total nodes: {}".format(
            sum([
                isinstance(node, TFGraphNode)
                for name, node in self.graph.node_map.items()
            ])))
        print("Nodes converting ...")
        # convert each node with whichever rule applies: direct mapping,
        # elementwise, bool comparison, or a dedicated method named after
        # the TF op
        for i, node_name in enumerate(self.graph.topo_sort):
            sys.stderr.write("\rConverting node {} ... ".format(i + 1))
            node = self.graph.get_node(node_name)
            op = node.layer_type
            if op in self.directly_map_ops:
                self.directly_map(node)
            elif op in self.elementwise_ops:
                self.elementwise_map(node)
            elif op in self.bool_ops:
                self.bool_map(node)
            elif hasattr(self, op):
                func = getattr(self, op)
                func(node)
        print("\nNodes converted.")
        self.paddle_graph.set_name(self.graph.graph_name)
        self.paddle_graph.set_parameters(self.params)
        self.paddle_graph.set_inputs_info(self.inputs_info)
def op_checker(self):
unsupported_ops = set()
for node_name in self.graph.topo_sort:
node = self.graph.get_node(node_name)
op = node.layer_type
if not hasattr(self, op) and \
op not in self.directly_map_ops and \
op not in self.elementwise_ops and \
op not in self.bool_ops:
unsupported_ops.add(op)
if len(unsupported_ops) == 0:
return True
else:
if len(unsupported_ops) > 0:
print("\n========= {} OPs are not supported yet ===========".
format(len(unsupported_ops)))
for op in unsupported_ops:
print("========== {} ============".format(op))
return False
    def directly_map(self, node):
        """Convert a single-input TF op via the directly_map_ops table,
        renaming TF attributes to paddle arguments where a mapping exists."""
        inputs = node.layer.input
        assert len(inputs) == 1, 'directly_map error with multi inputs'
        op_info = self.directly_map_ops[node.layer_type]
        input = self.graph.get_input_node(node, 0)
        paddle_op = op_info[0]
        layer_attrs = dict()
        # optional second entry maps TF attribute names -> paddle arg names
        if len(op_info) > 1:
            attrs_name_map_dict = op_info[1]
            for tf_attr_name, pd_attr_name in attrs_name_map_dict.items():
                layer_attrs[pd_attr_name] = node.get_attr(tf_attr_name)
        if paddle_op.startswith("paddle.nn"):
            # nn layers are stateful: give the layer object its own unique
            # name ([10:] strips the "paddle.nn." prefix, 10 characters)
            op_name = paddle_op[10:].lower()
            op_name = name_generator(op_name, self.nn_name2id)
            output_name = node.name
            layer_outputs = [op_name, output_name]
            self.paddle_graph.add_layer(
                kernel=paddle_op,
                inputs={"x": input.name},
                outputs=layer_outputs,
                **layer_attrs)
        else:
            # functional ops only need the tensor output name
            self.paddle_graph.add_layer(
                kernel=paddle_op,
                inputs={"x": input.name},
                outputs=[node.name],
                **layer_attrs)
def elementwise_map(self, node, op_type=None):
if op_type is None:
assert node.layer_type in self.elementwise_ops
op_type = self.elementwise_ops[node.layer_type]
x = self.graph.get_input_node(node, 0)
y = self.graph.get_input_node(node, 1)
x_shape = x.out_shapes[0]
y_shape = y.out_shapes[0]
layer_id = self.paddle_graph.add_layer(
kernel=op_type,
inputs={"x": x.name,
"y": y.name},
outputs=[node.name])
self.paddle_graph.layers[layer_id].input_shapes = {
"x": x_shape,
"y": y_shape
}
def bool_map(self, node):
op_type = self.bool_ops[node.layer_type]
self.elementwise_map(node, op_type)
node.set_dtype("bool")
    def Placeholder(self, node):
        """Convert a TF Placeholder into a graph input fed by a generated
        name "x<i>", recording its shape/dtype in inputs_info."""
        shape = node.out_shapes[0]
        assert len(shape) != 0, "Unknown shape of input nodes[{}].".format(
            node.layer_name)
        dtype = node.dtype  # NOTE(review): unused; inputs_info reads node.dtype directly
        self.paddle_graph.add_layer(
            kernel="paddle.to_tensor",
            inputs={},
            outputs=[node.name],
            data="x{}".format(self.input_index))
        # record the input spec for the generated model
        self.inputs_info["x{}".format(self.input_index)] = [shape, node.dtype]
        self.input_index += 1
    def Const(self, node):
        """Convert a TF Const: scalars become a paddle.full layer, tensors
        become a created parameter initialized from the stored weights."""
        shape = node.out_shapes[0]
        dtype = node.dtype
        value = node.value
        if len(shape) == 0:
            # scalar constant path
            assert value.size == 1, "Unexpected situation happend"
            # emit inf as a literal expression in the generated code
            # NOTE(review): negative infinity is not special-cased here --
            # confirm whether it can occur
            if value == float('inf'):
                value = "float('inf')"
            self.paddle_graph.add_layer(
                "paddle.full",
                inputs={},
                outputs=[node.name],
                dtype=string(dtype),
                shape=[1],
                fill_value=value)
            return
        # tensor constant path: stash the weights and create a parameter
        self.params[node.name] = node.value
        # skip creation when any dimension is zero (empty tensor)
        if 0 not in shape:
            self.paddle_graph.add_layer(
                "self.create_parameter",
                inputs={},
                outputs=[node.name],
                shape=shape,
                attr=string(node.name),
                dtype=string(dtype),
                default_initializer="paddle.nn.initializer.Constant(value=0.0)")
def Transpose(self, node):
input = self.graph.get_input_node(node, 0)
perm = self.graph.get_input_node(node, 1)
if perm.layer_type == "Const":
perm = perm.value.tolist()
else:
perm = self.decoder.infer_tensor(
perm, use_diff_inputs=False).tolist()
self.paddle_graph.add_layer(
"paddle.transpose",
inputs={"x": input.name},
outputs=[node.name],
perm=perm)
def Where(self, node):
if len(node.layer.input) == 1:
cond = self.graph.get_input_node(node, 0)
self.paddle_graph.add_layer(
"paddle.nonzero", inputs={"x": cond.name}, outputs=[node.name])
else:
cond = self.graph.get_input_node(node, 0)
x = self.graph.get_input_node(node, 1)
y = self.graph.get_input_node(node, 2)
self.paddle_graph.add_layer(
"paddle.where",
inputs={"condition": cond.name,
"x": x.name,
"y": y.name},
outputs=[node.name])
def Neg(self, node):
input = self.graph.get_input_node(node, 0)
self.paddle_graph.add_layer(
"paddle.scale",
inputs={"x": input.name},
outputs=[node.name],
scale=-1)
    def Fill(self, node):
        """Convert TF Fill to paddle.full.

        The fill value must come from a Const node; the shape may be a
        Const (inlined as an attribute) or a runtime tensor (wired as an
        input).
        """
        dims = self.graph.get_input_node(node, 0)
        input_value = self.graph.get_input_node(node, 1)
        inputs = dict()
        layer_attrs = dict()
        assert input_value.layer_type == "Const", "Value of fill OP should be Const"
        if dims.layer_type == "Const":
            # static shape: pass as attribute
            layer_attrs["shape"] = dims.value.tolist()
        else:
            # dynamic shape: pass as a tensor input
            inputs["shape"] = dims.name
        layer_attrs["dtype"] = string(input_value.dtype)
        layer_attrs["fill_value"] = input_value.value
        self.paddle_graph.add_layer(
            "paddle.full", inputs=inputs, outputs=[node.name], **layer_attrs)
    def DepthToSpace(self, node):
        """Map TF DepthToSpace via reshape/transpose plus pixel_shuffle.

        NHWC inputs are first transposed to NCHW; the chain
        reshape -> transpose -> reshape rearranges channels so that
        paddle.nn.functional.pixel_shuffle produces the TF layout, then the
        result is transposed back for NHWC.
        """
        input = self.graph.get_input_node(node, 0)
        block_size = node.get_attr("block_size")
        data_format = node.get_attr("data_format").decode()
        if data_format == "NHWC":
            n, h, w, c = input.out_shapes[0]
        else:
            n, c, h, w = input.out_shapes[0]
        input_name = input.name
        if data_format == "NHWC":
            # Work internally in NCHW.
            transpose_name = gen_name("depth_to_space", "transpose")
            self.paddle_graph.add_layer(
                kernel="paddle.transpose",
                inputs={"x": input.name},
                outputs=[transpose_name],
                perm=[0, 3, 1, 2])
            input_name = transpose_name
        # Split channels into block_size^2 groups (0 keeps the batch dim).
        shape = [0, block_size * block_size, -1, h, w]
        reshape_name = gen_name("depth_to_space", "reshape")
        self.paddle_graph.add_layer(
            kernel="paddle.reshape",
            inputs={"x": input_name},
            outputs=[reshape_name],
            shape=shape)
        transpose_name = gen_name("depth_to_space", "transpose")
        self.paddle_graph.add_layer(
            kernel="paddle.transpose",
            inputs={"x": reshape_name},
            outputs=[transpose_name],
            perm=[0, 2, 1, 3, 4])
        reshape_name = gen_name("depth_to_space", "reshape")
        self.paddle_graph.add_layer(
            kernel="paddle.reshape",
            inputs={"x": transpose_name},
            outputs=[reshape_name],
            shape=[0, c, h, w])
        self.paddle_graph.add_layer(
            kernel="paddle.nn.functional.pixel_shuffle",
            inputs={"x": reshape_name},
            outputs=[node.name],
            upscale_factor=block_size)
        if data_format == "NHWC":
            # Restore the original NHWC layout.
            self.paddle_graph.add_layer(
                kernel="paddle.transpose",
                inputs={"x": node.name},
                outputs=[node.name],
                perm=[0, 2, 3, 1])
    def MaxPool(self, node):
        """Map TF MaxPool to paddle.nn.MaxPool2D.

        For NHWC inputs the tensor is transposed to NCHW, ksize/strides are
        permuted accordingly, and the result is transposed back.
        """
        input = self.graph.get_input_node(node, 0)
        k_size = node.get_attr("ksize")
        strides = node.get_attr("strides")
        data_format = node.get_attr("data_format").decode()
        pad_mode = node.get_attr("padding").decode()
        input_name = input.name
        if data_format == "NHWC":
            transpose_name = gen_name("max_pool", "transpose")
            self.paddle_graph.add_layer(
                kernel="paddle.transpose",
                inputs={"x": input.name},
                outputs=[transpose_name],
                perm=[0, 3, 1, 2])
            strides = [strides[i] for i in [0, 3, 1, 2]]
            k_size = [k_size[i] for i in [0, 3, 1, 2]]
            input_name = transpose_name
        op_name = name_generator("pool", self.nn_name2id)
        output_name = node.name
        layer_outputs = [op_name, output_name]
        self.paddle_graph.add_layer(
            kernel="paddle.nn.MaxPool2D",
            inputs={"input": input_name},
            outputs=layer_outputs,
            kernel_size=k_size[2:4],
            stride=strides[2:4],
            padding=string(pad_mode))
        if data_format == "NHWC":
            self.paddle_graph.add_layer(
                kernel="paddle.transpose",
                inputs={"x": node.name},
                outputs=[node.name],
                perm=[0, 2, 3, 1])
def Conv2D(self, node):
op_name = name_generator("conv", self.nn_name2id)
output_name = node.name
layer_outputs = [op_name, output_name]
input = self.graph.get_input_node(node, 0)
kernel = self.graph.get_input_node(node, 1)
k_size = kernel.out_shapes[0]
strides = node.get_attr("strides")
dilations = node.get_attr("dilations")
data_format = node.get_attr("data_format").decode()
pad_mode = node.get_attr("padding").decode()
if data_format == "NHWC":
n, h, w, c = input.out_shapes[0]
else:
n, c, h, w = input.out_shapes[0]
if kernel.layer_type == 'Const':
kernel_value = kernel.value
else:
kernel_value = self.decoder.infer_tensor(
kernel, use_diff_inputs=False)
kernel_weight_name = op_name + ".weight"
self.params[kernel_weight_name] = numpy.transpose(kernel_value,
(3, 2, 0, 1))
input_name = input.name
if data_format == "NHWC":
strides = [strides[i] for i in [0, 3, 1, 2]]
dilations = [dilations[i] for i in [0, 3, 1, 2]]
transpose_name = gen_name("conv2d", "transpose")
self.paddle_graph.add_layer(
kernel="paddle.transpose",
inputs={"x": input.name},
outputs=[transpose_name],
perm=[0, 3, 1, 2])
input_name = transpose_name
if c == -1:
attr = {"shape": [0, k_size[2], 0, 0]}
self.paddle_graph.add_layer(
kernel="paddle.reshape",
inputs={"x": input_name},
outputs=[input_name],
shape=[0, k_size[2], 0, 0])
self.paddle_graph.add_layer(
kernel="paddle.nn.Conv2D",
inputs={"input": input_name},
outputs=layer_outputs,
weight_attr=string(kernel_weight_name),
bias_attr=False,
in_channels=k_size[2],
out_channels=k_size[3],
kernel_size=k_size[0:2],
stride=strides[2:4],
dilation=dilations[2:4],
padding=string(pad_mode))
if data_format == "NHWC":
self.paddle_graph.add_layer(
kernel="paddle.transpose",
inputs={"x": node.name},
outputs=[node.name],
perm=[0, 2, 3, 1])
def Conv3D(self, node):
op_name = name_generator("conv", self.nn_name2id)
output_name = node.name
layer_outputs = [op_name, output_name]
input = self.graph.get_input_node(node, 0)
kernel = self.graph.get_input_node(node, 1)
k_size = kernel.out_shapes[0]
strides = node.get_attr("strides")
dilations = node.get_attr("dilations")
data_format = node.get_attr("data_format").decode()
pad_mode = node.get_attr("padding").decode()
if data_format == "NDHWC":
n, d, h, w, c = input.out_shapes[0]
else:
n, c, d, h, w = input.out_shapes[0]
if kernel.layer_type == 'Const':
kernel_value = kernel.value
else:
kernel_value = self.decoder.infer_tensor(
kernel, use_diff_inputs=False)
kernel_weight_name = op_name + ".weight"
self.params[kernel_weight_name] = numpy.transpose(kernel_value,
(4, 3, 0, 1, 2))
input_name = input.name
if data_format == "NDHWC":
strides = [strides[i] for i in [0, 4, 1, 2, 3]]
dilations = [dilations[i] for i in [0, 4, 1, 2, 3]]
transpose_name = gen_name("conv3d", "transpose")
self.paddle_graph.add_layer(
kernel="paddle.transpose",
inputs={"x": input.name},
outputs=[transpose_name],
perm=[0, 4, 1, 2, 3])
input_name = transpose_name
if c == -1:
attr = {"shape": [0, k_size[2], 0, 0, 0]}
self.paddle_graph.add_layer(
kernel="paddle.reshape",
inputs={"x": input_name},
outputs=[input_name],
shape=[0, k_size[2], 0, 0, 0])
self.paddle_graph.add_layer(
kernel="paddle.nn.Conv3D",
inputs={"input": input_name},
outputs=layer_outputs,
weight_attr=string(kernel_weight_name),
bias_attr=False,
in_channels=k_size[3],
out_channels=k_size[4],
kernel_size=k_size[0:3],
stride=strides[2:5],
dilation=dilations[2:5],
padding=string(pad_mode))
if data_format == "NDHWC":
self.paddle_graph.add_layer(
kernel="paddle.transpose",
inputs={"x": node.name},
outputs=[node.name],
perm=[0, 2, 3, 4, 1])
def BiasAdd(self, node):
input = self.graph.get_input_node(node, 0)
bias = self.graph.get_input_node(node, 1)
self.paddle_graph.add_layer(
kernel="paddle.add",
inputs={"x": input.name,
"y": bias.name},
outputs=[node.name])
    def FusedBatchNorm(self, node):
        """Map TF FusedBatchNorm to paddle.nn.BatchNorm (inference mode).

        All four statistics inputs (gamma/beta/mean/var) must be Const.
        Their parameter arrays are duplicated under node-prefixed names so
        each converted BN layer owns its weights.  NHWC inputs are
        transposed to NCHW around the op.
        """
        op_name = name_generator("bn", self.nn_name2id)
        output_name = node.name
        layer_outputs = [op_name, output_name]
        input = self.graph.get_input_node(node, 0)
        gamma = self.graph.get_input_node(node, 1)
        beta = self.graph.get_input_node(node, 2)
        moving_mean = self.graph.get_input_node(node, 3)
        moving_var = self.graph.get_input_node(node, 4)
        data_format = node.get_attr("data_format").decode()
        assert gamma.layer_type == "Const"
        assert beta.layer_type == "Const"
        assert moving_mean.layer_type == "Const"
        assert moving_var.layer_type == "Const"
        input_name = input.name
        if data_format == "NHWC":
            transpose_name = gen_name("batch_norm", "transpose")
            self.paddle_graph.add_layer(
                kernel="paddle.transpose",
                inputs={"x": input.name},
                outputs=[transpose_name],
                perm=[0, 3, 1, 2])
            input_name = transpose_name
            n, h, w, c = input.out_shapes[0]
        else:
            n, c, h, w = input.out_shapes[0]
        # Re-register the statistics under per-node names to avoid
        # clashing when the same Const feeds several BN layers.
        self.params["{}_{}".format(node.name, gamma.name)] = self.params[
            gamma.name]
        self.params["{}_{}".format(node.name, beta.name)] = self.params[
            beta.name]
        self.params["{}_{}".format(node.name, moving_mean.name)] = self.params[
            moving_mean.name]
        self.params["{}_{}".format(node.name, moving_var.name)] = self.params[
            moving_var.name]
        self.paddle_graph.add_layer(
            kernel="paddle.nn.BatchNorm",
            inputs={"input": input_name},
            outputs=layer_outputs,
            num_channels=c,
            epsilon=node.get_attr("epsilon"),
            param_attr=string("{}_{}".format(node.name, gamma.name)),
            bias_attr=string("{}_{}".format(node.name, beta.name)),
            moving_mean_name=string("{}_{}".format(node.name,
                                                   moving_mean.name)),
            moving_variance_name=string("{}_{}".format(node.name,
                                                       moving_var.name)),
            is_test=True)
        if data_format == "NHWC":
            self.paddle_graph.add_layer(
                kernel="paddle.transpose",
                inputs={"x": node.name},
                outputs=[node.name],
                perm=[0, 2, 3, 1])
def FusedBatchNormV3(self, node):
self.FusedBatchNorm(node)
def Mean(self, node):
input = self.graph.get_input_node(node, 0)
reduce_idx = self.graph.get_input_node(node, 1)
assert reduce_idx.layer_type == "Const", "Only support Const parameter[reduce_idx]"
dims = reduce_idx.value.tolist()
keep_dims = node.get_attr("keep_dims")
self.paddle_graph.add_layer(
kernel="paddle.mean",
inputs={"x": input.name},
outputs=[node.name],
axis=dims,
keepdim=keep_dims)
def Reshape(self, node):
input = self.graph.get_input_node(node, 0)
param = self.graph.get_input_node(node, 1)
input_name = input.name
if param.layer_type == "Const":
shape = param.value.tolist()
self.paddle_graph.add_layer(
kernel="paddle.reshape",
inputs={"x": input_name},
outputs=[node.name],
shape=shape)
else:
self.paddle_graph.add_layer(
kernel="paddle.reshape",
inputs={"x": input_name,
"shape": param.name},
outputs=[node.name])
if param.layer_type != "Const":
out_shape = numpy.array(node.out_shapes[0])
if (out_shape > 0).any():
out_shape[out_shape < 0] = 0
self.paddle_graph.add_layer(
kernel="paddle.reshape",
inputs={"x": node.name},
outputs=[node.name],
shape=out_shape.tolist())
def Pad(self, node):
input = self.graph.get_input_node(node, 0)
paddings = self.graph.get_input_node(node, 1)
assert paddings.layer_type == "Const", "Padding should be Const"
paddings = paddings.value.flatten().tolist()
constant_values = 0
if len(node.layer.input) > 2:
constant_values = self.graph.get_input_node(node, 2)
assert constant_values.layer_type == "Const", "Padding should be Const"
constant_values = constant_values.value
if len(paddings) == 8 and sum(paddings[:2]) == 0 \
and sum(paddings[-2:]) == 0:
paddings = paddings[2: -2]
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.pad",
inputs={"x": input.name},
outputs=[node.name],
pad=paddings,
value=constant_values,
data_format=string('NHWC'))
else:
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.pad",
inputs={"x": input.name},
outputs=[node.name],
pad=paddings,
value=constant_values)
def MirrorPad(self, node):
self.Pad(node)
def PadV2(self, node):
self.Pad(node)
def Squeeze(self, node):
input = self.graph.get_input_node(node, 0)
squeeze_dims = node.get_attr('squeeze_dims')
self.paddle_graph.add_layer(
kernel="paddle.squeeze",
inputs={"x": input.name},
outputs=[node.name],
axis=squeeze_dims)
def Shape(self, node):
input = self.graph.get_input_node(node, 0)
input_name = input.name
self.paddle_graph.add_layer(
kernel="paddle.shape",
inputs={"input": input_name},
outputs=[node.name])
def Size(self, node):
input = self.graph.get_input_node(node, 0)
input_name = input.name
self.paddle_graph.add_layer(
kernel="paddle.shape",
inputs={"input": input_name},
outputs=[node.name])
self.paddle_graph.add_layer(
kernel="paddle.prod", inputs={"x": node.name}, outputs=[node.name])
def Ceil(self, node):
input = self.graph.get_input_node(node, 0)
self.paddle_graph.add_layer(
kernel="paddle.ceil", inputs={"x": input.name},
outputs=[node.name])
def ArgMax(self, node):
input = self.graph.get_input_node(node, 0)
axis = self.graph.get_input_node(node, 1)
assert axis.layer_type == "Const", "ArgMax only support Const parameter"
axis = axis.value
self.paddle_graph.add_layer(
kernel="paddle.argmax",
inputs={"x": input.name},
outputs=[node.name],
axis=axis)
def TopKV2(self, node):
input = self.graph.get_input_node(node, 0)
k = self.graph.get_input_node(node, 1)
assert k.layer_type == "Const", "ArgMax only support Const parameter"
k = k.value
sort = node.get_attr('sorted')
self.paddle_graph.add_layer(
kernel="paddle.topk",
inputs={"x": input.name},
outputs=[node.name],
k=k,
sorted=sort)
def MatMul(self, node):
x = self.graph.get_input_node(node, 0)
y = self.graph.get_input_node(node, 1)
transpose_a = node.get_attr('transpose_a')
transpose_b = node.get_attr('transpose_b')
if transpose_a is None:
transpose_a = node.get_attr('adj_x')
if transpose_b is None:
transpose_b = node.get_attr('adj_y')
self.paddle_graph.add_layer(
kernel="paddle.matmul",
inputs={"x": x.name,
"y": y.name},
outputs=[node.name],
transpose_x=transpose_a,
transpose_y=transpose_b)
def BatchMatMul(self, node):
return self.MatMul(node)
def BatchMatMulV2(self, node):
return self.MatMul(node)
    def DepthwiseConv2dNative(self, node):
        """Map TF DepthwiseConv2dNative to a grouped paddle.nn.Conv2D.

        The Const kernel (H, W, in_c, multiplier) is transposed to paddle's
        layout; NHWC inputs are transposed to NCHW around the conv.
        """
        op_name = name_generator("conv", self.nn_name2id)
        output_name = node.name
        layer_outputs = [op_name, output_name]
        input = self.graph.get_input_node(node, 0)
        kernel = self.graph.get_input_node(node, 1)
        assert kernel.layer_type == "Const", "Kernel of DepthwiseConv2DNative should be Const"
        in_shape = input.out_shapes[0]
        k_size = kernel.out_shapes[0]
        strides = node.get_attr("strides")
        dilations = node.get_attr("dilations")
        data_format = node.get_attr("data_format").decode()
        pad_mode = node.get_attr("padding").decode()
        kernel_weight_name = op_name + ".weight"
        self.params[kernel_weight_name] = numpy.transpose(kernel.value,
                                                          (2, 3, 0, 1))
        input_name = input.name
        if data_format == "NHWC":
            in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
            strides = [strides[i] for i in [0, 3, 1, 2]]
            dilations = [dilations[i] for i in [0, 3, 1, 2]]
            transpose_name = gen_name('depthwise_conv2d', 'transpose')
            self.paddle_graph.add_layer(
                kernel="paddle.transpose",
                inputs={"x": input.name},
                outputs=[transpose_name],
                perm=[0, 3, 1, 2])
            input_name = transpose_name
        # NOTE(review): out_channels=k_size[2] and
        # groups=k_size[3]*in_shape[1] match depthwise semantics only when
        # the channel multiplier (k_size[3]) is 1 -- confirm behavior for
        # multiplier > 1 before relying on this path.
        self.paddle_graph.add_layer(
            kernel="paddle.nn.Conv2D",
            inputs={"input": input_name},
            outputs=layer_outputs,
            weight_attr=string(kernel_weight_name),
            bias_attr=False,
            in_channels=in_shape[1],
            out_channels=k_size[2],
            kernel_size=k_size[0:2],
            stride=strides[2:4],
            dilation=dilations[2:4],
            groups=k_size[3] * in_shape[1],
            padding=string(pad_mode))
        if data_format == "NHWC":
            self.paddle_graph.add_layer(
                kernel="paddle.transpose",
                inputs={"x": node.name},
                outputs=[node.name],
                perm=[0, 2, 3, 1])
    def AvgPool(self, node):
        """Map TF AvgPool to paddle.nn.AvgPool2D.

        NHWC inputs are transposed to NCHW around the pooling op; ksize and
        strides are permuted to match.
        """
        input = self.graph.get_input_node(node, 0)
        k_size = node.get_attr("ksize")
        strides = node.get_attr("strides")
        data_format = node.get_attr("data_format").decode()
        pad_mode = node.get_attr("padding").decode()
        input_name = input.name
        if data_format == "NHWC":
            transpose_name = gen_name("avg_pool", "transpose")
            self.paddle_graph.add_layer(
                kernel="paddle.transpose",
                inputs={"x": input.name},
                outputs=[transpose_name],
                perm=[0, 3, 1, 2])
            strides = [strides[i] for i in [0, 3, 1, 2]]
            k_size = [k_size[i] for i in [0, 3, 1, 2]]
            input_name = transpose_name
        op_name = name_generator("pool", self.nn_name2id)
        output_name = node.name
        layer_outputs = [op_name, output_name]
        # TODO(syf): The op has diff.
        self.paddle_graph.add_layer(
            kernel="paddle.nn.AvgPool2D",
            inputs={"input": input_name},
            outputs=layer_outputs,
            kernel_size=k_size[2:4],
            stride=strides[2:4],
            padding=string(pad_mode))
        if data_format == "NHWC":
            self.paddle_graph.add_layer(
                kernel="paddle.transpose",
                inputs={"x": node.name},
                outputs=[node.name],
                perm=[0, 2, 3, 1])
def Pack(self, node):
inputs_list = list()
for i in range(len(node.inputs)):
inputs_list.append(self.graph.get_input_node(node, i))
input_names = [i.name for i in inputs_list]
axis = node.get_attr("axis")
self.paddle_graph.add_layer(
kernel="paddle.stack",
inputs={"x": input_names},
outputs=[node.name],
axis=axis)
if len(node.out_shapes[0]) == 1:
self.paddle_graph.add_layer(
kernel="paddle.reshape",
inputs={"x": node.name},
outputs=[node.name],
shape=[-1])
    def Unpack(self, node):
        """Map TF Unpack to paddle.unstack.

        A rank-1 input is first unsqueezed so unstacking along the new
        leading dim yields the expected pieces.  Each piece is named
        "<node>_p<i>"; a single output is wrapped in brackets so the
        generated code unpacks the one-element list.
        """
        input = self.graph.get_input_node(node, 0)
        axis = node.get_attr("axis")
        num = node.get_attr("num")
        shape = input.out_shapes[0]
        input_name = input.name
        if len(shape) == 1:
            if shape[0] > 0 and num == shape[0]:
                self.paddle_graph.add_layer(
                    kernel="paddle.unsqueeze",
                    inputs={"x": input.name},
                    outputs=[node.name],
                    axis=[0])
                input_name = node.name
                axis = 1
            else:
                raise Exception("Unexpected situation happend in Unpack OP")
        layer_outputs = [
            "{}_p{}".format(node.layer_name, i) for i in range(num)
        ]
        if len(layer_outputs) == 1:
            # "[name]" makes the emitted assignment a list-unpacking target.
            layer_outputs[0] = "[{}]".format(node.layer_name)
        self.paddle_graph.add_layer(
            kernel="paddle.unstack",
            inputs={"x": input_name},
            outputs=layer_outputs,
            axis=axis,
            num=num)
def ConcatV2(self, node):
inputs_list = list()
for i in range(len(node.inputs) - 1):
inputs_list.append(self.graph.get_input_node(node, i))
axis = self.graph.get_input_node(node, -1)
assert axis.layer_type == "Const", "axis for ConcatV2 must be type Const"
axis = axis.value
if axis < 0:
axis += len(inputs_list[0].out_shapes[0])
input_names = [i.name for i in inputs_list]
self.paddle_graph.add_layer(
kernel="paddle.concat",
inputs={"x": input_names},
outputs=[node.name],
axis=axis)
def Concat(self, node):
inputs_list = list()
for i in range(1, len(node.inputs)):
inputs_list.append(self.graph.get_input_node(node, i))
axis = self.graph.get_input_node(node, 0)
assert axis.layer_type == "Const", "axis for ConcatV2 must be type Const"
axis = axis.value
if axis < 0:
axis += len(inputs_list[0].out_shapes[0])
input_names = [i.name for i in inputs_list]
self.paddle_graph.add_layer(
kernel="paddle.concat",
inputs={"x": input_names},
outputs=[node.name],
axis=axis)
    def AddN(self, node):
        """Map TF AddN (element-wise sum of N tensors) to paddle.add_n."""
        inputs_list = list()
        # NOTE(review): the "- 1" mirrors ConcatV2 (whose last input is the
        # axis), but TF AddN sums *all* of its inputs, so this appears to
        # drop the final operand -- confirm against the decoder's
        # node.inputs layout (e.g. trailing control inputs) before changing.
        for i in range(len(node.inputs) - 1):
            inputs_list.append(self.graph.get_input_node(node, i))
        input_names = [i.name for i in inputs_list]
        self.paddle_graph.add_layer(
            kernel="paddle.add_n",
            inputs={"inputs": input_names},
            outputs=[node.name])
    def StridedSlice(self, node):
        """Map TF StridedSlice to paddle.slice plus unsqueeze/squeeze.

        Supports only unit strides and ellipsis_mask == 0.  begin/end masks
        open the corresponding bound (0 / 999999 sentinel); new_axis_mask
        dims are added afterwards with unsqueeze and shrink_axis_mask dims
        removed with squeeze.
        """
        input = self.graph.get_input_node(node, 0)
        begin = self.graph.get_input_node(node, 1)
        end = self.graph.get_input_node(node, 2)
        strides = self.graph.get_input_node(node, 3)
        if strides.layer_type == "Const":
            strides = strides.value.tolist()
        else:
            strides = self.decoder.infer_tensor(strides)
        if begin.layer_type == "Const":
            begin = begin.value.tolist()
        else:
            begin = self.decoder.infer_tensor(begin)
        if end.layer_type == "Const":
            end = end.value.tolist()
        else:
            end = self.decoder.infer_tensor(end)
        assert len(set(strides)) == 1 and strides[
            0] == 1, "Only support strides be 1 in StridedSlice OP"
        # Pad begin/end out to the input rank.
        if len(begin) < len(input.out_shapes[0]):
            begin = begin + [0] * (len(input.out_shapes[0]) - len(begin))
        if len(end) < len(input.out_shapes[0]):
            end = end + [0] * (len(input.out_shapes[0]) - len(end))
        # 999999 acts as an "until the end" sentinel for paddle.slice.
        for i in range(len(end)):
            if end[i] == 0:
                end[i] = 999999
        begin_mask = node.get_attr('begin_mask')
        end_mask = node.get_attr('end_mask')
        ellipsis_mask = node.get_attr('ellipsis_mask')
        new_axis_mask = node.get_attr('new_axis_mask')
        shrink_axis_mask = node.get_attr('shrink_axis_mask')
        assert ellipsis_mask == 0, "(OP:{} Name:{})Only support ellipsis_mask be 0[now: {}] n StridedSlice OP".format(
            node.layer_type, node.layer.name, ellipsis_mask)
        # TODO codes without validation
        # Use it carefully
        new_begin = list()
        new_end = list()
        new_axes = list()
        shrink_axes = list()
        for i, item in enumerate(begin):
            mask = (new_axis_mask >> i) & 1
            if mask != 0:
                new_axes.append(i)
                continue
            mask = (shrink_axis_mask >> i) & 1
            if mask != 0:
                shrink_axes.append(i)
            mask = (begin_mask >> i) & 1
            if mask != 0:
                new_begin.append(0)
            else:
                new_begin.append(item)
            mask = (end_mask >> i) & 1
            if mask != 0:
                new_end.append(999999)
            else:
                new_end.append(end[i])
        # paddle.slice has no bool kernel: round-trip through int32.
        if input.dtype == "bool":
            self.paddle_graph.add_layer(
                "paddle.cast",
                inputs={"x": input.name},
                outputs=[input.name],
                dtype=string("int32"))
        self.paddle_graph.add_layer(
            kernel="paddle.slice",
            inputs={"input": input.name},
            outputs=[node.name],
            axes=[i for i in range(len(new_begin))],
            starts=new_begin,
            ends=new_end)
        if input.dtype == "bool":
            self.paddle_graph.add_layer(
                "paddle.cast",
                inputs={"x": node.name},
                outputs=[node.name],
                dtype=string("bool"))
        if len(new_axes) > 0:
            self.paddle_graph.add_layer(
                kernel="paddle.unsqueeze",
                inputs={"x": node.name},
                outputs=[node.name],
                axis=new_axes)
        if len(shrink_axes) > 0:
            # Skip the squeeze when it would drop to rank 0.
            if len(input.out_shapes[0]) + len(new_axes) <= 1:
                pass
            else:
                self.paddle_graph.add_layer(
                    kernel="paddle.squeeze",
                    inputs={"x": node.name},
                    outputs=[node.name],
                    axis=shrink_axes)
def Prod(self, node):
input = self.graph.get_input_node(node, 0)
reduction_indices = self.graph.get_input_node(node, 1)
assert reduction_indices.layer_type == "Const"
keep_dims = node.get_attr('keep_dims')
axis = reduction_indices.value
self.paddle_graph.add_layer(
kernel="paddle.prod",
inputs={"x": input.name},
outputs=[node.layer_name],
keepdim=keep_dims,
axis=axis)
def Split(self, node):
dim = self.graph.get_input_node(node, 0)
input = self.graph.get_input_node(node, 1)
assert dim.layer_type == "Const"
num_split = node.get_attr('num_split')
dim = dim.value
self.paddle_graph.add_layer(
kernel="paddle.split",
inputs={"x": input.name},
outputs=[
"{}_p{}".format(node.layer_name, i) for i in range(num_split)
],
num_or_sections=num_split,
axis=dim)
def SplitV(self, node):
input = self.graph.get_input_node(node, 0)
size_splits = self.graph.get_input_node(node, 1)
assert size_splits.layer_type == "Const", "size_splits of SplitV OP should be Const"
size_splits = size_splits.value.tolist()
dim = self.graph.get_input_node(node, 2)
assert dim.layer_type == "Const", "dim of SplitV OP should be Const"
dim = dim.value
self.paddle_graph.add_layer(
kernel="paddle.split",
inputs={"x": input.name},
outputs=[
"{}_p{}".format(node.layer_name, i)
for i in range(len(size_splits))
],
num_or_sections=size_splits,
axis=dim)
    def Slice(self, node):
        """Map TF Slice to paddle.crop.

        Offsets fall back to decoder inference when dynamic; a dynamic size
        tensor is reshaped to its known static shape and wired in as the
        crop's shape input.
        """
        input = self.graph.get_input_node(node, 0)
        begin = self.graph.get_input_node(node, 1)
        size = self.graph.get_input_node(node, 2)
        inputs = {"x": input.name}
        attrs = {}
        if begin.layer_type == "Const":
            begin = begin.value.tolist()
            attrs['offsets'] = begin
        else:
            begin = self.decoder.infer_tensor(
                begin, use_diff_inputs=False).tolist()
            attrs['offsets'] = begin
        if size.layer_type == "Const":
            size = size.value.tolist()
            attrs['shape'] = size
        else:
            shape = size.out_shapes[0]
            reshape_name = gen_name("slice", "reshape")
            self.paddle_graph.add_layer(
                kernel="paddle.reshape",
                inputs={"x": size.name},
                outputs=[reshape_name],
                shape=shape)
            inputs['shape'] = reshape_name
        self.paddle_graph.add_layer(
            kernel="paddle.crop", inputs=inputs, outputs=[node.name], **attrs)
    def ResizeNearestNeighbor(self, node):
        """Map TF ResizeNearestNeighbor to nn.functional.interpolate.

        The input is assumed NHWC and transposed to NCHW around the
        interpolate call.  A dynamic size tensor is reshaped to its static
        shape and wired in as the "size" input.
        """
        input = self.graph.get_input_node(node, 0)
        resize_shape = self.graph.get_input_node(node, 1)
        data_format = "NHWC"
        inputs = {"x": input.name}
        attrs = {
            "align_corners": node.get_attr("align_corners"),
            "mode": string("nearest"),
            "align_mode": 1
        }
        if resize_shape.layer_type == "Const":
            resize_shape = resize_shape.value.tolist()
            attrs["size"] = resize_shape
        else:
            shape = resize_shape.out_shapes[0]
            reshape_name = gen_name("resize_nearest", "reshape")
            self.paddle_graph.add_layer(
                kernel="paddle.reshape",
                inputs={"x": resize_shape.name},
                outputs=[reshape_name],
                shape=shape)
            inputs["size"] = reshape_name
        if data_format == "NHWC":
            transpose_name = gen_name("resize_nearest", "reshape")
            self.paddle_graph.add_layer(
                kernel="paddle.transpose",
                inputs={"x": input.name},
                outputs=[transpose_name],
                perm=[0, 3, 1, 2])
            inputs["x"] = transpose_name
        self.paddle_graph.add_layer(
            kernel="paddle.nn.functional.interpolate",
            inputs=inputs,
            outputs=[node.name],
            **attrs)
        if data_format == "NHWC":
            self.paddle_graph.add_layer(
                kernel="paddle.transpose",
                inputs={"x": node.name},
                outputs=[node.name],
                perm=[0, 2, 3, 1])
    def ResizeBilinear(self, node):
        """Map TF ResizeBilinear to nn.functional.interpolate.

        Same structure as ResizeNearestNeighbor, but with bilinear mode.
        """
        input = self.graph.get_input_node(node, 0)
        resize_shape = self.graph.get_input_node(node, 1)
        data_format = "NHWC"
        inputs = {"x": input.name}
        attrs = {
            "align_corners": node.get_attr("align_corners"),
            "mode": string("bilinear"),
            "align_mode": 1
        }
        if resize_shape.layer_type == "Const":
            resize_shape = resize_shape.value.tolist()
            attrs["size"] = resize_shape
        else:
            shape = resize_shape.out_shapes[0]
            reshape_name = gen_name("resize_bilinear", "reshape")
            self.paddle_graph.add_layer(
                kernel="paddle.reshape",
                inputs={"x": resize_shape.name},
                outputs=[reshape_name],
                shape=shape)
            inputs["size"] = reshape_name
        if data_format == "NHWC":
            transpose_name = gen_name("resize_bilinear", "reshape")
            self.paddle_graph.add_layer(
                kernel="paddle.transpose",
                inputs={"x": input.name},
                outputs=[transpose_name],
                perm=[0, 3, 1, 2])
            inputs["x"] = transpose_name
        self.paddle_graph.add_layer(
            kernel="paddle.nn.functional.interpolate",
            inputs=inputs,
            outputs=[node.name],
            **attrs)
        if data_format == "NHWC":
            self.paddle_graph.add_layer(
                kernel="paddle.transpose",
                inputs={"x": node.name},
                outputs=[node.name],
                perm=[0, 2, 3, 1])
def Cast(self, node):
input = self.graph.get_input_node(node, 0)
dtype = node.dtype
self.paddle_graph.add_layer(
kernel="paddle.cast",
inputs={"x": input.name},
outputs=[node.name],
dtype=string(dtype))
def Sum(self, node):
input = self.graph.get_input_node(node, 0)
reduce_idx = self.graph.get_input_node(node, 1)
assert reduce_idx.layer_type == "Const", "Only support Const parameter[reduce_idx]"
keep_dims = node.get_attr("keep_dims")
dim = reduce_idx.value.tolist()
self.paddle_graph.add_layer(
kernel="paddle.sum",
inputs={"x": input.name},
outputs=[node.name],
axis=dim,
keepdim=keep_dims)
def Max(self, node):
input = self.graph.get_input_node(node, 0)
reduce_idx = self.graph.get_input_node(node, 1)
assert reduce_idx.layer_type == "Const", "Only support Const parameter[reduce_idx]"
keep_dims = node.get_attr("keep_dims")
dim = reduce_idx.value.tolist()
self.paddle_graph.add_layer(
kernel="paddle.max",
inputs={"x": input.name},
outputs=[node.name],
axis=dim,
keepdim=keep_dims)
def RandomUniform(self, node):
shape = self.graph.get_input_node(node, 0)
if shape.layer_type == "Const":
shape = shape.value.tolist()
self.paddle_graph.add_layer(
kernel="paddle.uniform",
inputs={},
outputs=[node.name],
shape=shape,
min=0.0,
max=0.9999)
else:
self.paddle_graph.add_layer(
kernel="paddle.uniform",
inputs={'shape': shape.name},
outputs=[node.name],
min=0.0,
max=0.9999)
    def Conv2DBackpropInput(self, node):
        """Map TF Conv2DBackpropInput (transposed conv) to
        paddle.nn.functional.conv2d_transpose.

        The Const HWOI kernel is transposed to paddle's layout and exposed
        through self.create_parameter; the requested output shape (input 0)
        fixes output_size.  NHWC is handled with the usual transposes.
        """
        op_name = name_generator("conv", self.nn_name2id)
        output_name = node.name
        layer_outputs = [op_name, output_name]
        out_shape = self.graph.get_input_node(node, 0)
        kernel = self.graph.get_input_node(node, 1)
        input = self.graph.get_input_node(node, 2)
        assert kernel.layer_type == "Const", "Kernel of Conv2DBackpropInput should be Const"
        if out_shape.layer_type == "Const":
            out_shape = out_shape.value.tolist()
        else:
            out_shape = self.decoder.infer_tensor(
                out_shape, out_shape=node.out_shapes[0])
        in_shape = input.out_shapes[0]
        # Too many unknown dims: resolve the shape by running inference.
        if in_shape.count(-1) > 2:
            in_shape = self.decoder.infer_tensor(
                input, use_diff_inputs=False).shape
        k_size = kernel.out_shapes[0]
        if k_size.count(-1) > 2:
            k_size = self.decoder.infer_tensor(
                kernel, use_diff_inputs=False).shape
        pad_mode = node.get_attr("padding").decode()
        strides = node.get_attr("strides")
        dilations = node.get_attr("dilations")
        data_format = node.get_attr("data_format").decode()
        kernel_name = op_name + ".weight"
        self.params[kernel_name] = numpy.transpose(kernel.value, (3, 2, 0, 1))
        input_name = input.name
        if data_format == "NHWC":
            in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
            strides = [strides[i] for i in [0, 3, 1, 2]]
            dilations = [dilations[i] for i in [0, 3, 1, 2]]
            transpose_name = gen_name("conv2dbackpropinput", "transpose")
            self.paddle_graph.add_layer(
                kernel="paddle.transpose",
                inputs={"x": input.name},
                outputs=[transpose_name],
                perm=[0, 3, 1, 2])
            input_name = transpose_name
        # Dots are invalid in identifiers, hence the replace(".", "_").
        self.paddle_graph.add_layer(
            "self.create_parameter",
            inputs={},
            outputs=["{}_{}".format(node.name, kernel_name).replace(".", "_")],
            shape=self.params[kernel_name].shape,
            attr=string(kernel_name))
        self.paddle_graph.add_layer(
            kernel="paddle.nn.functional.conv2d_transpose",
            inputs={
                "x": input_name,
                "weight":
                "{}_{}".format(node.name, kernel_name).replace(".", "_")
            },
            outputs=[node.name],
            bias=None,
            stride=strides[2:4],
            dilation=dilations[2:4],
            padding=string(pad_mode),
            output_size=out_shape[1:3])
        if data_format == "NHWC":
            self.paddle_graph.add_layer(
                kernel="paddle.transpose",
                inputs={"x": node.name},
                outputs=[node.name],
                perm=[0, 2, 3, 1])
def Tile(self, node):
input = self.graph.get_input_node(node, 0)
repeat_times = self.graph.get_input_node(node, 1)
inputs = {"x": input.name}
attr = dict()
in_shape = input.out_shapes[0]
if repeat_times.layer_type == "Const":
repeat_times = repeat_times.value.tolist()
attr["repeat_times"] = repeat_times
else:
inputs["repeat_times"] = repeat_times.name
self.paddle_graph.add_layer(
kernel="paddle.tile", inputs=inputs, outputs=[node.name], **attr)
def Range(self, node):
start = self.graph.get_input_node(node, 0)
limit = self.graph.get_input_node(node, 1)
delta = self.graph.get_input_node(node, 2)
inputs = dict()
attr = dict()
dtype = 'int32'
if start.dtype.startswith('float'):
dtype = start.dtype
if start.layer_type == "Const":
attr["start"] = start.value
else:
inputs["start"] = start.name
if limit.dtype.startswith('float'):
dtype = limit.dtype
if limit.layer_type == "Const":
attr["end"] = limit.value
else:
inputs["end"] = limit.name
if delta.dtype.startswith('float'):
dtype = delta.dtype
if delta.layer_type == "Const":
attr["step"] = delta.value
else:
inputs["step"] = delta.name
node.set_dtype(dtype)
attr["dtype"] = string(node.dtype)
self.paddle_graph.add_layer(
kernel="paddle.arange", inputs=inputs, outputs=[node.name], **attr)
    def SquaredDifference(self, node):
        """Map TF SquaredDifference as (x - y) * (x - y).

        Emits subtract then multiply, recording the operand shapes on each
        generated layer (used downstream for broadcasting decisions).
        """
        x = self.graph.get_input_node(node, 0)
        y = self.graph.get_input_node(node, 1)
        inputs = {"x": x.name, "y": y.name}
        x_shape = x.out_shapes[0]
        y_shape = y.out_shapes[0]
        # TODO(syf)
        layer_id = self.paddle_graph.add_layer(
            "paddle.subtract", inputs=inputs, outputs=[node.name])
        self.paddle_graph.layers[layer_id].input_shapes = {
            "x": x_shape,
            "y": y_shape
        }
        # Square the difference by multiplying the result with itself.
        inputs = {"x": node.name, "y": node.name}
        x_shape = node.out_shapes[0]
        y_shape = node.out_shapes[0]
        layer_id = self.paddle_graph.add_layer(
            "paddle.multiply", inputs=inputs, outputs=[node.name])
        self.paddle_graph.layers[layer_id].input_shapes = {
            "x": x_shape,
            "y": y_shape
        }
def OneHot(self, node):
input = self.graph.get_input_node(node, 0)
depth = self.graph.get_input_node(node, 1)
on_value = self.graph.get_input_node(node, 2)
off_value = self.graph.get_input_node(node, 3)
assert depth.layer_type == 'Const', 'Parameter depth should be Const in OneHot'
assert on_value.layer_type == 'Const', 'Parameter on_value should be Const in OneHot'
assert off_value.layer_type == 'Const', 'Parameter off_value should be Const in OneHot'
attr = {'depth': depth.value}
on_value = on_value.value
off_value = off_value.value
assert math.fabs(on_value -
1.0) < 1e-06, "on_value should be 1 in OneHot"
assert math.fabs(off_value -
0.0) < 1e-06, "off_value should be 0 in OneHot"
self.paddle_graph.add_layer(
"paddle.nn.functional.one_hot",
inputs={"x": input.name},
outputs=[node.name],
num_classes=depth.value)
def Pow(self, node):
x = self.graph.get_input_node(node, 0)
factor = self.graph.get_input_node(node, 1)
inputs = {"x": x.name}
attr = dict()
if factor.layer_type == 'Const':
attr["y"] = factor.value.tolist()
else:
inputs["y"] = factor.name
self.paddle_graph.add_layer(
"paddle.pow", inputs=inputs, outputs=[node.name], **attr)
    def All(self, node):
        """Map TF All (logical-AND reduction) to paddle.all.

        Non-bool inputs are cast to bool first, since paddle.all requires
        a boolean tensor.
        """
        input = self.graph.get_input_node(node, 0)
        reduce_idx = self.graph.get_input_node(node, 1)
        assert reduce_idx.layer_type == "Const", "Only support Const parameter[reduce_idx]"
        attr = dict()
        attr["axis"] = reduce_idx.value.tolist()
        attr["keepdim"] = node.get_attr("keep_dims")
        input_name = input.name
        if input.dtype != "bool":
            input_name = gen_name("all", "cast")
            self.paddle_graph.add_layer(
                "paddle.cast",
                inputs={"x": input.name},
                outputs=[input_name],
                dtype=string("bool"))
        self.paddle_graph.add_layer(
            "paddle.all", inputs={"x": input_name}, outputs=[node.name], **attr)
        # Overwrite the node's recorded dtype enum; 10 is presumably TF's
        # DT_BOOL so downstream passes treat the output as bool -- confirm
        # against the TF DataType proto.
        node.layer.attr['dtype'].type = 10
    def GatherV2(self, node):
        """Map TF GatherV2 to paddle.gather.

        paddle.gather takes a rank-1 index, so a higher-rank index is
        flattened first and the output reshaped back afterwards.  The axis
        must be Const.
        """
        embeddings = self.graph.get_input_node(node, 0)
        index = self.graph.get_input_node(node, 1)
        axis = self.graph.get_input_node(node, 2)
        assert axis.layer_type == 'Const', "Only support Const parameter[axis]"
        axis = axis.value
        index_name = index.name
        if len(index.out_shapes[0]) != 1:
            reshape_name = gen_name("gather", "reshape")
            index_name = reshape_name
            self.paddle_graph.add_layer(
                "paddle.reshape",
                inputs={"x": index.name},
                outputs=[reshape_name],
                shape=[-1])
        inputs = {'x': embeddings.name, 'index': index_name}
        self.paddle_graph.add_layer(
            "paddle.gather", inputs=inputs, outputs=[node.name], axis=axis)
        if len(index.out_shapes[0]) != 1:
            # Restore the full output shape after the flattened gather.
            out_shape = node.out_shapes[0]
            self.paddle_graph.add_layer(
                kernel="paddle.reshape",
                inputs={"x": node.name},
                outputs=[node.name],
                shape=out_shape)
def GatherNd(self, node):
    """Map TF GatherNd directly onto paddle.gather_nd."""
    params = self.graph.get_input_node(node, 0)
    indices = self.graph.get_input_node(node, 1)
    self.paddle_graph.add_layer(
        "paddle.gather_nd",
        inputs={'x': params.name, 'index': indices.name},
        outputs=[node.name])
def ExpandDims(self, node):
    """Map TF ExpandDims to paddle.unsqueeze."""
    x = self.graph.get_input_node(node, 0, copy=True)
    y = self.graph.get_input_node(node, 1, copy=True)
    layer_inputs = {"x": x.name}
    layer_attrs = {}
    if y.layer_type == 'Const':
        # Constant axes go in as an attribute; a scalar is wrapped in a list.
        axes = y.value.tolist()
        layer_attrs['axis'] = axes if isinstance(axes, list) else [axes]
    else:
        # Dynamic axes arrive as an extra input tensor.
        layer_inputs['axis'] = y.name
    self.paddle_graph.add_layer(
        "paddle.unsqueeze", inputs=layer_inputs, outputs=[node.name], **layer_attrs)
def ReverseV2(self, node):
    """Map TF ReverseV2 to paddle.flip."""
    x = self.graph.get_input_node(node, 0)
    axis = self.graph.get_input_node(node, 1)
    layer_inputs = {"x": x.name}
    layer_attrs = {}
    if axis.layer_type == 'Const':
        # paddle.flip expects a list of axes, even for a single axis.
        axes = axis.value.tolist()
        layer_attrs['axis'] = axes if isinstance(axes, list) else [axes]
    else:
        layer_inputs['axis'] = axis.name
    self.paddle_graph.add_layer(
        "paddle.flip", inputs=layer_inputs, outputs=[node.name], **layer_attrs)
def BatchToSpaceND(self, node):
    '''
    Map TF BatchToSpaceND onto a reshape->transpose->reshape->crop sequence.
    '''
    x = self.graph.get_input_node(node, 0)
    block_shape = self.graph.get_input_node(node, 1)
    crops = self.graph.get_input_node(node, 2)
    if block_shape.layer_type == "Const":
        block_shape = block_shape.value.tolist()
    if crops.layer_type == "Const":
        crops = crops.value.tolist()
    data_format = x.get_attr("data_format").decode()
    if data_format == "NHWC":
        n, h, w, c = x.out_shapes[0]
    else:
        n, c, h, w = x.out_shapes[0]
    input_name = x.name
    # reshape: split the batch dimension into the block grid.
    # NOTE(review): the trailing dims are ordered (h, w, c) even on the NCHW
    # branch above — confirm NCHW inputs are actually handled correctly here.
    shape = block_shape + [-1, h, w, c]
    reshape_name = gen_name("batch_to_space", "reshape")
    self.paddle_graph.add_layer(
        kernel="paddle.reshape",
        inputs={"x": input_name},
        outputs=[reshape_name],
        shape=shape)
    # transpose: interleave each block axis with its corresponding spatial axis
    # (batch first, then (spatial, block) pairs, then the remaining dims).
    perm = [len(block_shape)] + list(j for i in range(len(block_shape)) for j in (i + len(block_shape) + 1, i)) +\
        list(i + 2*len(block_shape) + 1 for i in range(len(x.out_shapes[0]) - len(block_shape) - 1))
    transpose_name = gen_name("batch_to_space", "transpose")
    self.paddle_graph.add_layer(
        kernel="paddle.transpose",
        inputs={"x": reshape_name},
        outputs=[transpose_name],
        perm=perm)
    # reshape: merge each (spatial, block) pair into one enlarged spatial dim.
    shape = [-1] + list(i * j
                        for i, j in zip(block_shape, x.out_shapes[0][
                            1:])) + x.out_shapes[0][1 + len(block_shape):]
    reshape_name = gen_name("batch_to_space", "reshape")
    self.paddle_graph.add_layer(
        kernel="paddle.reshape",
        inputs={"x": transpose_name},
        outputs=[reshape_name],
        shape=shape)
    # crop: trim the per-dimension (begin, end) crops off the enlarged dims.
    attrs = {}
    crop_shape = shape
    crop_offsets = [0] * len(shape)
    for i in range(len(crops)):
        crop_shape[i + 1] = crop_shape[i + 1] - crops[i][0] - crops[i][1]
        crop_offsets[i + 1] = crops[i][0]
    attrs['shape'] = crop_shape
    attrs['offsets'] = crop_offsets
    self.paddle_graph.add_layer(
        kernel="paddle.crop",
        inputs={"x": reshape_name},
        outputs=[node.name],
        **attrs)
def SpaceToBatchND(self, node):
    '''
    Map TF SpaceToBatchND onto a zero-pad->reshape->transpose->reshape sequence.

    NOTE(review): this lowering is hard-coded for 4-D NHWC input with a 2-D
    block_shape (see the `n, h, w, c` unpack and fixed 6-element perm below) —
    confirm other ranks/layouts never reach this op.
    '''
    x = self.graph.get_input_node(node, 0)
    block_shape = self.graph.get_input_node(node, 1)
    paddings = self.graph.get_input_node(node, 2)
    if block_shape.layer_type == "Const":
        block_shape = block_shape.value.tolist()
    if paddings.layer_type == "Const":
        paddings = paddings.value.flatten().tolist()
    input_name = x.name
    # zero-pad the spatial dims; batch and channel get (0, 0) padding.
    constant_values = 0
    pad_name = gen_name("space_to_batch", "pad")
    paddings = [0, 0] + paddings + [0, 0]
    self.paddle_graph.add_layer(
        kernel="paddle.nn.functional.pad",
        inputs={"x": input_name},
        outputs=[pad_name],
        pad=paddings,
        value=constant_values)
    # reshape: split each padded spatial dim into (dim // block, block).
    n, h, w, c = x.out_shapes[0]
    h = h + paddings[2] + paddings[3]
    w = w + paddings[4] + paddings[5]
    shape = [
        n, h // block_shape[0], block_shape[0], w // block_shape[1],
        block_shape[1], c
    ]
    reshape_name = gen_name("space_to_batch", "reshape")
    self.paddle_graph.add_layer(
        kernel="paddle.reshape",
        inputs={"x": pad_name},
        outputs=[reshape_name],
        shape=shape)
    # transpose: move the block factors in front of the batch dimension.
    transpose_name = gen_name("space_to_batch", "transpose")
    self.paddle_graph.add_layer(
        kernel="paddle.transpose",
        inputs={"x": reshape_name},
        outputs=[transpose_name],
        perm=[2, 4, 0, 1, 3, 5])
    # reshape: fold the block factors into the batch dimension.
    shape = [-1, h // block_shape[0], w // block_shape[1], c]
    self.paddle_graph.add_layer(
        kernel="paddle.reshape",
        inputs={"x": transpose_name},
        outputs=[node.name],
        shape=shape)
|
<filename>vsphere/tests/test_api_rest.py
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import logging
import pytest
from mock import MagicMock
from pyVmomi import vim
from datadog_checks.vsphere import VSphereCheck
from datadog_checks.vsphere.api_rest import VSphereRestAPI
from datadog_checks.vsphere.config import VSphereConfig
logger = logging.getLogger()
@pytest.mark.usefixtures("mock_rest_api", "mock_type")
def test_get_resource_tags(realtime_instance):
    """Tags fetched through the REST API are grouped per vSphere resource type."""
    config = VSphereConfig(realtime_instance, {}, logger)
    api = VSphereRestAPI(config, log=logger)
    mors = [MagicMock(spec=vim.VirtualMachine, _moId="foo")]
    expected = {
        vim.HostSystem: {'10.0.0.104-1': ['my_cat_name_2:my_tag_name_2']},
        vim.VirtualMachine: {'VM4-4-1': ['my_cat_name_1:my_tag_name_1', 'my_cat_name_2:my_tag_name_2']},
        vim.Datacenter: {},
        vim.Datastore: {'NFS-Share-1': ['my_cat_name_2:my_tag_name_2']},
        vim.ClusterComputeResource: {},
    }
    assert api.get_resource_tags_for_mors(mors) == expected
@pytest.mark.parametrize(
    'init_config, instance_config, expected_shared_rest_api_options, expected_rest_api_options',
    [
        pytest.param(
            {},
            {
                'username': 'my-username',
                'password': '<PASSWORD>',
            },
            {},
            {
                'username': 'my-username',
                'password': '<PASSWORD>',
                'tls_ca_cert': None,
                'tls_ignore_warning': False,
                'tls_verify': True,
            },
            id='no rest_api_options',
        ),
        pytest.param(
            {},
            {
                'username': 'my-username',
                'password': '<PASSWORD>',
                'ssl_capath': 'abc123',
                'ssl_verify': False,
                'tls_ignore_warning': True,
            },
            {},
            {
                'username': 'my-username',
                'password': '<PASSWORD>',
                'tls_ca_cert': 'abc123',
                'tls_ignore_warning': True,
                'tls_verify': False,
            },
            id='existing options rest_api_options',
        ),
        pytest.param(
            {
                'rest_api_options': {
                    'timeout': 15,
                }
            },
            {
                'username': 'my-username',
                'password': '<PASSWORD>',
            },
            {
                'timeout': 15,
            },
            {
                'username': 'my-username',
                'password': '<PASSWORD>',
                'tls_ca_cert': None,
                'tls_ignore_warning': False,
                'tls_verify': True,
            },
            id='init rest_api_options',
        ),
        # NOTE: an exact duplicate of this case (same values, same id) used to
        # follow it; it was removed since it added no coverage and produced
        # ambiguous parametrize ids.
        pytest.param(
            {},
            {
                'username': 'my-username',
                'password': '<PASSWORD>',
                'rest_api_options': {
                    'timeout': 15,
                },
            },
            {},
            {
                'username': 'my-username',
                'password': '<PASSWORD>',
                'tls_ca_cert': None,
                'tls_ignore_warning': False,
                'tls_verify': True,
                'timeout': 15,
            },
            id='instance rest_api_options',
        ),
        pytest.param(
            {},
            {
                'username': 'my-username',
                'password': '<PASSWORD>',
                'rest_api_options': {
                    'timeout': 15,
                    'username': 'my-username2',
                    'password': '<PASSWORD>',
                    'tls_ca_cert': 'abc',
                    'tls_ignore_warning': True,
                    'tls_verify': False,
                },
            },
            {},
            {
                'username': 'my-username2',
                'password': '<PASSWORD>',
                'tls_ca_cert': 'abc',
                'tls_ignore_warning': True,
                'tls_verify': False,
                'timeout': 15,
            },
            id='rest_api_options has precedence',
        ),
    ],
)
def test_rest_api_config(init_config, instance_config, expected_shared_rest_api_options, expected_rest_api_options):
    """rest_api_options from init/instance config merge with TLS defaults as expected."""
    instance_config.update(
        {
            'name': 'abc',
            'use_legacy_check_version': False,
            'host': 'my-host',
        }
    )
    check = VSphereCheck('vsphere', init_config, [instance_config])
    assert check._config.rest_api_options == expected_rest_api_options
    assert check._config.shared_rest_api_options == expected_shared_rest_api_options
@pytest.mark.usefixtures("mock_rest_api")
def test_create_session(realtime_instance):
    """Creating the REST API client stores the session token as a request header."""
    config = VSphereConfig(realtime_instance, {}, logger)
    api = VSphereRestAPI(config, log=logger)
    session_header = api._client._http.options['headers']['vmware-api-session-id']
    assert session_header == "dummy-token"
@pytest.mark.usefixtures("mock_rest_api")
@pytest.mark.parametrize(("batch_size", "number_of_batches"), [(25, 40), (100, 10), (101, 10)])
def test_make_batch(realtime_instance, batch_size, number_of_batches):
    """make_batch yields the expected number of chunks and loses no elements."""
    realtime_instance['batch_tags_collector_size'] = batch_size
    config = VSphereConfig(realtime_instance, {}, logger)
    api = VSphereRestAPI(config, log=logger)
    items = list(range(1000))
    batches = list(VSphereRestAPI.make_batch(api, items))
    # Re-flattening the batches must reproduce the input in order.
    assert [x for chunk in batches for x in chunk] == items
    assert len(batches) == number_of_batches
|
from mpi4py import MPI
import process_helpers.wordCloud as wordCloud
import process_helpers.bagOfWords as bagOfWords
import process_helpers.sentimentAnalysis as sentimentAnalysis
import process_helpers.outputter as outputter
import configs
import pandas as pd
from collections import OrderedDict
import re # we can do "import regex" if needed since python re module does not support \K which is a regex resetting the beginning of a match and starts from the current point
from datetime import datetime
import timeit
import functools
# Flush prints by default so interleaved output from multiple MPI ranks stays readable.
print = functools.partial(print, flush=True) #flush print functions by default (needed to see outputs of multiple processes in a more correct order)
# Input csv file names (without the .csv extension); category/subcategory are parsed from them.
FILES_TO_READ = ['ELECTRONICS (LAPTOPS)', 'SPORTS', 'TOOLS & HOME IMPROVEMENT' ] # csv files
# Wall-clock start, reported alongside the timing results.
START_TIME = datetime.now()
NUMBER_OF_ROWS_PROCESSED = 0 # set by the master process in multi-processing or by the only process in single-processing in the process() function
def main():
    """Entry point for every MPI rank: set up the communicator, time the
    pipeline with timeit, and let the master (or only) process report results."""
    # COMM VARIABLES (module-level globals because process() reads them too)
    global comm, nprocs, rank
    comm = MPI.COMM_WORLD
    nprocs = comm.Get_size() # for multiprocessing there are nprocs-1 slaves (ranks 1..nprocs-1) and 1 master (rank 0); for single-processing nprocs is 1 and the process' rank is 0
    rank = comm.Get_rank()
    if nprocs > 1:
        if rank == configs.MASTER_PROCESS_RANK: # print it only once
            print("Parallel execution")
    else:
        print("Serial Execution")
    # Run process() NUMBER_OF_REPEATS_TIMEIT times and average the duration.
    tp = timeit.Timer("process()", "from __main__ import process")
    average_duration_seconds = tp.timeit(number=configs.NUMBER_OF_REPEATS_TIMEIT) / configs.NUMBER_OF_REPEATS_TIMEIT
    # Only the master (or the only process) writes the timing report.
    if (nprocs > 1 and rank == configs.MASTER_PROCESS_RANK) or (nprocs == 1 and rank == 0):
        outputter.output_timing_results(average_duration_seconds, START_TIME, nprocs, NUMBER_OF_ROWS_PROCESSED)
def process():
    """Run one timed iteration of the full analysis pipeline on this MPI rank.

    Multi-process: the master reads the csv files, scatters row chunks to the
    slaves, then gathers and merges their results; a slave processes only its
    chunk. Single-process: rank 0 does everything locally.
    """
    global NUMBER_OF_ROWS_PROCESSED
    if nprocs > 1:
        if rank != configs.MASTER_PROCESS_RANK: # if slave
            # Receive this slave's share of rows, process them, return results.
            df_correspondingRows = comm.recv(source=configs.MASTER_PROCESS_RANK) # rows assigned to this slave
            comm.send(get_wordCloud_bagOfWords_dicts_and_getSentimentAnalysis_df(df_correspondingRows) , dest=configs.MASTER_PROCESS_RANK) # send processed results to master
        else: # if master
            all_dfs = readAllFiles_and_return_df()
            NUMBER_OF_ROWS_PROCESSED = all_dfs.shape[0]
            print("Total #of rows processed is: {0} ({1}% of each of the input csv file rows are processed)\n".format(NUMBER_OF_ROWS_PROCESSED, configs.READING_RATIO_FOR_INPUT_CSVs * 100))
            ################## LOAD BALANCE THE DATAFRAME ROWS ACROSS ALL PROCESSES ##################
            distributed_dfs_forEachProcess, startAndEnds_for_distributed_dfs_forEachProcess = loadBalance_dataframe_toProcesses(all_dfs, nprocs-1)
            distributed_dfs_index = 0
            for proc_index in range(nprocs):
                if proc_index != configs.MASTER_PROCESS_RANK:
                    print("Proccess {0} is responsible for the rows between {1} and {2}\n".format(proc_index, *startAndEnds_for_distributed_dfs_forEachProcess[distributed_dfs_index] ) )
                    comm.send(distributed_dfs_forEachProcess[distributed_dfs_index], dest=proc_index)
                    distributed_dfs_index += 1
            # Gather each slave's results and merge them into the final outputs.
            wordCloudDict_merged = {}
            bagOfWords_dict_merged = OrderedDict()
            sentimentAnalysis_dict_merged = OrderedDict()
            df_sentimentAnalysis_merged = pd.DataFrame()
            for proc_index in range(nprocs):
                if proc_index != configs.MASTER_PROCESS_RANK:
                    wordCloudDict, bagOfWords_dict, sentimentAnalysis_dict, df_sentimentAnalysis = comm.recv(source=proc_index)
                    wordCloud.append_wordCloudDict(wordCloudDict_merged, wordCloudDict)
                    bagOfWords.append_bagOfWords_dict(bagOfWords_dict_merged, bagOfWords_dict)
                    sentimentAnalysis.append_sentimentAnalysis_dict(sentimentAnalysis_dict_merged, sentimentAnalysis_dict)
                    df_sentimentAnalysis_merged = appendAndReturn_df(df_sentimentAnalysis_merged, df_sentimentAnalysis)
            outputter.finalize_wordCloud_bagOfWords_sentimentAnalysis_outputs(wordCloudDict_merged, bagOfWords_dict_merged, sentimentAnalysis_dict_merged, df_sentimentAnalysis_merged)
    else: # IF A SINGLE PROCESS RUNS ONLY (nprocs == 1, process with rank 0)
        all_dfs = readAllFiles_and_return_df()
        NUMBER_OF_ROWS_PROCESSED = all_dfs.shape[0]
        print("Total #of rows processed is: {0} ({1}% of each of the input csv file rows are processed)\n".format(NUMBER_OF_ROWS_PROCESSED, configs.READING_RATIO_FOR_INPUT_CSVs * 100))
        wordCloudDict, bagOfWords_dict, sentimentAnalysis_dict, df_sentimentAnalysis = get_wordCloud_bagOfWords_dicts_and_getSentimentAnalysis_df(all_dfs)
        outputter.finalize_wordCloud_bagOfWords_sentimentAnalysis_outputs(wordCloudDict, bagOfWords_dict, sentimentAnalysis_dict, df_sentimentAnalysis)
def readAllFiles_and_return_df():
    """Read every csv listed in FILES_TO_READ into a single dataframe, tagging
    each row with the Category/Subcategory derived from its file name."""
    frames = []
    categories = []
    subcategories = []
    for name in FILES_TO_READ:
        frame = read_csv_custom(name)
        frames.append(frame)
        category, subcategory = get_category_subcategory(name)
        categories += [category] * frame.shape[0]
        subcategories += [subcategory] * frame.shape[0]
    all_dfs = pd.concat(frames, ignore_index=True)
    all_dfs['Category'] = categories
    all_dfs['Subcategory'] = subcategories
    # Drop repeated header rows (produced when the web scraper is run several
    # times against the same output csv).
    all_dfs = all_dfs[all_dfs['Product Ratings']!='Product Ratings']
    all_dfs['Product Ratings'] = pd.to_numeric(all_dfs['Product Ratings'], downcast='integer')
    return all_dfs
def appendAndReturn_df(df_merged, df_to_append):
    '''
    Concatenate df_to_append onto df_merged and return the result.

    pandas dataframes do not support in-place append, so assign the return
    value back to df_merged in the caller to see the effect.

    Note: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0,
    so pd.concat is used instead (same semantics as the old call).
    '''
    if df_to_append.empty:
        # Nothing to add: hand back the original object untouched.
        return df_merged
    # ignore_index only when df_merged already has rows, mirroring the old
    # append(..., ignore_index=not df_merged.empty) behaviour.
    return pd.concat([df_merged, df_to_append], ignore_index=not df_merged.empty)
def read_csv_custom(file_to_read):
    '''
    Read "<file_to_read>.csv" and keep only the leading fraction of its rows
    selected by configs.READING_RATIO_FOR_INPUT_CSVs.

    Parameters:
        file_to_read (str): csv file name to read, without the extension
    Returns:
        pandas dataframe truncated to the configured reading ratio.
    '''
    frame = pd.read_csv(file_to_read + ".csv", quotechar='"', encoding='utf-8')
    row_limit = int(configs.READING_RATIO_FOR_INPUT_CSVs * frame.shape[0])
    return frame[0:row_limit]
def loadBalance_dataframe_toProcesses(df_to_distribute, numberOfSlaveProcesses):
    '''
    Split a dataframe into near-equal consecutive row chunks, one per slave.

    Parameters:
        - df_to_distribute (pd.DataFrame): the whole dataframe to divide
        - numberOfSlaveProcesses (int): number of worker processes to feed
    Returns:
        - distributed_dfs_forEachProcess: list of dataframe slices, one per slave,
          in rank order (e.g. for 299 rows and 3 slaves: 100, 100 and 99 rows)
        - startAndEnds_for_distributed_dfs_forEachProcess: list of (start, end)
          row-index pairs describing each slice
    NOTE: only meaningful when nprocs > 1.
    '''
    total_rows = df_to_distribute.shape[0]
    base, remainder = divmod(total_rows, numberOfSlaveProcesses)
    # The first `remainder` slaves each take one extra row.
    chunk_sizes = [base + 1 if i < remainder else base
                   for i in range(numberOfSlaveProcesses)]
    slices = []
    spans = []
    start = 0
    for size in chunk_sizes:
        end = start + size
        spans.append((start, end))
        slices.append(df_to_distribute[start:end])
        start = end
    return slices, spans
def get_wordCloud_bagOfWords_dicts_and_getSentimentAnalysis_df(df_correspondingRows):
    """Run the analyses enabled in configs over the given rows and return
    (word-cloud dict, bag-of-words OrderedDict, sentiment OrderedDict,
    sentiment DataFrame); disabled analyses yield empty containers."""
    word_cloud_result = {}
    bag_of_words_result = OrderedDict()
    sentiment_result = OrderedDict()
    sentiment_df = pd.DataFrame()
    if configs.CREATE_WORD_CLOUD:
        word_cloud_result = wordCloud.get_wordCloudDict_forEachRating(df_correspondingRows)
    if configs.CREATE_BAG_OF_WORDS:
        bag_of_words_result = bagOfWords.get_bagOfWords_dict(df_correspondingRows)
    if configs.CREATE_SENTIMENT_ANALYSIS_RESULTS:
        sentiment_result = sentimentAnalysis.get_sentimentAnalysis_dict( df_correspondingRows)
        # Also materialize the sentiment results as a dataframe for csv output.
        sentiment_df = sentimentAnalysis.create_sentimentAnalysis_dataframe(sentiment_result)
    return word_cloud_result, bag_of_words_result, sentiment_result, sentiment_df
def get_category_subcategory(file_to_read):
    '''
    Derive (category, subcategory) from a csv file name, with memoization.

    The text inside parentheses is the subcategory and the text around them is
    the category, e.g. 'ELECTRONICS (LAPTOPS)' -> ('ELECTRONICS', 'LAPTOPS');
    anything after the closing parenthesis is appended to the category. Without
    parentheses (or with empty ones) the subcategory defaults to 'General'.
    Results are cached on function attributes so each name is parsed only once.
    '''
    if not hasattr(get_category_subcategory, "category_dict"):
        # First call ever: create both caches (checking one attribute suffices).
        get_category_subcategory.category_dict = {}
        get_category_subcategory.subcategory_dict = {}
    cached = get_category_subcategory.category_dict.get(file_to_read)
    if cached is not None:
        return cached, get_category_subcategory.subcategory_dict[file_to_read]
    # search (not match) because the parentheses need not start the string.
    found = re.search(r'(.*)\((.*)\)(.*)', file_to_read)
    if found:
        before, inside, after = found.group(1), found.group(2), found.group(3)
        category = before.strip() + ((" " + after) if after.strip() else "")
        subcategory = inside.strip() if inside else "General"
    else:
        category = file_to_read
        subcategory = "General"
    get_category_subcategory.category_dict[file_to_read] = category
    get_category_subcategory.subcategory_dict[file_to_read] = subcategory
    return category, subcategory
# Entry point: every MPI rank runs main(); master/slave roles are decided inside.
if __name__ == "__main__":
    main()
<reponame>solcummings/ntire2021-sar
import os
import torch
import torch.nn as nn
import torchvision
class InitializationMixin:
    """
    Mixin for pytorch models that allows pretraining of Imagenet models and
    initialization of parameters.

    methods:
        pretrain_file: loads weights from a state_dict or a checkpoint file
        pretrain_torchvision: loads a torchvision model to self.torchvision_model
        initialize_parameters: recursively initializes parameters
    """

    def pretrain_file(self, pretrained):
        """Load pretrained weights from a state_dict (dict) or checkpoint path (str)."""
        if isinstance(pretrained, dict):
            # Given a state_dict directly.
            self.load_state_dict(pretrained)
            print('--> Pretrained from state dict')
        elif isinstance(pretrained, str):
            # Given a path to a checkpoint produced by save_checkpoint.
            checkpoint_dict = torch.load(pretrained)
            pretrained_params = checkpoint_dict['model_state_dict']
            self.load_state_dict(pretrained_params)
            print('--> Pretrained from {}'.format(pretrained))

    def pretrain_torchvision(self, model_depth, pretrained):
        """Load an official torchvision implementation (pretrained) when available."""
        torchvision_implementation_dict = {
            'mobilenet_v2': torchvision.models.mobilenet_v2,
            'mobilenet_v3_large': torchvision.models.mobilenet_v3_large,
            'mobilenet_v3_small': torchvision.models.mobilenet_v3_small,
        }
        if isinstance(model_depth, str):
            from_torchvision = model_depth in torchvision_implementation_dict
            if from_torchvision and pretrained == True:
                print('--> Pretrained from torchvision')
                self.torchvision_model = torchvision_implementation_dict[model_depth](pretrained)
            elif pretrained == True:
                print(
                    'No official implementations of {}, continuing without pretraining'.format(model_depth)
                )

    def initialize_parameters(self, params, method='kaiming_normal', activation='relu'):
        """Recursively initialize every parameter-carrying module under `params`."""
        def recursive_initialization(p, **kwargs):
            if any(hasattr(p, i) for i in ['weight', 'bias']):
                # Leaf module carrying parameters: initialize it directly.
                self.__initialize(p, **kwargs)
            elif callable(p.children):
                for m in p.children():
                    recursive_initialization(m, **kwargs)
        recursive_initialization(params, method=method, activation=activation)

    def __initialize(self, params, method, activation):
        """Initialize one module's weight/bias according to `method`."""
        initialization_implementation_dict = {
            'normal': nn.init.normal_,
            'xavier_normal': nn.init.xavier_normal_,
            # BUG FIX: 'xavier_uniform' previously pointed at kaiming_normal_,
            # silently applying the wrong initializer.
            'xavier_uniform': nn.init.xavier_uniform_,
            'kaiming_normal': nn.init.kaiming_normal_,
            'kaiming_uniform': nn.init.kaiming_uniform_,
        }
        # Keyword arguments keyed by the method family (prefix before '_').
        initialization_args = {
            'normal': {'mean': 0, 'std': 0.01},
            'xavier': {'gain': nn.init.calculate_gain(activation)},
            'kaiming': {'mode': 'fan_out', 'nonlinearity': activation},
        }
        if isinstance(params, (nn.Conv2d, nn.ConvTranspose2d)):
            initialization_implementation_dict[method](
                params.weight, **initialization_args[method.split('_')[0]])
        # 1706.02677 Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour
        # zeroing weights in last bn in res/bottleneck blocks improves 0.2~0.3%
        elif isinstance(params, (nn.BatchNorm2d, nn.GroupNorm, nn.LayerNorm, nn.InstanceNorm2d)):
            nn.init.constant_(params.weight, 1)
        elif isinstance(params, nn.Linear):
            # Linear layers always use the small-std normal initializer.
            initialization_implementation_dict['normal'](
                params.weight, **initialization_args['normal'])
        # zero all biases
        # 1812.01187 Bag of Tricks for Image Classification with Convolutional
        # Neural Networks: in regard to wd, "biases and gamma and beta in BN
        # layers, are left unregularized" p.3
        if params.bias is not None:
            nn.init.constant_(params.bias, 0)
# saves model and other variables as checkpoint
def save_checkpoint(path, model: nn.Module, **kwargs):
    """Save model weights plus any extra state (optimizer, scheduler, ...) to `path`.

    Parent directories are created as needed. When the model is wrapped in
    (Distributed)DataParallel, the inner module's state_dict is saved so the
    checkpoint loads into an unwrapped model. Call from rank 0 only when
    distributed, to reduce write overhead.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    save_dict = {}
    # BUG FIX: decide by actual wrapping instead of torch.cuda.device_count() —
    # an unwrapped model on a multi-GPU host has no `model.module` attribute.
    if isinstance(model, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
        save_dict['model_state_dict'] = model.module.state_dict()
    else:
        save_dict['model_state_dict'] = model.state_dict()
    for key, value in kwargs.items():
        if key == 'optimizer':
            save_dict['optimizer_state_dict'] = value.state_dict()
        elif key == 'scheduler':
            save_dict['scheduler_state_dict'] = value.state_dict()
        else:
            save_dict[key] = value
    torch.save(save_dict, path)
# loads model and other variables as dict
def load_checkpoint(path) -> dict:
    """Load a checkpoint written by save_checkpoint and return it as a dict."""
    checkpoint = torch.load(path)
    return checkpoint
|
#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import argparse
import os
import shutil
from os.path import join, abspath, dirname, exists, basename
r=dirname(__file__)
ROOT = abspath(join(r, "..","..",".."))
sys.path.insert(0, ROOT)
from tools.export import EXPORTERS
from tools.targets import TARGET_NAMES, TARGET_MAP
from tools.project_api import setup_project, perform_export, print_results, get_test_from_name, get_lib_symbols
from project_generator_definitions.definitions import ProGenDef
from tools.utils import args_error
class ProgenBuildTest():
    """Exports and builds test projects with progen for each target/IDE pair."""

    def __init__(self, desired_ides, targets):
        # Map each target to the subset of desired IDEs that can build it;
        # targets no desired IDE supports are dropped entirely.
        self.target_ides = {}
        for target in targets:
            self.target_ides[target] = []
            for ide in desired_ides:
                if target in EXPORTERS[ide].TARGETS:
                    # target is supported by ide
                    self.target_ides[target].append(ide)
            if len(self.target_ides[target]) == 0:
                del self.target_ides[target]

    @staticmethod
    def get_pgen_targets(ides):
        """Return targets supported by progen and at least one of the given IDEs."""
        targs = []
        for ide in ides:
            for target in TARGET_NAMES:
                if target not in targs and hasattr(TARGET_MAP[target], 'progen') \
                        and ProGenDef(ide).is_supported(TARGET_MAP[target].progen['target']):
                    targs.append(target)
        return targs

    @staticmethod
    def handle_project_files(project_dir, mcu, test, tool, clean=False):
        """Print the build log, archive it next to the project dir, and
        optionally delete the project. Returns silently when no log exists."""
        log = ''
        if tool == 'uvision' or tool == 'uvision5':
            log = os.path.join(project_dir, "build", "build_log.txt")
        elif tool == 'iar':
            log = os.path.join(project_dir, 'build_log.txt')
        try:
            with open(log, 'r') as f:
                # Parenthesized print works on both Python 2 and 3
                # (the original `print f.read()` was a syntax error on 3).
                print(f.read())
        except (IOError, OSError):
            # BUG FIX: was a bare `except:`; only a missing/unreadable log
            # (including the empty path for unknown tools) should be ignored.
            return
        prefix = "_".join([test, mcu, tool])
        log_name = os.path.join(os.path.dirname(project_dir), prefix + "_log.txt")
        # Replace any log left over from a previous run of this platform+test+ide.
        if os.path.exists(log_name):
            os.remove(log_name)
        os.rename(log, log_name)
        if clean:
            shutil.rmtree(project_dir, ignore_errors=True)
        return

    def generate_and_build(self, tests, clean=False):
        """Export and build every test for every target/IDE pair.

        Returns (successes, failures, skips) as lists of description strings.
        """
        successes = []
        failures = []
        skips = []
        for mcu, ides in self.target_ides.items():
            for test in tests:
                # resolve name alias
                test = get_test_from_name(test)
                for ide in ides:
                    lib_symbols = get_lib_symbols(None, None, test)
                    project_dir, project_name, project_temp = setup_project(mcu, ide, test)
                    dest_dir = os.path.dirname(project_temp)
                    destination = os.path.join(dest_dir, "_".join([project_name, mcu, ide]))
                    tmp_path, report = perform_export(project_dir, project_name, ide, mcu, destination,
                                                      lib_symbols=lib_symbols, progen_build=True)
                    if report['success']:
                        successes.append("build for %s::%s\t%s" % (mcu, ide, project_name))
                    elif report['skip']:
                        skips.append("%s::%s\t%s" % (mcu, ide, project_name))
                    else:
                        failures.append("%s::%s\t%s for %s" % (mcu, ide, report['errormsg'], project_name))
                    ProgenBuildTest.handle_project_files(destination, mcu, project_name, ide, clean)
        return successes, failures, skips
# Command-line driver: export and build every requested test for every
# requested target/IDE pair, then exit with the number of failures (CI-friendly).
if __name__ == '__main__':
    accepted_ides = ["iar", "uvision", "uvision5"]
    accepted_targets = sorted(ProgenBuildTest.get_pgen_targets(accepted_ides))
    default_tests = ["MBED_BLINKY"]
    parser = argparse.ArgumentParser(description = "Test progen builders. Leave any flag off to run with all possible options.")
    parser.add_argument("-i", "--IDEs",
                        nargs = '+',
                        dest="ides",
                        help="tools you wish to perfrom build tests. (%s)" % ', '.join(accepted_ides),
                        default = accepted_ides)
    parser.add_argument("-n",
                        nargs='+',
                        dest="tests",
                        help="names of desired test programs",
                        default = default_tests)
    parser.add_argument("-m", "--mcus",
                        nargs='+',
                        dest ="targets",
                        help="generate project for the given MCUs (%s)" % '\n '.join(accepted_targets),
                        default = accepted_targets)
    parser.add_argument("-c", "--clean",
                        dest="clean",
                        action = "store_true",
                        help="clean up the exported project files",
                        default=False)
    options = parser.parse_args()
    tests = options.tests
    # Normalize user input: IDE names are lowercase, target names uppercase.
    ides = [ide.lower() for ide in options.ides]
    targets = [target.upper() for target in options.targets]
    # Validate every requested test/target/IDE before doing any work.
    if any(get_test_from_name(test) is None for test in tests):
        args_error(parser, "[ERROR] test name not recognized")
    if any(target not in accepted_targets for target in targets):
        args_error(parser, "[ERROR] mcu must be one of the following:\n %s" % '\n '.join(accepted_targets))
    if any(ide not in accepted_ides for ide in ides):
        args_error(parser, "[ERROR] ide must be in %s" % ', '.join(accepted_ides))
    build_test = ProgenBuildTest(ides, targets)
    successes, failures, skips = build_test.generate_and_build(tests, options.clean)
    print_results(successes, failures, skips)
    sys.exit(len(failures))
|
import nibabel
import numpy as np
from ..heart import ahaseg
import matplotlib.pyplot as plt
from os.path import join, basename
def get_loc(num, got_apex):
    """Return per-slice region labels for a stack of `num` labelled slices.

    Labels: 1 = basal, 2 = mid, 3 = apical, plus 4 = apex when no separate
    apex slice was detected (got_apex falsy). Counts follow fixed tables for
    3..20 slices.
    """
    if got_apex > 0:
        # (basal, mid, apical) counts per total slice number.
        labels = (1, 2, 3)
        counts_table = {
            3: (1, 1, 1), 4: (1, 2, 1), 5: (2, 2, 1), 6: (2, 2, 2),
            7: (2, 3, 2), 8: (3, 3, 2), 9: (3, 3, 3), 10: (3, 4, 3),
            11: (4, 4, 3), 12: (4, 4, 4), 13: (4, 5, 4), 14: (5, 5, 4),
            15: (5, 5, 5), 16: (5, 6, 5), 17: (6, 6, 5), 18: (6, 6, 6),
            19: (6, 7, 6), 20: (7, 7, 6),
        }
    else:
        # (basal, mid, apical, apex) counts per total slice number; note the
        # 3-slice case has no apex entry, matching the original tables.
        labels = (1, 2, 3, 4)
        counts_table = {
            3: (1, 1, 1), 4: (1, 1, 1, 1), 5: (1, 2, 1, 1), 6: (2, 2, 1, 1),
            7: (2, 2, 2, 1), 8: (2, 3, 2, 1), 9: (3, 3, 2, 1), 10: (3, 3, 3, 1),
            11: (3, 3, 3, 2), 12: (3, 4, 3, 2), 13: (4, 4, 3, 2), 14: (4, 4, 4, 2),
            15: (4, 4, 4, 3), 16: (4, 5, 4, 3), 17: (5, 5, 4, 3), 18: (5, 5, 5, 3),
            19: (5, 6, 5, 3), 20: (6, 6, 5, 3),
        }
    slice_labels = []
    for label, count in zip(labels, counts_table[num]):
        slice_labels.extend([label] * count)
    return slice_labels
def get_slice_label(heart_xyzt):
    """Label every slice of a 4-D (x, y, z, t) heart mask stack.

    Labels: 1 = basal, 2 = mid, 3 = apical, 4 = apex, 5 = extra basal slice
    (4 and 5 mark slices without an RV mask); 0 marks slices with no labels.
    """
    heart = heart_xyzt.copy()
    curve = np.zeros((heart.shape[2],))
    # NOTE(review): `pyheart` is not imported in this module — this raises
    # NameError at runtime. It presumably should come from the package's heart
    # helpers (cf. `from ..heart import ahaseg` above); confirm the intended
    # import before relying on this function.
    sys_frame, dia_frame = pyheart.get_frame(heart)
    for slicen in range(heart.shape[2]):
        heart_mask_xyt = heart[:, :, slicen, :]
        # Label values {1,2,3} present in a frame sum to 6; 12 across the two
        # frames means all three structures are visible at systole and diastole.
        if np.unique(heart_mask_xyt[..., sys_frame]).sum() + np.unique(heart_mask_xyt[..., dia_frame]).sum() == 12:
            curve[slicen] = 1
    # FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the documented replacement for astype.
    curve_and = curve.astype(int)
    curve_or = (np.sum(heart, axis=(0, 1, 3)) > 0).astype(int)
    # Slices with some mask but not the full structure set in both frames.
    curve_diff = curve_or - curve_and
    diff = np.diff(np.sum(heart == 1, axis=(0, 1, 3))) * curve_and[1:]
    diff = np.median(diff[curve_and[1:] > 0])
    slice_label = curve_and * 0
    mid_point = curve_and.size // 2
    curve_apex = curve_diff.copy()
    curve_basal = curve_diff.copy()
    if diff < 0:
        # Label-1 area shrinks with slice index -> the base comes first.
        basal_first = True
        curve_apex[:mid_point] = 0
        curve_basal[mid_point:] = 0
    else:
        basal_first = False
        curve_apex[mid_point:] = 0
        curve_basal[:mid_point] = 0
    got_apex = np.sum(curve_apex) > 0
    if basal_first:
        slice_label[curve_and > 0] = get_loc(np.sum(curve_and), got_apex)
    else:
        # Apex-first stacks get the same labels in reverse order.
        slice_label[curve_and > 0] = get_loc(np.sum(curve_and), got_apex)[::-1]
    slice_label[curve_apex > 0] = 4
    slice_label[curve_basal > 0] = 5
    return slice_label
def convert(f, result_dir=None):
    """Convert a 4D cardiac mask into AHA-17 segment labels.

    :param f: either a path to a NIfTI file or a 4D (x, y, z, t) mask array.
    :param result_dir: when given (and ``f`` was a path), the labelled
        volume is also saved there under the same basename.
    :return: tuple ``(heart_aha17_4d, sys_frame, dia_frame)``.
        NOTE(review): ``sys_frame``/``dia_frame`` are those of the *last*
        processed slice and are unbound if every slice is skipped -- confirm
        callers never hit that case.
    """
    if isinstance(f, str):
        temp = nibabel.load(f)
        heart = temp.get_fdata()
        affine = temp.affine
        file_input = True
    else:
        heart = f.copy()
        file_input = False
    # If the mask volume grows along z, the stack is apex-first; flip it so
    # processing always runs basal-first, and flip back before returning.
    reverse = np.median(np.diff(np.sum(heart, axis=(0,1,3)))) > 0
    if reverse:
        heart = heart[:,:, ::-1, :]
    slice_label = get_slice_label(heart)
    #print(slice_label)
    # AHA segment number offset per region: basal 1-6, mid 7-12, apical 13-16.
    offset = dict()
    offset[1] = 0
    offset[2] = 6
    offset[3] = 12
    count = -1  # NOTE(review): incremented but never read -- dead variable?
    heart_aha17_4d = heart * 0
    for ii in range(slice_label.size):
        #print(ii)
        #print(slice_label[ii])
        count = count + 1
        # Skip unlabelled (0) and extra-basal (5) slices entirely.
        if (slice_label[ii] == 0) or (slice_label[ii] == 5):
            continue
        if slice_label[ii] == 4:
            # Apex slice: label-2 voxels become AHA segment 17, rest cleared.
            temp = heart[:, :, ii, :].copy()
            temp[temp==2] = 17
            temp[temp==1] = 0
            temp[temp==3] = 0
            heart_aha17_4d[:, :, ii, :] = temp
            continue
        heart_xyt = heart[:, :, ii, :].copy()
        # Per-slice diastole = frame with the largest mask, systole = frame
        # with the smallest non-empty mask (zeros excluded via the 1e20 trick).
        curve = np.sum(heart_xyt, axis=(0, 1))
        dia_frame = np.argmax(curve)
        curve[curve==0] = 1e20
        sys_frame = np.argmin(curve)
        #print(dia_frame, sys_frame)
        heart_xy_dia = heart_xyt[..., dia_frame]
        heart_xy_sys = heart_xyt[..., sys_frame]
        # Apical slices use 4 segments, basal/mid slices use 6.
        if slice_label[ii] == 3:
            nseg = 4
        else:
            nseg = 6
        # Too few label-3 voxels means no usable RV landmark; demote to 5.
        if (np.sum(heart_xy_dia==3) < 5) or (np.sum(heart_xy_sys==3) < 5):
            slice_label[ii] = 5
            continue
        dia_seg = ahaseg.get_seg((heart_xy_dia==1, heart_xy_dia==2, heart_xy_dia==3), nseg)
        sys_seg = ahaseg.get_seg((heart_xy_sys==1, heart_xy_sys==2, heart_xy_sys==3), nseg)
        # Shift per-slice segment numbers into the global AHA-17 numbering.
        dia_seg[dia_seg > 0] = dia_seg[dia_seg > 0] + offset[slice_label[ii]]
        sys_seg[sys_seg > 0] = sys_seg[sys_seg > 0] + offset[slice_label[ii]]
        heart_aha17_4d[:, :, ii, dia_frame] = dia_seg
        heart_aha17_4d[:, :, ii, sys_frame] = sys_seg
    #print('reverse:', reverse)
    if reverse:
        heart_aha17_4d = heart_aha17_4d[:,:, ::-1, :]
    if (result_dir is not None) and file_input:
        result_f = join(result_dir, basename(f))
        nii_label = nibabel.Nifti1Image(heart_aha17_4d.astype(np.uint8), affine)
        nibabel.save(nii_label, result_f)
    return heart_aha17_4d, sys_frame, dia_frame
def auto_crop(heart_xyzt, crop=None):
    """Crop a 4D (x, y, z, t) volume to the region containing the mask.

    When *crop* is None the bounds are derived from the non-zero voxels,
    padded by 10 pixels in x and y (clamped to the volume); z gets no
    padding. Otherwise the provided ``(minx, maxx, miny, maxy, minz, maxz)``
    bounds are applied as-is.

    :return: ``(cropped_volume, (minx, maxx, miny, maxy, minz, maxz))``
    """
    if crop is None:
        occupancy = np.sum(heart_xyzt, axis=-1)
        xs, ys, zs = np.nonzero(occupancy)
        x0 = max(0, np.min(xs) - 10)
        x1 = min(occupancy.shape[0], np.max(xs) + 10)
        y0 = max(0, np.min(ys) - 10)
        y1 = min(occupancy.shape[1], np.max(ys) + 10)
        # NOTE(review): maxz is used as an exclusive slice bound below, so
        # the last occupied z slice is dropped -- confirm this is intended.
        z0, z1 = np.min(zs), np.max(zs)
        crop = (x0, x1, y0, y1, z0, z1)
    minx, maxx, miny, maxy, minz, maxz = crop
    cropped = heart_xyzt[minx:maxx, miny:maxy, minz:maxz, :]
    return cropped, (minx, maxx, miny, maxy, minz, maxz)
def get_frame(heart_mask):
    """Return ``(sys_frame, dia_frame)`` indices for a cardiac mask.

    Diastole is the frame with the most label-1 voxels; systole is the
    frame with the fewest, ignoring frames with none at all.

    :param heart_mask: 3D (x, y, t) or 4D (x, y, z, t) integer mask.
    :return: tuple of frame indices, or ``(-1, -1)`` for unsupported ranks.
    """
    ndim = len(heart_mask.shape)
    if ndim == 3:  # x, y, t
        curve = np.sum(heart_mask == 1, axis=(0, 1))
    elif ndim == 4:  # x, y, z, t
        curve = np.sum(heart_mask == 1, axis=(0, 1, 2))
    else:
        # Unsupported rank: signal failure with sentinel indices.
        return -1, -1
    # BUG FIX: ``np.float`` was removed from NumPy (deprecated 1.20,
    # removed 1.24); the builtin ``float`` is the documented replacement.
    curve = curve.astype(float)
    dia_frame = np.argmax(curve)
    # Mask out empty frames so argmin finds the smallest non-empty one.
    curve[curve == 0] = 1e20
    sys_frame = np.argmin(curve)
    return sys_frame, dia_frame
|
<gh_stars>1-10
# %%
from pathlib import Path
import PIL
import matplotlib.pyplot as plt
import numpy as np
import os
import SimpleITK as sitk
# enable lib loading even if not installed as a pip package or in PYTHONPATH
# also convenient for relative paths in example config files
os.chdir(Path(__file__).resolve().parent.parent)
from adpkd_segmentation.utils.nifti_utils import ( # noqa
load_nifti,
nifti_to_png_array,
process_nifti_dirs
)
from adpkd_segmentation.data.data_utils import path_2dcm_int16 # noqa
# %%
TEST_FOLDER = Path("nifti_tests/annotation4_completed")
study_dirs = [TEST_FOLDER / "4", TEST_FOLDER / "12"]
# BUG FIX: the study folder name must be a quoted string; it was an
# unquoted bare token, which is a SyntaxError.
# TODO(review): the name looks redacted -- confirm the real folder name.
example_dir = TEST_FOLDER / "4" / "WC-ADPKD-____-"
dcm_example = example_dir / "DICOM_anon"
nifti_example = dcm_example / "Untitled.nii.gz"
# %%
# Discover the DICOM series present in the example study.
reader = sitk.ImageSeriesReader()
series_IDs = reader.GetGDCMSeriesIDs(str(dcm_example.resolve()))
# %%
dicom_files = reader.GetGDCMSeriesFileNames(
    str(dcm_example.resolve()), series_IDs[0]
)
# %%
# check one example: compare NIfTI slice, its PNG rendering, and the DICOM
nifti_array = load_nifti(nifti_example)[:, :, 35]
png_array = nifti_to_png_array(nifti_array)
dcm_data = path_2dcm_int16(dicom_files[35])
# %%
plt.imshow(nifti_array)
# %%
plt.imshow(png_array)
# %%
plt.imshow(dcm_data)
# %%
# previous data
png = "data_copy/training_data-61-110MR_AX_SSFSE_ABD_PEL_50/WC-ADPKD_AB9-001467-MR1/Ground/00022_2.16.840.1.113669.632.21.1761676154.1761676154.36448706562526961.png"  # noqa
dcm = "data_copy/training_data-61-110MR_AX_SSFSE_ABD_PEL_50/WC-ADPKD_AB9-001467-MR1/DICOM_anon/00022_2.16.840.1.113669.632.21.1761676154.1761676154.36448706562526961.dcm"  # noqa
im = PIL.Image.open(png)
im_array = np.asarray(im)
dcm_array = path_2dcm_int16(dcm)
# %%
plt.imshow(im_array)
# %%
plt.imshow(dcm_array)
# %%
# different data
# more series_IDs
diff_example_dir = Path(
    "nifti_tests/WC-ADPKD_KJ9-002316MR3-AXL FIESTA "
)  # noqa
# BUG FIX: these two paths previously reused example_dir/dcm_example from
# the first study (copy-paste), pointing at the wrong data set.
diff_dcm_example = diff_example_dir / "DICOM_anon"
diff_nifti_example = diff_dcm_example / "Untitled.nii.gz"
# %%
# folder processing test
test_target_dir = Path("nifti_tests/parsed_studies")
process_nifti_dirs(TEST_FOLDER, test_target_dir)
# %%
# checks
test_dcm = "nifti_tests/parsed_studies/WC-ADPKD_AM9-002358MR1-AXL FIESTA /DICOM_anon/00010_2.16.840.1.113669.632.21.136842060.136842060.7615641522567231422.dcm"  # noqa
test_png = test_dcm.replace(".dcm", ".png").replace("DICOM_anon", "Ground")
im = PIL.Image.open(test_png)
im_array = np.asarray(im)
dcm_array = path_2dcm_int16(test_dcm)
# %%
plt.imshow(im_array)
# %%
plt.imshow(dcm_array)
# %%
# REAL RUN
real_source = Path("data/annotation_completed")
real_target = Path("data/processed_studies_nov_2")
process_nifti_dirs(real_source, real_target)
# %%
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core import AsyncPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import IdentitySignInsConfiguration
from .operations import datapolicyoperationsdatapolicyoperationOperations
from .operations import identityOperations
from .operations import identityconditionalaccessOperations
from .operations import identityprovidersidentityproviderOperations
from .operations import informationprotectioninformationprotectionOperations
from .operations import informationprotectionOperations
from .operations import informationprotectionthreatassessmentrequestsOperations
from .operations import invitationsinvitationOperations
from .operations import invitationsOperations
from .operations import oauth2permissiongrantsoauth2permissiongrantOperations
from .operations import oauth2permissiongrantsOperations
from .operations import organizationOperations
from .operations import policiespolicyrootOperations
from .operations import policiesOperations
from .operations import policiespermissiongrantpoliciesOperations
from .. import models
class IdentitySignIns(object):
    """IdentitySignIns.

    :ivar datapolicyoperationsdatapolicyoperation: datapolicyoperationsdatapolicyoperationOperations operations
    :vartype datapolicyoperationsdatapolicyoperation: identity_sign_ins.aio.operations.datapolicyoperationsdatapolicyoperationOperations
    :ivar identity: identityOperations operations
    :vartype identity: identity_sign_ins.aio.operations.identityOperations
    :ivar identityconditionalaccess: identityconditionalaccessOperations operations
    :vartype identityconditionalaccess: identity_sign_ins.aio.operations.identityconditionalaccessOperations
    :ivar identityprovidersidentityprovider: identityprovidersidentityproviderOperations operations
    :vartype identityprovidersidentityprovider: identity_sign_ins.aio.operations.identityprovidersidentityproviderOperations
    :ivar informationprotectioninformationprotection: informationprotectioninformationprotectionOperations operations
    :vartype informationprotectioninformationprotection: identity_sign_ins.aio.operations.informationprotectioninformationprotectionOperations
    :ivar informationprotection: informationprotectionOperations operations
    :vartype informationprotection: identity_sign_ins.aio.operations.informationprotectionOperations
    :ivar informationprotectionthreatassessmentrequests: informationprotectionthreatassessmentrequestsOperations operations
    :vartype informationprotectionthreatassessmentrequests: identity_sign_ins.aio.operations.informationprotectionthreatassessmentrequestsOperations
    :ivar invitationsinvitation: invitationsinvitationOperations operations
    :vartype invitationsinvitation: identity_sign_ins.aio.operations.invitationsinvitationOperations
    :ivar invitations: invitationsOperations operations
    :vartype invitations: identity_sign_ins.aio.operations.invitationsOperations
    :ivar oauth2permissiongrantsoauth2permissiongrant: oauth2permissiongrantsoauth2permissiongrantOperations operations
    :vartype oauth2permissiongrantsoauth2permissiongrant: identity_sign_ins.aio.operations.oauth2permissiongrantsoauth2permissiongrantOperations
    :ivar oauth2permissiongrants: oauth2permissiongrantsOperations operations
    :vartype oauth2permissiongrants: identity_sign_ins.aio.operations.oauth2permissiongrantsOperations
    :ivar organization: organizationOperations operations
    :vartype organization: identity_sign_ins.aio.operations.organizationOperations
    :ivar policiespolicyroot: policiespolicyrootOperations operations
    :vartype policiespolicyroot: identity_sign_ins.aio.operations.policiespolicyrootOperations
    :ivar policies: policiesOperations operations
    :vartype policies: identity_sign_ins.aio.operations.policiesOperations
    :ivar policiespermissiongrantpolicies: policiespermissiongrantpoliciesOperations operations
    :vartype policiespermissiongrantpolicies: identity_sign_ins.aio.operations.policiespermissiongrantpoliciesOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param top: Show only the first n items.
    :type top: int
    :param skip: Skip the first n items.
    :type skip: int
    :param search: Search items by search phrases.
    :type search: str
    :param filter: Filter items by property values.
    :type filter: str
    :param count: Include count of items.
    :type count: bool
    :param str base_url: Service URL
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        top: Optional[int] = None,
        skip: Optional[int] = None,
        search: Optional[str] = None,
        filter: Optional[str] = None,
        count: Optional[bool] = None,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Fall back to the public Microsoft Graph v1.0 endpoint.
        base_url = base_url or 'https://graph.microsoft.com/v1.0'
        self._config = IdentitySignInsConfiguration(credential, top, skip, search, filter, count, **kwargs)
        self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Register every generated model class for (de)serialization.
        client_models = {
            name: model for name, model in models.__dict__.items() if isinstance(model, type)
        }
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # Wire up one operations group per Graph entity set; every group
        # shares the same pipeline, configuration and (de)serializers.
        operation_groups = (
            ('datapolicyoperationsdatapolicyoperation', datapolicyoperationsdatapolicyoperationOperations),
            ('identity', identityOperations),
            ('identityconditionalaccess', identityconditionalaccessOperations),
            ('identityprovidersidentityprovider', identityprovidersidentityproviderOperations),
            ('informationprotectioninformationprotection', informationprotectioninformationprotectionOperations),
            ('informationprotection', informationprotectionOperations),
            ('informationprotectionthreatassessmentrequests', informationprotectionthreatassessmentrequestsOperations),
            ('invitationsinvitation', invitationsinvitationOperations),
            ('invitations', invitationsOperations),
            ('oauth2permissiongrantsoauth2permissiongrant', oauth2permissiongrantsoauth2permissiongrantOperations),
            ('oauth2permissiongrants', oauth2permissiongrantsOperations),
            ('organization', organizationOperations),
            ('policiespolicyroot', policiespolicyrootOperations),
            ('policies', policiesOperations),
            ('policiespermissiongrantpolicies', policiespermissiongrantpoliciesOperations),
        )
        for attribute, operations_class in operation_groups:
            setattr(self, attribute, operations_class(
                self._client, self._config, self._serialize, self._deserialize))

    async def close(self) -> None:
        """Close the underlying pipeline client."""
        await self._client.close()

    async def __aenter__(self) -> "IdentitySignIns":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
|
from .Ticket import Ticket, StateTicket
################################################################################
################################################################################
################################################################################
################################################################################
class HitByPitch(Ticket):
    """Ticket for a hit-by-pitch event; picks the state handler that
    matches the current base-runner configuration."""

    def getStateTicket(self, diamondState):
        # Map each runner configuration to its dedicated handler class;
        # any configuration not listed behaves as bases empty.
        handlers = {
            "firstBase_secondBase_thirdBase": BasesLoadedHitByPitch,
            "firstBase_thirdBase": FirstThirdHitByPitch,
            "firstBase_secondBase": FirstSecondHitByPitch,
            "firstBase": FirstHitByPitch,
        }
        return handlers.get(diamondState, HitByPitchState)()
################################################################################
################################################################################
class BasesLoadedHitByPitch(StateTicket):
    # Hit by pitch with the bases loaded: every runner is forced up one
    # base, so the runner on third scores.
    def recordOuts(self, umpire):
        # A hit by pitch never records an out.
        pass
    def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
        scoreKeeper.recordBatterHBP(batterId)
        scoreKeeper.recordPitcherHBP(pitcherId)
        # Only the runner on third is forced home and scores; the runners on
        # first and second merely advance (handled in moveBases).
        for base in ("thirdBase",):
            runnerId, onHook = diamond.popBase(base)
            scoreKeeper.recordTeamRun()
            scoreKeeper.recordBatterRun(runnerId)
            scoreKeeper.recordBatterRbi(batterId)
            # The run is charged to the pitcher responsible for the runner.
            scoreKeeper.recordPitcherRun(onHook)
            if scoreKeeper.exOuts() < 3:
                # Earned run only while the inning isn't being prolonged;
                # NOTE(review): presumably exOuts() counts outs that should
                # already have ended the inning -- confirm.
                scoreKeeper.recordPitcherER(onHook)
    def moveBases(self, diamond):
        # Forced advances, back to front: second -> third, first -> second.
        diamond.moveBase("secondBase", "thirdBase")
        diamond.moveBase("firstBase", "secondBase")
    def reachedBase(self, pitcherId, batterId, diamond):
        # The batter is awarded first base.
        diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class FirstThirdHitByPitch(StateTicket):
    """Hit by pitch with runners on first and third: only the runner on
    first is forced to second; the runner on third holds."""

    def recordOuts(self, umpire):
        # No out can result from a hit by pitch.
        pass

    def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
        # Credit the batter with the HBP and charge it to the pitcher.
        scoreKeeper.recordBatterHBP(batterId)
        scoreKeeper.recordPitcherHBP(pitcherId)

    def moveBases(self, diamond):
        for origin, destination in (("firstBase", "secondBase"),):
            diamond.moveBase(origin, destination)

    def reachedBase(self, pitcherId, batterId, diamond):
        # The batter is awarded first base.
        diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class FirstSecondHitByPitch(StateTicket):
    """Hit by pitch with runners on first and second: both runners are
    forced up one base; nobody scores."""

    def recordOuts(self, umpire):
        # No out can result from a hit by pitch.
        pass

    def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
        # Credit the batter with the HBP and charge it to the pitcher.
        scoreKeeper.recordBatterHBP(batterId)
        scoreKeeper.recordPitcherHBP(pitcherId)

    def moveBases(self, diamond):
        # Forced advances, back to front so nobody is overwritten.
        for origin, destination in (("secondBase", "thirdBase"),
                                    ("firstBase", "secondBase")):
            diamond.moveBase(origin, destination)

    def reachedBase(self, pitcherId, batterId, diamond):
        # The batter is awarded first base.
        diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class FirstHitByPitch(StateTicket):
    """Hit by pitch with a runner on first only: the runner is forced to
    second and the batter takes first."""

    def recordOuts(self, umpire):
        # No out can result from a hit by pitch.
        pass

    def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
        # Credit the batter with the HBP and charge it to the pitcher.
        scoreKeeper.recordBatterHBP(batterId)
        scoreKeeper.recordPitcherHBP(pitcherId)

    def moveBases(self, diamond):
        for origin, destination in (("firstBase", "secondBase"),):
            diamond.moveBase(origin, destination)

    def reachedBase(self, pitcherId, batterId, diamond):
        # The batter is awarded first base.
        diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class HitByPitchState(StateTicket):
    """Hit by pitch with the bases empty (default handler): no runners to
    move, the batter simply takes first."""

    def recordOuts(self, umpire):
        # No out can result from a hit by pitch.
        pass

    def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
        # Credit the batter with the HBP and charge it to the pitcher.
        scoreKeeper.recordBatterHBP(batterId)
        scoreKeeper.recordPitcherHBP(pitcherId)

    def moveBases(self, diamond):
        # Nothing to advance with empty bases.
        pass

    def reachedBase(self, pitcherId, batterId, diamond):
        # The batter is awarded first base.
        diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
|
<reponame>gokudomatic/cobiv
from collections import deque
from datetime import datetime
import copy
from cobiv.libs.templite import Templite
from cobiv.modules.core.entity import Entity
from cobiv.modules.core.session.cursor import Cursor
class CoreVariables:
    """Registers the built-in template fields (file size, image size, ...)
    of a session; each field maps a name to a zero-argument provider."""

    def __init__(self, session):
        self.session = session
        session.fields['file_size'] = self.get_file_size
        session.fields['image_size'] = self.get_image_size
        session.fields['file_format'] = self.get_image_format
        session.fields['file_date'] = self.get_file_date
        session.fields['filename'] = self._get_filename
        session.fields['currentset_position'] = self._get_currentset_position
        session.fields['currentset_count'] = self._get_currentset_count

    def _get_filename(self):
        return self.session.cursor.filename

    def _get_currentset_position(self):
        """1-based cursor position, "0" when unset, "EOL" past the end."""
        cursor = self.session.cursor
        if cursor.is_eol():
            return "EOL"
        if cursor.pos is None:
            return "0"
        return cursor.pos + 1

    def _get_currentset_count(self):
        """Number of items in the current set, "0" when no position."""
        cursor = self.session.cursor
        return len(cursor) if cursor.pos is not None else "0"

    def get_simple_field(self, category, field_name, formatter=None):
        """Return the first tag value for (category, field_name), passed
        through *formatter* when given; "N/A" when unavailable."""
        cursor = self.session.cursor
        if cursor.file_id is None:
            return "N/A"
        if field_name not in cursor.get_tags()[category]:
            return "N/A"
        values = cursor.get_tags()[category][field_name]
        value = values[0] if len(values) > 0 else None
        return value if formatter is None else formatter(value)

    def get_file_size(self):
        return self.get_simple_field(0, 'size')

    def get_file_date(self):
        """File modification date formatted as 'YYYY-MM-DD HH:MM:SS'."""
        raw = self.get_simple_field(0, 'file_date')
        if raw == "N/A":
            return raw
        return datetime.fromtimestamp(float(raw)).strftime('%Y-%m-%d %H:%M:%S')

    def get_image_size(self):
        """Image dimensions as 'W x H', or "N/A" when not a plain file."""
        cursor = self.session.cursor
        if cursor.file_id is not None:
            tags = cursor.get_tags()
            if tags[0]['file_type'][0] == 'file':
                width = tags[0]['width'][0]
                height = tags[0]['height'][0]
                if width is not None and height is not None:
                    return str(width) + " x " + str(height)
        return "N/A"

    def get_image_format(self):
        return self.get_simple_field(0, 'format')

    @staticmethod
    def sizeof_fmt(num, suffix='B'):
        """Human-readable byte count, e.g. 2048 -> '2.0 KB'."""
        value = int(num)
        for prefix in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
            if abs(value) < 1024.0:
                return "%3.1f %s%s" % (value, prefix, suffix)
            value /= 1024.0
        return "%.1f %s%s" % (value, 'Y', suffix)
class HistoryContext:
    """Snapshot of a view's state for the session history: a stored
    function, its arguments, and the owning view category."""

    def __init__(self):
        self.fn = None        # stored callable (restored when popped)
        self.args = {}        # arguments associated with fn
        self.category = None  # view category this context belongs to

    def clone(self):
        """Return an independent copy; ``args`` is deep-copied so the
        snapshot cannot be mutated through the original."""
        duplicate = HistoryContext()
        duplicate.fn = self.fn
        duplicate.args = copy.deepcopy(self.args)
        duplicate.category = self.category
        return duplicate
class Session(Entity):
    """Application-wide session state: the cursor, template field
    providers, filesystems, command actions/hotkeys, mimetype handlers and
    the view-context history stacks."""

    def __init__(self):
        # NOTE(review): Entity.__init__ is deliberately not invoked, as in
        # the original code -- confirm Entity needs no initialization.
        # BUG FIX: these containers used to be class attributes, so every
        # Session instance silently shared the same dicts/deques. They are
        # now per-instance state.
        self.cursor = Cursor()
        self.fields = {}
        self.active_fs = {}
        self.cmd_actions = {}
        self.cmd_hotkeys = {}
        self.mimetype_actions = {}
        self.view_context = {}
        self.view_category = None
        self.view_category_history = deque()
        self.view_history = deque()
        self.max_view_history_size = 20  # NOTE(review): currently unused here
        self.skip_push_context = False
        CoreVariables(self)

    def set_cursor(self, new_cursor):
        """Replace the active cursor, moving the file_id binding to it."""
        self.cursor.unbind(file_id=self.on_file_id_change)
        self.cursor = new_cursor
        self.cursor.bind(file_id=self.on_file_id_change)

    def on_file_id_change(self, instance, value):
        """Hook invoked when the cursor's file_id changes; no-op here."""
        pass

    def fill_text_fields(self, original_text):
        """Render ``%{...}%`` placeholders in *original_text* using the
        registered field providers."""
        template = Templite(original_text.replace("%{", "${write(").replace("}%", ")}$"))
        return template.render(**self.fields)

    def get_filesystem(self, key):
        return self.active_fs[key]

    def add_filesystem(self, key, filesystem):
        self.active_fs[key] = filesystem

    def set_action(self, name, fn, profile="default"):
        """Register *fn* as the handler of command *name* for *profile*."""
        self.cmd_actions.setdefault(name, {})[profile] = fn

    def set_hotkey(self, key, command, modifier=0, profile="default"):
        """Bind *command* to (key, modifier) within *profile*."""
        self.cmd_hotkeys.setdefault(key, {}).setdefault(profile, {})[modifier] = command

    def get_hotkey_command(self, key, modifier=0, profile="default"):
        """Return the command bound to (key, modifier, profile), or False.

        Robustness fix: an unregistered *key* now returns False instead of
        raising KeyError (an unknown profile or modifier already did).
        """
        profiles = self.cmd_hotkeys.get(key)
        if not profiles or profile not in profiles:
            return False
        return profiles[profile].get(modifier, False)

    def register_mimetype_action(self, mimetype, action, fn):
        self.mimetype_actions.setdefault(mimetype, {})[action] = fn

    def get_mimetype_action(self, mimetype, action, default=None):
        """Return the handler registered for (mimetype, action), else
        *default*.

        BUG FIX: previously a registered mimetype with an unregistered
        action fell through and returned None, ignoring *default*.
        """
        return self.mimetype_actions.get(mimetype, {}).get(action, default)

    def get_context(self, category):
        """Return the HistoryContext for *category*, creating it lazily."""
        return self.view_context.setdefault(category, HistoryContext())

    def push_context(self, category):
        """Snapshot *category*'s context onto the history stacks (unless
        pushes are temporarily suppressed)."""
        if not self.skip_push_context:
            self.view_category_history.append(category)
            self.view_context[category].category = category
            self.view_history.append(self.view_context[category].clone())

    def pop_context(self):
        """Restore and return the most recently pushed context, or None
        when the history is empty."""
        if len(self.view_history) > 0:
            view_category = self.view_category_history.pop()
            self.view_context[view_category] = self.view_history.pop()
            return self.view_context[view_category]
        return None
|
"""
Calculates pixels shifts between two COS NUV spectra.
This script can be used to determine the shift (in pixels) of one
spectrum with respect to another. The cross correlation between
S1 and S2 is determined and a non-linear fit to the peak of the
correlation is used to determine the exact offset.
:requires: Python 2.5 (not 3.0 compatible)
:requires: PyFits
:requires: NumArray (for boxcar smoothing)
:requires: NumPy (could be written without)
:author: <NAME> for STScI, 01/26/2009
:history: 01/26/09: Initial Release, version 0.1
"""
import math
import numpy as N
import sys
import pyfits as pf
import pylab as P
__author__ = '<NAME>'
__version__ = 0.1
# This is a helper function. Code adapted from the original IDL code:
# http://www.astro.washington.edu/docs/idl/cgi-bin/getpro/library43.html?HRS_OFFSET
def SpectrumOffset(s1, s2, ishift=0, width=15, i1=0, i2=0):
"""
This function calculates the shift (in pixels) of one spectrum
with respect to another. The cross correlation between spectrum1
and spectrum2 is determed and a non-linear fit to the peak of
the correlation is used to determine the exact offset.
input:
:param s1: the first spectrum
:type s1: ndarray
:param s2: the second spectrum
:type s2: ndarray
optional input:
ishift - guess of the intial shift in pixels, int
width - width of the search area in pixels, int
i1 - spectrum starting point in pixels, int
i2 - spectrum ending point in pixels, int
returns:
a list with offset and correlations in every bin
"""
approx = long(ishift + 100000.5) - 100000
ns = len(s1)
if i2 == 0: i2 = ns - 1
#extract template from specturm 2
ns2 = ns / 2
width2 = width / 2
it2_start = 0
it2_end = 0
if (i1 - approx + width2) > 0: it2_start = (i1 - approx + width2)
if (i2 - approx - width2) < (ns - 1):
it2_end = (i2 - approx - width2)
else:
it2_end = (ns - 1)
nt = it2_end - it2_start + 1
if nt < 1:
print 'CROSS_CORRELATE - region too small, or WIDTH too large, or ISHIFT too large'
offset = 0.0
return
template2 = s2[it2_start:it2_end + 1]
#correlate
corr = []
mean2 = N.sum(template2) / nt
sig2 = math.sqrt(N.sum((template2 - mean2) ** 2.))
diff2 = template2 - mean2
#find region in the first spectrum
for i in xrange(width):
it1_start = it2_start - width2 + approx + i
it1_end = it1_start + nt - 1
template1 = s1[it1_start:it1_end + 1]
mean1 = N.sum(template1) / nt
sig1 = math.sqrt(N.sum((template1 - mean1) ** 2.))
diff1 = template1 - mean1
if sig1 == 0 or sig2 == 0:
print 'CROSS_CORRELATE - zero variance computed'
offset = 0.0
return offset, corr
corr.append(N.sum(diff1 * diff2) / sig1 / sig2)
#find maximum
maxc = N.max(corr)
K = corr.index(maxc)
#in the IDL code this was k=!c
#The system variable !C is set to the one-dimensional subscript of the maximum element
if K == 0 or K == width - 1:
print'CROSS_CORRELATE- maximum on edge of search area'
offset = 0.0
return offset, corr
#Use quandratic refinement
Kmin = (corr[K - 1] - corr[K]) / (corr[K - 1] + corr[K + 1] - 2. * corr[K]) - 0.5
offset = K + Kmin - width2 + approx
return offset, corr
def writeOutput(filename, data, header, separator=' '):
    """
    Write tabular *data* to *filename* with the chosen cell *separator*.

    :param filename: output file path (overwritten if it exists)
    :param data: iterable of rows; each row is an iterable of cells
    :param header: string written verbatim before the rows (include the
        trailing newline yourself)
    :param separator: string placed after every cell
    """
    # Use a context manager so the handle is closed even if a write fails
    # (the original leaked the handle on error).
    with open(filename, 'w') as output:
        output.write(header)
        for line in data:
            # Each row starts with a single space and every cell is
            # followed by the separator (kept for output compatibility).
            tmpstr = ' '
            for cell in line:
                tmpstr += str(cell) + separator
            output.write(tmpstr + '\n')
def get_NUV_PSA_WCA(psalist, wcalist, scale=False, width=512, ishift=1, extrakeys=False, debug=False):
    """
    Does a cross-correlation between a pair of x1d files containing the
    PSA and WCA spectra for the same central wavelength.

    input:
    psalist - a list containing filenames of the x1d spectra for the PSA
    wcalist - a list containing filenames of the x1d spectra for the WCA

    optional input:
    scale - whether the wca spectrum is multiplied with the boxcar smoothing
            factor of the ratio between the psa and wca spectrum
    ishift - guess of the initial shift in pixels, int
    width - width of the search area in pixels, int
    extrakeys - also read LAPD*/LAPX* keywords from the matching _spt file
    debug - print progress information

    returns:
    a list of rows [cenwav, stripe, -offset, psafile, grating, fppos]
    (plus the four LAPD*/LAPX* values when extrakeys is set).
    """
    # Imports deferred so the heavy optional dependencies are only needed
    # when the corresponding feature is requested.
    if scale: from numarray.convolve import boxcar as bc
    if extrakeys: import glob
    lpsa = len(psalist)
    lwca = len(wcalist)
    result = []
    if debug: print '%i and %i PSA and WCA files will be processed, respectively' % (lpsa, lwca)
    # The two lists are zipped pairwise below, so they must match in length.
    if lpsa != lwca:
        print 'The lists of filenames do not have the same number of elements.'
        print 'psalist has %i elements while wcalist has %i' % (lpsa, lwca)
        print 'Will exit now...'
        sys.exit(-1)
    for psafile, wcafile in zip(psalist, wcalist):
        if debug: print 'Running files %s and %s' % (psafile, wcafile)
        # NOTE(review): the bare excepts below swallow all errors and then
        # fall through to use possibly-stale psahdr/wcadata -- consider
        # narrowing and skipping the pair on failure.
        try:
            #psadata, psahdr = pf.getdata(psafile, header = True)
            #wcadata, wcahdr = pf.getdata(wcafile, header = True)
            #Above did not return the whole header for some reason?
            psa = pf.open(psafile)
            wca = pf.open(wcafile)
            psahdr = psa[0].header
            wcahdr = wca[0].header
            psadata = psa[1].data
            wcadata = wca[1].data
            psa.close()
            wca.close()
        except:
            print 'Error while reading data...'
        if extrakeys:
            # Derive the matching support (_spt) file name from the psa file.
            try:
                #path = '/Volumes/cos/PreLaunch/Data/TV06/FITS/Test_Processing/Jan_15_2009_fixed/'
                #spt = path + psafile[:21] + '_spt.fits'
                path = '/Volumes/cos/PreLaunch/Data/TV03/FITS/Test_Processing/Jan_05_2009/'
                spt = path + psafile[50:-19] + '_spt.fits'
                sptlist = pf.open(spt)
                spthdr = sptlist[2].header
                sptlist.close()
            except:
                print 'Error while opening %s file...' % spt
        cenwav = psahdr['CENWAVE']
        stripe = psahdr['SEGMENT']
        grating = psahdr['OPT_ELEM']
        fppos = psahdr['FPPOS']
        # Flux arrays of the first spectral row.
        psay = psadata[0][1]
        wcay = wcadata[0][1]
        # Sentinels reported when extrakeys is off or the keywords are absent.
        ldstp = -999.
        ldvdt = -999.
        lxstp = -999.
        lxvdt = -999.
        if extrakeys:
            try:
                ldstp = spthdr['LAPDSTP']
                ldvdt = spthdr['LAPDLVDT']
                lxstp = spthdr['LAPXSTP']
                lxvdt = spthdr['LAPXLVDT']
            except:
                print 'Error while reading extra keys...'
        # Sanity checks: only correlate spectra of the same setting.
        if cenwav != wcahdr['CENWAVE']:
            print 'Error - PSA and WCA files are not at same CENWAVE'
            print 'Will skip the files'
            continue
        if stripe != wcahdr['SEGMENT']:
            print 'Error - PSA and WCA files are not from the same STRIPE'
            print 'Will skip the files'
            continue
        if debug: print 'Processing the central wavelength of %i Angstroms' % cenwav
        if debug: print 'Processing the %s segment' % stripe
        if scale:
            # Rescale the WCA flux to the PSA level using boxcar-smoothed maxima.
            mpsay = max(bc(psay, (5,)))
            mwcay = max(bc(wcay, (5,)))
            factor = mpsay / mwcay
            wcay *= factor
            print 'Boxcar smoothing for psa: %s and wca %s' % (mpsay, mwcay)
        #correlation:
        #correlation2 = correlate(psay, wcay, mode = conv.VALID)
        #correlation2 = correlate(psay, wcay, mode = conv.FULL)
        #correlation2 = correlate(psay, wcay)
        #VALID gives the same result as this
        #t = Numeric.cross_correlate(psay, wcay)
        offs, correlation = SpectrumOffset(psay, wcay, width=width, i1=ishift)
        if debug: print 'Correlation: %s' % correlation
        if debug: print 'Offset %8.6f found' % offs
        #NOTE:
        #there is - in front of the offs
        #fix this if used properly calibrated data!!!
        if extrakeys:
            result.append([cenwav, stripe, -offs, psafile, grating, fppos, ldstp, ldvdt, lxstp, lxvdt])
        else:
            result.append([cenwav, stripe, -offs, psafile, grating, fppos])
    return result
def plotOffsets(results):
    """Plot PSA/WCA pixel offsets versus central wavelength, one marker
    style per NUV stripe, and save the figure as 'offset'."""
    by_stripe = {'NUVA': [], 'NUVB': [], 'NUVC': []}
    for row in results:
        # row layout: [cenwave, stripe, offset, ...]
        if row[1] in by_stripe:
            by_stripe[row[1]].append((row[0], row[2]))
    for stripe, marker in (('NUVA', 'ro'), ('NUVB', 'gs'), ('NUVC', 'bD')):
        points = by_stripe[stripe]
        P.plot([p[0] for p in points], [p[1] for p in points], marker, label=stripe)
    P.legend(loc='upper right', shadow=True)
    P.ylim(-10, 10)
    P.title('PSA WCA Separation (TV06 based on Katya\'s List)')
    P.xlabel('CENWAVE (Angstrom)')
    P.ylabel('Separation (pixels)')
    P.savefig('offset')
if __name__ == '__main__':
    import glob
    # Script configuration.
    plot = True        # NOTE(review): never checked; plotOffsets is always called
    extrakeys = True   # also pull LAPD*/LAPX* keywords from the _spt files
    usedwidth = 20     # cross-correlation search width in pixels
    # PSA (science) and WCA (calibration) x1d spectra in the working directory.
    sfiles = glob.glob('*x1d_s*.fits')
    cfiles = glob.glob('*x1d_c*.fits')
    results = get_NUV_PSA_WCA(sfiles, cfiles, width=usedwidth, ishift=1, extrakeys=extrakeys, debug=False)
    # Column header matching the row layout produced by get_NUV_PSA_WCA.
    header = '#CENWAV STRIPE OFFSET PSAFILE GRATING FPPOS\n'
    if extrakeys:
        header = '#CENWAV STRIPE OFFSET PSAFILE GRATING FPPOS LAPDSTP LAPDLVDT LAPXSTP LAPXLVDT\n'
    filename = 'results.output'
    writeOutput(filename, results, header)
    plotOffsets(results)
|
# coding=utf-8
"""
Tests provided sorting algorithms under many cases.
"""
import random
import unittest
from unittest.mock import Mock
from numpy import testing as nptest
from collections import namedtuple
from acnportal.algorithms import *
from acnportal.algorithms.tests.generate_test_cases import *
from acnportal.algorithms.tests.testing_interface import TestingInterface
from acnportal.algorithms import UpperBoundEstimatorBase
# Simulation clock value shared by every static test interface.
CURRENT_TIME = 0
# Length of one scheduling period (presumably minutes -- TODO confirm against
# the simulator's period convention).
PERIOD = 5
# -----------------------------------------------------------------------------
# Algorithms to Test
# -----------------------------------------------------------------------------
algorithms = {
    "FCFS": SortedSchedulingAlgo(first_come_first_served),
    "LLF": SortedSchedulingAlgo(least_laxity_first),
    "EDF": SortedSchedulingAlgo(earliest_deadline_first),
    "LCFS": SortedSchedulingAlgo(last_come_first_served),
    "LRPT": SortedSchedulingAlgo(largest_remaining_processing_time),
    "RR": RoundRobin(first_come_first_served),
}
# -----------------------------------------------------------------------------
# Test Suite
# -----------------------------------------------------------------------------
# A Scenario bundles a human-readable name, the (static) simulator interface
# to run the algorithm against, and the flags selecting which optional
# assertions apply to that run.
Scenario = namedtuple(
    "Scenario",
    ["name", "interface", "assert_at_max", "uninterrupted", "estimate_max_rate"],
)
class BaseAlgorithmTest(unittest.TestCase):
    """Shared feasibility-test battery; subclasses supply the algorithm."""

    def setUp(self) -> None:
        """
        Tests that a given algorithm provides feasible schedules to a simulation.
        The elements of feasibility tested here are:
        - A given charging rate is <= the maximum rate of the EVSE it is sent to.
        - A given charging rate is <= the maximum rate of the Session it is charging.
        - A given charging rate is in the allowable rate set of the Session it is
        charging.
        - No session is given more energy than it requested.
        - Infrastructure limits are satisfied.
        - TODO: what is the function of the assert at max rate?
        - Charging is uninterrupted (never goes to zero during the session unless the
        vehicle is done charging) if required.
        - A max rate estimation, if provided, is not exceeded during the session.
        Each algorithm test class has an algorithm (set by overriding this function
        and setting an actual algorithm) and a max_rate_estimation, which provides the
        return value of a mocked UpperBoundEstimator (used if max rate estimation is
        tested).
        The implementation of the _get_scenarios method details which charging
        scenarios should be run in this test class. A scenario is defined by a name,
        interface (usually a testing interface with static simulator data,
        see TestingInterface), and attributes assert_at_max, uninterrupted,
        and estimate_max rate, which dictate additional constraints under which the
        algorithm should operate.
        Returns:
            None.
        """
        # Subclasses must replace these in their own setUp.
        self.algo = None
        self.max_rate_estimation = {}

    @staticmethod
    def _get_scenarios() -> List[Scenario]:
        """Return the scenarios to run; overridden by concrete subclasses."""
        return []

    def test_output_feasible(self) -> None:
        """Run the algorithm on every scenario and apply the full check battery."""
        scenarios = self._get_scenarios()
        for scenario in scenarios:
            self.algo.register_interface(scenario.interface)
            self.algo.uninterrupted = scenario.uninterrupted
            # Replace the estimator's lookup with the canned per-session rates.
            estimator_mock = UpperBoundEstimatorBase()
            estimator_mock.get_maximum_rates = Mock(
                return_value=self.max_rate_estimation
            )
            self.algo.max_rate_estimator = estimator_mock
            self.algo.estimate_max_rate = scenario.estimate_max_rate
            schedule = self.algo.run()
            self._run_tests(
                scenario.name,
                scenario.interface.active_sessions(),
                schedule,
                scenario.interface,
                scenario.assert_at_max,
                scenario.uninterrupted,
            )

    def _run_tests(
        self,
        name: str,
        sessions: List[SessionInfo],
        schedule: Dict[str, List[float]],
        interface: Interface,
        assert_at_max: bool = False,
        uninterrupted: bool = False,
    ) -> None:
        """Apply every applicable feasibility check as its own subTest."""
        with self.subTest(msg=f"test_all_rates_less_than_evse_limit - {name}"):
            self._test_all_rates_less_than_evse_limit(schedule, interface)
        with self.subTest(msg=f"test_all_rates_less_than_session_max_rates - {name}"):
            self._test_all_rates_less_than_session_max_rates(sessions, schedule)
        with self.subTest(
            msg=f"test_all_rates_greater_than_session_min_rates - {name}"
        ):
            self._test_all_rates_greater_than_session_min_rates(
                sessions, interface, schedule
            )
        with self.subTest(f"test_in_allowable_rates - {name}"):
            self._test_in_allowable_rates(sessions, schedule, interface)
        with self.subTest(f"test_energy_requested_not_exceeded - {name}"):
            self._test_energy_requested_not_exceeded(sessions, schedule, interface)
        with self.subTest(f"test_infrastructure_limits_satisfied - {name}"):
            self._test_infrastructure_limits_satisfied(schedule, interface)
        if assert_at_max:
            with self.subTest(f"test_all_rates_at_max - {name}"):
                self._test_all_rates_at_max(sessions, schedule, interface)
        if uninterrupted:
            with self.subTest(f"test_charging_not_interrupted - {name}"):
                self._test_charging_not_interrupted(sessions, schedule, interface)
        if self.algo.estimate_max_rate:
            with self.subTest(f"test_max_rate_estimator_not_exceeded - {name}"):
                self._test_max_rate_estimator_not_exceeded(sessions, schedule)

    def _test_all_rates_less_than_evse_limit(self, schedule, interface) -> None:
        """No scheduled rate may exceed the EVSE's maximum pilot signal."""
        # NOTE(review): this compares the whole rate sequence against a scalar
        # limit; presumably schedule values support elementwise <= (e.g. numpy
        # arrays) -- confirm, a plain list here would raise TypeError.
        for station_id, rates in schedule.items():
            self.assertLessEqual(rates, interface.max_pilot_signal(station_id))

    def _test_all_rates_less_than_session_max_rates(self, sessions, schedule) -> None:
        """First-period rate must respect each session's own max rate."""
        for session in sessions:
            station_id = session.station_id
            self.assertLessEqual(schedule[station_id][0], session.max_rates[0])

    def _test_all_rates_greater_than_session_min_rates(
        self, sessions, interface, schedule
    ) -> None:
        """Min rates are honored unless remaining demand is below one period's
        worth of energy at the minimum pilot, in which case the rate must be 0."""
        infrastructure = interface.infrastructure_info()
        for session in sessions:
            station_id = session.station_id
            station_index = infrastructure.get_station_index(session.station_id)
            # Energy deliverable in one period at the minimum pilot:
            # A * V * (period/60 h) / 1000 -- presumably kWh; confirm units.
            threshold = (
                infrastructure.min_pilot[station_index]
                * infrastructure.voltages[station_index]
                / (60 / interface.period)
                / 1000
            )
            if session.remaining_demand > threshold:
                self.assertGreaterEqual(schedule[station_id][0], session.min_rates[0])
            else:
                self.assertEqual(schedule[station_id][0], 0)

    def _test_in_allowable_rates(self, sessions, schedule, interface) -> None:
        """Rates lie in the EVSE's allowable set (interval or discrete list)."""
        for session in sessions:
            station_id = session.station_id
            (is_continuous, allowable,) = interface.allowable_pilot_signals(station_id)
            if is_continuous:
                self.assertGreaterEqual(schedule[station_id], allowable[0])
                self.assertLessEqual(schedule[station_id], allowable[1])
            else:
                self.assertIn(schedule[station_id], allowable)

    def _test_energy_requested_not_exceeded(
        self, sessions, schedule, interface
    ) -> None:
        """A session never receives more than its remaining requested energy."""
        for session in sessions:
            station_id = session.station_id
            self.assertLessEqual(
                schedule[station_id], interface.remaining_amp_periods(session),
            )

    def _test_infrastructure_limits_satisfied(self, schedule, interface) -> None:
        """The schedule as a whole respects the network's constraints."""
        self.assertTrue(interface.is_feasible(schedule))

    # noinspection PyMethodMayBeStatic
    def _test_all_rates_at_max(
        self, sessions, schedule, interface
    ) -> None:  # pylint: disable=no-self-use
        """When capacity is non-binding, every session charges at its upper bound."""
        infrastructure = interface.infrastructure_info()
        for session in sessions:
            i = infrastructure.get_station_index(session.station_id)
            ub = min(infrastructure.max_pilot[i], session.max_rates[0])
            rates = schedule[session.station_id]
            nptest.assert_almost_equal(rates, ub, decimal=4)

    def _test_charging_not_interrupted(self, sessions, schedule, interface) -> None:
        """Pilot never drops below the EVSE minimum while demand remains."""
        for session in sessions:
            scheduled = schedule[session.station_id]
            minimum_pilot = interface.min_pilot_signal(session.station_id)
            remaining_energy = interface.remaining_amp_periods(session)
            # Algorithm should not exceed remaining energy in order to meet minimum
            # pilot.
            if minimum_pilot < remaining_energy:
                self.assertGreaterEqual(scheduled, minimum_pilot)

    def _test_max_rate_estimator_not_exceeded(self, sessions, schedule) -> None:
        """Schedules stay below the mocked per-session estimated max rates."""
        for session in sessions:
            self.assertLessEqual(
                np.array(schedule[session.station_id]),
                self.max_rate_estimation[session.session_id],
            )
# Two Station Test Case
def two_station(
    limit: float,
    continuous: bool,
    session_max_rate: float,
    session_min_rate: float = 0,
    remaining_energy: Optional[List[float]] = None,
    estimated_departure: Optional[List[float]] = None,
) -> TestingInterface:
    """Build a static two-EVSE, single-phase, single-constraint test interface.

    Both EVSEs allow pilots in [0, 32] when continuous, otherwise 0 or the
    integers 8..32. Two sessions arrive together with identical energy demands
    (3.3 each by default) and depart at periods 11 and 12 respectively.
    """
    if continuous:
        pilots = np.array([0, 32])
    else:
        pilots = np.array([0] + list(range(8, 33)))
    allowable: List[np.ndarray] = [pilots] * 2
    if remaining_energy is None:
        remaining_energy = [3.3, 3.3]
    network: InfrastructureDict = single_phase_single_constraint(
        2, limit, allowable_pilots=allowable, is_continuous=np.array([continuous] * 2)
    )
    sessions: List[SessionDict] = session_generator(
        num_sessions=2,
        arrivals=[0] * 2,
        departures=[11, 12],
        estimated_departures=estimated_departure,
        requested_energy=[3.3] * 2,
        remaining_energy=remaining_energy,
        min_rates=[session_min_rate] * 2,
        max_rates=[session_max_rate] * 2,
    )
    return TestingInterface(
        {
            "active_sessions": sessions,
            "infrastructure_info": network,
            "current_time": CURRENT_TIME,
            "period": PERIOD,
        }
    )
def big_three_phase_network(
    num_sessions: int = 30, limit: float = 1000
) -> TestingInterface:
    """Build a deliberately over-provisioned balanced three-phase network.

    Capacity is far beyond demand so phase unbalance cannot prevent any of the
    ``num_sessions`` identical EVs from charging at their 32 A maximum.
    """
    infrastructure = three_phase_balanced_network(num_sessions // 3, limit)
    active_sessions = session_generator(
        num_sessions=num_sessions,
        arrivals=[0] * num_sessions,
        departures=[2] * num_sessions,
        requested_energy=[10] * num_sessions,
        remaining_energy=[10] * num_sessions,
        max_rates=[32] * num_sessions,
    )
    # Shuffle so algorithms cannot rely on generation order of the sessions.
    random.shuffle(active_sessions)
    return TestingInterface(
        {
            "active_sessions": active_sessions,
            "infrastructure_info": infrastructure,
            "current_time": CURRENT_TIME,
            "period": PERIOD,
        }
    )
class TestTwoStationsBase(BaseAlgorithmTest):
    """Scenario battery for the two-station network built by two_station()."""

    def setUp(self) -> None:
        """ See BaseAlgorithmTest.setUp. """
        self.algo = None
        # Canned per-session upper-bound estimates, keyed by session id.
        self.max_rate_estimation = {"0": 16, "1": 12}

    @staticmethod
    def _get_scenarios() -> List[Scenario]:
        """Build the cartesian product of two-station scenarios.

        Axes (itertools.product iterates the rightmost axis fastest, exactly
        matching the original nested-loop order):
          - limit: binding constraint (40 A), met exactly when charging at max
            (64 A), and non-binding (80 A).
          - session max rate: below (16), equal to (32), above (40) the 32 A
            station limit, so both station and session caps are exercised.
          - session min rate: 0 and 8 A.
          - remaining energy: ample demand vs. demand small enough to force a
            pilot of 0 while demand remains.
          - continuous vs. discrete pilots, interruptible vs. uninterrupted
            charging, and with/without max-rate estimation.
        """
        from itertools import product

        scenarios = []
        axes = product(
            [40, 64, 80],                # limit
            [16, 32, 40],                # session_max_rate
            [0, 8],                      # session_min_rate
            [[3.3, 3.3], [0.3, 0.05]],   # session_energy_demands
            [True, False],               # continuous
            [True, False],               # uninterrupted
            [True, False],               # estimate_max_rate
        )
        for (
            limit,
            session_max_rate,
            session_min_rate,
            session_energy_demands,
            continuous,
            uninterrupted,
            estimate_max_rate,
        ) in axes:
            # Charging at max can only be asserted when the constraint is
            # non-binding, demand is ample, and no estimator caps the rate.
            assert_at_max = not (
                limit < 64
                or session_energy_demands == [0.3, 0.05]
                or estimate_max_rate
            )
            interface: TestingInterface = two_station(
                limit,
                continuous,
                session_max_rate,
                session_min_rate,
                remaining_energy=session_energy_demands,
            )
            scenario_name = (
                f"capacity: {limit}, "
                f"session max: {session_max_rate}, "
                f"session min: {session_min_rate}, "
                f"continuous pilot: {continuous}, "
                f"uninterrupted: {uninterrupted}, "
                f"estimate_max_rate: {estimate_max_rate} "
            )
            scenarios.append(
                Scenario(
                    scenario_name,
                    interface,
                    assert_at_max,
                    uninterrupted,
                    estimate_max_rate,
                )
            )
        return scenarios
class TestThirtyStationsBase(BaseAlgorithmTest):
    """Scenario battery for the large three-phase network."""

    def setUp(self) -> None:
        """ See BaseAlgorithmTest.setUp. """
        self.algo = None
        self.max_rate_estimation = {}  # max-rate estimation is not exercised here

    @staticmethod
    def _get_scenarios() -> List[Scenario]:
        """One unconstrained scenario per network capacity."""
        return [
            Scenario(
                f"capacity: {limit} ",
                big_three_phase_network(limit=limit),
                False,
                False,
                False,
            )
            for limit in [1500, 3200]
        ]
# --------------------------------------------------------------------------------------
# Tests
#
# As the functionality tested in each class is evident from the class names,
# the setUp methods are left without docstrings (hence the noinspection
# comments).
# --------------------------------------------------------------------------------------
class TestTwoStationsMinRatesInfeasible(unittest.TestCase):
    """ Check that error is thrown when minimum rates are not feasible. """

    def test_sorted_min_rates_infeasible(self) -> None:
        """Every algorithm must reject minimum rates the network cannot carry."""
        limit, max_rate, min_rate = 16, 32, 16
        for continuous in [True, False]:
            interface = two_station(limit, continuous, max_rate, min_rate)
            for algo_name, algo in algorithms.items():
                subtest_name = (
                    f"algorithm: {algo_name}, "
                    f"capacity: {limit}, "
                    f"continuous pilot: {continuous}"
                )
                with self.subTest(msg=subtest_name):
                    algo.register_interface(interface)
                    expected_message = (
                        "Charging all sessions at "
                        "their lower bound is not "
                        "feasible."
                    )
                    with self.assertRaisesRegex(ValueError, expected_message):
                        algo.run()
class TestTwoStationsFCFS(TestTwoStationsBase):
    """Two-station scenarios under first-come-first-served sorting."""
    # noinspection PyMissingOrEmptyDocstring
    def setUp(self) -> None:
        super().setUp()
        self.algo = SortedSchedulingAlgo(first_come_first_served)
class TestThirtyStationsFCFS(TestThirtyStationsBase):
    """Thirty-station scenarios under first-come-first-served sorting."""
    # noinspection PyMissingOrEmptyDocstring
    def setUp(self) -> None:
        super().setUp()
        self.algo = SortedSchedulingAlgo(first_come_first_served)
class TestTwoStationsEDF(TestTwoStationsBase):
    """Two-station scenarios under earliest-deadline-first sorting."""
    # noinspection PyMissingOrEmptyDocstring
    def setUp(self) -> None:
        super().setUp()
        self.algo = SortedSchedulingAlgo(earliest_deadline_first)
class TestThirtyStationsEDF(TestThirtyStationsBase):
    """Thirty-station scenarios under earliest-deadline-first sorting."""
    # noinspection PyMissingOrEmptyDocstring
    def setUp(self) -> None:
        super().setUp()
        self.algo = SortedSchedulingAlgo(earliest_deadline_first)
class TestTwoStationsLLF(TestTwoStationsBase):
    """Two-station scenarios under least-laxity-first sorting."""
    # noinspection PyMissingOrEmptyDocstring
    def setUp(self) -> None:
        super().setUp()
        self.algo = SortedSchedulingAlgo(least_laxity_first)
class TestThirtyStationsLLF(TestThirtyStationsBase):
    """Thirty-station scenarios under least-laxity-first sorting."""
    # noinspection PyMissingOrEmptyDocstring
    def setUp(self) -> None:
        super().setUp()
        self.algo = SortedSchedulingAlgo(least_laxity_first)
class TestTwoStationsLCFS(TestTwoStationsBase):
    """Two-station scenarios under last-come-first-served sorting."""
    # noinspection PyMissingOrEmptyDocstring
    def setUp(self) -> None:
        super().setUp()
        self.algo = SortedSchedulingAlgo(last_come_first_served)
class TestThirtyStationsLCFS(TestThirtyStationsBase):
    """Thirty-station scenarios under last-come-first-served sorting."""
    # noinspection PyMissingOrEmptyDocstring
    def setUp(self) -> None:
        super().setUp()
        self.algo = SortedSchedulingAlgo(last_come_first_served)
class TestTwoStationsLRPT(TestTwoStationsBase):
    """Two-station scenarios under largest-remaining-processing-time sorting."""
    # noinspection PyMissingOrEmptyDocstring
    def setUp(self) -> None:
        super().setUp()
        self.algo = SortedSchedulingAlgo(largest_remaining_processing_time)
class TestThirtyStationsLRPT(TestThirtyStationsBase):
    """Thirty-station scenarios under largest-remaining-processing-time sorting."""
    # noinspection PyMissingOrEmptyDocstring
    def setUp(self) -> None:
        super().setUp()
        self.algo = SortedSchedulingAlgo(largest_remaining_processing_time)
class TestTwoStationsRR(TestTwoStationsBase):
    """Two-station scenarios under round-robin scheduling (FCFS ordering)."""
    # noinspection PyMissingOrEmptyDocstring
    def setUp(self) -> None:
        super().setUp()
        self.algo = RoundRobin(first_come_first_served)
class TestThirtyStationsRR(TestThirtyStationsBase):
    """Thirty-station scenarios under round-robin scheduling (FCFS ordering)."""
    # noinspection PyMissingOrEmptyDocstring
    def setUp(self) -> None:
        super().setUp()
        self.algo = RoundRobin(first_come_first_served)
class TestEarliestDeadlineFirstOrder(unittest.TestCase):
    """earliest_deadline_first should order sessions by estimated departure."""

    def test_estimated_departure_matches_real(self):
        iface = two_station(
            limit=100,
            continuous=True,
            session_max_rate=32,
            estimated_departure=[11, 12],
        )
        ordering = earliest_deadline_first(iface.active_sessions(), iface)
        self.assertEqual(ordering[0].session_id, "0")
        self.assertEqual(ordering[1].session_id, "1")

    def test_estimated_departure_does_not_match_real(self):
        # Estimated departures are swapped relative to the true ones; the sort
        # must follow the estimates.
        iface = two_station(
            limit=100,
            continuous=True,
            session_max_rate=32,
            estimated_departure=[12, 11],
        )
        ordering = earliest_deadline_first(iface.active_sessions(), iface)
        self.assertEqual(ordering[0].session_id, "1")
        self.assertEqual(ordering[1].session_id, "0")
class TestLeastLaxityFirstOrder(unittest.TestCase):
    """least_laxity_first should compute laxity from estimated departures."""

    def test_estimated_departure_matches_real(self):
        iface = two_station(
            limit=100,
            continuous=True,
            session_max_rate=32,
            estimated_departure=[11, 12],
        )
        ordering = least_laxity_first(iface.active_sessions(), iface)
        self.assertEqual(ordering[0].session_id, "0")
        self.assertEqual(ordering[1].session_id, "1")

    def test_estimated_departure_does_not_match_real(self):
        # Estimated departures are swapped relative to the true ones; the sort
        # must follow the estimates.
        iface = two_station(
            limit=100,
            continuous=True,
            session_max_rate=32,
            estimated_departure=[12, 11],
        )
        ordering = least_laxity_first(iface.active_sessions(), iface)
        self.assertEqual(ordering[0].session_id, "1")
        self.assertEqual(ordering[1].session_id, "0")
# Remove the abstract base test classes from the module namespace so that
# unittest discovery does not collect and run them directly (their setUp
# leaves self.algo as None, so running them standalone would fail).
del BaseAlgorithmTest
del TestTwoStationsBase
del TestThirtyStationsBase
# del TestTwoStationsMinRatesFeasibleBase
|
# -*- coding:utf-8 -*-
from collections import OrderedDict
import torch.nn as nn
from mmdet.models.utils import brick as vn_layer
class TinyYolov3(nn.Module):
    """Tiny-YOLOv3 assembled from darknet-style conv/pool bricks.

    Backbone stages are registered as ``self.layer0`` (a ModuleList of four
    sequential stages) and the detection-head pieces as ``self.head0``.
    ``forward`` runs the backbone only and returns the three feature maps
    used by the detection head: [stage6, stage5, stage4].
    """

    def __init__(self, pretrained=None):
        """
        Args:
            pretrained (str, optional): path to a darknet ``.weights`` file to
                load sequentially into the layers; None leaves default init.
        """
        super(TinyYolov3, self).__init__()
        # Network
        layer0 = [
            # backbone
            OrderedDict([
                ('0_convbatch', vn_layer.Conv2dBatchLeaky(3, 16, 3, 1)),
                ('1_max', nn.MaxPool2d(2, 2)),
                ('2_convbatch', vn_layer.Conv2dBatchLeaky(16, 32, 3, 1)),
                ('3_max', nn.MaxPool2d(2, 2)),
                ('4_convbatch', vn_layer.Conv2dBatchLeaky(32, 64, 3, 1)),
            ]),
            OrderedDict([
                ('5_max', nn.MaxPool2d(2, 2)),
                ('6_convbatch', vn_layer.Conv2dBatchLeaky(64, 128, 3, 1)),
            ]),
            OrderedDict([
                ('7_max', nn.MaxPool2d(2, 2)),
                ('8_convbatch', vn_layer.Conv2dBatchLeaky(128, 256, 3, 1)),
            ]),
            # backbone
            OrderedDict([
                ('9_max', nn.MaxPool2d(2, 2)),
                ('10_convbatch', vn_layer.Conv2dBatchLeaky(256, 512, 3, 1)),
                ('10_zero_pad', nn.ZeroPad2d((0, 1, 0, 1))),
                ('11_max', nn.MaxPool2d(2, 1)),
                ('12_convbatch', vn_layer.Conv2dBatchLeaky(512, 1024, 3, 1)),
                ('13_convbatch', vn_layer.Conv2dBatchLeaky(1024, 256, 1, 1)),
            ]),
        ]
        head0 = [
            OrderedDict([
                ('14_convbatch', vn_layer.Conv2dBatchLeaky(256, 512, 3, 1)),
                ('15_conv', nn.Conv2d(512, 3 * (5 + 80), 1)),
            ]),
            OrderedDict([
                ('18_convbatch', vn_layer.Conv2dBatchLeaky(256, 128, 1, 1)),
                ('19_upsample', nn.Upsample(scale_factor=2)),
            ]),
            # stage5 / head
            OrderedDict([
                ('21_convbatch', vn_layer.Conv2dBatchLeaky(256 + 128, 256, 3, 1)),
                ('22_conv', nn.Conv2d(256, 3 * (5 + 80), 1)),
            ]),
        ]
        self.layer0 = nn.ModuleList([nn.Sequential(layer_dict) for layer_dict in layer0])
        self.head0 = nn.ModuleList([nn.Sequential(layer_dict) for layer_dict in head0])
        self.init_weights(pretrained)

    def __modules_recurse(self, mod=None):
        """ This function will recursively loop over all module children.
        Args:
            mod (torch.nn.Module, optional): Module to loop over; Default **self**
        """
        if mod is None:
            mod = self
        for module in mod.children():
            if isinstance(module, (nn.ModuleList, nn.Sequential)):
                yield from self.__modules_recurse(module)
            else:
                yield module

    def init_weights(self, pretrained=None):
        """Load darknet weights sequentially into each leaf module.

        Modules the WeightLoader does not understand raise NotImplementedError
        and are skipped (e.g. pooling/padding layers carry no weights).
        """
        if pretrained is not None:
            weights = vn_layer.WeightLoader(pretrained)
            for module in self.__modules_recurse():
                try:
                    weights.load_layer(module)
                    print(f'Layer loaded: {module}')
                    if weights.start >= weights.size:
                        print(f'Finished loading weights [{weights.start}/{weights.size} weights]')
                        break
                except NotImplementedError:
                    print(f'Layer skipped: {module.__class__.__name__}')

    def forward(self, x):
        """Run the backbone and return [stage6, stage5, stage4] feature maps."""
        # BUG FIX: the original indexed ``self.layers``, which is never defined
        # (the stages are registered as ``self.layer0``), so any forward pass
        # raised AttributeError.
        stem = self.layer0[0](x)
        stage4 = self.layer0[1](stem)
        stage5 = self.layer0[2](stage4)
        stage6 = self.layer0[3](stage5)
        # NOTE(review): head0 is intentionally not applied here -- presumably
        # the head runs inside the mmdet bbox_head after weight conversion;
        # confirm against the consuming model.
        features = [stage6, stage5, stage4]
        return features
if __name__ == '__main__':
    import torch
    # darknet weights path -- see https://github.com/AlexeyAB/darknet
    tiny_yolov3_weights_path = '/home/pi/yolo权重/tiny-yolov3/yolov3-tiny.weights'
    tiny_yolov3 = TinyYolov3(pretrained=tiny_yolov3_weights_path)
    # Re-key the state dict for loading into an mmdetection model: backbone
    # parameters move under 'backbone.layers', head parameters under
    # 'bbox_head.layers'.
    new_state_dict = OrderedDict()
    for k, v in tiny_yolov3.state_dict().items():
        if k.startswith('layer0'):
            name = k.replace('layer0', 'backbone.layers')
        elif k.startswith('head0'):
            name = k.replace('head0', 'bbox_head.layers')
        else:
            # NOTE(review): TinyYolov3 only registers 'layer0' and 'head0'
            # parameters, so these 'head1' branches look unreachable here --
            # possibly leftovers from a two-attribute variant; confirm before
            # removing.
            if k.startswith('head1.0'):
                name = k.replace('head1.0', 'bbox_head.layers.1')
            elif k.startswith('head1.1'):
                name = k.replace('head1.1', 'bbox_head.layers.2')
            else:
                name = k.replace('head1', 'bbox_head.layers')
        new_state_dict[name] = v
    data = {"state_dict": new_state_dict}
    torch.save(data, '../../tiny_yolov3.pth')
|
<filename>lite/tests/unittest_py/model_test/run_model_test.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
# Keys used to read entries out of a model config dict.
URL = "url"
MODEL_NAME = "model_name"
FILE_NAME = "file_name"
INPUT_SHAPES = "input_shapes"

_BASE_URL = "https://paddle-inference-dist.bj.bcebos.com/AI-Rank/mobile"


def _model_config(name, input_shape):
    """Build one download/run config entry for a benchmark model."""
    return {
        "url": "{}/{}.tar.gz".format(_BASE_URL, name),
        "model_name": name,
        "file_name": "{}.tar.gz".format(name),
        "input_shapes": [input_shape],
    }


MobileNetV1_config = _model_config("MobileNetV1", "1,3,224,224")
MobileNetV2_config = _model_config("MobileNetV2", "1,3,224,224")
MobileNetV3_large_x1_0_config = _model_config("MobileNetV3_large_x1_0",
                                              "1,3,224,224")
MobileNetV3_small_x1_0_config = _model_config("MobileNetV3_small_x1_0",
                                              "1,3,224,224")
ResNet50_config = _model_config("ResNet50", "1,3,224,224")
ssdlite_mobilenet_v3_large_config = _model_config("ssdlite_mobilenet_v3_large",
                                                  "1,3,320,320")

all_configs = [
    MobileNetV1_config,
    MobileNetV2_config,
    MobileNetV3_large_x1_0_config,
    MobileNetV3_small_x1_0_config,
    ResNet50_config,
    ssdlite_mobilenet_v3_large_config,
]
parser = argparse.ArgumentParser()
parser.add_argument("--target", help="set target, default=X86", default="X86")
args = parser.parse_args()

# Interpreter used to drive model_test_base.py for each target. X86/Host CI
# images ship python3.7; the device-backed targets use python3.8.
_PYTHON_FOR_TARGET = {
    "X86": "python3.7",
    "Host": "python3.7",
    "ARM": "python3.8",
    "OpenCL": "python3.8",
    "Metal": "python3.8",
}

# BUG FIX: the original if/elif chain left `command` undefined for an unknown
# target, crashing with NameError at print(command); fail fast instead.
if args.target not in _PYTHON_FOR_TARGET:
    raise SystemExit("Unsupported target: {}".format(args.target))

python_bin = _PYTHON_FOR_TARGET[args.target]
for config in all_configs:
    # Turn the config's shape list into repeated --input_shapes flags.
    input_info_str = ""
    for input_shape in config[INPUT_SHAPES]:
        input_info_str = input_info_str + " --input_shapes={}".format(
            input_shape)
    command = "{} {}/model_test_base.py --target={} --url={} --model_name={} --file_name={} {}".format(
        python_bin, os.getcwd(), args.target, config[URL], config[MODEL_NAME],
        config[FILE_NAME], input_info_str)
    print(command)
    os.system(command)
|
<filename>python/dlxapi/models/expand_component.py
# coding: utf-8
"""
Decision Lens API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from dlxapi.configuration import Configuration
class ExpandComponent(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> swagger-declared type.
    swagger_types = {
        'path': 'str',
        'match': 'str',
        'all_possible': 'bool',
        'unique': 'bool',
        'limit': 'int',
        'offset': 'int',
        'order_by': 'str'
    }

    # Python attribute name -> JSON field name in the API definition.
    attribute_map = {
        'path': 'path',
        'match': 'match',
        'all_possible': 'allPossible',
        'unique': 'unique',
        'limit': 'limit',
        'offset': 'offset',
        'order_by': 'orderBy'
    }

    def __init__(self, path=None, match=None, all_possible=None, unique=None, limit=None, offset=None, order_by=None, _configuration=None):  # noqa: E501
        """ExpandComponent - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration

        # Backing fields for the properties below; only assigned through the
        # setters when a non-None value is supplied.
        self._path = None
        self._match = None
        self._all_possible = None
        self._unique = None
        self._limit = None
        self._offset = None
        self._order_by = None
        self.discriminator = None

        if path is not None:
            self.path = path
        if match is not None:
            self.match = match
        if all_possible is not None:
            self.all_possible = all_possible
        if unique is not None:
            self.unique = unique
        if limit is not None:
            self.limit = limit
        if offset is not None:
            self.offset = offset
        if order_by is not None:
            self.order_by = order_by

    @property
    def path(self):
        """Gets the path of this ExpandComponent.  # noqa: E501

        path to the collection. example - items.fieldValues  # noqa: E501

        :return: The path of this ExpandComponent.  # noqa: E501
        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """Sets the path of this ExpandComponent.

        path to the collection. example - items.fieldValues  # noqa: E501

        :param path: The path of this ExpandComponent.  # noqa: E501
        :type: str
        """
        self._path = path

    @property
    def match(self):
        """Gets the match of this ExpandComponent.  # noqa: E501

        filter the collection. example - \"field.id:123-12321-321312-e3e21w\"  # noqa: E501

        :return: The match of this ExpandComponent.  # noqa: E501
        :rtype: str
        """
        return self._match

    @match.setter
    def match(self, match):
        """Sets the match of this ExpandComponent.

        filter the collection. example - \"field.id:123-12321-321312-e3e21w\"  # noqa: E501

        :param match: The match of this ExpandComponent.  # noqa: E501
        :type: str
        """
        self._match = match

    @property
    def all_possible(self):
        """Gets the all_possible of this ExpandComponent.  # noqa: E501

        return all possible values in the collection even if they are not assigned to projects currently like all STATUS values.  # noqa: E501

        :return: The all_possible of this ExpandComponent.  # noqa: E501
        :rtype: bool
        """
        return self._all_possible

    @all_possible.setter
    def all_possible(self, all_possible):
        """Sets the all_possible of this ExpandComponent.

        return all possible values in the collection even if they are not assigned to projects currently like all STATUS values.  # noqa: E501

        :param all_possible: The all_possible of this ExpandComponent.  # noqa: E501
        :type: bool
        """
        self._all_possible = all_possible

    @property
    def unique(self):
        """Gets the unique of this ExpandComponent.  # noqa: E501

        return only unique values in the collection  # noqa: E501

        :return: The unique of this ExpandComponent.  # noqa: E501
        :rtype: bool
        """
        return self._unique

    @unique.setter
    def unique(self, unique):
        """Sets the unique of this ExpandComponent.

        return only unique values in the collection  # noqa: E501

        :param unique: The unique of this ExpandComponent.  # noqa: E501
        :type: bool
        """
        self._unique = unique

    @property
    def limit(self):
        """Gets the limit of this ExpandComponent.  # noqa: E501

        pagination - limit  # noqa: E501

        :return: The limit of this ExpandComponent.  # noqa: E501
        :rtype: int
        """
        return self._limit

    @limit.setter
    def limit(self, limit):
        """Sets the limit of this ExpandComponent.

        pagination - limit  # noqa: E501

        :param limit: The limit of this ExpandComponent.  # noqa: E501
        :type: int
        """
        self._limit = limit

    @property
    def offset(self):
        """Gets the offset of this ExpandComponent.  # noqa: E501

        pagination - offset  # noqa: E501

        :return: The offset of this ExpandComponent.  # noqa: E501
        :rtype: int
        """
        return self._offset

    @offset.setter
    def offset(self, offset):
        """Sets the offset of this ExpandComponent.

        pagination - offset  # noqa: E501

        :param offset: The offset of this ExpandComponent.  # noqa: E501
        :type: int
        """
        self._offset = offset

    @property
    def order_by(self):
        """Gets the order_by of this ExpandComponent.  # noqa: E501

        order the values returned in the collection  # noqa: E501

        :return: The order_by of this ExpandComponent.  # noqa: E501
        :rtype: str
        """
        return self._order_by

    @order_by.setter
    def order_by(self, order_by):
        """Sets the order_by of this ExpandComponent.

        order the values returned in the collection  # noqa: E501

        :param order_by: The order_by of this ExpandComponent.  # noqa: E501
        :type: str
        """
        self._order_by = order_by

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models (anything with a to_dict),
        # including those held inside lists and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(ExpandComponent, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ExpandComponent):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ExpandComponent):
            return True

        return self.to_dict() != other.to_dict()
|
# -*- coding: utf-8 -*-
from warnings import warn
import matplotlib.animation
import matplotlib.pyplot as plt
import numpy as np
from ..misc import NeuroKitWarning
def complexity_embedding(signal, delay=1, dimension=3, show=False):
    """Time-delay embedding of a signal.

    Takens's (1981) embedding theorem suggests that a sequence of measurements of a
    dynamic system includes in itself all the information required to completely
    reconstruct the state space: the signal and its lagged copies form trajectories
    whose attractors characterise the underlying dynamical system.

    This function is adapted from `EntroPy <https://github.com/raphaelvallat/entropy>`_
    and is equivalent to the `delay_embedding()` function from 'nolds'.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted 'Tau', sometimes referred to as 'lag'). In practice,
        it is common to have a fixed time lag (corresponding for instance to the
        sampling rate; Gautama, 2003), or to find a suitable value using some
        algorithmic heuristics (see ``delay_optimal()``).
    dimension : int
        Embedding dimension (often denoted 'm' or 'd', sometimes referred to as
        'order'). Typically 2 or 3. It corresponds to the number of compared runs of
        lagged data. If 2, the embedding returns an array with two columns
        corresponding to the original signal and its delayed (by Tau) version.
    show : bool
        Plot the reconstructed attractor.

    Returns
    -------
    array
        Embedded time-series, of shape (n_times - (order - 1) * delay, order)

    See Also
    ------------
    embedding_delay, embedding_dimension

    Examples
    ---------
    >>> import neurokit2 as nk
    >>>
    >>> signal = nk.signal_simulate(duration=2, frequency=5, noise=0.01)
    >>> embedded = nk.complexity_embedding(signal, delay=50, dimension=3, show=True) #doctest: +SKIP

    References
    -----------
    - Gautama, T., Mandic, D. P., & Van Hulle, M. M. (2003, April). A differential
      entropy based method for determining the optimal embedding parameters of a
      signal. In 2003 IEEE International Conference on Acoustics, Speech, and Signal
      Processing (ICASSP'03). (Vol. 6, pp. VI-29). IEEE.
    """
    N = len(signal)

    # Sanity checks: coerce float parameters with a warning rather than failing.
    if isinstance(delay, float):
        warn("`delay` must be an integer. Running `int(delay)`", category=NeuroKitWarning)
        delay = int(delay)
    if isinstance(dimension, float):
        warn("`dimension` must be an integer. Running `int(dimension)`", category=NeuroKitWarning)
        dimension = int(dimension)

    if dimension * delay > N:
        # BUG FIX: the two message fragments were previously separated by a comma,
        # which passed them as two ValueError args and rendered a tuple instead of
        # a single readable message. Adjacent literals concatenate them instead.
        raise ValueError(
            "NeuroKit error: complexity_embedding(): dimension * delay should be lower than"
            " the length of the signal."
        )
    if delay < 1:
        raise ValueError("NeuroKit error: complexity_embedding(): 'delay' has to be at least 1.")

    # Row i holds the signal shifted by i * delay samples; transposing yields one
    # embedded point per row.
    Y = np.zeros((dimension, N - (dimension - 1) * delay))
    for i in range(dimension):
        Y[i] = signal[i * delay : i * delay + Y.shape[1]]
    embedded = Y.T

    if show is True:
        _embedding_plot(embedded)

    return embedded
# =============================================================================
# Internals
# =============================================================================
def _embedding_plot(embedded):
    """Plot reconstructed attractor.

    Dispatches on the embedding dimension (number of columns): 2D and 3D get
    dedicated plots; anything higher falls back to the 4D variant, which maps
    the fourth column to color. Input must come from `nk.complexity_embedding()`.
    """
    dim = embedded.shape[1]
    if dim == 2:
        return _embedding_plot_2D(embedded)
    if dim == 3:
        return _embedding_plot_3D(embedded)
    return _embedding_plot_4D(embedded)
# =============================================================================
# Internal plots
# =============================================================================
def _embedding_plot_2D(embedded):
    """2D attractor: column 0 against column 1, single fixed color."""
    return plt.plot(embedded[:, 0], embedded[:, 1], color="#3F51B5")

def _embedding_plot_3D(embedded):
    """3D attractor trajectory, colored by the third dimension."""
    return _plot_3D_colored(
        x=embedded[:, 0], y=embedded[:, 1], z=embedded[:, 2], color=embedded[:, 2], rotate=False
    )

def _embedding_plot_4D(embedded):
    """3D attractor with the fourth embedding dimension mapped to color."""
    return _plot_3D_colored(
        x=embedded[:, 0], y=embedded[:, 1], z=embedded[:, 2], color=embedded[:, 3], rotate=False
    )
# =============================================================================
# Plotting
# =============================================================================
def _plot_3D_colored(x, y, z, color=None, rotate=False):
    """Plot a 3D trajectory as a line whose color follows *color* (defaults to z).

    The trajectory is split into per-step segments so that each segment can be
    drawn with its own color from the 'plasma' colormap.
    """
    if color is None:
        color = z

    # Create a set of line segments: consecutive point pairs, shape (n-1, 2, 3).
    points = np.array([x, y, z]).T.reshape(-1, 1, 3)
    segments = np.concatenate([points[:-1], points[1:]], axis=1)

    # Normalise color values into [0, 1] and map them onto the colormap.
    norm = plt.Normalize(color.min(), color.max())
    cmap = plt.get_cmap("plasma")
    colors = cmap(norm(color))

    # Plot: one ax.plot call per segment so each carries its own color.
    # NOTE(review): O(n) plot calls; fine for short embeddings, slow for long ones.
    fig = plt.figure()
    ax = plt.axes(projection="3d")
    for i in range(len(x) - 1):
        seg = segments[i]
        (l,) = ax.plot(seg[:, 0], seg[:, 1], seg[:, 2], color=colors[i])
        l.set_solid_capstyle("round")

    if rotate is True:
        fig = _plot_3D_colored_rotate(fig, ax)

    return fig
def _plot_3D_colored_rotate(fig, ax):
    """Animate *fig* by spinning the 3D axes one full turn (1 degree per frame)."""

    def rotate(angle):
        ax.view_init(azim=angle)

    # cache_frame_data=False avoids keeping all 361 frames in memory.
    fig = matplotlib.animation.FuncAnimation(
        fig, rotate, frames=np.arange(0, 361, 1), interval=10, cache_frame_data=False
    )
    return fig
|
from sigman import analyzer
import numpy as np
import pickle
from sigman.analyzer import InvalidArgumentError
# Analyzer metadata: these module-level fields are read by sigman.analyzer to
# register the procedure and describe it to the user.
procedure_type = 'points'
description = (
"""
Procedure searching for dicrotic notches based on BP and ECG signals
by using a trained neural network.
It also makes use of SBP points to narrow the searching area.
""")
author = 'kcybulski'
# Human-readable help text for each configurable argument.
arguments = {
    'net': "Path to the pickled neural network file",
    'focus_range': ("Time range from an SBP point in which DNs are searched "
                    "for; two numbers separated by a comma."),
    'test_every': ("Distance between points tested within focus_range. "
                   "The less the more accurate.")}
# Defaults used when the caller provides no overrides.
default_arguments = {'net': 'procedures/default_dn_net.pickle',
                     'focus_range': '0.1,0.5', 'test_every': 0.005}
# Output point type and the waves/points this procedure requires as input.
output_type = 'dn'
required_waves = ['bp', 'ecg']
required_points = ['sbp']
class Temp_Network():
    """Minimal stand-in for a pickled feed-forward network.

    Copies the layer weights, biases and metadata out of the unpickled
    *network* object and re-implements the forward pass with tanh activations.
    """

    def __init__(self, network):
        # Metadata describing the expected input window.
        self.input_point_count = network.shape[0]
        self.sample_length = network.sample_length
        self.detection_point_offset = network.detection_point
        # Layer parameters: parallel lists of weight matrices and bias vectors.
        self.w = network.w
        self.b = network.b
        self.activation = np.tanh

    def forward(self, x):
        """Run the input vector through every layer and return the output."""
        activation = self.activation
        out = x
        for weight, bias in zip(self.w, self.b):
            out = activation(out.dot(weight) + bias)
        return out
def interpret_arguments(waves, points, arguments):
    """Validate and convert the raw (string) procedure arguments.

    Returns a dict with keys 'net' (the unpickled network object),
    'focus_range' (list of two floats) and 'test_every' (float).

    Raises InvalidArgumentError when any argument cannot be parsed.
    """
    # net -- unpickle the trained network. `with` closes the file handle,
    # which the original `pickle.load(open(...))` leaked. The bare `except:`
    # clauses below were narrowed: they used to swallow KeyboardInterrupt too.
    try:
        with open(arguments['net'], 'rb') as net_file:
            net = pickle.load(net_file)
    except Exception as exc:
        raise InvalidArgumentError("Invalid neural net file") from exc

    # focus_range -- "a,b" string -> [a, b] floats.
    try:
        focus_range = [float(string) for string in
                       arguments['focus_range'].split(',')]
    except (KeyError, ValueError, AttributeError) as exc:
        raise InvalidArgumentError("Invalid focus range") from exc
    if len(focus_range) != 2:
        raise InvalidArgumentError("Invalid number of focus range values")

    # test_every -- sampling step (seconds) inside the focus window.
    try:
        test_every = float(arguments['test_every'])
    except (KeyError, TypeError, ValueError) as exc:
        raise InvalidArgumentError("Invalid `test_every` value") from exc

    return {
        'net': net,
        'focus_range': focus_range,
        'test_every': test_every}
def _generate_input_data_sample(bp_line, ecg_line, test_point, sample_length,
detection_point_offset, input_point_count):
"""Generates a single set of input data to analyse with the neural net."""
begin_time = test_point - detection_point_offset
end_time = begin_time + sample_length
bp_data = bp_line.data_slice(begin_time, end_time,
value_count = int(input_point_count/2))
ecg_data = ecg_line.data_slice(begin_time, end_time,
value_count = int(input_point_count/2))
# Normalisation
bp_data-=np.min(bp_data)
bp_data/=np.max(bp_data)
bp_data*=2
bp_data-=1
ecg_data-=np.min(ecg_data)
ecg_data/=np.max(ecg_data)
ecg_data*=2
ecg_data-=1
input_data = np.concatenate((bp_data,ecg_data))
return input_data
def procedure(waves, points, begin_time, end_time, arguments):
    """Detect dicrotic notch (DN) points between begin_time and end_time.

    For every SBP point, candidate times inside focus_range (sampled every
    `test_every` seconds) are scored with the neural net; the highest-scoring
    time becomes the DN for that beat.

    Returns a pair of np.arrays (dn_x, dn_y): DN times and BP values there.
    """
    ecg_line = waves['ecg']
    bp_line = waves['bp']
    sbp_points = points['sbp']
    focus_range = arguments['focus_range']
    test_every = arguments['test_every']
    net = arguments['net']
    # Input-window geometry comes from the trained network itself.
    sample_length = net.sample_length
    detection_point_offset = net.detection_point_offset
    input_point_count = net.input_point_count
    # left_offset=1 also fetches the SBP point just before begin_time.
    sbp_x, sbp_y = sbp_points.data_slice(begin_time, end_time, left_offset = 1)
    dn_x = []
    dn_y = []
    for helper_x in sbp_x:
        # Skip beats whose sampling window would start before begin_time.
        if helper_x + focus_range[0] - sample_length < begin_time:
            continue
        # Stop once the window would run past end_time (SBPs are sorted).
        if helper_x + focus_range[1] - detection_point_offset + sample_length > end_time:
            break
        focus_begin_time = helper_x + focus_range[0]
        focus_end_time = helper_x + focus_range[1]
        max_val = -1
        max_x = 0
        # Pick the candidate time with the strongest network response.
        for test_x in np.arange(focus_begin_time, focus_end_time, test_every):
            input_data = _generate_input_data_sample(
                bp_line, ecg_line, test_x,
                sample_length, detection_point_offset,
                input_point_count)
            val = net.forward(input_data)[0][0]
            if val > max_val:
                max_val = val
                max_x = test_x
        dn_x.append(max_x)
        dn_y.append(bp_line.value_at(max_x))
    dn_x = np.array(dn_x)
    dn_y = np.array(dn_y)
    return dn_x, dn_y
def execute(waves, points, begin_time, end_time, arguments):
    """Analyzer entry point: validate the arguments, then run the procedure."""
    arguments = interpret_arguments(waves, points, arguments)
    return procedure(waves, points, begin_time, end_time, arguments)
|
import sys
from termcolor import colored, cprint
def debug(*objects): print(objects)  # dev helper: prints all args as a tuple; the commented no-op below silences it
# def debug(*objects): 1
# dims = [300, 275]
# ur_pos = [150, 150]
# g_pos = [185, 100]
# dist = 500
# dims = [42, 59]
# ur_pos = [34, 44]
# g_pos = [6, 34]
# dist = 5000
################################
'''
## Forewords:
+ BUG report: I'm not an advanced Python user (I didn't know Python much, before FooBar challenges),
but I believe there is a bug in FooBar that global variables are shared across 10 test cases.
Please see at the end to see more details.
+ Luckily, the test cases don't include an edge case that `distance` = 10_000, and `dimensions` = (2, 3).
## Algorithm summary:
+ This bouncing problem is similar to infinite mirrors effect. This link can illustrate this very well:
https://gamedev.stackexchange.com/a/154482/117081
+ So, the solution to this bouncing problem can be simplified to counting all those mirrored rooms,
which are inside a circle with the radius equal to `distance` input.
'''
import math
def set_global_variables(dims, ur_pos, guard_pos, dist):
    """Stash the puzzle inputs in module globals and reset the angle caches.

    DIST_SQUARE pre-computes dist**2 so range checks can avoid sqrt.
    The angle dicts are re-initialised on every call because (per the module
    notes) globals persist across FooBar test cases in the same session.
    """
    global DIMS, UR_POS, GUARD_POS, DIST_SQUARE, HIT_GUARD_ANGLES, HIT_YOU_ANGLES
    DIMS = dims
    UR_POS = ur_pos
    GUARD_POS = guard_pos
    DIST_SQUARE = dist * dist
    HIT_GUARD_ANGLES = dict()
    HIT_YOU_ANGLES = dict()
def answer(dims, ur_pos, guard_pos, dist):
    """Count the distinct firing angles that hit a mirror image of the guard.

    Enumerates every mirrored copy of the room that could intersect the
    shooting circle (radius *dist*) and checks the guard's reflection in each.
    """
    set_global_variables(dims, ur_pos, guard_pos, dist)
    max_x_idx = int(dist / dims[0]) + 1
    max_y_idx = int(dist / dims[1]) + 1
    for yi in range(0, max_y_idx + 1):
        for xi in range(0, max_x_idx + 1):
            # All four sign combinations; the set removes duplicates when an
            # index is zero.
            mirrored_rooms = {(sx * xi, sy * yi)
                              for sx in (1, -1) for sy in (1, -1)}
            for room_index in mirrored_rooms:
                check_guard_in_mirrored_room(room_index)
    return len(HIT_GUARD_ANGLES)
def check_guard_in_mirrored_room(room_index):
    """Record the firing angle of the guard's mirror image in *room_index*.

    A shot along an angle that first hits one of *your own* mirror images
    cannot reach the guard, so angles already present in HIT_YOU_ANGLES are
    excluded from HIT_GUARD_ANGLES.
    NOTE(review): correctness appears to rely on self-images being registered
    before guard images along the same angle across the iteration in answer()
    -- confirm the room enumeration order guarantees this.
    """
    mirr_guard_pos = get_mirror_pos(room_index, GUARD_POS)
    mirr_ur_pos = get_mirror_pos(room_index, UR_POS)
    guard_in_circle = is_in_circle(mirr_guard_pos)
    your_in_circle = is_in_circle(mirr_ur_pos)
    mirr_guard_angle = get_angle(mirr_guard_pos)
    mirr_ur_angle = get_angle(mirr_ur_pos)
    # Your own reflection blocks this angle (but skip your actual position).
    if your_in_circle and tuple(UR_POS) != mirr_ur_pos:
        HIT_YOU_ANGLES[mirr_ur_angle] = True
    # The guard's reflection counts when in range and not blocked.
    if guard_in_circle and mirr_guard_angle not in HIT_YOU_ANGLES:
        HIT_GUARD_ANGLES[mirr_guard_angle] = True
def get_mirror_pos(room_index, orig_pos):
    """Map a base-room position to its reflected copy in room *room_index*.

    Odd-indexed rooms along an axis are mirror images, so the coordinate is
    flipped inside the room before the room offset is added.
    """
    reflected = []
    for axis in (0, 1):
        coord = orig_pos[axis]
        if room_index[axis] % 2:  # odd index -> mirrored along this axis
            coord = DIMS[axis] - coord
        reflected.append(coord + room_index[axis] * DIMS[axis])
    return tuple(reflected)
def is_in_circle(x_pos):
    """True when *x_pos* lies within shooting distance of your position."""
    dx, dy = x_pos[0] - UR_POS[0], x_pos[1] - UR_POS[1]
    return dx * dx + dy * dy <= DIST_SQUARE

def get_angle(x_pos):
    """Firing angle (radians) from your position toward *x_pos*."""
    return math.atan2(x_pos[1] - UR_POS[1], x_pos[0] - UR_POS[0])
'''
Below is the codes to reproduce the bug in which global variables are shared in 10 test cases in the same session.
Personal note:
+ I used global variables just to avoid passing around the test's inputs.
I'm not an advanced Python user so there may be a better approach than using global variables.
+ It was a painful time for me before finding the bug. My codes always failed at test 10. After a massive amount
of retries and brute force, I found the expected result of test 10 was the same value as my codes' result
running in my localhost but they still always failed! After more massive amount of retries, and a bit of
luck, I found it was because of the bug.
+ When my test always failed at test 10, I even suspected that FooBar sandbox somehow used less precision
in math.atan2 intentionally to trick me (I then confirmed it doesn't); or I even suspected that the hashing algorithm
in dict() in FooBar sandbox was broken, having collisions so the len of dict() went 'random'.
'''
'''
global_increment_count = 0
def answer(dims, ur_pos, guard_pos, dist):
global global_increment_count
test_case_4 = ( dist == 25 and dims[0] == 1000 and dims[1] == 1000
and ur_pos[0] == 250 and ur_pos[1] == 25
and guard_pos[0] == 257 and guard_pos[1] == 49
)
global_increment_count += 1
# force all test cases (except case 4) to always fail
if not test_case_4: return -1
# global_increment_count increments in each test so it reaches to 4 when in test 4.
if global_increment_count == 4:
return -1 # enforce test 4 to fail when `global_increment_count` reaches 4
else:
return 1 # successful result of test 4. If test 4 passes, then `global_increment_count` does not reach 4
'''
################################
if __name__ == "__main__":
    # BUG FIX: `dims`, `ur_pos`, `g_pos` and `dist` were only defined in the
    # commented-out sample blocks above, so this call raised NameError on
    # import. Define one sample case and guard under __main__ so importing
    # the module no longer executes (or crashes on) the demo run.
    dims = [3, 2]
    ur_pos = [1, 1]
    g_pos = [2, 1]
    dist = 4
    result = answer(dims, ur_pos, g_pos, dist)
    debug('RESULT: ', result)
|
import os
import uuid
import logging
from typing import Union
from pydano.cardano_cli import CardanoCli
from pydano.cardano_temp import tempdir
from pydano.query.protocol_param import ProtocolParam
from pydano.transaction.transaction_config import TransactionConfig
from pydano.transaction.miniting_config import MintingConfig
class Transaction(CardanoCli):
    """Base class for building and running `cardano-cli transaction` commands.

    Subclasses implement prepare_transaction() to populate
    self.prepared_transaction with the argv list that run_transaction()
    executes.
    """

    def __init__(
        self,
        transaction_config: Union[TransactionConfig, MintingConfig],
        testnet: bool = True,
    ):
        self.transaction_config = transaction_config
        # Unique id used to name this transaction's temp files.
        self.transaction_uuid = str(uuid.uuid4())
        super().__init__(testnet)

    def prepare_transaction(self) -> str:
        """Prepare a draft transaction, e.g. to calculate blockchain fees.

        The base implementation only resets self.prepared_transaction;
        subclasses override it and set the real command.

        FIX: this documentation previously lived in bare string literals
        placed *between* methods, which Python evaluates and discards; it
        has been moved into proper docstrings.
        """
        self.prepared_transaction = ""

    def run_transaction(self):
        """Call prepare_transaction(), then execute the prepared command."""
        self.prepare_transaction()
        logging.debug(f"Running transaction: {' '.join(self.prepared_transaction)}")
        return self.run_command(self.prepared_transaction)
class RawTransaction(Transaction):
    """Transaction that is drafted, fee-estimated, then rebuilt with real fees."""

    # Set by the concrete build subclass's prepare_transaction().
    transaction_file = None

    def run_raw_transaction(self):
        """Draft the transaction, compute min fees, then rebuild with fees set.

        Returns self so calls can be chained.
        Raises ValueError when drafting failed or the fee output is invalid.
        """
        # First pass: build a draft transaction (fees not yet known).
        self.run_transaction()
        if self.transaction_file is None:  # FIX: was `== None`
            raise ValueError("Intial transaction did not complete")
        calc_fee = CalculateMinFeeTransaction(
            self.transaction_config, self.transaction_file, testnet=self.testnet
        )
        fees_command_stdout = calc_fee.run_transaction()
        # `calculate-min-fee` prints e.g. `123456 Lovelace`; keep the number.
        min_fees = fees_command_stdout.stdout.split()[0].strip()
        if isinstance(min_fees, bytes):  # FIX: was `type(...) == bytes`
            min_fees = min_fees.decode()
        if not min_fees.isnumeric():
            raise ValueError("Error getting minfees")
        min_fees = int(min_fees)
        self.transaction_config.fees = min_fees
        # Second pass: rebuild the transaction with the real fee applied.
        self.run_transaction()
        return self
class SignTransaction(Transaction):
    """Sign a prepared transaction body with one or more signing keys."""

    def __init__(self, transaction: Transaction, signing_key: str):
        super().__init__(transaction.transaction_config, transaction.testnet)
        self.raw_transaction = transaction.transaction_file
        # Reuse the parent transaction's uuid so temp files share a name stem.
        self.transaction_uuid = transaction.transaction_uuid
        self.signing_key = signing_key

    @property
    def base_command(self):
        return ["cardano-cli", "transaction", "sign"]

    def prepare_transaction(self):
        """Assemble the `transaction sign` argv into self.prepared_transaction."""
        base_transaction = self.base_command
        base_transaction.append("--tx-body-file")
        base_transaction.append(self.raw_transaction)
        # Accept either a single key path or a list of them.
        if isinstance(self.signing_key, list):  # FIX: was `type(...) == list`
            for key in self.signing_key:
                base_transaction.append("--signing-key-file")
                base_transaction.append(key)
        else:
            base_transaction.append("--signing-key-file")
            base_transaction.append(self.signing_key)
        base_transaction.append("--out-file")
        self.signed_file = os.path.join(tempdir.name, f"{self.transaction_uuid}.signed")
        base_transaction.append(self.signed_file)
        self.prepared_transaction = base_transaction
class SubmitTransaction(Transaction):
    """Submit a signed transaction file to the blockchain."""

    def __init__(self, signed_transaction: SignTransaction):
        super().__init__(
            signed_transaction.transaction_config, signed_transaction.testnet
        )
        # Submit the .signed file produced by SignTransaction, under its uuid.
        self.raw_transaction = signed_transaction.signed_file
        self.transaction_uuid = signed_transaction.transaction_uuid

    @property
    def base_command(self):
        return ["cardano-cli", "transaction", "submit"]

    def prepare_transaction(self):
        """Assemble the `transaction submit` argv into self.prepared_transaction."""
        command = self.apply_blockchain(self.base_command)
        command.append("--tx-file")
        command.append(self.raw_transaction)
        self.prepared_transaction = command
class SignAndSubmit:
    """Mixin adding a one-call sign-then-submit flow to transaction classes."""

    def submit(self, signing_key):
        """Sign this transaction with *signing_key* and submit it on-chain."""
        print("Signing Transaction")
        st = SignTransaction(self, signing_key)
        st.run_transaction()
        print("Submitting Transaction")
        st = SubmitTransaction(st)
        st.run_transaction()
class BuildTransaction(Transaction, SignAndSubmit):
    """Build a balanced transaction with `cardano-cli transaction build`.

    Subclasses flip the class flags below to switch to raw building and/or
    token minting.
    """

    # raw=True -> `build-raw` semantics: explicit --fee, no --change-address.
    raw = False
    # minting=True -> also append the minting arguments from the config.
    minting = False

    def __init__(
        self,
        transaction_config: Union[TransactionConfig, MintingConfig],
        testnet: bool = True,
    ):
        super().__init__(transaction_config, testnet)
        # Fetch the protocol parameters file once; reused by every prepare.
        self.protocol_file = ProtocolParam(testnet).protocol_params()

    @property
    def base_command(self):
        return ["cardano-cli", "transaction", "build", "--alonzo-era"]

    def build_base_transaction(self):
        """Assemble the shared part of the build command (inputs/outputs)."""
        command = self.base_command
        if not self.raw:
            # Networked build: needs the blockchain (network) flags.
            command = self.apply_blockchain(command)
        else:
            # Raw build: fee must be passed explicitly.
            command.extend(["--fee", str(self.transaction_config.fees)])
        command.extend(self.transaction_config.input_utxos_args())
        command.extend(self.transaction_config.out_tx_args())
        if self.minting:
            command.extend(self.transaction_config.mint_args())
        return command

    def build_output_file(self, command, version="draft"):
        """Append the --out-file argument and remember the file path."""
        command.append("--out-file")
        transaction_file = os.path.join(
            tempdir.name, f"{self.transaction_uuid}.{version}"
        )
        self.transaction_file = transaction_file
        command.append(transaction_file)
        return command

    def prepare_transaction(self):
        """Assemble the full build argv into self.prepared_transaction."""
        base_transaction = self.build_base_transaction()
        if not self.raw:
            # `build` auto-balances: change address and protocol params needed.
            base_transaction.append("--change-address")
            base_transaction.append(self.transaction_config.change_address)
            base_transaction.append("--protocol-params-file")
            base_transaction.append(self.protocol_file)
        complete_trans = self.build_output_file(base_transaction)
        self.prepared_transaction = complete_trans
class CalculateMinFeeTransaction(Transaction):
    """Run `cardano-cli transaction calculate-min-fee` on a draft transaction."""

    def __init__(
        self,
        transaction_config: Union[TransactionConfig, MintingConfig],
        raw_transaction: str,
        testnet: bool = True,
    ):
        super().__init__(transaction_config, testnet)
        # Path of the draft transaction body whose fee is being estimated.
        self.raw_transaction = raw_transaction
        self.protocol_file = ProtocolParam(testnet).protocol_params()

    @property
    def base_command(self):
        return ["cardano-cli", "transaction", "calculate-min-fee"]

    def prepare_transaction(self):
        """Assemble the calculate-min-fee argv into self.prepared_transaction."""
        command = self.base_command
        command.append("--tx-body-file")
        command.append(self.raw_transaction)
        len_output_txs = len(self.transaction_config.output_txs)
        # NOTE(review): input count is hard-coded to 1 and witness count to
        # outputs+1 -- confirm this matches all callers' transaction shapes.
        command.extend(
            [
                "--tx-in-count",
                "1",
                "--tx-out-count",
                str(len_output_txs),
                "--witness-count",
                str(1 + len_output_txs),
            ]
        )
        command = self.apply_blockchain(command)
        command.append("--protocol-params-file")
        command.append(self.protocol_file)
        self.prepared_transaction = command
class BuildRawTransaction(BuildTransaction, RawTransaction, SignAndSubmit):
    """Raw (explicit-fee) build: drafts, fee-estimates, rebuilds, can sign+submit."""

    raw = True

    @property
    def base_command(self):
        return ["cardano-cli", "transaction", "build-raw", "--alonzo-era"]

class MintRawTransaction(BuildTransaction, RawTransaction, SignAndSubmit):
    """Raw build that also mints tokens (adds the config's mint arguments)."""

    raw = True
    minting = True

    @property
    def base_command(self):
        return ["cardano-cli", "transaction", "build-raw", "--alonzo-era"]
|
import uuid
import pytest
from aiobaro import __version__
def test_version():
    """Package version is pinned to 0.1.0."""
    assert __version__ == "0.1.0"

@pytest.mark.asyncio
async def test_login_info(matrix_client):
    """GET /login returns the supported login flows."""
    result = await matrix_client.login_info()
    assert result.ok

@pytest.mark.asyncio
async def test_register(matrix_client):
    """A new user can be registered with a password."""
    result = await matrix_client.register(
        "test_user", password="<PASSWORD>"
    )
    assert result.ok

@pytest.mark.asyncio
async def test_login(matrix_client):
    """The registered user can log in with the same credentials."""
    result = await matrix_client.login("test_user", password="<PASSWORD>")
    assert result.ok
@pytest.mark.asyncio
async def test_room_create(matrix_client, seed_data):
    """Creating a room with default options succeeds and yields a room_id."""
    room_options = dict(
        name="Room",
        room_alias_name=None,
        topic=None,
        room_version=None,
        federate=True,
        is_direct=False,
        preset=None,
        invite=None,
        initial_state=None,
        power_level_override=None,
    )
    result = await matrix_client.room_create(**room_options)
    assert result.ok
    assert result.json().get("room_id")
@pytest.mark.asyncio
async def test_sync(matrix_client):
    """A bare /sync call with all-default parameters succeeds."""
    sync_options = dict(
        since=None,
        timeout=None,
        data_filter=None,
        full_state=None,
        set_presence=None,
    )
    result = await matrix_client.sync(**sync_options)
    assert result.ok
@pytest.mark.asyncio
async def test_room_send(matrix_client, seed_data):
    """Sending a custom event into the seeded room returns an event_id."""
    room_id = seed_data.room.json()["room_id"]
    event_type = "m.aiobaro.text.msg"
    body = {"body": "hello"}
    # Transaction id must be unique per send; uuid1 is enough for tests.
    tx_id = str(uuid.uuid1())
    result = await matrix_client.room_send(
        room_id,
        event_type,
        body,
        tx_id,
    )
    assert result.ok
    assert result.json()["event_id"]

@pytest.mark.asyncio
async def test_room_get_event(matrix_client, seed_data):
    """An event that was just sent can be fetched back by its event_id."""
    # Create an event
    room_id = seed_data.room.json()["room_id"]
    result = await matrix_client.room_send(
        room_id,
        "m.aiobaro.text.msg",
        {"body": "TEST 0"},
        str(uuid.uuid1()),
    )
    assert result.ok
    event_id = result.json()["event_id"]
    # get the event
    result = await matrix_client.room_get_event(room_id, event_id)
    assert result.ok
    assert result.json()["content"]["body"] == "TEST 0"
@pytest.mark.asyncio
async def test_room_put_state(matrix_client, seed_data):
    """A state event can be written into the seeded room."""
    result = await matrix_client.room_put_state(
        room_id=seed_data.room.json()["room_id"],
        event_type="m.aiobaro.state.event.tests",
        body={"test.key": "test.value"},
        state_key="state-key-test",
    )
    assert result.ok
    assert result.json()["event_id"]

@pytest.mark.asyncio
async def test_room_get_state_event(matrix_client, seed_data):
    """A freshly written state event can be read back by type and state_key."""
    room_id = seed_data.room.json()["room_id"]
    event_type = "m.aiobaro.state.event.tests"
    state_key = "state-key-test"
    # Overwrite the state so this test does not depend on test ordering.
    result = await matrix_client.room_put_state(
        room_id=room_id,
        event_type="m.aiobaro.state.event.tests",
        body={"test.key": "test.value.1"},
        state_key=state_key,
    )
    assert result.ok
    result = await matrix_client.room_get_state_event(
        room_id, event_type, state_key=state_key
    )
    assert result.ok
    assert result.json()["test.key"] == "test.value.1"

@pytest.mark.asyncio
async def test_room_get_state(matrix_client, seed_data):
    """The full state of the seeded room can be fetched."""
    room_id = seed_data.room.json()["room_id"]
    result = await matrix_client.room_get_state(room_id)
    assert result.ok
# BUG FIX: these async placeholder tests were missing @pytest.mark.asyncio,
# so pytest collected them as never-awaited coroutines (vacuous passes /
# warnings) instead of running them. The markers are added below; the test
# bodies themselves are unchanged placeholders awaiting real arguments.
@pytest.mark.asyncio
async def test_room_redact(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.room_redact(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_room_kick(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.room_kick(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_room_ban(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.room_ban(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_room_unban(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.room_unban(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_room_invite(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.room_invite(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_join(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.join(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_room_leave(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.room_leave(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_room_forget(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.room_forget(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_room_messages(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.room_messages(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_keys_upload(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.keys_upload(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_keys_query(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.keys_query(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_keys_claim(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.keys_claim(*args, **kwargs)
    assert result.ok
@pytest.mark.asyncio
async def test_to_device(matrix_client, seed_data):
    """A to-device message can be sent to a seeded user's device."""
    devices_info = seed_data.devices
    device_id = devices_info.json()["devices"][0]["device_id"]
    event_type = "m.new_device"
    tx_id = str(uuid.uuid1())
    user = seed_data.users[0].json()
    content = {
        "messages": {
            user["user_id"]: {device_id: {"example_content_key": "value"}}
        }
    }
    result = await matrix_client.to_device(event_type, content, tx_id)
    assert result.ok

@pytest.mark.asyncio
async def test_devices(matrix_client, seed_data):
    """The seeded account reports exactly its two seeded devices."""
    result = await matrix_client.devices()
    assert result.ok
    assert len(result.json()["devices"]) == 2

@pytest.mark.asyncio
async def test_update_device(matrix_client, seed_data):
    """A device's display name can be changed and is visible afterwards."""
    devices_info = seed_data.devices
    # Pre-condition: the seed fixture named the first device.
    assert (
        devices_info.json()["devices"][0]["display_name"]
        == "Seed_user_1' device"
    )
    device_id = devices_info.json()["devices"][0]["device_id"]
    content = {"display_name": "<NAME>"}
    result = await matrix_client.update_device(device_id, content)
    assert result.ok
    devices = await matrix_client.devices()
    # The renamed device must now appear in the device list.
    assert list(
        filter(
            lambda d: d["display_name"] == "<NAME>",
            devices.json()["devices"],
        )
    )

@pytest.mark.asyncio
async def test_delete_devices(matrix_client, seed_data):
    """Deleting a device (with interactive auth) removes it from the list."""
    devices_to_delete = [seed_data.devices.json()["devices"][0]["device_id"]]
    user = await matrix_client.whoami()
    assert user.ok
    # Interactive-auth payload required by the delete_devices endpoint.
    auth = {
        "type": "m.login.password",
        "identifier": {"type": "m.id.user", "user": user.json()["user_id"]},
        "password": "<PASSWORD>",
    }
    result = await matrix_client.delete_devices(devices_to_delete, auth=auth)
    assert result.ok
    devices = await matrix_client.devices()
    assert devices.ok
    # None of the deleted device ids may remain.
    assert not set(
        map(lambda d: d["device_id"], devices.json()["devices"])
    ) & set(devices_to_delete)
# BUG FIX: same defect as the placeholder block above -- these async tests
# were missing @pytest.mark.asyncio, so pytest never actually awaited them.
@pytest.mark.asyncio
async def test_joined_members(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.joined_members(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_joined_rooms(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.joined_rooms(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_room_resolve_alias(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.room_resolve_alias(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_room_typing(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.room_typing(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_update_receipt_marker(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.update_receipt_marker(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_room_read_markers(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.room_read_markers(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_content_repository_config(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.content_repository_config(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_upload(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.upload(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_download(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.download(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_thumbnail(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.thumbnail(*args, **kwargs)
    assert result.ok
# NOTE(review): the profile/presence tests below call other test functions
# directly to set up state (register/login); consider extracting fixtures.
@pytest.mark.asyncio
async def test_profile_set_displayname(matrix_client):
    """The logged-in user's display name can be set."""
    await test_register(matrix_client)
    await test_login(matrix_client)
    user_id = "@test_user:baro"
    display_name = "Test USER"
    result = await matrix_client.profile_set_displayname(user_id, display_name)
    assert result.ok

@pytest.mark.asyncio
async def test_profile_get_displayname(matrix_client):
    """The display name set above can be read back."""
    await test_profile_set_displayname(matrix_client)
    user_id = "@test_user:baro"
    result = await matrix_client.profile_get_displayname(user_id)
    assert result.ok
    assert result.json().get("displayname") == "Test USER"

@pytest.mark.asyncio
async def test_profile_set_avatar(matrix_client):
    """The logged-in user's avatar URL can be set."""
    await test_register(matrix_client)
    await test_login(matrix_client)
    user_id = "@test_user:baro"
    avatar_url = "mxc://matrix.org/avatar_url"
    result = await matrix_client.profile_set_avatar(user_id, avatar_url)
    assert result.ok

@pytest.mark.asyncio
async def test_profile_get_avatar(matrix_client):
    """The avatar URL set above can be read back."""
    await test_profile_set_avatar(matrix_client)
    user_id = "@test_user:baro"
    result = await matrix_client.profile_get_avatar(user_id)
    assert result.ok
    assert result.json().get("avatar_url") == "mxc://matrix.org/avatar_url"

@pytest.mark.asyncio
async def test_profile_get(matrix_client):
    """The combined profile endpoint returns the default display name."""
    await test_register(matrix_client)
    await test_login(matrix_client)
    user_id = "@test_user:baro"
    result = await matrix_client.profile_get(user_id)
    assert result.ok
    assert result.json().get("displayname") == "test_user"

@pytest.mark.asyncio
async def test_set_presence(matrix_client):
    """The user's presence state can be set to online."""
    await test_register(matrix_client)
    await test_login(matrix_client)
    user_id = "@test_user:baro"
    presence = "online"
    result = await matrix_client.set_presence(user_id, presence)
    assert result.ok

@pytest.mark.asyncio
async def test_get_presence(matrix_client):
    """The presence set above can be read back."""
    await test_set_presence(matrix_client)
    user_id = "@test_user:<PASSWORD>o"
    result = await matrix_client.get_presence(user_id)
    assert result.ok
    assert result.json().get("presence") == "online"

@pytest.mark.asyncio
async def test_whoami(matrix_client):
    """/whoami returns the id of the authenticated user."""
    await test_register(matrix_client)
    await test_login(matrix_client)
    result = await matrix_client.whoami()
    assert result.ok
    assert result.json().get("user_id")
# BUG FIX: same defect as the earlier placeholder blocks -- missing
# @pytest.mark.asyncio meant pytest never awaited these coroutines.
@pytest.mark.asyncio
async def test_room_context(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.room_context(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_upload_filter(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.upload_filter(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_set_pushrule(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.set_pushrule(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_delete_pushrule(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.delete_pushrule(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_enable_pushrule(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.enable_pushrule(*args, **kwargs)
    assert result.ok

@pytest.mark.asyncio
async def test_set_pushrule_actions(matrix_client):
    args, kwargs = [], {}
    result = await matrix_client.set_pushrule_actions(*args, **kwargs)
    assert result.ok
@pytest.mark.asyncio
async def test_logout(matrix_client):
    """Log the client out; relies on session state from earlier tests."""
    result = await matrix_client.logout()
    assert result.ok
|
<gh_stars>10-100
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setup import tc
def _make_frame(tc):
    """Build a small 4-row test frame (name, age, tenure, phone)."""
    schema = [('name',str), ('age', int), ('tenure', int), ('phone', str)]
    rows = [['Fred', 39, 16, '555-1234'], ['Susan', 33, 3, '555-0202'], ['Thurston', 65, 26, '555-4510'], ['Judy', 44, 14, '555-2183']]
    frame = tc.frame.create(rows, schema)
    return frame
def test_take_python_backend(tc):
    """Exercise frame.take with the Python backend: column selection,
    offsets, zero counts, over-large counts, and out-of-range offsets."""
    frame = _make_frame(tc)
    data1 = frame.take(2, columns=['name', 'phone'])
    assert(data1 == [['Fred', '555-1234'], ['Susan', '555-0202']])
    data2 = frame.take(2, offset=2)
    assert(data2 == [['Thurston', 65, 26, '555-4510'], ['Judy', 44, 14, '555-2183']])
    data3 = frame.take(2, offset=2, columns=['name', 'tenure'])
    assert(data3 == [['Thurston', 26], ['Judy', 14]])
    data4 = frame.take(0, offset=2, columns=['name', 'tenure'])
    assert(data4 == [])
    data5 = frame.take(10)  # count larger than the frame returns all rows
    assert(data5 == [[u'Fred', 39, 16, u'555-1234'], [u'Susan', 33, 3, u'555-0202'], [u'Thurston', 65, 26, u'555-4510'], [u'Judy', 44, 14, u'555-2183']])
    data6 = frame.take(3, offset=3)
    assert(data6 == [[u'Judy', 44, 14, u'555-2183']])
    data7 = frame.take(3, offset=3, columns=['name', 'tenure'])
    assert(data7 == [['Judy', 14]])
    data8 = frame.take(2, offset=6, columns=['name', 'tenure'])  # offset beyond
    assert(data8 == [])
def test_take_scala_backend(tc):
    """Same take() cases as the Python-backend test, after forcing the
    frame over to the Scala backend via the _scala property access."""
    frame = _make_frame(tc)
    frame._scala  # touching the property migrates the frame to Scala
    data1 = frame.take(2, columns=['name', 'phone'])
    assert(data1 == [[u'Fred', u'555-1234'], [u'Susan', u'555-0202']])
    data2 = frame.take(2, offset=2)
    assert(data2 == [[u'Thurston', 65, 26, u'555-4510'], [u'Judy', 44, 14, u'555-2183']])
    data3 = frame.take(2, offset=2, columns=['name', 'tenure'])
    assert(data3 == [[u'Thurston', 26], [u'Judy', 14]])
    data4 = frame.take(0, offset=2, columns=['name', 'tenure'])
    assert(data4 == [])
    data5 = frame.take(10)
    assert(data5 == [[u'Fred', 39, 16, u'555-1234'], [u'Susan', 33, 3, u'555-0202'], [u'Thurston', 65, 26, u'555-4510'], [u'Judy', 44, 14, u'555-2183']])
    data6 = frame.take(3, offset=3)
    assert(data6 == [[u'Judy', 44, 14, u'555-2183']])
    data7 = frame.take(3, offset=3, columns=['name', 'tenure'])
    assert(data7 == [['Judy', 14]])
    data8 = frame.take(2, offset=6, columns=['name', 'tenure'])  # offset beyond
    assert(data8 == [])
def test_take_python_backend_negative(tc):
    """take() must raise ValueError for a negative count or offset.

    Fix: corrected the "arugment" typo in both failure messages.
    """
    frame = _make_frame(tc)
    try:
        frame.take(-1)
    except ValueError:
        pass
    else:
        raise RuntimeError("expected bad argument error")
    try:
        frame.take(3, offset=-10)
    except ValueError:
        pass
    else:
        raise RuntimeError("expected bad argument error")
def test_take_scala_backend_negative(tc):
    """Scala-backend variant: take() must raise ValueError for negative
    count or offset.

    Fix: corrected the "arugment" typo in both failure messages.
    """
    frame = _make_frame(tc)
    frame._scala  # touching the property migrates the frame to Scala
    try:
        frame.take(-1)
    except ValueError:
        pass
    else:
        raise RuntimeError("expected bad argument error")
    try:
        frame.take(3, offset=-10)
    except ValueError:
        pass
    else:
        raise RuntimeError("expected bad argument error")
|
import logging
import random
import re
import tempfile
import uuid
from enum import Enum, auto
from importlib import import_module
from pathlib import Path
from typing import List, Callable, Any, Dict, Union, Optional
import attr
from configuror import Config
from fake_useragent import UserAgent, FakeUserAgentError
from .message_pack import datetime_encoder, datetime_decoder
logger = logging.getLogger('scalpel')
def check_value_greater_or_equal_than_0(_, attribute: attr.Attribute, value: int) -> None:
    """attrs validator: reject values below zero."""
    if value >= 0:
        return
    message = f'{attribute.name} must be a positive integer'
    logger.exception(message)
    raise ValueError(message)
def check_max_delay_greater_or_equal_than_min_delay(instance: 'Configuration', attribute: attr.Attribute,
                                                    value: int) -> None:
    """attrs validator: max delay must be >= the instance's min delay."""
    if value >= instance.min_request_delay:
        return
    message = f'{attribute.name} must be greater or equal than min_request_delay'
    logger.exception(message)
    raise ValueError(message)
def check_file_presence(_, attribute: attr.Attribute, filename: str) -> None:
    """attrs validator: ensure *filename* points to an existing path.

    Raises:
        FileNotFoundError: if the path does not exist.
    """
    path = Path(filename)
    if not path.exists():
        # Bug fix: the message was the placeholder-less string
        # 'File (unknown) does not exist'; report the offending path.
        message = f'File {filename} does not exist'
        logger.exception(f'attribute {attribute.name} does not have a valid path: {message}')
        raise FileNotFoundError(message)
def check_driver_presence(config: 'Configuration', attribute: attr.Attribute, filename: str) -> None:
    """attrs validator: driver path must exist, unless it is one of the
    bare executable names resolved through PATH by selenium."""
    if filename not in ('chromedriver', 'geckodriver'):
        check_file_presence(config, attribute, filename)
def validate_robots_folder(_, attribute: attr.Attribute, path: Path) -> None:
    """attrs validator: *path* must exist and be writable and readable.

    Probes the folder by writing and reading a temporary dummy file.

    Raises:
        FileNotFoundError: if the folder does not exist.
        PermissionError: if the dummy file cannot be written or read.
    """
    if not path.exists():
        message = f'{attribute.name} does not exist'
        logger.exception(message)
        raise FileNotFoundError(message)
    dummy_file = path / 'dummy_file'
    try:
        try:
            dummy_file.write_text('hello')
        except PermissionError:
            logger.exception(f'Cannot write file in {path}')
            raise
        try:
            dummy_file.read_text()
        except PermissionError:
            logger.exception(f'Cannot read file in {path}')
            raise
    finally:
        # Robustness fix: always remove the probe file, even when one of
        # the checks raised (the original leaked it on failure).
        if dummy_file.exists():
            dummy_file.unlink()
def check_file_can_be_created(_, _attribute: attr.Attribute, value: str) -> None:
    """attrs validator: verify a file can be created at *value* (None is accepted)."""
    if value is None:
        return
    candidate = Path(value)
    # touch helps to see if a file can be created with the given path
    candidate.touch()
    # we don't want to have a created file if other attributes validation failed
    candidate.unlink()
# I could just use return type "Any" but I want to insist on the fact that the function must
# first return a boolean and in the other cases, the value given at input
def bool_converter(value: Any) -> Union[bool, Any]:
    """Convert common truthy/falsy strings to bool; pass non-strings through.

    Raises ValueError for a string that represents neither value.
    """
    if not isinstance(value, str):
        logger.debug('%s is not a string, returned it as it is', value)
        return value
    lowered = value.lower()
    if lowered in ('1', 'true', 'yes', 'y'):
        logger.debug('converts %s to True', value)
        return True
    if lowered in ('0', 'false', 'no', 'n'):
        logger.debug('converts %s to False', value)
        return False
    message = f'{value} does not represent a boolean'
    logger.exception(message)
    raise ValueError(message)
def get_callable_from_string(callable_string: str) -> Callable:
    """Resolve a dotted path such as 'package.module.func' to the attribute it names."""
    module_path, _, attribute_name = callable_string.rpartition('.')
    return getattr(import_module(module_path), attribute_name)
# The same logic as the bool converter applies to the type of return
def callable_list_converter(value: Any) -> Union[List[Callable], Any]:
if isinstance(value, list):
if not all(isinstance(item, str) for item in value):
logger.debug('not all items in the list are a string, returned it as it: %s', value)
return value
str_callable_list = value
elif isinstance(value, str):
str_callable_list = re.split(r',\s*|;\s*|:\s*|\s+', value)
else:
logger.debug('%s is not a string or a list of strings, returned it as it is', value)
return value
callables = []
for str_callable in str_callable_list:
callables.append(get_callable_from_string(str_callable))
logger.debug('returning callables: %s', callables)
return callables
def msgpack_converter(value: Any) -> Union[Callable, Any]:
    """Resolve a dotted-path string to a callable; pass anything else through."""
    if isinstance(value, str):
        return get_callable_from_string(value)
    logger.debug(f'{value} is not a string, returning it as it')
    return value
def str_converter(value: Any) -> Optional[str]:
    """Stringify *value*; Path objects become absolute paths, None stays None."""
    if value is None:
        return None
    return str(value.absolute()) if isinstance(value, Path) else str(value)
class Browser(Enum):
    """An enum with different browser values."""
    # values are opaque auto() integers; only identity/name matter
    FIREFOX = auto()
    CHROME = auto()
def browser_converter(value: Any) -> Any:
    """Map 'firefox'/'chrome' strings (any case) to Browser members.

    Unrecognised strings and non-strings are returned as-is so the
    attrs validator can reject them with a proper error.
    """
    if not isinstance(value, str):
        return value
    try:
        return Browser[value.upper()]
    except KeyError:
        return value
# Reusable attrs validator bundles shared by the Configuration attributes below.
positive_int_validators = [attr.validators.instance_of(int), check_value_greater_or_equal_than_0]
max_delay_validators = [*positive_int_validators, check_max_delay_greater_or_equal_than_min_delay]
positive_float_validators = [attr.validators.instance_of(float), check_value_greater_or_equal_than_0]
# middleware lists must be a list/tuple whose members are all callables
middleware_validator = attr.validators.deep_iterable(
    member_validator=attr.validators.is_callable(),
    iterable_validator=attr.validators.instance_of((list, tuple))
)
backup_filename_validators = [attr.validators.instance_of(str), check_file_can_be_created]
# optional path: None is allowed, otherwise the file must be creatable
selenium_path_validators = [attr.validators.optional(attr.validators.instance_of(str)), check_file_can_be_created]
@attr.s(frozen=True)
class Configuration:
    """
    Configure variables for your spider.

    **Parameters:**

    * **min_request_delay:** The minimum delay to wait between two http requests. Defaults to 0s.
    * **max_request_delay:** The maximum delay to wait between two http requests. Defaults to 0s.
    * **fetch_timeout:** The timeout to fetch http resources using the inner
    [httpx](https://www.python-httpx.org/) client. Defaults to 5s.
    * **selenium_find_timeout:** The timeout for selenium driver to find an element in a page. Defaults to 10s.
    * **selenium_driver_log_file:** The file where the browser log debug messages. Defaults to *driver.log*.
    If you want to not create one, just pass `None`.
    * **selenium_browser:** The browser to use with the selenium spider. You can use the `Browser` enum to specify the
    value. Possible values are `Browser.FIREFOX` and `Browser.CHROME`. Defaults to `Browser.FIREFOX`.
    * **selenium_driver_executable_path:** The path to the browser driver. Defaults to *geckodriver* if
    `Browser.FIREFOX` is selected as *selenium_browser*, otherwise defaults to *chromedriver*.
    * **user_agent:** The user agent to fake. Mainly useful for the static spider. Defaults to a random value provided
    by [fake-useragent](https://pypi.org/project/fake-useragent/) and if it does not work, fallback to
    *Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36*
    * **follow_robots_txt:** Decide whether or not the spider should follow robots.txt rules on the website you are
    scraping. Defaults to `False`.
    * **robots_cache_folder:** A folder to cache content of different website robots.txt file to avoid retrieving
    it each time you want to analyze an html page. Default to the system temporary directory.
    * **backup_filename:** The filename where scraped items will be written. If you don't want one, simply pass `None`.
    Defaults to *backup-{uuid}.mp* where uuid is a `uuid.uuid4` string value. Note that values inserted in this file
    are streamed using `msgpack`. Look at the documentation to see how to use it.
    * **response_middlewares:** A list of callables that will be called with the callable that fetch the http resource.
    This parameter is only useful for the **static spider**. Defaults to an empty list.
    * **item_processors:** A list of callables that will be called with a scraped item. Defaults to an empty list.
    * **msgpack_encoder:** A callable that will be called when `msgpack` serializes an item.
    Defaults to `scalpel.datetime_encoder`.
    * **msgpack_decoder:** A callable that will be called when `msgpack` deserializes an item.
    Defaults to `scalpel.datetime_decoder`.

    Usage:

    ```
    from scalpel import Configuration, Browser
    config = Configuration(
        min_request_delay=1, max_request_delay=3, follow_robots_txt=True, selenium_browser=Browser.CHROME
    )
    ```
    """
    # NOTE: attrs evaluates converters/validators in declaration order, so
    # the relative position of some attributes below is significant.
    min_request_delay: int = attr.ib(default=0, converter=int, validator=positive_int_validators)
    max_request_delay: int = attr.ib(default=0, converter=int, validator=max_delay_validators)
    fetch_timeout: float = attr.ib(default=5.0, converter=float, validator=positive_float_validators)
    selenium_find_timeout: float = attr.ib(default=10.0, converter=float, validator=positive_float_validators)
    selenium_driver_log_file: Optional[str] = attr.ib(
        converter=str_converter, default='driver.log', validator=selenium_path_validators
    )
    selenium_browser: Browser = attr.ib(
        default=Browser.FIREFOX, converter=browser_converter, validator=attr.validators.in_(Browser)
    )
    # the default value of this attribute depends on selenium_browser, so the declaration order is important here
    selenium_driver_executable_path: str = attr.ib(
        converter=str_converter, validator=[attr.validators.instance_of(str), check_driver_presence]
    )
    user_agent: str = attr.ib(validator=attr.validators.instance_of(str))
    follow_robots_txt: bool = attr.ib(
        default=False, converter=bool_converter, validator=attr.validators.instance_of(bool)
    )
    robots_cache_folder: Path = attr.ib(converter=Path, validator=validate_robots_folder)
    backup_filename: str = attr.ib(validator=backup_filename_validators)
    response_middlewares: List[Callable] = attr.ib(
        repr=False, converter=callable_list_converter, factory=list, validator=middleware_validator
    )
    item_processors: List[Callable] = attr.ib(
        repr=False, converter=callable_list_converter, factory=list, validator=middleware_validator
    )
    msgpack_encoder: Callable = attr.ib(
        repr=False, converter=msgpack_converter, default=datetime_encoder, validator=attr.validators.is_callable()
    )
    msgpack_decoder: Callable = attr.ib(
        repr=False, converter=msgpack_converter, default=datetime_decoder, validator=attr.validators.is_callable()
    )

    @user_agent.default
    def _get_default_user_agent(self) -> str:
        """Return a random user agent, or a static fallback if fake-useragent fails."""
        try:
            ua = UserAgent()
            user_agent = ua.random
            logger.debug('returning a random user agent: %s', user_agent)
            return user_agent
        except FakeUserAgentError:
            # for the fallback, I use a recent version found on http://useragentstring.com/
            # not sure if this is the best strategy but we will stick with it for now
            fallback = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) ' \
                       'Chrome/41.0.2225.0 Safari/537.36'
            logger.debug('returning fallback value for user agent: %s', fallback)
            return fallback

    @robots_cache_folder.default
    def _get_robots_cache_folder(self) -> Path:
        """Create and return a fresh temporary folder for robots.txt caching."""
        temp_dir = Path(tempfile.mkdtemp(prefix='robots_'))
        logger.debug('returning default created temporary directory: %s', temp_dir)
        return temp_dir

    @backup_filename.default
    def _get_backup_filename(self) -> str:
        """Return a unique default backup filename (backup-<uuid4>.mp)."""
        name = f'backup-{uuid.uuid4()}.mp'
        logger.debug('returning computed backup filename: %s', name)
        return name

    @selenium_driver_executable_path.default
    def _get_driver_executable_path(self) -> str:
        """Pick the driver executable matching the selected browser."""
        executable = 'geckodriver' if self.selenium_browser is Browser.FIREFOX else 'chromedriver'
        # I don't use self.selenium_browser.name attribute because some tests fail here when testing browser attribute
        # with a string
        logger.debug(
            'returning default executable path to %s since browser selected is %s', executable,
            self.selenium_browser
        )
        return executable

    @property
    def request_delay(self) -> int:
        """
        A read-only property which is a random value between `min_request_delay` and `max_request_delay`
        (both sides included) and used to wait between two http requests.
        """
        # for bandit, using random module to generate pseudo-random values is not a good
        # idea for cryptography / security purposes, but since we are not in this case, we just
        # ignore this warning.
        # More about the warning: https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b311-random
        delay = random.randint(self.min_request_delay, self.max_request_delay)  # nosec
        logger.debug('returning computed request delay: %s s', delay)
        return delay

    @staticmethod
    def _get_dict_with_lower_keys(data: Dict[str, Any]) -> Dict[str, Any]:
        """Return a copy of *data* with every top-level key lower-cased."""
        data = {key.lower(): value for key, value in data.items()}
        logger.debug('returning dict with lower keys: %s', data)
        return data

    @classmethod
    def _scalpel_attributes(cls, data: Dict[str, Any]) -> Dict[str, Any]:
        """Extract from *data* the 'scalpel' sub-dict entries that match attrs fields."""
        data_key = 'scalpel'
        attributes = {}
        if data_key not in data:
            logger.debug('no namespace "scalpel" in %s, returning empty attributes', data)
            return attributes
        data = cls._get_dict_with_lower_keys(data[data_key])
        for attribute in attr.fields(cls):
            if attribute.name != '_config' and attribute.name in data:
                attributes[attribute.name] = data[attribute.name]
        logger.debug('returning scalpel attributes: %s', attributes)
        return attributes

    @staticmethod
    def _check_file(config_file: Union[Path, str], file_type: str) -> None:
        """Validate that *config_file* is a Path/str pointing to an existing file."""
        if not isinstance(config_file, (Path, str)):
            error_message = f'{file_type} file must be of type Path or str but you provided {type(config_file)}'
            logger.exception(error_message)
            raise TypeError(error_message)
        config_file = Path(config_file)
        if not config_file.is_file():
            error_message = f'file {config_file} does not exist'
            logger.exception(error_message)
            raise FileNotFoundError(error_message)

    @classmethod
    def load_from_yaml(cls, yaml_file: Union[Path, str]) -> 'Configuration':
        """
        Loads configuration from a yaml file.

        **Returns:** `Configuration`

        Usage:

        ```yaml
        # conf.yaml
        scalpel:
          fetch_timeout: 4.0
          user_agent: Mozilla/5.0
          follow_robots_txt: true
        ```

        ```
        from scalpel import Configuration
        conf = Configuration.load_from_yaml('conf.yaml')
        conf.fetch_timeout  # equals to 4.0
        ```
        """
        cls._check_file(yaml_file, 'yaml')
        configuror = Config(mapping_files={'yaml': [f'{yaml_file}']})
        logger.debug('loading configuration from yaml file: %s', f'{yaml_file}')
        return cls(**cls._scalpel_attributes(configuror))

    @classmethod
    def load_from_toml(cls, toml_file: Union[Path, str]) -> 'Configuration':
        """
        Loads configuration from a toml file.

        **Returns:** `Configuration`

        Usage:

        ```toml
        # conf.toml
        [scalpel]
        user_agent = "Mozilla/5.0"
        fetch_timeout = 4.0
        follow_robots_txt = true
        ```

        ```
        from scalpel import Configuration
        conf = Configuration.load_from_toml('conf.toml')
        conf.fetch_timeout  # equals to 4.0
        ```
        """
        cls._check_file(toml_file, 'toml')
        configuror = Config(mapping_files={'toml': [f'{toml_file}']})
        logger.debug('loading configuration from toml file: %s', f'{toml_file}')
        return cls(**cls._scalpel_attributes(configuror))

    @classmethod
    def load_from_dotenv(cls, env_file: Union[Path, str]) -> 'Configuration':
        """
        Loads configuration from a .env file.

        **Returns:** `Configuration`

        Usage:

        ```bash
        # .env
        SCALPEL_USER_AGENT = Mozilla/5.0
        SCALPEL_FETCH_TIMEOUT = 4.0
        SCALPEL_FOLLOW_ROBOTS_TXT = yes
        ```

        ```
        from scalpel import Configuration
        conf = Configuration.load_from_dotenv('.env')
        conf.follow_robots_txt  # equals to True
        ```
        """
        cls._check_file(env_file, 'env')
        configuror = Config(mapping_files={'env': [f'{env_file}']})
        data = configuror.get_dict_from_namespace('SCALPEL_')
        data = {'scalpel': data}  # little trick to search attributes using _scalpel_attributes class method
        logger.debug('loading configuration from .env file: %s', f'{env_file}')
        return cls(**cls._scalpel_attributes(data))
|
<gh_stars>0
import json
import logging
import re
import traceback
from django.http import HttpResponse
from django.conf import settings
from .oauthclient import *
from requests.exceptions import RequestException
log = logging.getLogger('app_logging')

# Request META headers worth echoing in the debug log (see request2str).
important_headers = (
    'HTTP_ACCESSTOKEN',
    'HTTP_ACCEPT',
)
def request2str(r):
    """Render a Django request as a one-shot debug string.

    Includes scheme/path/method, GET params, cookies, the headers listed
    in `important_headers`, and the raw body.

    Fixes: the local variable shadowed the builtin `str`, and repeated
    `+=` concatenation is quadratic — switched to a joined parts list.
    The produced text is byte-identical to the original implementation.
    """
    parts = [r.scheme + ' path: ' + r.path + ' ' + r.method + '\n', 'GET: ']
    for g in r.GET:
        parts.append(g + ': ' + repr(r.GET[g]))
    parts.append('COOKIES: ')
    for g in r.COOKIES:
        parts.append(g + ': ' + repr(r.COOKIES[g]))
    parts.append('Headers: ')
    for h in r.META:
        if h in important_headers:
            parts.append(h + ': ' + repr(r.META[h]) + ' ')
    parts.append('Body: ' + repr(r.body))
    return ''.join(parts)
def log_view(f):
    """Decorator: log the incoming request and outgoing response of a view."""
    def wrap_view(request, *args, **kwargs):
        log.debug('===Request=== ' + request2str(request))
        res = f(request, *args, **kwargs)
        log.debug('===Response=== ' + repr(res) + '\n' + \
            repr(res.serialize()) + ' content: ' + res.content.decode('utf-8'))
        return res
    return wrap_view
def _check_authorization(f, r, rexp, *args, **kwargs):
    """Verify the ACCESSTOKEN header against the auth server, then call *f*.

    The token is validated by asking the OAuth server for the associated
    identity ('me'); the username must match the regex *rexp*.
    Returns an HttpResponse error (401/403/440/503) when validation fails,
    otherwise f(r, me, *args, **kwargs).
    """
    # get authorization from request
    token = r.META.get('HTTP_ACCESSTOKEN', None)
    if token:
        # build client and verify token
        auth_url = 'http://' + settings.AUTH_SERVER
        tp = TokenPlugin(atoken=token, rtoken=None)
        client = OAuthClient(auth_url, tp, settings.CLIENT_ID,
                             settings.SECRET_KEY, 'localhost')
        try:
            me = client.me()
            me['token'] = token
            log.debug('token Auth OK: ' + repr(me))
            match = rexp.match(me['uname'])
            if match:
                log.debug('uname match OK')
            else:
                log.debug('uname match failed')
                return HttpResponse('403 Forbidden', status=403)
        except UnauthorizedException:
            return HttpResponse('401 Unauthorized', status=401)
        except ExpiredException:
            # 440 is a non-standard "login time-out" style status used here
            # to signal an expired token to the caller
            return HttpResponse('440 Token expired', status=440)
        except Exception as e:
            # any other failure (network, server error) maps to 503
            log.debug('Error while verifying token: ' + traceback.format_exc())
            return HttpResponse('503 Service unavailiable', status=503)
        return f(r, me, *args, **kwargs)
    else:
        return HttpResponse('401 Unauthorized', status=401)
def authorize_request_admin(f):
    """Decorator: only allow requests whose token belongs to user 'admin'."""
    admin_only = re.compile('^admin$')
    def _do_authorize(r, *args, **kwargs):
        return _check_authorization(f, r, admin_only, *args, **kwargs)
    return _do_authorize
def authorize_request(f):
    """Decorator: allow requests from any user with a valid token."""
    any_user = re.compile('.+')
    def _do_authorize(r, *args, **kwargs):
        return _check_authorization(f, r, any_user, *args, **kwargs)
    return _do_authorize
def http_exception_guard(f):
    """Decorator: turn requests.RequestException raised by a view into a 503."""
    def catch_http_exceptions(r, *args, **kwargs):
        try:
            return f(r, *args, **kwargs)
        except RequestException:
            log.debug('RequestException caught:\n' + traceback.format_exc())
            return HttpResponse('503 Service unavailiable', status=503)
    return catch_http_exceptions
def paginate_list(l, request):
    """Slice *l* according to 'page'/'size' GET parameters.

    A non-positive size (or absent parameters) returns the whole list.
    """
    page_number = int(request.GET.get('page', '0'))
    page_size = int(request.GET.get('size', '0'))
    if page_size <= 0:
        return l
    start = page_number * page_size
    return l[start:start + page_size]
# common http base class
class HttpOauthClient():
    """Base HTTP client that attaches an OAuth access token to every request.

    Wraps a requests.Session; on a 440 (token expired) response it refreshes
    the tokens once via the auth client and retries the request.
    """
    def __init__(self, url, auth_client):
        self.url = url                    # base URL of the target service
        self.auth_client = auth_client    # OAuth client used to obtain/refresh tokens
        self.s = requests.Session()       # reused connection pool

    def _authed_request_refresh(self, refresh, method, *args, **kwargs):
        """Issue *method* with the ACCESSTOKEN header; retry once after refresh.

        `refresh` guards against infinite recursion: the retry is performed
        with refresh=False.
        """
        atoken = self.auth_client.get_token()
        headers = {'ACCESSTOKEN': atoken}
        # merge with caller-supplied headers, our token wins
        k_headers = kwargs.get('headers', {})
        k_headers.update(headers)
        kwargs['headers'] = k_headers
        r = method(*args, **kwargs)
        if r.status_code == 403:
            raise ForbiddenException('server returned 403 Forbidden')
        elif r.status_code == 401:
            raise UnauthorizedException('server returned 401 Unauthorized')
        elif refresh and r.status_code == 440:
            # non-standard 440 = token expired: refresh and retry once
            self.auth_client.issue_tokens()
            return self._authed_request_refresh(False, method, *args, **kwargs)
        else:
            return r

    def _authed_request(self, method, *args, **kwargs):
        """Authenticated request with a single automatic token refresh."""
        return self._authed_request_refresh(True, method, *args, **kwargs)
# common exception
class AlreadyExists(Exception):
    """Raised when an entity that must be unique already exists.

    Idiom fix: use super().__init__ instead of calling
    Exception.__init__ explicitly.
    """
    def __init__(self, msg):
        super().__init__(msg)
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various utility functions and classes for active imitation learning.
The classes are an agent class for learning a behavior policy and
a discriminator class used to compute where to collect expert data.
The utils are for collecting rollout trajectories and adaptive sampling for
expert data.
"""
import collections
import random
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping
class Agent(object):
  """A class to represent an agent."""

  def __init__(self, in_dim, out_dim, action_type, action_bound=0, hidden=256):
    """Builds the behavior policy network.

    Args:
      in_dim: shape tuple of the observation input.
      out_dim: number of actions (discrete) or action dimensions (continuous).
      action_type: 'discrete' for a softmax head, anything else gets a
        tanh head scaled by action_bound.
      action_bound: magnitude bound applied to continuous actions.
      hidden: number of units in each hidden layer.
    """
    self.action_type = action_type
    self.action_bound = action_bound
    self.discriminator = None
    self.policy = tf.keras.Sequential()
    kernel_init = tf.keras.initializers.Orthogonal(gain=1.0)
    self.policy.add(tf.keras.layers.Dense(
        hidden,
        input_shape=in_dim,
        kernel_initializer=kernel_init))
    self.policy.add(tf.keras.layers.BatchNormalization())
    self.policy.add(tf.keras.layers.Activation(tf.keras.activations.relu))
    self.policy.add(tf.keras.layers.Dense(hidden))
    self.policy.add(tf.keras.layers.BatchNormalization())
    self.policy.add(tf.keras.layers.Activation(tf.keras.activations.relu))
    # Differentiate action types of discrete actions and continuous ones.
    if action_type == 'discrete':
      self.policy.add(tf.keras.layers.Dense(out_dim))
      self.policy.add(tf.keras.layers.BatchNormalization())
      self.policy.add(tf.keras.layers.Activation(
          tf.keras.activations.softmax))
    else:
      self.policy.add(tf.keras.layers.Dense(out_dim))
      self.policy.add(tf.keras.layers.BatchNormalization())
      # tanh output in [-1, 1], scaled into [-action_bound, action_bound]
      self.policy.add(tf.keras.layers.Activation(tf.keras.activations.tanh))
      self.policy.add(
          tf.keras.layers.Lambda(lambda x: x * action_bound))
    self.policy.compile(optimizer='adam',
                        loss='mean_squared_error')

  def action(self, obs):
    """Compute an action given an observation."""
    if self.action_type == 'discrete':
      out = self.policy.predict(tf.expand_dims(obs, 0))
      action = np.argmax(out)
    else:
      out = self.policy.predict(tf.expand_dims(obs, 0))
      action = self.clip_action(out, self.action_bound)
    return action[0]

  def clip_action(self, action, bound):
    """Clip a continous action to a valid bound."""
    return np.clip(action, -bound, bound)
class Discriminator(object):
  """Implementation of a discriminator network."""

  def __init__(self, input_dim, hidden=256):
    """Initializes a discriminator.

    Args:
      input_dim: size of the input space.
      hidden: the number of hidden units.
    """
    kernel_init = tf.keras.initializers.Orthogonal(gain=1.0)
    # MLP with two hidden layers and a single logit output
    # (trained with from_logits=True below).
    self.model = tf.keras.Sequential()
    self.model.add(tf.keras.layers.Dense(
        units=hidden,
        input_shape=(input_dim,),
        kernel_initializer=kernel_init))
    self.model.add(tf.keras.layers.BatchNormalization())
    self.model.add(tf.keras.layers.Activation(tf.keras.activations.relu))
    self.model.add(tf.keras.layers.Dense(
        units=hidden,
        kernel_initializer=kernel_init))
    self.model.add(tf.keras.layers.BatchNormalization())
    self.model.add(tf.keras.layers.Activation(tf.keras.activations.relu))
    self.model.add(tf.keras.layers.Dense(
        units=1,
        kernel_initializer=kernel_init))

  def train(self, agent_data, expert_data, epoch, batch_size=1024):
    """Train the discriminator with data from the current agent and the expert.

    Args:
      agent_data: a list of state-action pairs from agent's trajectories.
      expert_data: a list of state-action pairs from expert feedback.
      epoch: the number of epoches to train.
      batch_size: the number of samples to sample for each batch.

    Returns:
      validation accuracy and validation loss.
    """
    self.model.compile(
        optimizer='adam',
        loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
        metrics=[tf.keras.metrics.BinaryAccuracy(threshold=0.0)])
    early_stopping = EarlyStopping(patience=5)
    agent_s_a = []
    expert_s_a = []
    labels = []
    # sample a batch from both agent_data and expert_data.
    agent_batches = random.sample(range(len(agent_data)),
                                  k=min(batch_size, len(agent_data)))
    expert_batches = random.sample(range(len(expert_data)),
                                   k=min(batch_size, len(expert_data)))
    # label 0 = agent sample, label 1 = expert sample; zip truncates to
    # the shorter of the two index lists, keeping the classes balanced.
    for i, j in zip(agent_batches, expert_batches):
      agent_s_a.append(np.concatenate((agent_data[i][0], agent_data[i][1])))
      labels.append(0)
      expert_s_a.append(np.concatenate((expert_data[j][0], expert_data[j][1])))
      labels.append(1)
    # shuffle inputs and labels with the same permutation
    shuffle_order = tf.random.shuffle(range(len(agent_s_a) * 2))
    s_a = agent_s_a + expert_s_a
    s_a = tf.gather(s_a, shuffle_order)
    labels = tf.gather(labels, shuffle_order)
    history = tf.keras.callbacks.History()
    self.model.fit(tf.convert_to_tensor(s_a),
                   tf.convert_to_tensor(labels),
                   epochs=epoch,
                   validation_split=0.2,
                   callbacks=[early_stopping,
                              history])
    return (history.history['val_binary_accuracy'][-1],
            history.history['val_loss'][-1])
def policy_rollout(agent, env, num_traj, horizon):
  """Rollout an agent to collect trajectories.

  Args:
    agent: an agent to rollout.
    env: an environment to perform rollouts.
    num_traj: the number of trajectories to collect.
    horizon: the maximal number of steps for each trajectory.

  Returns:
    states, actions, rewards and observations from rollout trajectories.
  """
  traj_states = []
  traj_actions = []
  traj_rewards = []
  traj_observations = []
  for _ in range(num_traj):
    time_step = env.reset()
    states = []
    rewards = []
    actions = []
    observations = []
    for _ in range(horizon):
      # MuJoCo specific operations.
      states.append(env._gym_env.get_state())  # pylint: disable=protected-access
      observations.append(time_step)
      action = agent.action(time_step.observation)
      actions.append(action)
      time_step = env.step(action)
      rewards.append(float(time_step.reward))
      # stop early when the environment signals the episode end
      if time_step.is_last():
        break
    traj_states.append(states)
    traj_actions.append(actions)
    traj_rewards.append(rewards)
    traj_observations.append(observations)
  return traj_states, traj_actions, traj_rewards, traj_observations
def query_expert(obs, expert):
  """Query the expert policy for actions at selected observations.

  Args:
    obs: a list of observations (each carrying an .observation field).
    expert: an expert policy with an .action(ob) method returning a step
      whose .action field is the chosen action.

  Returns:
    observations and actions from the expert policy, as two parallel lists.
  """
  expert_obs, expert_actions = [], []
  for observation in obs:
    chosen = expert.action(observation).action
    expert_obs.append(observation.observation)
    expert_actions.append(chosen)
  return expert_obs, expert_actions
def bucket_select(scores, num_bucket, num_sample, reverse=False):
  """Select a subset of scores by dividing them into buckets and pick the highest/lowest scores within each bucket.

  Args:
    scores: a list of scores.
    num_bucket: the number of bucket to divide into.
    num_sample: the number of samples to return.
    reverse: if True, return the lowest scores, otherwise, return the highest
      ones.

  Returns:
    selected indices (into the original `scores` list).
  """
  selected = []
  num_bucket = min(num_bucket, len(scores))
  # split the complete range into buckets.
  bucket_range = np.array_split(range(len(scores)),
                                min(len(scores), num_bucket))
  # sample buckets with replacement (randomized; a bucket may be drawn
  # several times and then contributes several items).
  selected_bucket = random.choices(range(num_bucket), k=num_sample)
  # compute how many times each bucket is selected.
  num_selected = collections.defaultdict(lambda: 0)
  for bucket in selected_bucket:
    num_selected[bucket] += 1
  # within each bucket, select the items.
  for idx, bucket in enumerate(bucket_range):
    sub_scores = scores[bucket[0] : bucket[-1] + 1]
    argsort = np.argsort(sub_scores)
    for i in range(min(num_selected[idx], len(sub_scores))):
      if reverse:
        # i-th lowest within the bucket; bucket[0] converts the local
        # index back into a global index.
        selected.append(argsort[i] + bucket[0])
      else:
        # i-th highest within the bucket
        selected.append(argsort[-i-1] + bucket[0])
  return selected
def behavior_cloning(agent, expert_data):
  """Train the model with expert data via behavior cloning.

  Args:
    agent: the agent whose policy network is trained in place.
    expert_data: a list of (state, action) pairs from the expert.

  Returns:
    the final validation loss.
  """
  states = []
  actions = []
  for i in range(len(expert_data)):
    state, action = expert_data[i]
    states.append(state)
    actions.append(action)
  states = tf.convert_to_tensor(states)
  actions = tf.convert_to_tensor(actions)
  # re-compiling resets the optimizer state before each cloning phase
  agent.policy.compile(optimizer='adam',
                       loss='mean_squared_error')
  early_stopping = EarlyStopping(patience=5)
  history = tf.keras.callbacks.History()
  agent.policy.fit(states, actions,
                   batch_size=256,
                   validation_split=0.2,
                   epochs=1000,
                   callbacks=[early_stopping,
                              history])
  return history.history['val_loss'][-1]
def top_discriminator(discriminator, traj_obs, traj_actions, expert, num_bucket,
                      num_sample):
  """Select states to collect according to lowest discriminator scores.

  Args:
    discriminator: a discriminator.
    traj_obs: observations from a list of trajectories.
    traj_actions: corresponding actions for observations.
    expert: an expert policy.
    num_bucket: the number of buckets.
    num_sample: the number of samples to collect.

  Returns:
    new expert data and the agent data from current trajectories.
  """
  expert_data = []
  agent_data = []
  for i in range(len(traj_obs)):
    obs = traj_obs[i]
    acts = traj_actions[i]
    inputs = []
    for ob, act in zip(obs, acts):
      inputs.append(np.concatenate((ob.observation, act)))
      agent_data.append((ob.observation, act))
    # compute the discriminator scores for state-action pairs from a single
    # trajectory.
    dis_scores = tf.squeeze(
        discriminator.model.predict(tf.convert_to_tensor(inputs)))
    # reverse=True picks the LOWEST-scoring pairs, i.e. the states where
    # the agent looks least expert-like.
    selected = bucket_select(dis_scores,
                             num_bucket,
                             num_sample,
                             reverse=True)
    # query expert actions on the selected states.
    for j in selected:
      expert_action = expert.action(obs[j]).action
      expert_data.append((obs[j].observation, expert_action))
  return expert_data, agent_data
def random_samples(traj_obs, expert, num_sample):
  """Randomly sample a subset of states to collect expert feedback.

  Args:
    traj_obs: observations from a list of trajectories.
    expert: an expert policy.
    num_sample: the number of samples to collect per trajectory.

  Returns:
    new expert data as a list of (observation, expert_action) pairs.
  """
  expert_data = []
  for obs in traj_obs:
    # np.random.choice with replace=False already yields a uniformly random
    # subset, so the previous in-place random.shuffle(obs) was redundant —
    # and it mutated the caller's trajectory list, an unwanted side effect.
    chosen = np.random.choice(len(obs),
                              size=min(num_sample, len(obs)),
                              replace=False)
    for idx in chosen:
      state = obs[idx].observation
      action_step = expert.action(obs[idx])
      expert_data.append((state, action_step.action))
  return expert_data
|
# This file will track sectors and will provide get/set interface for sectors.
import numpy as np
import math
import time
import rospy
import yaml
from grid_map_msgs.srv import GetGridMap
import multiprocessing as mp
import queue
class RayTracer:
    """Fills NaN gaps in a 2-D elevation map using multiprocess ray tracing.

    The NaN cells are split into four map quarters; each quarter is sent,
    together with a private copy of the whole map, to a pool of worker
    processes.  Each worker extrapolates/interpolates heights along the ray
    from the map centre (the "viewer") through every NaN cell and returns its
    filled copy; `start()` merges the results back into `self.map`.
    """
    # Size of the worker-process pool (one queued task per map quarter).
    NUMBER_OF_PROCESSES = 4
    def __init__(self):
        # NOTE(review): set_start_method raises RuntimeError if called more
        # than once per process, so only one RayTracer can be constructed.
        mp.set_start_method("forkserver")
        self.width = None   # map width, set by set_map()
        self.height = None  # map height, set by set_map()
        self.map = None     # 2-D float array; NaN marks unknown cells
        self.task_queue = None  # work items for the worker processes
        self.done_queue = None  # filled map copies returned by workers
        self.workers = None     # list of mp.Process handles
        self.start_workers()
    def get_map(self):
        """Return the map shifted so that its minimum value is non-negative."""
        m = np.min(self.map)
        return self.map + abs(m if m < 0 else 0)
    def set_map(self, init_map):
        """Install a new elevation map and cache its dimensions."""
        self.map = init_map
        # NOTE(review): numpy shape is (rows, cols); assigning it to
        # (width, height) looks swapped — harmless for square maps, confirm
        # before using non-square inputs.
        self.width, self.height = self.map.shape
    def start(self):
        """Dispatch the map's NaN cells to the workers and merge the results."""
        nans = np.argwhere(np.isnan(self.map))
        # Boolean masks selecting the NaN coordinates of each map quarter.
        quarters = []
        quarters.append(
            np.logical_and(nans[:, 0] >= self.height / 2, nans[:, 1] >= self.width / 2)
        )
        quarters.append(
            np.logical_and(nans[:, 0] >= self.height / 2, nans[:, 1] < self.width / 2)
        )
        quarters.append(
            np.logical_and(nans[:, 0] < self.height / 2, nans[:, 1] >= self.width / 2)
        )
        quarters.append(
            np.logical_and(nans[:, 0] < self.height / 2, nans[:, 1] < self.width / 2)
        )
        # Start jobs
        for q in quarters:
            self.task_queue.put(
                (np.copy(self.map), list(nans[q]), (self.width, self.height))
            )
        # Wait jobs
        for _ in range(len(quarters)):
            # Copy from each returned map only the cells that are still NaN.
            mask = np.isnan(self.map)
            self.map[mask] = self.done_queue.get(block=True)[mask]
        # Any cell no worker could fill falls back to the map's median height.
        self.map[np.isnan(self.map)] = np.nanmedian(self.map)
    def start_workers(self):
        """Create the task/result queues and launch the worker processes."""
        self.task_queue = mp.Queue()
        self.done_queue = mp.Queue()
        rospy.loginfo("Starting workers")
        self.workers = [
            mp.Process(
                target=self.worker, args=(self.task_queue, self.done_queue, None)
            )
            for _ in range(self.NUMBER_OF_PROCESSES)
        ]
        for p in self.workers:
            p.start()
        rospy.loginfo("Workers ready")
    def stop_workers(self):
        """Forcefully terminate all worker processes.

        NOTE(review): the workers also understand a "STOP" sentinel, but it
        is never sent anywhere; terminate() is used instead.
        """
        for p in self.workers:
            p.terminate()
    def worker(self, task_queue, done_queue, args):
        """Worker-process loop: fill NaN cells of received maps by ray tracing.

        Each task is a tuple (dtm, nans, (width, height)) where `dtm` is a
        private copy of the map and `nans` the [row, col] coordinates to
        fill.  The filled copy is pushed to `done_queue`.  `args` is unused.
        """
        while 1:
            try:
                item = task_queue.get(block=True, timeout=0.05)
            except queue.Empty:
                continue
            if item == "STOP":  # sentinel; see stop_workers()
                break
            dtm, nans, (width, height) = item
            # Viewer position: the centre of the map.
            vx, vy = (width - 1) / 2, (height - 1) / 2
            # Fill cells closest to the viewer first.
            nans.sort(
                key=lambda coord: math.sqrt((vy - coord[0]) ** 2 + (vx - coord[1]) ** 2)
            )
            for py, px in nans:
                if not np.isnan(dtm[py, px]):
                    continue  # already filled while processing an earlier ray
                try:
                    # * Extract the line containing viewer and this point
                    ray = self.calc_line(px, py, vx=vx, vy=vy)
                    # * Find the border point
                    # (bx, by) is where the extended ray leaves the map.
                    if abs(py - vy) > abs(px - vx):
                        by = height - 1 if py - vy > 0 else 0
                        # NOTE(review): `py` comes from the nans array and can
                        # never be None; the guard effectively only avoids a
                        # division by zero.
                        if not py is None and py > 0:
                            bx = by * px / py
                        else:
                            bx = 0
                        bx = (
                            bx
                            if ray[0][1] - ray[len(ray) - 1][1] > 0
                            else width - 1 - bx
                        )
                    elif abs(py - vy) == abs(px - vx):
                        # Exact diagonal: the border point is a map corner.
                        by = height - 1 if py - vy > 0 else 0
                        bx = width - 1 if px - vx > 0 else 0
                    else:
                        bx = width - 1 if px - vx > 0 else 0
                        if not px is None and px > 0:
                            by = bx * py / px
                        else:
                            by = 0
                        by = (
                            by
                            if ray[0][0] - ray[len(ray) - 1][0] > 0
                            else height - 1 - by
                        )
                    # Extend the ray from the target point to the border and
                    # annotate every cell on it with its current height.
                    rest = self.calc_line(bx, by, vx=px, vy=py)
                    ray = np.array(
                        [[y, x, dtm[y, x]] for y, x in np.concatenate((ray, rest[1:]))]
                    )
                    # True when a sub-ray contains no known heights at all.
                    is_rest_nan = (
                        lambda arr: np.count_nonzero(~np.isnan(arr[:, 2])) == 0
                    )
                    for i, (ry, rx, _) in enumerate(ray):
                        if not np.isnan(dtm[math.floor(ry), math.floor(rx)]):
                            continue
                        # * Check if we need to place mirror or wood
                        if is_rest_nan(ray[i:]):
                            # "Mirror" case: no known height further out.
                            # NOTE(review): this loop shadows the outer `i`,
                            # writes the same (ry, rx) cell each iteration and
                            # assigns a whole [y, x, h] row to a scalar cell;
                            # it likely only "works" because the broad except
                            # below zeroes the cell on failure. Confirm the
                            # intended behaviour before changing anything.
                            for i, (rpy, rpx, _) in enumerate(ray[i:]):
                                dtm[math.floor(ry), math.floor(rx)] = ray[:i][
                                    -(i % len(ray[:i]))
                                ]
                        else:
                            # "Wood" case: linearly interpolate between the
                            # previous known height and the next known height.
                            start_h = ray[i - 1]
                            end_i = np.argwhere(~np.isnan(ray[i:][:, 2]))[0]
                            end_h = ray[i:][end_i][0]
                            patch_distance = math.sqrt(
                                (end_h[0] - start_h[0]) ** 2
                                + (end_h[1] - start_h[1]) ** 2
                            )
                            ratio = (end_h[2] - start_h[2]) / patch_distance
                            step = patch_distance / end_i
                            # NOTE(review): `height` below shadows the map
                            # height unpacked from the task tuple; subsequent
                            # iterations of the outer loop then compute the
                            # border point with a corrupted `height`.
                            for pi, (rpy, rpx, _) in enumerate(ray[i : i + end_i[0]]):
                                height = (pi * step[0]) * ratio
                                dtm[math.floor(rpy), math.floor(rpx)] = height
                except Exception:
                    # Best-effort: any failure along this ray flattens the
                    # target cell to height 0.
                    dtm[py, px] = 0
            done_queue.put(dtm)
    def calc_line(self, pox, poy, vx, vy):
        """
        Returns the path to point and expected heights

        Rasterises the segment from the viewer (vx, vy) to the point
        (pox, poy): returns an (N, 2) int array of [y, x] cells, stepping
        one cell at a time along the dominant axis.  The first entry is
        always the viewer and the last entry the target point.
        """
        if abs(pox - vx) > abs(poy - vy):
            # x is the dominant axis: one column per step.
            if pox > vx:
                xs = np.arange(vx, pox, step=1)
            else:
                xs = np.flip(np.arange(pox, vx, step=1))
            if poy > vy:
                ys = np.linspace(vy, poy, num=len(xs))
            else:
                ys = np.flip(np.linspace(poy, vy, num=len(xs)))
        else:
            # y is the dominant axis: one row per step.
            if poy > vy:
                ys = np.arange(vy, poy, step=1)
            else:
                ys = np.flip(np.arange(poy, vy, step=1))
            if pox > vx:
                xs = np.linspace(vx, pox, num=len(ys))
            else:
                xs = np.flip(np.linspace(pox, vx, num=len(ys)))
        xs = xs.astype(int)
        ys = ys.astype(int)
        arr = np.column_stack((ys, xs))
        # Ensure the viewer is the first cell of the path...
        fy, fx = arr[0]
        if fx != vx or fy != vy:
            arr = np.insert(arr, 0, [vy, vx], axis=0)
        # ...and the target point the last.
        fy, fx = arr[len(arr) - 1]
        if fx != pox or fy != poy:
            arr = np.insert(arr, len(arr), [poy, pox], axis=0)
        return arr
class SectorService:
    """Tracks the local terrain sector and provides get/set access to it.

    Pulls elevation submaps around the robot from the elevation_mapping ROS
    service and uses a RayTracer to fill unobserved (NaN) cells.
    """

    def __init__(self):
        # Use a context manager so the config file handle is always closed;
        # the previous `yaml.load(open(...))` leaked the open file object.
        with open("./config/config.yaml") as config_file:
            self.config = yaml.load(config_file, Loader=yaml.FullLoader)
        self.terrain = None  # latest local elevation grid (2-D ndarray)
        self.pose = None     # latest robot pose (set by update_pose)
        # The local grid must span an even number of cells.
        assert self.config["local_size"] / self.config["resolution"] % 2 == 0
        self.RT = RayTracer()
        self.get_submap = rospy.ServiceProxy(
            "/elevation_mapping/get_submap", GetGridMap
        )

    def shutdown(self):
        """Terminate the ray-tracer worker processes."""
        self.RT.stop_workers()

    def update_terrain(self):
        """Fetch the elevation submap around the current pose into self.terrain."""
        payload = self.get_submap(
            "odom",
            self.pose.position.x,
            self.pose.position.y,
            self.config["local_size"],
            self.config["local_size"],
            ["elevation"],
        ).map
        raw = payload.data[0]
        self.terrain = np.array(raw.data, dtype=float)
        self.terrain.shape = (raw.layout.dim[0].size, raw.layout.dim[1].size)
        # Rotate 180 degrees and scale by 100 — presumably grid_map's frame
        # convention and a metres-to-centimetres conversion; TODO confirm.
        self.terrain = np.rot90(self.terrain, k=2) * 100

    def get_extent(self):
        """Return (xmin, xmax, ymin, ymax) of the local map in world coordinates."""
        c = (self.config["local_size"] / self.config["resolution"]) / 2 + 1
        x = self.pose.position.x
        y = self.pose.position.y
        xmin = x - (c * self.config["resolution"])
        ymin = y - (c * self.config["resolution"])
        return (xmin, xmin + self.config["local_size"], ymin, ymin + self.config["local_size"])

    def update_pose(self, payload):
        """Cache the latest robot pose from an incoming ROS message."""
        self.pose = payload.pose

    def start_alpha_thread(self):
        """
        Alpha Thread is for applying ray-tracing on the local terrain.

        Returns the filled (NaN-free) map, or None if ray tracing failed.
        """
        try:
            self.RT.set_map(self.terrain)
            self.RT.start()
            final = self.RT.get_map()
        except Exception as why:
            print(repr(why))
            return None
        return final
|
<filename>sdks/python/apache_beam/moremmr/file_storage.py
import os
import uuid
import pandas as pd
from azure.storage.blob import AppendBlobService, BlockBlobService, PublicAccess
class FileStorage(object):
    """Helper around Azure blob storage for a single container.

    Wraps both block blobs (whole-file get/put) and append blobs
    (incremental CSV appends), with lazy connection setup and pandas-based
    CSV helpers.  All methods return booleans / best-effort results rather
    than raising.
    """

    def __init__(self, container_name):
        self.account_name = 'moremmrparsingstorage'
        # NOTE(review): '<KEY>' is a redacted placeholder (the original line
        # was even missing its closing quote — a syntax error). Real
        # credentials should be injected via configuration, not hard-coded.
        self.account_key = '<KEY>'
        self.container_name = container_name
        self.blob_service = None
        self.append_blob_service = None

    def connect_blob_service(self, reconnect=False):
        """Lazily create the BlockBlobService; return True on success."""
        if type(self.blob_service).__name__ != 'BlockBlobService' or reconnect:
            try:
                self.blob_service = BlockBlobService(account_name=self.account_name,
                                                     account_key=self.account_key)
                self.blob_service.set_container_acl(self.container_name, public_access=PublicAccess.Container)
            except Exception as ex:
                print('Exception in connection to blob service: {0}'.format(ex))
                return False
        return True

    def connect_append_blob_service(self, reconnect=False):
        """Lazily create the AppendBlobService; return True on success."""
        if type(self.append_blob_service).__name__ != 'AppendBlobService' or reconnect:
            try:
                self.append_blob_service = AppendBlobService(account_name=self.account_name,
                                                             account_key=self.account_key)
            except Exception as ex:
                print('Exception in connection to append blob service: {0}'.format(ex))
                return False
        return True

    def get_file(self, blob_name, result_file_path):
        """Download a blob to a local path.

        Returns True on success (including when the blob does not exist);
        False on connection failure or any other download error.
        """
        if not self.connect_blob_service():
            return False
        try:
            self.blob_service.get_blob_to_path(container_name=self.container_name,
                                               blob_name=blob_name,
                                               file_path=result_file_path)
        except Exception as ex:
            # getattr: generic exceptions (e.g. network errors) carry no
            # error_code attribute; the old code raised AttributeError here.
            if getattr(ex, 'error_code', None) != 'BlobNotFound':
                print('Getting file from blob exception: {0}:'.format(ex))
                return False
        return True

    def put_file(self, blob_name, local_file_path, remove_local_file_after=False):
        """Upload a local file as a block blob; optionally delete the local copy."""
        if not self.connect_blob_service():
            return False
        try:
            self.blob_service.create_blob_from_path(container_name=self.container_name,
                                                    blob_name=blob_name,
                                                    file_path=local_file_path)
        except Exception as ex:
            print('Putting file to blob exception: {0}:'.format(ex))
            return False
        if remove_local_file_after:
            try:
                os.remove(local_file_path)
            except OSError:
                pass
        return True

    def append_from_file(self, blob_name, local_file_path, remove_local_file_after=False):
        """Append a local file's content to an append blob; optionally delete it."""
        if not self.connect_append_blob_service():
            return False
        try:
            self.append_blob_service.append_blob_from_path(container_name=self.container_name,
                                                           blob_name=blob_name,
                                                           file_path=local_file_path)
        except Exception as ex:
            print('Append from file in blob exception: {0}:'.format(ex))
            return False
        if remove_local_file_after:
            try:
                os.remove(local_file_path)
            except OSError:
                pass
        return True

    def create_appended_blob(self, blob_name):
        """Create an empty append blob; return True on success."""
        if not self.connect_append_blob_service():
            return False
        try:
            self.append_blob_service.create_blob(container_name=self.container_name,
                                                 blob_name=blob_name)
        except Exception as ex:
            print('Creating appended file in blob exception: {0}:'.format(ex))
            return False
        return True

    def exists(self, blob_name):
        """Return True iff the blob exists (False on any error)."""
        exists = False
        if self.connect_blob_service():
            try:
                exists = self.blob_service.exists(container_name=self.container_name, blob_name=blob_name)
            except Exception as ex:
                print('Checking file existence in blob exception: {0}:'.format(ex))
        return exists

    def delete_blob(self, blob_name):
        """Delete a blob; return False on deletion error."""
        if self.connect_blob_service():
            try:
                self.blob_service.delete_blob(container_name=self.container_name,
                                              blob_name=blob_name)
            except Exception as ex:
                print('Deleting blob exception: {0}:'.format(ex))
                return False
        return True

    def put_array_to_blob(self, blob_name, array=None, compression=True):
        """Write `array` rows as a CSV blob (zip-compressed by default).

        Returns True when a non-empty frame was uploaded, False otherwise.
        """
        # `array=None` sentinel avoids the shared mutable-default-argument
        # pitfall of the previous `array=[]` signature.
        if array is None:
            array = []
        res = False
        local_file = '/tmp/{0}_{1}'.format(str(uuid.uuid4()), blob_name)
        try:
            df = pd.DataFrame(array)
        except Exception:
            df = pd.DataFrame()
        if df.shape[0] > 0:
            df.to_csv(path_or_buf=local_file,
                      header=False,
                      index=False,
                      compression=(None, 'zip')[compression])
            res = self.put_file(blob_name, local_file, True)
        return res

    def get_blob_as_df(self, blob_name, names=None):
        """Download a CSV blob into a DataFrame (empty frame on any failure).

        `names` is forwarded to pandas.read_csv as the column names.
        """
        # Mutable-default fix: keep the old `names=[]` behaviour via sentinel.
        if names is None:
            names = []
        df = pd.DataFrame()
        local_file = '/tmp/{0}_{1}'.format(str(uuid.uuid4()), blob_name)
        if self.get_file(blob_name, local_file):
            try:
                df = pd.read_csv(filepath_or_buffer=local_file, header=None, names=names)
            except Exception:
                df = pd.DataFrame()
            try:
                os.remove(local_file)
            except OSError:
                pass
        return df

    def append_df_to_blob(self, blob_name, df):
        """Append a DataFrame as CSV rows to an append blob (created if missing)."""
        local_file = '/tmp/{0}_{1}'.format(str(uuid.uuid4()), blob_name)
        df.to_csv(path_or_buf=local_file, header=None, index=False)
        if not self.exists(blob_name):
            self.create_appended_blob(blob_name)
        self.append_from_file(blob_name, local_file, True)
        return True

    def list_blobs(self, num_results=5000):
        """List up to `num_results` blobs in the container ([] on error)."""
        blobs = []
        if self.connect_blob_service():
            try:
                blobs = self.blob_service.list_blobs(self.container_name,
                                                     num_results=num_results)
            except Exception as ex:
                print('List blobs exception: {0}'.format(ex))
        return blobs
|
<reponame>Cinofix/secml
"""
.. module:: CPlot
:synopsis: A standard plot.
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import inspect
import sys
from matplotlib.axes import Axes
from secml.core import CCreator
from secml.array import CArray
from secml.array.array_utils import tuple_sequence_tondarray
class CPlot(CCreator):
"""Interface for standard plots.
This class provides an interface and few other methods useful
for standard plot creation.
To be never explicitly instanced. Will be created by `CFigure`.
Parameters
----------
sp : Axes
Subplot to use for plotting. Instance of `matplotlib.axes.Axes`.
default_params : dict
Dictionary with default parameters.
See Also
--------
.CFigure : creates and handle figures.
"""
    def __init__(self, sp, default_params):
        """Wrap a matplotlib Axes and attach subclass plotting methods.

        Parameters
        ----------
        sp : Axes
            Subplot to use for plotting. Instance of `matplotlib.axes.Axes`.
        default_params : dict
            Dictionary with default parameters (e.g. 'lines.linewidth').

        Raises
        ------
        TypeError
            If `sp` is not a `matplotlib.axes.Axes` instance.
        """
        if not isinstance(sp, Axes):
            raise TypeError("`matplotlib.axes.Axes` instance is requested.")
        # Target subplot reference
        self._sp = sp
        # Store default parameters
        self._params = default_params
        # Collect methods from subclasses
        self._collect_spmethods()
        # Callback parameter for showing the legend after
        # applying custom plot parameters
        self.show_legend = None
        # Placeholders for plot parameters
        self._ylabel = None
        self._xlabel = None
        self._yticks = None
        self._yticklabels = None
        self._xticks = None
        self._xticklabels = None
        self._xlim = None
        self._ylim = None
    def _collect_spmethods(self):
        """Collects methods from CPlot subclasses and attach them to self.

        Every method defined by any CPlot subclass (excluding CPlot itself,
        dunder methods, and names already present on this instance) is bound
        to `self`, so specialised plotting helpers become callable from any
        CPlot instance. First subclass wins on duplicate method names.
        """
        c_list = CPlot.get_subclasses()  # Retrieve all CPlot subclasses
        methods_list = []
        for c_info in c_list:  # For each CPlot subclass (name, class)
            if c_info[0] == CPlot.__name__:
                # Avoid adding methods of CPlot to CPlot
                continue
            # Get methods of each CPlot subclasses,
            # use isfunction for Py3, ismethod for Py2  # TODO: REMOVE Python 2
            pred = inspect.isfunction  # unbound methods or functions
            if sys.version_info < (3, 0):  # Py2 this covers unbound methods
                pred = inspect.ismethod
            c_methods = inspect.getmembers(c_info[1], pred)
            for method in c_methods:  # For each method (name, unbound method)
                # Skip special methods and already added methods
                if not method[0].startswith('__') and \
                        method[0] not in methods_list and \
                        not hasattr(self, method[0]):
                    methods_list.append(method)
        # Add methods to CPlot. Use __get__ to bound method to CPlot instance
        for method in methods_list:
            setattr(self, method[0], method[1].__get__(self))
    @property
    def n_lines(self):
        """Returns the number of lines inside current subplot."""
        # Delegates to get_lines(), which proxies Axes.get_lines().
        return len(self.get_lines())
def _set_lines_params(self, kwargs):
"""Add lines-related parameters to input dictionary."""
# Parameters are updated/added only if not yet specified
if 'linewidth' not in kwargs:
kwargs['linewidth'] = self._params['lines.linewidth']
if 'markersize' not in kwargs:
kwargs['markersize'] = self._params['lines.markersize']
return kwargs
    # The CCreator parameter/state API is not implemented for plots: a CPlot
    # wraps a live matplotlib Axes and is never saved or restored directly.
    def set(self, param_name, param_value, copy=False):
        """Not implemented for CPlot."""
        raise NotImplementedError
    def get_params(self):
        """Not implemented for CPlot."""
        raise NotImplementedError
    def get_state(self):
        """Not implemented for CPlot."""
        raise NotImplementedError
    def set_state(self, state_dict, copy=False):
        """Not implemented for CPlot."""
        raise NotImplementedError
    def load_state(self, path):
        """Not implemented for CPlot."""
        raise NotImplementedError
    def save_state(self, path):
        """Not implemented for CPlot."""
        raise NotImplementedError
    def get_lines(self):
        """Return a list of lines contained by the subplot.

        Returns
        -------
        list
            Line artists currently drawn in the wrapped Axes.
        """
        return self._sp.get_lines()
    def get_legend_handles_labels(self):
        """Return handles and labels for legend contained by the subplot.

        Returns
        -------
        tuple
            (handles, labels) as returned by
            `matplotlib.axes.Axes.get_legend_handles_labels`.
        """
        return self._sp.get_legend_handles_labels()
def get_xticks_idx(self, xticks):
"""Returns the position of markers to plot.
Parameters
----------
xticks : CArray
Ticks of x-axis where marker should be plotted.
Returns
-------
ticks_idx : list
List with the position of each xtick.
Notes
-----
If a given xtick is not exactly available,
the closest value's position will be returned.
"""
return xticks.binary_search(self._sp.get_xticks()).tolist()
def set_axisbelow(self, axisbelow=True):
"""Set axis ticks and gridlines below most artists."""
self._sp.set_axisbelow(axisbelow)
def merge(self, sp):
"""Merge input subplot to active subplot.
Parameters
----------
sp : CPlot
Subplot to be merged.
"""
for line in sp.get_lines():
self._sp.add_line(line)
if self.get_legend() is not None:
h, l = sp.get_legend_handles_labels()
self.legend(h, l)
def plot(self, x, y=None, *args, **kwargs):
"""Plot a line.
If only one array is given it is supposed to be the
y axis data. x axis values are set as index array 0..N-1 .
Parameters
----------
x : list or CArray
x axis values
y : list or CArray
y axis values
color : str
.. list-table::
:header-rows: 1
* - Character
- Color
* - 'b'
- blue
* - 'g'
- green
* - 'r'
- red
* - 'c'
- cyan
* - 'm'
- magenta
* - 'y'
- yellow
* - 'k'
- black
* - 'w'
- white
alpha : float, default 1.0
0.0 for transparent through 1.0 opaque
linestyle : character, default '-'
Can be one into this list : ['-' | '--' | '-.' | ':' | 'None' | ' ' | '']
linewidth : float
0.0 to 1.0
marker : str
.. list-table::
:header-rows: 1
* - Character
- Marker
* - '.'
- point marker
* - ','
- pixel marker
* - 'o'
- circle marker
* - 'v'
- triangle_down marker
* - '^'
- triangle_up marker
* - '<'
- triangle_left marker
* - '>'
- triangle_right marker
* - '1'
- tri_down marker
* - '2'
- tri_up marker
* - '3'
- tri_left marker
* - '4'
- tri_right marker
* - 's'
- square marker
* - 'p'
- pentagon marker
* - '*'
- star marker
* - 'h'
- hexagon1 marker
* - 'H'
- hexagon2 marker
* - '+'
- plus marker
* - 'x'
- x marker
* - 'D'
- diamond marker
* - 'd'
- thin_diamond marker
* - '|'
- vline marker
* - '_'
- hline marker
Examples
--------
.. plot:: pyplots/plot.py
:include-source:
"""
# Set lines-related parameters
kwargs = self._set_lines_params(kwargs)
# Convert sequences inside tuple to ndarray
x, y = tuple_sequence_tondarray((x, y))
if y is None:
self._sp.plot(x, *args, **kwargs)
else:
self._sp.plot(x, y, *args, **kwargs)
def semilogx(self, x, y=None, *args, **kwargs):
"""Plot with log scaling on the x axis.
If only one array is given it is supposed to be the
y axis data. x axis values are set as index array 0..N-1 .
Parameters
----------
x : list or CArray
x axis values
y : list or CArray
y axis values
basex : scalar > 1, default is 10
Base of the x logarithm
subsx : [ None | sequence ]
Where to place the subticks between each major tick.
Sequence of integers. For example, in a log10 scale:
[2, 3, 4, 5, 6, 7, 8, 9] will place 8 logarithmically
spaced minor ticks between each major tick.
nonposx : [ 'mask' | 'clip' ], default 'mask'
Non-positive values in x can be masked as invalid, or
clipped to a very small positive number
See Also
--------
.plot : Plot with standard axis.
Examples
--------
.. plot:: pyplots/semilogx.py
:include-source:
"""
if 'subsx' in kwargs and isinstance(kwargs['subsx'], CArray):
kwargs['subsx'] = kwargs['subsx'].tondarray()
# Set other lines-related parameters
kwargs = self._set_lines_params(kwargs)
# Convert sequences inside tuple to ndarray
x, y = tuple_sequence_tondarray((x, y))
if y is None:
self._sp.semilogx(x, *args, **kwargs)
else:
self._sp.semilogx(x, y, *args, **kwargs)
def semilogy(self, x, y=None, *args, **kwargs):
"""Plot with log scaling on the y axis.
If only one array is given it is supposed to be the
y axis data. x axis values are set as index array 0..N-1 .
Parameters
----------
x : list or CArray
x axis values.
y : list or CArray
y axis values.
basey : scalar > 1, default is 10
Base of the y logarithm
subsy : [ None | sequence ], default None
Where to place the subticks between each major tick.
Should be a sequence of integers.
For example, in a log10 scale: [2, 3, 4, 5, 6, 7, 8, 9]
will place 8 logarithmically spaced minor ticks between
each major tick.
nonposy : [ 'mask' | 'clip' ], default 'mask'
Non-positive values in x can be masked as invalid, or
clipped to a very small positive number.
See Also
--------
.plot : Plot with standard axis.
Examples
--------
.. plot:: pyplots/semilogy.py
:include-source:
"""
if 'subsy' in kwargs and isinstance(kwargs['subsy'], CArray):
kwargs['subsy'] = kwargs['subsy'].tondarray()
# Set other lines-related parameters
kwargs = self._set_lines_params(kwargs)
# Convert sequences inside tuple to ndarray
x, y = tuple_sequence_tondarray((x, y))
if y is None:
self._sp.semilogy(x, *args, **kwargs)
else:
self._sp.semilogy(x, y, *args, **kwargs)
def loglog(self, x, y=None, *args, **kwargs):
"""Plot with log scaling on both the x and y axis.
If only one array is given it is supposed to be the
y axis data. x axis values are set as index array 0..N-1 .
Parameters
----------
x : list or CArray
x axis values.
y : list or CArray
y axis values.
basex, basey : scalar > 1, default is 10
Base of the x/y logarithm.
subsx, subsy : [ None | sequence ]
Where to place the subticks between each major tick.
Should be a sequence of integers. For example, in a
log10 scale: [2, 3, 4, 5, 6, 7, 8, 9] will place 8
logarithmically spaced minor ticks between each major tick.
nonposx, nonposy : ['mask' | 'clip' ], default 'mask'.
Non-positive values in x or y can be masked as invalid, or
clipped to a very small positive number.
See Also
--------
.plot : Plot with standard axis.
"""
if 'subsx' in kwargs and isinstance(kwargs['subsx'], CArray):
kwargs['subsx'] = kwargs['subsx'].tondarray()
if 'subsy' in kwargs and isinstance(kwargs['subsy'], CArray):
kwargs['subsy'] = kwargs['subsy'].tondarray()
# Set other lines-related parameters
kwargs = self._set_lines_params(kwargs)
# Convert sequences inside tuple to ndarray
x, y = tuple_sequence_tondarray((x, y))
if y is None:
self._sp.loglog(x, *args, **kwargs)
else:
self._sp.loglog(x, y, *args, **kwargs)
def scatter(self, x, y, s=20, c='b', *args, **kwargs):
"""Scatter plot of x vs y.
Parameters
----------
x, y : list or CArray
Input data. Both object must have the same size.
s : scalar or shape (n, ), optional, default: 20
size in points^2.
c : color or sequence of color, optional, default 'b'
c can be a single color format string, or a sequence
of color specifications of length N, or a sequence of
numbers with the same shape of x,y to be mapped to
colors using the cmap and norm specified via kwargs
(see below). Note that c should not be a single
numeric RGB or RGBA sequence because that is
indistinguishable from an array of values to be
colormapped. c can be a 2-D array in which the rows
are RGB or RGBA, however.
marker : MarkerStyle, optional, default: 'o'
See markers for more information on the different
styles of markers scatter supports.
cmap : Colormap, optional, default: None
A Colormap instance or registered name. cmap is only
used if c is an array of floats. If None, default
parameter image.cmap is used.
norm : Normalize, optional, default: None
A Normalize instance is used to scale luminance data
to 0, 1. norm is only used if c is an array of floats.
vmin, vmax : scalar, optional, default: None
vmin and vmax are used in conjunction with norm to
normalize luminance data. If either are None, the min
and max of the color array is used. Note if you pass a
norm instance, your settings for vmin and vmax will
be ignored.
alpha : scalar, optional, default: None
The alpha blending value, between 0 (transparent) and 1 (opaque)
linewidths : scalar or array_like, optional, default: None
If None, defaults to (lines.linewidth,). Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of float.
Examples
--------
.. plot:: pyplots/scatter.py
:include-source:
"""
if 'linewidths' not in kwargs:
kwargs['linewidths'] = self._params['lines.linewidth']
# Convert sequences inside tuple to ndarray
if not isinstance(c, str):
x, y, c = tuple_sequence_tondarray((x, y, c))
else:
x, y = tuple_sequence_tondarray((x, y))
self._sp.scatter(x, y, s, c, *args, **kwargs)
def contour(self, x, y, z, *args, **kwargs):
"""Draw contour lines of a function.
Parameters
----------
x, y : CArray or list
specify the (x, y) coordinates of the surface.
X and Y must both be 2-D with the same shape
as Z, or they must both be 1-D such that len(X)
is the number of columns in Z and len(Y) is the
number of rows in Z.
z : CArray or list
value into (x, y) surface's position
colors : [ None | string | (mpl_colors) ]
If None, the colormap specified by cmap will be used.
If a string, like 'r' or 'red', all levels will
be plotted in this color.
If a tuple of matplotlib color args (string, float,
rgb, etc), different levels will be plotted in
different colors in the order specified.
alpha : float
The alpha blending value
cmap : [ None | Colormap ]
A cm Colormap instance or None. If cmap is None and
colors is None, a default Colormap is used.
vmin, vmax : [ None | scalar ]
If not None, either or both of these values will be
supplied to the matplotlib.colors.
Normalize instance, overriding the default color
scaling based on levels.
levels : [level0, level1, ..., leveln]
A list of floating point numbers indicating the level
curves to draw; e.g., to draw just the zero contour
pass levels=[0]
origin : [ None | 'upper' | 'lower' | 'image' ]
If None, the first value of Z will correspond to the
lower left corner, location (0,0). If 'image', the
default parameter value for image.origin will be used.
This keyword is not active if X and Y are specified
in the call to contour.
extent : [ None | (x0,x1,y0,y1) ]
If origin is not None, then extent is interpreted as
in matplotlib.pyplot.imshow(): it gives the outer
pixel boundaries. In this case, the position of Z[0,0]
is the center of the pixel, not a corner.
If origin is None, then (x0, y0) is the position of Z[0,0],
and (x1, y1) is the position of Z[-1,-1].
This keyword is not active if X and Y are specified in
the call to contour.
extend : [ 'neither' | 'both' | 'min' | 'max' ]
Unless this is 'neither', contour levels are automatically
added to one or both ends of the range so that all data are included.
These added ranges are then mapped to the special colormap
values which default to the ends of the colormap range.
antialiased : [ True | False ]
enable antialiasing, overriding the defaults.
For filled contours, the default is True. For line
contours, it is taken from default_parameters ['lines.antialiased'].
linewidths : [ None | number | tuple of numbers ]
If linewidths is None, the default width in lines.linewidth
default_parameters is used.
If a number, all levels will be plotted with this linewidth.
If a tuple, different levels will be plotted with different
linewidths in the order specified.
linestyles : [ None | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
If linestyles is None, the default is 'solid' unless the
lines are monochrome. In that case, negative contours will
take their linestyle from the matplotlibrc `contour.negative_`
linestyle setting.
linestyles can also be an iterable of the above strings
specifying a set of linestyles to be used. If this iterable
is shorter than the number of contour levels it will be
repeated as necessary.
Examples
--------
.. plot:: pyplots/contour.py
:include-source:
"""
if 'linewidths' not in kwargs:
kwargs['linewidths'] = self._params['lines.linewidth']
# Convert sequences inside tuple to ndarray
x, y, z = tuple_sequence_tondarray((x, y, z))
return self._sp.contour(x, y, z, *args, **kwargs)
def contourf(self, x, y, z, *args, **kwargs):
"""Draw filled contour of a function.
Parameters
----------
x, y : CArray or list
specify the (x, y) coordinates of the surface.
X and Y must both be 2-D with the same shape as Z,
or they must both be 1-D such that len(X) is the
number of columns in Z and len(Y) is the number of
rows in Z.
z : CArray or list
value into (x, y) surface's position
colors : [ None | string | (mpl_colors) ]
If None, the colormap specified by cmap will be used.
If a string, like 'r' or 'red', all levels will be
plotted in this color.
If a tuple of matplotlib color args (string, float,
rgb, etc), different levels will be plotted in different
colors in the order specified.
alpha : float
The alpha blending value
cmap : [ None | Colormap ]
A cm Colormap instance or None. If cmap is None and
colors is None, a default Colormap is used.
vmin, vmax : [ None | scalar ]
If not None, either or both of these values will be
supplied to the matplotlib.colors.
Normalize instance, overriding the default color
scaling based on levels.
levels : [level0, level1, ..., leveln]
A list of floating point numbers indicating the level
curves to draw; e.g., to draw just the zero contour
pass levels=[0]
origin : [ None | 'upper' | 'lower' | 'image' ]
If None, the first value of Z will correspond to the
lower left corner, location (0,0). If 'image', the
default parameter value for image.origin will be used.
This keyword is not active if X and Y are specified
in the call to contour.
extent : [ None | (x0,x1,y0,y1) ]
If origin is not None, then extent is interpreted as
in matplotlib.pyplot.imshow(): it gives the outer
pixel boundaries.
In this case, the position of Z[0,0] is the center
of the pixel, not a corner.
If origin is None, then (x0, y0) is the position of
Z[0,0], and (x1, y1) is the position of Z[-1,-1].
This keyword is not active if X and Y are specified
in the call to contour.
extend : [ 'neither' | 'both' | 'min' | 'max' ]
Unless this is 'neither', contour levels are automatically
added to one or both ends of the range so that all data
are included.
These added ranges are then mapped to the special colormap
values which default to the ends of the colormap range.
antialiased : [ True | False ]
enable antialiasing, overriding the defaults.
For filled contours, the default is True. For line contours,
it is taken from default_parameters ['lines.antialiased'].
Examples
--------
.. plot:: pyplots/contourf.py
:include-source:
"""
# Convert sequences inside tuple to ndarray
x, y, z = tuple_sequence_tondarray((x, y, z))
return self._sp.contourf(x, y, z, *args, **kwargs)
def clabel(self, contour, *args, **kwargs):
"""Label a contour plot.
Parameters
----------
contour : contour object
returned from contour function
fontsize : int
size in points or relative size e.g., 'smaller', 'x-large'
colors : str
if None, the color of each label matches the color
of the corresponding contour
if one string color, e.g., colors = 'r' or
colors = 'red', all labels will be plotted in
this color
if a tuple of matplotlib color args (string, float,
rgb, etc), different labels will be plotted in different
colors in the order specified
inline : bool
controls whether the underlying contour is removed
or not. Default is True.
inline_spacing : int
space in pixels to leave on each side of label when
placing inline. Defaults to 5.
This spacing will be exact for labels at locations
where the contour is straight, less so for labels on
curved contours.
fmt : str
a format string for the label. Default is '%1.3f'
Alternatively, this can be a dictionary matching contour
levels with arbitrary strings to use for each contour
level (i.e., fmt[level]=string), or it can be any callable,
such as a Formatter instance, that returns a string when
called with a numeric contour level.
manual : bool
if True, contour labels will be placed manually using
mouse clicks.
Click the first button near a contour to add a label,
click the second button (or potentially both mouse
buttons at once) to finish adding labels. The third
button can be used to remove the last label added, but
only if labels are not inline. Alternatively, the
keyboard can be used to select label locations (enter
to end label placement, delete or backspace act like
the third mouse button, and any other key will select
a label location).
manual can be an iterable object of x,y tuples. Contour
labels will be created as if mouse is clicked at each
x,y positions.
rightside_up : bool
if True (default), label rotations will always be plus
or minus 90 degrees from level.
Examples
--------
.. plot:: pyplots/clabel.py
:include-source:
"""
if 'fontsize' not in kwargs:
kwargs['fontsize'] = self._params['font.size']
return self._sp.clabel(contour, *args, **kwargs)
def colorbar(self, mappable, ticks=None, *args, **kwargs):
    """Add a colorbar to the current plot.

    Parameters
    ----------
    mappable : object
        Image, ContourSet, or other object to which the colorbar applies.
    ticks : list, CArray or None, optional
        Tick locations. If None, ticks are determined automatically
        from the input.
    use_gridspec : boolean, default False
        If True colorbar is created as an instance of Subplot using
        the grid_spec module.
    fontsize : int, optional
        Size of the colorbar tick labels. If not given, defaults to
        the figure's 'font.size' parameter. This is an extension of
        this wrapper and is NOT forwarded to matplotlib.

    Additional keyword arguments are of two kinds.

    Axes properties:

    - orientation: vertical or horizontal.
    - fraction, default 0.15: fraction of original axes to use for
      colorbar.
    - pad, default 0.05 if vertical, 0.15 if horizontal: fraction of
      original axes between colorbar and new image axes.
    - shrink, default 1.0: fraction by which to shrink the colorbar.
    - aspect, default 20: ratio of long to short dimensions.
    - anchor, default (0.0, 0.5) if vertical; (0.5, 1.0) if
      horizontal: the anchor point of the colorbar axes.
    - panchor, default (1.0, 0.5) if vertical; (0.5, 0.0) if
      horizontal: the anchor point of the colorbar parent axes.
      If False, the parent axes' anchor will be unchanged.

    Colorbar properties:

    - extend: [ 'neither' | 'both' | 'min' | 'max' ]. If not
      'neither', make pointed end(s) for out-of-range values. These
      are set for a given colormap using the colormap set_under and
      set_over methods.
    - extendfrac: [ None | 'auto' | length | lengths ]. If None, both
      triangular extensions have a length of 5% of the interior
      colorbar length (the default). 'auto' makes the extensions the
      same length as the interior boxes (spacing 'uniform') or as the
      respective adjacent interior boxes (spacing 'proportional').
      A scalar gives the length of both extensions as a fraction of
      the interior colorbar length; a two-element sequence gives the
      lengths of the minimum and maximum extensions respectively.
    - extendrect: [ False | True ]. If False the extensions are
      triangular (the default); if True they are rectangular.
    - spacing: [ 'uniform' | 'proportional' ]. Uniform spacing gives
      each discrete color the same space; proportional makes the
      space proportional to the data interval.
    - format: [ None | format string | Formatter object ]. If None,
      the ScalarFormatter is used. If a format string is given,
      e.g., '%.3f', that is used. An alternative Formatter object
      may be given instead.
    - drawedges: [ False | True ]. If True, draw lines at color
      boundaries.

    Returns
    -------
    cbar : the matplotlib colorbar object.

    Notes
    -----
    If mappable is a ContourSet, its extend kwarg is included
    automatically. The shrink kwarg provides a simple way to keep a
    vertical colorbar shorter: if the colorbar is too tall (or a
    horizontal colorbar is too wide) use a smaller value of shrink.

    Examples
    --------
    .. plot:: pyplots/colorbar.py
        :include-source:

    """
    # Convert CArray ticks to a plain list for matplotlib.
    ticks = ticks.tolist() if isinstance(ticks, CArray) else ticks
    # BUGFIX: `fontsize` is an extension of this wrapper, not a kwarg
    # accepted by matplotlib's colorbar. Pop it *before* forwarding
    # kwargs: the original code forwarded it to `colorbar()` (breaking
    # any call that passed `fontsize` explicitly) and only assigned
    # the default after the call.
    fontsize = kwargs.pop('fontsize', self._params['font.size'])
    from matplotlib.pyplot import colorbar
    cbar = colorbar(mappable, ticks=ticks, *args, **kwargs)
    cbar.ax.tick_params(labelsize=fontsize)
    return cbar
def errorbar(self, x, y, xerr=None, yerr=None, *args, **kwargs):
    """Plot with error deltas in yerr and xerr.

    Vertical errorbars are drawn when yerr is not None, horizontal
    ones when xerr is not None. All of x, y, xerr and yerr may be
    scalars, which draws a single error bar at (x, y).

    Parameters
    ----------
    x : list or CArray
        Values for the x axis.
    y : list or CArray
        Values for the y axis.
    xerr, yerr : [ scalar | N, Nx1, or 2xN array-like ], default None
        A scalar, len(N) array-like or Nx1 array-like draws errorbars
        at +/-value relative to the data; a 2xN sequence draws them
        at -row1 and +row2 relative to the data.
    fmt : [ '' | 'none' | plot format string ], default ''
        Plot format symbol. If 'none' (case-insensitive), only the
        errorbars are plotted (useful e.g. for adding errorbars to a
        bar plot). The default '' makes properties identical to the
        defaults for plot().
    ecolor : [ None | mpl color ], default None
        Color of the errorbar lines; if None, uses the color of the
        line connecting the markers.
    elinewidth : scalar, default None
        Linewidth of the errorbar lines; if None, uses the default
        linewidth.
    capsize : scalar, default 3
        Length of the error bar caps in points.
    capthick : scalar, default None
        Alias for markeredgewidth (a.k.a. mew) controlling the
        thickness of the error bar caps in points. For backwards
        compatibility, mew or markeredgewidth override capthick when
        given; this may change in future releases.
    barsabove : [ True | False ]
        If True, plot the errorbars above the plot symbols
        (default is below).
    lolims, uplims, xlolims, xuplims : [ False | True ], default False
        Indicate that a value gives only an upper/lower limit,
        rendered with a caret symbol. These may have the same type as
        xerr and yerr. To use limits with inverted axes, call
        set_xlim() or set_ylim() before errorbar().
    errorevery : positive integer, default 1
        Subsample the errorbars: e.g. 5 draws an errorbar for every
        5th datapoint, while the data plot still shows all points.

    Examples
    --------
    .. plot:: pyplots/errorbar.py
        :include-source:

    """
    # Merge in the figure-wide default line parameters.
    kwargs = self._set_lines_params(kwargs)
    # Normalize every input (CArray or plain sequence) to ndarray.
    x_nd, y_nd, xerr_nd, yerr_nd = tuple_sequence_tondarray(
        (x, y, xerr, yerr))
    self._sp.errorbar(
        x_nd, y_nd, xerr=xerr_nd, yerr=yerr_nd, *args, **kwargs)
def bar(self, left, height, width=0.8, bottom=None, *args, **kwargs):
    """Bar plot.

    Parameters
    ----------
    left : sequence of scalars
        x coordinates of the left sides of the bars.
    height : sequence of scalars
        Height(s) of the bars.
    width : scalar or array-like, optional, default: 0.8
        Width(s) of the bars.
    bottom : scalar or array-like, optional, default: None
        y coordinate(s) of the bars.
    color : scalar or array-like, optional
        Colors of the bar faces.
    edgecolor : scalar or array-like, optional
        Colors of the bar edges.
    linewidth : scalar or array-like, optional, default: None
        Width of bar edge(s). None uses the default linewidth;
        0 draws no edges.
    xerr, yerr : scalar or array-like, optional, default: None
        If not None, used to generate errorbar(s) on the bar chart.
    ecolor : scalar or array-like, optional, default: None
        Color of the errorbar(s).
    capsize : integer, optional, default: 3
        Length in points of the error bar caps.
    error_kw : dict
        Kwargs passed to the errorbar method; ecolor and capsize may
        be specified here rather than as independent kwargs.
    align : ['edge' | 'center'], optional, default: 'edge'
        With 'edge', bars are aligned by their left edges (vertical
        bars) or bottom edges (horizontal bars); with 'center', the
        `left` argument gives the coordinates of the bar centers.
    orientation : 'vertical' | 'horizontal', optional, default: 'vertical'
        Orientation of the bars.
    log : boolean, optional, default: False
        If True, sets the axis to log scale.

    Returns
    -------
    bar_list : list of bar type objects.

    Examples
    --------
    .. plot:: pyplots/bar.py
        :include-source:

    """
    # Fall back to the figure-wide default edge linewidth.
    kwargs.setdefault('linewidth', self._params['lines.linewidth'])
    # Normalize every input (CArray or plain sequence) to ndarray.
    left, height, width, bottom = tuple_sequence_tondarray(
        (left, height, width, bottom))
    return self._sp.bar(left, height, width, bottom, *args, **kwargs)
def barh(self, bottom, width, height=0.8, left=None, *args, **kwargs):
    """Horizontal bar plot.

    Parameters
    ----------
    bottom : sequence of scalars
        y coordinates of the bars.
    width : sequence of scalars
        Width(s) of the bars.
    height : scalar or array-like, optional, default: 0.8
        Height(s) of the bars.
    left : scalar or array-like, optional, default: None
        x coordinate(s) of the bars.
    color : scalar or array-like, optional
        Colors of the bar faces.
    edgecolor : scalar or array-like, optional
        Colors of the bar edges.
    linewidth : scalar or array-like, optional, default: None
        Width of bar edge(s). None uses the default linewidth;
        0 draws no edges.
    xerr, yerr : scalar or array-like, optional, default: None
        If not None, used to generate errorbar(s) on the bar chart.
    ecolor : scalar or array-like, optional, default: None
        Color of the errorbar(s).
    capsize : integer, optional, default: 3
        Length in points of the error bar caps.
    error_kw : dict
        Kwargs passed to the errorbar method; ecolor and capsize may
        be specified here rather than as independent kwargs.
    align : ['edge' | 'center'], optional, default: 'edge'
        With 'edge', bars are aligned by their left edges (vertical
        bars) or bottom edges (horizontal bars); with 'center', the
        left argument gives the coordinates of the bar centers.
    orientation : 'vertical' | 'horizontal', optional, default: 'vertical'
        Orientation of the bars.
    log : boolean, optional, default: False
        If True, sets the axis to log scale.

    Returns
    -------
    bar_list : list of bar type objects.

    """
    # Fall back to the figure-wide default edge linewidth.
    kwargs.setdefault('linewidth', self._params['lines.linewidth'])
    # Normalize every input (CArray or plain sequence) to ndarray.
    bottom, width, height, left = tuple_sequence_tondarray(
        (bottom, width, height, left))
    return self._sp.barh(bottom, width, height, left, *args, **kwargs)
def hist(self, x, *args, **kwargs):
    """Plot a histogram.

    Compute and draw the histogram of x. The return value is a tuple
    (n, bins, patches) or ([n0, n1, ...], bins, [patches0,
    patches1, ...]) if the input contains multiple data. Multiple
    data can be provided via x as a list of datasets of potentially
    different length ([x0, x1, ...]), or as a 2-D ndarray in which
    each column is a dataset.

    Parameters
    ----------
    x : (n,) array or sequence of (n,) arrays
        Input values: a single array or a sequence of arrays which
        are not required to be of the same length.
    bins : integer or array_like, optional, default is 10
        For an integer, bins + 1 bin edges are returned; a sequence
        gives (possibly unequally spaced) bin edges.
    range : tuple or None, optional
        Lower and upper range of the bins; outliers are ignored.
        Defaults to (x.min(), x.max()); has no effect when bins is a
        sequence. If bins is a sequence or range is specified,
        autoscaling is based on the specified bin range instead of
        the range of x.
    density : boolean, optional
        If True, the first element of the return tuple holds the
        counts normalized to form a probability density, i.e.
        n/(len(x)`dbin), so the integral of the histogram sums to 1.
        If stacked is also True, the sum of the histograms is
        normalized to 1.
    weights : (n, ) array_like or None, optional
        Weights of the same shape as x; each value contributes its
        weight towards the bin count instead of 1. If density is
        True, the weights are normalized so the integral of the
        density over the range remains 1.
    cumulative : boolean, optional
        Default False. If True, each bin gives the counts in that
        bin plus all bins for smaller values; the last bin gives the
        total number of datapoints (equal to 1 when density is also
        True). A value evaluating to less than 0 (e.g., -1) reverses
        the direction of accumulation, normalizing the *first* bin
        to 1 when density is True.
    bottom : array_like, scalar, or None
        Location of the bottom baseline of each bin: a scalar shifts
        all bins equally, an array shifts each bin independently
        (its length must match the number of bins). None defaults
        to 0.
    histtype : {'bar', 'barstacked', 'step', 'stepfilled'}, optional
        'bar' (default) draws traditional side-by-side bars,
        'barstacked' stacks multiple data on top of each other,
        'step' draws a lineplot that is by default unfilled, and
        'stepfilled' one that is by default filled.
    align : {'left', 'mid', 'right'}, optional
        Center the bars on the left bin edges, between the edges
        (default), or on the right bin edges.
    orientation : {'horizontal', 'vertical'}, optional
        With 'horizontal', barh is used for bar-type histograms and
        the bottom kwarg gives the left edges.
    rwidth : scalar or None, optional
        Relative bar width as a fraction of the bin width; None
        computes it automatically. Ignored when histtype is 'step'
        or 'stepfilled'.
    log : boolean, optional
        Default False. If True, set the histogram axis to log scale;
        for a 1-D x, empty bins are filtered out and only the
        non-empty (n, bins, patches) are returned.
    color : color or array_like of colors or None, optional
        Color spec or sequence of color specs, one per dataset;
        None uses the standard line color sequence.
    label : string or None, optional
        Label (or sequence of labels for multiple datasets). Bar
        charts yield multiple patches per dataset, but only the
        first gets the label so the legend command works as
        expected.
    stacked : boolean, optional
        If True, multiple data are stacked on top of each other; if
        False (default) they are arranged side by side for 'bar'
        histtype, or on top of each other for 'step'.

    Returns
    -------
    n : CArray or list of arrays
        Values of the histogram bins (see density and weights for
        the possible semantics). For an array input this is an array
        of length nbins; for a sequence of arrays it is a list of
        arrays with the histogram values in the same order.
    bins : CArray
        Edges of the bins, length nbins + 1 (nbins left edges plus
        the right edge of the last bin); always a single array even
        for multiple datasets.
    patches : list or list of lists
        Silent list of individual patches used to create the
        histogram, or a list of such lists for multiple datasets.

    Examples
    --------
    .. plot:: pyplots/hist.py
        :include-source:

    """
    # Fall back to the figure-wide default linewidth.
    kwargs.setdefault('linewidth', self._params['lines.linewidth'])
    # Convert any CArray dataset to ndarray; leave other inputs as-is.
    data = [e.tondarray() if isinstance(e, CArray) else e for e in x]
    n, edges, patches = self._sp.hist(data, *args, **kwargs)
    # Multiple datasets: wrap each counts array back into a CArray.
    if isinstance(n, list):
        n = [CArray(counts) for counts in n]
    return n, CArray(edges), patches
def boxplot(self, x, notch=False, sym=None, vert=True, whis=1.5,
            positions=None, widths=None, patch_artist=False,
            bootstrap=None, usermedians=None, conf_intervals=None,
            meanline=False, showmeans=False, showcaps=True,
            showbox=True, showfliers=True, boxprops=None, labels=None,
            flierprops=None, medianprops=None, meanprops=None,
            capprops=None, whiskerprops=None, manage_xticks=True):
    """Make a box and whisker plot.

    Make a box and whisker plot for each column of *x* or each
    vector in sequence *x*. The box extends from the lower to
    upper quartile values of the data, with a line at the median.
    The whiskers extend from the box to show the range of the
    data. Flier points are those past the end of the whiskers.

    Parameters
    ----------
    x : Array or a sequence of vectors.
        The input data.
    notch : bool, default = False
        If False, produces a rectangular box plot.
        If True, will produce a notched box plot.
    sym : str or None, default = None
        The default symbol for flier points. Enter an empty string
        ('') if you don't want to show fliers. If `None`, then the
        fliers default to 'b+'. If you want more control use the
        flierprops kwarg.
    vert : bool, default = True
        If True (default), makes the boxes vertical.
        If False, makes horizontal boxes.
    whis : float, sequence (default = 1.5) or string
        As a float, determines the reach of the whiskers past the
        first and third quartiles (e.g., Q3 + whis*IQR,
        IQR = interquartile range, Q3-Q1). Beyond the whiskers, data
        are considered outliers and are plotted as individual
        points. Set this to an unreasonably high value to force the
        whiskers to show the min and max values. Alternatively, set
        this to an ascending sequence of percentiles (e.g., [5, 95])
        to set the whiskers at specific percentiles of the data.
        Finally, *whis* can be the string 'range' to force the
        whiskers to the min and max of the data. In the edge case
        that the 25th and 75th percentiles are equivalent, *whis*
        will be automatically set to 'range'.
    bootstrap : None (default) or integer
        Specifies whether to bootstrap the confidence intervals
        around the median for notched boxplots. If bootstrap is
        None, no bootstrapping is performed and notches are
        calculated using a Gaussian-based asymptotic approximation.
        Otherwise, bootstrap specifies the number of times to
        bootstrap the median to determine its 95% confidence
        intervals. Values between 1000 and 10000 are recommended.
    usermedians : array-like or None (default)
        An array or sequence whose first dimension (or length) is
        compatible with *x*. This overrides the medians computed by
        matplotlib for each element of *usermedians* that is not
        None. When an element of *usermedians* is None, the median
        will be computed by matplotlib as normal.
    conf_intervals : array-like or None (default)
        Array or sequence whose first dimension (or length) is
        compatible with *x* and whose second dimension is 2. When
        the current element of *conf_intervals* is not None, the
        notch locations computed by matplotlib are overridden
        (assuming notch is True). When an element of
        *conf_intervals* is None, boxplot computes notches using the
        method specified by the other kwargs (e.g., *bootstrap*).
    positions : array-like, default = [1, 2, ..., n]
        Sets the positions of the boxes. The ticks and limits
        are automatically set to match the positions.
    widths : array-like, default = 0.5
        Either a scalar or a vector setting the width of each box.
        The default is 0.5, or ``0.15*(distance between extreme
        positions)`` if that is smaller.
    labels : sequence or None (default)
        Labels for each dataset. Length must be compatible with
        dimensions of *x*.
    patch_artist : bool, default = False
        If False produces boxes with the Line2D artist.
        If True produces boxes with the Patch artist.
    showmeans : bool, default = False
        If True, will toggle on the rendering of the means.
    showcaps : bool, default = True
        If True, will toggle on the rendering of the caps.
    showbox : bool, default = True
        If True, will toggle on the rendering of the box.
    showfliers : bool, default = True
        If True, will toggle on the rendering of the fliers.
    boxprops : dict or None (default)
        If provided, will set the plotting style of the boxes.
    whiskerprops : dict or None (default)
        If provided, will set the plotting style of the whiskers.
    capprops : dict or None (default)
        If provided, will set the plotting style of the caps.
    flierprops : dict or None (default)
        If provided, will set the plotting style of the fliers.
    medianprops : dict or None (default)
        If provided, will set the plotting style of the medians.
    meanprops : dict or None (default)
        If provided, will set the plotting style of the means.
    meanline : bool, default = False
        If True (and *showmeans* is True), will try to render the
        mean as a line spanning the full width of the box according
        to *meanprops*. Not recommended if *shownotches* is also
        True. Otherwise, means will be shown as points.

    Returns
    -------
    result : dict
        A dictionary mapping each component of the boxplot
        to a list of the :class:`matplotlib.lines.Line2D`
        instances created. That dictionary has the following keys
        (assuming vertical boxplots):

        - boxes: the main body of the boxplot showing the quartiles
          and the median's confidence intervals if enabled.
        - medians: horizontal lines at the median of each box.
        - whiskers: the vertical lines extending to the most
          extreme, n-outlier data points.
        - caps: the horizontal lines at the ends of the whiskers.
        - fliers: points representing data that extend beyond the
          whiskers (outliers).
        - means: points or lines representing the means.

    """
    # A single CArray is treated as a one-dataset sequence.
    if isinstance(x, CArray):
        x = (x, )
    # Convert each dataset (CArray or sequence) to ndarray.
    x = tuple_sequence_tondarray(tuple(x))
    # Same normalization for the optional per-dataset overrides.
    if usermedians is not None:
        if isinstance(usermedians, CArray):
            usermedians = (usermedians, )
        usermedians = tuple_sequence_tondarray(tuple(usermedians))
    if conf_intervals is not None:
        if isinstance(conf_intervals, CArray):
            conf_intervals = (conf_intervals, )
        conf_intervals = tuple_sequence_tondarray(tuple(conf_intervals))
    if isinstance(positions, CArray):
        positions = positions.tondarray()
    # Forward everything positionally; the order here must match the
    # positional parameter order of the underlying subplot `boxplot`.
    self._sp.boxplot(x, notch, sym, vert, whis,
                     positions, widths, patch_artist,
                     bootstrap, usermedians, conf_intervals,
                     meanline, showmeans, showcaps,
                     showbox, showfliers, boxprops,
                     labels, flierprops, medianprops,
                     meanprops, capprops, whiskerprops,
                     manage_xticks)
def fill_between(self, x, y1, y2=0, where=None,
                 interpolate=False, step=None, **kwargs):
    """Fill the area between two horizontal curves.

    The curves are defined by the points (x, y1) and (x, y2); one or
    more polygons describing the filled area are created. Horizontal
    sections may be excluded from filling using `where`. By default
    the edges connect the given points directly; use `step` if the
    filling should be a step function, i.e. constant in between x.

    Parameters
    ----------
    x : CArray (length N)
        The x coordinates of the nodes defining the curves.
    y1 : CArray (length N) or scalar
        The y coordinates of the nodes defining the first curve.
    y2 : CArray (length N) or scalar, optional, default: 0
        The y coordinates of the nodes defining the second curve.
    where : CArray of bool (length N), optional, default: None
        Defines where to exclude some horizontal regions from being
        filled. The filled regions are defined by the coordinates
        x[where]: the span between x[i] and x[i+1] is filled only if
        both where[i] and where[i+1] hold. This implies that an
        isolated True between two False values produces no filling;
        both sides of the True position remain unfilled due to the
        adjacent False values.
    interpolate : bool, optional
        Only relevant if `where` is used and the two curves cross
        each other. Semantically, `where` is often used for y1 > y2
        or similar. By default the nodes of the polygon defining the
        filled region are placed only at the positions in the x
        array; such a polygon cannot describe the above semantics
        close to the intersection, so the x-sections containing the
        intersection are simply clipped. Setting interpolate to True
        calculates the actual intersection point and extends the
        filled region up to it.
    step : {'pre', 'post', 'mid'}, optional
        Fill with a step function, i.e. constant in between x. The
        value determines where the step occurs: 'pre' continues the
        y value constantly to the left of every x position (the
        interval (x[i-1], x[i]] has the value y[i]), 'post' to the
        right (the interval [x[i], x[i+1]) has the value y[i]), and
        'mid' places steps half-way between the x positions.

    """
    # Normalize every input (CArray or plain sequence) to ndarray.
    x_nd, y1_nd, y2_nd, where_nd = tuple_sequence_tondarray(
        (x, y1, y2, where))
    self._sp.fill_between(x_nd, y1_nd, y2=y2_nd, where=where_nd,
                          interpolate=interpolate, step=step, **kwargs)
def xlim(self, bottom=None, top=None):
    """Set the limits of the x axis.

    Parameters
    ----------
    bottom : scalar
        Starting value for the x axis.
    top : scalar
        Ending value for the x axis.

    Examples
    --------
    .. plot:: pyplots/xlim.py
        :include-source:

    """
    # Remember the requested limits, then apply them to the subplot.
    limits = (bottom, top)
    self._xlim = limits
    self._sp.set_xlim(bottom, top)
def ylim(self, bottom=None, top=None):
    """Set the limits of the y axis.

    Parameters
    ----------
    bottom : scalar
        Starting value for the y axis.
    top : scalar
        Ending value for the y axis.

    See Also
    --------
    .xlim : Set x axis limits.

    """
    # Remember the requested limits, then apply them to the subplot.
    limits = (bottom, top)
    self._ylim = limits
    self._sp.set_ylim(bottom, top)
def xscale(self, scale_type, nonposx='mask', basex=10, **kwargs):
    """Set the scale of the x axis.

    Parameters
    ----------
    scale_type : {'linear', 'log', 'symlog', 'logit'}
        Scale for the x axis. Default 'linear'.
    nonposx : [ 'mask' | 'clip' ], default 'mask'
        Whether non-positive values in x are masked as invalid or
        clipped to a very small positive number.
    basex : int
        The base of the logarithm; must be higher than 1.

    """
    # Delegate straight to the subplot; extra kwargs pass through.
    self._sp.set_xscale(scale_type, nonposx=nonposx, basex=basex, **kwargs)
def yscale(self, scale_type, nonposy='mask', basey=10, **kwargs):
    """Set the scale of the y axis.

    Parameters
    ----------
    scale_type : {'linear', 'log', 'symlog', 'logit'}
        Scale for the y axis. Default 'linear'.
    nonposy : [ 'mask' | 'clip' ], default 'mask'
        Whether non-positive values in y are masked as invalid or
        clipped to a very small positive number.
    basey : int
        The base of the logarithm; must be higher than 1.

    """
    # Delegate straight to the subplot; extra kwargs pass through.
    self._sp.set_yscale(scale_type, nonposy=nonposy, basey=basey, **kwargs)
def xlabel(self, label, *args, **kwargs):
    """Set a label for the x axis.

    Parameters
    ----------
    label : string
        Text of the label.
    *args, **kwargs
        Same as the :meth:`.text` method.

    Examples
    --------
    .. plot:: pyplots/xlabel.py
        :include-source:

    """
    # Default to the figure-wide font size unless the caller set one.
    kwargs.setdefault('fontsize', self._params['font.size'])
    self._xlabel = label
    self._sp.set_xlabel(label, *args, **kwargs)
def ylabel(self, label, *args, **kwargs):
    """Set a label for the y axis.

    Parameters
    ----------
    label : string
        Text of the label.
    *args, **kwargs
        Same as the :meth:`.text` method.

    See Also
    --------
    .xlabel : Set a label for the x axis.

    """
    # Default to the figure-wide font size unless the caller set one.
    kwargs.setdefault('fontsize', self._params['font.size'])
    self._ylabel = label
    self._sp.set_ylabel(label, *args, **kwargs)
def xticks(self, location_array, *args, **kwargs):
    """Set the x-tick locations and labels.

    Parameters
    ----------
    location_array : CArray or list
        Locations of the ticks.
    *args, **kwargs
        Same as the :meth:`.text` method.

    Examples
    --------
    .. plot:: pyplots/xticks.py
        :include-source:

    """
    # Accept CArray input by converting it to an ndarray first.
    locs = location_array.tondarray() \
        if isinstance(location_array, CArray) else location_array
    self._xticks = locs
    self._sp.set_xticks(locs, *args, **kwargs)
def yticks(self, location_array, *args, **kwargs):
    """Set the y-tick locations and labels.

    Parameters
    ----------
    location_array : CArray or list
        Locations of the ticks.
    *args, **kwargs
        Same as the :meth:`.text` method.

    See Also
    --------
    .xticks : Set the x-tick locations and labels.

    """
    # Accept CArray input by converting it to an ndarray first.
    locs = location_array.tondarray() \
        if isinstance(location_array, CArray) else location_array
    self._yticks = locs
    self._sp.set_yticks(locs, *args, **kwargs)
def xticklabels(self, labels, *args, **kwargs):
    """Set the labels of the x ticks.

    Parameters
    ----------
    labels : list or CArray of string
        Labels of the x ticks.
    *args, **kwargs
        Same as the :meth:`.text` method.

    Examples
    --------
    .. plot:: pyplots/xticklabels.py
        :include-source:

    """
    # Accept CArray input by converting it to a plain list first.
    if isinstance(labels, CArray):
        labels = labels.tolist()
    self._xticklabels = labels
    self._sp.set_xticklabels(labels, *args, **kwargs)
def yticklabels(self, labels, *args, **kwargs):
    """Set the labels of the y ticks.

    Parameters
    ----------
    labels : list or CArray of string
        Labels of the y ticks.
    *args, **kwargs
        Same as the :meth:`.text` method.

    See Also
    --------
    .xticklabels : Set the xtick labels.

    """
    # Accept CArray input by converting it to a plain list first.
    if isinstance(labels, CArray):
        labels = labels.tolist()
    self._yticklabels = labels
    self._sp.set_yticklabels(labels, *args, **kwargs)
def tick_params(self, *args, **kwargs):
    """Change the appearance of ticks and tick labels.

    Parameters
    ----------
    axis : ['x' | 'y' | 'both']
        Axis on which to operate; default is 'both'.
    reset : [True | False]
        Default False. If True, set all parameters to defaults
        before processing other keyword arguments.
    which : ['major' | 'minor' | 'both']
        Default 'major'; which ticks the arguments apply to.
    direction : ['in' | 'out' | 'inout']
        Put ticks inside the axes, outside the axes, or both.
    length : int
        Tick length in points.
    width : int
        Tick width in points.
    color : str
        Tick color; accepts any mpl color spec.
    pad : int
        Distance in points between tick and label.
    labelsize : int, str
        Tick label font size in points or as a string
        (e.g., 'large').
    labelcolor : str
        Tick label color; mpl color spec.
    colors : str
        Changes both the tick color and the label color to the same
        value: mpl color spec.
    bottom, top, left, right : bool, optional
        Controls whether to draw the respective ticks.
    labelbottom, labeltop, labelleft, labelright : bool, optional
        Controls whether to draw the respective tick labels.

    Examples
    --------
    .. plot:: pyplots/tick_params.py
        :include-source:

    """
    # Pure delegation to the underlying subplot.
    self._sp.tick_params(*args, **kwargs)
def grid(self, grid_on=True, axis='both', **kwargs):
    """Draw (or hide) the grid of the current plot.

    Parameters
    ----------
    grid_on : boolean, default True
        If True show the grid, otherwise hide it.
    axis : string, default 'both'
        Which set of gridlines to control: 'both' (default),
        'x', or 'y'.
    kwargs : any
        Other keyword arguments for grid.

    Examples
    --------
    .. plot:: pyplots/grid.py
        :include-source:

    """
    # Pure delegation to the underlying subplot.
    self._sp.grid(grid_on, axis=axis, **kwargs)
def text(self, *args, **kwargs):
    """Create a Text instance at position x, y with the given string.

    Accepts matplotlib Text/Font properties as keyword arguments,
    among which:

    - alpha: float (0.0 transparent through 1.0 opaque)
    - animated, rasterized, visible: [True | False]
    - backgroundcolor, color: any matplotlib color
    - bbox: rectangle prop dict
    - family (or fontfamily, fontname, name): font name or family,
      e.g. 'serif', 'sans-serif', 'cursive', 'fantasy', 'monospace'
    - horizontalalignment (or ha): 'center' | 'right' | 'left'
    - verticalalignment (or va, ma):
      'center' | 'top' | 'bottom' | 'baseline'
    - label, text: string or anything printable with '%s' conversion
    - linespacing: float (multiple of font size)
    - position: (x, y)
    - rotation: angle in degrees | 'vertical' | 'horizontal'
    - size (or fontsize): size in points, or a relative size among
      'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large',
      'xx-large'
    - stretch (or fontstretch): a numeric value in range 0-1000 or
      one of 'ultra-condensed', 'extra-condensed', 'condensed',
      'semi-condensed', 'normal', 'semi-expanded', 'expanded',
      'extra-expanded', 'ultra-expanded'
    - style (or fontstyle): 'normal' | 'italic' | 'oblique'
    - variant: 'normal' | 'small-caps'
    - weight (or fontweight): a numeric value in range 0-1000 or one
      of 'ultralight', 'light', 'normal', 'regular', 'book',
      'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold',
      'heavy', 'extra bold', 'black'
    - x, y: float position of the text
    - zorder: any number; objects with lower zorder values are
      drawn first

    """
    # Default to the figure-wide font size unless the caller set one.
    kwargs.setdefault('fontsize', self._params['font.size'])
    return self._sp.text(*args, **kwargs)
def legend(self, *args, **kwargs):
    """Create a legend for the plot.

    All positional and keyword arguments are forwarded to the underlying
    matplotlib ``legend`` call; unless the caller passes an explicit
    ``fontsize``, the figure-wide default font size is used.

    Parameters
    ----------
    loc : integer or string or pair of floats, default: 0
        Location of the legend. Integer codes map to:
        0 'best', 1 'upper right', 2 'upper left', 3 'lower left',
        4 'lower right', 5 'right', 6 'center left', 7 'center right',
        8 'lower center', 9 'upper center', 10 'center'.
    bbox_to_anchor : tuple of floats
        Specify any arbitrary location for the legend in bbox_transform
        coordinates (default Axes coordinates). For example, to put the
        legend's upper right hand corner in the center of the axes the
        following keywords can be used: loc='upper right',
        bbox_to_anchor=(0.5, 0.5).
    ncol : integer
        The number of columns that the legend has. Default is 1.
    prop : None or dict
        The font properties of the legend. If None (default), the current
        default parameters will be used.
    fontsize : int or float or {'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'}
        Controls the font size of the legend. If the value is numeric
        the size will be the absolute font size in points.
        String values are relative to the current default font size.
        This argument is only used if prop is not specified.
    numpoints : None or int
        The number of marker points in the legend when creating a
        legend entry for a line. Default is None which will take the
        value from the legend.numpoints default parameter.
    scatterpoints : None or int
        The number of marker points in the legend when creating a
        legend entry for a scatter plot. Default is None which will
        take the value from the legend.scatterpoints default parameter.
    scatteryoffsets : iterable of floats
        The vertical offset (relative to the font size) for the markers
        created for a scatter plot legend entry. 0.0 is at the base the
        legend text, and 1.0 is at the top. To draw all markers at the
        same height, set to [0.5]. Default [0.375, 0.5, 0.3125].
    markerscale : None or int or float
        The relative size of legend markers compared with the originally
        drawn ones. Default is None which will take the value from the
        legend.markerscale default parameter.
    frameon : None or bool
        Control whether a frame should be drawn around the legend.
        Default is None which will take the value from the legend.frameon
        default parameter.
    fancybox : None or bool
        Control whether round edges should be enabled around the
        FancyBboxPatch which makes up the legend's background.
        Default is None which will take the value from the
        legend.fancybox default parameter.
    shadow : None or bool
        Control whether to draw a shadow behind the legend.
        Default is None which will take the value from the
        legend.shadow default parameter.
    framealpha : None or float
        Control the alpha transparency of the legend's frame.
        Default is None which will take the value from the
        legend.framealpha default parameter.
    mode : either between {"expand", None}
        If mode is set to "expand" the legend will be horizontally
        expanded to fill the axes area (or bbox_to_anchor if
        defines the legend's size).
    bbox_transform : None or matplotlib.transforms.Transform
        The transform for the bounding box (bbox_to_anchor).
        For a value of None (default) the Axes' transAxes transform
        will be used.
    title : str or None
        The legend's title. Default is no title (None).
    borderpad : float or None
        The fractional whitespace inside the legend border.
        Measured in font-size units. Default is None which will take
        the value from the legend.borderpad default parameter.
    labelspacing : float or None
        The vertical space between the legend entries. Measured in
        font-size units. Default is None which will take the value
        from the legend.labelspacing default parameter.
    handlelength : float or None
        The length of the legend handles. Measured in
        font-size units. Default is None which will take the
        value from the legend.handlelength default parameter.
    handletextpad : float or None
        The pad between the legend handle and text. Measured in
        font-size units. Default is None which will take the value
        from the legend.handletextpad default parameter.
    borderaxespad : float or None
        The pad between the axes and legend border. Measured in
        font-size units. Default is None which will take the value
        from the legend.borderaxespad default parameter.
    columnspacing : float or None
        The spacing between columns. Measured in font-size units.
        Default is None which will take the value from the
        legend.columnspacing default parameter.
    *args, **kwargs
        Any further argument accepted by the underlying matplotlib
        ``legend`` call.

    Examples
    --------
    .. plot:: pyplots/legend.py
        :include-source:
    """
    # Fall back to the figure-wide default font size unless the caller
    # specified one explicitly.
    kwargs.setdefault('fontsize', self._params['font.size'])
    # Remember that a legend was requested so the figure renders it.
    self.show_legend = True
    return self._sp.legend(*args, **kwargs)
def get_legend(self):
    """Return the handler of the legend attached to the current subplot."""
    subplot = self._sp
    return subplot.get_legend()
def title(self, text, *args, **kwargs):
    """Set the title of this subplot to *text*.

    Extra arguments are forwarded to the underlying ``set_title`` call;
    the figure-wide default font size is applied unless overridden.
    """
    kwargs.setdefault('fontsize', self._params['font.size'])
    return self._sp.set_title(text, *args, **kwargs)
def plot_path(self, path, path_style='-', path_width=1.5, path_color='k',
              straight=False, start_style='h', start_facecolor='r',
              start_edgecolor='k', start_edgewidth=1,
              final_style='*', final_facecolor='g',
              final_edgecolor='k', final_edgewidth=1):
    """Plot a path traversed by a point.

    By default, the path is drawn in solid black, the start point is
    drawn with a red hexagon and the end point is drawn with a green
    star (the previous docstring misdescribed both default markers).

    Parameters
    ----------
    path : CArray
        Every row contains one point coordinate (2 columns expected).
    path_style : str
        Style for the path line. Default solid (-).
    path_width : int
        Width of path line. Default 1.5.
    path_color : str
        Color for the path line. Default black (k).
    straight : bool, default False
        If True, path will be plotted straight between start and end point.
    start_style : str
        Style for the start point. Default an hexagon (h).
    start_facecolor : str
        Color for the start point. Default red (r).
    start_edgecolor : str
        Color for the edge of the start point marker. Default black (k).
    start_edgewidth : scalar
        Width of the edge for the start point. Default 1.
    final_style : str
        Style for the end point. Default a star (*).
    final_facecolor : str
        Color for the end point. Default green (g).
    final_edgecolor : str
        Color for the edge of the final point marker. Default black (k).
    final_edgewidth : scalar
        Width of the edge for the end point. Default 1.

    Examples
    --------
    .. plot:: pyplots/plot_path.py
        :include-source:
    """
    path_2d = CArray(path).atleast_2d()
    if path_2d.shape[1] != 2:
        raise ValueError("cannot plot a {:}-Dimensional path."
                         "".format(path_2d.shape[1]))
    # Plotting full path, then the start and the end points
    if straight is False:
        self.plot(path_2d[:, 0], path_2d[:, 1],
                  linestyle=path_style,
                  color=path_color,
                  linewidth=path_width)
    else:
        # Bug fix: the straight segment previously ignored `path_width`;
        # pass it so both modes honor the requested line width.
        self.plot(path_2d[[0, -1], 0], path_2d[[0, -1], 1],
                  linestyle=path_style, color=path_color,
                  linewidth=path_width)
    self.plot(path_2d[0, 0], path_2d[0, 1], marker=start_style,
              markerfacecolor=start_facecolor,
              markeredgecolor=start_edgecolor,
              markeredgewidth=start_edgewidth)
    self.plot(path_2d[-1, 0], path_2d[-1, 1], marker=final_style,
              markerfacecolor=final_facecolor,
              markeredgecolor=final_edgecolor,
              markeredgewidth=final_edgewidth)
def imshow(self, img, *args, **kwargs):
    """Plot an image on this subplot.

    Parameters
    ----------
    img : CArray or PIL.Image.Image
        Image to plot; CArray inputs are converted to ndarray first.
    """
    data = img.tondarray() if isinstance(img, CArray) else img
    return self._sp.imshow(data, *args, **kwargs)
def matshow(self, array, *args, **kwargs):
    """Display a CArray as a matrix image.

    Parameters
    ----------
    array : CArray
        Array to be rendered as a matrix.
    """
    raw = array.tondarray()
    return self._sp.matshow(raw, *args, **kwargs)
def quiver(self, U, V, X=None, Y=None,
           color='k', linestyle='-', linewidth=1.0, alpha=1.0):
    """Display velocity vectors as arrows with components (u,v)
    at the points (x,y).

    For example, the first vector is defined by components
    u(1), v(1) and is displayed at the point x(1), y(1).

    quiver(x,y,u,v) plots vectors as arrows at the coordinates
    specified in each corresponding pair of elements in x and y.
    quiver(u,v) draws vectors specified by u and v at equally
    spaced points in the x-y plane.

    Parameters
    ----------
    U, V : CArray
        Give the x and y components of the arrow vectors.
    X, Y : CArray, optional
        The x and y coordinates of the arrow locations
        (default is tail of arrow; see pivot kwarg).
    color :
        Color of the gradient directions.
    linestyle : str
        ['solid' | 'dashed', 'dashdot', 'dotted' |
        (offset, on-off-dash-seq) | '-' | '--' | '-.' | ':' |
        'None' | ' ' | '']
    linewidth : float
        Width of the line.
    alpha : float
        Transparency factor of the directions.

    Returns
    -------
    The handle returned by the underlying matplotlib quiver call
    (previously discarded; now returned for consistency with
    imshow/matshow).
    """
    # NOTE(review): despite the old "scalar or CArray" wording, every input
    # must provide .tondarray(); a plain scalar would fail here — confirm.
    # If X is given, Y must be given as well (Y alone is silently ignored).
    if X is None:
        return self._sp.quiver(U.tondarray(), V.tondarray(),
                               color=color, linestyle=linestyle,
                               linewidth=linewidth, alpha=alpha)
    return self._sp.quiver(X.tondarray(), Y.tondarray(),
                           U.tondarray(), V.tondarray(),
                           color=color, linestyle=linestyle,
                           linewidth=linewidth, alpha=alpha)
|
<reponame>Johnzhjw/MOE-DGNAS
# -*- coding: utf-8 -*-
from MOP_GNN_torch import MyProblem # 导入自定义问题接口
import sys
import os
import datetime
import argparse
import torch
import numpy as np
import random
from dgl.data import register_data_args, load_data
from search_space import MacroSearchSpace
class Logger(object):
    """Tee-style stream: mirrors everything written to stdout into a log file."""

    def __init__(self, filename="log_eval.txt"):
        # Keep a handle on the real stdout so output still reaches the console.
        self.terminal = sys.stdout
        # Open in append mode so repeated runs accumulate in one log file.
        self.log = open(filename, "a")

    def write(self, message):
        """Write *message* to both the console and the log file."""
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # Bug fix: this was a silent no-op, so buffered log data could be
        # lost on interpreter exit or crash; flush both streams instead.
        self.terminal.flush()
        self.log.flush()
if __name__ == '__main__':
    # Re-evaluate the best architectures found by the evolutionary NAS run
    # on each citation dataset, then re-run the single best one 100 times.
    path = os.path.abspath(os.path.dirname(__file__))
    type = sys.getfilesystemencoding()  # NOTE(review): shadows the builtin `type`
    # sys.stdout = Logger()
    parser = argparse.ArgumentParser(description='evoGNN')
    register_data_args(parser)
    parser.add_argument("--gpu", type=int, default=-1, help="which GPU to use. Set -1 to use CPU.")
    parser.add_argument('--random_seed', type=int, default=72)
    parser.add_argument("--epochs", type=int, default=200, help="number of training epochs")
    parser.add_argument("--num-heads", type=int, default=8, help="number of hidden attention heads")
    parser.add_argument("--num-out-heads", type=int, default=1, help="number of output attention heads")
    parser.add_argument("--num-layers", type=int, default=1, help="number of hidden layers")
    parser.add_argument("--num-hidden", type=int, default=128, help="number of hidden units")
    parser.add_argument("--residual", action="store_true", default=False, help="use residual connection")
    parser.add_argument("--in-drop", type=float, default=.6, help="input feature dropout")
    parser.add_argument("--attn-drop", type=float, default=.6, help="attention dropout")
    parser.add_argument("--lr", type=float, default=0.005, help="learning rate")
    parser.add_argument('--weight-decay', type=float, default=5e-4, help="weight decay")
    parser.add_argument('--negative-slope', type=float, default=0.2, help="the negative slope of leaky relu")
    parser.add_argument('--batch-size', type=int, default=2, help="batch size used for training, validation and test")
    parser.add_argument('--early-stop', action='store_true', default=True, help="indicates whether to use early stop or not")
    parser.add_argument('--fastmode', action="store_true", default=False, help="skip re-evaluate the validation set")
    parser.add_argument('--save_model', action="store_true", default=True, help="whether save the whole model")
    parser.add_argument('--base_model', action="store_true", default=False, help="whether use the base model")
    parser.add_argument("--aggregator-type", type=str, default="pool", help="Aggregator type: mean/sum/pool/lstm")
    parser.add_argument("--attention-type", type=str, default="gat", help="Attention type: const/gcn/gat/sym-gat/cos/linear/gen_linear")
    parser.add_argument("--activation-type", type=str, default="relu", help="Attention type: linear/elu/sigmoid/tanh/relu/relu6/softplus/leaky_relu")
    parser.add_argument("--name_surrog", type=str, default="KRG_MIXINT", help="Surrogate type: LS/QP/KPLS_squar/KPLS_abs/KRG/KPLSK/IDW/RBF")
    args = parser.parse_args()
    for dataset in ["cora", "citeseer", "pubmed"]:  #"ppi",
        args.dataset = dataset
        args.gpu = 0  # NOTE(review): unconditionally overrides the --gpu CLI flag
        print(args)
        folder = 'Result_' + dataset
        # fix all RNG seeds for reproducibility
        torch.manual_seed(args.random_seed)
        if args.gpu >= 0:
            torch.cuda.manual_seed(args.random_seed)
        random.seed(args.random_seed)
        np.random.seed(args.random_seed)
        search_space = MacroSearchSpace(tag_all=True)
        MAXGEN = 50
        """===============================实例化问题对象============================"""
        maxN_layers = 2
        problem = MyProblem(args, search_space, maxN_layers)  # build the problem object
        # objective values and decision variables of the final generation
        fileObj = folder + '/pop_acc' + '%04d' % (MAXGEN-1) + '/ObjV.csv'
        fileSol = folder + '/pop_acc' + '%04d' % (MAXGEN-1) + '/Phen.csv'
        objectives = np.loadtxt(open(fileObj, "r"), delimiter=",", skiprows=0)
        solutions = np.loadtxt(open(fileSol, "r"), delimiter=",", skiprows=0)
        # keep the 10 solutions with the highest first objective
        goodId = np.argsort(-objectives[:, 0])[:10]
        solutions = solutions[goodId, :]
        num_cut = 1
        all_results = []
        for solution in solutions:
            tmp = []
            durs = []
            # retrain each candidate 10 times to average out training noise
            for _ in range(10):
                actions, model, train_loss, train_acc, val_loss, val_acc, model_val_acc, test_acc, test_acc_best, dur = \
                    problem.eval_one_solution(solution, early_stop=False, base_model=args.base_model, epochs=200)
                tmp.append(test_acc_best)
                durs.append(dur)
                print("_" * 80)
            tmp.sort()
            print(dataset, actions,
                  sum(p.numel() for p in model.parameters()), np.mean(durs), np.mean(tmp), np.std(tmp), solution)
            all_results.append([np.mean(tmp), np.std(tmp),
                                sum(p.numel() for p in model.parameters()), np.mean(durs)] + solution.tolist())
        np.savetxt(folder + '/all_test_imple2_mean_std.csv', np.array(all_results), delimiter=',')
        print("_" * 80)
        # pick the candidate with the best mean test accuracy and re-run it 100x
        best_ind = np.argsort(-np.array(all_results)[:, 0])[0]
        best_sol = solutions[best_ind]
        tmp = []
        durs = []
        for _ in range(100):
            actions, model, train_loss, train_acc, val_loss, val_acc, model_val_acc, test_acc, test_acc_best, dur = \
                problem.eval_one_solution(best_sol, early_stop=False, base_model=args.base_model, epochs=200)
            tmp.append(test_acc_best)
            durs.append(dur)
            print("_" * 80)
        tmp.sort()
        print('best: ', dataset, ' ', actions, '; ',
              sum(p.numel() for p in model.parameters()), ';',
              np.mean(durs), ';', np.mean(tmp), np.std(tmp), ';', best_sol)
        np.savetxt(folder + '/final_test_imple2_mean_std.csv',
                   [sum(p.numel() for p in model.parameters()), np.mean(durs), np.mean(tmp), np.std(tmp)] + best_sol.tolist(),
                   delimiter=',')
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from gensim.models import KeyedVectors
from sklearn.feature_selection import chi2
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# Readable aliases used in the annotations below.
GensimKeyedVectors = KeyedVectors
# Bug fix: this previously aliased np.array, which is the array *factory
# function*; np.ndarray is the actual array type and the meaningful target
# for a type hint.
NumpyArray = np.ndarray
def embed(
    text: List[str],
    model: GensimKeyedVectors,
    chi2_weights: Optional[Dict[str, float]] = None,
    tf_idf_weights: Optional[Dict[str, float]] = None,
    freq_weights: Optional[Dict[str, float]] = None,
    chi2_weights_pow: float = 1,
    tf_idf_weights_pow: float = 1,
    freq_weights_pow: float = 1,
) -> NumpyArray:
    """Embed tokens into a single vector by averaging token vectors.

    Note: duplicates are collapsed first — the average runs over the
    *unique* in-vocabulary tokens of ``text``, not over every occurrence
    (the previous docstring claimed "all token vectors").

    Args:
        text: Tokenized sentence.
        model: Embedding model in Gensim format.
        chi2_weights: Chi2 weight per word; a word missing from this dict
            raises KeyError.
        tf_idf_weights: TF-IDF weight per word (same KeyError caveat).
        freq_weights: Frequency weight per word (same KeyError caveat).
        chi2_weights_pow: Chi2 weights power.
        tf_idf_weights_pow: TF-IDF weights power.
        freq_weights_pow: Frequency weights power.

    Returns:
        Averaged (and optionally weighted) vector representing the
        sentence; a zero vector if no token is in the model vocabulary.
    """
    # NOTE(review): model.vocab is the gensim < 4.0 API (4.x renamed it to
    # key_to_index) — confirm the pinned gensim version.
    lexicon = list({w for w in text if w in model.vocab})
    if not lexicon:
        print(f"Empty lexicon in {text}")
        return np.zeros(model.vector_size)
    vectors = np.zeros((len(lexicon), model.vector_size))
    for i, word in enumerate(lexicon):
        # Combine whichever weighting schemes were supplied (neutral = 1).
        weight = 1.0
        if chi2_weights:
            weight *= np.power(chi2_weights[word], chi2_weights_pow)
        if tf_idf_weights:
            weight *= np.power(tf_idf_weights[word], tf_idf_weights_pow)
        if freq_weights:
            weight *= np.power(freq_weights[word], freq_weights_pow)
        vectors[i, :] = model[word] * weight
    # Average over the unique in-vocabulary tokens.
    return vectors.mean(axis=0)
def get_freq_weights(
    word: str,
    model: GensimKeyedVectors,
    a: float = np.float_power(10, -3),
    wcount: int = 250000000,
) -> float:
    """Weight a word by its corpus frequency (rare words weigh more).

    Some Gensim models retain the frequency distribution of their
    vocabulary; that count is turned into a smoothed inverse-probability
    weight. Words unknown to the model get a neutral weight of 1.

    Args:
        word: Raw word.
        model: Embedding model in Gensim format.
        a: Smoothing coefficient.
        wcount: Number of words in the corpus the model was trained on
            (by default, the number of words in the RNC).

    Returns:
        Word weight (rare words get bigger weights).
    """
    if word not in model:
        return 1
    probability = model.vocab[word].count / wcount
    return a / (a + probability)
def get_chi2_scores(
    contexts: NumpyArray, gold_senses: NumpyArray
) -> Tuple[NumpyArray, Dict[str, int]]:
    """Calculate chi2 scores for each word in context.

    Args:
        contexts: List of context sentences.
        gold_senses: List of gold (train) senses.

    Returns:
        Chi2 scores for each word in context and the vectorizer
        vocabulary with word indexes.
    """
    # Whitespace-only tokenization: contexts are assumed to be
    # pre-tokenized, space-joined strings; case is preserved.
    count_vectorizer = CountVectorizer(
        lowercase=False, tokenizer=lambda text: text.split()
    )
    count_vect = count_vectorizer.fit_transform(contexts)
    # chi2 returns (scores, p-values); only the scores are needed.
    # (Return annotation fixed: the function returns a 2-tuple, not a Union.)
    return chi2(count_vect, gold_senses)[0], count_vectorizer.vocabulary_
def get_tf_idf_coeffs(contexts: NumpyArray) -> Tuple[NumpyArray, Dict[str, int]]:
    """Calculate TF-IDF coefficients for each word in context.

    Args:
        contexts: List of context sentences.

    Returns:
        TF-IDF coefficients for each word in context and the vectorizer
        vocabulary with word indexes.
    """
    # Whitespace-only tokenization, case preserved (matches get_chi2_scores).
    tfidf = TfidfVectorizer(lowercase=False, tokenizer=lambda text: text.split())
    # NOTE(review): .todense() yields a numpy.matrix (deprecated); switching
    # to .toarray() would change the return type for callers, so the existing
    # behavior is kept. Return annotation fixed: a 2-tuple, not a Union.
    return tfidf.fit_transform(contexts).todense(), tfidf.vocabulary_
def save(df, corpus):
    """Write the dataset with predicted senses to a tab-separated CSV file.

    Args:
        df: Dataframe with multisense words and their contexts.
        corpus: Name of the original file.

    Returns:
        Path to the saved CSV file with predicted senses.
    """
    destination = corpus + "_predictions.csv"
    df.to_csv(destination, sep="\t", encoding="utf-8", index=False)
    print("Generated dataset: {}".format(destination))
    return destination
|
<reponame>ejnnr/steerable_pdos
import numpy as np
import math
from e2cnn.kernels.basis import KernelBasis
from e2cnn.kernels.utils import offset_iterator
from e2cnn.group import Group, IrreducibleRepresentation
from e2cnn.group import cyclic_group, dihedral_group, so2_group, o2_group
from e2cnn.group import CyclicGroup, DihedralGroup, SO2, O2
from .utils import homogenized_cheby, transform_polynomial
from .basis import DiffopBasis
from typing import Union, Tuple, Optional
class R2FlipsSolution(DiffopBasis):
    """Differential-operator basis for the flip group (C2) acting on R^2.

    Builds the basis of intertwiners between ``in_irrep`` and ``out_irrep``
    of the order-2 cyclic group as polynomial coefficients of PDOs,
    truncated by ``max_frequency`` and/or ``max_offset``.
    """

    def __init__(self,
                 group: Group,
                 in_irrep: Union[str, IrreducibleRepresentation, int],
                 out_irrep: Union[str, IrreducibleRepresentation, int],
                 axis: float,
                 max_frequency: int = None,
                 max_offset: int = None,
                 ):
        # An int stands for the flip group itself; only C2 is accepted here.
        if isinstance(group, int):
            group = cyclic_group(2)
        assert isinstance(group, CyclicGroup) and group.order() == 2
        # At least one of the two truncation criteria must be given.
        assert (max_frequency is not None or max_offset is not None), \
            'Error! Either the maximum frequency or the maximum offset for the frequencies must be set'
        self.max_frequency = max_frequency
        self.max_offset = max_offset
        assert max_frequency is None or (isinstance(max_frequency, int) and max_frequency >= 0)
        assert max_offset is None or (isinstance(max_offset, int) and max_offset >= 0)
        assert isinstance(axis, float)
        self.axis = axis
        # Resolve the input irrep from an id, a name or the object itself.
        if isinstance(in_irrep, int):
            in_irrep = group.irrep(in_irrep)
        elif isinstance(in_irrep, str):
            in_irrep = group.irreps[in_irrep]
        elif not isinstance(in_irrep, IrreducibleRepresentation):
            raise ValueError(f"'in_irrep' should be a non-negative integer, a string or an instance"
                             f" of IrreducibleRepresentation but {in_irrep} found")
        # Same resolution for the output irrep.
        if isinstance(out_irrep, int):
            out_irrep = group.irrep(out_irrep)
        elif isinstance(out_irrep, str):
            out_irrep = group.irreps[out_irrep]
        elif not isinstance(out_irrep, IrreducibleRepresentation):
            raise ValueError(f"'out_irrep' should be a non-negative integer, a string or an instance"
                             f" of IrreducibleRepresentation but {in_irrep} found")
        self.N = 1
        # frequencies of the input/output irreps
        self.fi = in_irrep.attributes['frequency']
        self.fo = out_irrep.attributes['frequency']
        self.ts = []
        self.mu = []
        # parity flag: odd combined frequency selects the "u" (sin-like) branch
        self.invert = (self.fi + self.fo) % 2
        # for each available frequency offset, build the corresponding basis vector
        for t in offset_iterator(0, 1, self.max_offset, self.max_frequency, non_negative=True):
            # the current shifted frequency
            mu = t
            if self.max_offset is not None:
                assert (math.fabs(t) <= self.max_offset), (t, self.max_offset)
            if self.max_frequency is not None:
                assert (math.fabs(mu) <= self.max_frequency), (t, mu, self.max_frequency)
            if mu > 0 or self.invert == 0.:
                # don't add sin(0*theta) as a basis since it is zero everywhere
                self.mu.append(mu)
                self.ts.append(t)
        self.dim = len(self.mu)
        self.group = group
        self.in_irrep = in_irrep
        self.out_irrep = out_irrep
        # would be set later anyway but we need it now
        self.shape = (out_irrep.size, in_irrep.size)
        coefficients = []
        # Only the 1x1 case is handled; any other shape is rejected below.
        if self.shape[0] == 1 and self.shape[1] == 1:
            for i in range(self.dim):
                mu = self.mu[i]
                out = homogenized_cheby(mu, "u" if self.invert else "t").reshape(1, 1, -1)
                coefficients.append(out)
        else:
            raise ValueError(f"Shape {self.shape} not recognized!")
        if axis != 0:
            so2 = SO2(1)
            # rotation matrix by angle_offset
            matrix = so2.irrep(1)(axis)
            # we transform the polynomial with the matrix
            coefficients = [transform_polynomial(element, matrix) for element in coefficients]
        super().__init__(coefficients)

    def __getitem__(self, idx):
        """Return the attribute dict describing basis element ``idx``."""
        assert idx < self.dim
        attr = {}
        attr["frequency"] = self.mu[idx]
        attr["order"] = abs(self.mu[idx])
        attr["invert"] = self.invert
        attr["offset"] = self.ts[idx]
        attr["idx"] = idx
        return attr

    def __eq__(self, other):
        # Equal iff same irreps, same axis, same frequencies and same parity.
        if not isinstance(other, R2FlipsSolution):
            return False
        elif self.in_irrep != other.in_irrep or self.out_irrep != other.out_irrep or self.axis != other.axis:
            return False
        else:
            return np.allclose(self.mu, other.mu) and self.invert == other.invert

    def __hash__(self):
        # NOTE(review): axis participates in __eq__ but not in the hash;
        # still consistent (equal objects hash equally), just coarser.
        return (hash(self.in_irrep) + hash(self.out_irrep) + hash(str(self.mu)) + hash(self.invert))
class R2DiscreteRotationsSolution(DiffopBasis):
    """Differential-operator basis for the cyclic group C_N acting on R^2.

    Builds the basis of intertwiners between ``in_irrep`` and ``out_irrep``
    of C_N as polynomial coefficients of PDOs, truncated by
    ``max_frequency`` and/or ``max_offset``. The four branches below cover
    the possible combinations of 1- and 2-dimensional irreps.
    """

    def __init__(self,
                 group: Union[Group, int],
                 in_irrep: Union[str, IrreducibleRepresentation, int],
                 out_irrep: Union[str, IrreducibleRepresentation, int],
                 max_frequency: int = None,
                 max_offset: int = None,
                 ):
        # An int N stands for the cyclic group C_N.
        if isinstance(group, int):
            group = cyclic_group(group)
        assert isinstance(group, CyclicGroup)
        # At least one of the two truncation criteria must be given.
        assert (max_frequency is not None or max_offset is not None), \
            'Error! Either the maximum frequency or the maximum offset for the frequencies must be set'
        self.max_frequency = max_frequency
        self.max_offset = max_offset
        assert max_frequency is None or (isinstance(max_frequency, int) and max_frequency >= 0)
        assert max_offset is None or (isinstance(max_offset, int) and max_offset >= 0)
        # Resolve the input irrep from an id, a name or the object itself.
        if isinstance(in_irrep, int):
            in_irrep = group.irrep(in_irrep)
        elif isinstance(in_irrep, str):
            in_irrep = group.irreps[in_irrep]
        elif not isinstance(in_irrep, IrreducibleRepresentation):
            raise ValueError(f"'in_irrep' should be a non-negative integer, a string or an instance"
                             f" of IrreducibleRepresentation but {in_irrep} found")
        self.n = in_irrep.attributes['frequency']
        # Same resolution for the output irrep.
        if isinstance(out_irrep, int):
            out_irrep = group.irrep(out_irrep)
        elif isinstance(out_irrep, str):
            out_irrep = group.irreps[out_irrep]
        elif not isinstance(out_irrep, IrreducibleRepresentation):
            raise ValueError(f"'out_irrep' should be a non-negative integer, a string or an instance"
                             f" of IrreducibleRepresentation but {in_irrep} found")
        self.m = out_irrep.attributes['frequency']
        self.N = group.order()
        self.ts = []
        self.invert = []
        self.mu = []
        if in_irrep.size == 2 and out_irrep.size == 2:
            self.s = []
            # m, n > 0
            for invert in range(2):
                for s in [0, 1]:
                    k = self.m - self.n * (-1) ** s
                    # for each available frequency offset, build the corresponding basis vector
                    for t in offset_iterator(k, self.N, self.max_offset, self.max_frequency):
                        # the current shifted frequency
                        mu = k + t * self.N
                        if self.max_offset is not None:
                            assert (math.fabs(t) <= self.max_offset), (t, self.max_offset)
                        if self.max_frequency is not None:
                            assert (math.fabs(mu) <= self.max_frequency), (k, t, mu, self.max_frequency)
                        self.invert.append(invert)
                        self.mu.append(mu)
                        self.s.append(s)
                        self.ts.append(t)
        elif in_irrep.size == 2 and out_irrep.size == 1:
            assert (self.m == 0 or (self.m == self.N//2 and self.N % 2 == 0))
            # n > 0, m = 0 or N/2
            for invert in range(2):
                k = self.n + self.m
                # for each available frequency offset, build the corresponding basis vector
                for t in offset_iterator(k, self.N, self.max_offset, self.max_frequency):
                    # the current shifted frequency
                    mu = k + t * self.N
                    if self.max_offset is not None:
                        assert (math.fabs(t) <= self.max_offset), (t, self.max_offset)
                    if self.max_frequency is not None:
                        assert (math.fabs(mu) <= self.max_frequency), (k, t, mu, self.max_frequency)
                    self.invert.append(invert)
                    self.mu.append(mu)
                    self.ts.append(t)
        elif in_irrep.size == 1 and out_irrep.size == 2:
            assert (self.n == 0 or (self.n == self.N // 2 and self.N % 2 == 0))
            # m > 0, n = 0 or N/2
            for invert in range(2):
                k = self.n + self.m
                # for each available frequency offset, build the corresponding basis vector
                for t in offset_iterator(k, self.N, self.max_offset, self.max_frequency):
                    # the current shifted frequency
                    mu = k + t * self.N
                    if self.max_offset is not None:
                        assert (math.fabs(t) <= self.max_offset), (t, self.max_offset)
                    if self.max_frequency is not None:
                        assert (math.fabs(mu) <= self.max_frequency), (k, t, mu, self.max_frequency)
                    self.invert.append(invert)
                    self.mu.append(mu)
                    self.ts.append(t)
        elif in_irrep.size == 1 and out_irrep.size == 1:
            assert (self.n == 0 or (self.n == self.N // 2 and self.N % 2 == 0))
            assert (self.m == 0 or (self.m == self.N // 2 and self.N % 2 == 0))
            for invert in range(2):
                k = self.m - self.n
                # for each available frequency offset, build the corresponding basis vector
                for t in offset_iterator(k, self.N, self.max_offset, self.max_frequency, non_negative=True):
                    # the current shifted frequency
                    mu = k + t * self.N
                    if self.max_offset is not None:
                        assert (math.fabs(t) <= self.max_offset), (t, self.max_offset)
                    if self.max_frequency is not None:
                        assert (math.fabs(mu) <= self.max_frequency), (k, t, mu, self.max_frequency)
                    if mu > 0 or invert == 0.:
                        # don't add sin(0*theta) as a basis since it is zero everywhere
                        self.invert.append(invert)
                        self.mu.append(mu)
                        self.ts.append(t)
        self.dim = len(self.invert)
        self.group = group
        self.in_irrep = in_irrep
        self.out_irrep = out_irrep
        # would be set later anyway but we need it now
        self.shape = (out_irrep.size, in_irrep.size)
        coefficients = []
        if self.shape[0] == 2 and self.shape[1] == 2:
            # NOTE(review): `cheby` is not among this module's imports (only
            # homogenized_cheby is) — confirm it is in scope at runtime,
            # otherwise this branch raises NameError.
            for i in range(self.dim):
                invert = self.invert[i]
                s = self.s[i]
                mu = self.mu[i]
                out = np.empty((self.shape) + (abs(mu) + 1,))
                out[0, 0, :] = cheby("t", mu, invert)
                out[0, 1, :] = -(-1)**s * cheby("u", mu, invert)
                out[1, 0, :] = cheby("u", mu, invert)
                out[1, 1, :] = (-1)**s * cheby("t", mu, invert)
                coefficients.append(out)
        elif self.shape[0] == 1 and self.shape[1] == 2:
            for i in range(self.dim):
                invert = self.invert[i]
                mu = self.mu[i]
                out = np.empty((self.shape) + (abs(mu) + 1,))
                out[0, 0, :] = (-1)**invert * homogenized_cheby(mu, "u" if invert else "t")
                out[0, 1, :] = homogenized_cheby(mu, "t" if invert else "u")
                coefficients.append(out)
        elif self.shape[0] == 2 and self.shape[1] == 1:
            for i in range(self.dim):
                invert = self.invert[i]
                mu = self.mu[i]
                out = np.empty((self.shape) + (abs(mu) + 1,))
                out[0, 0, :] = (-1)**invert * homogenized_cheby(mu, "u" if invert else "t")
                out[1, 0, :] = homogenized_cheby(mu, "t" if invert else "u")
                coefficients.append(out)
        elif self.shape[0] == 1 and self.shape[1] == 1:
            for i in range(self.dim):
                invert = self.invert[i]
                mu = self.mu[i]
                out = homogenized_cheby(mu, "u" if invert else "t").reshape(1, 1, -1)
                coefficients.append(out)
        else:
            raise ValueError(f"Shape {self.shape} not recognized!")
        super().__init__(coefficients)

    def __getitem__(self, idx):
        """Return the attribute dict describing basis element ``idx``."""
        assert idx < self.dim
        attr = {}
        attr["frequency"] = self.mu[idx]
        attr["order"] = abs(self.mu[idx])
        attr["invert"] = self.invert[idx]
        if hasattr(self, "s"):
            attr["s"] = self.s[idx]
        attr["offset"] = self.ts[idx]
        attr["idx"] = idx
        return attr

    def __eq__(self, other):
        # Equal iff same irreps, same frequencies/parities (and same `s`
        # labels, when present for the 2x2 case).
        if not isinstance(other, R2DiscreteRotationsSolution):
            return False
        elif self.in_irrep != other.in_irrep or self.out_irrep != other.out_irrep:
            return False
        elif hasattr(self, "s") and not np.allclose(self.s, other.s):
            return False
        else:
            return np.allclose(self.mu, other.mu) and self.invert == other.invert

    def __hash__(self):
        return (hash(self.in_irrep) + hash(self.out_irrep) + hash(str(self.mu)) + hash(str(self.invert)))
class R2FlipsDiscreteRotationsSolution(DiffopBasis):
def __init__(self,
group: Union[Group, int],
in_irrep: Union[str, IrreducibleRepresentation, Tuple[int]],
out_irrep: Union[str, IrreducibleRepresentation, Tuple[int, int]],
axis: float,
max_frequency: int = None,
max_offset: int = None,
):
if isinstance(group, int):
group = dihedral_group(group)
assert isinstance(group, DihedralGroup)
assert (max_frequency is not None or max_offset is not None), \
'Error! Either the maximum frequency or the maximum offset for the frequencies must be set'
self.max_frequency = max_frequency
self.max_offset = max_offset
assert isinstance(axis, float)
self.axis = axis
assert max_frequency is None or (isinstance(max_frequency, int) and max_frequency >= 0)
assert max_offset is None or (isinstance(max_offset, int) and max_offset >= 0)
if isinstance(in_irrep, tuple):
in_irrep = group.irrep(in_irrep[0], in_irrep[1])
elif isinstance(in_irrep, str):
in_irrep = group.irreps[in_irrep]
elif not isinstance(in_irrep, IrreducibleRepresentation):
raise ValueError(f"'in_irrep' should be a non-negative integer, a string or an instance"
f" of IrreducibleRepresentation but {in_irrep} found")
if isinstance(out_irrep, tuple):
out_irrep = group.irrep(out_irrep[0], out_irrep[1])
elif isinstance(out_irrep, str):
out_irrep = group.irreps[out_irrep]
elif not isinstance(out_irrep, IrreducibleRepresentation):
raise ValueError(f"'out_irrep' should be a non-negative integer, a string or an instance"
f" of IrreducibleRepresentation but {in_irrep} found")
self.N = group.rotation_order
self.m = out_irrep.attributes['frequency']
self.n = in_irrep.attributes['frequency']
self.fi = in_irrep.attributes['flip_frequency']
self.fo = out_irrep.attributes['flip_frequency']
self.ts = []
self.mu = []
if in_irrep.size == 2 and out_irrep.size == 2:
assert (self.m > 0 and self.n > 0 and self.fi == 1 and self.fo == 1)
self.s = []
# m, n > 0
self.invert = 0
for s in [0, 1]:
k = self.m - self.n * (-1) ** s
# for each available frequency offset, build the corresponding basis vector
for t in offset_iterator(k, self.N, self.max_offset, self.max_frequency):
# the current shifted frequency
mu = k + t * self.N
if self.max_offset is not None:
assert (math.fabs(t) <= self.max_offset), (t, self.max_offset)
if self.max_frequency is not None:
assert (math.fabs(mu) <= self.max_frequency), (k, t, mu, self.max_frequency)
self.mu.append(mu)
self.s.append(s)
self.ts.append(t)
elif in_irrep.size == 2 and out_irrep.size == 1:
assert ((self.m == 0 or (self.m == self.N // 2 and self.N % 2 == 0)) and (self.fi == 1))
# n > 0, m = 0 or N/2
self.invert = self.fo
k = self.n + self.m
# for each available frequency offset, build the corresponding basis vector
for t in offset_iterator(k, self.N, self.max_offset, self.max_frequency):
# the current shifted frequency
mu = k + t * self.N
if self.max_offset is not None:
assert (math.fabs(t) <= self.max_offset), (t, self.max_offset)
if self.max_frequency is not None:
assert (math.fabs(mu) <= self.max_frequency), (k, t, mu, self.max_frequency)
self.mu.append(mu)
self.ts.append(t)
elif in_irrep.size == 1 and out_irrep.size == 2:
assert ((self.n == 0 or (self.n == self.N// 2 and self.N % 2 == 0)) and self.fo == 1), (self.n, self.m, self.N, self.fi, self.fo)
# m > 0, n = 0 or N/2
self.invert = self.fi
k = self.n + self.m
# for each available frequency offset, build the corresponding basis vector
for t in offset_iterator(k, self.N, self.max_offset, self.max_frequency):
# the current shifted frequency
mu = k + t * self.N
if self.max_offset is not None:
assert (math.fabs(t) <= self.max_offset), (t, self.max_offset)
if self.max_frequency is not None:
assert (math.fabs(mu) <= self.max_frequency), (k, t, mu, self.max_frequency)
self.mu.append(mu)
self.ts.append(t)
elif in_irrep.size == 1 and out_irrep.size == 1:
assert (self.n == 0 or (self.n == self.N // 2 and self.N % 2 == 0)), (self.n, self.m, self.N, self.fi, self.fo)
assert (self.m == 0 or (self.m == self.N // 2 and self.N % 2 == 0)), (self.n, self.m, self.N, self.fi, self.fo)
self.invert = ((self.fi + self.fo) % 2)
k = self.m - self.n
# for each available frequency offset, build the corresponding basis vector
for t in offset_iterator(k, self.N, self.max_offset, self.max_frequency, non_negative=True):
# the current shifted frequency
mu = k + t * self.N
if self.max_offset is not None:
assert (math.fabs(t) <= self.max_offset), (t, self.max_offset)
if self.max_frequency is not None:
assert (math.fabs(mu) <= self.max_frequency), (k, t, mu, self.max_frequency)
if mu > 0 or self.invert == 0:
# don't add sin(0*theta) as a basis since it is zero everywhere
self.mu.append(mu)
self.ts.append(t)
self.dim = len(self.mu)
self.group = group
self.in_irrep = in_irrep
self.out_irrep = out_irrep
# would be set later anyway but we need it now
self.shape = (out_irrep.size, in_irrep.size)
coefficients = []
if self.shape[0] == 2 and self.shape[1] == 2:
for i in range(self.dim):
s = self.s[i]
mu = self.mu[i]
out = np.empty((self.shape) + (abs(mu) + 1,))
out[0, 0, :] = cheby("t", mu, self.invert)
out[0, 1, :] = -(-1)**s * cheby("u", mu, self.invert)
out[1, 0, :] = cheby("u", mu, self.invert)
out[1, 1, :] = (-1)**s * cheby("t", mu, self.invert)
coefficients.append(out)
elif self.shape[0] == 1 and self.shape[1] == 2:
for i in range(self.dim):
mu = self.mu[i]
out = np.empty((self.shape) + (abs(mu) + 1,))
out[0, 0, :] = (-1)**self.invert * homogenized_cheby(mu, "u" if self.invert else "t")
out[0, 1, :] = homogenized_cheby(mu, "t" if self.invert else "u")
coefficients.append(out)
elif self.shape[0] == 2 and self.shape[1] == 1:
for i in range(self.dim):
mu = self.mu[i]
out = np.empty((self.shape) + (abs(mu) + 1,))
out[0, 0, :] = (-1)**self.invert * homogenized_cheby(mu, "u" if self.invert else "t")
out[1, 0, :] = homogenized_cheby(mu, "t" if self.invert else "u")
coefficients.append(out)
elif self.shape[0] == 1 and self.shape[1] == 1:
for i in range(self.dim):
mu = self.mu[i]
out = homogenized_cheby(mu, "u" if self.invert else "t").reshape(1, 1, -1)
coefficients.append(out)
else:
raise ValueError(f"Shape {self.shape} not recognized!")
if axis != 0:
so2 = SO2(1)
# rotation matrix by angle_offset
matrix = so2.irrep(1)(axis)
# we transform the polynomial with the matrix
coefficients = [transform_polynomial(element, matrix) for element in coefficients]
super().__init__(coefficients)
def __getitem__(self, idx):
    """Return the attribute dictionary describing basis element ``idx``."""
    assert idx < self.dim
    frequency = self.mu[idx]
    attributes = {
        "frequency": frequency,
        "order": abs(frequency),
        "invert": self.invert,
    }
    # `s` only exists for the 2x2 (both-irreps-two-dimensional) case.
    if hasattr(self, "s"):
        attributes["s"] = self.s[idx]
    attributes["offset"] = self.ts[idx]
    attributes["idx"] = idx
    return attributes
def __eq__(self, other):
    """Solutions are equal iff irreps, frequencies, `s` and invert flag match."""
    if not isinstance(other, R2DiscreteRotationsSolution):
        return False
    if self.in_irrep != other.in_irrep:
        return False
    if self.out_irrep != other.out_irrep:
        return False
    if hasattr(self, "s") and not np.allclose(self.s, other.s):
        return False
    return np.allclose(self.mu, other.mu) and self.invert == other.invert
def __hash__(self):
    # Sum of the component hashes (identical to the original chained `+`);
    # `mu` is a list, so it is stringified to get a hashable value.
    components = (
        hash(self.in_irrep),
        hash(self.out_irrep),
        hash(str(self.mu)),
        hash(self.invert),
    )
    return sum(components)
class R2ContinuousRotationsSolution(DiffopBasis):
    """Basis of SO(2)-equivariant differential operators between two irreps.

    For each basis element a rotation frequency ``mu`` and an ``invert``
    flag (0 = cosine-like "t" profile, 1 = sine-like "u" profile) are
    collected; the polynomial coefficients are then built from Chebyshev
    polynomials via :func:`cheby`.
    """

    def __init__(
        self,
        group: Group,
        in_irrep: Union[str, IrreducibleRepresentation, int],
        out_irrep: Union[str, IrreducibleRepresentation, int],
    ):
        assert isinstance(group, SO2)

        # Accept an irrep instance, its name, or its integer frequency.
        if isinstance(in_irrep, int):
            in_irrep = group.irrep(in_irrep)
        elif isinstance(in_irrep, str):
            in_irrep = group.irreps[in_irrep]
        elif not isinstance(in_irrep, IrreducibleRepresentation):
            raise ValueError(
                f"'in_irrep' should be a non-negative integer, a string or an instance"
                f" of IrreducibleRepresentation but {in_irrep} found"
            )
        self.n = in_irrep.attributes["frequency"]

        if isinstance(out_irrep, int):
            out_irrep = group.irrep(out_irrep)
        elif isinstance(out_irrep, str):
            out_irrep = group.irreps[out_irrep]
        elif not isinstance(out_irrep, IrreducibleRepresentation):
            raise ValueError(
                f"'out_irrep' should be a non-negative integer, a string or an instance"
                # BUG FIX: the message previously interpolated `in_irrep` here,
                # reporting the wrong value for an invalid `out_irrep`.
                f" of IrreducibleRepresentation but {out_irrep} found"
            )
        self.m = out_irrep.attributes["frequency"]

        # One entry per basis element: rotation frequency and cos/sin flag.
        self.invert = []
        self.mu = []
        if in_irrep.size == 2 and out_irrep.size == 2:
            # m, n > 0: four solutions (two signs `s` x cos/sin profile)
            ss = []
            for invert in range(2):
                for s in [0, 1]:
                    mu = self.m - self.n * (-1) ** s
                    self.invert.append(invert)
                    self.mu.append(mu)
                    ss.append(s)
            self.s = np.array(ss)
        elif in_irrep.size == 2 and out_irrep.size == 1:
            assert self.m == 0
            # n > 0, m = 0: one cosine and one sine solution
            for invert in range(2):
                mu = self.n + self.m
                self.invert.append(invert)
                self.mu.append(mu)
        elif in_irrep.size == 1 and out_irrep.size == 2:
            assert self.n == 0
            # m > 0, n = 0: symmetric to the previous case
            for invert in range(2):
                mu = self.n + self.m
                self.invert.append(invert)
                self.mu.append(mu)
        elif in_irrep.size == 1 and out_irrep.size == 1:
            assert self.n == 0 and self.m == 0
            # trivial -> trivial: a single invariant solution
            self.mu.append(0)
            self.invert.append(0)

        self.dim = len(self.invert)
        self.group = group
        self.in_irrep = in_irrep
        self.out_irrep = out_irrep
        # would be set later anyway but we need it now
        self.shape = (out_irrep.size, in_irrep.size)

        # Build the polynomial coefficient arrays; layout depends only on
        # the (out, in) sizes, frequencies come from `mu`.
        coefficients = []
        if self.shape[0] == 2 and self.shape[1] == 2:
            for i in range(self.dim):
                invert = self.invert[i]
                s = self.s[i]
                mu = self.mu[i]
                out = np.empty((self.shape) + (abs(mu) + 1,))
                out[0, 0, :] = cheby("t", mu, invert)
                out[0, 1, :] = -(-1)**s * cheby("u", mu, invert)
                out[1, 0, :] = cheby("u", mu, invert)
                out[1, 1, :] = (-1)**s * cheby("t", mu, invert)
                coefficients.append(out)
        elif self.shape[0] == 1 and self.shape[1] == 2:
            for i in range(self.dim):
                invert = self.invert[i]
                mu = self.mu[i]
                out = np.empty((self.shape) + (abs(mu) + 1,))
                out[0, 0, :] = cheby("t", mu, invert)
                out[0, 1, :] = cheby("u", mu, invert)
                coefficients.append(out)
        elif self.shape[0] == 2 and self.shape[1] == 1:
            for i in range(self.dim):
                invert = self.invert[i]
                mu = self.mu[i]
                out = np.empty((self.shape) + (abs(mu) + 1,))
                out[0, 0, :] = cheby("t", mu, invert)
                out[1, 0, :] = cheby("u", mu, invert)
                coefficients.append(out)
        elif self.shape[0] == 1 and self.shape[1] == 1:
            # the unique invariant solution: the constant polynomial 1
            out = np.array([1]).reshape(1, 1, 1)
            coefficients.append(out)
        else:
            raise ValueError(f"Shape {self.shape} not recognized!")

        super().__init__(coefficients)

    def __getitem__(self, idx):
        """Return the attribute dictionary of basis element ``idx``."""
        assert idx < self.dim
        attr = {}
        attr["frequency"] = self.mu[idx]
        attr["order"] = abs(self.mu[idx])
        attr["invert"] = self.invert[idx]
        if hasattr(self, "s"):
            attr["s"] = self.s[idx]
        attr["idx"] = idx
        return attr

    def __eq__(self, other):
        if not isinstance(other, R2ContinuousRotationsSolution):
            return False
        elif self.in_irrep != other.in_irrep or self.out_irrep != other.out_irrep:
            return False
        elif hasattr(self, "s") and not np.allclose(self.s, other.s):
            return False
        else:
            return np.allclose(self.mu, other.mu) and self.invert == other.invert

    def __hash__(self):
        # `mu` and `invert` are lists, hence the str() before hashing.
        return (hash(self.in_irrep) + hash(self.out_irrep) + hash(str(self.mu)) + hash(str(self.invert)))
class R2FlipsContinuousRotationsSolution(DiffopBasis):
    """Basis of O(2)-equivariant differential operators between two irreps.

    Unlike the pure-rotation case, the flip frequencies of the irreps fix
    the angular profile, so ``invert`` is a single flag rather than a list.
    The basis can additionally be rotated by ``axis`` (the flip axis).
    """

    def __init__(self,
                 group: Group,
                 in_irrep: Union[str, IrreducibleRepresentation, Tuple[int, int]],
                 out_irrep: Union[str, IrreducibleRepresentation, Tuple[int, int]],
                 axis: float = 0.,
                 ):
        assert isinstance(group, O2)
        assert isinstance(axis, float)
        self.axis = axis

        # Accept an irrep instance, its name, or a (flip, frequency) tuple.
        if isinstance(in_irrep, tuple):
            in_irrep = group.irrep(in_irrep[0], in_irrep[1])
        elif isinstance(in_irrep, str):
            in_irrep = group.irreps[in_irrep]
        elif not isinstance(in_irrep, IrreducibleRepresentation):
            raise ValueError(f"'in_irrep' should be a non-negative integer, a string or an instance"
                             f" of IrreducibleRepresentation but {in_irrep} found")
        if isinstance(out_irrep, tuple):
            out_irrep = group.irrep(out_irrep[0], out_irrep[1])
        elif isinstance(out_irrep, str):
            out_irrep = group.irreps[out_irrep]
        elif not isinstance(out_irrep, IrreducibleRepresentation):
            # BUG FIX: the message previously interpolated `in_irrep` here,
            # reporting the wrong value for an invalid `out_irrep`.
            raise ValueError(f"'out_irrep' should be a non-negative integer, a string or an instance"
                             f" of IrreducibleRepresentation but {out_irrep} found")

        # Rotation (m, n) and flip (fo, fi) frequencies of the two irreps.
        self.m = out_irrep.attributes['frequency']
        self.n = in_irrep.attributes['frequency']
        self.fi = in_irrep.attributes['flip_frequency']
        self.fo = out_irrep.attributes['flip_frequency']
        self.mu = []
        if in_irrep.size == 2 and out_irrep.size == 2:
            assert (self.m > 0 and self.n > 0 and self.fi == 1 and self.fo == 1)
            self.s = []
            # m, n > 0: two solutions distinguished by the sign `s`
            self.invert = 0
            for s in [0, 1]:
                mu = self.m - self.n * (-1) ** s
                self.mu.append(mu)
                self.s.append(s)
        elif in_irrep.size == 2 and out_irrep.size == 1:
            assert self.m == 0 and self.fi == 1
            # n > 0, m = 0: the output flip frequency picks cos vs sin
            self.invert = self.fo
            mu = self.n + self.m
            self.mu.append(mu)
        elif in_irrep.size == 1 and out_irrep.size == 2:
            assert self.n == 0 and self.fo == 1
            # m > 0, n = 0: the input flip frequency picks cos vs sin
            self.invert = self.fi
            mu = self.n + self.m
            self.mu.append(mu)
        elif in_irrep.size == 1 and out_irrep.size == 1:
            assert self.n == 0 and self.m == 0
            self.invert = ((self.fi + self.fo) % 2)
            mu = self.m - self.n
            if mu > 0 or self.invert == 0:
                # don't add sin(0*theta) as a basis since it is zero everywhere
                self.mu.append(mu)

        self.dim = len(self.mu)
        self.group = group
        self.in_irrep = in_irrep
        self.out_irrep = out_irrep
        # would be set later anyway but we need it now
        self.shape = (out_irrep.size, in_irrep.size)
        coefficients = []
        # the basis vectors depends on the shape of the input and output irreps,
        # while their frequencies depend on the irreps frequencies
        if self.shape[0] == 2 and self.shape[1] == 2:
            for i in range(self.dim):
                s = self.s[i]
                mu = self.mu[i]
                out = np.empty((self.shape) + (abs(mu) + 1,))
                out[0, 0, :] = cheby("t", mu, self.invert)
                out[0, 1, :] = -(-1)**s * cheby("u", mu, self.invert)
                out[1, 0, :] = cheby("u", mu, self.invert)
                out[1, 1, :] = (-1)**s * cheby("t", mu, self.invert)
                coefficients.append(out)
        elif self.shape[0] == 1 and self.shape[1] == 2:
            for i in range(self.dim):
                mu = self.mu[i]
                out = np.empty((self.shape) + (abs(mu) + 1,))
                out[0, 0, :] = (-1)**self.invert * homogenized_cheby(mu, "u" if self.invert else "t")
                out[0, 1, :] = homogenized_cheby(mu, "t" if self.invert else "u")
                coefficients.append(out)
        elif self.shape[0] == 2 and self.shape[1] == 1:
            for i in range(self.dim):
                mu = self.mu[i]
                out = np.empty((self.shape) + (abs(mu) + 1,))
                out[0, 0, :] = (-1)**self.invert * homogenized_cheby(mu, "u" if self.invert else "t")
                out[1, 0, :] = homogenized_cheby(mu, "t" if self.invert else "u")
                coefficients.append(out)
        elif self.shape[0] == 1 and self.shape[1] == 1:
            for i in range(self.dim):
                mu = self.mu[i]
                out = homogenized_cheby(mu, "u" if self.invert else "t").reshape(1, 1, -1)
                coefficients.append(out)
        else:
            raise ValueError(f"Shape {self.shape} not recognized!")
        if axis != 0:
            so2 = SO2(1)
            # rotation matrix by angle_offset
            matrix = so2.irrep(1)(axis)
            # we transform the polynomial with the matrix
            coefficients = [transform_polynomial(element, matrix) for element in coefficients]
        super().__init__(coefficients)

    def __getitem__(self, idx):
        """Return the attribute dictionary of basis element ``idx``."""
        assert idx < self.dim
        attr = {}
        attr["frequency"] = self.mu[idx]
        attr["order"] = abs(self.mu[idx])
        attr["invert"] = self.invert
        if hasattr(self, "s"):
            attr["s"] = self.s[idx]
        attr["idx"] = idx
        return attr

    def __eq__(self, other):
        if not isinstance(other, R2FlipsContinuousRotationsSolution):
            return False
        elif self.in_irrep != other.in_irrep or self.out_irrep != other.out_irrep:
            return False
        elif hasattr(self, "s") and not np.allclose(self.s, other.s):
            return False
        else:
            return np.allclose(self.mu, other.mu) and self.invert == other.invert

    def __hash__(self):
        return (hash(self.in_irrep) + hash(self.out_irrep) + hash(str(self.mu)) + hash(self.invert))
def cheby(kind, mu, invert):
    """Chebyshev coefficients of the given kind ("t" or "u") for frequency mu.

    A truthy ``invert`` swaps the kind ("t" <-> "u"); an inverted "t"
    request additionally flips the sign of the result.
    """
    swap = {"u": "t", "t": "u"}
    target_kind = swap[kind] if invert else kind
    sign = -1 if (kind == "t" and invert) else 1
    return sign * homogenized_cheby(mu, target_kind)
|
#!/usr/bin/env python
# coding=utf-8
import os, re, sys
from datetime import datetime
import twitter # https://pypi.python.org/pypi/twitter
import pytz # https://pypi.python.org/pypi/pytz
import ConfigParser
# Credits:
# http://stackoverflow.com/questions/4563272/how-to-convert-a-python-utc-datetime-to-a-local-datetime-using-only-python-stand/13287083
def utc_to_local(utc_dt):
    """Convert a naive UTC datetime into US/Central local time."""
    central = pytz.timezone('US/Central')
    localized = utc_dt.replace(tzinfo=pytz.utc).astimezone(central)
    # normalize() corrects the UTC offset across DST transitions
    return central.normalize(localized)
def aslocaltimestr(utc_dt):
    """Format a UTC datetime for the door sign, e.g. '- Monday, July 4, 3:05 PM'."""
    # %-d and %-I (no zero padding) are a glibc strftime extension
    sign_format = '- %A, %B %-d, %-I:%M %p'
    return utc_to_local(utc_dt).strftime(sign_format)
# Matches the ' {NNN}' suffix (1-3 digits) appended to tweets as a cache buster.
_RANDOM_MS = re.compile(r' \{\d{1,3}\}')

def strip_random_ms(text):
    """Remove the random millisecond suffix from tweet text."""
    return _RANDOM_MS.sub('', text)
def get_last_tweet(twitter_obj, screen_name):
    """Fetch the newest tweet of *screen_name*; return (text, created datetime).

    The returned datetime is naive UTC, parsed from Twitter's
    'created_at' format.
    """
    timeline = twitter_obj.statuses.user_timeline(screen_name=screen_name,
                                                  count=1)
    newest = timeline[0]
    created = datetime.strptime(newest['created_at'],
                                '%a %b %d %H:%M:%S +0000 %Y')
    return (newest['text'], created)
def get_config(inifile):
    """Parse *inifile* and return the populated RawConfigParser."""
    parser = ConfigParser.RawConfigParser()
    parser.read(inifile)
    return parser
def setup_twitter(consumer_key, consumer_secret, credentials_file):
    """Return an OAuth-authenticated Twitter client.

    On first run (no stored credentials) the interactive oauth dance is
    performed and the token is written to *credentials_file*.
    """
    if not os.path.exists(credentials_file):
        twitter.oauth_dance("Tweet to Door Sign Converter", consumer_key,
                            consumer_secret, credentials_file)
    token, token_secret = twitter.read_token_file(credentials_file)
    auth = twitter.OAuth(token, token_secret, consumer_key, consumer_secret)
    return twitter.Twitter(auth=auth)
def main():
    """Fetch the latest tweet and write it as JSON for the door-sign display."""
    import json  # local import: module top is outside this block

    # Locate ourselves so companion files live next to the script.
    script_dir = os.path.abspath(os.path.dirname(__file__))
    script_file = os.path.basename(os.path.abspath(__file__))
    (script_name, script_ext) = os.path.splitext(script_file)
    # Identify needed files (all named after the script itself).
    credentials_filename = ".%s_app_credentials" % (script_name)
    credentials_file = os.path.join(script_dir, credentials_filename)
    ini_filename = "%s.ini" % (script_name)
    ini_file = os.path.join(script_dir, ini_filename)
    # Get configuration.
    # BUG FIX: read the absolute path (ini_file), not the bare filename,
    # so the script also works when launched from another directory.
    config = get_config(ini_file)
    consumer_key = config.get('twitter', 'CONSUMER_KEY')
    consumer_secret = config.get('twitter', 'CONSUMER_SECRET')
    screen_name = config.get('twitter', 'screen_name')
    graph_url = config.get('graph', 'url')
    # Authenticate to twitter and fetch the most recent tweet.
    t = setup_twitter(consumer_key, consumer_secret, credentials_file)
    (tweet_text, tweet_dt) = get_last_tweet(t, screen_name)
    status_filename = "%s.json" % (script_name)
    status_file = os.path.join(script_dir, status_filename)
    # Serialize with the json module so quotes/backslashes in the tweet
    # cannot produce malformed JSON (the old %-interpolation could).
    payload = json.dumps([{
        "status": strip_random_ms(tweet_text),
        "date": aslocaltimestr(tweet_dt),
        "graph": graph_url,
    }])
    with open(status_file, 'w') as f:
        f.write(payload.encode('utf-8'))

if __name__ == "__main__":
    main()
|
<filename>examples/01-web/12-dom.py
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str, bytes, dict, int
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from pattern.web import URL, DOM, plaintext
from pattern.web import NODE, TEXT, COMMENT, ELEMENT, DOCUMENT
import ssl
# Disable TLS certificate verification so the demo downloads work everywhere.
# NOTE(review): this is insecure and acceptable only in example code.
ssl._create_default_https_context = ssl._create_unverified_context
# The pattern.web module has a number of convenient search engines, as demonstrated.
# But often you will need to handle the HTML in web pages of your interest manually.
# The DOM object can be used for this, similar to the Javascript DOM.
# The DOM (Document Object Model) parses a string of HTML
# and returns a tree of nested Element objects.
# The DOM elements can then be searched by tag name, CSS id, CSS class, ...
# For example, top news entries on Reddit are coded as:
# <div class="_1poyrkZ7g36PawDueRza-J s1r3zmnv-7 bmeGah">
#    ...
#    <span class="y8HYJ-y_lTUHkQIc1mdCq yj3st6-1 kYJFRo">
#        ...
#        <a class="SQnoC3ObvgnGjWt90zD9Z " href="http://i.imgur.com/yDyPu8P.jpg">Bagel the bengal, destroyer of boxes</a>
#        ...
#    </span>
# </div>
#
# ... which - naturally - is a picture of a cat.
url = URL("http://www.reddit.com/top/")
dom = DOM(url.download(cached=True))
#print(dom.body.content)
# NOTE(review): Reddit's auto-generated class names change frequently;
# these selectors may be stale by the time you run this.
for e in dom.by_tag("div._1poyrkZ7g36PawDueRza-J s1r3zmnv-7 bmeGah")[:5]: # Top 5 reddit entries.
    for a in e.by_tag("a.SQnoC3ObvgnGjWt90zD9Z")[:1]:
        print(plaintext(a.content))
        print(a.attrs["href"])
        print("")
# The links in the HTML source code may be relative,
# e.g., "../img.jpg" instead of "www.domain.com/img.jpg".
# We can get the absolute URL by prepending the base URL.
# However, this can get messy with anchors, trailing slashes and redirected URL's.
# A good way to get absolute URL's is to use the module's abs() function:
from pattern.web import abs
# NOTE(review): this shadows the built-in abs() for the rest of the script.
url = URL("http://nodebox.net")
for link in DOM(url.download()).by_tag("a"):
    link = link.attrs.get("href", "")
    link = abs(link, base=url.redirect or url.string)
    print(link)
# The DOM object is a tree of nested Element and Text objects.
# All objects inherit from Node (check the source code).
# Node.type          : NODE, TEXT, COMMENT, ELEMENT or DOM
# Node.parent        : Parent Node object.
# Node.children      : List of child Node objects.
# Node.next          : Next Node in Node.parent.children.
# Node.previous      : Previous Node in Node.parent.children.
# DOM.head           : Element with tag name "head".
# DOM.body           : Element with tag name "body".
# Element.tag        : Element tag name, e.g. "body".
# Element.attrs      : Dictionary of tag attributes, e.g. {"class": "header"}
# Element.content    : Element HTML content as a string.
# Element.source     : Element tag + content
# Element.get_element_by_id(value)
# Element.get_elements_by_tagname(value)
# Element.get_elements_by_classname(value)
# Element.get_elements_by_attribute(name=value)
# You can also use shorter aliases (we prefer them):
# Element.by_id(), by_tag(), by_class(), by_attr().
# The tag name passed to Element.by_tag() can include
# a class (e.g., "div.message") or an id (e.g., "div#header").
# For example:
# In the <head> tag, retrieve the <meta name="keywords"> element.
# Get the string value of its "content" attribute and split into a list:
dom = DOM(URL("https://www.apple.com/uk/").download(cached=True))
kw = dom.head.by_attr(name="Description")[0]
kw = kw.attrs["content"]
print(kw)
print("")
# If you know CSS, you can also use short and handy CSS selectors:
# http://www.w3.org/TR/CSS2/selector.html
# Element(selector) will return a list of nested elements that match the given string.
dom = DOM(URL("http://www.clips.ua.ac.be").download())
for e in dom("div#ContentPlaceHolder1_ctl00_ctl01_Omkadering span div:contents p"):
    print(plaintext(e.content))
    print("")
######################################## Test Techcrunch - https://techcrunch.com/ ####################################
print("#"*40, "Test Techcrunch", "#"*40)
url = URL("https://techcrunch.com/startups/")
dom = DOM(url.download(cached=True))
# Headline + link of the first five startup posts.
for e in dom.by_tag("header.post-block__header")[:5]:
    for a in e.by_tag("h2.post-block__title")[:1]:
        print(plaintext(a.content))
        for h in a.by_tag("a.post-block__title__link")[:1]:
            print(h.attrs["href"])
        print("")
print("\n")
header = dom.by_class("river__title")[0]
print(header.content)
print("\n")
# The site's tile image is declared as a <meta> attribute.
title_image = dom.by_attr(name="msapplication-TileImage")[0]
print(title_image.attrs['content'])
print("\n")
url = URL("https://techcrunch.com")
dom = DOM(url.download(cached=True))
for k in dom.by_class("post-block__title__link"):
    print(k.content.strip())
    print("")
print("\n")
# Same data via a CSS-like selector chain.
for e in dom("header:post-block__header h2:post-block__title a:post-block__title__link"):
    print(e.content.strip())
    print(e.attrs["href"])
    print("")
################################ Test Habr - https://habr.com ####################################
print("#"*40, "Test Habr", "#"*40)
url = URL("https://habr.com")
dom = DOM(url.download(cached=True))
for e in dom.by_tag("h2.post__title")[:5]:
    for a in e.by_tag("a.post__title_link")[:1]:
        print(plaintext(a.content))
        print("")
print("\n")
# Hubs attached to each post (nested list structure).
for k in dom.by_class("post__hubs inline-list"):
    for p in k.by_tag("li.inline-list__item inline-list__item_hub"):
        for t in p.by_tag("a.inline-list__item-link hub-link "):
            print(t.content)
print("\n")
descr = dom.by_attr(name="description")[0]
print(descr.attrs['content'])
print("\n")
for p in dom("div#broadcast_tabs_posts"):
    for e in p.by_class("content-list content-list_most-read"):
        for k in e.by_tag("a.post-info__title post-info__title_large"):
            print(plaintext(k.content))
            print("")
<reponame>NWYLZW/right-click-helper
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from math import sin, cos, sqrt, acos
from typing import ClassVar
from PyQt5.QtCore import Qt, pyqtSignal, QTimer
from PyQt5.QtGui import QPaintEvent, QPainter, QColor, QPainterPath
from PyQt5.QtWidgets import QLabel, QHBoxLayout, QWidget
from src.rightClickHelper.component.core import LifeStage
from src.rightClickHelper.component.elePyWidget import ElePyWidget, watchProperty
from src.rightClickHelper.component.popover.elePyMenuPopover import ElePyMenuPopover, MenuPopoverMode, PopoverMenuItem
from src.rightClickHelper.tool.widgetTool import WidgetTool
class ElePySelect(
    ElePyWidget
):
    """element-ui style drop-down select widget.

    signal: {
        change: {
            param: (PopoverMenuItem, [int], [PopoverMenuItem]),
            command : 'Emitted when the selection changes; arguments are (the clicked menu item, the list of selected indexes, the list of all menu items)',
        }
    },
    properties: {
        placeholder: {
            type    : str,
            default : '请选择',
            command : 'Text shown while nothing is selected',
        },
        disabled: {
            type    : bool,
            default : false,
            command : 'Whether the widget is in the disabled state',
        },
        select-type: {
            type     : str,
            default  : 'single',
            candidate: ['single', 'multiple'],
            command  : 'Selection mode',
        },
        select-menu-items: {
            type    : [PopoverMenuItem],
            default : [],
            command : 'Candidate menu items',
        },
        sel-index-list: {
            type    : [int],
            default : [],
            command : 'Initially selected indexes; when select-type is single only the first one is used',
        },
    }
    """
    change = pyqtSignal(PopoverMenuItem, list, list)

    def __init__(
        self, parent=None, properties: dict = None
    ):
        # BUG FIX: the default used to be a shared mutable `{}` literal
        # (classic mutable-default-argument pitfall); behavior is unchanged.
        if properties is None:
            properties = {}
        self.__rightIconRotateAngle = 0
        self._menuPopover = None  # type: ElePyMenuPopover
        # Properties to hand over to the popover once it is created.
        self.__transformProperties = {}
        super().__init__(parent, {
            'disabled': False,
            **properties
        })

    def forbiddenItem(self, index: int):
        """Disable the menu item at *index*."""
        self._menuPopover.changeItemStatus(index, 'forbidden')

    def accessItem(self, index: int):
        """Re-enable the menu item at *index*."""
        self._menuPopover.changeItemStatus(index, '')

    def drawRightIcon(self, event: QPaintEvent):
        """Paint the arrow on the right icon, rotated by the animation angle."""
        width = self.rightIcon.width()
        unit = 6; PI = 3.1415926535897932384626433832795
        rotateAngle = -90 + self.__rightIconRotateAngle
        # Start point of the chevron, rotated around the icon center.
        theta = acos(-1/sqrt(5)) + self.__rightIconRotateAngle/180*PI
        startP = (
            int(width/2 + (unit/2)*sqrt(5)*cos(theta)),
            int(width/2 + (unit/2)*sqrt(5)*sin(theta))
        )
        painter = QPainter(self.rightIcon)
        painter.translate(*startP)
        painter.rotate(rotateAngle)
        pen = painter.pen()
        pen.setColor(QColor(202, 208, 212))
        pen.setWidthF(2)
        painter.setPen(pen)
        # Two line segments forming a "v" chevron.
        path = QPainterPath()
        path.lineTo(unit, unit)
        path.lineTo(unit*2, 0)
        painter.drawPath(path)

    def _initUi(self):
        """Build the widget: a bordered content area with a label and an arrow icon."""
        self.setLayout(QHBoxLayout())
        content = QWidget(self)
        content.setObjectName('ElePySelect-content')
        self.layout().addWidget(content)
        content.setStyleSheet('''\
            #ElePySelect-content {
                border: 1px solid rgb(220, 223, 230);
                border-radius: 5px;
                background-color: rgb(255, 255, 255);
            }
            #ElePySelect-content:hover {
                border: 1px solid rgb(192, 196, 204);
            }''')
        content.setLayout(QHBoxLayout())
        content.layout().setContentsMargins(5, 5, 5, 5)
        self.label = QLabel(content)
        WidgetTool.setFont(self.label)
        content.layout().addWidget(self.label)
        self.rightIcon = QWidget(content)
        content.layout().addWidget(self.rightIcon)
        self.rightIcon.setFixedSize(20, 20)
        # Paint the arrow ourselves instead of using an image resource.
        self.rightIcon.paintEvent = self.drawRightIcon
        self.popoverContent = content
        ElePyMenuPopover.setMenu(
            self.popoverContent, [], {
                'popover-trigger': 'click'
            }, mode=WidgetTool.getProperty(
                'select-mode', MenuPopoverMode.LIGHT
            )(self), createPopover=self.setMenuPopover
        )

    def _initUiAfter(self):
        # Guarantee the property exists so later reads never return None.
        if self.property('sel-index-list') is None:
            self.setProperty('sel-index-list', [])

    def updateLabel(self):
        """Refresh the label text/color and resize the widget to fit it."""
        placeholder = WidgetTool.getProperty(
            'placeholder', '请选择'
        )(self)  # type: str
        menuItems = WidgetTool.getProperty(
            'select-menu-items', []
        )(self)  # type: list[PopoverMenuItem]
        selItems = []  # type: list[dict]
        for index in self.selIndexList:
            if index < 0 or index >= len(menuItems): continue
            selItems.append(menuItems[index])
        dim = False
        if len(self.selIndexList) == 0 or len(selItems) == 0:
            self.label.setText(placeholder)
        else:
            self.label.setText(', '.join([
                selItem['label'] for selItem in selItems
            ]))
        # Dim the text when disabled.
        dim = dim or WidgetTool.getProperty('disabled', False)(self)
        self.label.setStyleSheet(f'''\
            QLabel {{
                color: {'rgb(192, 196, 204)' if dim else 'rgb(0, 0, 0)'};
            }}''')
        # 16777215 == QWIDGETSIZE_MAX, i.e. no explicit maximum width set.
        if self.maximumWidth() == 16777215:
            selfWidth = WidgetTool.getTextWidth(self.label) + self.rightIcon.width() + 40
        else:
            selfWidth = self.maximumWidth()
        if selfWidth < self.minimumWidth():
            selfWidth = self.minimumWidth()
        self.setMinimumWidth(selfWidth)
        self.popoverContent.setFixedWidth(selfWidth - self.rightIcon.width())
        self.parent().repaint(); self.parent().update()

    @watchProperty({
        'menu-popover-is-show': {'type': bool}
    })
    def menuPopoverVisibleChange(self, direction, oldVal, propertyName):
        """Animate the arrow (0..90 degrees) when the popover shows/hides."""
        hasAttr = hasattr(self, 'rotateRightIconTimer')
        if not hasAttr or (
            hasAttr and self.rotateRightIconTimer is None
        ):
            self.rotateRightIconTimer = QTimer(self)
        def rotate():
            # 9 degrees per tick; forwards while showing, backwards while hiding.
            self.__rightIconRotateAngle += 9 if direction else -9
            self.repaint()
            if self.__rightIconRotateAngle >= 90 or self.__rightIconRotateAngle <= 0:
                self.rotateRightIconTimer = None
                return
            if self.rotateRightIconTimer is not None:
                self.rotateRightIconTimer.singleShot(1, rotate)
        rotate()

    @watchProperty({
        'select-menu-items': {'type': list}
    })
    def selectMenuItemsChange(self, newVal, oldVal, propertyName):
        # Forwarded to the popover when it gets created.
        self.__transformProperties['menu-popover-items'] = newVal

    @watchProperty({
        'disabled': {'type': bool}
    })
    def disableChange(self, newVal, oldVal, propertyName):
        if newVal:
            self.setCursor(Qt.ForbiddenCursor)
        else:
            self.setCursor(Qt.PointingHandCursor)
        if self._menuPopover is None:
            # Popover not created yet: stash the flag for later.
            self.__transformProperties['forbiddenShow'] = newVal
        else:
            self._menuPopover.setProperty('forbiddenShow', newVal)

    @watchProperty({
        'sel-index-list': {'type': list}
    })
    def selIndexListChange(self, newVal, oldVal, propertyName):
        self.selIndexList = newVal
        # Only refresh once the UI has actually been built.
        if self._lifeStage in [
            LifeStage.INIT_UI_AFTER,
            LifeStage.INITED,
        ]: self.updateLabel()

    def currentText(self):
        """Return the text currently shown in the label."""
        return self.label.text()

    def menuPopover(self):
        """Return the underlying menu popover (None before creation)."""
        return self._menuPopover

    def indexOfMenuItem(
        self, menuItem: PopoverMenuItem
    ) -> int:
        """Index of *menuItem* in the popover (identity compare), or -1."""
        for index in range(len(self._menuPopover.menuItemWs)):
            if menuItem is self._menuPopover.menuItemWs[index]: return index
        return -1

    def isSel(
        self, menuItem: PopoverMenuItem
    ) -> bool:
        """Whether *menuItem* is currently selected."""
        if self.indexOfMenuItem(menuItem) in self.selIndexList:
            return True
        return False

    def setMenuPopover(
        self, PopoverClass: ClassVar[ElePyMenuPopover], widget: QWidget, properties: dict
    ):
        """Create the popover, wiring up click/show/hide handlers."""
        properties = {
            **properties,
            **self.__transformProperties
        }
        self._menuPopover = PopoverClass(widget, properties)  # type: ElePyMenuPopover
        def itemClicked(menuItem):
            selectType = WidgetTool.getProperty('select-type', 'single')(self)
            index = self.indexOfMenuItem(menuItem)
            if index != -1:
                if not self.isSel(menuItem):
                    if selectType == 'single':
                        # Single selection replaces the previous choice.
                        self.selIndexList = []
                    self.selIndexList.append(index)
                    self.change.emit(menuItem, self.selIndexList, WidgetTool.getProperty(
                        'select-menu-items', []
                    )(self))
                else:
                    if selectType == 'multiple':
                        # Clicking a selected item toggles it off.
                        self.selIndexList.remove(index)
            self.updateLabel()
        self._menuPopover.itemClicked.connect(itemClicked)
        self._menuPopover.showed.connect(
            lambda: self.setProperty('menu-popover-is-show', True)
        )
        self._menuPopover.hided.connect(
            lambda: self.setProperty('menu-popover-is-show', False)
        )
        return self._menuPopover
|
<reponame>Covarians/dash-echarts<filename>dash_echarts/examples/heat.py
import dash_echarts
import dash, random
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_core_components as dcc
from dash.exceptions import PreventUpdate
def gen_data(num):
    """Build one week of hourly heatmap points.

    Returns a list of 24*7 dicts shaped like
    ``{'name': <random letter a-g>, 'value': [hour, day, <random 0..num>]}``,
    ordered hour-major then day.
    """
    # Hoisted: the original rebuilt this literal on every iteration.
    names = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
    result = []
    for hour in range(24):
        for day in range(7):
            result.append({
                'name': random.choice(names),
                'value': [hour, day, random.randint(0, num)],
            })
    return result
def main():
    '''
    dash_echarts examples
    name: heat with echarts
    author: dameng <<EMAIL>>
    Builds a Dash app showing a 7x24 heatmap that re-randomizes every 5s.
    '''
    app = dash.Dash(__name__)
    # Axis categories: 24 hours x 7 days.
    hours = ['12a', '1a', '2a', '3a', '4a', '5a', '6a',
            '7a', '8a', '9a', '10a', '11a',
            '12p', '1p', '2p', '3p', '4p', '5p',
            '6p', '7p', '8p', '9p', '10p', '11p']
    days = ['Saturday', 'Friday', 'Thursday',
            'Wednesday', 'Tuesday', 'Monday', 'Sunday']
    # Plain-dict ECharts option (serialized to the JS side by DashECharts).
    option = {
        'tooltip': {
            'position': 'top'
        },
        'grid': {
            'height': '50%',
            'top': '5%'
        },
        'xAxis': {
            'type': 'category',
            'data': hours,
            'splitArea': {
                'show': True
            }
        },
        'yAxis': {
            'type': 'category',
            'data': days,
            'splitArea': {
                'show': True
            }
        },
        'visualMap': {
            'min': 0,
            'max': 100,
            'calculable': True,
            'show': False,
            'orient': 'horizontal',
            'left': 'center',
            'bottom': '35%',
            'inRange': {
                'color': ['#ffff00', '#ff0000']
            }
        },
        'series': [{
            # 'symbol': 'none',
            'name': '<NAME>',
            'type': 'heatmap',
            'data': gen_data(100),
            'label': {
                'show': True,
                # 'fm' is resolved to the JS function below via fun_values.
                'formatter': 'fm',
                'textStyle': {
                    'fontSize': '10px'
                },
                'rich': {
                    'large': {
                        'color': 'black',
                        'fontSize': '30px'
                    },
                    'small': {
                        'color': 'darkblue',
                        'fontSize': '15px'
                    }
                },
            },
            'emphasis': {
                'itemStyle': {
                    'shadowBlur': 10,
                    'shadowColor': 'rgba(0, 0, 0, 0.5)'
                }
            }
        }]
    }
    app.layout = html.Div([
        dash_echarts.DashECharts(
            # Demo of fun_effects: both run the same JS function 't'.
            fun_effects=[
                {'name':'t', 'option':{'a':1, 'b':2}},
                {'name':'t', 'option':{'a':3, 'b':4}}
            ],
            # JavaScript helpers shipped to the client as source strings.
            funs = {
                "t": '''
                function(option){
                    console.log(option)
                    console.log('hahahahehehe')
                    console.log(this.m)
                    this.m()
                }
                ''',
                "m": '''
                function(){
                    console.log('yiiyiyiyiyiiy')
                }
                ''',
                "fm": '''
                function (p){
                    if (p.value[2]<60)
                        return '{small|'+p.name+'}'+
                        '\\n\\n'+
                        '{large|'+p.value[2]+'}';
                    return '{small|'+p.name+'}'+
                    '\\n\\n'+
                    p.value[2];
                }
                '''
            },
            option = option,
            id='echarts',
            fun_values=['fm'],
            # fun_keys=['formatter'],
            # fun_paths={'fm': ['series', '0', 'label', 'formatter']},
            style={
                "width": '100vw',
                "height": '100vh',
            }
        ),
        # Triggers a data refresh every 5 seconds.
        dcc.Interval(id="interval", interval=5 * 1000, n_intervals=0),
    ])
    @app.callback(
        Output('echarts', 'option'),
        [Input('interval', 'n_intervals')])
    def update(n_intervals):
        # Skip the initial firing; afterwards swap in fresh random data.
        if n_intervals == 0:
            raise PreventUpdate
        else:
            option['series'][0]['data'] = gen_data(100)
        return option
    app.run_server(debug=True)

if __name__ == '__main__':
    main()
|
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import sys
import copy
from math import cos, sin
# ArcBallT and this tutorials set of points/vectors/matrix types
from ArcBall import *
PI2 = 2.0*3.1415926535 # 2 * PI (not squared!) // PI Squared
# *********************** Globals ***********************
# Python 2.2 defines these directly
try:
    True
except NameError:
    # Pre-2.2 compatibility shim: define the boolean constants ourselves.
    True = 1 == 1
    False = 1 == 0
g_Transform = Matrix4fT()  # accumulated 4x4 model transform applied in Draw()
g_LastRot = Matrix3fT()  # rotation frozen when the mouse button last changed state
g_ThisRot = Matrix3fT()  # rotation being built during the current drag
g_ArcBall = ArcBallT(640, 480)  # arcball helper sized to the initial window
g_isDragging = False  # True while the left button is held down
g_quadratic = None  # GLU quadric, created in Initialize()
# A general OpenGL initialization function. Sets all of the initial parameters.
# We call this right after our OpenGL window is created.
def Initialize(Width, Height):
    """One-time OpenGL state setup, called right after the window is created.

    Width/Height are accepted for API symmetry but not used here.
    Returns True on completion.
    """
    global g_quadratic
    # This Will Clear The Background Color To Black
    glClearColor(0.0, 0.0, 0.0, 1.0)
    # Enables Clearing Of The Depth Buffer
    glClearDepth(1.0)
    # The Type Of Depth Test To Do
    glDepthFunc(GL_LEQUAL)
    # Enables Depth Testing
    glEnable(GL_DEPTH_TEST)
    # Select Flat Shading (Nice Definition Of Objects)
    glShadeModel(GL_FLAT)
    # Really Nice Perspective Calculations
    glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)
    # Shared quadric object, used by Draw() to render the sphere.
    g_quadratic = gluNewQuadric()
    gluQuadricNormals(g_quadratic, GLU_SMOOTH)
    gluQuadricDrawStyle(g_quadratic, GLU_FILL)
    # Why? this tutorial never maps any textures?! ?
    # gluQuadricTexture(g_quadratic, GL_TRUE);			# // Create Texture Coords
    glEnable(GL_LIGHT0)
    glEnable(GL_LIGHTING)
    glEnable(GL_COLOR_MATERIAL)
    return True
def Upon_Drag(cursor_x, cursor_y):
    """Mouse cursor is moving.

    Glut calls this function (when a mouse button is down) and passes the
    mouse cursor position in window coords as the mouse moves. Updates the
    global rotation matrices from the arcball drag.
    """
    global g_isDragging, g_LastRot, g_Transform, g_ThisRot
    if (g_isDragging):
        mouse_pt = Point2fT(cursor_x, cursor_y)
        # // Update End Vector And Get Rotation As Quaternion
        ThisQuat = g_ArcBall.drag(mouse_pt)
        # // Convert Quaternion Into Matrix3fT
        g_ThisRot = Matrix3fSetRotationFromQuat4f(ThisQuat)
        # Use correct Linear Algebra matrix multiplication C = A * B
        # // Accumulate Last Rotation Into This One
        g_ThisRot = Matrix3fMulMatrix3f(g_LastRot, g_ThisRot)
        # // Set Our Final Transform's Rotation From This One
        g_Transform = Matrix4fSetRotationFromMatrix3f(g_Transform, g_ThisRot)
    return
def Upon_Click(button, button_state, cursor_x, cursor_y):
    """Mouse button clicked.

    Glut calls this function when a mouse button is clicked or released.
    Right-click resets the rotation; left press starts an arcball drag,
    left release freezes the current rotation.
    """
    global g_isDragging, g_LastRot, g_Transform, g_ThisRot
    g_isDragging = False
    if (button == GLUT_RIGHT_BUTTON and button_state == GLUT_UP):
        # Right button click
        g_LastRot = Matrix3fSetIdentity()  # // Reset Rotation
        g_ThisRot = Matrix3fSetIdentity()  # // Reset Rotation
        g_Transform = Matrix4fSetRotationFromMatrix3f(
            g_Transform, g_ThisRot)  # // Reset Rotation
    elif (button == GLUT_LEFT_BUTTON and button_state == GLUT_UP):
        # Left button released
        # // Set Last Static Rotation To Last Dynamic One
        g_LastRot = copy.copy(g_ThisRot)
    elif (button == GLUT_LEFT_BUTTON and button_state == GLUT_DOWN):
        # Left button clicked down
        # // Set Last Static Rotation To Last Dynamic One
        g_LastRot = copy.copy(g_ThisRot)
        # // Prepare For Dragging
        g_isDragging = True
        mouse_pt = Point2fT(cursor_x, cursor_y)
        # // Update Start Vector And Prepare For Dragging
        g_ArcBall.click(mouse_pt)
    return
def Torus(MinorRadius, MajorRadius):
    """Draw a torus with per-vertex normals as a single GL_TRIANGLE_STRIP.

    MinorRadius -- radius of the tube cross-section
    MajorRadius -- distance from the torus centre to the tube centre
    """
    glBegin(GL_TRIANGLE_STRIP)
    # range() instead of xrange(): works on both Python 2 and 3,
    # and the lists here are tiny (20 elements).
    for i in range(20):  # // Stacks
        for j in range(-1, 20):  # // Slices
            # NOTE, python's definition of modulus for negative numbers returns
            # results different than C's
            #       (a / d)*d  +  a % d  =  a
            if (j < 0):
                wrapFrac = -((-j % 20)/20.0)
            else:
                wrapFrac = (j % 20)/20.0
            phi = PI2*wrapFrac
            sinphi = sin(phi)
            cosphi = cos(phi)
            r = MajorRadius + MinorRadius*cosphi
            # Hoist the two stack angles: the original recomputed each
            # sin/cos argument four times per vertex pair.
            # The original wrote `i+1 % 20`, which binds as `i + (1 % 20)`,
            # i.e. simply i+1; that is harmless because sin/cos are
            # 2*pi-periodic, and we keep it for bit-compatibility.
            a0 = PI2*(i % 20 + wrapFrac)/20.0
            a1 = PI2*(i + 1 + wrapFrac)/20.0
            glNormal3f(sin(a0)*cosphi, sinphi, cos(a0)*cosphi)
            glVertex3f(sin(a0)*r, MinorRadius*sinphi, cos(a0)*r)
            glNormal3f(sin(a1)*cosphi, sinphi, cos(a1)*cosphi)
            glVertex3f(sin(a1)*r, MinorRadius*sinphi, cos(a1)*r)
    glEnd()  # // Done Torus
    return
def Draw():
    """GLUT display callback: renders the arcball-rotated torus and sphere.

    Both objects share the same dynamic transform (g_Transform); the torus
    is drawn left of center, the gluSphere right of center.
    """
    # // Clear Screen And Depth Buffer
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    # // Reset The Current Modelview Matrix
    glLoadIdentity()
    # // Move Left 1.5 Units And Into The Screen 6.0
    glTranslatef(-1.5, 0.0, -6.0)
    # // NEW: Prepare Dynamic Transform
    glPushMatrix()
    # // NEW: Apply Dynamic Transform
    glMultMatrixf(g_Transform)
    glColor3f(0.75, 0.75, 1.0)
    Torus(0.30, 1.00)
    # // NEW: Unapply Dynamic Transform
    glPopMatrix()
    # // Reset The Current Modelview Matrix
    glLoadIdentity()
    # // Move Right 1.5 Units And Into The Screen 6.0
    glTranslatef(1.5, 0.0, -6.0)
    # // NEW: Prepare Dynamic Transform
    glPushMatrix()
    # // NEW: Apply Dynamic Transform
    glMultMatrixf(g_Transform)
    glColor3f(1.0, 0.75, 0.75)
    gluSphere(g_quadratic, 1.3, 20, 20)
    # // NEW: Unapply Dynamic Transform
    glPopMatrix()
    # // Flush The GL Rendering Pipeline
    glFlush()
    glutSwapBuffers()
    return
|
# pylint: disable=W0611
'''
Kivy Base
=========
This module contains core Kivy functionality and is not intended for end users.
Feel free to look through it, but calling any of these methods directly may
well result in unpredictable behavior.
Event loop management
---------------------
'''
# Public API of this module.
__all__ = (
    'EventLoop',
    'EventLoopBase',
    'ExceptionHandler',
    'ExceptionManagerBase',
    'ExceptionManager',
    'runTouchApp',
    'stopTouchApp',
)
import sys
from kivy.config import Config
from kivy.logger import Logger
from kivy.utils import platform
from kivy.clock import Clock
from kivy.event import EventDispatcher
from kivy.lang import Builder
from kivy.context import register_context
# private vars
# Module-level singleton; replaced with a real EventLoopBase instance
# further down in this module.
EventLoop = None
class ExceptionHandler(object):
    '''Base handler that catches exceptions in :func:`runTouchApp`.
    You can subclass and extend it as follows::
        class E(ExceptionHandler):
            def handle_exception(self, inst):
                Logger.exception('Exception caught by ExceptionHandler')
                return ExceptionManager.PASS
        ExceptionManager.add_handler(E())
    All exceptions will be set to PASS, and logged to the console!
    '''
    def __init__(self):
        pass
    def handle_exception(self, exception):
        '''Handle one exception. The default implementation returns
        ExceptionManager.RAISE, which causes the exception to be
        re-raised by the caller.
        '''
        return ExceptionManager.RAISE
class ExceptionManagerBase:
    '''Registry of exception handlers consulted by the main loop.

    Every registered handler is asked about each exception; if any of
    them answers ``PASS`` the exception is passed over, otherwise the
    configured ``policy`` (``RAISE`` by default) applies.
    '''
    RAISE = 0  # re-raise the exception (default policy)
    PASS = 1   # swallow the exception and keep running
    def __init__(self):
        self.handlers = []
        self.policy = ExceptionManagerBase.RAISE
    def add_handler(self, cls):
        '''Register a handler; duplicates are ignored.'''
        if cls not in self.handlers:
            self.handlers.append(cls)
    def remove_handler(self, cls):
        '''Unregister a previously added handler (no-op if absent).'''
        if cls in self.handlers:
            self.handlers.remove(cls)
    def handle_exception(self, inst):
        '''Consult every handler about *inst*; return PASS if any handler
        returned PASS, otherwise the default policy.'''
        decision = self.policy
        answers = [handler.handle_exception(inst) for handler in self.handlers]
        if ExceptionManagerBase.PASS in answers:
            decision = ExceptionManagerBase.PASS
        return decision
#: Instance of a :class:`ExceptionManagerBase` implementation, registered
#: as a Kivy context object so it can be swapped per-context.
ExceptionManager = register_context('ExceptionManager', ExceptionManagerBase)
class EventLoopBase(EventDispatcher):
    '''Main event loop. This loop handles the updating of input and
    dispatching events.

    Lifecycle: start() -> run()/idle() per frame -> close()/exit().
    The `status` attribute moves through 'idle' -> 'started' ->
    'stopped' -> 'closed'.
    '''
    __events__ = ('on_start', 'on_pause', 'on_stop')
    def __init__(self):
        super(EventLoopBase, self).__init__()
        self.quit = False                     # set True to leave run()
        self.input_events = []                # queued (etype, motionevent) tuples
        self.postproc_modules = []            # input post-processing filters
        self.status = 'idle'
        self.input_providers = []
        self.input_providers_autoremove = []  # providers removed again on stop()
        self.event_listeners = []             # objects receiving 'on_motion'
        self.window = None                    # main window, set via set_window()
        self.me_list = []                     # motion events currently alive
    @property
    def touches(self):
        '''Return the list of all touches currently in down or move states.
        '''
        return self.me_list
    def ensure_window(self):
        '''Ensure that we have a window; abort the process otherwise.
        '''
        import kivy.core.window  # NOQA: imported for its window-creating side effect
        if not self.window:
            Logger.critical('App: Unable to get a Window, abort.')
            sys.exit(1)
    def set_window(self, window):
        '''Set the window used for the event loop.
        '''
        self.window = window
    def add_input_provider(self, provider, auto_remove=False):
        '''Add a new input provider to listen for touch events.
        With `auto_remove`, the provider is also removed automatically
        when the loop stops.
        '''
        if provider not in self.input_providers:
            self.input_providers.append(provider)
            if auto_remove:
                self.input_providers_autoremove.append(provider)
    def remove_input_provider(self, provider):
        '''Remove an input provider.
        '''
        if provider in self.input_providers:
            self.input_providers.remove(provider)
    def add_event_listener(self, listener):
        '''Add a new event listener for getting touch events.
        '''
        if not listener in self.event_listeners:
            self.event_listeners.append(listener)
    def remove_event_listener(self, listener):
        '''Remove an event listener from the list.
        '''
        if listener in self.event_listeners:
            self.event_listeners.remove(listener)
    def start(self):
        '''Must be called only once before run().
        This starts all configured input providers.'''
        self.status = 'started'
        self.quit = False
        for provider in self.input_providers:
            provider.start()
        self.dispatch('on_start')
    def close(self):
        '''Exit from the main loop and stop all configured
        input providers.'''
        self.quit = True
        self.stop()
        self.status = 'closed'
    def stop(self):
        '''Stop all input providers and call callbacks registered using
        EventLoop.add_stop_callback().'''
        # XXX stop in reverse order that we started them!! (like push
        # pop), very important because e.g. wm_touch and WM_PEN both
        # store old window proc and the restore, if order is messed big
        # problem happens, crashing badly without error
        for provider in reversed(self.input_providers[:]):
            provider.stop()
            if provider in self.input_providers_autoremove:
                self.input_providers_autoremove.remove(provider)
                self.input_providers.remove(provider)
        # ensure any restart will not break anything later.
        self.input_events = []
        self.status = 'stopped'
        self.dispatch('on_stop')
    def add_postproc_module(self, mod):
        '''Add a postproc input module (DoubleTap, TripleTap, DeJitter
        RetainTouch are defaults).'''
        if mod not in self.postproc_modules:
            self.postproc_modules.append(mod)
    def remove_postproc_module(self, mod):
        '''Remove a postproc module.'''
        if mod in self.postproc_modules:
            self.postproc_modules.remove(mod)
    def post_dispatch_input(self, etype, me):
        '''This function is called by dispatch_input() when we want to dispatch
        an input event. The event is dispatched to all listeners and if
        grabbed, it's dispatched to grabbed widgets.
        '''
        # update available list
        if etype == 'begin':
            self.me_list.append(me)
        elif etype == 'end':
            if me in self.me_list:
                self.me_list.remove(me)
        # dispatch to listeners
        if not me.grab_exclusive_class:
            for listener in self.event_listeners:
                listener.dispatch('on_motion', etype, me)
        # dispatch grabbed touch
        me.grab_state = True
        for _wid in me.grab_list[:]:
            # it's a weakref, call it!
            wid = _wid()
            if wid is None:
                # object is gone, stop.
                me.grab_list.remove(_wid)
                continue
            root_window = wid.get_root_window()
            if wid != root_window and root_window is not None:
                # Transform the event into the grabbed widget's local
                # coordinates; push() saves the untransformed state so it
                # can be restored with pop() afterwards.
                me.push()
                w, h = root_window.system_size
                if platform == 'ios':
                    w, h = root_window.size
                kheight = root_window.keyboard_height
                smode = root_window.softinput_mode
                me.scale_for_screen(w, h, rotation=root_window.rotation,
                                    smode=smode, kheight=kheight)
                parent = wid.parent
                # and do to_local until the widget
                try:
                    if parent:
                        me.apply_transform_2d(parent.to_widget)
                    else:
                        me.apply_transform_2d(wid.to_widget)
                        me.apply_transform_2d(wid.to_parent)
                except AttributeError:
                    # when using inner window, an app have grab the touch
                    # but app is removed. the touch can't access
                    # to one of the parent. (i.e, self.parent will be None)
                    # and BAM the bug happen.
                    me.pop()
                    continue
            me.grab_current = wid
            wid._context.push()
            if etype == 'begin':
                # don't dispatch again touch in on_touch_down
                # a down event are nearly uniq here.
                # wid.dispatch('on_touch_down', touch)
                pass
            elif etype == 'update':
                if wid._context.sandbox:
                    with wid._context.sandbox:
                        wid.dispatch('on_touch_move', me)
                else:
                    wid.dispatch('on_touch_move', me)
            elif etype == 'end':
                if wid._context.sandbox:
                    with wid._context.sandbox:
                        wid.dispatch('on_touch_up', me)
                else:
                    wid.dispatch('on_touch_up', me)
            wid._context.pop()
            me.grab_current = None
            if wid != root_window and root_window is not None:
                me.pop()
        me.grab_state = False
    def _dispatch_input(self, *ev):
        '''Queue one (etype, motionevent) tuple; a duplicate of the same
        event is moved to the end of the queue instead of appearing twice.'''
        # remove the save event for the touch if exist
        if ev in self.input_events:
            self.input_events.remove(ev)
        self.input_events.append(ev)
    def dispatch_input(self):
        '''Called by idle() to read events from input providers, pass events to
        postproc, and dispatch final events.
        '''
        # first, acquire input events
        for provider in self.input_providers:
            provider.update(dispatch_fn=self._dispatch_input)
        # execute post-processing modules
        for mod in self.postproc_modules:
            self.input_events = mod.process(events=self.input_events)
        # real dispatch input
        input_events = self.input_events
        pop = input_events.pop
        post_dispatch_input = self.post_dispatch_input
        while input_events:
            post_dispatch_input(*pop(0))
    def idle(self):
        '''This function is called after every frame. By default:
        * it "ticks" the clock to the next frame.
        * it reads all input and dispatches events.
        * it dispatches `on_update`, `on_draw` and `on_flip` events to the
          window.
        '''
        # update dt
        Clock.tick()
        # read and dispatch input from providers
        self.dispatch_input()
        # flush all the canvas operation
        Builder.sync()
        # tick before draw
        Clock.tick_draw()
        # flush all the canvas operation
        Builder.sync()
        window = self.window
        if window and window.canvas.needs_redraw:
            window.dispatch('on_draw')
            window.dispatch('on_flip')
        # don't loop if we don't have listeners !
        if len(self.event_listeners) == 0:
            Logger.error('Base: No event listeners have been created')
            Logger.error('Base: Application will leave')
            self.exit()
            return False
        # NOTE(review): returns the quit flag (True once the loop is
        # ending) rather than a "keep running" flag — confirm with callers.
        return self.quit
    def run(self):
        '''Main loop'''
        while not self.quit:
            self.idle()
        self.exit()
    def exit(self):
        '''Close the main loop and close the window.'''
        self.close()
        if self.window:
            self.window.close()
    def on_stop(self):
        '''Event handler for `on_stop` events which will be fired right
        after all input providers have been stopped.'''
        pass
    def on_pause(self):
        '''Event handler for `on_pause` which will be fired when
        the event loop is paused.'''
        pass
    def on_start(self):
        '''Event handler for `on_start` which will be fired right
        after all input providers have been started.'''
        pass
#: EventLoop instance: the module-wide singleton used by runTouchApp()
#: and stopTouchApp().
EventLoop = EventLoopBase()
def _run_mainloop():
    '''Fallback mainloop executed when no window has been created.

    Runs EventLoop.run() until completion, funnelling any raised
    exception through the ExceptionManager: a RAISE verdict stops the
    app and re-raises, any other verdict (PASS) restarts the loop.
    '''
    while True:
        try:
            EventLoop.run()
            stopTouchApp()
            break
        except BaseException as inst:
            # use exception manager first
            r = ExceptionManager.handle_exception(inst)
            if r == ExceptionManager.RAISE:
                stopTouchApp()
                raise
            else:
                pass
def runTouchApp(widget=None, slave=False):
    '''Static main function that starts the application loop.
    You can access some magic via the following arguments:
    :Parameters:
        `<empty>`
            To make dispatching work, you need at least one
            input listener. If not, application will leave.
            (MTWindow act as an input listener)
        `widget`
            If you pass only a widget, a MTWindow will be created
            and your widget will be added to the window as the root
            widget.
        `slave`
            No event dispatching is done. This will be your job.
        `widget + slave`
            No event dispatching is done. This will be your job but
            we try to get the window (must be created by you beforehand)
            and add the widget to it. Very useful for embedding Kivy
            in another toolkit. (like Qt, check kivy-designed)
    '''
    from kivy.input import MotionEventFactory, kivy_postproc_modules
    # Ok, we got one widget, and we are not in slave mode
    # so, user don't create the window, let's create it for him !
    if widget:
        EventLoop.ensure_window()
    # Instance all configured input
    for key, value in Config.items('input'):
        Logger.debug('Base: Create provider from %s' % (str(value)))
        # split value into "provider_id,args"
        args = str(value).split(',', 1)
        if len(args) == 1:
            args.append('')
        provider_id, args = args
        provider = MotionEventFactory.get(provider_id)
        if provider is None:
            Logger.warning('Base: Unknown <%s> provider' % str(provider_id))
            continue
        # create provider; providers added here are auto-removed on stop
        p = provider(key, args)
        if p:
            EventLoop.add_input_provider(p, True)
    # add postproc modules
    for mod in list(kivy_postproc_modules.values()):
        EventLoop.add_postproc_module(mod)
    # add main widget
    if widget and EventLoop.window:
        if widget not in EventLoop.window.children:
            EventLoop.window.add_widget(widget)
    # start event loop
    Logger.info('Base: Start application main loop')
    EventLoop.start()
    # we are in a slave mode, don't do dispatching.
    if slave:
        return
    # in non-slave mode, there are 2 cases
    #
    # 1. if user created a window, call the mainloop from window.
    #    This is due to glut, it need to be called with
    #    glutMainLoop(). Only FreeGLUT got a gluMainLoopEvent().
    #    So, we are executing the dispatching function inside
    #    a redisplay event.
    #
    # 2. if no window is created, we are dispatching the event loop
    #    ourself (previous behavior.)
    #
    try:
        if EventLoop.window is None:
            _run_mainloop()
        else:
            EventLoop.window.mainloop()
    finally:
        stopTouchApp()
def stopTouchApp():
    '''Stop the current application by leaving the main loop.

    Safe to call at any time: does nothing when the event loop does not
    exist yet or is not in the 'started' state.
    '''
    if EventLoop is None:
        return
    if EventLoop.status != 'started':
        return
    Logger.info('Base: Leaving application in progress...')
    EventLoop.close()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 5 16:44:23 2017
@author: <NAME>
This python library contains some useful functions to deal with
prime numbers and whole numbers.
Overview:
isPrime(number)
sieveEr(N)
getPrimeNumbers(N)
primeFactorization(number)
greatestPrimeFactor(number)
smallestPrimeFactor(number)
getPrime(n)
getPrimesBetween(pNumber1, pNumber2)
----
isEven(number)
isOdd(number)
gcd(number1, number2) // greatest common divisor
kgV(number1, number2) // least common multiple
getDivisors(number) // all divisors of 'number' inclusive 1, number
isPerfectNumber(number)
NEW-FUNCTIONS
simplifyFraction(numerator, denominator)
factorial (n) // n!
fib (n) // calculate the n-th fibonacci term.
-----
goldbach(number) // Goldbach's assumption
"""
def isPrime(number):
    """
    input: positive integer 'number'
    returns true if 'number' is prime otherwise false.
    """
    import math  # for function sqrt
    # precondition
    assert isinstance(number, int) and (number >= 0), \
        "'number' must been an int and positive"
    # 0 and 1 are not prime; any other number is prime unless a divisor
    # no greater than sqrt(number) is found.
    result = number > 1
    upper = int(round(math.sqrt(number)))
    for candidate in range(2, upper + 1):
        if number % candidate == 0:
            result = False
            break
    # postcondition
    assert isinstance(result, bool), "'status' must been from type bool"
    return result
# ------------------------------------------
def sieveEr(N):
    """
    input: positive integer 'N' > 2
    returns a list of prime numbers from 2 up to N (inclusive).

    This function implements the sieve of Eratosthenes: every multiple
    of each surviving number is crossed out, so only primes remain.
    The previous implementation used a pairwise O(N^2) divisibility
    scan; the true sieve below is O(N log log N) with identical output.
    """
    # precondition
    assert isinstance(N, int) and (N > 2), "'N' must been an int and > 2"
    # is_candidate[i] stays True while i may still be prime (indexes 0..N).
    is_candidate = [True] * (N + 1)
    is_candidate[0] = is_candidate[1] = False
    for i in range(2, N + 1):
        if is_candidate[i]:
            # i is prime; cross out all larger multiples of i.
            # Starting at i*i is safe: smaller multiples were already
            # crossed out by smaller primes.
            for j in range(i * i, N + 1, i):
                is_candidate[j] = False
    # collect the surviving (prime) numbers, ascending.
    ans = [x for x in range(2, N + 1) if is_candidate[x]]
    # postcondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
# --------------------------------
def getPrimeNumbers(N):
    """
    input: positive integer 'N' > 2
    returns a list of prime numbers from 2 up to N (inclusive)
    This function is more efficient as function 'sieveEr(...)'
    """
    # precondition
    assert isinstance(N, int) and (N > 2), "'N' must been an int and > 2"
    # collect every number in [2, N] that passes the primality test
    ans = [number for number in range(2, N + 1) if isPrime(number)]
    # postcondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
# -----------------------------------------
def primeFactorization(number):
    """
    input: positive integer 'number' >= 0
    returns a list of the prime number factors of 'number', ascending
    and with multiplicity (e.g. 12 -> [2, 2, 3]).
    For 0 and 1 the list [number] is returned.
    """
    # precondition
    assert isinstance(number, int) and number >= 0, \
        "'number' must been an int and >= 0"
    ans = []  # prime factors collected so far
    if number == 0 or number == 1:
        # 0 and 1 have no prime factorization; keep the historical
        # behavior of returning the number itself.
        ans.append(number)
    else:
        # Trial division with pure integer arithmetic. The previous
        # version used true division ('quotient /= factor'), which turned
        # the quotient into a float and lost precision on large inputs;
        # floor division (//=) keeps it exact. Any factor peeled off this
        # way is automatically prime, so no explicit primality test is
        # needed.
        quotient = number
        factor = 2
        while factor * factor <= quotient:
            while quotient % factor == 0:
                ans.append(factor)
                quotient //= factor
            factor += 1
        if quotient > 1:
            # whatever remains is itself a prime factor
            ans.append(quotient)
    # postcondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
# -----------------------------------------
def greatestPrimeFactor(number):
    """
    input: positive integer 'number' >= 0
    returns the greatest prime number factor of 'number'
    """
    # precondition
    assert isinstance(number, int) and (number >= 0), \
        "'number' bust been an int and >= 0"
    # the largest entry of the prime factorization is the answer
    ans = max(primeFactorization(number))
    # postcondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
# ----------------------------------------------
def smallestPrimeFactor(number):
    """
    input: integer 'number' >= 0
    returns the smallest prime number factor of 'number'
    """
    # precondition
    assert isinstance(number, int) and (number >= 0), \
        "'number' bust been an int and >= 0"
    # the smallest entry of the prime factorization is the answer
    ans = min(primeFactorization(number))
    # postcondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
# ----------------------
def isEven(number):
    """
    input: integer 'number'
    returns true if 'number' is even, otherwise false.
    """
    # precondition
    assert isinstance(number, int), "'number' must been an int"
    # (The previous version also asserted that a comparison expression is
    # a bool — a tautology in Python — so that dead check was removed.)
    return number % 2 == 0
# ------------------------
def isOdd(number):
    """
    input: integer 'number'
    returns true if 'number' is odd, otherwise false.
    """
    # precondition
    assert isinstance(number, int), "'number' must been an int"
    # (The previous version also asserted that a comparison expression is
    # a bool — a tautology in Python — so that dead check was removed.)
    return number % 2 != 0
# ------------------------
def goldbach(number):
    """
    Goldbach's assumption
    input: a even positive integer 'number' > 2
    returns a list of two prime numbers whose sum is equal to 'number'
    """
    # precondition
    assert isinstance(number, int) and (number > 2) and isEven(number), \
        "'number' must been an int, even and > 2"
    primeNumbers = getPrimeNumbers(number)  # all primes in [2, number]
    ans = []  # the resulting pair, filled by the scan below
    # scan all index pairs (first < second) in ascending order and keep
    # the first pair whose sum matches 'number'
    for first in range(len(primeNumbers)):
        if ans:
            break
        for second in range(first + 1, len(primeNumbers)):
            if primeNumbers[first] + primeNumbers[second] == number:
                ans = [primeNumbers[first], primeNumbers[second]]
                break
    # postcondition
    assert isinstance(ans, list) and (len(ans) == 2) and \
        (ans[0] + ans[1] == number) and isPrime(ans[0]) and isPrime(ans[1]), \
        "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
# ----------------------------------------------
def gcd(number1, number2):
    """
    Greatest common divisor
    input: two positive integer 'number1' and 'number2'
    returns the greatest common divisor of 'number1' and 'number2'
    """
    # precondition
    assert isinstance(number1, int) and isinstance(number2, int) \
        and (number1 >= 0) and (number2 >= 0), \
        "'number1' and 'number2' must been positive integer."
    # Euclid's algorithm: replace (a, b) with (b, a mod b) until b is 0.
    a, b = number1, number2
    while b:
        a, b = b, a % b
    # postcondition
    assert isinstance(a, int) and (a >= 0), \
        "'number' must been from type int and positive"
    return a
# ----------------------------------------------------
def kgV(number1, number2):
    """
    Least common multiple
    input: two positive integer 'number1' and 'number2'
    returns the least common multiple of 'number1' and 'number2'
    """
    # precondition
    assert isinstance(number1, int) and isinstance(number2, int) \
        and (number1 >= 1) and (number2 >= 1), \
        "'number1' and 'number2' must been positive integer."
    if number1 == 1 or number2 == 1:
        # trivial case: lcm(x, 1) == x
        ans = max(number1, number2)
    else:
        primeFac1 = primeFactorization(number1)
        primeFac2 = primeFactorization(number2)
        # lcm = product over all primes p of p ** max(count1, count2),
        # where countX is p's multiplicity in numberX's factorization.
        exponents = {}
        for prime in primeFac1:
            exponents[prime] = primeFac1.count(prime)
        for prime in primeFac2:
            exponents[prime] = max(exponents.get(prime, 0),
                                   primeFac2.count(prime))
        ans = 1
        for prime, exponent in exponents.items():
            ans *= prime ** exponent
    # postcondition
    assert isinstance(ans, int) and (ans >= 0), \
        "'ans' must been from type int and positive"
    return ans
# ----------------------------------
def getPrime(n):
    """
    Gets the n-th prime number.
    input: positive integer 'n' >= 0
    returns the n-th prime number, beginning at index 0 (getPrime(0) == 2)
    """
    # precondition
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    ans = 2  # the first prime; advanced n times below
    for _ in range(n):
        # step to the next prime strictly greater than 'ans'
        ans += 1
        while not isPrime(ans):
            ans += 1
    # postcondition
    assert isinstance(ans, int) and isPrime(ans), \
        "'ans' must been a prime number and from type int"
    return ans
# ---------------------------------------------------
def getPrimesBetween(pNumber1, pNumber2):
    """
    input: prime numbers 'pNumber1' and 'pNumber2'
        pNumber1 < pNumber2
    returns a list of all prime numbers between 'pNumber1' (exclusive)
    and 'pNumber2' (exclusive); the list is empty for consecutive primes.
    """
    # precondition
    assert isPrime(pNumber1) and isPrime(pNumber2) and (pNumber1 < pNumber2), \
        "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = pNumber1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # advance to the first prime after pNumber1
    while not isPrime(number):
        number += 1
    # collect primes until we reach (or pass) pNumber2
    while number < pNumber2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not isPrime(number):
            number += 1
    # postcondition: neither bound may appear in the result. The previous
    # check indexed ans[0] / ans[-1] unconditionally and crashed with an
    # IndexError whenever no prime lies strictly between the two
    # arguments (e.g. getPrimesBetween(2, 3)).
    assert isinstance(ans, list) and pNumber1 not in ans \
        and pNumber2 not in ans, \
        "'ans' must been a list without the arguments"
    return ans
# ----------------------------------------------------
def getDivisors(n):
    """
    input: positive integer 'n' >= 1
    returns all divisors of n (inclusive 1 and 'n'), ascending
    """
    # precondition
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    # collect every number in [1, n] that divides n without remainder
    # (the previous version imported math.sqrt without using it)
    ans = [divisor for divisor in range(1, n + 1) if n % divisor == 0]
    # postcondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, \
        "Error in function getDivisiors(...)"
    return ans
# ----------------------------------------------------
def isPerfectNumber(number):
    """
    input: positive integer 'number' > 1
    returns true if 'number' is a perfect number otherwise false.
    """
    # precondition
    assert isinstance(number, int) and (number > 1), \
        "'number' must been an int and >= 1"
    divisors = getDivisors(number)
    # sanity check on the helper's contract
    assert isinstance(divisors, list) and (divisors[0] == 1) and \
        (divisors[len(divisors) - 1] == number), \
        "Error in help-function getDivisiors(...)"
    # a perfect number equals the sum of its proper divisors, i.e. all
    # divisors except the number itself (hence the [:-1] slice)
    properDivisorSum = sum(divisors[:-1])
    return properDivisorSum == number
# ------------------------------------------------------------
def simplifyFraction(numerator, denominator):
    """
    input: two integer 'numerator' and 'denominator'
    assumes: 'denominator' != 0
    returns: a tuple with simplify numerator and denominator.
    """
    # precondition
    assert isinstance(numerator, int) and isinstance(denominator, int) \
        and (denominator != 0), \
        "The arguments must been from type int and 'denominator' != 0"
    # divide both parts by their greatest common divisor
    gcdOfFraction = gcd(abs(numerator), abs(denominator))
    # sanity check on the helper's contract
    assert isinstance(gcdOfFraction, int) and (numerator % gcdOfFraction == 0) \
        and (denominator % gcdOfFraction == 0), \
        "Error in function gcd(...,...)"
    simplifiedNumerator = numerator // gcdOfFraction
    simplifiedDenominator = denominator // gcdOfFraction
    return (simplifiedNumerator, simplifiedDenominator)
# -----------------------------------------------------------------
def factorial(n):
    """
    input: positive integer 'n'
    returns the factorial of 'n' (n!)
    """
    # precondition
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    # multiply 2 * 3 * ... * n; for n in {0, 1} the loop never runs and
    # the result stays 1 (0! == 1! == 1)
    product = 1
    factor = 2
    while factor <= n:
        product *= factor
        factor += 1
    return product
# -------------------------------------------------------------------
def fib(n):
    """
    input: positive integer 'n'
    returns the n-th fibonacci term , indexing by 0 (1, 1, 2, 3, 5, ...)
    """
    # precondition
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    # iterate the pair (previous, current) starting from (1, 1);
    # for n in {0, 1} the loop never runs and the answer stays 1
    previous, current = 1, 1
    for _ in range(n - 1):
        previous, current = current, previous + current
    return current
|
<reponame>MILeach/FLAMEGPU2_dev
import pytest
from unittest import TestCase
from pyflamegpu import *
MODEL_NAME = "something"
AGENT_NAME1 = "something2"
AGENT_NAME2 = "something3"
class ModelDescriptionTest(TestCase):
    """Tests for pyflamegpu.ModelDescription: model naming plus the
    agent, message and layer registration interfaces."""
    def test_name(self):
        """A model reports the name it was constructed with."""
        m = pyflamegpu.ModelDescription(MODEL_NAME)
        # Model has the right name
        assert m.getName() == MODEL_NAME
    def test_agent(self):
        """Agents can be created once per name and looked up again."""
        m = pyflamegpu.ModelDescription(MODEL_NAME)
        assert m.hasAgent(AGENT_NAME1) == False
        assert m.hasAgent(AGENT_NAME2) == False
        assert m.getAgentsCount() == 0
        a = m.newAgent(AGENT_NAME1)
        assert m.getAgentsCount() == 1
        b = m.newAgent(AGENT_NAME2)
        assert m.getAgentsCount() == 2
        # Cannot create agent with same name
        with pytest.raises(pyflamegpu.FGPURuntimeException) as e:
            m.newAgent(AGENT_NAME1)
        assert e.value.type() == "InvalidAgentName"
        # Two created agents are different
        assert a != b
        # Agents have the right name
        assert a.getName() == AGENT_NAME1
        assert b.getName() == AGENT_NAME2
        assert m.hasAgent(AGENT_NAME1)
        assert m.hasAgent(AGENT_NAME2)
        # Returned agent is same
        assert a == m.Agent(AGENT_NAME1)
        assert b == m.Agent(AGENT_NAME2)
        assert a == m.getAgent(AGENT_NAME1)
        assert b == m.getAgent(AGENT_NAME2)
    def test_message(self):
        """Messages can be created once per name and looked up again."""
        m = pyflamegpu.ModelDescription(MODEL_NAME)
        assert m.hasMessage(AGENT_NAME1) == False
        assert m.hasMessage(AGENT_NAME2) == False
        assert m.getMessagesCount() == 0
        a = m.newMessageBruteForce(AGENT_NAME1)
        assert m.getMessagesCount() == 1
        b = m.newMessageBruteForce(AGENT_NAME2)
        assert m.getMessagesCount() == 2
        # Cannot create message with same name
        with pytest.raises(pyflamegpu.FGPURuntimeException) as e:
            m.newMessage(AGENT_NAME1)
        assert e.value.type() == "InvalidMessageName"
        # Two created messages are different
        assert a != b
        # Messages have the right name
        assert a.getName() == AGENT_NAME1
        assert b.getName() == AGENT_NAME2
        assert m.hasMessage(AGENT_NAME1)
        assert m.hasMessage(AGENT_NAME2)
        # Returned message is same
        assert a == m.Message(AGENT_NAME1)
        assert b == m.Message(AGENT_NAME2)
        assert a == m.getMessage(AGENT_NAME1)
        assert b == m.getMessage(AGENT_NAME2)
    def test_layer(self):
        """Layers can be created once per name and looked up by name or
        by index; indexes reflect creation order."""
        m = pyflamegpu.ModelDescription(MODEL_NAME)
        assert m.hasLayer(AGENT_NAME1) == False
        assert m.hasLayer(AGENT_NAME2) == False
        assert m.hasLayer(0) == False
        assert m.hasLayer(1) == False
        assert m.getLayersCount() == 0
        a = m.newLayer(AGENT_NAME1)
        assert m.getLayersCount() == 1
        assert m.hasLayer(0)
        assert m.hasLayer(AGENT_NAME1)
        b = m.newLayer(AGENT_NAME2)
        assert m.getLayersCount() == 2
        # Cannot create layer with same name
        with pytest.raises(pyflamegpu.FGPURuntimeException) as e:
            m.newLayer(AGENT_NAME1)
        assert e.value.type() == "InvalidFuncLayerIndx"
        # Two created layers are different
        assert a != b
        # Layers have the right name
        assert a.getName() == AGENT_NAME1
        assert b.getName() == AGENT_NAME2
        assert m.hasLayer(AGENT_NAME1)
        assert m.hasLayer(AGENT_NAME2)
        assert m.hasLayer(0)
        assert m.hasLayer(1)
        # Returned layer is same
        assert a == m.Layer(AGENT_NAME1)
        assert b == m.Layer(AGENT_NAME2)
        assert a == m.Layer(0)
        assert b == m.Layer(1)
        assert a == m.getLayer(AGENT_NAME1)
        assert b == m.getLayer(AGENT_NAME2)
        assert a == m.getLayer(0)
        assert b == m.getLayer(1)
        assert 0 == a.getIndex()
        assert 1 == b.getIndex()
|
from typing import Dict, Any
import pytest
import yaml
from pydantic import ValidationError
from fidesops.graph.config import (
CollectionAddress,
ScalarField,
ObjectField,
FieldAddress,
FieldPath,
)
from fidesops.graph.graph import DatasetGraph, Edge
from fidesops.models.datasetconfig import convert_dataset_to_graph
from fidesops.schemas.dataset import FidesopsDataset
from ..graph.graph_test_util import field
example_dataset_yaml = """dataset:
- fides_key: xyz
fidesops_meta:
after: [db1, db2, db3]
name: xyz
description: x
collections:
- name: address
fidesops_meta:
after: [a.b, c.d, e.f]
fields:
- name: city
data_categories: [user.provided.identifiable.contact.city]
- name: id
data_categories: [system.operations]
fidesops_meta:
primary_key: True
data_type: integer
"""
example_dataset_nested_yaml = """dataset:
- fides_key: mongo_nested_test
name: Mongo Example Nested Test Dataset
description: Example of a Mongo dataset that contains nested data
collections:
- name: photos
fields:
- name: _id
data_categories: [system.operations]
fidesops_meta:
primary_key: True
data_type: object_id
- name: photo_id
data_categories: [user.derived.identifiable.unique_id]
fidesops_meta:
references:
- dataset: postgres_main_database
field: photo_collection.id
direction: from
data_type: integer
- name: name
data_categories: [user.provided.identifiable]
fidesops_meta:
data_type: string
- name: submitter
fidesops_meta:
data_type: string
data_categories: [user.provided.identifiable]
- name: thumbnail
fields:
- name: photo_id
fidesops_meta:
data_type: integer
- name: name
data_categories: [user.provided.identifiable]
fidesops_meta:
data_type: string
- name: submitter
fidesops_meta:
data_type: string
references:
- dataset: postgres_main_database
field: users.id
direction: from
data_categories: [user.provided.identifiable]
- name: camera_used
data_categories: [ system.operations ]
fidesops_meta:
identity: 'camera_uuid'
data_type: integer
- name: tags
fidesops_meta:
data_type: string[]
data_categories: [user.provided]
- name: comments
fidesops_meta:
data_type: object[]
fields:
- name: comment_id
- name: text
- name: submitter
"""
example_bad_dataset_nested_yaml = """dataset:
- fides_key: mongo_nested_test
name: Mongo Example Nested Test Dataset
description: Example of a Mongo dataset that contains nested data
collections:
- name: photos
fields:
- name: thumbnail
fidesops_meta:
data_type: string
fields:
- name: photo_id
data_type: integer
- name: name
data_categories: [user.provided.identifiable]
data_type: string
- name: submitter
data_type: string
data_categories: [user.provided.identifiable]
"""
def __to_dataset__(yamlstr: str) -> Dict[str, Any]:
    """Parse a dataset YAML document and return its first dataset entry."""
    document = yaml.safe_load(yamlstr)
    return document.get("dataset")[0]
def test_dataset_yaml_format():
    """Test that 'after' parameters are properly read"""
    dataset = __to_dataset__(example_dataset_yaml)
    d: FidesopsDataset = FidesopsDataset.parse_obj(dataset)
    config = convert_dataset_to_graph(d, "ignore")
    # dataset-level 'after' becomes a set of fides keys
    assert config.after == {"db1", "db2", "db3"}
    # collection-level 'after' entries become CollectionAddress objects
    assert config.collections[0].after == {
        CollectionAddress("a", "b"),
        CollectionAddress("c", "d"),
        CollectionAddress("e", "f"),
    }
def test_dataset_yaml_format_invalid_format():
    """A collection-level 'after' entry that is not of the form
    'dataset.collection' raises a validation error."""
    dataset = __to_dataset__(example_dataset_yaml)
    # corrupt the first collection-level 'after' entry
    dataset.get("collections")[0].get("fidesops_meta").get("after")[0] = "invalid"
    with pytest.raises(ValueError) as exc:
        d: FidesopsDataset = FidesopsDataset.parse_obj(dataset)
        convert_dataset_to_graph(d, "ignore")
    assert "FidesCollection must be specified in the form 'FidesKey.FidesKey'" in str(
        exc.value
    )
def test_dataset_yaml_format_invalid_fides_keys():
"""Test that 'after' parameters are properly read"""
dataset = __to_dataset__(example_dataset_yaml)
dataset.get("collections")[0].get("fidesops_meta").get("after")[
0
] = "invalid-dataset-name.invalid-collection-name"
with pytest.raises(ValueError) as exc:
d: FidesopsDataset = FidesopsDataset.parse_obj(dataset)
convert_dataset_to_graph(d, "ignore")
assert "FidesKey must only contain alphanumeric characters, '.' or '_'." in str(
exc.value
)
def test_nested_dataset_format():
dataset = __to_dataset__(example_dataset_nested_yaml)
ds = FidesopsDataset.parse_obj(dataset)
graph = convert_dataset_to_graph(ds, "ignore")
comments_field = field([graph], "mongo_nested_test", "photos", "comments")
tags_field = field([graph], "mongo_nested_test", "photos", "tags")
_id_field = field([graph], "mongo_nested_test", "photos", "_id")
thumbnail_field = field([graph], "mongo_nested_test", "photos", "thumbnail")
assert isinstance(comments_field, ObjectField)
assert comments_field.is_array
assert comments_field.data_type() == "object"
assert isinstance(comments_field.fields["text"], ScalarField)
assert comments_field.fields["text"].data_type() == "None"
assert isinstance(tags_field, ScalarField)
assert tags_field.is_array
assert isinstance(_id_field, ScalarField)
assert _id_field.is_array is False
assert isinstance(thumbnail_field, ObjectField)
assert thumbnail_field.is_array is False
assert thumbnail_field.data_type() == "object"
assert thumbnail_field.fields["photo_id"].data_type() == "integer"
assert thumbnail_field.fields["name"].data_type() == "string"
def test_nested_dataset_validation():
with pytest.raises(ValidationError):
FidesopsDataset.parse_obj(__to_dataset__(example_bad_dataset_nested_yaml))
def test_invalid_datatype():
"""Test that specifying a data type string that doesn't correspond to a supported
data type string will throw a validation error."""
bad_data_declaration = """dataset:
- fides_key: dont_care
collections:
- name: dont_care
fields:
- name: dont_care
fidesops_meta:
data_type: this_is_bad"""
dataset = __to_dataset__(bad_data_declaration)
with pytest.raises(ValidationError):
FidesopsDataset.parse_obj(dataset)
example_postgres_yaml = """dataset:
- fides_key: postgres_main_database
name: Postgres users and photos
description: Example of a Postgres reference db
collections:
- name: photo_collection
fields:
- name: id
data_categories: [system.operations]
fidesops_meta:
primary_key: True
data_type: integer
- name: users
fields:
- name: name
data_categories: [ user.provided.identifiable.name]
fidesops_meta:
data_type: string
- name: id
data_categories: [system.operations]
fidesops_meta:
data_type: integer
- name: cameras
fields:
- name: name
data_categories: [ user.provided.nonidentifiable]
fidesops_meta:
data_type: string
- name: id
data_categories: [system.operations]
fidesops_meta:
data_type: integer
references:
- dataset: mongo_nested_test
field: photos.thumbnail.camera_used
direction: from
"""
def test_dataset_graph_connected_by_nested_fields():
"""Two of the fields in the postgres dataset references a nested field in the mongo dataset"""
dataset = __to_dataset__(example_dataset_nested_yaml)
ds = FidesopsDataset.parse_obj(dataset)
mongo_dataset = convert_dataset_to_graph(ds, "ignore")
postgres_dataset = __to_dataset__(example_postgres_yaml)
ds_postgres = FidesopsDataset.parse_obj(postgres_dataset)
postgres_dataset = convert_dataset_to_graph(ds_postgres, "ignore")
dataset_graph = DatasetGraph(mongo_dataset, postgres_dataset)
assert dataset_graph.edges == {
Edge(
FieldAddress("postgres_main_database", "users", "id"),
FieldAddress("mongo_nested_test", "photos", "thumbnail", "submitter"),
),
Edge(
FieldAddress("postgres_main_database", "photo_collection", "id"),
FieldAddress("mongo_nested_test", "photos", "photo_id"),
),
Edge(
FieldAddress("mongo_nested_test", "photos", "thumbnail", "camera_used"),
FieldAddress("postgres_main_database", "cameras", "id"),
),
}
assert dataset_graph.identity_keys == {
FieldAddress(
"mongo_nested_test", "photos", "thumbnail", "camera_used"
): "camera_uuid"
}
assert [
field_path.string_path
for field_path in dataset_graph.data_category_field_mapping[
CollectionAddress("mongo_nested_test", "photos")
]["system.operations"]
] == ["_id", "thumbnail.camera_used"]
example_object_with_data_categories_nested_yaml = """dataset:
- fides_key: mongo_nested_test
name: Mongo Example Nested Test Dataset
description: Example of a Mongo dataset that has a data_category incorrectly declared at the object level
collections:
- name: photos
fields:
- name: thumbnail
data_categories: [user.derived]
fidesops_meta:
data_type: object
fields:
- name: photo_id
data_type: integer
- name: name
data_categories: [user.provided.identifiable]
"""
def test_object_data_category_validation():
"""Test trying to validate object with data category specified"""
with pytest.raises(ValidationError):
FidesopsDataset.parse_obj(
__to_dataset__(example_object_with_data_categories_nested_yaml)
)
non_array_field_with_invalid_flag = """dataset:
- fides_key: mongo_return_all_elements_test
name: Mongo Return All Elements Test Dataset
description: Example of a Mongo dataset that incorrectly has return_all_elements specified on a non array field.
collections:
- name: photos
fields:
- name: thumbnail
fidesops_meta:
return_all_elements: true
data_type: object
fields:
- name: photo_id
data_type: integer
- name: name
data_categories: [user.provided.identifiable]
"""
def test_return_all_elements_specified_on_non_array_field():
"""Test return_all_elements can only be specified on array fields"""
with pytest.raises(ValidationError):
FidesopsDataset.parse_obj(__to_dataset__(non_array_field_with_invalid_flag))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.