if yk != 1111
lbf dec -434 if aao != -1344
kg dec -837 if f >= -1593
f inc 513 if eri <= -2804
fk dec -925 if fk >= -2756
t inc 280 if kg != 2100
lx dec 328 if dy < 3880
t inc -359 if j < -1103
es inc 289 if ada < -1203
x dec -414 if gk >= -574
aao dec 710 if dy == 3873
es dec -70 if ada <= -1187
ada dec 883 if j == -1110
is dec -635 if um != -105
is inc 627 if a == 1034
um inc -348 if ada > -1190
mmw dec -270 if uy >= -454
ada dec 362 if is > 3900
eri dec -504 if t != 4541
uy inc 910 if eri >= -2804
es dec 261 if fk < -1821
uy dec -430 if uy >= 456
yk dec -841 if um >= -107
j dec -287 if fk >= -1834
a dec 90 if uy <= 897
lx inc 867 if hg < 819
yk dec 265 if fk >= -1820
is inc -444 if umr >= 6676
kg inc 586 if f == -1600
es dec -298 if uy != 889
mmw inc -896 if gk >= -576
a dec -320 if t >= 4538
t dec -722 if hg >= 819
mmw inc -778 if ada < -1553
um dec -914 if j != -826
kg inc -964 if f < -1605
yk inc -51 if umr >= 6670
um dec 219 if j == -820
dy inc 727 if a == 1264
a dec -817 if kg <= 2692
um dec 99 if fk == -1826
kg dec 302 if uy != 892
j dec -696 if lx <= -688
uy inc -626 if t <= 5268
a dec 961 if hg <= 837
yk inc -803 if a != 1119
mmw dec 678 if lx < -680
lbf inc 862 if fk < -1825
kg inc -786 if t <= 5259
aao dec 91 if x == -1407
uy dec -548 if t == 5263
aao inc 638 if is != 3463
hg dec 244 if uy > 807
f inc 58 if lbf >= 765
fk inc 581 if is != 3455
hg dec -692 if fk >= -1247
um inc -752 if aao < -1424
lbf dec 14 if lbf != 783
t dec 270 if f == -1542
uy inc 70 if hg <= 1277
is inc 676 if lx == -682
is inc 230 if um >= -163
gk dec 799 if yk > 1107
ada dec -20 if eri != -2804
lx dec -278 if umr == 6678
gk inc 83 if umr <= 6686
lbf inc 265 if a >= 1118
is dec 476 if j != -820
eri dec 509 if lx > -413
is dec -486 if dy < 4606
kg dec 989 if is > 4847
lx inc 996 if dy >= 4591
is inc -371 if eri > -3303
kg dec 209 if f != -1542
kg dec -21 if kg < 1394
uy dec -34 if x == -1417
mmw inc -753 if hg != 1285
gk dec 862 if kg > 1408
ada dec 346 if is < 4856
umr inc 262 if es > -1888
lx inc -842 if umr < 6948
f inc 557 if yk != 1103
dy inc -323 if lbf >= 1030
kg dec 610 if ada >= -1883
f dec 727 if f != -1544
es dec -922 if aao > -1435
kg inc -628 if f == -2269
eri inc -762 if lx < -245
a dec 246 if a <= 1118
aao dec 51 if kg >= 181
is dec 761 if x >= -1417
j inc 359 if x == -1417
mmw dec 562 if ada < -1880
lbf dec 955 if aao != -1426
umr dec -851 if x == -1417
t dec -845 if is <= 4101
lbf inc -666 if hg > 1272
um dec -105 if f >= -2274
gk inc 191 if umr != 7781
dy dec -399 if eri <= -4072
is inc 681 if t != 5835
mmw inc -731 if ada > -1883
x inc -603 if yk < 1111
j inc 860 if fk <= -1239
t dec -480 if kg > 175
eri inc 308 if mmw > 957
hg inc 37 if gk >= -1163
eri inc 868 if kg >= 172
gk inc 381 if f > -2276
ada inc -231 if fk < -1234
uy dec -404 if gk >= -781
es inc -33 if yk != 1105
j inc -210 if um != -66
umr dec -783 if kg == 176
a inc 287 if hg < 1322
es inc 794 if f >= -2270
f inc 816 if a < 1417
eri inc -136 if fk <= -1235
j dec 421 if lbf < 362
kg dec -889 if fk > -1248
um inc 701 if yk <= 1110
x dec -487 if fk < -1243
aao dec 623 if umr <= 8582
mmw dec -307 if eri > -3038
t inc 803 if f == -1453
kg dec 980 if a < 1404
dy inc -974 if gk < -772
yk inc 221 if eri < -3040
gk inc -421 if aao > -2059
mmw dec -751 if f <= -1445
kg dec 422 if lx <= -250
dy dec 191 if x <= -1535
um inc -318 if gk != -1195
mmw inc 870 if kg != 651
j dec -865 if t < 7131
um dec 886 if t < 7124
ada inc 171 if eri < -3030
x dec -755 if es == -202
fk inc -325 if hg >= 1313
t dec 984 if t != 7130
j dec 55 if aao == -2056
fk dec 80 if x <= -773
dy dec 852 if lx != -256
is inc 295 if a >= 1408
uy dec 733 if hg < 1318
x inc 868 if um <= -560
t dec -532 if t != 6128
aao inc -530 if x == 90
is dec 374 if lbf == 358
aao inc 93 if t == 6673
uy inc -657 if hg != 1317
fk inc -670 if umr > 8568
x inc 791 if j != 634
mmw dec -872 if uy >= -73
yk inc -771 if eri <= -3034
umr dec -125 if kg < 645
mmw inc -617 if aao > -2581
mmw dec -90 if mmw != 3145
es dec 182 if aao <= -2574
kg dec 903 if ada >= -1934
um inc 319 if lbf == 358
uy inc -958 if f <= -1447
ada dec -965 if eri == -3033
fk dec 750 if es >= -393
hg inc -488 if hg >= 1307
lx inc 742 if umr != 8703
uy inc 378 if kg != 643
lx dec -584 if um != -233
kg dec -654 if ada >= -986
is dec 181 if f == -1453
eri inc 302 if lx < 1080
uy inc -907 if kg != 1289
yk dec 652 if is < 4223
lbf inc -147 if hg != 823
fk dec -584 if kg < 1300
fk dec -593 if f <= -1449
dy dec -83 if gk < -1189
a dec -568 if es <= -377
aao inc -742 if gk <= -1193
kg dec -560 if hg <= 831
es dec -871 if fk > -1899
lx inc 239 if kg != 1855
uy dec 696 if ada >= -984
kg dec -899 if yk >= 452
x inc -303 if is <= 4223
um inc -207 if lbf != 211
eri dec 520 if eri >= -2731
is dec -251 if t < 6675
umr inc 19 if kg == 1857
lbf dec -365 if uy != -2632
yk dec 192 if fk < -1883
dy dec 451 if hg > 820
j dec -789 if lx <= 1320
um dec -577 if lx > 1309
es dec 954 if dy != 2800
aao dec 798 if f <= -1446
mmw dec -339 if mmw >= 3143
j inc 126 if yk == 259
ada inc -898 if umr != 8718
kg inc -365 if eri == -3251
aao dec 718 if lx > 1307
x inc -872 if kg <= 1495
j dec -862 if is != 4464
mmw dec -785 if is == 4469
t dec 355 if uy != -2627
ada dec -528 if ada == -977
yk dec -139 if hg >= 823
mmw dec -97 if is == 4469
j dec -454 if is == 4469
yk dec -151 if kg < 1500
lbf inc -19 if lbf != 576
yk inc 281 if umr == 8718
gk dec 59 if mmw <= 4374
umr dec 266 if hg
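A minimal sketch of how this register-instruction input can be evaluated (assumptions: each well-formed line reads "<reg> inc|dec <amount> if <reg2> <op> <value>", all registers start at 0, the goal is the largest register value, and truncated lines are skipped):

import operator
from collections import defaultdict

OPS = {'>': operator.gt, '<': operator.lt, '>=': operator.ge,
       '<=': operator.le, '==': operator.eq, '!=': operator.ne}

def run_instructions(lines):
    regs = defaultdict(int)
    for line in lines:
        parts = line.split()
        if len(parts) != 7:          # skip truncated or blank lines
            continue
        reg, op, amount, _if, creg, cop, cval = parts
        if OPS[cop](regs[creg], int(cval)):
            regs[reg] += int(amount) if op == 'inc' else -int(amount)
    return max(regs.values()) if regs else 0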
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : 陈坤泽
# @Email : <EMAIL>
# @Date : 2020/05/30
import collections
import filecmp
import os
import pathlib
import random
import re
import shutil
import tempfile
import humanfriendly
# Case-insensitive dictionary
import pyxllib.stdlib.zipfile as zipfile  # re-implements the stdlib zipfile, switching cp437 to gbk to fix garbled Chinese filenames inside zips
from pyxllib.algo.pupil import natural_sort
from pyxllib.text.pupil import strfind
from pyxllib.debug.pupil import dprint
from pyxllib.file.specialist import get_etag, PathBase, File
from pyxllib.prog.newbie import first_nonnone
from pyxllib.prog.pupil import check_install_package
____dir = """
支持文件或文件夹的对比复制删除等操作的函数:filescmp、filesdel、filescopy
"""
class Dir(PathBase):
r"""类似NestEnv思想的文件夹处理类
这里的测试可以全程自己造一个
"""
__slots__ = ('_path', 'subs', '_origin_wkdir')
# 0. Commonly used directory constants
TEMP = pathlib.Path(tempfile.gettempdir())
if os.environ.get('Desktop', None):  # if the default win10 desktop path was changed, add a correct Desktop value to the environment variables
DESKTOP = os.environ['Desktop']
else:
DESKTOP = os.path.join(str(pathlib.Path.home()), 'Desktop')  # not guaranteed: the desktop may have been moved to D: or another drive
DESKTOP = pathlib.Path(DESKTOP)
# maybe also add a HOME constant, for convenience on linux?
# 1. Basic directory functionality
def __init__(self, path=None, root=None, *, subs=None, check=True):
"""根目录、工作目录
>> Dir() # 以当前文件夹作为root
>> Dir(r'C:/pycode/code4101py') # 指定目录
:param path: 注意哪怕path传入的是Dir,也只会设置目录,不会取其paths成员值
:param subs: 该目录下,选中的子文件(夹)
"""
self._path = None
self.subs = subs or []  # by default no files (or folders) are selected
# 1 fast initialization
if root is None:
if isinstance(path, Dir):
self._path = path._path
# note: initializing Dir B from Dir A does not carry A's subs over to B
return
elif isinstance(path, pathlib.Path):
self._path = path
# 2 normal initialization
if self._path is None:
self._path = self.abspath(path, root)
# 3 validation
if check:
if not self._path:
raise ValueError(f'Invalid path {self._path}')
elif self._path.is_file():
raise ValueError(f'Cannot initialize a Dir object from a file {self._path}')
@classmethod
def safe_init(cls, path, root=None, *, subs=None):
""" 如果失败不raise,而是返回None的初始化方式 """
try:
d = Dir(path, root, subs=subs)
d._path.is_file()  # some problems are not caught by the previous step, so add one more check
return d
except (ValueError, TypeError, OSError, PermissionError):
# ValueError: file name too long, which means the input is most likely a chunk of text rather than a path
# TypeError: not a str or another normal argument
# OSError: illegal path name, e.g. containing characters such as *?
# PermissionError: no-permission or non-existent paths on linux
return None
@property
def size(self) -> int:
""" 计算目录的大小,会递归目录计算总大小
https://stackoverflow.com/questions/1392413/calculating-a-directory-size-using-python
>> Dir('D:/slns/pyxllib').size # 这个算的就是真实大小,不是占用空间
2939384
"""
if self:
total_size = 0
for dirpath, dirnames, Pathnames in os.walk(str(self)):
for f in Pathnames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
else:  # the directory does not exist
total_size = 0
return total_size
@property
def psize(self) -> str:
""" 美化显示的文件大小 """
return humanfriendly.format_size(self.size, binary=True)
def __truediv__(self, key) -> pathlib.Path:
r""" 路径拼接功能
>>> Dir('C:/a') / 'b.txt'
WindowsPath('C:/a/b.txt')
"""
return self._path / str(key)
def with_dirname(self, value):
return Dir(self.name, value)
def absdst(self, dst):
""" 在copy、move等中,给了个"模糊"的目标位置dst,智能推导出实际file、dir绝对路径
"""
dst_ = self.abspath(dst)
if isinstance(dst, str) and dst[-1] in ('\\', '/'):
dst_ = Dir(self.name, dst_)
else:
dst_ = Dir(dst_)
return dst_
def ensure_dir(self):
r""" 确保目录存在
"""
if not self:
os.makedirs(str(self))
def copy(self, dst, if_exists=None):
return self.process(dst, shutil.copytree, if_exists)
def rename(self, dst, if_exists=None):
r""" 重命名
"""
return self.move(Dir(dst, self.parent), if_exists)
def delete(self):
r""" 删除自身文件
"""
if self:
try:
shutil.rmtree(str(self))
except OSError:
# OSError: Cannot call rmtree on a symbolic link
# TODO ideally we would check os.path.islink first instead of try/except, but that check seems buggy here and fails to detect the link
os.unlink(str(self))
# 2. Directory-specific functionality
def sample(self, n=None, frac=None):
"""
:param n: number of files to sample from paths
:param frac: fraction of files to sample
:return: a new Dir selection state
"""
n = n or int(frac * len(self.subs))
paths = random.sample(self.subs, n)
return Dir(self._path, subs=paths)
def subpaths(self):
""" 返回所有subs的绝对路径 """
return [self._path / p for p in self.subs]
def subfiles(self):
""" 返回所有subs的File对象 (过滤掉文件夹对象) """
return list(map(File, filter(lambda p: not p.is_dir(), self.subpaths())))
def subdirs(self):
""" 返回所有subs的File对象 (过滤掉文件对象) """
return list(map(Dir, filter(lambda p: not p.is_file(), self.subpaths())))
def select(self, patter, nsort=True, type_=None,
ignore_backup=False, ignore_special=False,
min_size=None, max_size=None,
min_ctime=None, max_ctime=None, min_mtime=None, max_mtime=None,
**kwargs):
r""" 增加选中文件,从filesmatch衍生而来,参数含义见 filesfilter
:param bool nsort: 是否使用自然排序,关闭可以加速
:param str type_:
None,所有文件
'file',只匹配文件
'dir', 只匹配目录
:param bool ignore_backup: 如果设为False,会过滤掉自定义的备份文件格式,不获取备份类文件
:param bool ignore_special: 自动过滤掉 '.git'、'$RECYCLE.BIN' 目录下文件
:param int min_size: 文件大小过滤,单位Byte
:param int max_size: ~
:param str min_ctime: 创建时间的过滤,格式'2019-09-01'或'2019-09-01 00:00'
:param str max_ctime: ~
:param str min_mtime: 修改时间的过滤
:param str max_mtime: ~
:param kwargs: see filesfilter
:seealso: filesfilter
注意select和exclude的增减操作是不断叠加的,而不是每次重置!
如果需要重置,应该重新定义一个Folder类
>> Dir('C:/pycode/code4101py').select('*.pyw').select('ckz.py')
C:/pycode/code4101py: ['ol批量修改文本.pyw', 'ckz.py']
>> Dir('C:/pycode/code4101py').select('**/*.pyw').select('ckz.py')
C:/pycode/code4101py: ['ol批量修改文本.pyw', 'chenkz/批量修改文本.pyw', 'winr/bc.pyw', 'winr/reg/FileBackup.pyw', 'ckz.py']
>> Dir('C:/pycode/code4101py').select('*.py', min_size=200*1024) # 200kb以上的文件
C:/pycode/code4101py: ['liangyb.py']
>> Dir(r'C:/pycode/code4101py').select('*.py', min_mtime=datetime.date(2020, 3, 1)) # 修改时间在3月1日以上的
"""
subs = filesmatch(patter, root=str(self), type_=type_,
ignore_backup=ignore_backup, ignore_special=ignore_special,
min_size=min_size, max_size=max_size,
min_ctime=min_ctime, max_ctime=max_ctime, min_mtime=min_mtime, max_mtime=max_mtime,
**kwargs)
subs = self.subs + subs
if nsort: subs = natural_sort(subs)
return Dir(self._path, subs=subs)
def select_files(self, patter, nsort=True,
ignore_backup=False, ignore_special=False,
min_size=None, max_size=None,
min_ctime=None, max_ctime=None, min_mtime=None, max_mtime=None):
""" TODO 这系列的功能可以优化加速,在没有复杂规则的情况下,可以尽量用源生的py检索方式实现 """
subs = filesmatch(patter, root=str(self), type_='file',
ignore_backup=ignore_backup, ignore_special=ignore_special,
min_size=min_size, max_size=max_size,
min_ctime=min_ctime, max_ctime=max_ctime,
min_mtime=min_mtime, max_mtime=max_mtime)
if nsort:
subs = natural_sort(subs)
for x in subs:
yield File(self._path / x, check=False)
def select_dirs(self, patter, nsort=True,
ignore_backup=False, ignore_special=False,
min_size=None, max_size=None,
min_ctime=None, max_ctime=None, min_mtime=None, max_mtime=None):
subs = filesmatch(patter, root=str(self), type_='dir',
ignore_backup=ignore_backup, ignore_special=ignore_special,
min_size=min_size, max_size=max_size,
min_ctime=min_ctime, max_ctime=max_ctime,
min_mtime=min_mtime, max_mtime=max_mtime)
if nsort:
subs = natural_sort(subs)
for x in subs:
yield Dir(self._path / x, check=False)
def select_paths(self, patter, nsort=True,
ignore_backup=False, ignore_special=False,
min_size=None, max_size=None,
min_ctime=None, max_ctime=None, min_mtime=None, max_mtime=None):
subs = filesmatch(patter, root=str(self),
ignore_backup=ignore_backup, ignore_special=ignore_special,
min_size=min_size, max_size=max_size,
min_ctime=min_ctime, max_ctime=max_ctime,
min_mtime=min_mtime, max_mtime=max_mtime)
if nsort:
subs = natural_sort(subs)
for x in subs:
yield self._path / x
def procpaths(self, func, start=None, end=None, ref_dir=None, pinterval=None, max_workers=1, interrupt=True):
""" 对选中的文件迭代处理
:param func: 对每个文件进行处理的自定义接口函数
参数 p: 输入参数 Path 对象
return: 可以没有返回值
TODO 以后可以返回字典结构,用不同的key表示不同的功能,可以控制些高级功能
:param ref_dir: 使用该参数时,则每次会给func传递两个路径参数
第一个是原始的file,第二个是ref_dir目录下对应路径的file
TODO 增设可以bfs还是dfs的功能?
将目录 test 的所有文件拷贝到 test2 目录 示例代码:
def func(p1, p2):
File(p1).copy(p2)
Dir('test').select('**/*', type_='file').procfiles(func, ref_dir='test2')
"""
from pyxllib.debug.specialist.xllog import Iterate
if ref_dir:
ref_dir = Dir(ref_dir)
paths1 = self.subpaths()
paths2 = [(ref_dir / self.subs[i]) for i in range(len(self.subs))]
def wrap_func(data):
func(*data)
data = zip(paths1, paths2)
else:
data = self.subpaths()
wrap_func = func
Iterate(data).run(wrap_func, start=start, end=end, pinterval=pinterval,
max_workers=max_workers, interrupt=interrupt)
def select_invert(self, patter='**/*', nsort=True, **kwargs):
""" 反选,在"全集"中,选中当前状态下没有被选中的那些文件
这里设置的选择模式,是指全集的选择范围
"""
subs = Dir(self).select(patter, nsort, **kwargs).subs
cur_subs = set(self.subs)
new_subs = []
for s in subs:
if s not in cur_subs:
new_subs.append(s)
return Dir(self._path, subs=new_subs)
def exclude(self, patter, **kwargs):
""" 去掉部分选中文件
d1 = Dir('test').select('**/*.eps')
d2 = d1.exclude('subdir/*.eps')
d3 = d2.select_invert(type_='file')
print(d1.files) # ['AA20pH-c1=1-1.eps', 'AA20pH-c1=1-2.eps', 'subdir/AA20pH-c1=1-2 - 副本.eps']
print(d2.files) # ['AA20pH-c1=1-1.eps', 'AA20pH-c1=1-2.eps']
print(d3.files) # ['subdir/AA20pH-c1=1-2 - 副本.eps']
"""
subs = set(filesmatch(patter, root=str(self), **kwargs))
new_subs = []
for s in self.subs:
if s not in subs:
new_subs.append(s)
return Dir(self._path, subs=new_subs)
def describe(self):
""" 输出目录的一些基本统计信息
"""
msg = []
dir_state = self.select('*')
files = dir_state.subfiles()
suffixs = collections.Counter([f.suffix for f in files]).most_common()
dir_size = self.size
msg.append(f'size: {dir_size} ≈ {humanfriendly.format_size(dir_size, binary=True)}')
msg.append(f'files: {len(files)}, {suffixs}')
msg.append(f'dirs: {len(dir_state.subdirs())}')
res = '\n'.join(msg)
print(res)
def __enter__(self):
""" 使用with模式可以进行工作目录切换
注意!注意!注意!
切换工作目录和多线程混合使用会有意想不到的坑,要慎重!
"""
self._origin_wkdir = os.getcwd()
os.chdir(str(self))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
os.chdir(self._origin_wkdir)
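# Hypothetical usage sketch (not part of the original module): how the Dir
# selection API above composes. Root path and patterns are placeholders.
def _demo_dir_usage(root='.'):
    d = Dir(root).select('**/*.py', type_='file')   # collect .py files recursively
    d = d.exclude('**/__init__.py')                  # drop package markers
    for f in d.subfiles():                           # File objects for what is left
        print(f)
    return d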
____filesxxx = """
Path and File originally handled both files and directories
After the refactor the files* helpers below rely on File and can no longer operate on directories
There may be bugs; avoid these legacy helpers where possible, or remove them sooner rather than later
"""
def filescmp(f1, f2, shallow=True):
"""只有两个存在且是同类型的文件或文件夹,内容相同才会返回True,否则均返回False
:param f1: 待比较的第1个文件(文件夹)
:param f2: 待比较的第2个文件(文件夹)
:param shallow: 默认True,即是利用os.stat()返回的基本信息进行比较
例如其中的文件大小,但修改时间等是不影响差异判断的
如果设为False,则会打开比较具体内容,速度会慢一点
"""
if os.path.isfile(f1) and os.path.isfile(f2):
cmp = filecmp.cmp(f1, f2, shallow)
elif os.path.isdir(f1) and os.path.isdir(f2):
# for directories only the immediate listing is compared; the contents of individual files and of subdirectories are not
t = filecmp.dircmp(f1, f2, shallow)
cmp = False
try:
if not t.left_only and not t.right_only:
cmp = True
except TypeError:
pass
else:  # at least one of the paths does not exist
cmp = False
return cmp
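# Hypothetical usage sketch (not part of the original module): filescmp on two
# placeholder paths; shallow=False forces a content comparison.
def _demo_filescmp(f1='old/report.txt', f2='new/report.txt'):
    same = filescmp(f1, f2, shallow=False)
    print('identical' if same else 'different, or at least one path is missing')
    return same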
def filesfilter(files, *, root=os.curdir, type_=None,
ignore_backup=False, ignore_special=False,
min_size=None, max_size=None,
min_ctime=None, max_ctime=None,
min_mtime=None, max_mtime=None):
"""
:param files: a list-like object
:param type_:
None, all entries
'file', match files only
'dir', match directories only
:param ignore_backup: if True, entries matching the custom backup-file naming are filtered out and backup files are not returned
:param ignore_special: automatically skip files under '.git' and '$RECYCLE.BIN'
:param min_size: file size filter, in bytes
:param max_size: ~
:param min_ctime: creation-time filter, format '2019-09-01' or '2019-09-01 00:00'
:param max_ctime: ~
:param min_mtime: modification-time filter
:param max_mtime: ~
:return:
"""
from datetime import datetime
def judge(f):
if root: f = os.path.join(root, f)
if type_ == 'file' and not os.path.isfile(f):
return False
elif type_ == 'dir' and not os.path.isdir(f):
return False
# avoid calling os.stat where possible: only call it if a size/time rule was given, otherwise skip this part
check_arg = first_nonnone([min_size, max_size, min_ctime, max_ctime, min_mtime, max_mtime])
if check_arg is not None:
msg = os.stat(f)
if first_nonnone([min_size, max_size]) is not None:
size = File(f).size
if min_size is not None and size < min_size: return False
if max_size is not None and size > max_size: return False
if min_ctime or max_ctime:
file_ctime = datetime.fromtimestamp(msg.st_ctime)
if min_ctime and file_ctime < min_ctime: return False
if max_ctime and file_ctime > max_ctime: return False
if min_mtime or max_mtime:
file_mtime = datetime.fromtimestamp(msg.st_mtime)
if min_mtime and file_mtime < min_mtime: return False
if max_mtime and file_mtime > max_mtime: return False
if ignore_special:
parts = File(f).parts
if '.git' in parts or '$RECYCLE.BIN' in parts:
return False
if ignore_backup and File(f).backup_time:
return False
return True
root = os.path.abspath(root)
return list(filter(judge, files))
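# Hypothetical usage sketch (not part of the original module): filesfilter only
# narrows down a list of relative paths that the caller already has.
def _demo_filesfilter(root='.'):
    names = os.listdir(root)                                            # candidate relative paths
    only_dirs = filesfilter(names, root=root, type_='dir')              # keep directories
    big_files = filesfilter(names, root=root, type_='file', min_size=1024 * 1024)  # files of at least 1 MB
    return only_dirs, big_files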
def filesmatch(patter, *, root=os.curdir, **kwargs) -> list:
r"""
:param patter:
str,
without *, ?, <, >: a plain filter rule
with *, ?, <, >: Path.glob wildcard mode is supported; ** matches any subdirectory
glob actually supports [0-9]-style ranges, but [ and ] are legal in file names,
so to make the glob intent explicit, this uses <> instead
**/* does not match the root directory itself
re.Pattern: a regex filter rule (slower but very flexible); any other object with a match method also works
the relative paths of all files under the current working directory are collected into a list
and re.match is applied to every element of that list
list, tuple or set object:
filesmatch is called recursively on each element
all other parameters are file filters; see filesfilter for details
:return: all matched files and directories that exist, as "relative paths"
TODO case sensitivity of patter? could that cause missed matches?
>> os.chdir('F:/work/filesmatch')  # working directory
1. plain matching
>> filesmatch('a')  # match file a or directory a in the current directory
['a']
>> filesmatch('b/a/')
['b\\a']
>> filesmatch('b/..\\a/')
['a']
>> filesmatch('c')  # returns [] if c does not exist
[]
2. wildcard mode
>> filesmatch('work/*.png')  # wildcards are supported
[]
>> filesmatch('*.png')  # wildcards are supported
['1.png', '1[.png', 'logo.png']
>> filesmatch('**/*.png')  # png images in all subdirectories as well
['1.png', '1[.png', 'logo.png', 'a\\2.png']
>> filesmatch('?.png')
['1.png']
>> filesmatch('[0-9]/<0-9>.txt')  # use <0-9> to express the [0-9] pattern
['[0-9]\\3.txt']
3. regex mode
>> filesmatch(re.compile(r'\d\[\.png$'))
['1[.png']
4. other advanced usage
>> filesmatch('**/*', type_='dir', max_size=0)  # select empty directories
['b', '[0-9]']
>> filesmatch('**/*', type_='file', max_size=0)  # select empty files
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# COPYRIGHT (C) 2014-2020 <NAME>.
# This software is released under the MIT License.
# https://github.com/konsan1101
# Thank you for keeping the rules.
import sys
import os
import time
import datetime
import codecs
import glob
import json
import queue
import threading
import subprocess
import psutil
import signal
import shutil
import ctypes
import array
import unicodedata
import pyautogui
import pyperclip
import numpy as np
import cv2
from PIL import Image
import io
if (os.name == 'nt'):
import win32clipboard
qPath_sounds = '_sounds/'
qPath_icons = '_icons/'
qPath_fonts = '_fonts/'
class qFunc_class:
def __init__(self, ):
self.qScreenWidth = 0
self.qScreenHeight = 0
def __del__(self, ):
pass
def init(self, ):
return True
def setNice(self, nice, ):
try:
p = psutil.Process()
if (nice == 'high'): # priority: high
p.nice(psutil.HIGH_PRIORITY_CLASS)
elif (nice == 'above'): # priority: above normal
p.nice(psutil.ABOVE_NORMAL_PRIORITY_CLASS)
elif (nice == 'normal'): # priority: normal
p.nice(psutil.NORMAL_PRIORITY_CLASS)
elif (nice == 'below'): # priority: below normal
p.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS)
elif (nice == 'idol'): # priority: low (idle)
p.nice(psutil.IDLE_PRIORITY_CLASS)
else: # priority: normal
p.nice(psutil.NORMAL_PRIORITY_CLASS)
except:
pass
def getNice(self, ):
try:
p = psutil.Process()
nice = p.nice()
if (nice == psutil.HIGH_PRIORITY_CLASS): # priority: high
return 'high'
elif (nice == psutil.ABOVE_NORMAL_PRIORITY_CLASS): # priority: above normal
return 'above'
elif (nice == psutil.NORMAL_PRIORITY_CLASS): # priority: normal
return 'normal'
elif (nice == psutil.BELOW_NORMAL_PRIORITY_CLASS): # priority: below normal
return 'below'
elif (nice == psutil.IDLE_PRIORITY_CLASS): # priority: low (idle)
return 'idol'
else: # priority: normal
pass
except:
pass
return 'normal'
def getJson(self, json_path='_config/', json_file='test_key.json', ):
json_dic = {}
try:
with codecs.open(json_path + json_file, 'r', 'utf-8') as r:
json_dic = json.load(r)
if (json_dic != {}):
return True, json_dic
except Exception as e:
print('getJson error! ' + json_path + json_file)
return False, {}
def putJson(self, json_path='_config/', json_file='test_key.json', json_dic={}, ):
try:
w = codecs.open(json_path + json_file, 'w', 'utf-8')
w.write(json.dumps(json_dic, indent=4, ensure_ascii=False, ))
w.close()
return True
except Exception as e:
print('putJson error! ' + json_path + json_file)
return False
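    # Hypothetical usage sketch (not part of the original class): round-tripping
    # a config dict through putJson/getJson. Path and file name are placeholders.
    def _demo_json_roundtrip(self, ):
        cfg = {'lang': 'ja', 'volume': 7}
        ok = self.putJson('_config/', 'demo.json', cfg)      # write the dict as json
        ok2, loaded = self.getJson('_config/', 'demo.json')  # read it back
        return ok and ok2 and (loaded == cfg)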
def makeDirs(self, ppath, remove=False, ):
try:
if (len(ppath) > 0):
path=ppath.replace('\\', '/')
if (path[-1:] != '/'):
path += '/'
if (not os.path.isdir(path[:-1])):
os.makedirs(path[:-1])
else:
if (remove != False):
files = glob.glob(path + '*')
for f in files:
if (remove == True):
try:
self.remove(f)
except Exception as e:
pass
if (str(remove).isdigit()):
try:
nowTime = datetime.datetime.now()
fileStamp = os.path.getmtime(f)
fileTime = datetime.datetime.fromtimestamp(fileStamp)
td = nowTime - fileTime
if (td.days >= int(remove)):
self.remove(f)
except Exception as e:
pass
except Exception as e:
pass
return True
def kill(self, name, ):
if (os.name == 'nt'):
try:
kill = subprocess.Popen(['taskkill', '/im', name + '.exe', '/f', ], \
stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
kill.wait()
kill.terminate()
kill = None
return True
except Exception as e:
pass
else:
try:
kill = subprocess.Popen(['pkill', '-9', '-f', name, ], \
stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
kill.wait()
kill.terminate()
kill = None
return True
except Exception as e:
pass
return False
def remove(self, filename, maxWait=1, ):
if (not os.path.exists(filename)):
return True
if (maxWait == 0):
try:
os.remove(filename)
return True
except Exception as e:
return False
else:
chktime = time.time()
while (os.path.exists(filename)) and ((time.time() - chktime) <= maxWait):
try:
os.remove(filename)
return True
except Exception as e:
pass
time.sleep(0.10)
if (not os.path.exists(filename)):
return True
else:
return False
def copy(self, fromFile, toFile, ):
try:
shutil.copy2(fromFile, toFile)
return True
except Exception as e:
return False
def txtsWrite(self, filename, txts=[''], encoding='utf-8', exclusive=False, mode='w', ):
if (exclusive == False):
try:
w = codecs.open(filename, mode, encoding)
for txt in txts:
if (encoding != 'shift_jis'):
w.write(txt + '\n')
else:
w.write(txt + '\r\n')
w.close()
w = None
return True
except Exception as e:
w = None
return False
else:
res = self.remove(filename, )
if (res == False):
return False
else:
f2 = filename[:-4] + '.tmp.txt'
res = self.remove(f2, )
if (res == False):
return False
else:
try:
w = codecs.open(f2, mode, encoding)
for txt in txts:
if (encoding != 'shift_jis'):
w.write(txt + '\n')
else:
w.write(txt + '\r\n')
w.close()
w = None
os.rename(f2, filename)
return True
except Exception as e:
w = None
return False
def txtsRead(self, filename, encoding='utf-8', exclusive=False, ):
if (not os.path.exists(filename)):
return False, ''
encoding2 = encoding
if (encoding2 == 'utf-8'):
encoding2 = 'utf-8-sig'
if (exclusive == False):
try:
txts = []
txt = ''
r = codecs.open(filename, 'r', encoding2)
for t in r:
t = t.replace('\n', '')
t = t.replace('\r', '')
txt = (txt + ' ' + str(t)).strip()
txts.append(t)
r.close()
r = None
return txts, txt
except Exception as e:
r = None
return False, ''
else:
f2 = filename[:-4] + '.wrk.txt'
res = self.remove(f2, )
if (res == False):
return False
else:
try:
os.rename(filename, f2)
txts = []
txt = ''
r = codecs.open(f2, 'r', encoding2)
for t in r:
t = t.replace('\n', '')
t = t.replace('\r', '')
txt = (txt + ' ' + str(t)).strip()
txts.append(t)
r.close()
r = None
self.remove(f2, )
return txts, txt
except Exception as e:
r = None
return False, ''
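    # Hypothetical usage sketch (not part of the original class): write lines and
    # read them back. exclusive=True goes through a temporary file so concurrent
    # readers and writers do not collide.
    def _demo_txts_roundtrip(self, filename='demo.txt'):
        self.txtsWrite(filename, txts=['first line', 'second line'], exclusive=True)
        txts, joined = self.txtsRead(filename, exclusive=False)
        return txts, joined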
def statusSet(self, filename='', Flag=True, txt='_on_'):
if (Flag == True):
chktime = time.time()
while (not os.path.exists(filename)) and ((time.time() - chktime) < 1):
try:
w = open(filename, 'w')
w.write(txt)
w.close()
w = None
return True
except Exception as e:
w = None
time.sleep(0.10)
else:
chktime = time.time()
while (os.path.exists(filename)) and ((time.time() - chktime) < 1):
try:
os.remove(filename, )
return True
except Exception as e:
pass
time.sleep(0.10)
return False
def statusCheck(self, filename='', ):
if (os.path.exists(filename)):
return True
else:
return False
def statusWait_false(self, filename, falseWait=1, ):
if (falseWait != 0):
chktime = time.time()
while (os.path.exists(filename)) and ((time.time() - chktime) < falseWait):
time.sleep(0.10)
return self.statusCheck(filename)
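    # Hypothetical usage sketch (not part of the original class): the status*
    # helpers use the existence of a small file as an on/off flag that several
    # processes can share. The flag file name is a placeholder.
    def _demo_status_flag(self, flagfile='_demo_busy.txt'):
        self.statusSet(flagfile, True)                         # raise the flag
        busy = self.statusCheck(flagfile)                      # True while the file exists
        self.statusSet(flagfile, False)                        # lower (delete) the flag
        cleared = not self.statusWait_false(flagfile, falseWait=1)
        return busy and cleared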
def txtFilePath(self, txt='',):
if (txt == ''):
return False
chk = txt.replace('\\','/')
if (os.path.isfile(chk)) \
or (os.path.isdir(chk)):
return chk
return False
def txt2filetxt(self, txt='', ):
ftxt = txt.replace(' ','_')
ftxt = ftxt.replace(u' ','_')
ftxt = ftxt.replace(u'、','_')
ftxt = ftxt.replace(u'。','_')
ftxt = ftxt.replace('"','_')
ftxt = ftxt.replace('$','_')
ftxt = ftxt.replace('%','_')
ftxt = ftxt.replace('&','_')
ftxt = ftxt.replace("'",'_')
ftxt = ftxt.replace('\\','_')
ftxt = ftxt.replace('|','_')
ftxt = ftxt.replace('*','_')
ftxt = ftxt.replace('/','_')
ftxt = ftxt.replace('?','_')
ftxt = ftxt.replace(':',',')
ftxt = ftxt.replace('<','_')
ftxt = ftxt.replace('>','_')
return ftxt
def findWindow(self, winTitle='Display', ):
if (os.name != 'nt'):
return False
parent_handle = ctypes.windll.user32.FindWindowW(0, winTitle)
if (parent_handle == 0):
return False
else:
return parent_handle
def moveWindowSize(self, winTitle='Display', posX=0, posY=0, dspMode='full+', ):
if (os.name != 'nt'):
return False
parent_handle = self.findWindow(winTitle)
if (parent_handle == False):
return False
else:
dspWidth, dspHeight = self.getResolution(dspMode)
HWND_TOP = 0
SWP_SHOWWINDOW = 0x0040
ctypes.windll.user32.SetWindowPos(parent_handle, HWND_TOP, posX, posY, dspWidth, dspHeight, SWP_SHOWWINDOW)
return True
def setForegroundWindow(self, winTitle='Display', ):
if (os.name != 'nt'):
return False
parent_handle = self.findWindow(winTitle)
if (parent_handle == False):
return False
else:
ctypes.windll.user32.SetForegroundWindow(parent_handle)
return True
def img2clip(self, file):
if (os.name == 'nt'):
#try:
img = Image.open(file)
output = io.BytesIO()
img.convert('RGB').save(output, 'BMP')
data = output.getvalue()[14:]
output.close()
win32clipboard.OpenClipboard()
win32clipboard.EmptyClipboard()
win32clipboard.SetClipboardData(win32clipboard.CF_DIB, data)
win32clipboard.CloseClipboard()
return True
#except Exception as e:
# pass
return False
def in_japanese(self, txt=''):
t = txt.replace('\r', '')
t = t.replace('\n', '')
try:
for s in t:
name = unicodedata.name(s)
if ('CJK UNIFIED' in name) \
or ('HIRAGANA' in name) \
or ('KATAKANA' in name):
return True
except Exception as e:
pass
return False
def waitSec(self, sec=0, ):
xSec = sec
while (int(xSec) > 0):
print('wait … ' + str(int(xSec)))
time.sleep(1)
xSec -= 1
if (xSec > 0):
time.sleep(xSec)
return True
def sendKey(self, txt='', cr=True, lf=False, afterSec=0.5, ):
out_txt = txt
if (cr==True) or (lf==True):
out_txt = out_txt.replace('\r', '')
out_txt = out_txt.replace('\n', '')
pyperclip.copy(out_txt)
pyautogui.hotkey('ctrl', 'v')
if (cr==True) or (lf==True):
pyautogui.typewrite(['enter',])
if (afterSec != 0):
time.sleep(afterSec)
return True
def keyPress(self, keys=[], afterSec=0.5, ):
for key in keys:
pyautogui.press(key)
if (afterSec != 0):
time.sleep(afterSec)
return True
def notePad(self, txt='', cr=True, lf=False, ):
winTitle = u'無題 - メモ帳'
if (os.name != 'nt'):
return False
parent_handle = ctypes.windll.user32.FindWindowW(0, winTitle)
if (parent_handle == 0):
return False
else:
out_txt = txt
if (cr==True) or (lf==True):
out_txt = out_txt.replace('\r', '')
out_txt = out_txt.replace('\n', '')
if (cr==True):
out_txt += '\r'
if (lf==True):
out_txt += '\n'
if (True):
#try:
child_handles = array.array('i')
ENUM_CHILD_WINDOWS = ctypes.WINFUNCTYPE( \
ctypes.c_int, \
ctypes.c_int, \
ctypes.py_object)
ctypes.windll.user32.EnumChildWindows( \
parent_handle, \
ENUM_CHILD_WINDOWS(self.enum_child_windows_proc), \
ctypes.py_object(child_handles) )
WM_CHAR = 0x0102
for i in range(len(out_txt)):
ctypes.windll.user32.SendMessageW(child_handles[0], WM_CHAR, (ord(out_txt[i])), 0)
return True
#except Exception as e:
# return False
def enum_child_windows_proc(self, handle, list):
list.append(handle)
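# Hypothetical usage sketch (not part of the original script): adjusting the
# process priority with the class above. On non-Windows systems the psutil
# priority constants used in setNice/getNice do not exist, so both quietly fall back.
if __name__ == '__main__':
    qf = qFunc_class()
    before = qf.getNice()
    qf.setNice('below')
    print('priority:', before, '->', qf.getNice())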
"""tests for the IsoSurfGeom module"""
from os import listdir, remove, getcwd, mkdir
from os.path import isfile, isdir, join
import pytest
from pymoab import core, types
import numpy as np
import itertools
import warnings
from IsogeomGenerator import isg, ivdb
# Set up test files and expected results
test_dir = getcwd() + "/tests/test_files/"
test_mesh = test_dir + "test_mesh.vtk"
data = 'dname'
levels = [15, 5, 25, 35, 45]
exp_db = test_dir + "/exp-test/"
exp_vols_dir = exp_db + "/vols"
common_files = [f for f in listdir(exp_vols_dir)
if isfile(join(exp_vols_dir, f))]
exp_levelfile = exp_db + "/levelfile"
exp_levels = [5, 15, 25, 35, 45]
exp_geom = test_dir + '/exp-isogeom.h5m'
# geometric extent info
exp_ext_min = -10.
exp_ext_max = 10.
exts = [np.full(3, exp_ext_min), np.full(3, exp_ext_max)]
def __ivdb_obj(completed):
# manually generate a usable ivdb object
iv = ivdb.IvDb(levels=levels, data=data, db=exp_db)
iv.xmin = iv.ymin = iv.zmin = exp_ext_min
iv.xmax = iv.ymax = iv.zmax = exp_ext_max
iv.completed = completed
return iv
def test_init_none():
r = np.full(6, False)
ig = isg.IsGm()
if ig.levels is None:
r[0] = True
if ig.data is None:
r[1] = True
if ig.db == getcwd() + "/tmp":
r[2] = True
if isinstance(ig.mb, type(core.Core())):
r[3] = True
if ig.isovol_meshsets == {}:
r[4] = True
if ig.xmin == ig.xmax == ig.ymin == ig.ymax == ig.zmin == ig.zmax is None:
r[5] = True
assert(all(r))
def test_init_input():
r = np.full(5, False)
ig = isg.IsGm(levels=levels, data=data, db=exp_db, extents=exts)
if ig.levels == exp_levels:
r[0] = True
if ig.data == data:
r[1] = True
if ig.db == exp_db:
r[2] = True
if ig.xmin == ig.ymin == ig.zmin == exp_ext_min:
r[3] = True
if ig.xmax == ig.ymax == ig.zmax == exp_ext_max:
r[4] = True
assert(all(r))
def test_init_ivdb():
"""test that info is taken from ivdb"""
r = np.full(5, False)
iv = __ivdb_obj(True)
ig = isg.IsGm(ivdb=iv)
if ig.levels == exp_levels:
r[0] = True
if ig.data == data:
r[1] = True
if ig.db == exp_db:
r[2] = True
if ig.xmin == ig.ymin == ig.zmin == exp_ext_min:
r[3] = True
if ig.xmax == ig.ymax == ig.zmax == exp_ext_max:
r[4] = True
assert(all(r))
def test_init_input_ivdb():
"""test that info from ivdb overwrites other input"""
r = np.full(5, False)
iv = __ivdb_obj(True)
ig = isg.IsGm(ivdb=iv, levels=[0, 2], data='nonsense', db='fake_db')
if ig.levels == exp_levels:
r[0] = True
if ig.data == data:
r[1] = True
if ig.db == exp_db:
r[2] = True
if ig.xmin == ig.ymin == ig.zmin == exp_ext_min:
r[3] = True
if ig.xmax == ig.ymax == ig.zmax == exp_ext_max:
r[4] = True
assert(all(r))
def test_init_input_file():
ig = isg.IsGm(levels=exp_levelfile)
assert(ig.levels == exp_levels)
def test_read_ivdb():
"""read info from ivdb obj"""
iv = __ivdb_obj(True)
ig = isg.IsGm()
ig.read_ivdb(iv)
r = np.full(5, False)
if ig.levels == exp_levels:
r[0] = True
if ig.data == data:
r[1] = True
if ig.db == exp_db:
r[2] = True
if ig.xmin == ig.ymin == ig.zmin == exp_ext_min:
r[3] = True
if ig.xmax == ig.ymax == ig.zmax == exp_ext_max:
r[4] = True
assert(all(r))
def test_read_ivdb_incomplete():
"""raise error if incomplete ivdb obj"""
iv = __ivdb_obj(False)
ig = isg.IsGm()
with pytest.raises(RuntimeError) as error_info:
ig.read_ivdb(iv)
assert "Incomplete IvDb object" in str(error_info)
def test_read_database():
"""check that meshsets are properly populated with read_database"""
# create obj and read database
ig = isg.IsGm(levels=levels, data=data, db=exp_db)
ig.read_database()
# expected meshset entity handles
ehs = [12682136550675316737,
12682136550675316738,
12682136550675316739,
12682136550675316740,
12682136550675316741]
# setup truth array
res = np.full(len(ehs) + 1, False)
# check that meshsets exist in the moab instance
for r, eh in enumerate(ehs):
try:
# any moab call that will work if the meshset exists, else
# it will fail
ig.mb.get_child_meshsets(eh)
except RuntimeError:
pass
else:
res[r] = True
# check that meshsets and bounds information are in the dictionary
exp_meshsets = {(0, ehs[0]): {'bounds': (None, 5.0)},
(1, ehs[1]): {'bounds': (5.0, 15.0)},
(2, ehs[2]): {'bounds': (15.0, 25.0)},
(3, ehs[3]): {'bounds': (25.0, 35.0)},
(4, ehs[4]): {'bounds': (35.0, None)}}
if sorted(ig.isovol_meshsets) == sorted(exp_meshsets):
res[-1] = True
# assert all pass
assert(all(res))
def test_read_database_numfiles_error():
"""read_database throws error if num levels and files mismatch"""
# create obj and read database
ig = isg.IsGm(levels=[300], data=data, db=exp_db)
with pytest.raises(RuntimeError) as error_info:
ig.read_database()
assert "does not match number" in str(error_info)
def test_read_database_nolevels_error():
"""read_database throws error no levels are defined"""
# create obj and read database
ig = isg.IsGm()
with pytest.raises(RuntimeError) as error_info:
ig.read_database()
assert "levels defined" in str(error_info)
def test_separate_isovols_exterior():
"""test that disjoint volumes are properly separated"""
# load mesh that needs separation
ig = isg.IsGm()
fs = ig.mb.create_meshset()
ig.mb.load_file(test_dir + '/vol-files/separate-vols.stl', file_set=fs)
# create useable meshset dict
ig.isovol_meshsets[(0, fs)] = {}
# manually set the geometric extents
# these are chosen such that the volume file aligns on the x plane
# geometric extents (-10, 15). The volume file y and z are -5 to 5,
# so if this is considered to be one volume in a larger geometry,
# only the surfaces on the x planes are considered exterior.
ig.xmin = -10.
ig.xmax = 15.
ig.ymin = ig.zmin = -15.
ig.ymax = ig.zmax = 15.
# separate the volumes
ig.separate_isovols()
# check there are four new surfaces
r = np.full(4, False)
num_surfs = len(ig.isovol_meshsets[(0, fs)]['surfs_EH'])
if num_surfs == 4:
r[0] = True
# check that no triangles are shared between the each of the surfaces
surf0 = ig.isovol_meshsets[(0, fs)]['surfs_EH'][0]
tris0 = set(ig.mb.get_entities_by_type(surf0, types.MBTRI))
surf1 = ig.isovol_meshsets[(0, fs)]['surfs_EH'][1]
tris1 = set(ig.mb.get_entities_by_type(surf1, types.MBTRI))
surf2 = ig.isovol_meshsets[(0, fs)]['surfs_EH'][2]
tris2 = set(ig.mb.get_entities_by_type(surf2, types.MBTRI))
surf3 = ig.isovol_meshsets[(0, fs)]['surfs_EH'][3]
tris3 = set(ig.mb.get_entities_by_type(surf3, types.MBTRI))
common_tris = [list(tris0 & tris1), list(tris0 & tris2),
list(tris0 & tris3), list(tris1 & tris2),
list(tris1 & tris3), list(tris2 & tris3)]
if all(len(c) == 0 for c in common_tris):
r[1] = True
# check that two surfaces have 10 tris and two surfaces have 2 tris
num_tris = sorted([len(tris0), len(tris1), len(tris2), len(tris3)])
if num_tris == [2, 2, 10, 10]:
r[2] = True
# check that 2 surfs have 8 verts and 2 surfs have 4 verts
verts0 = set(ig.mb.get_entities_by_type(surf0, types.MBVERTEX))
verts1 = set(ig.mb.get_entities_by_type(surf1, types.MBVERTEX))
verts2 = set(ig.mb.get_entities_by_type(surf2, types.MBVERTEX))
verts3 = set(ig.mb.get_entities_by_type(surf3, types.MBVERTEX))
num_verts = sorted([len(verts0), len(verts1), len(verts2), len(verts3)])
if num_verts == [4, 4, 8, 8]:
r[3] = True
assert(all(r))
def test_separate_isovols_single_exterior():
"""test a single vol with an exterior surface is split in separation"""
# load mesh that does not need separation
ig = isg.IsGm()
fs = ig.mb.create_meshset()
ig.mb.load_file(test_dir + '/vol-files/single-box-1.stl', file_set=fs)
# create useable meshset dict
ig.isovol_meshsets[(0, fs)] = {}
# manually set the geometric extents
# these are chosen such that the volume file aligns on the -x plane
# geometric extents (-5). The volume file x, y, and z are -5 to 5,
# so if this is considered to be one volume in a larger geometry,
# only the surface on the -x plane is considered exterior.
ig.xmin = -5.
ig.xmax = 15.
ig.ymin = ig.zmin = -15.
ig.ymax = ig.zmax = 15.
# separate the volumes
ig.separate_isovols()
# check there are two new surfaces
r = np.full(4, False)
num_surfs = len(ig.isovol_meshsets[(0, fs)]['surfs_EH'])
if num_surfs == 2:
r[0] = True
# check that no triangles are shared between the each of the surfaces
surf0 = ig.isovol_meshsets[(0, fs)]['surfs_EH'][0]
tris0 = set(ig.mb.get_entities_by_type(surf0, types.MBTRI))
surf1 = ig.isovol_meshsets[(0, fs)]['surfs_EH'][1]
tris1 = set(ig.mb.get_entities_by_type(surf1, types.MBTRI))
common_tris = tris0 & tris1
if len(common_tris) == 0:
r[1] = True
# check that one surface has 2 triangles and the other has 10
num_tris = sorted([len(tris0), len(tris1)])
if num_tris == [2, 10]:
r[2] = True
# check that one surface has 8 verts and the other has 4
verts0 = set(ig.mb.get_entities_by_type(surf0, types.MBVERTEX))
verts1 = set(ig.mb.get_entities_by_type(surf1, types.MBVERTEX))
num_verts = sorted([len(verts0), len(verts1)])
if num_verts == [4, 8]:
r[3] = True
assert(all(r))
def test_separate_isovols_single_interior():
"""test a single interior vol is unchanged when it is separated"""
# load mesh that does not need separation
ig = isg.IsGm()
fs = ig.mb.create_meshset()
ig.mb.load_file(test_dir + '/vol-files/single-box-1.stl', file_set=fs)
# create useable meshset dict
ig.isovol_meshsets[(0, fs)] = {}
# manually set the geometric extents so that no surface is on the
# exterior
ig.xmin = -15.
ig.xmax = 15.
ig.ymin = ig.zmin = -15.
ig.ymax = ig.zmax = 15.
# separate the volumes
ig.separate_isovols()
# check there is one new surfaces
r = np.full(3, False)
num_surfs = len(ig.isovol_meshsets[(0, fs)]['surfs_EH'])
if num_surfs == 1:
| |
==2 \
and now_hand_card[line][row+1] ==1: #889,打出9
print('打出的牌是{}'.format(self._get_card_str(line, row+1)))
print('打出牌的行列是:line = {}, row = {}'.format(line, row+1))
self._conv_req_mess('Discard双边6678', self._get_card_str(line, row+1), '') # 889打出9
return
elif row == 7 and now_hand_card[line][row-2] == 0 and now_hand_card[line][row-1] == 1 and \
now_hand_card[line][row] == 2 and now_hand_card[line][row+1] == 1:
print('打出的牌是{}'.format(self._get_card_str(line, row)))
print('打出牌的行列是:line = {}, row = {}'.format(line, row))
self._conv_req_mess('Discard双边6678', self._get_card_str(line, row), '') # 7889打出8变为789
return
elif row == 8 and now_hand_card[line][row-1] == 1 and now_hand_card[line][row] ==2 and \
now_hand_card[line][row-2] == 0 : #899,打出9
print('打出的牌是{}'.format(self._get_card_str(line, row-1)))
print('打出牌的行列是:line = {}, row = {}'.format(line, row-1))
self._conv_req_mess('Discard双边6678', self._get_card_str(line, row-1), '') # 889打出9
return
elif row == 8 and now_hand_card[line][row-3] == 0 and now_hand_card[line][row-2] == 1 and \
now_hand_card[line][row-1] ==1 and now_hand_card[line][row] == 2 : #7899,打出9
print('打出的牌是{}'.format(self._get_card_str(line, row)))
print('打出牌的行列是:line = {}, row = {}'.format(line, row))
self._conv_req_mess('Discard双边6678', self._get_card_str(line, row), '') # 7899打出9
return
elif row == 0 and now_hand_card[line][row] == 1 and now_hand_card[line][row+1] == 1 and now_hand_card[line][row+2] == 1 \
and now_hand_card[line][row+3] == 1 and now_hand_card[line][row+4] == 1 and now_hand_card[line][row+5] ==0 : #4连1234,再接一张5,打出5
print('打出的牌是{}'.format(self._get_card_str(line, row+4)))
print('4+1,打1;;打出牌的位置是lin={},ro={}'.format(line, row+4))
self._conv_req_mess('Discard4连+1', self._get_card_str(line, row+4), '') #12345没6接5,打5
return
elif row ==1 and now_hand_card[line][row-1]==1 and now_hand_card[line][row] == 1 and now_hand_card[line][row+1] == 1 and now_hand_card[line][row+2] == 1 \
and now_hand_card[line][row+3] == 1 and now_hand_card[line][row+4] == 1 and now_hand_card[line][row+5] ==0 : #4连2345,再接一张1,没6打出1
print('打出的牌是{}'.format(self._get_card_str(line, row-1)))
print('4+1,打1;;打出牌的位置是lin={},ro={}'.format(line, row-1))
self._conv_req_mess('Discard4连+1', self._get_card_str(line, row-1), '') #2345没6接1,打1
return
elif row >=1 and row <= 3 and now_hand_card[line][row] == 1 and now_hand_card[line][row+1] == 1 and now_hand_card[line][row+2] == 1 \
and now_hand_card[line][row+3] == 1 and now_hand_card[line][row+4] == 1 and now_hand_card[line][row+5] == 0: #4连2345,再接一张5,打出5
print('打出的牌是{}'.format(self._get_card_str(line, row+4)))
print('4+1,打5;;打出牌的位置是lin={},ro={}'.format(line, row+4))
self._conv_req_mess('Discard4连+1', self._get_card_str(line, row+4), '') # 那就直接输出改牌
return
elif row == 4 and now_hand_card[line][row-1] == 0 and now_hand_card[line][row] == 1 and now_hand_card[line][row+1] == 1 and now_hand_card[line][row+2] == 1 \
and now_hand_card[line][row+3] == 1 and now_hand_card[line][row+4] == 1: #4连5678没4,再接一张9,打出5
print('打出的牌是{}'.format(self._get_card_str(line, row)))
print('4+1,打5;;打出牌的位置是lin={},ro={}'.format(line, row))
self._conv_req_mess('Discard4连+1', self._get_card_str(line, row), '') #5678,没4接9,打5
return
elif row == 4 and now_hand_card[line][row-2]==0 and now_hand_card[line][row-1] == 1 and now_hand_card[line][row] == 1 and now_hand_card[line][row+1] == 1 and now_hand_card[line][row+2] == 1 \
and now_hand_card[line][row+3] == 1 and now_hand_card[line][row+4] == 0 : #4连5678,没9,没3再接一张4,打出8
print('打出的牌是{}'.format(self._get_card_str(line, row+3)))
print('4+1,打5;;打出牌的位置是lin={},ro={}'.format(line, row+3))
self._conv_req_mess('Discard4连+1', self._get_card_str(line, row+3), '') # 5678,没4,接4.打8
return
elif row == 5 and now_hand_card[line][row-2] ==0 and now_hand_card[line][row-1] ==1 and now_hand_card[line][row] ==1 and now_hand_card[line][row+1] ==1 and \
now_hand_card[line][row+2] == 1 and now_hand_card[line][row+3] == 1 : #6789,没4,接5,打5
print('打出的牌是{}'.format(self._get_card_str(line, row-1)))
print('4+1,打5;;打出牌的位置是lin={},ro={}'.format(line, row-1))
self._conv_req_mess('Discard4连+1', self._get_card_str(line, row-1), '') # 5678,没4,接4.打8
return
elif row < 6 and now_hand_card[line][row] == 1 and now_hand_card[line][row+3] == 1:#手牌1234
if now_hand_card[line][row+1] == 2 and now_hand_card[line][row+2] == 1:#手牌1234,接到2,打出1
print('打出的牌是{}'.format(self._get_card_str(line, row)))
print('打出牌的位置是lin={},ro={}'.format(line, row))
self._conv_req_mess('Discard4连(1234)接2', self._get_card_str(line, row), '')
return
elif now_hand_card[line][row+1] == 1 and now_hand_card[line][row+2] == 2 and now_hand_card[line][row+3] == 0:#手牌1234,接到3,打出4
print('打出的牌是{}'.format(self._get_card_str(line, row + 3))) #1234,没5,接3打4
print('打出牌的位置是lin={},ro={}'.format(line, row))
self._conv_req_mess('Discard4连(1234)接3', self._get_card_str(line, row + 3), '')
return
elif row < 2 and now_hand_card[line][row] == 1 and now_hand_card[line][row+1] == 1 and now_hand_card[line][row+2] == 1 and \
now_hand_card[line][row+3] == 1 and now_hand_card[line][row+4] == 1 and now_hand_card[line][row+5] == 1 and \
now_hand_card[line][row+6] == 1 :
if now_hand_card[line][row+7] == 1: #测试这里B2B3B3B4B4B8B9B9
print('打出的牌是{}'.format(self._get_card_str(line, row + 7)))
print('打出牌的位置是lin={},ro={}'.format(line, row))
self._conv_req_mess('Discard', self._get_card_str(line, row + 7), '')#手牌1234567,接到一张8,打出8
return
elif row < 3 and now_hand_card[line][row] == 1 and now_hand_card[line][row+3] == 1 and now_hand_card[line][row+6] == 1 :#7连1234567
if now_hand_card[line][row+1] == 2 and now_hand_card[line][row+2] == 1 and now_hand_card[line][row+4] == 1 \
and now_hand_card[line][row+5] == 1 :
print('打出的牌是{}'.format(self._get_card_str(line, row)))
print('打出牌的位置是lin={},ro={}'.format(line, row))
self._conv_req_mess('Discard', self._get_card_str(line, row), '') # 手牌1234567,接到一张2,打出1
return
elif now_hand_card[line][row+1] == 1 and now_hand_card[line][row+2] == 2 and now_hand_card[line][row+4] == 1 \
and now_hand_card[line][row+5] == 1 :
print('打出的牌是{}'.format(self._get_card_str(line, row + 3)))
print('打出牌的位置是lin={},ro={}'.format(line, row))
self._conv_req_mess('Discard', self._get_card_str(line, row+3), '') # 手牌1234567,接到一张3,打出4
return
elif now_hand_card[line][row+1] == 1 and now_hand_card[line][row+2] == 1 and now_hand_card[line][row+4] == 2 \
and now_hand_card[line][row+5] == 1 :
print('打出的牌是{}'.format(self._get_card_str(line, row + 3)))
print('打出牌的位置是lin={},ro={}'.format(line, row))
self._conv_req_mess('Discard', self._get_card_str(line, row + 3), '') # 手牌1234567,接到一张5,打出4
return
elif now_hand_card[line][row+1] == 1 and now_hand_card[line][row+2] == 1 and now_hand_card[line][row+4] == 1 \
and now_hand_card[line][row+5] == 2 :
print('打出牌的位置是lin={},ro={}'.format(line, row))
print('打出的牌是{}'.format(self._get_card_str(line, row + 6)))
self._conv_req_mess('Discard', self._get_card_str(line, row + 6), '') # 手牌1234567,接到一张6,打出7
return
elif now_hand_card[line][row] == 3 : #打出刻子31旁边的1这章单牌
if row == 0 and now_hand_card[line][row+1] == 1 and now_hand_card[line][row+2] == 0 :#1112打出2
print('打出的牌是{}'.format(self._get_card_str(line, row + 1)))
print('打出牌的位置是lin={},ro={}'.format(line, row+1))
self._conv_req_mess('Discard', self._get_card_str(line, row + 1), '') # 手牌2223打出2
return
elif row < 7 and row > 0 and now_hand_card[line][row+1] ==1 and now_hand_card[line][row+2] == 0: #122
print('打出的牌是{}'.format(self._get_card_str(line, row + 1)))
print('打出牌的位置是lin={},ro={}'.format(line, row+1))
self._conv_req_mess('Discard', self._get_card_str(line, row + 1), '') # 手牌2223打出3
return
elif row == 1 and now_hand_card[line][row-1] == 1 : #1222,打出1
print('打出的牌是{}'.format(self._get_card_str(line, row -1)))
print('打出牌的位置是lin={},ro={}'.format(line, row-1))
self._conv_req_mess('Discard', self._get_card_str(line, row -1), '') # 手牌1222打出3
return
elif row == 1 and now_hand_card[line][row+1] == 1 and now_hand_card[line][row+2] == 0 :
print('打出的牌是{}'.format(self._get_card_str(line, row + 1)))
print('打出牌的位置是lin={},ro={}'.format(line, row+1))
self._conv_req_mess('Discard', self._get_card_str(line, row + 1), '') # 手牌1222打出3
return
elif row < 7 and row > 1 and now_hand_card[line][row+1] == 0 and now_hand_card[line][row-1] == 1\
and now_hand_card[line][row-2] ==0: #2333
print('打出的牌是{}'.format(self._get_card_str(line, row -1)))
print('打出牌的位置是lin={},ro={}'.format(line, row-1))
self._conv_req_mess('Discard', self._get_card_str(line, row -1), '') # 手牌#2333打出2
return
elif row == 7 : #手牌7888,8889打出7 或者9 / 8999 打出8
if now_hand_card[line][row-2] == 0 and now_hand_card[line][row-1] == 1 :
print('打出的牌是{}'.format(self._get_card_str(line, row - 1)))
print('打出牌的位置是lin={},ro={}'.format(line, row-1))
self._conv_req_mess('Discard', self._get_card_str(line, row-1), '') # 手牌8999打出8
return
elif now_hand_card[line][row-2] == 0 and now_hand_card[line][row+1] == 1 :
print('打出的牌是{}'.format(self._get_card_str(line, row + 1)))
print('打出牌的位置是lin={},ro={}'.format(line, row+1))
self._conv_req_mess('Discard', self._get_card_str(line, row + 1), '') # 手牌8889打出9
return
elif row == 8 and now_hand_card[line][row-2] == 0 and now_hand_card[line][row-1] == 1: #8999打出8
print('打出的牌是{}'.format(self._get_card_str(line, row - 1)))
print('打出牌的位置是lin={},ro={}'.format(line, row-1))
self._conv_req_mess('Discard', self._get_card_str(line, row - 1), '') # 手牌8889打出9
return
#如果这些都没有那就得在补充了
def _del_other_out(self):
'''
Handle a tile discarded by another player
:return:
'''
temp_hand_count = copy.deepcopy(self.StateData[self.player_seat, 0])
#print('deal_other_out输出当前手牌的矩阵:')
#print( temp_hand_count)
line, row = self._get_card_index(self.Now_Deal)
print('Now_Deal: line = {},row = {}'.format(line,row))
#print('输出下line ={},row={}'.format(line,row))
temp_hand_count[line, row] += 1
print('line row 位置上的值是:',temp_hand_count[line][row])
print(temp_hand_count)
print('deal_outher_out正在处理的牌是:',self.Now_Deal)
is_hu, _ = self._check_hu(temp_hand_count)
logging.debug("胡牌判断:%s", is_hu)
if is_hu:
self._conv_req_mess('Win', self.Now_Deal)
return
try:
can_gang, gang_count = self._check_gang(self.player_seat, is_self=False)
if can_gang: #补扛
self._conv_req_mess('Kon', self._get_card_str(gang_count[0][0], gang_count[0][1]) * 4, '')
logging.debug(self.ReqMess)
return
can_pen, pen_count = self._check_peng(self.player_seat)
if can_pen:
self._conv_req_mess('Pon', self._get_card_str(pen_count[0][0], pen_count[0][1]) * 3, '')
logging.debug(self.ReqMess)
return
can_chi, chi_count = self._check_chi(self.player_seat)
logging.debug(chi_count)
if can_chi:
action_index = self._get_card_cod(chi_count[0][0], chi_count[0][1])
logging.debug(action_index)
#对子 刻子 旁边不吃 分别判断三个位置,
'''
print('row == 0')
if row == 0 and (temp_hand_count[line][row+1] == 2 and temp_hand_count[line][row+2] == 2) or (temp_hand_count[line][row+1] == 3 and temp_hand_count[line][row+2] == 3 ): #2233,不能吃1
self._conv_req_mess('Pass1', '', '') #2233,不能吃1
return
print('row == 1')
if row == 1 and (temp_hand_count[line][row-1] == 2 and temp_hand_count[line][row+1] == 2 ) or \
temp_hand_count[line][row + 1] == 2 and temp_hand_count[line][row+2] == 2 or (temp_hand_count[line][row-1] == 3 and temp_hand_count[line][row+1] == 3 ) or \
temp_hand_count[line][row + 1] == 3 and temp_hand_count[line][row+2] == 3: #11233 不吃2 23344,不吃2
self._conv_req_mess('Pass2', '', '') #1133 不吃2 23344,不吃2
return
print('row >=2 row <= 6')
if row >= 2 and row <= 6 and temp_hand_count[line][row-2] == 0 and temp_hand_count[line][row-1] == 0 and temp_hand_count[line][row+1] == 0 and ( temp_hand_count[line][row-2] == 2 \
and temp_hand_count[line][row-1] == 2 or temp_hand_count[line][row-1] == 2 and temp_hand_count[line][row+1] == 2\
or temp_hand_count[line][row+1] == 2 and temp_hand_count[line][row+2] == 2 ) or ( temp_hand_count[line][row-2] == 3 \
and temp_hand_count[line][row-1] == 3 or temp_hand_count[line][row-1] == 3 and temp_hand_count[line][row+1] == 3\
or temp_hand_count[line][row+1] == 3 and temp_hand_count[line][row+2] == 3 ): #1122,不吃3,34455,不吃3...56677,不吃5,55667,不吃7
self._conv_req_mess('Pass3', '', '') # 56677不吃5;55677不吃6;55667不吃7;
return
print('row == 7')
if row == 7 and temp_hand_count[line][row-1] == 2 and temp_hand_count[line][row+1] == 2 or temp_hand_count[line][row-2] == 2 and \
temp_hand_count[line][row-1] == 2 or temp_hand_count[line][row-1] == 3 and temp_hand_count[line][row+1] == 3 or temp_hand_count[line][row-2] == 3 and \
temp_hand_count[line][row-1] == 3: #77899不吃8,66778,不吃8
self._conv_req_mess('Pass4', '', '') #77899不吃8,
return
print('row == 8')
if row == 8 and temp_hand_count[line][row-2] == 2 and temp_hand_count[line][row-1] == 2 or \
temp_hand_count[line][row-2] == 3 and temp_hand_count[line][row-1] == 3:
self._conv_req_mess('Pass5', '', '') #77889不吃9
return
'''
line1, row1 = self._get_card_index(index_chi[action_index][0:2])
line2, row2 = self._get_card_index(index_chi[action_index][2:4])
line3, row3 = self._get_card_index(index_chi[action_index][4:6])
if (temp_hand_count[line1, row1] == 2 and temp_hand_count[line2, row2] == 2) or (
temp_hand_count[line1, row1] == 2 and temp_hand_count[line3, row3] == 2) or (
temp_hand_count[line2, row2] == 2 and temp_hand_count[line3, row3] == 2) or temp_hand_count[
line1, row1] == 3 or temp_hand_count[line2, row2] == 3 or temp_hand_count[line3, row3] == 3:
self._conv_req_mess('Pass', '', '')
return
self._conv_req_mess('Chow', index_chi[action_index], '')
return
self._conv_req_mess('Pass6', '', '')
except:
logging.error("消息处理失败")
if __name__ | |
#!/usr/bin/env python3
import argparse
import glob
import json
import os
import re
from collections import defaultdict
from datetime import date, timedelta, datetime
OVERVIEW_COUNT = 10
# Common things ---------------------------------------------------------------
# See main at bottom
class ManualChange:
"""
Apply a change to a range of menus in the v2 API. v1 is not supported.
"""
def __init__(self, replacer, resto, start, end, all_days=False):
"""
:param replacer: The function that will do the replacements. It will receive the path to the file and the
original menu.
:param start: The start date (inclusive).
:param end: The end date (inclusive).
:param resto: Which restaurant(s) to apply to.
:param all_days: If the message should be added for all weekdays in the range. If false (the default), the
changes will only be applied if there already is a menu for the day.
"""
self.replacer = replacer
self.start = start
self.end = end
self.resto = resto
if isinstance(self.resto, str):
self.resto = [self.resto]
assert isinstance(self.resto, list)
self.all_days = all_days
def is_applicable(self, menu_date):
"""Check if this change is applicable to the given date"""
return self.start <= menu_date <= self.end
def date_range(self):
"""Return an iterator over the applicable range. Only weekdays are returned."""
for n in range(int((self.end - self.start).days) + 1):
result = self.start + timedelta(n)
if result.weekday() < 5:
yield result
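# Hypothetical usage sketch (not part of the original script): how a ManualChange
# is typically wired up. The replacer, resto id and dates below are made up.
def _example_change():
    def _closed(_path, original):
        original['message'] = 'Example: resto temporarily closed.'
        original['open'] = False
        return original
    change = ManualChange(replacer=_closed, resto='nl/sterre',
                          start=date(2021, 4, 5), end=date(2021, 4, 9))
    return [d for d in change.date_range() if change.is_applicable(d)]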
# <NAME> 18
# Sint-Jansvest, which no longer serves a set menu, only leftovers.
def restjesmaand18_replacer(_path, original):
# original: {"date": "2018-06-14", "meals": [], "open": false, "vegetables": []}
name = ("Om voedseloverschotten op het einde van het academiejaar te beperken, "
"kunnen we geen dagmenu presenteren. "
"Ga langs en laat je verrassen door ons keukenpersoneel.")
return {
"message": name,
"date": original["date"],
"meals": [],
"open": True,
"vegetables": [],
}
# Easter holiday 2019
def paasvakantie19_general(_path, original):
original['message'] = ("Tijdens de paasvakantie zijn resto's Campus Sterre en Campus Merelbeke geopend als "
"cafetaria.")
original['open'] = True
return original
def paasvakantie19_en(_path, original):
original['message'] = 'During the Easter Holiday restos Campus Sterre and Campus Merelbeke operate as cafetaria.'
original['open'] = True
return original
def paasvakantie19_brug(_path, original):
original['message'] = "Tijdens de paasvakantie is De Brug enkel 's middags geopend."
return original
# Construction work at De Brug, which closes the resto.
def werken_brug19_replacer(_path, original):
message = ('De Brug sluit van 20 mei tot 30 september 2019 voor verbouwingswerken. Tijdens de sluiting neemt resto '
'Kantienberg de functies en het aanbod van de Brug over, zoals de avondopening.')
return {
"message": message,
"date": original["date"],
"open": False
}
def werken_brug19_replacer2(_path, original):
message = ("Resto De Brug en Cafetaria De Brug zijn nog even gesloten in afwachting van het voltooien van de"
" werken. Tot dan kan je's middags en 's avonds terecht in Resto Kantienberg. Wij houden jullie op de"
" hoogte!<br>'s Middags is Resto Sint-Jansvest tijdelijk een reguliere resto met een uitgebreid aanbod"
" aan belegde broodjes. Enkel soep of broodjes nodig? Dan is Cafetaria campus Boekentoren (via"
" Blandijnberg) zeer dichtbij.")
return {
"message": message,
"date": original["date"],
"open": False
}
def tijdelijke_sluiting_sint_jansvest(_path, original):
message = "Resto Sint-Jansvest is tijdelijk gesloten wegens wegenwerken. Tijdens de werken kan u terecht in De " \
"Brug. "
return {
"message": message,
"date": original["date"],
"open": False,
"meals": original.get("meals", [])
}
def corona_sluiting_nl(_path, original):
message = "De studentenrestaurants en cafetaria's sluiten vanaf maandag 16 maart 2020 de deuren. " \
"De UGent neemt die maatregel om verdere verspreiding van het coronavirus tot een minimum te beperken. " \
"De sluiting loopt zeker tot en met 7 juni 2020."
return {
"message": message,
"date": original["date"],
"open": False
}
def corona_sluiting_en(_path, original):
message = "The student restaurants and cafeterias will be closed as from Monday 16 March 2020. " \
"Ghent University is taking this measure to minimize the further spreading of the coronavirus. " \
"The closure will certainly last until 7 June 2020."
return {
"message": message,
"date": original["date"],
"open": False
}
def corona_heropening_nl(_path, original):
message = "Ter plaatse eten is momenteel niet mogelijk; enkel takeaway van een beperkt aanbod. De coronamaatregelen blijven van kracht! " \
"Resto Dunant, Coupure en Sterre en van cafetaria UZ Gent en Boekentoren zijn opnieuw open. " \
"Bij de start van het academiejaar volgen de andere locaties."
return {
"message": message,
"date": original["date"],
"open": True,
"meals": [{
"kind": "meat",
"type": "main",
"name": "Spaghetti bolognese met kaas",
"price": "\u20ac 3,60"
}, {
"kind": "vegetarian",
"type": "main",
"name": "Salad bowl: Caesar",
"price": ""
}, {
"kind": "vegetarian",
"type": "main",
"name": "Salad bowl: Tomaat-Mozzarella",
"price": ""
}, {
"kind": "soup",
"type": "main",
"name": "Dagsoep",
"price": ""
}],
"vegetables": []
}
def corona_heropening_en(_path, original):
message = "The canteen is closed; only takeaway of a reduced offering is possible. The Corona measures remain active! " \
"Resto Dunant, Coupure & Sterre and cafetaria UZ Gent & Boekentoren are open. " \
"At the start of the academic year, the other locations will follow."
return {
"message": message,
"date": original["date"],
"open": True,
"meals": [{
"kind": "meat",
"type": "main",
"name": "Spaghetti bolognese with cheese",
"price": "\u20ac 3,60"
}, {
"kind": "vegetarian",
"type": "main",
"name": "Salad bowl: Caesar",
"price": ""
}, {
"kind": "vegetarian",
"type": "main",
"name": "Salad bowl: Tomato-Mozzarella",
"price": ""
}, {
"kind": "soup",
"type": "main",
"name": "Soup of the day",
"price": ""
}],
"vegetables": []
}
def corona_closed_for_now(_path, original):
message = "<NAME>, Coupure en Sterre en van cafetaria UZ Gent en Boekentoren zijn opnieuw open. " \
"Bij de start van het academiejaar volgen de andere locaties."
return {
"message": message,
"date": original["date"],
"open": False
}
def kantienberg_2020(_path, original):
return {
"message": "<NAME> blijft gesloten tijdens academiejaar 2020-2021.",
"date": original["date"],
"open": False
}
def corona_2020_2021_nl(_path, original):
message = "Door de coronamaatregelen veranderen enkele zaken: ter plaatse eten is niet mogelijk " \
"(enkel afhalen) en er is een beperkter aanbod."
original["message"] = message
return original
def corona_2020_2021_en(_path, original):
message = "Due to the corona measures, some changes are made: only takeaway is possible " \
"and the offering is reduced."
original["message"] = message
return original
def corona_2020_2021_nl_red(_path, original):
message = "Enkel afhalen en een beperkter aanbod. De coronamaatregelen blijven van kracht!"
original["message"] = message
return original
def corona_2020_2021_cold(_path, original):
message = "Enkel cafetaria-aanbod en koude meeneemgerechten. De coronamaatregelen blijven van kracht!"
original["message"] = message
return original
def corona_2020_2021_en_red(_path, original):
message = "Due to the corona measures, some changes are made: only takeaway is possible " \
"and the offering is reduced. " \
"The restaurants and cafetaria's will remain open in code red."
original["message"] = message
return original
def exam_closure_sterre_2020(_path, original):
message = "Door examens zal de resto gesloten zijn op 4, 15, 18 en 26 januari."
original["message"] = message
original["open"] = False
return original
def exam_closure_dunant_2020(_path, original):
message = "Door examens zal de resto gesloten zijn op 4, 8, 15, 18, 22, 25 en 29 januari."
original["message"] = message
original["open"] = False
return original
def christmas(_path, original):
original["message"] = "Naast de UGent-verlofdagen zijn de resto's ook gesloten tijdens de eerste week van de " \
"kerstvakantie. "
original["open"] = False
return original
def exam_closure_en_2020(_path, original):
original["message"] = "Resto Sterre and Dunant are closed on some days in January due to exams. Check the site " \
"for more details."
return original
def dies_natalis_2021(_path, original):
original["message"] = "De resto's zijn gesloten op Dies Natalis."
original["open"] = False
return original
def dies_natalis_2021_en(_path, original):
original["message"] = "The restaurants are closed on Dies Natalis."
original["open"] = False
return original
def easter_2021_week1(_path, original):
original["message"] = "In de paasvakantie zullen resto's Sterre, Ardoyen, De Brug en UZ Gent open zijn, " \
"maar enkel als cafetaria. "
original["open"] = True
return original
def easter_2021_week2(_path, original):
original["message"] = "In de paasvakantie zullen resto's Sterre, Ardoyen, De Brug, UZ Gent en Coupure open zijn, " \
"maar enkel als cafetaria. "
original["open"] = True
return original
def summer_2021_1(_path, original):
original["message"] = "Cafetaria de Brug en resto's Ardoyen, Sterre en Merelbeke met een gewijzigd aanbod. Er zullen" \
" dan enkel broodjes en salad bowls te verkrijgen zijn. De zitplaatsen kunnen nog niet gebruikt worden."
original["open"] = True
return original
def summer_2021_2(_path, original):
original["message"] = "Cafetaria's de Brug en UZ Gent, | |
buildlogs, with the
given phase metadata. If the job reaches a completed state, update_job_phase also updates the
queue and cleans up any existing state and executors.
"""
try:
job_data = self._orchestrator.get_key(job_id)
job_data_json = json.loads(job_data)
build_job = BuildJob(AttrDict(job_data_json["job_queue_item"]))
except KeyError:
logger.warning("Job %s no longer exists in the orchestrator, likely expired", job_id)
return False
except Exception as e:
logger.error("Exception loading job %s from orchestrator: %s", job_id, e)
return False
# Check if the build has not already reached a final phase
if build_job.repo_build.phase in EphemeralBuilderManager.ARCHIVABLE_BUILD_PHASES:
logger.warning(
"Job %s is already in a final completed phase (%s), cannot update to %s",
job_id,
build_job.repo_build.phase,
phase
)
return False
# Update the build phase
phase_metadata = phase_metadata or {}
updated = model.build.update_phase_then_close(build_job.build_uuid, phase)
if updated:
self.append_log_message(build_job.build_uuid, phase, self._build_logs.PHASE, phase_metadata)
# Check if on_job_complete needs to be called
if updated and phase in EphemeralBuilderManager.COMPLETED_PHASES:
executor_name = job_data_json.get("executor_name")
execution_id = job_data_json.get("execution_id")
if phase == BUILD_PHASE.ERROR:
self.on_job_complete(build_job, BuildJobResult.ERROR, executor_name, execution_id)
elif phase == BUILD_PHASE.COMPLETE:
self.on_job_complete(build_job, BuildJobResult.COMPLETE, executor_name, execution_id)
elif phase == BUILD_PHASE.INTERNAL_ERROR:
self.on_job_complete(build_job, BuildJobResult.INCOMPLETE, executor_name, execution_id)
elif phase == BUILD_PHASE.CANCELLED:
self.on_job_complete(build_job, BuildJobResult.CANCELLED, executor_name, execution_id)
return updated
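# Illustrative usage sketch (argument names inferred from the body above, not from the
# original call sites): a caller holding a job_id and a target phase would invoke e.g.
#     manager.update_job_phase(job_id, BUILD_PHASE.COMPLETE, {"took": 42.0})
# The optional phase_metadata dict is appended to the build logs alongside the PHASE entry.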
def job_heartbeat(self, job_id):
"""Extend the processing time in the queue and updates the ttl of the job in the
orchestrator.
"""
try:
job_data = self._orchestrator.get_key(job_id)
job_data_json = json.loads(job_data)
build_job = BuildJob(AttrDict(job_data_json["job_queue_item"]))
except KeyError:
logger.warning("Job %s no longer exists in the orchestrator, likely expired", job_id)
return False
except Exception as e:
logger.error("Exception loading job %s from orchestrator: %s", job_id, e)
return False
max_expiration = datetime.utcfromtimestamp(job_data_json["max_expiration"])
max_expiration_remaining = max_expiration - datetime.utcnow()
max_expiration_sec = max(1, int(max_expiration_remaining.total_seconds()))
ttl = min(HEARTBEAT_PERIOD_SECONDS * 2, max_expiration_sec)
# Update job expirations
if (job_data_json["last_heartbeat"] and
dateutil.parser.isoparse(job_data_json["last_heartbeat"]) < datetime.utcnow() - HEARTBEAT_DELTA):
logger.warning(
"Heartbeat expired for job %s. Marking job as expired. Last heartbeat received at %s",
job_id,
job_data_json["last_heartbeat"],
)
self.update_job_phase(job_id, BUILD_PHASE.INTERNAL_ERROR)
return False
job_data_json["last_heartbeat"] = str(datetime.utcnow())
self._queue.extend_processing(
build_job.job_item,
seconds_from_now=JOB_TIMEOUT_SECONDS,
minimum_extension=MINIMUM_JOB_EXTENSION,
)
try:
self._orchestrator.set_key(
job_id, json.dumps(job_data_json), expiration=ttl
)
except OrchestratorConnectionError:
logger.error(
"Could not update heartbeat for job %s. Orchestrator is not available", job_id
)
return False
return True
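# Worked example for the ttl computation above (constant value assumed for illustration):
# with HEARTBEAT_PERIOD_SECONDS = 60 and 90 seconds left until max_expiration,
# ttl = min(60 * 2, 90) = 90; with 500 seconds left, ttl = min(120, 500) = 120,
# so the orchestrator key never outlives the job's maximum expiration.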
def cancel_build(self, build_id):
build = model.build.get_repository_build(build_id)
if build.phase in EphemeralBuilderManager.PHASES_NOT_ALLOWED_TO_CANCEL_FROM:
return False
cancelled = model.build.update_phase_then_close(build_id, BUILD_PHASE.CANCELLED)
if cancelled:
try:
job_data = self._orchestrator.get_key(self._job_key(build_id))
job_data_json = json.loads(job_data)
build_job = BuildJob(AttrDict(job_data_json["job_queue_item"]))
self.on_job_complete(
build_job,
BuildJobResult.CANCELLED,
job_data_json.get("executor_name"),
job_data_json.get("execution_id"),
)
except KeyError:
logger.warning("Could not cleanup cancelled job %s. Job does not exist in orchestrator", job_id)
return cancelled
def determine_cached_tag(self, build_id, base_image_id):
job_id = self._job_key(build_id)
try:
job_data = self._orchestrator.get_key(job_id)
job_data_json = json.loads(job_data)
build_job = BuildJob(AttrDict(job_data_json["job_queue_item"]))
except KeyError:
logger.warning("Job %s does not exist in orchestrator: %s", job_id)
return None
except Exception as e:
logger.warning("Exception loading job from orchestrator: %s", e)
return None
return build_job.determine_cached_tag(base_image_id)
def schedule(self, build_id):
"""Schedule an existed job to be started on the configured control planes (executors)."""
logger.debug("Scheduling build %s", build_id)
allowed_worker_count = self._manager_config.get("ALLOWED_WORKER_COUNT", 1)
if self._running_workers() >= allowed_worker_count:
logger.warning("Could not schedule build %s. Number of workers at capacity: %s.", build_id, self._running_workers())
return False, TOO_MANY_WORKERS_SLEEP_DURATION
job_id = self._job_key(build_id)
try:
build_job = self._build_job_from_job_id(job_id)
except BuildJobDoesNotExistsError as bjne:
logger.warning("Failed to schedule job %s - Job no longer exists in the orchestrator, likely expired: %s", job_id, bjne)
return False, CREATED_JOB_TIMEOUT_SLEEP_DURATION
except BuildJobError as bje:
logger.warning("Failed to schedule job %s - Could not get job from orchestrator: %s", job_id, bje)
return False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION
registration_token = self.generate_build_token(
BUILD_JOB_REGISTRATION_TYPE,
build_job.build_uuid,
job_id,
EPHEMERAL_SETUP_TIMEOUT
)
started_with_executor = None
execution_id = None
for executor in self._ordered_executors:
namespace = build_job.namespace
if not executor.allowed_for_namespace(namespace):
logger.warning(
"Job %s (namespace: %s) cannot use executor %s",
job_id,
namespace,
executor.name,
)
continue
# Check if we can use this executor based on the retries remaining.
if executor.minimum_retry_threshold > build_job.retries_remaining:
build_fallback.labels(executor.name).inc()
logger.warning(
"Job %s cannot use executor %s as it is below retry threshold %s (retry #%s) - Falling back to next configured executor",
job_id,
executor.name,
executor.minimum_retry_threshold,
build_job.retries_remaining,
)
continue
logger.debug("Starting builder for job %s with selected executor: %s", job_id, executor.name)
try:
execution_id = executor.start_builder(registration_token, build_job.build_uuid)
except Exception:
logger.exception("Exception when starting builder for job: %s - Falling back to next configured executor", job_id)
continue
started_with_executor = executor
# Break out of the loop now that we've started a builder successfully.
break
# If we didn't start the job, cleanup and return it to the queue.
if started_with_executor is None:
logger.error("Could not start ephemeral worker for build %s", build_job.build_uuid)
# Delete the associated build job record.
self._orchestrator.delete_key(job_id)
return False, EPHEMERAL_API_TIMEOUT
# Store metric data tracking job
metric_spec = json.dumps(
{"executor_name": started_with_executor.name, "start_time": time.time(),}
)
# Mark the job as scheduled
setup_time = started_with_executor.setup_time or EPHEMERAL_SETUP_TIMEOUT
if not self.job_scheduled(job_id, started_with_executor.name, execution_id, setup_time):
return False, EPHEMERAL_API_TIMEOUT
self._write_metric_spec(build_job.build_uuid, metric_spec)
return True, None
def _job_expired_callback(self, key_change):
""" Callback invoked when job key is changed, except for CREATE, SET events.
DELETE and EXPIRE exvents make sure the build is marked as completed and remove any
state tracking, executors left.
"""
if key_change.event == KeyEvent.EXPIRE:
job_metadata = json.loads(key_change.value)
build_job = BuildJob(AttrDict(job_metadata["job_queue_item"]))
executor_name = job_metadata.get("executor_name")
execution_id = job_metadata.get("execution_id")
job_result = BuildJobResult.EXPIRED
model.build.update_phase_then_close(build_job.build_uuid, RESULT_PHASES[job_result])
self.on_job_complete(build_job, job_result, executor_name, execution_id)
def _job_cancelled_callback(self, key_change):
if key_change.event not in (KeyEvent.CREATE, KeyEvent.SET):
return
job_metadata = json.loads(key_change.value)
build_job = BuildJob(AttrDict(job_metadata["job_queue_item"]))
executor_name = job_metadata.get("executor_name")
execution_id = job_metadata.get("execution_id")
job_result = BuildJobResult.CANCELLED
self.on_job_complete(build_job, job_result, executor_name, execution_id)
def _cleanup_job_from_orchestrator(self, build_job):
""" Cleanup the given job from the orchestrator.
This includes any keys related to that job: job keys, expiry keys, metric keys, ...
"""
lock_key = self._lock_key(build_job.build_uuid)
lock_acquired = self._orchestrator.lock(lock_key)
if lock_acquired:
try:
self._orchestrator.delete_key(self._job_key(build_job.build_uuid))
self._orchestrator.delete_key(self._metric_key(build_job.build_uuid))
except KeyError:
pass
finally:
self._orchestrator.delete_key(lock_key) # Release lock
def append_build_log(self, build_id, log_message):
"""
Append the logs from Docker's build output.
This checks if the given message is a "STEP" line from Docker's output,
and sets the log type to "COMMAND" if so.
See https://github.com/quay/quay-builder/blob/master/docker/log_writer.go
to get the serialized message structure
"""
try:
log_data = json.loads(log_message)
except ValueError:
raise
fully_unwrapped = ""
keys_to_extract = ["error", "status", "stream"]
for key in keys_to_extract:
if key in log_data:
fully_unwrapped = log_data[key]
break
current_log_string = str(fully_unwrapped)
current_step = _extract_current_step(current_log_string)
if current_step:
self.append_log_message(build_id, current_log_string, log_type=self._build_logs.COMMAND)
else:
self.append_log_message(build_id, current_log_string)
def append_log_message(self, build_id, log_message, log_type=None, log_data=None):
"""
Append the given message to the buildlogs.
log_data adds additional context to the log message.
log_type can be one of: "command", "phase", "error"
If the log_message is an output line of Docker's build output, and not the first line of a RUN command,
log_type should be set to None.
For example, an entry for a phase change might have the following structure:
{
"type": "phase"
"message": "build-scheduled"
"data": {
"datetime": "2020-10-26 05:37:25.932196"
}
}
"""
log_data = log_data or {}
log_data["datetime"] = str(datetime.now())
try:
self._build_logs.append_log_message(build_id, log_message, log_type, log_data)
except Exception as e:
logger.exception("Could not append log to buildlogs for build %s - %s", e, build_id)
def _running_workers(self):
return sum([x.running_builders_count for x in self._ordered_executors])
def _terminate_executor(self, executor_name, execution_id):
"""Cleanup existing running executor running on `executor_name` with `execution_id`."""
executor = self._executor_name_to_executor.get(executor_name)
if executor is None:
logger.error("Could not find registered executor %s to terminate %s", executor_name, execution_id)
return
# Terminate the executor's execution
logger.debug("Terminating executor %s with execution id %s", executor_name, execution_id)
executor.stop_builder(execution_id)
def _write_metric_spec(self, build_id, payload):
metric_key = self._metric_key(build_id)
try:
self._orchestrator.set_key(
metric_key,
payload,
overwrite=False,
expiration=self.machine_max_expiration + 60,
)
except KeyError:
logger.warning(
"Metric already exists in orchestrator for build %s. Build was likely started before and requeued.",
build_id,
)
except (OrchestratorConnectionError, OrchestratorError) as oe:
logger.error("Error when writing metric for build %s to orchestrator: %s", build_id, oe)
def _write_duration_metric(self, metric, build_id, job_status=None):
try:
metric_data = self._orchestrator.get_key(self._metric_key(build_id))
parsed_metric_data = json.loads(metric_data)
start_time = parsed_metric_data["start_time"]
executor = parsed_metric_data.get("executor_name", "unknown")
if job_status is not None:
metric.labels(executor, str(job_status)).observe(time.time() - start_time)
else:
metric.labels(executor).observe(time.time() - start_time)
except Exception:
logger.exception("Could not write metric for build %s", build_id)
def _work_checker(self):
logger.debug("Initializing work checker")
while True:
logger.debug("Writing queue metrics")
self._queue.update_metrics()
with database.CloseForLongOperation(app.config):
time.sleep(WORK_CHECK_TIMEOUT)
logger.debug("Checking for more work from the build queue")
processing_time = EPHEMERAL_SETUP_TIMEOUT + SETUP_LEEWAY_SECONDS
job_item = self._queue.get(processing_time=processing_time, ordering_required=True)
if job_item is None:
logger.debug(
"No additional work found. Going to sleep for %s seconds", WORK_CHECK_TIMEOUT
)
continue
try:
build_job = BuildJob(job_item)
except BuildJobLoadException as bjle:
logger.error(
"BuildJobLoadException. Job data: %s. No | |
- 1:
next_level = list_array[index + 1]['level']
next_cont = list_array[index + 1]['cont']
else:
next_level = None
next_cont = None
# Output
if prev_level is None or level > prev_level:
start = 0 if prev_level is None else prev_level
for current in range(start, level):
gen.append(' ' * current + f'<{starter}>\n')
heap.append(ender)
if current < level - 1:
gen.append(' ' * current + f' <li>\n')
elif prev_level == level:
current = level - 1
if not closed and not cont:
gen.append(' ' * current + ' </li>\n')
elif level < prev_level:
start = prev_level
for current in range(start, level, -1):
ender = heap[-1]
if current != start:
gen.append(' ' * (current - 1) + ' </li>\n')
gen.append(' ' * (current - 1) + f'</{ender}>\n')
heap.pop()
current -= 2
gen.append(' ' * current + ' </li>\n')
# Line
s = ' ' * current + ' '
if cont:
s += '<br>'
else:
s += '<li>'
s += line
if (next_level is None or next_level <= level) and not (next_level == level and next_cont):
s += '</li>\n'
closed = True
#elif next_cont is not None and not next_cont)
else:
s += '\n'
closed = False
gen.append(s)
if len(heap) > 0:
while len(heap) > 0:
ender = heap[-1]
if not closed:
gen.append(' ' * (len(heap) - 1) + ' </li>\n')
gen.append(' ' * (len(heap) - 1) + f'</{ender}>\n')
closed = False
heap.pop()
def process_lines(lines, gen=None):
gen = Generation() if gen is None else gen
if isinstance(lines, str):
lines = [line + '\n' for line in lines.split('\n')]
# The 6 HTML constants are defined in Result class
in_table = False
in_definition_list = False
in_code_free_block = False
in_code_block = False
in_pre_block = False
code_lang = None
# 1st Pass : prefetch links, replace special HTML char, skip comments
# Empty line must be kept to separate lists!
after = []
for line in lines:
# Constants must be read first; they are defined once, anywhere in the doc
if line.startswith('!const '):
command, value = process_constvar(line)
if command == 'TITLE':
gen["TITLE"] = value
elif command == 'ENCODING':
gen["ENCODING"] = value
elif command == 'ICON':
gen["ICON"] = value
elif command == 'LANG':
gen["LANG"] = value
elif command == 'BODY_CLASS':
gen["BODY_CLASS"] = value
elif command == 'BODY_ID':
gen["BODY_ID"] = value
else:
raise Exception('Unknown constant: ' + command + ' with value = ' + value)
elif line.startswith('!require ') and super_strip(line).endswith('.css'):
required = super_strip(line.replace('!require ', '', 1))
gen.header_links.append(f' <link href="{required}" rel="stylesheet">\n')
# Inline CSS
elif line.startswith('!css '):
gen.header_css.append(super_strip(line.replace('!css ', '', 1)))
else:
# Block of code
if len(line) > 2 and line[0:3] == '@@@':
if not in_code_free_block:
in_code_free_block = True
else:
in_code_free_block = False
if line.startswith('@@'):
in_code_block = True
else:
in_code_block = False
if not in_code_free_block and not in_code_block:
# Strip
line = super_strip(line)
# Special chars
line = safe(line)
# Link library
if len(line) > 0 and line[0] == '[' and multi_find(line, [']: https://', ']: http://']):
name = line[1:line.find(']: ')]
link = line[line.find(']: ') + len(']: '):]
gen.links[name] = link
continue
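# Example of the link-library syntax handled just above (name and URL are illustrative):
#     [ndocs]: https://example.org/docs
# registers gen.links['ndocs'] = 'https://example.org/docs' so that later references
# to [ndocs] in the text can be resolved to that URL.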
# Inner links
if line.find('[#') != -1:
char_index = 0
while char_index < len(line):
char = line[char_index]
prev_char, next_char, prev_prev_char = prev_next(line, char_index)
if char == '[' and next_char == '#' and prev_char != '\\': # [# ... ] inner link
ending = line.find(']', char_index)
if ending != -1:
link_name = line[char_index + 2:ending]
id_link = make_id(link_name)
if id_link in gen.inner_links:
warning(f"Multiple definitions of anchor: {id_link}")
gen.inner_links.append(id_link)
char_index = ending
continue
char_index += 1
# Inner links from Title
nb, title, id_title = find_title(line)
if nb > 0:
gen.inner_links.append(id_title)
after.append(line)
content = after
# Start of output
list_array = []
# 2nd Pass
index = -1
in_code_block = False
in_code_free_block = False
while index < len(content) - 1:
index += 1
line = content[index]
# Next line
if index < len(content) - 1:
next_line = content[index + 1]
else:
next_line = None
# Variables
if line.startswith('!var '):
command, value = process_constvar(line)
if command == 'EXPORT_COMMENT':
if value == 'true':
gen['EXPORT_COMMENT'] = True
elif value == 'false':
gen['EXPORT_COMMENT'] = False
elif command == 'PARAGRAPH_DEFINITION':
if value == 'true':
gen['DEFINITION_AS_PARAGRAPH'] = True
else:
gen['DEFINITION_AS_PARAGRAPH'] = False
elif command == 'DEFAULT_CODE':
if value in RECOGNIZED_LANGUAGES:
gen['DEFAULT_CODE'] = value
else:
warning(f'Not recognized language in var VAR_DEFAULT_CODE: {value}')
elif command == 'NEXT_PAR_ID':
gen['NEXT_PAR_ID'] = value if value != 'reset' else None
elif command == 'NEXT_PAR_CLASS':
gen['NEXT_PAR_CLASS'] = value if value != 'reset' else None
elif command == 'DEFAULT_PAR_CLASS':
gen['DEFAULT_PAR_CLASS'] = value if value != 'reset' else None
elif command == 'NEXT_TAB_CLASS':
gen['NEXT_TAB_CLASS'] = value if value != 'reset' else None
elif command == 'NEXT_TAB_ID':
gen['NEXT_TAB_ID'] = value if value != 'reset' else None
elif command == 'DEFAULT_TAB_CLASS':
gen['DEFAULT_TAB_CLASS'] = value if value != 'reset' else None
elif command == 'DEFAULT_FIND_IMAGE':
gen['DEFAULT_FIND_IMAGE'] = value if value != 'reset' else None
else:
raise Exception('Var unknown: ' + command + ' with value = ' + value)
continue
# Comment
if line.startswith(COMMENT_STARTER):
if gen['EXPORT_COMMENT']:
line = line.replace(COMMENT_STARTER, '<!--', 1) + ' -->'
gen.append(line + '\n')
continue
# Require CSS or JS file
if line.startswith('!require '):
required = line.replace('!require ', '', 1)
if required.endswith('.js'):
gen.append(f' <script src="{required}"></script>\n')
else:
raise Exception("I don't known how to handle this file: " + required)
continue
# Include HTML file
if line.startswith('!include '):
included = line.replace('!include ', '', 1).strip()
if gen.includes is not None:
filepath = None
for file in gen.includes:
if os.path.basename(file) == included:
filepath = file
if filepath is not None:
file = open(filepath, mode='r', encoding='utf8')
file_content = file.read()
file.close()
gen.append(file_content + '\n')
else:
warning(f'Included file {included} not found in includes.')
else:
warning('No included files for generation.')
continue
# Inline HTML
if line.startswith('!html '):
gen.append(line.replace('!html ', '', 1) + '\n')
continue
# HR
if line.startswith('---'):
if line.count('-') == len(line):
gen.append('<hr>\n')
continue
# BR
if line.find(' !! ') != -1:
line = line.replace(' !! ', '<br>')
# Block of pre
if line.startswith('>>'):
if not in_pre_block:
gen.append('<pre>\n')
in_pre_block = True
line = escape(line[2:])
gen.append(line + '\n')
continue
elif in_pre_block:
gen.append('</pre>\n')
in_pre_block = False
# Block of code 1 'code_free_block' (only first and last lines must start with @@@)
if len(line) > 2 and line[0:3] == '@@@':
# Writing start of block
gen.append('<pre class="code">\n')
code_lang = line.replace('@@@', '', 1).strip()
if len(code_lang) == 0:
code_lang = gen['DEFAULT_CODE']
# Finding its limit and processing
sub_index = index + 1
found = None
while sub_index < len(content):
line = content[sub_index]
if len(line) > 2 and line[0:3] == '@@@':
found = sub_index
break
gen.append(write_code(line, code_lang))
sub_index += 1
if not found:
raise Exception(f"No closing @@@ found for block of free code at line {index}")
# Closing block
gen.append('</pre>\n')
index = sub_index
continue
# Block of code 2 'code_block' (each lines must start with @@)
if line.startswith('@@') and (len(super_strip(line)) == 2 or line[2] != '@'):
# Writing start of block
gen.append('<pre class="code">\n')
code_lang = line.replace('@@', '', 1).strip()
if len(code_lang) == 0:
code_lang = gen['DEFAULT_CODE']
# Finding its limit and processing
sub_index = index + 1
found = None
while sub_index < len(content):
line = content[sub_index]
if not line.startswith('@@'):
break
line = line[2:] # remove starting @@
gen.append(write_code(line, code_lang))
sub_index += 1
# Closing block
gen.append('</pre>\n')
index = sub_index - 1  # step back so the first non-@@ line is processed normally
continue
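# Illustrative input for the two code-block forms handled above:
#     @@@python
#     print("hi")
#     @@@
# fences only the first and last lines with @@@, while
#     @@python
#     @@print("hi")
# prefixes every code line with @@; in both cases an empty language name falls back
# to gen['DEFAULT_CODE'].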
# Div {{#ids .cls}}
if line.startswith('{{') and line.endswith('}}'):
inside = line[2:-2]
if inside == 'end':
gen.append('</div>\n')
else:
cls = ''
ids = ''
state = 'start'
for c in inside:
# state
if c == '.':
state = 'cls'
elif c == ' ':
state = 'start'
elif c == '#':
state = 'ids'
# save
if state == 'cls':
cls += c
elif state == 'ids':
ids += c
if len(cls) > 0 and len(ids) > 0:
gen.append(f'<div id="{ids[1:]}" class="{cls[1:]}">\n')
elif len(cls) > 0:
gen.append(f'<div class="{cls[1:]}">\n')
elif len(ids) > 0:
gen.append(f'<div id="{ids[1:]}">\n')
else:
gen.append('<div>\n')
continue
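# Example of the div syntax handled above (identifiers are illustrative): a line
# "{{#intro .note}}" opens '<div id="intro" class="note">', "{{.note}}" opens
# '<div class="note">', and "{{end}}" emits the closing '</div>'.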
# Bold & Italic & Strikethrough & Underline & Power
if multi_find(line, | |
import os
import platform
import shutil
import tempfile
import warnings
from collections import Counter
from os.path import join as pjoin
from typing import MutableMapping, Optional
import lmdb
import configparser
from . import constants as c
from . import __version__
class TxnRegisterSingleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(TxnRegisterSingleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class TxnRegister(metaclass=TxnRegisterSingleton):
"""Singleton to manage transaction thread safety in lmdb databases.
This is essentially a reference-counting transaction register; lots of room
for improvement here.
"""
def __init__(self):
self.WriterAncestors = Counter()
self.ReaderAncestors = Counter()
self.WriterTxn: MutableMapping[lmdb.Environment, lmdb.Transaction] = {}
self.ReaderTxn: MutableMapping[lmdb.Environment, lmdb.Transaction] = {}
def begin_writer_txn(self, lmdbenv: lmdb.Environment,
buffer: bool = False) -> lmdb.Transaction:
"""Start a write enabled transaction on the given environment
If multiple write transactions are requested for the same handle, only
one instance of the transaction handle will be returened, and will not
close until all operations on that handle have requested to close
Parameters
----------
lmdbenv : lmdb.Environment
the environment to open the transaction on
buffer : bool, optional
if buffer objects should be used (the default is False, which does
not use buffers)
Returns
-------
lmdb.Transaction
transaction handle to perform operations on
"""
if self.WriterAncestors[lmdbenv] == 0:
self.WriterTxn[lmdbenv] = lmdbenv.begin(write=True, buffers=buffer)
self.WriterAncestors[lmdbenv] += 1
return self.WriterTxn[lmdbenv]
def begin_reader_txn(self, lmdbenv: lmdb.Environment,
buffer: bool = False) -> lmdb.Transaction:
"""Start a reader only txn for the given environment
If there a read-only transaction for the same environment already exists
then the same reader txn handle will be returned, and will not close
until all operations on that handle have said they are finished.
Parameters
----------
lmdbenv : lmdb.Environment
the environment to start the transaction in.
buffer : bool, optional
whether a buffer transaction should be used (the default is False,
which means no buffers are returned)
Returns
-------
lmdb.Transaction
handle to the lmdb transaction.
"""
if self.ReaderAncestors[lmdbenv] == 0:
self.ReaderTxn[lmdbenv] = lmdbenv.begin(write=False, buffers=buffer)
self.ReaderAncestors[lmdbenv] += 1
return self.ReaderTxn[lmdbenv]
def commit_writer_txn(self, lmdbenv: lmdb.Environment) -> bool:
"""Commit changes made in a write-enable transaction handle
As multiple objects can have references to the same open transaction handle,
the data is not actually committed until all open transactions have called
the commit method.
Parameters
----------
lmdbenv : lmdb.Environment
the environment handle used to open the transaction
Raises
------
RuntimeError
If the internal reference counting gets out of sync
Returns
-------
bool
True if this operation actually committed, otherwise false
if other objects have references to the same (open) handle
"""
ancestors = self.WriterAncestors[lmdbenv]
if ancestors == 0:
msg = f'hash ancestors are zero but commit called on {lmdbenv}'
raise RuntimeError(msg)
elif ancestors == 1:
self.WriterTxn[lmdbenv].commit()
self.WriterTxn.__delitem__(lmdbenv)
ret = True
else:
ret = False
self.WriterAncestors[lmdbenv] -= 1
return ret
def abort_reader_txn(self, lmdbenv: lmdb.Environment) -> bool:
"""Request to close a read-only transaction handle
As multiple objects can have references to the same open transaction
handle, the transaction is not actually aborted until all open transactions
have called the abort method
Parameters
----------
lmdbenv : lmdb.Environment
the environment handle used to open the transaction
Raises
------
RuntimeError
If the internal reference counting gets out of sync.
Returns
-------
bool
True if this operation actually aborted the transaction,
otherwise False if other objects have references to the same (open)
handle.
"""
ancestors = self.ReaderAncestors[lmdbenv]
if ancestors == 0:
raise RuntimeError(f'hash ancestors are zero but abort called on {lmdbenv}')
elif ancestors == 1:
self.ReaderTxn[lmdbenv].abort()
self.ReaderTxn.__delitem__(lmdbenv)
ret = True
else:
ret = False
self.ReaderAncestors[lmdbenv] -= 1
return ret
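# Hedged usage sketch for the reference-counted register above (`env` stands for an
# already opened lmdb.Environment; setup is assumed):
#
#     register = TxnRegister()                 # singleton: always the same instance
#     txn = register.begin_writer_txn(env)     # refcount 1, real lmdb txn opened
#     txn2 = register.begin_writer_txn(env)    # refcount 2, same handle returned
#     register.commit_writer_txn(env)          # returns False, nothing committed yet
#     register.commit_writer_txn(env)          # returns True, data actually committed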
"""
TODO: refactor to avoid the need for these imports to sit below TxnRegister;
if they are not placed here, we get circular imports...
"""
from .records import commiting, heads, parsing, vcompat # noqa: E402
from .utils import readme_contents # noqa: E402
class Environments(object):
def __init__(self, pth: os.PathLike):
self.repo_path: os.PathLike = pth
self.refenv: Optional[lmdb.Environment] = None
self.hashenv: Optional[lmdb.Environment] = None
self.stageenv: Optional[lmdb.Environment] = None
self.branchenv: Optional[lmdb.Environment] = None
self.labelenv: Optional[lmdb.Environment] = None
self.stagehashenv: Optional[lmdb.Environment] = None
self.cmtenv: MutableMapping[str, lmdb.Environment] = {}
self._startup()
@property
def repo_is_initialized(self) -> bool:
"""Property to check if the repository is initialized, read-only attribute
Returns
-------
bool
True if repo environments are initialized, False otherwise
"""
ret = True if isinstance(self.refenv, lmdb.Environment) else False
return ret
def _startup(self) -> bool:
"""When first access to the Repo starts, attempt to open the db envs.
This function is designed to fail if a repository does not exist at the
:py:attr:`repo_path` which is specified, so the user can
explicitly choose to initialize the repo. Once opened, the lmdb
environments should not be closed until the program terminates.
Returns
-------
bool
False if no repository exists at the given path, otherwise True.
Warns
-----
UserWarning
Should the repository not exist at the provided repo path.
Raises
------
RuntimeError
If the repository version is not compatible with the current software.
"""
if not os.path.isfile(pjoin(self.repo_path, c.LMDB_BRANCH_NAME)):
msg = f'No repository exists at {self.repo_path}, please use `repo.init()` method'
warnings.warn(msg, UserWarning)
return False
repo_ver = vcompat.startup_check_repo_version(self.repo_path)
curr_ver = parsing.repo_version_raw_spec_from_raw_string(v_str=__version__)
if not vcompat.is_repo_software_version_compatible(repo_ver, curr_ver):
msg = f'repository written version: {repo_ver} is not compatible '\
f'with the current Hangar software version: {curr_ver}'
raise RuntimeError(msg)
self._open_environments()
return True
def _init_repo(self,
user_name: str,
user_email: str,
description: str = None,
remove_old: bool = False) -> os.PathLike:
"""Create a new hangar repositiory at the specified environment path.
Parameters
----------
user_name : str
Name of the repository user.
user_email : str
Email address of the repository user.
description : str, optional
Optional description text for the repository, defaults to None.
remove_old : bool, optional (default value = False)
DEVELOPER USE ONLY --- Remove all data and records stored in the
repository if this option is enabled, defaults to False.
Returns
-------
os.PathLike
The path to the newly created repository on disk.
Raises
------
OSError
If a hangar repository exists at the specified path, and `remove_old`
was not set to ``True``.
"""
if os.path.isfile(pjoin(self.repo_path, c.LMDB_BRANCH_NAME)):
if remove_old is True:
shutil.rmtree(self.repo_path)
else:
raise OSError(f'Hangar Directory: {self.repo_path} already exists')
os.makedirs(pjoin(self.repo_path, c.DIR_DATA_STORE))
os.makedirs(pjoin(self.repo_path, c.DIR_DATA_STAGE))
os.makedirs(pjoin(self.repo_path, c.DIR_DATA_REMOTE))
os.makedirs(pjoin(self.repo_path, c.DIR_DATA))
print(f'Hangar Repo initialized at: {self.repo_path}')
if description:
userConf = {'name': user_name, 'email': user_email, 'description': description}
else:
userConf = {'name': user_name, 'email': user_email}
CFG = configparser.ConfigParser()
CFG.read_dict(userConf)
with open(pjoin(self.repo_path, c.CONFIG_USER_NAME), 'w') as f:
CFG.write(f)
readmeTxt = readme_contents(user_name, user_email, description)
with open(pjoin(self.repo_path, c.README_FILE_NAME), 'w') as f:
f.write(readmeTxt.getvalue())
self._open_environments()
vcompat.set_repository_software_version(branchenv=self.branchenv, ver_str=__version__)
heads.create_branch(self.branchenv, 'master', '')
heads.set_staging_branch_head(self.branchenv, 'master')
return self.repo_path
def checkout_commit(self, branch_name: str = '', commit: str = '') -> str:
"""Set up db environment with unpacked commit ref records.
Parameters
----------
branch_name : str, optional
name of the branch to read, defaults to ''
commit : str, optional
name of the commit to read, defaults to ''
Returns
-------
str
commit hash which was checked out
"""
if commit != '':
commit_hash = commit
txt = f' * Checking out COMMIT: {commit_hash}'
elif branch_name != '':
commit_hash = heads.get_branch_head_commit(self.branchenv, branch_name)
txt = f' * Checking out BRANCH: {branch_name} with current HEAD: {commit_hash}'
else:
head_branch = heads.get_staging_branch_head(self.branchenv)
commit_hash = heads.get_branch_head_commit(self.branchenv, head_branch)
txt = f'\n Neither BRANCH or COMMIT specified.'\
f'\n * Checking out writing HEAD BRANCH: {head_branch}'
print(txt)
# On UNIX-like system, an open process still retains ability to
# interact with disk space allocated to a file when it is removed from
# disk. Windows does not, and will not allow file to be removed if a
# process is interacting with it. While the CM form is cleaner, this
# hack allows similar usage on Windows platforms.
if platform.system() != 'Windows':
with tempfile.TemporaryDirectory() as tempD:
tmpDF = os.path.join(tempD, f'{commit_hash}.lmdb')
tmpDB = lmdb.open(path=tmpDF, **c.LMDB_SETTINGS)
commiting.unpack_commit_ref(self.refenv, tmpDB, commit_hash)
self.cmtenv[commit_hash] = tmpDB
else:
tempD = tempfile.mkdtemp()
tmpDF = os.path.join(tempD, f'{commit_hash}.lmdb')
tmpDB = lmdb.open(path=tmpDF, **c.LMDB_SETTINGS)
commiting.unpack_commit_ref(self.refenv, tmpDB, commit_hash)
self.cmtenv[commit_hash] = tmpDB
return commit_hash
def _open_environments(self):
"""Open the standard lmdb databases at the repo path.
If any commits are checked out (in an unpacked state), read those in as
well.
"""
ref_pth = pjoin(self.repo_path, c.LMDB_REF_NAME)
hash_pth = pjoin(self.repo_path, c.LMDB_HASH_NAME)
stage_pth = pjoin(self.repo_path, c.LMDB_STAGE_REF_NAME)
branch_pth = pjoin(self.repo_path, c.LMDB_BRANCH_NAME)
label_pth = | |
int
AutoCADOfflineHelpNotInstalled_UseOnlineHelpButon: int
AutoCADOnlineHelpNotAccessible: int
AutoCADOnlineHelpNotAccessible_TryConnectInternetButon: int
AutoCADOnlineHelpNotAccessible_UseOfflineHelpButon: int
AutoCADParametricExpression: int
AutoCADParametricExpression_ParametricExpression_CancelButon: int
AutoCADParametricExpression_ParametricExpression_ContinueButon: int
AutoCADRelaxDragging: int
AutoCADRelaxDragging_DeleteButon: int
AutoCADRelaxDragging_RelaxButon: int
AutoCADRenderScaleSettings: int
AutoCADRenderScaleSettings_CancelRenderButon: int
AutoCADRenderScaleSettings_ConvertToMetersButon: int
AutoCADSignOut: int
AutoCADSubtractSurface: int
AutoCADSubtractSurface_SubtractSurface_CancelButon: int
AutoCADSubtractSurface_SubtractSurface_ContinueButon: int
AutoCADTooManyControlVertices: int
AutoCADTooManyControlVertices_TooManyControlVertices_CancelButon: int
AutoCADTooManyControlVertices_TooManyControlVertices_ContinueButon: int
AutoCADUnionSurface: int
AutoCADUnionSurface_UnionSurface_CancelButon: int
AutoCADUnionSurface_UnionSurface_ContinueButon: int
AutoCADUnsupportedObjects: int
AutoCADUnsupportedObjects_DoNotShowUnsupportedObjects_ButtonButon: int
AutoCADUnsupportedObjects_ShowBoundingBox_ButtonButon: int
AutoCADUnsupportedObjects_ShowObjectsIfAvailable_ButtonButon: int
AutoCADXrefLockFail: int
AutodeskMismatchGeoCS: int
AutodeskRemoveSectionJogs: int
AutodeskReqVersionNoOpen: int
AutodeskReqVersionOpenForWrite: int
AutodeskReqVersionReadOnly: int
AutodeskSharedCloudFile: int
AutoLISPLoadSettings: int
BatchStandardsCheckerReportFile: int
BEDITConstraintsFound: int
BlockCircularReference: int
BlockCircularReferenceToTable: int
BoundaryBoundaryDefinitionError: int
ColorDlgMissingColorBook: int
ConstraintDynamicBlockGripsWillBeHidden: int
ConstraintNonAssociativeSelection: int
ConstraintNonDimConstraintSelection: int
ConstraintOverConstraint: int
ConstraintWouldOverContrain: int
ConvertHatchObjectsWarning: int
CUICannotCopyNestedToolbarFlyouts: int
CUIDeleteReferencedImage: int
CUIDeleteUnreferencedImage: int
CUIFoldPanelContents: int
CUIImageNameAlreadyExist: int
CUIImageNameInvalid: int
CUIReset: int
CUIReset_CUIResetCancelButon: int
CUIReset_CUIResetContinueButon: int
CUIRestoreBackup: int
CUIRestoreBackup_CUIRestoreBackupCancelButon: int
CUIRestoreBackup_CUIRestoreBackupContinueButon: int
CustomizationComfirmCopytoRibbonPanels: int
CustomizationSaveChanges: int
CustomizationUndefinedObjectType: int
CustomizationUnsavedCUIChanges: int
DGN3DSeedFileRequired: int
DGNIncompatibleSeedFile: int
DGNIncompatibleSeedFile_SelectAnotherDGNExportButon: int
DGNIncompatibleSeedFile_SelectAnotherSeedButon: int
DGNInvalidDGNFile: int
DGNNoDesignModelsFound: int
DGNUIConfirmMappingRemoval: int
DgnUIDGNImportUnsupportObjects: int
DGNUIIncompatibleSeedFileSettings: int
DGNUIIncompatibleSeedFileSettings_ContinueExportButon: int
DGNUIInvalidPropertyName: int
DGNUIInvalidSeedFile: int
DGNUINumberOfElementsExceededLimit: int
DgnUIUnsupportDGNExportObjects: int
DigitalSignaturesUnsupportedDrawingFormat: int
DimensionFrozenLayer: int
DimensionNoDimensionsSelected: int
DimMLeaderStyleRedefineStyle: int
DimMLeaderStyleRedefineStyle_DoNotRedefineStyleButon: int
DimMLeaderStyleRedefineStyle_RedefineStyleButon: int
DrawingOpenForeignDWGFile: int
DrawingSaveAsAcModelDoc: int
DwgAidsRestoreAllContexts: int
DwgAidsRestoreClassicColors: int
DwgAidsRestoreCurrentContext: int
DwgAidsRestoreCurrentContext_RestoreCurrentContextButon: int
DWGRecoveryDamagedFile: int
DWGRecoveryDrawingRecovery: int
DWGRecoveryErrorsFound: int
DWGRecoveryRecoverSummary: int
EnterpriseWorkspaceCannotSaveChanges: int
ExportLayoutFileCreated: int
ExportLayoutFileCreatedSDI: int
ExportSelectWindow: int
ExportSelectWindowSuccessful: int
FBXExportMissingTextures: int
FBXExportNoEntitiesSelected: int
FBXExportNothingToExport: int
FBXExportUnsupportedObjects: int
FBXImportCancel: int
FBXImportFileNotFound: int
FBXImportNoEntitiesSelected: int
FBXImportOptionsDialogBoxInvalidEntry: int
FBXImportOptionsInsertAndCameras: int
FBXImportProcessingFile: int
FBXImportTextureNotFound: int
FBXImportUnsupportedFile: int
HatchBoundaryDefinitionErrorNRC: int
HatchBoundaryDefinitionErrorRC: int
HatchDenseHatchCreation: int
HatchDrawingHasLargeHatches: int
HatchDuplicatePatternSelected: int
HatchDuplicatePatternSelected_OverwriteButon: int
HatchDuplicatePatternSelected_SkipButon: int
HatchFrozenLayer: int
HatchInvalidPatternSelected: int
HatchInvalidPatternSelected_CancelButon: int
HatchInvalidPatternSelected_RevealInFinderButon: int
HatchInvalidPatternSelectedSandboxed: int
HatchInvalidPatternSelectedSandboxed_RemoveButon: int
HatchOpenBoundary: int
LayerManagerCannotAdjustPlotSetting: int
LayerManagerCannotMakeCurrent: int
LayerManagerCurrentLayerOff: int
LayerManagerCurrentLayerOffMac: int
LayerManagerDeletedBlockRefs: int
LayerManagerDeleteGroupWarning: int
LayerManagerDeleteGroupWarning_DeleteGroupAndLayersButon: int
LayerManagerDeleteGroupWarning_DeleteGroupWithoutLayersButon: int
LayerManagerExcessLayerFilters: int
LayerManagerHideSystemGroup: int
LayerManagerLayerCannotFreeze: int
LayerManagerLayerCannotFreezeMac: int
LayerManagerLayerDeleteMac: int
LayerManagerLayerDeleteMac_Calcel_DeleteButon: int
LayerManagerLayerDeleteMac_DeleteLayerAndMoveObjectsButon: int
LayerManagerLayerDeleteMac_DeleteLayerAndObjectsButon: int
LayerManagerLayerRename: int
LayerManagerMultipleDeleteConfirmation: int
LayerManagerMutipleLayerNotDeleted: int
LayerManagerNewLayerFilteredWarning: int
LayerManagerNoMatchingLayers: int
LayerManagerNoMatchingLayers_CreateGroupAnywayButon: int
LayerManagerNoMatchingLayers_EditTheGroupButon: int
LayerManagerShowMessageClose: int
LayerManagerShowMessageYesNo: int
LayerManagerSingleDeleteConfirmation: int
LayerManagerSingleLayerNotDeleted: int
LayerManagerUnableToModifyLayersWarning: int
LayerToolsIncompatibleVisualStyle: int
LayerWalkLayerStatesChange: int
LinetypeReloadLinetype: int
MainFrameCommandLineHideWindow: int
MaterialDeleteNestedMaps: int
MaterialsDesynchronizeMaps: int
MaterialsLibraryInUse: int
MaterialsMaterialInUseOnLockedLayer: int
MaterialsSynchronizeMaps: int
MaterialUIMaterialInUse: int
MaterialUIMigrateMaterial: int
MTEUnknownTextFonts: int
MTEXTAutoStackProperties1: int
MTEXTAutoStackProperties1WithoutDoNotShow: int
MTEXTAutoStackProperties2: int
MTextStyleChange: int
MTextUnsavedChanges: int
MTextWarningAboutBullets: int
MTextWarningAboutPasting: int
NavToolsNeedGreaterEqualNumber: int
NavToolsNeedGreaterNumber: int
NavToolsNeedNumInRange: int
ObjectNameNoFolderAccess: int
OPMAcPEXCtlCannotModifyProperty: int
OPMAcPEXCtlPatternNameNotFound: int
OpmNoObjectsFound: int
OptionOnlineTabDisableCloudDocuments: int
PhotoViewerFileNotFound: int
PhotoViewerFileOrFolderNotFound: int
PhotoViewerInvalidFileFormat: int
PhotoViewerRemoveImage: int
PlotAndPublishCancelEntireJob: int
PlotAndPublishCancelEntireSheet: int
PlotBatchPlotFromPlot: int
PlotBatchPlotFromPlot_BatchPlotFromPlot_CancelButon: int
PlotBatchPlotFromPlot_BatchPlotFromPlot_ContinueButon: int
PlotBatchPlotFromPlot_BatchPlotFromPlot_LearnMoreButon: int
PlotGuiPlotModelSpaceAlert: int
PlotGuiPlotModelSpaceAlert_TaskPlotDialogButton_181Buton: int
PlotGuiPlotModelSpaceAlert_TaskPlotDialogButton_182Buton: int
PlotPaperSizeNotFound: int
PlotProcessingBackgroundJob: int
PlotShadePlot: int
Printing3DNoInternetConn: int
Printing3DObjectsOnLockedLayer: int
Printing3DPrepareModel: int
PropertiesObjectsMoveToFrozenOrOffLayers: int
PropertiesObjectsOnLockedLayers: int
PulishNoPreset: int
PulishPresetIllegalChar: int
PulishPresetInvalidName: int
PulishPresetInvalidResolution: int
PulishPresetNameExist: int
PulishSaveDSD: int
QPDockToRibbon: int
QPOffPanelWarning: int
QPRemoveUndefObjType: int
QPRestoreDefault: int
QPRestoreDefault_KeepQPCustomButon: int
QPRestoreDefault_RestorQPeSettingsButon: int
QPSwitchToFloatMode: int
QPSyncWithTooptip: int
QVDrawingCloseAllOtherDrawings: int
QVDrawingCloseReadOnly: int
RecoverAllWarning: int
RenderIBLWarnSetBackgroundWithDisplayImageOn: int
RenderNoFacesToRender: int
RenderOutOfMemory: int
RenderSoftOutOfMemory: int
RibbonUnableToAddControlIntoQAT: int
RibbonUnableToAddSeparatorIntoQAT: int
RibbonUnableToRemoveFromQAT: int
RollOverRestoreDefault: int
RollOverRestoreDefault_KeepCustomButon: int
RollOverRestoreDefault_RestoreSettingsButon: int
ScaleListLargeScaleAlert: int
ScaleListResetScaleList: int
SceneUIPhotometricDistantLights: int
SceneUISunlightAndExposure: int
SceneUIViewportLightingMode: int
SecurityStartAutoCADInAdminMode: int
SecurityWritablePathWarning: int
SecurityWritablePathWarning_ContinueButon: int
SeekFileNameTooLong: int
seekSaveChanges: int
seekSaveChanges_TranslateAndSave1Buton: int
seekSaveChanges_TranslateAndSave2Buton: int
seekSaveFile: int
seekSaveFile_SaveFileButon: int
seekWebsiteNotAvailable: int
SheetSetManagerConfirmChanges: int
SheetSetManagerConfirmChangesForGroups: int
SheetSetManagerDrawingFileNotFound: int
SheetSetManagerLostSetAssociation: int
ShowMotionDeleteAllView: int
ShowMotionQuickViewDeleteCategoryViews: int
ShowMotionQuickViewRenameError: int
ShowMotionUpdateAll: int
SpellerCheckLayersLocked: int
StandardCloseReadOnly: int
StandardDeleteConfirmation: int
StandardDuplicateNameCopyOverwrite: int
StandardDuplicateNameError: int
StandardDuplicateNameReplaceCancel: int
StandardFileAlreadyExists: int
StandardFileAlreadyExists_RenameButon: int
StandardFileAlreadyExists_ReplaceButon: int
StandardFileConfirmationWithNoToAll: int
StandardFileConfirmationWithoutNoToAll: int
StandardFileInUse: int
StandardFileNotFound: int
StandardFilePathTooLong: int
StandardInvalidNameEmptyName: int
StandardInvalidNameTooLong: int
StandardInvalidNameUnsupportedCharacters: int
StandardInvalidPath: int
StandardInValidPropertyName: int
StandardObjectTypeCannotBeDeleted: int
StandardObjectUnsupportCharacters: int
StandardOffsetObjectIsNotPlanar: int
StandardPathOrFileNameNotSpecified: int
StandardWriteProtectedFile: int
SurfaceCVEditing: int
TextFindBlockRef: int
TextFindBlockRef_ReplaceAllButon: int
TextFindBlockRef_ReplaceThisButon: int
TextFindBlockRef_SkipButon: int
UnitsInsertUnits: int
UnitsRenderEngine: int
VMToolsUIOverwriteAnimationFrames: int
VMToolsUIWalkFlyToPerspectiveView: int
def __init__(self) -> AcadTaskDialogs:...
@staticmethod
def ShowCurrentLayerOff() -> int:...
@staticmethod
def ShowDuplicateNameCopyReplaceTD(objectname: str, duplicatename: str, copyname: str, pParentWnd: _n_8_t_0) -> int:...
@staticmethod
def ShowDuplicateNameErrorTD(objectname: str, duplicatename: str, pParentWnd: _n_8_t_0) -> int:...
@staticmethod
def ShowDuplicateNameReplaceCancelTD(objectname: str, duplicatename: str, pParentWnd: _n_8_t_0) -> int:...
@staticmethod
def ShowFileConfirmationWithNoToAll(featureName: str, featureType: str, fullFileName: str, fileName: str, pParentWnd: _n_8_t_0) -> int:...
@staticmethod
def ShowFileConfirmationWithoutNoToAll(featureName: str, featureType: str, fullFileName: str, fileName: str, pParentWnd: _n_8_t_0) -> int:...
@staticmethod
def ShowInvalidNameEmptyNameTD(objectname: str, fieldname: str, pParentWnd: _n_8_t_0):...
@staticmethod
def ShowInvalidNameTooLongTD(objectname: str, fieldname: str, pParentWnd: _n_8_t_0):...
@staticmethod
def ShowInvalidNameUnsupportedCharactersTD(objectname: str, fieldname: str, pParentWnd: _n_8_t_0):...
@staticmethod
def ShowLayerCannotFreeze():...
@staticmethod
def Source() -> _n_8_t_1:...
class AcAeUtilities(object):
def __init__(self) -> AcAeUtilities:...
@staticmethod
def GetBeditConstraintColor(constraintType: AcAeUtilities.ConstraintType) -> _n_1_t_0:...
@staticmethod
def GetBlockName() -> str:...
@staticmethod
def GetCurrentVisibilityStateName() -> str:...
@staticmethod
def GetVisibilitySets(visibilities: _n_10_t_0[str]):...
@staticmethod
def IsAuthorPaletteVisible() -> bool:...
@staticmethod
def IsInBlockEditor() -> bool:...
@staticmethod
def IsInBVMode() -> bool:...
@staticmethod
def IsVisibilityParameterPresent() -> bool:...
@staticmethod
def LearnBlockEditor():...
@staticmethod
def PickFirstBeforeInvokeBvHide() -> bool:...
@staticmethod
def PickFirstBeforeInvokeBvShow() -> bool:...
@staticmethod
def SetBeditConstraintColor(constraintType: AcAeUtilities.ConstraintType, color: _n_1_t_0):...
@staticmethod
def ShowAuthorPalette(bShow: bool):...
class ConstraintType(_n_8_t_2, _n_8_t_3, _n_8_t_4, _n_8_t_5):
FullyConstrained: int
OverConstrained: int
PartiallyConstrained: int
Unconstrained: int
value__: int
class AcDownloadCallback(_n_8_t_6, _n_8_t_7, _n_18_t_0):
def __init__(self, A_0: object, A_1: _n_8_t_0) -> AcDownloadCallback:...
def BeginInvoke(self, callback: _n_8_t_9, obj: object) -> _n_8_t_8:...
def EndInvoke(self, result: _n_8_t_8):...
def Invoke(self):...
class AcNavisworksProtocolAdapter(_n_8_t_10):
pass
class AcNavisworksService(INavisworksService, _n_8_t_11):
def __init__(self) -> AcNavisworksService:...
class ActiveThemeColor(object):
@property
def CurrentTheme(self) -> ColorThemeEnum:"""CurrentTheme { get; } -> ColorThemeEnum"""
@property
def InspectorBGHighlight(self) -> _n_14_t_0:"""InspectorBGHighlight { get; } -> Color"""
@property
def InspectorItem(self) -> _n_14_t_0:"""InspectorItem { get; } -> Color"""
@property
def PaletteBackground(self) -> _n_14_t_0:"""PaletteBackground { get; } -> Color"""
@property
def ColorThemeChanged(self) -> ColorThemeChangedEventHandler:
"""ColorThemeChanged Event: ColorThemeChangedEventHandler"""
@staticmethod
def Instance() -> ActiveThemeColor:...
class AppLoaderDownloadUtils(object):
def __init__(self) -> AppLoaderDownloadUtils:...
@staticmethod
def CancelDownloadJob(nToken: _n_8_t_12):...
class AppMenuUtil(object):
def __init__(self) -> AppMenuUtil:...
@staticmethod
def IsApplicationRegistered(appName: str) -> bool:...
@staticmethod
def OpenDocument(fileName: str):...
class AttachUtil(_n_8_t_11):
def __init__(self) -> AttachUtil:...
@staticmethod
def CheckDwgFile(csDwgPath: str) -> bool:...
@staticmethod
def CheckImageFile(csImagePath: str) -> bool:...
@staticmethod
def CmdLineImageAttach(strImageFile: str):...
@staticmethod
def CmdLineNavisworksAttach(strNavisworksFile: str):...
@staticmethod
def CmdLinePointCloudAttach(strPointCloudFile: str):...
@staticmethod
def CmdLineXAttach(strDWGFile: str) -> bool:...
@staticmethod
def GetImageFileExtensions() -> str:...
@staticmethod
def GetImageFilterString() -> str:...
@staticmethod
def GetOpenFilesResult(opt: _n_3_t_0, multiSelArray: _n_8_t_13[str]) -> _n_8_t_13[str]:...
@staticmethod
def ImageAdjust(objIds: _n_8_t_13[_n_2_t_0]):...
@staticmethod
def ImageAttach(strImageFile: str):...
@staticmethod
def ImageClip(objId: _n_2_t_0):...
@staticmethod
def isDialogShow() -> bool:...
@staticmethod
def IsPCAttachAllowed() -> bool:...
@staticmethod
def LoadBIMUnderlayArx():...
@staticmethod
def LoadBIMUnderlayCrx():...
@staticmethod
def LoadISM():...
@staticmethod
def LoadPointCloudArx():...
@staticmethod
def LoadPointCloudCrx():...
@staticmethod
def NavisworksAttach(strNavisworksFile: str):...
@staticmethod
def PointCloudAttach(strPointCloudFile: str):...
@staticmethod
def PointCloudClip(objId: _n_2_t_0):...
@staticmethod
def VPClip():...
@staticmethod
def XAttach(strDWGFileArray: _n_8_t_13[str]):...
@staticmethod
def XClip():...
class CipUtils(object):
def getMacroString(self, macro: str) -> str:...
@staticmethod
def Instance() -> CipUtils:...
def IsOperational(self) -> bool:...
def LogApplicationMenuCommandExecute(self, id: str, sCmd: str):...
def LogIcLaunch(self, icType: int, group: str, category: str, title: str, url: str):...
def LogIcQuery(self, queryString: str, buttonIndex: int):...
def LogModelessLayerItem(self, cmdStr: str, bInRibbon: bool):...
def LogQuickAccessToolbarCommandExecute(self, id: str, sCmd: str):...
def LogRibbonItemCommandExecute(self, sCmd: str, sTabName: str, sPanelName: str, bMenuMacro: bool, dockSide: int):...
def LogStatusBarElementVisibility(self, elementName: str, isVisible: bool):...
def SetUaLaunchType(self, uaLaunchType: str):...
def WaypointReachedWithStringAtt(self, waypoint: str, state: str, att: str, strAttValue: str):...
class CloudPrintingServiceManager(object):
@property
def Host(self) -> ICloudPrintingService:"""Host { get; set; } -> ICloudPrintingService"""
def __init__(self) -> CloudPrintingServiceManager:...
class ColorThemeChangedEventHandler(_n_8_t_6, _n_8_t_7, _n_18_t_0):
def __init__(self, A_0: object, A_1: _n_8_t_0) -> ColorThemeChangedEventHandler:...
def BeginInvoke(self, sender: object, e: _n_8_t_14, callback: _n_8_t_9, obj: object) -> _n_8_t_8:...
def EndInvoke(self, result: _n_8_t_8):...
def Invoke(self, sender: object, e: _n_8_t_14):...
class ColorThemeEnum(_n_8_t_2, _n_8_t_3, _n_8_t_4, _n_8_t_5):
Dark: int
Light: int
User: int
value__: int
class ComboBoxWrapper(_n_13_t_0, _n_21_t_0IOleControl, _n_21_t_0IOleObject, _n_21_t_0IOleInPlaceObject, _n_21_t_0IOleInPlaceActiveObject, _n_21_t_0IOleWindow, _n_21_t_0IViewObject, _n_21_t_0IViewObject2, _n_21_t_0IPersist, _n_21_t_0IPersistStreamInit, _n_21_t_0IPersistPropertyBag, _n_21_t_0IPersistStorage, _n_21_t_0IQuickActivate, _n_21_t_1, _n_21_t_2, _n_13_t_1, _n_21_t_3, _n_22_t_0, _n_21_t_4):
@property
def SelectionChanged(self) -> _n_8_t_15:
"""SelectionChanged Event: EventHandler"""
class CommandCallback(_n_8_t_6, _n_8_t_7, _n_18_t_0):
def __init__(self, A_0: object, A_1: _n_8_t_0) -> CommandCallback:...
def BeginInvoke(self, callback: _n_8_t_9, obj: object) -> _n_8_t_8:...
def EndInvoke(self, result: _n_8_t_8):...
def Invoke(self):...
class CommandPiper(_n_8_t_11):
@property
def BlockingCommands(self) -> int:"""BlockingCommands { get; set; } -> int"""
@property
def LayerCount(self) -> int:"""LayerCount { get; set; } -> int"""
@property
def QueueCount(self) -> int:"""QueueCount { get; } -> int"""
def __init__(self, layerMgrCtrlId: int, bInRibbon: bool) -> CommandPiper:...
def __init__(self) -> CommandPiper:...
@staticmethod
def CommandsSentButNotStartedClear():...
@staticmethod
def CommandWillStart(layerMgrCtrlId: int) -> bool:...
@staticmethod
def GetRegenLayers(layerIds: _n_2_t_1) -> int:...
@staticmethod
def IsBlockingCommand(cmdName: str) -> bool:...
def LayerClose(self):...
def LayerColor(self, value: _n_1_t_0, layerName: str) -> bool:...
def LayerColor(self, value: _n_1_t_0, layerNames: _n_9_t_0) -> bool:...
def LayerCurrent(self, layerName: str):...
def LayerDelete(self, layerName: str):...
def LayerDelete(self, layerNames: _n_9_t_0):...
def LayerDescription(self, value: str, existingValueEmpty: bool, layerName: str):...
def LayerDescription(self, value: str, existingValueEmpty: | |
# C2SMART Lab, NYU
# NCHRP 03-137
# @file DRAC_Calculation_Offline.py
# @author <NAME>
# @author <NAME>
# @date 2020-10-18
import pandas as pd
import numpy as np
from shapely.geometry import Polygon
import math
import time
import multiprocessing as mp
from itertools import repeat
from scipy import spatial
def frange(start, stop=None, step=None):
"""Returns the range by float numbers."""
if stop is None:
stop = start + 0.0
start = 0.0
if step is None:
step = 1.0
while True:
if step > 0 and start >= stop:
break
elif step < 0 and start <= stop:
break
yield ("%g" % start) # return float number
start = start + step
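# Illustrative behaviour of frange (values are yielded as "%g" strings; floating-point
# step accumulation may shift the last values for other inputs):
#     list(frange(0, 0.5, 0.1)) -> ['0', '0.1', '0.2', '0.3', '0.4']
#     list(frange(3))           -> ['0', '1', '2']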
def dist(x1, y1, x2, y2):
"""
Returns the euclidean distance.
Keyword arguments:
>>> x1: float value for X for first point (ft.)
>>> y1: float value for Y for first point (ft.)
>>> x2: float value for X for 2nd point (ft.)
>>> y2: float value for Y for 2nd point (ft.)
RETURN: The euclidean distance(float, ft.).
"""
return float("{:.6f}".format(math.sqrt((x2-x1) ** 2 + (y2 - y1) ** 2)))
def get_heading(x1, y1, x2, y2):
"""
Returns the Heading based on two points
Keyword arguments:
>>> x1: Float value for X for first point (ft.)
>>> y1: Float value for Y for first point (ft.)
>>> x2: Float value for X for 2nd point (ft.)
>>> y2: Float value for Y for 2nd point (ft.)
RETURN: The new heading value(float).
"""
heading = 0
dx = x2 - x1
dy = y2 - y1
if dx != 0:
heading = float("{:.6f}".format((90 - math.degrees(math.atan2(dy, dx)) + 360) % 360))
elif dy > 0:
heading = 0
elif dy < 0:
heading = 180
return heading
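# A quick sanity check of the heading convention used here: 0 degrees points along
# +Y ("north") and the angle increases clockwise toward +X ("east").
assert get_heading(0, 0, 0, 1) == 0     # due north (+Y)
assert get_heading(0, 0, 1, 0) == 90.0  # due east (+X)
assert get_heading(0, 0, 1, 1) == 45.0  # north-east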
def ttc_location(data_check, distance, start_time):
"""
Returns TTCmax Location (Please see the document for the detailed definition).
Keyword arguments:
>>> data_check: The working data frame selected from the main data frame.
>>> distance: The projecting distance based on the current speed (ft.).
>>> start_time: The time stamp of the processing step.
RETURN: TTCmax point X, TTCmax point Y, the nearest time stamp before the TTCmax location projected,
heading of the vehicle at the TTCmax point.
"""
# Start with jump 0.1 sec
dist1 = distance
Start_X = data_check.at[0, 'X']
Start_Y = data_check.at[0, 'Y']
TTC_X = np.NaN
TTC_Y = np.NaN
Heading = np.NaN
for i in range(len(data_check)-1):
Check_X = data_check.at[i + 1, 'X']
Check_Y = data_check.at[i + 1, 'Y']
dist2 = dist(Start_X, Start_Y, Check_X, Check_Y)
if dist2 <= dist1:
dist1 = dist1 - dist2
Start_X = Check_X
Start_Y = Check_Y
start_time = float("{:.1f}".format(start_time + 0.1))
pass
else:
Heading = get_heading(Start_X, Start_Y, Check_X, Check_Y)
rad = math.pi / 2 - math.radians(Heading)
TTC_X = Start_X + dist1 * math.cos(rad)
TTC_Y = Start_Y + dist1 * math.sin(rad)
start_time = float("{:.1f}".format(start_time + 0.1))
break
return [TTC_X, TTC_Y, float("{:.1f}".format(start_time - 0.1)), Heading]
def ttc_location_online(data_check, distance, start_time):
"""
Returns the TTCmax location (please see the document for the detailed definition) without projecting the potential trajectory.
This is the online version, which can be used for a single-step-length updating process.
It replaces the function [ttc_location] in the online version.
Keyword arguments:
>>> data_check: The working data frame selected from the main data frame.
>>> distance: The projecting distance based on the current speed (ft.).
>>> start_time: The time stamp of the processing step.
RETURN: TTCmax point X, TTCmax point Y, the nearest time stamp before the TTCmax location projected,
heading of the vehicle at the TTCmax point.
"""
dist1 = distance
Start_X = data_check.at[0, 'X']
Start_Y = data_check.at[0, 'Y']
Check_X = data_check.at[1, 'X']
Check_Y = data_check.at[1, 'Y']
Heading = get_heading(Start_X, Start_Y, Check_X, Check_Y)
rad = math.pi / 2 - math.radians(Heading)
TTC_X = Start_X + dist1 * math.cos(rad)
TTC_Y = Start_Y + dist1 * math.sin(rad)
start_time = float("{:.1f}".format(start_time + 0.1))
return [TTC_X, TTC_Y, float("{:.1f}".format(start_time - 0.1)), Heading]
def overlap(shape1, shape2):
"""
Checks whether two shapes overlap.
Keyword arguments:
>>> shape1: list of the four corners of vehicle1, ordered TL_x, TL_y, TR_x, TR_y, BR_x, BR_y, BL_x, BL_y (tracing the rectangle boundary, as returned by rectangular)
>>> shape2: list of the four corners of vehicle2, in the same order
RETURN: True or False
"""
p1 = Polygon([(shape1[0], shape1[1]), (shape1[2], shape1[3]), (shape1[4], shape1[5]), (shape1[6], shape1[7])])
p2 = Polygon([(shape2[0], shape2[1]), (shape2[2], shape2[3]), (shape2[4], shape2[5]), (shape2[6], shape2[7])])
return p1.intersects(p2)
def rectangular(x, y, length, width, angle, style):
"""Returns the coordinates of the four points of a vehicle (rectangular) given the center of the front bumper.
Keyword arguments:
>>> x: X of the reference point (ft.).
>>> y: Y of the reference point (ft.).
>>> length: Length of the vehicle (ft.).
>>> width: Width of the vehicle (ft.).
>>> angle: Heading of the vehicle.
>>> style: Using front bumper or centroid as reference point (1:front bumper; 2: centroid)
RETURN: Top-Left-x, Top-Left-y, Top-Right-x, Top-Right-y, Bottom-Right-x, Bottom-Right-y, Bottom-Left-x, Bottom-Left-y
"""
if style == 1:
# Radian of heading
rad = math.pi / 2 - math.radians(angle)
# Radian of 90 degree
rad90 = math.atan2(1, 0)
# Radian of length and half width
t_rad = math.atan2(length, width/2)
TL_x = float("{:.4f}".format(x + width/2 * math.cos(rad+rad90)))
TL_y = float("{:.4f}".format(y + width/2 * math.sin(rad+rad90)))
TR_x = float("{:.4f}".format(x + width/2 * math.cos(rad-rad90)))
TR_y = float("{:.4f}".format(y + width/2 * math.sin(rad-rad90)))
BR_x = float("{:.4f}".format(x + math.sqrt((width/2)**2+length**2) * math.cos(rad - rad90 - t_rad)))
BR_y = float("{:.4f}".format(y + math.sqrt((width/2)**2+length**2) * math.sin(rad - rad90 - t_rad)))
BL_x = float("{:.4f}".format(x + math.sqrt((width/2)**2+length**2) * math.cos(rad + rad90 + t_rad)))
BL_y = float("{:.4f}".format(y + math.sqrt((width/2)**2+length**2) * math.sin(rad + rad90 + t_rad)))
return [TL_x, TL_y, TR_x, TR_y, BR_x, BR_y, BL_x, BL_y]
elif style == 2:
# Radian of heading
rad = math.pi / 2 - math.radians(angle)
# Radian of 90 degree
rad90 = math.atan2(1, 0)
# Radian of length and half width
t_rad_1 = math.atan2(length / 2, width / 2)
t_rad_2 = math.atan2(width / 2, length / 2)
TL_x = float("{:.4f}".format(x + math.sqrt((width/2)**2+(length/2)**2) * math.cos(rad + t_rad_2)))
TL_y = float("{:.4f}".format(y + math.sqrt((width/2)**2+(length/2)**2) * math.sin(rad + t_rad_2)))
TR_x = float("{:.4f}".format(x + math.sqrt((width/2)**2+(length/2)**2) * math.cos(rad - t_rad_2)))
TR_y = float("{:.4f}".format(y + math.sqrt((width/2)**2+(length/2)**2) * math.sin(rad - t_rad_2)))
BR_x = float("{:.4f}".format(x + math.sqrt((width/2)**2+(length/2)**2) * math.cos(rad - rad90 - t_rad_1)))
BR_y = float("{:.4f}".format(y + math.sqrt((width/2)**2+(length/2)**2) * math.sin(rad - rad90 - t_rad_1)))
BL_x = float("{:.4f}".format(x + math.sqrt((width/2)**2+(length/2)**2) * math.cos(rad + rad90 + t_rad_1)))
BL_y = float("{:.4f}".format(y + math.sqrt((width/2)**2+(length/2)**2) * math.sin(rad + rad90 + t_rad_1)))
return [TL_x, TL_y, TR_x, TR_y, BR_x, BR_y, BL_x, BL_y]
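# A minimal usage sketch (hypothetical numbers): build the footprints of two
# 16 ft x 6 ft vehicles from their front-bumper reference points (style=1) and
# test whether the rectangles intersect.
veh_a = rectangular(x=100.0, y=200.0, length=16.0, width=6.0, angle=0.0, style=1)
veh_b = rectangular(x=102.0, y=195.0, length=16.0, width=6.0, angle=90.0, style=1)
vehicles_collide = overlap(veh_a, veh_b)  # True for this configuration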
def main(Start_time, dataset, coor_style):
"""The main processing function.
Keyword arguments:
>>> Start_time: The processing time step.
>>> dataset: The loaded trajectory data generated by TCA.
>>> coor_style: The reference point style of generating the vehicle's shape(1: front bumper; 2:centroid)
RETURN: Time of detected conflict, Locations of involved vehicles, the DRAC value
"""
df = dataset
Start_time = float(Start_time)
Start_time = float("{:.1f}".format(Start_time))
# Extract all vehicles and related data in this time step
# Storing in working data frame df1, and working on df2
df2 = df[df.transtime == Start_time]
df2 = df2.dropna(subset=['Speed'])
df2 = df2.sort_values(by=['transtime'])
df2.index = pd.RangeIndex(start=0, stop=len(df2), step=1)
print("Processing Time Step:", Start_time, "Processing: ", len(df2), "Vehicle.")
# Pass steps have only one vehicle
if len(df2.Vehicle_ID.unique()) <= 1:
print("Lonely car...")
pass
# Main processing
else:
for i in range(len(df2)):
df_veh = df[(df.Vehicle_ID == df2.at[i, 'Vehicle_ID']) & (df.transtime < Start_time) & (df.Speed != 0.0)]
if len(df_veh) != 0:
df_veh = df_veh.tail(1)
df_veh.index = pd.RangeIndex(start=0, stop=len(df_veh), step=1)
df2.at[i, 'TTC_heading'] = get_heading(df_veh.at[0, 'X'], df_veh.at[0, 'Y'], df2.at[i, 'X'], df2.at[i, 'Y'])
else:
df_veh = df[(df.Vehicle_ID == df2.at[i, 'Vehicle_ID']) & (df.transtime > Start_time) & (df.Speed != 0.0)]
df_veh.index = pd.RangeIndex(start=0, stop=len(df_veh), step=1)
df2.at[i, 'TTC_heading'] = get_heading(df2.at[i, 'X'], df2.at[i, 'Y'], df_veh.at[0, 'X'], df_veh.at[0, 'Y'])
#######################################################################################################
# This is the DRAC calculation part
# Calculate the DRAC threshold
for p in range(len(df2)):
for q in range(p+1, len(df2)):
DRAC_TTC = float("{:.6f}".format(abs(df2.at[p, 'Speed'] - df2.at[q, 'Speed']) * 1.46667 / (2 * 8.2021)))  # 1 mph = 1.46667 ft/s; 8.2021 ft/s^2 = 2.5 m/s^2
Dist0D1 = df2.at[p, 'Speed'] * DRAC_TTC * 1.4667  # projected travel distance of vehicle p (ft)
Dist0D2 = df2.at[q, 'Speed'] * DRAC_TTC * 1.4667  # projected travel distance of vehicle q (ft)
df_check0D1 = df[(df.Vehicle_ID == df2.at[i, 'Vehicle_ID']) & (df.transtime
<gh_stars>0
import os
from datetime import date
from datetime import datetime as dt
import time # performance test
import subprocess
from subprocess import CalledProcessError
import uuid
import psutil
from netCDF4 import Dataset
from numpy import squeeze
from pywps import Process
from pywps import LiteralInput, LiteralOutput
from pywps import ComplexInput, ComplexOutput
from pywps import Format, FORMATS
from pywps.app.Common import Metadata
from pywps.inout.storage import FileStorage
from blackswan.datafetch import _PRESSUREDATA_
from blackswan.datafetch import reanalyses as rl
from blackswan.ocgis_module import call
from blackswan import analogs
from blackswan.utils import rename_complexinputs
from blackswan.utils import get_variable, rename_variable
from blackswan.utils import get_files_size
from blackswan.calculation import remove_mean_trend
from blackswan.log import init_process_logger
import logging
LOGGER = logging.getLogger("PYWPS")
class AnalogsRe2ReProcess(Process):
def __init__(self):
inputs = [
LiteralInput("reanalyses", "Reanalyses Data",
abstract="Choose a reanalyses dataset as simulation",
default="NCEP_slp",
data_type='string',
min_occurs=1,
max_occurs=1,
allowed_values=['NCEP_slp', 'NCEP_z1000', 'NCEP_z850',
'NCEP_z700', 'NCEP_z600', 'NCEP_z500', 'NCEP_z400',
'NCEP_z300', 'NCEP_z250', 'NCEP_z200', 'NCEP_z150',
'NCEP_z100', 'NCEP_z70', 'NCEP_z50', 'NCEP_z30',
'NCEP_z20', 'NCEP_z10']
),
LiteralInput("Refreanalyses", "Reanalyses Data",
abstract="Choose a reanalyses dataset where look for analogs",
default="20CRV2c_prmsl",
data_type='string',
min_occurs=1,
max_occurs=1,
allowed_values=['20CRV2c_prmsl', '20CRV2c_z1000', '20CRV2c_z850',
'20CRV2c_z700', '20CRV2c_z600', '20CRV2c_z500',
'20CRV2c_z400', '20CRV2c_z300', '20CRV2c_z250',
'20CRV2c_z200', '20CRV2c_z150', '20CRV2c_z100',
'20CRV2c_z70', '20CRV2c_z50', '20CRV2c_z30',
'20CRV2c_z20', '20CRV2c_z10']
),
LiteralInput('BBox', 'Bounding Box',
data_type='string',
abstract="Enter a bbox: min_lon, max_lon, min_lat, max_lat."
" min_lon=Western longitude,"
" max_lon=Eastern longitude,"
" min_lat=Southern or northern latitude,"
" max_lat=Northern or southern latitude."
" For example: -80,50,20,70",
min_occurs=0,
max_occurs=1,
default='-20,40,30,70',
),
LiteralInput('dateSt', 'Start date of analysis period',
data_type='date',
abstract='First day of the period to be analysed',
default='2018-03-01',
min_occurs=0,
max_occurs=1,
),
LiteralInput('dateEn', 'End date of analysis period',
data_type='date',
abstract='Last day of the period to be analysed',
default='2018-03-05',
min_occurs=0,
max_occurs=1,
),
LiteralInput('refSt', 'Start date of reference period',
data_type='date',
abstract='First day of the period where analogues are picked',
default='1900-01-01',
min_occurs=0,
max_occurs=1,
),
LiteralInput('refEn', 'End date of reference period',
data_type='date',
abstract='Last day of the period where analogues are picked',
default='2014-12-31',
min_occurs=0,
max_occurs=1,
),
LiteralInput("seasonwin", "Seasonal window",
abstract="Number of days before and after the date to be analysed",
default='30',
data_type='integer',
min_occurs=0,
max_occurs=1,
),
LiteralInput("nanalog", "Nr of analogues",
abstract="Number of analogues to be detected",
default='20',
data_type='integer',
min_occurs=0,
max_occurs=1,
),
LiteralInput("dist", "Distance",
abstract="Distance function to define analogues",
default='euclidean',
data_type='string',
min_occurs=0,
max_occurs=1,
allowed_values=['euclidean', 'mahalanobis', 'cosine']
),
LiteralInput("outformat", "output file format",
abstract="Choose the format for the analogue output file",
default="ascii",
data_type='string',
min_occurs=0,
max_occurs=1,
allowed_values=['ascii', 'netCDF4']
),
LiteralInput("timewin", "Time window",
abstract="Number of days following the analogue day the distance will be averaged",
default='1',
data_type='integer',
min_occurs=0,
max_occurs=1,
),
LiteralInput("plot", "Plot",
abstract="Plot simulations and Mean/Best/Last analogs?",
default='No',
data_type='string',
min_occurs=0,
max_occurs=1,
allowed_values=['Yes', 'No']
),
]
outputs = [
ComplexOutput("analog_pdf", "Maps with mean analogs and simulation",
abstract="Analogs Maps",
supported_formats=[Format('image/pdf')],
as_reference=True,
),
ComplexOutput("config", "Config File",
abstract="Config file used for the Fortran process",
supported_formats=[Format("text/plain")],
as_reference=True,
),
ComplexOutput("analogs", "Analogues File",
abstract="mulit-column text file",
supported_formats=[Format("text/plain")],
as_reference=True,
),
ComplexOutput("formated_analogs", "Formated Analogues File",
abstract="Formated analogues file for viewer",
supported_formats=[Format("text/plain")],
as_reference=True,
),
ComplexOutput("output", "Analogues Viewer html page",
abstract="Interactive visualization of calculated analogues",
supported_formats=[Format("text/html")],
as_reference=True,
),
ComplexOutput('output_log', 'Logging information',
abstract="Collected logs during process run.",
as_reference=True,
supported_formats=[Format('text/plain')]
),
]
super(AnalogsRe2ReProcess, self).__init__(
self._handler,
identifier="analogs_re2re",
title="Analogues of circulation (based on 2 reanalyses datasets)",
abstract='Search for days with analogue pressure pattern for NCEP in 20CRV2c reanalyses data sets',
version="0.10",
metadata=[
Metadata('LSCE', 'http://www.lsce.ipsl.fr/en/index.php'),
Metadata('Doc', 'http://flyingpigeon.readthedocs.io/en/latest/'),
],
inputs=inputs,
outputs=outputs,
status_supported=True,
store_supported=True,
)
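# A hedged deployment sketch (illustrative only, kept commented out): a process like
# the one above can be exposed through a pywps Service behind a WSGI server; the
# configuration file name 'pywps.cfg' is a placeholder.
# from pywps import Service
# application = Service(processes=[AnalogsRe2ReProcess()], cfgfiles=['pywps.cfg'])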
def _handler(self, request, response):
init_process_logger('log.txt')
response.outputs['output_log'].file = 'log.txt'
LOGGER.info('Start process')
response.update_status('execution started at : {}'.format(dt.now()), 5)
process_start_time = time.time() # measure process execution time ...
start_time = time.time() # measure init ...
################################
# reading in the input arguments
################################
try:
response.update_status('read input parameter : %s ' % dt.now(), 6)
refSt = request.inputs['refSt'][0].data
refEn = request.inputs['refEn'][0].data
dateSt = request.inputs['dateSt'][0].data
dateEn = request.inputs['dateEn'][0].data
seasonwin = request.inputs['seasonwin'][0].data
nanalog = request.inputs['nanalog'][0].data
bboxDef = '-20,40,30,70' # in general format
bbox = []
bboxStr = request.inputs['BBox'][0].data
LOGGER.debug('BBOX selected by user: %s ' % (bboxStr))
bboxStr = bboxStr.split(',')
# Check for wrong coordinates and apply the default if necessary
if (abs(float(bboxStr[0])) > 180 or
abs(float(bboxStr[1])) > 180 or
abs(float(bboxStr[2])) > 90 or
abs(float(bboxStr[3])) > 90):
bboxStr = bboxDef # request.inputs['BBox'].default # .default doesn't work anymore!!!
LOGGER.debug('BBOX is out of the range, using default instead: %s ' % (bboxStr))
bboxStr = bboxStr.split(',')
bbox.append(float(bboxStr[0]))
bbox.append(float(bboxStr[2]))
bbox.append(float(bboxStr[1]))
bbox.append(float(bboxStr[3]))
LOGGER.debug('BBOX for ocgis: %s ' % (bbox))
LOGGER.debug('BBOX original: %s ' % (bboxStr))
plot = request.inputs['plot'][0].data
distance = request.inputs['dist'][0].data
outformat = request.inputs['outformat'][0].data
timewin = request.inputs['timewin'][0].data
model_var = request.inputs['reanalyses'][0].data
model, var = model_var.split('_')
ref_model_var = request.inputs['Refreanalyses'][0].data
ref_model, ref_var = ref_model_var.split('_')
LOGGER.info('input parameters set')
response.update_status('Read in and convert the arguments', 7)
except Exception as e:
msg = 'failed to read input parameter %s ' % e
LOGGER.exception(msg)
raise Exception(msg)
######################################
# convert types and set environment
######################################
try:
response.update_status('Preparing environment, converting arguments', 8)
LOGGER.debug('date: %s %s %s %s ' % (type(refSt), refEn, dateSt, dateEn))
# normalize == 'None':
seacyc = False
if outformat == 'ascii':
outformat = '.txt'
elif outformat == 'netCDF4':
outformat = '.nc'
else:
LOGGER.exception('output format not valid')
except Exception as e:
msg = 'failed to set environment %s ' % e
LOGGER.exception(msg)
raise Exception(msg)
###########################
# set the environment
###########################
response.update_status('fetching data from archive', 9)
getlevel = False
if 'z' in var:
level = var.strip('z')
else:
level = None
##########################################
# fetch Data from original data archive
##########################################
try:
model_nc = rl(start=dateSt.year, end=dateEn.year,
dataset=model, variable=var,
getlevel=getlevel)
ref_model_nc = rl(start=refSt.year, end=refEn.year,
dataset=ref_model, variable=ref_var,
getlevel=getlevel)
LOGGER.info('reanalyses data fetched')
except Exception:
msg = 'failed to get reanalyses data'
LOGGER.exception(msg)
raise Exception(msg)
response.update_status('subsetting region of interest', 10)
# Checking memory and dataset size
model_size = get_files_size(model_nc)
ref_model_size = get_files_size(ref_model_nc)
m_size = max(model_size, ref_model_size)
memory_avail = psutil.virtual_memory().available
thrs = 0.2 # 20%
if (m_size >= thrs * memory_avail):
ser_r = True
else:
ser_r = False
LOGGER.debug('Available Memory: %s ' % (memory_avail))
LOGGER.debug('Dataset size: %s ' % (m_size))
LOGGER.debug('Threshold: %s ' % (thrs * memory_avail))
LOGGER.debug('Serial or at once: %s ' % (ser_r))
# #####################################################
# Construct descriptive filenames for the three files #
# listed in config file #
# TODO check strftime for years <1900 (!) #
# #####################################################
# refDatesString = dt.strftime(refSt, '%Y-%m-%d') + "_" + dt.strftime(refEn, '%Y-%m-%d')
# simDatesString = dt.strftime(dateSt, '%Y-%m-%d') + "_" + dt.strftime(dateEn, '%Y-%m-%d')
# Fix < 1900 issue...
refDatesString = refSt.isoformat().strip().split("T")[0] + "_" + refEn.isoformat().strip().split("T")[0]
simDatesString = dateSt.isoformat().strip().split("T")[0] + "_" + dateEn.isoformat().strip().split("T")[0]
archiveNameString = "base_" + var + "_" + refDatesString + '_%.1f_%.1f_%.1f_%.1f' \
% (bbox[0], bbox[2], bbox[1], bbox[3])
simNameString = "sim_" + var + "_" + simDatesString + '_%.1f_%.1f_%.1f_%.1f' \
% (bbox[0], bbox[2], bbox[1], bbox[3])
if ('z' in var):
# ------------------ NCEP -------------------
tmp_total = []
origvar = get_variable(model_nc)
for z in model_nc:
b0 = call(resource=z, variable=origvar, level_range=[int(level), int(level)], geom=bbox,
spatial_wrapping='wrap', prefix='levdom_' + os.path.basename(z)[0:-3])
tmp_total.append(b0)
time_range = [dateSt, dateEn]
tmp_total = sorted(tmp_total, key=lambda i: os.path.splitext(os.path.basename(i))[0])
inter_subset_tmp = call(resource=tmp_total, variable=origvar, time_range=time_range)
# Clean
for i in tmp_total:
tbr = 'rm -f %s' % (i)
os.system(tbr)
# Create new variable
ds = Dataset(inter_subset_tmp, mode='a')
z_var = ds.variables.pop(origvar)
dims = z_var.dimensions
new_var = ds.createVariable('z%s' % level, z_var.dtype, dimensions=(dims[0], dims[2], dims[3]))
new_var[:, :, :] = squeeze(z_var[:, 0, :, :])
ds.close()
simulation = call(inter_subset_tmp, variable='z%s' % level, prefix=simNameString)
# ------------------ 20CRV2c -------------------
tmp_total = []
origvar = get_variable(ref_model_nc)
for z in ref_model_nc:
tmp_n = 'tmp_%s' % (uuid.uuid1())
# select level and regrid
b0 = call(resource=z, variable=origvar, level_range=[int(level), int(level)],
spatial_wrapping='wrap', cdover='system',
regrid_destination=model_nc[0], regrid_options='bil', prefix=tmp_n)
# select domain
b01 = call(resource=b0, variable=origvar, geom=bbox, spatial_wrapping='wrap', prefix='levregr_' + os.path.basename(z)[0:-3])
tbr = 'rm -f %s' % (b0)
os.system(tbr)
tbr = 'rm -f %s.nc' % (tmp_n)
os.system(tbr)
tmp_total.append(b01)
time_range = [refSt, refEn]
tmp_total = sorted(tmp_total, key=lambda i: os.path.splitext(os.path.basename(i))[0])
ref_inter_subset_tmp = call(resource=tmp_total, variable=origvar, time_range=time_range)
# Clean
for i in tmp_total:
tbr = 'rm -f %s' % (i)
os.system(tbr)
# Create new variable
ds = Dataset(ref_inter_subset_tmp, mode='a')
z_var = ds.variables.pop(origvar)
dims = z_var.dimensions
new_var = ds.createVariable('z%s' % level, z_var.dtype, dimensions=(dims[0], dims[2], dims[3]))
new_var[:, :, :] = squeeze(z_var[:, 0, :, :])
ds.close()
archive = call(ref_inter_subset_tmp, variable='z%s' % level, prefix=archiveNameString)
else:
if ser_r:
LOGGER.debug('Process reanalysis step-by-step')
# ----- NCEP ------
tmp_total = []
for z in model_nc:
b0 = call(resource=z, variable=var, geom=bbox, spatial_wrapping='wrap',
prefix='Rdom_' + os.path.basename(z)[0:-3])
tmp_total.append(b0)
tmp_total = sorted(tmp_total, key=lambda i: os.path.splitext(os.path.basename(i))[0])
simulation = call(resource=tmp_total, variable=var, time_range=[dateSt, dateEn], prefix=simNameString)
# Clean
for i in tmp_total:
tbr = 'rm -f %s' % (i)
os.system(tbr)
#
from __future__ import division, print_function
import sys
import os
import time
import enum
import numpy as np
from mpi4py import MPI
import h5py
from .finite_differences import SOR_step, apply_operator
from scipy.fftpack import fft2, ifft2
def format_float(x, sigfigs=4, units=''):
"""Returns a string of the float f with a limited number of sig figs and a metric prefix"""
prefixes = { -24: u"y", -21: u"z", -18: u"a", -15: u"f", -12: u"p", -9: u"n", -6: u"u", -3: u"m",
0: u"", 3: u"k", 6: u"M", 9: u"G", 12: u"T", 15: u"P", 18: u"E", 21: u"Z", 24: u"Y" }
if np.isnan(x) or np.isinf(x):
return str(x)
if x != 0:
exponent = int(np.floor(np.log10(np.abs(x))))
# Only multiples of 10^3
exponent = int(np.floor(exponent / 3) * 3)
else:
exponent = 0
significand = x / 10 ** exponent
pre_decimal, post_decimal = divmod(significand, 1)
digits = sigfigs - len(str(int(pre_decimal)))
significand = round(significand, digits)
result = '%.0{}f'.format(digits) % significand
if exponent:
try:
# If our number has an SI prefix then use it
prefix = prefixes[exponent]
result += ' ' + prefix
except KeyError:
# Otherwise display in scientific notation
result += 'e' + str(exponent)
if units:
result += ' '
elif units:
result += ' '
return result + units
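# A small usage sketch: values are rounded to `sigfigs` significant figures and get
# an SI prefix where one exists.
example_ms = format_float(0.00123, units='s')    # -> '1.230 ms'
example_mhz = format_float(2.5e7, units='Hz')    # -> '25.00 MHz'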
# Constants to represent differential operators.
class Operators(enum.IntEnum):
GRADX = 0
GRADY = 1
GRAD2X = 2
GRAD2Y = 3
class OperatorSum(dict):
"""Class for representing a weighted sum of operators. Supports
arithmetic operations, and coefficients can be numpy arrays for spatially
varying coefficients."""
# Tells numpy arrays to not try to use their arithmetic operations
# elementwise on us, instead they should defer to this class's arithmetic
# methods:
__array_priority__ = 1.0
def __add__(self, other):
new = OperatorSum(self)
for obj, coefficient in other.items():
new[obj] = new.get(obj, 0) + coefficient
return new
def __sub__(self, other):
new = OperatorSum(self)
for obj, coefficient in other.items():
new[obj] = new.get(obj, 0) - coefficient
return new
def __mul__(self, factor):
new = OperatorSum(self)
for obj, coefficient in new.items():
new[obj] = coefficient*factor
return new
def __div__(self, factor):
new = OperatorSum(self)
for obj, coefficient in new.items():
new[obj] = coefficient/factor
return new
__radd__ = __add__
__rsub__ = __sub__
__rmul__ = __mul__
__rdiv__ = __div__
# Objects representing operators, which can be added, subtracted etc from each
# other and multiplied by constants:
GRADX = OperatorSum({Operators.GRADX: np.ones((1, 1))})
GRADY = OperatorSum({Operators.GRADY: np.ones((1, 1))})
GRAD2X = OperatorSum({Operators.GRAD2X: np.ones((1, 1))})
GRAD2Y = OperatorSum({Operators.GRAD2Y: np.ones((1, 1))})
LAPLACIAN = GRAD2X + GRAD2Y
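# A short sketch of composing these operator objects into a weighted sum; the
# coefficients may be scalars or numpy arrays broadcastable over the grid.
example_operator = 2.0 * GRAD2X + 0.5 * GRAD2Y - 0.1 * GRADX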
def get_factors(n):
"""return all the factors of n"""
factors = set()
for i in range(1, int(n**(0.5)) + 1):
if not n % i:
factors.update((i, n // i))
return factors
def get_best_2D_segmentation(size_x, size_y, N_segments):
"""Returns (best_n_segments_x, best_n_segments_y), describing the optimal
cartesian grid for splitting up a rectangle of size (size_x, size_y) into
N_segments equal sized segments such as to minimise surface area between
the segments."""
lowest_surface_area = None
for n_segments_x in get_factors(N_segments):
n_segments_y = N_segments // n_segments_x
surface_area = n_segments_x * size_y + n_segments_y * size_x
if lowest_surface_area is None or surface_area < lowest_surface_area:
lowest_surface_area = surface_area
best_n_segments_x, best_n_segments_y = n_segments_x, n_segments_y
return best_n_segments_x, best_n_segments_y
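# A quick example: a 512 x 128 grid shared among 4 MPI tasks is best cut along the
# long axis, which minimises the total interface length between subdomains.
example_split = get_best_2D_segmentation(512, 128, 4)  # -> (4, 1)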
class Simulator2D(object):
def __init__(self, x_min_global, x_max_global, y_min_global, y_max_global, nx_global, ny_global,
periodic_x=False, periodic_y=False, operator_order=4):
"""A class for solving partial differential equations in two dimensions on
multiple cores using MPI"""
self.x_min_global = x_min_global
self.x_max_global = x_max_global
self.y_min_global = y_min_global
self.y_max_global = y_max_global
self.nx_global = nx_global
self.ny_global = ny_global
self.periodic_x = periodic_x
self.periodic_y = periodic_y
self.operator_order = operator_order
self.n_edge_pts = self.operator_order // 2
if self.operator_order not in [2, 4, 6]:
msg = "Only differential operators of order 2, 4, 6 supported."
raise ValueError(msg)
self.global_shape = (self.nx_global, self.ny_global)
self._setup_MPI_grid()
self.shape = (self.nx, self.ny)
self.dx = (self.x_max_global - self.x_min_global)/(self.nx_global - 1)
self.dy = (self.y_max_global - self.y_min_global)/(self.ny_global - 1)
self.x_min = self.x_min_global + self.dx * self.global_first_x_index
self.y_min = self.y_min_global + self.dy * self.global_first_y_index
self.x_max = self.x_min + self.dx * (self.nx - 1)
self.y_max = self.y_min + self.dy * (self.ny - 1)
self.x = np.linspace(self.x_min, self.x_max, self.nx).reshape((self.nx, 1))
self.y = np.linspace(self.y_min, self.y_max, self.ny).reshape((1, self.ny))
self.kx = self.ky = self.f_gradx = self.f_grady = self.f_grad2x = self.f_grad2y = self.f_laplacian = None
if self.MPI_size_x == 1:
# For FFTs, which can be done only on a single node in periodic directions:
if periodic_x:
self.kx = 2 * np.pi * np.fft.fftfreq(self.nx, d=self.dx).reshape((self.nx, 1))
# x derivative operator in Fourier space:
self.f_gradx = 1j*self.kx
self.f_grad2x = -self.kx**2
if periodic_y:
self.ky = 2 * np.pi * np.fft.fftfreq(self.ny, d=self.dy).reshape((1, self.ny))
# y derivative operator in Fourier space:
self.f_grady = 1j*self.ky
self.f_grad2y = -self.ky**2
if periodic_x and periodic_y:
# Laplace operator in Fourier space:
self.f_laplacian = self.f_grad2x + self.f_grad2y
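# A hedged usage sketch (illustrative only, kept commented out): run under MPI,
# e.g. `mpirun -n 4 python script.py`; each rank owns a subdomain with local
# coordinate arrays self.x and self.y.
# sim = Simulator2D(-5.0, 5.0, -5.0, 5.0, 256, 256,
#                   periodic_x=True, periodic_y=True, operator_order=4)
# psi = np.exp(-(sim.x ** 2 + sim.y ** 2))                # field on this rank
# norm = sim.par_sum(np.abs(psi) ** 2) * sim.dx * sim.dy  # global integral estimate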
def _setup_MPI_grid(self):
"""Split space up according to the number of MPI tasks. Set instance
attributes for spatial extent and number of points in this MPI task,
and create buffers and persistent communication requests for sending
data to adjacent processes"""
self.MPI_size = MPI.COMM_WORLD.Get_size()
self.MPI_size_x, self.MPI_size_y = get_best_2D_segmentation(self.nx_global, self.ny_global, self.MPI_size)
self.MPI_comm = MPI.COMM_WORLD.Create_cart([self.MPI_size_x, self.MPI_size_y],
periods=[self.periodic_x, self.periodic_y], reorder=True)
self.MPI_rank = self.MPI_comm.Get_rank()
self.MPI_x_coord, self.MPI_y_coord = self.MPI_comm.Get_coords(self.MPI_rank)
if self.MPI_x_coord > 0 or self.periodic_x:
self.MPI_rank_left = self.MPI_comm.Get_cart_rank((self.MPI_x_coord - 1, self.MPI_y_coord))
else:
self.MPI_rank_left = MPI.PROC_NULL
if self.MPI_x_coord < self.MPI_size_x -1 or self.periodic_x:
self.MPI_rank_right = self.MPI_comm.Get_cart_rank((self.MPI_x_coord + 1, self.MPI_y_coord))
else:
self.MPI_rank_right = MPI.PROC_NULL
if self.MPI_y_coord > 0 or self.periodic_y:
self.MPI_rank_down = self.MPI_comm.Get_cart_rank((self.MPI_x_coord, self.MPI_y_coord - 1))
else:
self.MPI_rank_down = MPI.PROC_NULL
if self.MPI_y_coord < self.MPI_size_y -1 or self.periodic_y:
self.MPI_rank_up = self.MPI_comm.Get_cart_rank((self.MPI_x_coord, self.MPI_y_coord + 1))
else:
self.MPI_rank_up = MPI.PROC_NULL
self.processor_name = MPI.Get_processor_name()
# Share out the points between processes in each direction:
self.nx, nx_remaining = divmod(self.nx_global, self.MPI_size_x)
if self.MPI_x_coord < nx_remaining:
# Give the remaining to the lowest ranked processes:
self.nx += 1
self.ny, ny_remaining = divmod(self.ny_global, self.MPI_size_y)
if self.MPI_y_coord < ny_remaining:
# Give the remaining to the lowest ranked processes:
self.ny += 1
# What are our coordinates in the global array?
self.global_first_x_index = self.nx * self.MPI_x_coord
# Be sure to count the extra points the lower ranked processes have:
if self.MPI_x_coord >= nx_remaining:
self.global_first_x_index += nx_remaining
self.global_first_y_index = self.ny * self.MPI_y_coord
# Be sure to count the extra points the lower ranked processes have:
if self.MPI_y_coord >= ny_remaining:
self.global_first_y_index += ny_remaining
# We need to tag our data to have a way other than rank to distinguish
# between multiple messages the two tasks might be sending each other
# at the same time:
TAG_LEFT_TO_RIGHT = 0
TAG_RIGHT_TO_LEFT = 1
TAG_DOWN_TO_UP = 2
TAG_UP_TO_DOWN = 3
# Buffers and MPI request objects for sending and receiving data to
# and from other processes. Sorted by whether the datatype is real or
# complex.
self.MPI_send_buffers = {}
self.MPI_receive_buffers = {}
self.MPI_requests = {}
for dtype in [np.float64, np.complex128]:
x_edge_shape = (self.n_edge_pts, self.ny)
y_edge_shape = (self.nx, self.n_edge_pts)
left_send_buffer = np.zeros(x_edge_shape, dtype=dtype)
left_receive_buffer = np.zeros(x_edge_shape, dtype=dtype)
right_send_buffer = np.zeros(x_edge_shape, dtype=dtype)
right_receive_buffer = np.zeros(x_edge_shape, dtype=dtype)
bottom_send_buffer = np.zeros(y_edge_shape, dtype=dtype)
bottom_receive_buffer = np.zeros(y_edge_shape, dtype=dtype)
top_send_buffer = np.zeros(y_edge_shape, dtype=dtype)
top_receive_buffer = np.zeros(y_edge_shape, dtype=dtype)
send_left = self.MPI_comm.Send_init(left_send_buffer, self.MPI_rank_left, tag=TAG_RIGHT_TO_LEFT)
send_right = self.MPI_comm.Send_init(right_send_buffer, self.MPI_rank_right, tag=TAG_LEFT_TO_RIGHT)
send_bottom = self.MPI_comm.Send_init(bottom_send_buffer, self.MPI_rank_down, tag=TAG_UP_TO_DOWN)
send_top = self.MPI_comm.Send_init(top_send_buffer, self.MPI_rank_up, tag=TAG_DOWN_TO_UP)
receive_left = self.MPI_comm.Recv_init(left_receive_buffer, self.MPI_rank_left, tag=TAG_LEFT_TO_RIGHT)
receive_right = self.MPI_comm.Recv_init(right_receive_buffer, self.MPI_rank_right, tag=TAG_RIGHT_TO_LEFT)
receive_bottom = self.MPI_comm.Recv_init(bottom_receive_buffer, self.MPI_rank_down, tag=TAG_DOWN_TO_UP)
receive_top = self.MPI_comm.Recv_init(top_receive_buffer, self.MPI_rank_up, tag=TAG_UP_TO_DOWN)
self.MPI_send_buffers[dtype] = (left_send_buffer, right_send_buffer, bottom_send_buffer, top_send_buffer)
self.MPI_receive_buffers[dtype] = (left_receive_buffer, right_receive_buffer,
bottom_receive_buffer, top_receive_buffer)
self.MPI_requests[dtype] = (send_left, send_right, send_bottom, send_top,
receive_left, receive_right, receive_bottom, receive_top)
self.pending_requests = None
def MPI_send_at_edges(self, psi):
"""Start an asynchronous MPI send data from the edges of psi to all
adjacent MPI processes."""
left_buffer, right_buffer, bottom_buffer, top_buffer = self.MPI_send_buffers[psi.dtype.type]
left_buffer[:] = psi[:self.n_edge_pts, :]
right_buffer[:] = psi[-self.n_edge_pts:, :]
bottom_buffer[:] = psi[:, :self.n_edge_pts]
top_buffer[:] = psi[:, -self.n_edge_pts:]
self.pending_requests = self.MPI_requests[psi.dtype.type]
MPI.Prequest.Startall(self.pending_requests)
def MPI_receive_at_edges(self):
"""Finalise an asynchronous MPI transfer from all adjacent MPI
processes. Data remains in the receive buffers and can be accessed by
the caller after this method returns."""
MPI.Prequest.Waitall(self.pending_requests)
self.pending_requests = None
def par_sum(self, psi):
"""Sum the given field over all MPI processes"""
local_sum = np.asarray(psi.sum())
result = np.zeros_like(local_sum)
self.MPI_comm.Allreduce(local_sum, result, MPI.SUM)
return result
def par_vdot(self, psi1, psi2):
""""Dots two vectors (with complex comjucation of the first) and sums
result over MPI processes"""
local_dot = np.asarray(np.vdot(psi1, psi2))
result = np.zeros_like(local_dot)
self.MPI_comm.Allreduce(local_dot, result, MPI.SUM)
return result
<reponame>maayane/PhotoFit
"""*******************************************************
This module has functions for computing black-body flux densities
*****************************************************
"""
#print __doc__
import astropy
import math
from SOPRANOS import distances_conversions
from SOPRANOS import extinction
import numpy as np
import pdb
#import pylab
from astropy import constants as const
import numba
#def planck(wav, T):
# a=2*6.626070040e-34*(3e8)**2
# b=6.626070040e-34*(3e8)/(wav*T*1.38064852e-23)
# #a = 2*const.h.value*const.c.value**2
# #b = const.h.value*const.c.value/(wav*const.k_B.value*T) #convert into cgs
# intensity = a/ ( (wav**5) * (np.exp(b) - 1.0) )
# return intensity#*u.J/(u.s*u.m*u.m*u.m).cgs
#def planck_cgs(wav,T):
# a=2*const.h.cgs.value*(const.c.cgs.value)**2
# b=const.h.cgs.value*const.c.cgs.value/(wav*const.k_B.cgs.value*T)
# intensity=a/( (wav**5) * (np.exp(b) - 1.0) )
# return intensity
#def RayleighJeans(wav, T):
# a = 2*c*k*T
# intensity = a/wav**4
# return intensity
#def Wien(wav, T):
# a = 2*h*c**2
# b=h*c/(wav*k*T)
# intensity = (a/wav**5)*np.exp(-b)
# return intensity
def black_body_flux_density(Temp,wavelength,type=None,verbose=False,distance_pc=None,Radius=None,Ebv=None,R_ext=None,redshift=0,plot=False,R_one_per_T=True):
"""Description: Given a temperature, calculates a black body flux density B_lambda.
If a radius and a distance are given, calculate the apparent flux density (R/d)^2*B_lambda
Input :- Temperature [K]
- numpy array of wavelengths [m], typically np.linspace(1e-10,1e-6,num=1000)
- type of formula:
'P' Planck
'RJ' Rayleigh-Jeans approximation
- Radius (optional) in solar radius
- distance (optional) in pc
- Ebv: (optional, default is none) extinction to APPLY to the theoretical bb spectrum
- redshift: (optional, default is none) z to apply to the theoretical spectrum
Output :array of numpy.arrays [spectrum_cgs,spectrum_Hz,spectrum_A,spectrum_mJy,spectrum_phot] CAREFUL! Confusion between spectrum_cgs and spectrum_A has caused so much harm in the past!
- spectrum_cgs: wavelength [m], Emittance (flux density) in erg/sec/cm^2/cm(lambda)
- spectrum_Hz: wavelength [m], Emittance in erg/sec/cm^2/Hz
- spectrum_A: wavelength [m], Emittance in erg/sec/cm^2/Ang (lambda), 1e-8*Emittance (flux density) in erg/sec/cm^2/cm(lambda)
- spectrum_mjy: wavelength [m], Emittance [mJy]
- spectrum_phot: wavelength [m], number of photons [photons/sec/cm^2/Ang (lambda)]
Tested : ?
By : <NAME> Nov 2016
URL :
Example: [E_cgs, E_Hz, E_A, E_mjy, E_phot] = black_body_flux_density(3000, wavelengths, 'P')
Reliable: """
#if Ebv==0.:
# Ebv=None
#print('bla')
#pdb.set_trace()
h_cgs=const.h.cgs.value
c_cgs=const.c.cgs.value
kB_cgs=const.k_B.cgs.value
h_USI=const.h.value
c_USI=const.c.value
kB_USI=const.k_B.value
wavelength_in_cm=wavelength*1e2 # wavelength in cgs
wavelength_in_cm = wavelength_in_cm.astype(float)
#print('wavelength_in_cm',wavelength_in_cm)
#pdb.set_trace()
nu=c_cgs/wavelength_in_cm #frequency in Hz (because c is in cm/s and wavelength in cm)
if (Radius is not None and distance_pc is not None):
R_pc=distances_conversions.solar_radius_to_pc(Radius)
coeff=(R_pc/distance_pc)**2
if isinstance(Temp,np.ndarray) is True and R_one_per_T is True:
coeffx = (R_pc / distance_pc)
coeff=coeffx[:,np.newaxis]
#print('np.shape(coeff) is',np.shape(coeff))
else:
if verbose==True:
print('the radius or distance or both were not specified')
coeff=1.
#print coeff
#pdb.set_trace()
if type.lower() in (None,'p'):
if verbose==True:
print('formula used for black body: Planck')
#b_cgs=h_cgs*c_cgs/(wavelength_in_cm*kB_cgs*Temp)
if verbose == True:
#print('b_cgs is', b_cgs)
#print('be aware that {0} elements in the exponent of the Planck formula lead to an infinite exponent'.format(np.shape(np.exp(b_cgs)[np.isinf(np.exp(b_cgs))==True])[0]))
print('denom shape is',np.shape(h_cgs*c_cgs/(wavelength_in_cm*kB_cgs*Temp)))
if isinstance(Temp,np.ndarray) is True:
#E_cgs=np.zeros((np.shape(Temp)[0],np.shpe(wavelength)[0]))
#print(np.shape(wavelength_in_cm[np.newaxis,:]))
#print(np.shape(kB_cgs * Temp[:,np.newaxis]))
#print(np.shape(coeff * 2 * math.pi * h_cgs * c_cgs ** 2 ))
#print(np.shape(coeff * 2 * math.pi * h_cgs * c_cgs ** 2 / (np.power(wavelength_in_cm[np.newaxis,:],5) * (np.exp(h_cgs * c_cgs / (np.float64(wavelength_in_cm[np.newaxis,:]) * kB_cgs * Temp[:,np.newaxis])) - 1.0))))
E_cgs = coeff * 2 * math.pi * h_cgs * c_cgs ** 2 / (np.power(wavelength_in_cm[np.newaxis,:],5) * (np.exp(h_cgs * c_cgs / (np.float64(wavelength_in_cm[np.newaxis,:]) * kB_cgs * Temp[:,np.newaxis])) - 1.0))
E_Hz = coeff * 2 * math.pi * h_cgs * nu[np.newaxis,:] ** 3 / (c_cgs ** 2 * (np.exp(h_cgs * nu[np.newaxis,:] / (kB_cgs * Temp[:,np.newaxis])) - 1.0)) # this is the planck formula in Hz ()
E_A = E_cgs * 1e-8 # because cm-1 =(1e8 A)-1
E_mjy = 1e-26 * E_Hz # because 1Jy=1e-26 J/(sec*m^2*Hz) and 1J=1e7erg
E_phot = coeff * 2 * math.pi * nu[np.newaxis,:] ** 2 / (c_cgs ** 2 * (np.exp(h_cgs * nu[np.newaxis,:] / (kB_cgs * Temp[:,np.newaxis])) - 1.0))
else:
#print('coeff is',coeff)
#pdb.set_trace()
E_cgs=coeff*2*math.pi*h_cgs*c_cgs**2/(wavelength_in_cm**5 *(np.exp(h_cgs*c_cgs/(np.float64(wavelength_in_cm)*kB_cgs*Temp)) - 1.0))
E_Hz=coeff*2*math.pi*h_cgs*nu**3/(c_cgs**2*(np.exp(h_cgs*nu/(kB_cgs*Temp))-1.0)) #this is the planck formula in Hz ()
E_A=E_cgs*1e-8 # because cm-1 =(1e8 A)-1
E_mjy=1e-26*E_Hz # because 1Jy=1e-26 J/(sec*m^2*Hz) and 1J=1e7erg
E_phot=coeff*2*math.pi*nu**2/(c_cgs**2*(np.exp(h_cgs*nu/(kB_cgs*Temp))-1.0))
elif type.lower() == 'rj':
if verbose == True:
print('formula used for black body: Rayleigh-Jeans')
E_cgs=coeff*2*math.pi*c_cgs*kB_cgs*Temp/wavelength_in_cm**4
E_Hz=coeff*2*math.pi*kB_cgs*Temp*(nu/c_cgs)**2
E_A = E_cgs * 1e-8 # because cm-1 =(1e8 A)-1
E_mjy = 1e-26 * E_Hz # because 1Jy=1e-26 J/(sec*m^2*Hz) and 1J=1e7erg
E_phot=None # I am not sure
else:
print('unknown formula')
pdb.set_trace()
#print('wavelength are',wavelength)
wavelength_fixed=wavelength*(redshift+1)
#print('wavelength_fixed',wavelength_fixed)
#pdb.set_trace()
E_A_fixed=E_A/(redshift+1)
if isinstance(Temp, np.ndarray) is True:
if Ebv==None:
#print('(np.shape(E_cgs)[0]) is',(np.shape(E_cgs)[0]))
spectrum_cgs=[np.array(list(zip(wavelength_fixed,E_cgs[i,:]))) for i in range(np.shape(E_cgs)[0])]#not sure how z influences
spectrum_Hz=[np.array(list(zip(wavelength_fixed,E_cgs[i,:]))) for i in range(np.shape(E_Hz)[0])]#not sure how z influences
spectrum_A=[np.array(list(zip(wavelength_fixed,E_A_fixed[i,:]))) for i in range(np.shape(E_A_fixed)[0])]
spectrum_mjy=[np.array(list(zip(wavelength_fixed,E_mjy[i,:]))) for i in range(np.shape(E_mjy)[0])]#not sure how z influences
spectrum_phot=[np.array(list(zip(wavelength_fixed,E_phot[i,:]))) for i in range(np.shape(E_phot)[0])]#not sure how z influences
else:
#print(wavelength)
#print('***')
#print(wavelength * 1e6)
#pdb.set_trace()
spectrum_cgs=np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength*1e6,E_cgs))),Ebv,R=R_ext)[:,1])))# apply_extinction_to_theoretical_flux needs wavelengths in micropmeters
spectrum_Hz=np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength*1e6,E_Hz))),Ebv,R=R_ext)[:,1])))
spectrum_A = np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength * 1e6, E_A_fixed))),Ebv,R=R_ext)[:,1])))
#spextrum_A_befor_E=np.array(list(zip(wavelength_fixed,E_A_fixed)))
spectrum_mjy = np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength * 1e6, E_mjy))),Ebv,R=R_ext)[:, 1])))
spectrum_phot = np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength * 1e6, E_phot))),Ebv,R=R_ext)[:, 1])))
else:
if Ebv==None:
spectrum_cgs=np.array(list(zip(wavelength_fixed,E_cgs)))#not sure how z influences
spectrum_Hz=np.array(list(zip(wavelength_fixed,E_Hz)))#not sure how z influences
spectrum_A=np.array(list(zip(wavelength_fixed,E_A_fixed)))
spectrum_mjy=np.array(list(zip(wavelength_fixed,E_mjy)))#not sure how z influences
spectrum_phot=np.array(list(zip(wavelength_fixed,E_phot)))#not sure how z influences
else:
#print(wavelength)
#print('***')
#print(wavelength * 1e6)
#pdb.set_trace()
spectrum_cgs=np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength*1e6,E_cgs))),Ebv,R=R_ext)[:,1])))# apply_extinction_to_theoretical_flux needs wavelengths in micropmeters
spectrum_Hz=np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength*1e6,E_Hz))),Ebv,R=R_ext)[:,1])))
spectrum_A = np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength * 1e6, E_A_fixed))),Ebv,R=R_ext)[:,1])))
#spextrum_A_befor_E=np.array(list(zip(wavelength_fixed,E_A_fixed)))
spectrum_mjy = np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength * 1e6, E_mjy))),Ebv,R=R_ext)[:, 1])))
spectrum_phot = np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength * 1e6, E_phot))),Ebv,R=R_ext)[:, 1])))
if plot==True:
pylab.figure()
pylab.plot(wavelength,E_A,label='spectrum before applying z and E')
pylab.plot(wavelength_fixed,E_A_fixed,label='spectrum redshifted z={0}'.format(redshift))
pylab.plot(spectrum_A[:,0],spectrum_A[:,1], label='spectrum redshifted z={0} and extincted'.format(redshift))
pylab.legend()
pylab.show()
#print('managed till here')
#pdb.set_trace()
#print('np.shape(spectrum_A) is',np.shape(spectrum_A))
#print(spectrum_A[0])
#print(np.shape(spectrum_A[0]))
#print(spectrum_A)
#pdb.set_trace()
#print('spectrum_A is',spectrum_A)
return spectrum_cgs, spectrum_Hz, spectrum_A, spectrum_mjy, spectrum_phot
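# A hedged usage sketch (illustrative numbers): apparent flux density of a 10,000 K
# blackbody photosphere of 1e4 solar radii seen from 10 Mpc, with no extinction.
example_wavelengths = np.linspace(1e-8, 3e-6, num=500)  # metres
example_spectra = black_body_flux_density(1e4, example_wavelengths, type='P',
distance_pc=1e7, Radius=1e4)
example_cgs, example_Hz, example_A, example_mjy, example_phot = example_spectra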
h_cgs=const.h.cgs.value
c_cgs=const.c.cgs.value
kB_cgs=const.k_B.cgs.value
@numba.jit(nopython=True)#, parallel=True)
def black_body_flux_density_fast(Temp,wavelength,formula_type='P',verbose=False,distance_pc=None,Radius=None,Ebv=None,R_ext=None,redshift=0,plot=False,R_one_per_T=True,h_cgs=h_cgs,c_cgs=c_cgs,kB_cgs=kB_cgs):
"""Description: Given a temperature, calculates a black body flux density B_lambda.
If a radius and a distance are given, calculate the apparent flux density (R/d)^2*B_lambda
Input :- Temperature [K]
- numpy array of wavelengths [m], typically np.linspace(1e-10,1e-6,num=1000)
- type of formula:
'P' Planck
'RJ' Rayleigh-Jeans approximation
- Radius (optional) in solar radius
- distance (optional) in pc
- Ebv: (optional, default is none) extinction to APPLY to the theoretical bb spectrum
- redshift: (optional, default is none) z to apply to the theoretical spectrum
Output :spectrum_A: wavelength [m], Emittance in erg/sec/cm^2/Ang (lambda), 1e-8*Emittance (flux density) in erg/sec/cm^2/cm(lambda)
Tested : ?
By : <NAME> 2019
URL :
Example: black_body_spectrum = black_body_flux_density.black_body_flux_density_fast(Temp, wavelengths, 'P')
Reliable: 2
"""
wavelength_in_cm=wavelength*1e2 # wavelength in cgs
# wavelength_in_cm = wavelength_in_cm.astype(float)
#print('wavelength_in_cm',wavelength_in_cm)
# import pdb;pdb.set_trace()
nu=c_cgs/wavelength_in_cm #frequency in Hz (because c is in cm/s and wavelength in cm)
# if (Radius is not None and distance_pc is not None):
# print("hello")
# R_pc=distances_conversions.solar_radius_to_pc(Radius)
# coeff=(R_pc/distance_pc)**2
# if isinstance(Temp,np.ndarray) is True and R_one_per_T is True:
# coeffx = (R_pc / distance_pc)
# coeff=coeffx[:,np.newaxis]
# #print('np.shape(coeff) is',np.shape(coeff))
# else:
# if verbose==True:
# print('the radius or distance or both were not specified')
coeff=1.
#print coeff
#pdb.set_trace()
# if formula_type == 'P':
# if verbose==True:
# print('formula used for black body: Planck')
# #b_cgs=h_cgs*c_cgs/(wavelength_in_cm*kB_cgs*Temp)
# if verbose == True:
# #print('b_cgs is', b_cgs)
# #print('be aware that {0} elements in the exponent of the Planck formula lead to an infinite exponent'.format(np.shape(np.exp(b_cgs)[np.isinf(np.exp(b_cgs))==True])[0]))
# print('denom shape is',np.shape(h_cgs*c_cgs/(wavelength_in_cm*kB_cgs*Temp)))
# if isinstance(Temp,np.ndarray) is True:
# import pdb;pdb.set_trace()
# #E_cgs=np.zeros((np.shape(Temp)[0],np.shpe(wavelength)[0]))
# #print(np.shape(wavelength_in_cm[np.newaxis,:]))
# #print(np.shape(kB_cgs * Temp[:,np.newaxis]))
# #print(np.shape(coeff * 2 * math.pi * h_cgs * c_cgs ** 2 ))
# #print(np.shape(coeff * 2 * math.pi * h_cgs * c_cgs ** 2 / (np.power(wavelength_in_cm[np.newaxis,:],5) * (np.exp(h_cgs * c_cgs / (np.float64(wavelength_in_cm[np.newaxis,:]) * kB_cgs * Temp[:,np.newaxis])) - 1.0))))
# E_cgs = coeff * 2 * math.pi * h_cgs * c_cgs ** 2 / (np.power(wavelength_in_cm[np.newaxis,:],5) * (np.exp(h_cgs * c_cgs / (np.float64(wavelength_in_cm[np.newaxis,:]) * kB_cgs * Temp[:,np.newaxis])) - 1.0))
# E_Hz = coeff * 2 * math.pi * h_cgs * nu[np.newaxis,:] ** 3 / (c_cgs ** 2 * (np.exp(h_cgs * nu[np.newaxis,:] / (kB_cgs * Temp[:,np.newaxis])) - 1.0)) # this is the planck formula in Hz ()
# E_A = E_cgs * 1e-8 # because cm-1 =(1e8 A)-1
# E_mjy = 1e-26 * E_Hz # because 1Jy=1e-26 J/(sec*m^2*Hz) and 1J=1e7erg
# E_phot = coeff * 2 * math.pi * nu[np.newaxis,:] ** 2 / (c_cgs ** 2 * (np.exp(h_cgs * nu[np.newaxis,:] / (kB_cgs * Temp[:,np.newaxis])) - 1.0))
# else:
#print('coeff is',coeff)
#pdb.set_trace()
E_cgs=coeff*2*math.pi*h_cgs*c_cgs**2/(wavelength_in_cm**5 *(np.exp(h_cgs*c_cgs/(wavelength_in_cm*kB_cgs*Temp)) - 1.0))
E_Hz=coeff*2*math.pi*h_cgs*nu**3/(c_cgs**2*(np.exp(h_cgs*nu/(kB_cgs*Temp))-1.0)) #this is the planck formula in Hz ()
E_A=E_cgs*1e-8 # because cm-1 =(1e8 A)-1
E_mjy=1e-26*E_Hz # because 1Jy=1e-26 J/(sec*m^2*Hz) and 1J=1e7erg
E_phot=coeff*2*math.pi*nu**2/(c_cgs**2*(np.exp(h_cgs*nu/(kB_cgs*Temp))-1.0))
# elif formula_type.lower() == 'rj':
# if verbose == True:
# print('formula used for black body: Rayleigh-Jeans')
# E_cgs=coeff*2*math.pi*c_cgs*kB_cgs*Temp/wavelength_in_cm**4
# E_Hz=coeff*2*math.pi*kB_cgs*Temp*(nu/c_cgs)**2
# E_A = E_cgs * 1e-8 # because cm-1 =(1e8 A)-1
# E_mjy = 1e-26 * E_Hz # because 1Jy=1e-26 J/(sec*m^2*Hz) and 1J=1e7erg
* @param {number} h=fh - height to draw
# * @returns {self}
# '''
# drawGelFragment(name, fx, fy, fw, fh, x, y, w = fw, h = fh) { // TODO: Check
# // self.ctx.drawImage(self.Gel[name], fx, fy, fw, fh, x, y, w, h)
# return self
# },
# ''' Creates a texture from a gel
# * @param {string} gelname - Gel name
# * @param {string} repeat='repeat' - Repetition (repeat/no-repeat)
# * @returns {self}
# '''
# makeTexture(gelname, repeat = 'repeat') { // repeat/no-repeat
# return self.ctx.createPattern(self.Gel[gelname], repeat)
# },
# // Input
# ''' Data input window
# * @param {string} text - Window title text
# * @param {string} [def] - Default text
# * @returns {self}
# '''
# input(text, def) {
# const tmp = prompt(text, def); // eslint-disable-line
# return Number(tmp) || tmp
# },
# // Output
# ''' Print text to the screen
# * @returns {self}
# '''
# println(...text) {
# const p = document.getElementById('p')
# p.style = 'position:fixed;top:0px;left:0px;width:100%;height:100%;-webkit-user-select:none; pointer-events: none;'
# p.innerHTML += text + '<br/>'
# return self
# },
# // Sound
# ''' Play a sound
# * @param {string} file - Sound file
# * @param {bool} loop - Loop it?
# * @param {string} channel=0 - Channel
# * @returns {self}
# '''
# playSound(file, loop = False, channel = 0) {
# if (!self.Player[0]) {
# console.warn('Sound playback is not supported on your platform!')
# return self
# }
# if (self.Player[channel] === undefined) {
# const p = document.createElement('audio')
# p.id = 'player' + channel
# document.getElementById('audio').appendChild(p)
# self.Player[channel] = document.getElementById('player' + channel)
# }
# self.Player[channel].setAttribute('src', file)
# self.Player[channel].setAttribute('loop', Number(loop))
# self.Player[channel].play()
# return self
# },
# ''' Pause sound playback on a channel
# * @param {number} channel=-1 - Channel (-1 to stop on all channels)
# * @returns {self}
# '''
# pauseSound(channel = -1) {
# if (!self.Player[0]) return self
# if (channel == -1) {
# for (const ch of self.Player) {
# ch.pause()
# }
# return self
# }
# if (self.Player[channel] === undefined) {
# self.debug('There is no player on this channel')
# return False
# }
# self.Player[channel].pause()
# return self
# },
# // Math
# ''' Returns the square root of a number
# * @param {number} number - Number
# * @returns {number}
# '''
# 'sqrt': number => Math.sqrt(number),
# ''' Returns a random number
# * @param {number} min - From
# * @param {number} max - To
# * @returns {number}
# '''
# 'random': (min, max) => Math.floor(Math.random() * max) + min,
# ''' Returns the sine of an angle
# * @param {number} angle - Angle in radians
# * @returns {number}
# '''
# 'sin': angle => Math.sin(angle),
# ''' Returns the cosine of an angle
# * @param {number} angle - Angle in radians
# * @returns {number}
# '''
# 'cos': angle => Math.cos(angle),
# ''' Returns the tangent of an angle
# * @param {number} angle - Angle in radians
# * @returns {number}
# '''
# 'tan': angle => Math.tan(angle),
# ''' Returns the cotangent of an angle
# * @param {number} angle - Angle in radians
# * @returns {number}
# '''
# 'ctg': angle => 1 / Math.tan(angle),
# ''' Returns the arcsine of an angle (in radians)
# * @param {number} number - Angle in radians
# * @returns {number}
# '''
# 'asin': number => Math.asin(number),
# ''' Returns the arccosine of an angle (in radians)
# * @param {number} number - Angle in radians
# * @returns {number}
# '''
# 'acos': number => Math.acos(number),
# ''' Returns the arctangent of an angle (in radians)
# * @param {number} number - Angle in radians
# * @returns {number}
# '''
# 'atan': number => Math.atan(number),
# ''' Returns the remainder of dividing two numbers
# * @param {number} x - Dividend
# * @param {number} y - Divisor
# * @returns {number}
# '''
# 'mod': (x, y) => x % y,
# ''' Returns the absolute value of a number
# * @param {number} number - Number
# * @returns {number}
# '''
# 'abs': number => Math.abs(number),
# ''' Raises a number to a power
# * @param {number} number - Number
# * @param {number} power - Power
# * @returns {number}
# '''
# 'pow': (number, power) => Math.pow(number, power),
# ''' Returns the natural logarithm of a number
# * @param {number} number - Number
# * @returns {number}
# '''
# 'ln': number => Math.log(number),
# ''' Returns the number e raised to a power
# * @param {number} power - Power
# * @returns {number}
# '''
# 'exp': power => Math.exp(power),
# ''' Returns the value of a variable limited to a range
# * @param {number} variable - Initial value
# * @param {number} min - Minimum (lower bound)
# * @param {number} max - Maximum (upper bound)
# * @returns {number}
# '''
# limit(variable, min, max) {
# return variable <= min ? min : max
# },
# ''' Returns the minimum value of the arguments
# * @returns {number}
# '''
# 'min': (...a) => Math.min(...a),
# ''' Returns the maximum value of the arguments
# * @returns {number}
# '''
# 'max': (...a) => Math.max(...a),
# ''' Converts degrees to radians
# * @param {number} deg - Value in degrees
# * @returns {number} Radians
# '''
# rad(deg) {
# if (deg === 90) return self.PI / 2
# if (deg === 270) return 3 * self.PI / 2
# return deg * self.DEG2RAD
# },
# ''' Converts radians to degrees
# * @param {number} rad - Value in radians
# * @returns {number} Degrees
# '''
# deg(rad) {
# return rad * self.RAD2DEG
# },
# // String functions
# ''' Returns the length of a string/array
# * @param {string} str - String/array
# * @returns {number}
# '''
# 'len': str => str.length,
# ''' Converts a number/value to a string
# * @param {*} num - A number or other value
# * @returns {string}
# '''
# 'str': num => String(num),
# ''' Converts a string to a number (or returns NaN if that is not possible)
# * @param {string} str - String containing a number
# * @returns {number}
# '''
# 'val': str => Number(str),
# ''' Converts a string to a number (or returns NaN if that is not possible)
# * Better to use val
# * @param {string} str - String containing a number
# * @param {number} [system=10] - Number system (radix)
# * @returns {number} Int
# '''
# int(str, system = 10) {
# return parseInt(str, system)
# },
# ''' Converts a string to a floating-point number (or returns NaN if that is not possible)
# * @param {string} str - String containing a number
# * @returns {number} Float
# '''
# 'float': str => parseFloat(str),
# ''' Converts all characters of a string to UPPER CASE
# * @param {string} str - String
# * @returns {string}
# '''
# 'upper': str => str.toUpperCase(),
# ''' Converts all characters of a string to lower case
# * @param {string} str - String
# * @returns {string}
# '''
# 'lower': str => str.toLowerCase(),
# ''' Returns part of a string
# * @param {string} str - String
# * @param {number} pos - Start of the selection
# * @param {number} len - Length of the selection
# * @returns {string}
# '''
# 'mid': (str, pos, len) => str.substr(pos, len),
# ''' Returns the character for a given code. Several codes can be passed
# * @param {number} code - Character code(s)
# * @returns {string}
# '''
# 'chr': (...codes) => String.fromCharCode(...codes), // code to string
# ''' Returns the code of a character
# * @param {string} str - String
# * @param {number} [pos=0] - Position of the character in the string
# * @returns {number}
# '''
# 'asc': (str, pos = 0) => str.charCodeAt(pos), // string to code
# ''' Splits a string and returns an array of the parts
# * @param {string} str - String
# * @param {string} char - Character/regular expression to split on
# * @returns {array}
# '''
# 'split': (str, char) => str.split(char),
# ''' Joins an array into a string, separating elements with a separator
# * @param {array} array - array
# * @param {string} [separator=' '] - separator
# * @returns {string}
# '''
# 'join': (array, separator = ' ') => array.join(separator),
# ''' Returns a string with part of it replaced
# * @param {string} str - String
# * @param {string} reg - String/regular expression to replace
# * @param {string} to - What to replace it with
# * @param {bool} [all=False] - Replace all occurrences
# * @returns {string}
# '''
# replace(str, reg, to, all = False) {
# if (all) return str.replace(new RegExp(reg, 'g'))
# return str.replace(reg, to)
# },
# // Working with local data
# ''' Save data to storage
# * @param {string} name - Name of the storage cell
# * @param {*} _data - Data
# * @returns {self}
# '''
# localSaveData(name, _data) {
# const data = typeof (_data) == 'object' ? self.toJSON(_data) : _data
# window.localStorage.setItem(name, data)
# return self
# },
# ''' Get data from storage
# * @param {string} name - Name of the storage cell
# * @returns {object}
# '''
# localReadData(name) {
# /* try {
# return self.parseJSON(window.localStorage.getItem(name))
# } catch (e) {
# return window.localStorage.getItem(name)
# }'''
# },
# ''' Returns an object from a JSON string
# * @param {string} json - JSON string
# * @returns {object}
# '''
# 'parseJSON': (json) => {
# try {
# return JSON.parse(json)
# } catch (e) {
# return null
# }
# },
# ''' Returns a JSON string from an object
# * @param {object} object - Object
# * @param {function} [f=null] -
<= 1.:
dp_over_p = (M ** 2. / 5. + 1.) ** 3.5 - 1.
else:
dp_over_p = (F * M ** 7.) / (7. * M ** 2. - 1.) ** 2.5 - 1.
return dp_over_p
# #############################################################################
#
# conversions between cas, mach and altitude
#
# pick any two values, and find the third
#
# #############################################################################
def cas_mach2alt(
cas,
mach,
speed_units=default_speed_units,
alt_units=default_alt_units,
):
"""
Return the altitude that corresponds to a given CAS and mach.
The speed units may be 'kt', 'mph', 'km/h', 'm/s' and 'ft/s'.
The altitude may be in feet ('ft'), metres ('m'), kilometres ('km'),
statute miles, ('sm') or nautical miles ('nm').
If the units are not specified, the units in default_units.py are used.
"""
dp = cas2dp(cas, speed_units=speed_units, press_units='pa')
dp_over_p = mach2dp_over_p(mach)
p = dp / dp_over_p
altitude = SA.press2alt(p, press_units='pa', alt_units=alt_units)
return altitude
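# A hedged usage sketch (default units of kt and ft assumed): the altitude at which
# a 280 kt CAS climb meets Mach 0.78, i.e. the climb crossover altitude.
example_crossover_alt = cas_mach2alt(280, 0.78)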
def i_cas_mach2alt(data_items):
"""
Return the altitude that corresponds to a given CAS and mach, with an
interactive interface.
"""
data_items['cas'] = _get_CAS(data_items)
cas = data_items['cas']
data_items['speed_units'] = _get_speed_units(data_items)
speed_units = data_items['speed_units']
data_items['mach'] = _get_mach(data_items)
mach = data_items['mach']
data_items['alt_units'] = _get_alt_units(data_items)
alt_units = data_items['alt_units']
print()
print ('CAS = ', cas, speed_units)
print ('Mach = ', mach)
# print 'Desired altitude units are: ', alt_units
print()
alt = cas_mach2alt(cas, mach, speed_units, alt_units)
data_items['altitude'] = alt
return_string = 'Altitude = ' + str(alt) + ' ' + alt_units
print (return_string)
def cas_alt2mach(
cas,
altitude,
speed_units=default_speed_units,
alt_units=default_alt_units,
):
"""
Return the mach that corresponds to a given CAS and altitude.
The speed units may be 'kt', 'mph', 'km/h', 'm/s' and 'ft/s'.
The altitude may be in feet ('ft'), metres ('m'), kilometres ('km'),
statute miles, ('sm') or nautical miles ('nm').
If the units are not specified, the units in default_units.py are used.
"""
dp = cas2dp(cas, speed_units=speed_units, press_units='pa')
p = SA.alt2press(altitude, alt_units=alt_units, press_units='pa')
dp_over_p = dp / p
mach = dp_over_p2mach(dp_over_p)
return mach
def i_cas_alt2mach(data_items):
"""
Return the mach that corresponds to a given CAS and altitude, using an
interactive interface.
"""
data_items['cas'] = _get_CAS(data_items)
cas = data_items['cas']
data_items['speed_units'] = _get_speed_units(data_items)
speed_units = data_items['speed_units']
data_items['altitude'] = _get_alt(data_items)
altitude = data_items['altitude']
data_items['alt_units'] = _get_alt_units(data_items)
alt_units = data_items['alt_units']
print()
print ('CAS = ', cas, speed_units)
print ('Altitude = ', altitude, alt_units)
print()
mach = cas_alt2mach(cas, altitude, speed_units, alt_units)
data_items['mach'] = mach
print ('Mach = ', mach)
def _cas_alt2mach2(
cas,
altitude,
speed_units=default_speed_units,
alt_units=default_alt_units,
):
"""
Alternative, trial variant of cas_alt2mach, using the equations from
USAF TPS notes.
The speed units may be 'kt', 'mph', 'km/h', 'm/s' and 'ft/s'.
The altitude may be in feet ('ft'), metres ('m'), kilometres ('km'),
statute miles, ('sm') or nautical miles ('nm').
If the units are not specified, the units in default_units.py are used.
"""
PR = SA.alt2press_ratio(altitude, alt_units)
cas = U.speed_conv(cas, from_units=speed_units, to_units='m/s')
if cas <= A0:
# <= 661.48 kt
mach = M.sqrt(5. * (((1. / PR) * ((1. + 0.2 * (cas / A0) ** 2.)
** 3.5 - 1.) + 1.) ** (2. / 7.) - 1.))
else:
raise ValueError('CAS too high.')
return mach
def mach_alt2cas(
mach,
altitude,
alt_units=default_alt_units,
speed_units=default_speed_units,
):
"""
Return the calibrated Air Speed that corresponds to a given mach and
altitude.
The speed units may be 'kt', 'mph', 'km/h', 'm/s' and 'ft/s'.
The altitude may be in feet ('ft'), metres ('m'), kilometres ('km'),
statute miles, ('sm') or nautical miles ('nm').
If the units are not specified, the units in default_units.py are used.
"""
p = SA.alt2press(altitude, alt_units=alt_units, press_units='pa')
dp_over_p = mach2dp_over_p(mach)
dp = dp_over_p * p
cas = dp2cas(dp, press_units='pa', speed_units=speed_units)
return cas
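# Consistency sketch (illustrative, not part of the original module):
# cas_mach2alt, cas_alt2mach and mach_alt2cas are mutual inverses, so a
# round trip through them should reproduce the starting mach and CAS to
# within numerical precision.
def _cas_mach_alt_roundtrip(cas=250.0, mach=0.78):
    altitude = cas_mach2alt(cas, mach)
    # both returned values should be (approximately) equal to the inputs
    return cas_alt2mach(cas, altitude), mach_alt2cas(mach, altitude)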
def i_mach_alt2cas(data_items):
"""
Return the calibrated Air Speed that corresponds to a given mach and
altitude, using an interactive interface.
"""
data_items['mach'] = _get_mach(data_items)
mach = data_items['mach']
data_items['altitude'] = _get_alt(data_items)
altitude = data_items['altitude']
data_items['alt_units'] = _get_alt_units(data_items)
alt_units = data_items['alt_units']
data_items['speed_units'] = _get_speed_units(data_items)
speed_units = data_items['speed_units']
print()
print ('Altitude = ', altitude, alt_units)
print ('Mach = ', mach)
print()
cas = mach_alt2cas(mach, altitude, alt_units, speed_units)
data_items['cas'] = cas
return_string = 'CAS = ' + str(cas) + ' ' + speed_units
print (return_string)
# #############################################################################
#
# Mach and temperature to TAS
#
# and
#
# TAS and temperature to Mach
#
# #############################################################################
def mach2tas(
mach,
temp='std',
altitude='blank',
temp_units=default_temp_units,
alt_units=default_alt_units,
speed_units=default_speed_units,
):
"""
Return the TAS for a given mach number. The temperature or altitude
must also be specified. If the altitude is specified, the temperature
is assumed to be standard. If both the altitude and temperature are
specified, the altitude input is ignored.
The speed units may be 'kt', 'mph', 'km/h', 'm/s' and 'ft/s'.
The altitude may be in feet ('ft'), metres ('m'), kilometres ('km'),
statute miles, ('sm') or nautical miles ('nm').
The temperature may be in deg C, F, K or R. The temperature defaults to std
temperature if it is not input.
If the units are not specified, the units in default_units.py are used.
Examples:
Determine the TAS in kt at 0.8 mach at a temperature of
-15 deg C:
>>> mach2tas(0.8, -15)
500.87884108468597
Determine the TAS in kt at 0.8 mach at 30,000 ft, assuming
standard temperature:
>>> mach2tas(0.8, altitude = 30000)
471.45798523415107
Determine the TAS in mph at 0.8 mach at 5000 m, assuming
standard temperature:
>>> mach2tas(0.8, altitude = 5000, alt_units = 'm', speed_units = 'mph')
573.60326790383715
Determine the TAS in km/h at 0.4 mach at a temperature of
300 deg K:
>>> mach2tas(0.4, 300, temp_units = 'K', speed_units = 'km/h')
499.99796329569176
"""
if temp == 'std':
if altitude != 'blank':
temp = SA.alt2temp(altitude, temp_units=temp_units,
alt_units=alt_units)
else:
raise ValueError('At least one of the temperature or altitude must be specified.')
tas = mach * SA.temp2speed_of_sound(temp, temp_units, speed_units)
return tas
def i_mach2tas(data_items):
"""
Return the TAS that corresponds to a given Mach, altitude, and temperature
using an interactive interface.
"""
data_items['mach'] = _get_mach(data_items)
mach = data_items['mach']
data_items['altitude'] = _get_alt(data_items)
altitude = data_items['altitude']
data_items['alt_units'] = _get_alt_units(data_items)
alt_units = data_items['alt_units']
data_items['temp_units'] = _get_temp_units(data_items)
temp_units = data_items['temp_units']
data_items['temp'] = _get_temp(data_items)
temp = data_items['temp']
data_items['speed_units'] = _get_speed_units(data_items)
speed_units = data_items['speed_units']
print()
print ('Mach = ', mach)
print ('Altitude = ', altitude, alt_units)
print ('Temperature =', temp, temp_units)
print()
tas = mach2tas(
mach,
temp,
altitude,
temp_units,
alt_units,
speed_units,
)
data_items['tas'] = tas
print ('TAS = ', tas, speed_units)
def tas2mach(
tas,
temp='std',
altitude='blank',
temp_units=default_temp_units,
alt_units=default_alt_units,
speed_units=default_speed_units,
):
"""
Return the mach number for a given TAS. The temperature or altitude
must also be specified. If the altitude is specified, the temperature
is assumed to be standard. If both the altitude and temperature are
specified, the altitude input is ignored.
The speed units may be 'kt', 'mph', 'km/h', 'm/s' and 'ft/s'.
The altitude may be in feet ('ft'), metres ('m'), kilometres ('km'),
statute miles, ('sm') or nautical miles ('nm').
The temperature may be in deg C, F, K or R. The temperature defaults to std
temperature if it is not input.
If the units are not specified, the units in default_units.py are used.
Examples:
Determine the mach number for a TAS of 500 kt at a temperature of
-15 deg C:
>>> tas2mach(500, -15)
0.79859632148519943
Determine the mach number for a TAS of 500 kt at a temperature of
0 deg F:
>>> tas2mach(500, 0, temp_units = 'F')
0.80292788758764277
Determine the mach number for a TAS of 500 kt at an altitude of
10,000 ft, assuming standard temperature:
>>> tas2mach(500, altitude = 10000)
0.78328945665870209
Determine the mach number for a TAS of 400 mph at an altitude of
5000 m, assuming standard temperature:
>>> tas2mach(400, altitude = 5000, speed_units = 'mph', alt_units = 'm')
0.55787687746166581
"""
if temp == 'std':
if altitude != 'blank':
temp = SA.alt2temp(altitude, temp_units=temp_units,
alt_units=alt_units)
else:
raise ValueError('At least one of the temperature or altitude must be specified.')
mach = tas / SA.temp2speed_of_sound(temp, temp_units, speed_units)
return mach
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# webull: wrapper around unofficial webull APIs
# https://github.com/tedchou12/webull.git
# Copyright 2019-2021 vin8tech
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import os
import time
import logging
import sys
from datetime import datetime
import dateutil.parser
from pandas import DataFrame
from webull import webull as wb
from webull import paper_webull
import copy
from webull.streamconn import StreamConn
from kinetick.enums import COMMON_TYPES
from kinetick.models import Contract
from kinetick.utils import utils, asynctools
# ---------------------------------------------
LOGLEVEL = os.getenv('LOGLEVEL') or logging.getLevelName(logging.INFO)
utils.create_logger('webull-client', LOGLEVEL)
# =============================================
class Webull:
# -----------------------------------------
@staticmethod
def roundClosestValid(val, res=0.01, decimals=None):
""" round to closest resolution """
if val is None:
return None
if decimals is None and "." in str(res):
decimals = len(str(res).split('.')[1])
return round(round(val / res) * res, decimals)
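# Usage sketch (comments only, illustrative): snap a price to a valid tick size.
#   Webull.roundClosestValid(101.567, res=0.05)  -> 101.55
#   Webull.roundClosestValid(101.567)            -> 101.57   (default res=0.01)
#   Webull.roundClosestValid(None)               -> None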
# -----------------------------------------
def __init__(self, paper=False):
"""Initialize a new webull object."""
self.streamConnection = StreamConn(debug_flg=LOGLEVEL.upper() == "DEBUG")
self.streamConnection.price_func = self.handleServerEvents
self.streamConnection.order_func = self.handleServerEvents
self.username = ""
self.password = ""
self.paper = paper
if not paper:
self.wb = wb()
else:
self.wb = paper_webull()
self.connected = False
self.started = False
self.time = 0
self.commission = 0
self.orderId = int(time.time()) - 1553126400 # default
self.default_account = None
# auto-construct for every contract/order
self.tickerIds = {0: "SYMBOL"}
self.contracts = {}
self.orders = {}
self.account_orders = {}
self.account_symbols_orders = {}
self.symbol_orders = {}
self._accounts = {}
self._positions = {}
self._portfolios = {}
self._contract_details = {} # multiple expiry/strike/side contracts
self.contract_details = {}
self.localSymbolExpiry = {}
# do not reconnect if disconnected by user
# only try and reconnect if disconnected by network/other issues
self._disconnected_by_user = False
# -------------------------------------
self.log = logging.getLogger('webull-client') # get logger
# -------------------------------------
# holds market data
tickDF = DataFrame({
"datetime": [0], "buy": [0], "buysize": [0],
"sell": [0], "sellsize": [0], "last": [0], "lastsize": [0]
})
tickDF.set_index('datetime', inplace=True)
self.marketData = {0: tickDF} # idx = tickerId
# holds market quote data
quoteDF = DataFrame({
"datetime": [0], "bid": [0], "bidsize": [0],
"ask": [0], "asksize": [0],
"open": [0], "high": [0], "low": [0], "close": [0],
"volume": [0], "vwap": [0], "symbol": [0]
})
quoteDF.set_index('datetime', inplace=True)
self.marketQuoteData = {0: quoteDF} # idx = tickerId
# holds orderbook data
l2DF = DataFrame(index=range(5), data={
"bid": 0, "bidsize": 0,
"ask": 0, "asksize": 0
})
# holds time of sale
# holds quote
self.marketDepthData = {0: l2DF} # idx = tickerId
# trailing stops
self.trailingStops = {}
# "tickerId" = {
# orderId: ...
# lastPrice: ...
# trailPercent: ...
# trailAmount: ...
# quantity: ...
# }
# triggerable trailing stops
self.triggerableTrailingStops = {}
# "tickerId" = {
# parentId: ...
# stopOrderId: ...
# triggerPrice: ...
# trailPercent: ...
# trailAmount: ...
# quantity: ...
# }
# holds options data
optionsDF = DataFrame({
"datetime": [0], "oi": [0], "volume": [0], "underlying": [0], "iv": [0],
"bid": [0], "bidsize": [0], "ask": [0], "asksize": [0], "last": [0], "lastsize": [0],
# opt field
"price": [0], "dividend": [0], "imp_vol": [0], "delta": [0],
"gamma": [0], "vega": [0], "theta": [0],
"last_price": [0], "last_dividend": [0], "last_imp_vol": [0], "last_delta": [0],
"last_gamma": [0], "last_vega": [0], "last_theta": [0],
"bid_price": [0], "bid_dividend": [0], "bid_imp_vol": [0], "bid_delta": [0],
"bid_gamma": [0], "bid_vega": [0], "bid_theta": [0],
"ask_price": [0], "ask_dividend": [0], "ask_imp_vol": [0], "ask_delta": [0],
"ask_gamma": [0], "ask_vega": [0], "ask_theta": [0],
})
optionsDF.set_index('datetime', inplace=True)
self.optionsData = {0: optionsDF} # idx = tickerId
# historical data container
self.historicalData = {} # idx = symbol
self.utc_history = False
# register exit
atexit.register(self.disconnect)
# fire connected/disconnected callbacks/errors once per event
self.connection_tracking = {
"connected": False,
"disconnected": False,
"errors": []
}
# -----------------------------------------
def log_msg(self, title, msg):
# log handler msg
logmsg = copy.copy(msg)
if hasattr(logmsg, "contract"):
logmsg.contract = self.contractString(logmsg.contract)
self.log.info("[" + str(title).upper() + "]: %s", str(logmsg))
# -----------------------------------------
def connect(self, username='<EMAIL>', password='<PASSWORD>', stream=False):
""" login to webull """
# connect
if not self.connected:
self.log.info("[CONNECTING TO WEBULL]")
if not self.paper:
self.wb.login(username, password)
if stream:
self.streamConnect()
self.connected = True
self._disconnected_by_user = False
self.username = username
self.password = password
self.log.info("[connected to webull]")
# time.sleep(1)
self.callbacks(caller="handleConnectionOpened", msg="<connectionOpened>")
else:
raise Exception("Already connected! Please disconnect to connect again.")
# -----------------------------------------
def disconnect(self):
if self.connected and self.wb is not None:
self.log.info("[DISCONNECTING FROM WEBULL]")
self.wb.logout()
if self.streamConnection and self.started:
self.streamConnection.client_streaming_quotes.loop_stop()
self.streamConnection.client_streaming_quotes.disconnect()
self._disconnected_by_user = True
self.connected = False
self.started = False
# -----------------------------------------
def getServerTime(self):
""" get the current time on Server """
self.time = datetime.utcnow()
# -----------------------------------------
# -----------------------------------------
def getAccountDetails(self):
""" get the current user details """
self.wb.get_account()
# -----------------------------------------
@staticmethod
def contract_to_dict(contract):
"""Convert Contract object to a dict containing any non-default values."""
default = Contract()
return {field: val for field, val in vars(contract).items() if val != getattr(default, field, None)}
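# Illustrative note: only fields that differ from a freshly constructed
# Contract() survive, so (assuming kinetick's Contract exposes a `symbol`
# field, as contract_to_tuple below suggests) a contract with just the symbol
# set would serialize to {'symbol': 'AAPL'} rather than every attribute.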
# -----------------------------------------
@staticmethod
def contract_to_tuple(contract):
return (contract.symbol, contract.sec_type,
contract.exchange, contract.currency, contract.expiry,
contract.strike, contract.right)
# -----------------------------------------
def registerContract(self, contract):
""" used for when callback receives a contract
that isn't found in local database """
if contract.exchange == "":
return
"""
if contract not in self.contracts.values():
contract_tuple = self.contract_to_tuple(contract)
self.createContract(contract_tuple)
if self.tickerId(contract) not in self.contracts.keys():
contract_tuple = self.contract_to_tuple(contract)
self.createContract(contract_tuple)
"""
if self.getConId(contract) == 0:
contract_tuple = self.contract_to_tuple(contract)
self.createContract(contract_tuple)
# -----------------------------------------
# Start event handlers
# -----------------------------------------
def handleErrorEvents(self, msg):
""" logs error messages """
self.log.error("[#%s] %s" % (msg['errorCode'], msg['errorMsg']))
self.callbacks(caller="handleError", msg=msg)
# -----------------------------------------
def handleServerEvents(self, topic, data, msg=None):
if isinstance(topic, str):
if topic == "error":
self.handleErrorEvents(msg)
elif topic == "CONNECTION_CLOSED":
self.handleConnectionClosed(msg)
elif topic['type'] in [105, 106, 102]:
tickdata = {'tickerId': topic['tickerId'], 'data': data}
# mktdata = self.wb.get_quote(tId=topic['tickerId'])
self.log.debug('MSG %s', msg)
""" dispatch msg to the right handler """
if topic['type'] == 105:
self.handleTickPrice(msg=tickdata)
# self.handleTickSize(msg=tickdata)
# self.handleTickString(msg=tickdata)
elif topic['type'] == 106:
self.handleMarketDepth(msg=tickdata)
elif topic['type'] == 'ohlc':
self.handleHistoricalData(msg=data, tickerId=topic['tickerId'], completed=topic['completed'])
elif topic['type'] == 'quote':
quote_data = {'tickerId': data['tickerId'], 'data': data}
self.handleTickPrice(msg=quote_data)
# -----------------------------------------
# generic callback function - can be used externally
# -----------------------------------------
def callbacks(self, caller, msg, **kwargs):
pass
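# Usage sketch (illustrative, not part of the original client): callbacks()
# is deliberately a no-op hook, so a consumer would typically subclass Webull
# and override it to react to the events dispatched above, e.g.:
#
#   class MyWebull(Webull):
#       def callbacks(self, caller, msg, **kwargs):
#           if caller == "handleConnectionOpened":
#               self.log.info("stream connected: %s", msg)
#           elif caller == "handleError":
#               self.log.error("broker error: %s", msg)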
# -----------------------------------------
# Start admin handlers
# -----------------------------------------
def handleConnectionState(self, msg):
self.connected = not (msg.typeName == "error")
if self.connected:
self.connection_tracking["errors"] = []
self.connection_tracking["disconnected"] = False
if not self.connection_tracking["connected"]:
self.log.info("[CONNECTION TO WEBULL ESTABLISHED]")
self.connection_tracking["connected"] = True
self.callbacks(caller="handleConnectionOpened", msg="<connectionOpened>")
else:
self.connection_tracking["connected"] = False
if not self.connection_tracking["disconnected"]:
self.connection_tracking["disconnected"] = True
self.log.info("[CONNECTION TO WEBULL LOST]")
# -----------------------------------------
def handleConnectionClosed(self, msg):
self.connected = False
self.started = False
self.callbacks(caller="handleConnectionClosed", msg=msg)
# retry to connect
# if not self._disconnected_by_user:
# self.reconnect()
# -----------------------------------------
def handleContractDetails(self, msg, end=False):
""" handles contractDetails and contractDetailsEnd """
if end:
# mark as downloaded
self._contract_details[msg.reqId]['downloaded'] = True
self._contract_details[msg.reqId]['tickerId'] = msg.reqId
# move details from temp to permanent collector
self.contract_details[msg.reqId] = self._contract_details[msg.reqId]
del self._contract_details[msg.reqId]
# adjust fields if multi contract
if len(self.contract_details[msg.reqId]["contracts"]) > 1:
self.contract_details[msg.reqId]["m_contractMonth"] = ""
# m_summary should hold closest expiration
expirations = self.getExpirations(self.contracts[msg.reqId], expired=0)
contract = self.contract_details[msg.reqId]["contracts"][-len(expirations)]
self.contract_details[msg.reqId]["m_summary"] = vars(contract)
else:
self.contract_details[msg.reqId]["m_summary"] = vars(
self.contract_details[msg.reqId]["contracts"][0])
# update local db with correct contractString
for tid in self.contract_details:
oldString = self.tickerIds[tid]
newString = self.contractString(self.contract_details[tid]["contracts"][0])
if len(self.contract_details[msg.reqId]["contracts"]) > 1:
self.tickerIds[tid] = newString
if newString != oldString:
if oldString in self._portfolios:
self._portfolios[newString] = self._portfolios[oldString]
if oldString in self._positions:
self._positions[newString] = self._positions[oldString]
# fire callback
self.callbacks(caller="handleContractDetailsEnd", msg=msg)
# exit
return
# continue...
# collect data on all contract details
# (including those with multiple expiry/strike/sides)
details = vars(msg.contractDetails)
contract = details["m_summary"]
if msg.reqId in self._contract_details:
details['contracts'] = self._contract_details[msg.reqId]["contracts"]
else:
details['contracts'] = []
details['contracts'].append(contract)
details['downloaded'] = False
self._contract_details[msg.reqId] = details
# add details to local symbol list
if contract.m_localSymbol not in self.localSymbolExpiry:
self.localSymbolExpiry[contract.m_localSymbol] = details["m_contractMonth"]
# add contract's multiple expiry/strike/sides to class collectors
contractString = self.contractString(contract)
tickerId = self.tickerId(contractString)
self.contracts[tickerId] = contract
# continue if this is a "multi" contract
if tickerId == msg.reqId:
self._contract_details[msg.reqId]["m_summary"] = vars(contract)
else:
# print("+++", tickerId, contractString)
self.contract_details[tickerId] = details.copy()
self.contract_details[tickerId]["m_summary"] = vars(contract)
self.contract_details[tickerId]["contracts"] = [contract]
# fire callback
self.callbacks(caller="handleContractDetails", msg=msg)
# -----------------------------------------
# Account handling
vertex_ys = []
bounding_confidences = []
bounding_importance_fracs = []
dominant_blues = []
dominant_greens = []
dominant_reds = []
dominant_pixel_fracs = []
dominant_scores = []
label_descriptions = []
label_scores = []
nf_count = 0
nl_count = 0
for pet in test_data['PetID'].values:
try:
with open('../input/petfinder-adoption-prediction/test_metadata/' + pet + '-1.json', 'r') as f:
data = json.load(f)
vertex_x = data['cropHintsAnnotation']['cropHints'][0]['boundingPoly']['vertices'][2]['x']
vertex_xs.append(vertex_x)
vertex_y = data['cropHintsAnnotation']['cropHints'][0]['boundingPoly']['vertices'][2]['y']
vertex_ys.append(vertex_y)
bounding_confidence = data['cropHintsAnnotation']['cropHints'][0]['confidence']
bounding_confidences.append(bounding_confidence)
bounding_importance_frac = data['cropHintsAnnotation']['cropHints'][0].get('importanceFraction', -1)
bounding_importance_fracs.append(bounding_importance_frac)
dominant_blue = data['imagePropertiesAnnotation']['dominantColors']['colors'][0]['color'].get('blue',-1)
dominant_blues.append(dominant_blue)
dominant_green = data['imagePropertiesAnnotation']['dominantColors']['colors'][0]['color'].get('green',-1)
dominant_greens.append(dominant_green)
dominant_red = data['imagePropertiesAnnotation']['dominantColors']['colors'][0]['color'].get('red',-1)
dominant_reds.append(dominant_red)
dominant_pixel_frac = data['imagePropertiesAnnotation']['dominantColors']['colors'][0]['pixelFraction']
dominant_pixel_fracs.append(dominant_pixel_frac)
dominant_score = data['imagePropertiesAnnotation']['dominantColors']['colors'][0]['score']
dominant_scores.append(dominant_score)
if data.get('labelAnnotations'):
label_description = data['labelAnnotations'][0]['description']
label_descriptions.append(label_description)
label_score = data['labelAnnotations'][0]['score']
label_scores.append(label_score)
else:
nl_count += 1
label_descriptions.append('nothing')
label_scores.append(-1)
except FileNotFoundError:
nf_count += 1
vertex_xs.append(-1)
vertex_ys.append(-1)
bounding_confidences.append(-1)
bounding_importance_fracs.append(-1)
dominant_blues.append(-1)
dominant_greens.append(-1)
dominant_reds.append(-1)
dominant_pixel_fracs.append(-1)
dominant_scores.append(-1)
label_descriptions.append('nothing')
label_scores.append(-1)
print(nf_count)
test_data['vertex_x'] = vertex_xs
test_data['vertex_y'] = vertex_ys
test_data['bounding_confidence'] = bounding_confidences
test_data['bounding_importance'] = bounding_importance_fracs
test_data['dominant_blue'] = dominant_blues
test_data['dominant_green'] = dominant_greens
test_data['dominant_red'] = dominant_reds
test_data['dominant_pixel_frac'] = dominant_pixel_fracs
test_data['dominant_score'] = dominant_scores
test_data['label_description'] = label_descriptions
test_data['label_score'] = label_scores
del vertex_xs,vertex_ys,bounding_confidences,bounding_importance_fracs,dominant_blues,dominant_greens,dominant_reds,dominant_pixel_fracs,dominant_scores
del label_descriptions,label_scores,doc_sent_mag,doc_sent_score
gc.collect()
train_data['label_description'] = train_data['label_description'].astype(str)
test_data['label_description'] = test_data['label_description'].astype(str)
def get_text(df):
x=""
if df['Type']==1:
x+="dog"+" "
if df['Type']==2:
x+="cat"+" "
for i in ['Breed1',"Breed2"]:
if df[i]==0:
continue
x+=breed_dict[str(df[i])]+" "
for i in ["Color1","Color2","Color3"]:
if df[i]==0:
continue
x+=color_dict[str(df[i])]+" "
x+=df['label_description']+" "
x=x+df['Description']
return x
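# Illustrative example (hypothetical dictionary values): for a row with
# Type=1, Breed1 resolving to 'Golden Retriever' via breed_dict, Color1
# resolving to 'Brown' via color_dict, label_description='dog' and
# Description='Friendly puppy', get_text() returns
# "dog Golden Retriever Brown dog Friendly puppy".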
train_data['Description']=train_data.apply(lambda x:get_text(x),1)
test_data['Description']=test_data.apply(lambda x:get_text(x),1)
text_list = train_data['Description'].values.tolist()
text_list.extend(test_data['Description'].values.tolist())
documents = text_list
texts = [[word for word in str(document).split(' ') ] for document in documents]
w2v = Word2Vec(texts, size=128, window=7, iter=8, seed=10, workers=2, min_count=3)
w2v.wv.save_word2vec_format('w2v_128.txt')
print("w2v model done")
del w2v
gc.collect()
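# The get_embedding_matrix() used further down is not defined in this excerpt.
# A plausible sketch (assumption only), building the matrix from the
# w2v_128.txt vectors saved above, might look like this:
import numpy as np
def _get_embedding_matrix_sketch(word_index, path='w2v_128.txt', embed_size=128):
    from gensim.models import KeyedVectors
    w2v_vectors = KeyedVectors.load_word2vec_format(path)
    # row 0 is reserved for padding; unknown words stay all-zero
    matrix = np.zeros((len(word_index) + 1, embed_size))
    for word, idx in word_index.items():
        if word in w2v_vectors:
            matrix[idx] = w2v_vectors[word]
    return matrix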
embed_size = 128 # how big is each word vector
max_features = None # how many unique words to use (i.e num rows in embedding vector)
maxlen = 230 # max number of words in a question to use
## Tokenize the sentences
train_X = train_data["Description"].values
test_X = test_data["Description"].values
tokenizer = Tokenizer(num_words=max_features, filters='')
tokenizer.fit_on_texts(list(train_X)+list(test_X))
train_X = tokenizer.texts_to_sequences(train_X)
test_X = tokenizer.texts_to_sequences(test_X)
## Pad the sentences
train_X = pad_sequences(train_X, maxlen=maxlen)
test_X = pad_sequences(test_X, maxlen=maxlen)
## Get the target values
train_y = train_data['AdoptionSpeed'].values
word_index=tokenizer.word_index
features = [x for x in train_data.columns if x not in ["num_words","num_unique_words","num_stopwords","num_punctuations","mean_word_len",'label_description',"Name", 'PetID', "Description", 'AdoptionSpeed']]
cate_col=['Breed1', 'Breed2', 'Color1', 'Color2', 'Color3', 'State']
onehot_col=['Type','Gender','MaturitySize','FurLength','Vaccinated',
'Dewormed','Sterilized','Health','purebreed','Color1', 'Color2', 'Color3', ]
num_col=features
sc = StandardScaler()
data = pd.concat([train_data, test_data])
sc.fit(data[num_col])
del data
gc.collect()
train_data[num_col] = sc.transform(train_data[num_col])
test_data[num_col] = sc.transform(test_data[num_col])
train_num_feat = train_data[num_col]
test_num_feat = test_data[num_col]
train_img_feat.reset_index(inplace=True)
test_img_feat.reset_index(inplace=True)
train_img_feat.columns = ["PetID"]+["img_"+str(i) for i in range(train_img_feat.shape[1]-1)]
test_img_feat.columns = ["PetID"]+["img_"+str(i) for i in range(train_img_feat.shape[1]-1)]
del train_img_feat['PetID'], test_img_feat['PetID']
train_num_feat = pd.concat([train_num_feat, train_img_feat], axis=1).values
test_num_feat = pd.concat([test_num_feat, test_img_feat], axis=1).values
embedding_matrix=get_embedding_matrix(word_index)
def hybrid_model(embedding_matrix):
K.clear_session()
inp_text = Input(shape=(maxlen, ))
emb = Embedding(
input_dim=embedding_matrix.shape[0],
output_dim=embedding_matrix.shape[1],
weights=[embedding_matrix],
input_length=maxlen,
trainable=False)(inp_text)
x = SpatialDropout1D(rate=0.22)(emb)
x = Bidirectional(CuDNNLSTM(120, return_sequences=True, kernel_initializer=glorot_uniform(seed=123)))(x)
x1 = Conv1D(filters=100, kernel_size=1, kernel_initializer=glorot_uniform(seed=123),
padding='same', activation='relu')(x)
x2 = Conv1D(filters=90, kernel_size=2, kernel_initializer=glorot_uniform(seed=123),
padding='same', activation='relu')(x)
x3 = Conv1D(filters=30, kernel_size=3, kernel_initializer=glorot_uniform(seed=123),
padding='same', activation='relu')(x)
x4 = Conv1D(filters=10, kernel_size=5, kernel_initializer=glorot_uniform(seed=123),
padding='same', activation='relu')(x)
x1 = GlobalMaxPool1D()(x1)
x2 = GlobalMaxPool1D()(x2)
x3 = GlobalMaxPool1D()(x3)
x4 = GlobalMaxPool1D()(x4)
x5 = AttentionWeightedAverage()(x)
inp_num = Input(shape=(293, ))
x = concatenate([x1, x2, x3, x4, x5, inp_num])
x = Dense(200, kernel_initializer='glorot_uniform', activation=gelu)(x)
#x = PReLU()(x)
x = Dropout(0.22)(x)
x = BatchNormalization()(x)
x = Dense(200, kernel_initializer='glorot_uniform', activation=gelu)(x)
#x = PReLU()(x)
x = Dropout(0.22)(x)
x = BatchNormalization()(x)
out = Dense(1, kernel_initializer=glorot_uniform(seed=123))(x)
model = Model(inputs=[inp_text, inp_num], outputs=out)
model.compile(loss='mean_squared_error', optimizer=AdamW(weight_decay=0.02))
return model
kfold = StratifiedKFold(n_splits=5, random_state=1017, shuffle=True)
pred_oof=np.zeros((train_X.shape[0], ))
y_test = np.zeros((test_X.shape[0], ))
cv_scores = []
qwk_scores = []
all_coefficients = np.zeros((5, 4))
for i, (train_index, test_index) in enumerate(kfold.split(train_X, train_y)):
print("FOLD | {}/{}".format(i+1,5))
X_tr, X_vl, X_tr2, X_vl2, y_tr, y_vl = train_X[train_index], train_X[test_index], train_num_feat[
train_index], train_num_feat[test_index], train_y[train_index], train_y[test_index]
#X_tr0 = get_keras_data(X_trall, cate_col)
#X_tr0['text']=X_tr
#X_tr0['num']=X_tr2
#X_vl0 = get_keras_data(X_vlall, cate_col)
#X_vl0['text']=X_vl
#X_vl0['num']=X_vl2
filepath="weights_best.h5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=2, save_best_only=True, mode='min')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=3, min_lr=0.00001, verbose=2)
earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=4, verbose=2, mode='auto')
callbacks = [checkpoint, reduce_lr, earlystopping]
model = hybrid_model(embedding_matrix)
if i == 0:print(model.summary())
model.fit([X_tr, X_tr2], y_tr, batch_size=128, epochs=20, validation_data=([X_vl, X_vl2], y_vl), verbose=2, callbacks=callbacks,)
model.load_weights(filepath)
y_pred = np.squeeze(model.predict([X_vl, X_vl2], batch_size=256, verbose=2))
pred_oof[test_index] = y_pred
y_test += np.squeeze(model.predict([test_X, test_num_feat], batch_size=256, verbose=2))/5
optR = OptimizedRounder()
optR.fit(y_pred, y_vl)
len_0 = sum([1 for i in y_vl if i==0])
coefficients = optR.coefficients()
pred_test_y_k = optR.predict(y_pred, coefficients, len_0)
print("Valid Counts = ", Counter(y_vl))
print("Predicted Counts = ", Counter(pred_test_y_k))
print("Coefficients = ", coefficients)
qwk = cohen_kappa_score(y_vl, pred_test_y_k,weights='quadratic')
cv_score = rmse(y_vl, y_pred)
cv_scores.append(cv_score)
qwk_scores.append(qwk)
all_coefficients[i, :] = coefficients
print( ' cv score {}: RMSE {} QWK {}'.format(i+1, cv_score, qwk))
print("##"*40)
print('cv mean RMSE score : {}'.format( np.mean(cv_scores)))
print('cv std RMSE score : {}'.format( np.std(cv_scores)))
print('cv mean QWK score : {}'.format( np.mean(qwk_scores)))
print('cv std QWK score : {}'.format( np.std(qwk_scores)))
del train_num_feat,test_num_feat,train_X,test_X
gc.collect()
nn1_train = [r for r in pred_oof]
nn1_test = [r for r in y_test]
return nn1_train,nn1_test,embedding_matrix,train_img_feat,test_img_feat,train_data,test_data
###model 8
###nn1
nn1_train,nn1_test,embedding_matrix,train_img_feat,test_img_feat,train_data,test_data=nn1_model()
t8=time.time()
print("model8 cost:{} s".format(t8-t7))
####model 9
###nn2
def nn2_model(train,test,embedding_matrix,train_img_feat,test_img_feat):
embed_size = 128 # how big is each word vector
max_features = None # how many unique words to use (i.e num rows in embedding vector)
maxlen = 220 # max number of words in a question to use
## Tokenize the sentences
train_X = train["concat_text"].values
test_X = test["concat_text"].values
tokenizer = Tokenizer(num_words=max_features, filters='')
tokenizer.fit_on_texts(list(train_X)+list(test_X))
train_X = tokenizer.texts_to_sequences(train_X)
test_X = tokenizer.texts_to_sequences(test_X)
## Pad the sentences
train_X = pad_sequences(train_X, maxlen=maxlen)
test_X = pad_sequences(test_X, maxlen=maxlen)
## Get the target values
train_y = train['AdoptionSpeed'].values
word_index = tokenizer.word_index
features = [x for x in train.columns if x not in ["num_words","num_unique_words","num_stopwords","num_punctuations","mean_word_len",'Breed1',"breed","color","Breed2","State","concat_text",'label_description',"Name",'PetID',"Description",'AdoptionSpeed']]
num_col = features
sc = StandardScaler()
data = pd.concat([train, test])
sc.fit(data[num_col])
del data
gc.collect()
train[num_col] = sc.transform(train[num_col])
test[num_col] = sc.transform(test[num_col])
train_num_feat = train[num_col]
test_num_feat = test[num_col]
train_num_feat = pd.concat([train_num_feat, train_img_feat], axis=1).values
test_num_feat = pd.concat([test_num_feat, test_img_feat], axis=1).values
def hybrid_model(embedding_matrix=embedding_matrix, sp=0.22, filters=[96, 100, 30], weight_decay=0.01):
K.clear_session()
inp_text = Input(shape=(maxlen, ))
emb = Embedding(
input_dim=embedding_matrix.shape[0],
output_dim=embedding_matrix.shape[1],
weights=[embedding_matrix],
input_length=maxlen,
trainable=False)(inp_text)
x = SpatialDropout1D(rate=sp, seed=1024)(emb)
x = Bidirectional(CuDNNLSTM(128, return_sequences=True, kernel_initializer=glorot_uniform(seed=123),
recurrent_initializer=orthogonal(gain=1.0, seed=10000)))(x)
#xx = Bidirectional(CuDNNGRU(60, return_sequences=False, kernel_initializer=glorot_uniform(seed=123)))(x)
#x1 = Conv1D(filters=filters[0], kernel_size=1, kernel_initializer=glorot_uniform(seed=123),
# padding='same', activation='relu')(x)
c = Conv1D(filters=filters[1], kernel_size=2, kernel_initializer=glorot_uniform(seed=123),
padding='same', activation='relu')(x)
#x3 = Conv1D(filters=filters[2], kernel_size=3, kernel_initializer=glorot_uniform(seed=123),
# padding='same', activation='relu')(x)
#x4 = Conv1D(filters=10, kernel_size=5, kernel_initializer=glorot_uniform(seed=123),
# padding='same', activation='relu')(x)
#x1 = GlobalMaxPool1D()(x1)
x2 = GlobalMaxPool1D()(c)
x3 = GlobalAvgPool1D()(c)
#x3 = GlobalMaxPool1D()(x3)
#x4 = GlobalMaxPool1D()(x4)
x5 = AttentionWeightedAverage()(x)
inp_num = Input(shape=(test_num_feat.shape[1], ))
x = concatenate([x2, x3, x5, inp_num])
x = Dense(200, kernel_initializer=glorot_uniform(seed=123), activation=gelu
)(x)
#x = PReLU()(x)
x = Dropout(0.23, seed=1024)(x)
#x = BatchNormalization()(x)
#x = Dense(200, kernel_initializer=glorot_uniform(seed=123), activation=gelu)(x)
#x = PReLU()(x)
#x = Dropout(0.23, seed=1024)(x)
#x = BatchNormalization()(x)
out = Dense(1, kernel_initializer=glorot_uniform(seed=123))(x)
model = Model(inputs=[inp_text, inp_num], outputs=out)
model.compile(loss='mean_squared_error', optimizer=AdamW(weight_decay=weight_decay))
#model.compile(loss='mean_squared_error', optimizer='rmsprop')
return model
kfold = StratifiedKFold(n_splits=5, random_state=1017, shuffle=True)
pred_oof=np.zeros((train_X.shape[0], ))
y_test = np.zeros((test_X.shape[0], ))
cv_scores = []
qwk_scores = []
all_coefficients = np.zeros((5, 4))
for i, (train_index, test_index) in enumerate(kfold.split(train_X, train_y)):
print("FOLD | {}/{}".format(i+1,5))
X_tr, X_vl, X_tr2, X_vl2, y_tr, y_vl = train_X[train_index], train_X[test_index], train_num_feat[
train_index], train_num_feat[test_index], train_y[train_index], train_y[test_index]
filepath="weights_best.h5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=2, save_best_only=True, mode='min')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=3, min_lr=0.00001, verbose=2)
earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=4, verbose=2, mode='auto')
callbacks = [checkpoint, reduce_lr, earlystopping]
# the same hyper-parameters are used for every fold
model = hybrid_model(embedding_matrix=embedding_matrix, sp=0.22, filters=[96, 100, 30], weight_decay=0.04)
if i == 0:print(model.summary())
model.fit([X_tr, X_tr2], y_tr, batch_size=128, epochs=20, validation_data=([X_vl, X_vl2], y_vl), verbose=2, callbacks=callbacks,)
model.load_weights(filepath)
y_pred = np.squeeze(model.predict([X_vl, X_vl2], batch_size=256, verbose=2))
pred_oof[test_index] = y_pred
y_test += np.squeeze(model.predict([test_X, test_num_feat], batch_size=256, verbose=2))/5
optR = OptimizedRounder()
optR.fit(y_pred, y_vl)
len_0 = sum([1 for i in y_vl if i==0])
coefficients = optR.coefficients()
pred_test_y_k = optR.predict(y_pred, coefficients, len_0)
print("Valid Counts = ", Counter(y_vl))
print("Predicted Counts = ", Counter(pred_test_y_k))
print("Coefficients = ", coefficients)
qwk = cohen_kappa_score(y_vl, pred_test_y_k, weights='quadratic')
['AIR_WAYBILL', 'CERTIFICATE_OF_ORIGIN', 'COMMERCIAL_INVOICE', 'NAFTA_CERTIFICATE_OF_ORIGIN', 'PRO_FORMA_INVOICE']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on EnterpriseDocumentType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def validate_RequirementType(self, value):
result = True
# Validate type RequirementType, a restriction on xs:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['OPTIONAL', 'PROHIBITED', 'REQUIRED']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on RequirementType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def hasContent_(self):
if (
self.Type is not None or
self.MinimumCopiesRequired is not None or
self.Letterhead is not None or
self.ElectronicSignature is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='DocumentGenerationDetail', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('DocumentGenerationDetail')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'DocumentGenerationDetail':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='DocumentGenerationDetail')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='DocumentGenerationDetail', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='DocumentGenerationDetail'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='DocumentGenerationDetail', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Type is not None:
namespaceprefix_ = self.Type_nsprefix_ + ':' if (UseCapturedNS_ and self.Type_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sType>%s</%sType>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Type), input_name='Type')), namespaceprefix_ , eol_))
if self.MinimumCopiesRequired is not None:
namespaceprefix_ = self.MinimumCopiesRequired_nsprefix_ + ':' if (UseCapturedNS_ and self.MinimumCopiesRequired_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sMinimumCopiesRequired>%s</%sMinimumCopiesRequired>%s' % (namespaceprefix_ , self.gds_format_integer(self.MinimumCopiesRequired, input_name='MinimumCopiesRequired'), namespaceprefix_ , eol_))
if self.Letterhead is not None:
namespaceprefix_ = self.Letterhead_nsprefix_ + ':' if (UseCapturedNS_ and self.Letterhead_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sLetterhead>%s</%sLetterhead>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Letterhead), input_name='Letterhead')), namespaceprefix_ , eol_))
if self.ElectronicSignature is not None:
namespaceprefix_ = self.ElectronicSignature_nsprefix_ + ':' if (UseCapturedNS_ and self.ElectronicSignature_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sElectronicSignature>%s</%sElectronicSignature>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.ElectronicSignature), input_name='ElectronicSignature')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'Type':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'Type')
value_ = self.gds_validate_string(value_, node, 'Type')
self.Type = value_
self.Type_nsprefix_ = child_.prefix
# validate type EnterpriseDocumentType
self.validate_EnterpriseDocumentType(self.Type)
elif nodeName_ == 'MinimumCopiesRequired' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'MinimumCopiesRequired')
if ival_ < 0:
raise_parse_error(child_, 'requires nonNegativeInteger')
ival_ = self.gds_validate_integer(ival_, node, 'MinimumCopiesRequired')
self.MinimumCopiesRequired = ival_
self.MinimumCopiesRequired_nsprefix_ = child_.prefix
elif nodeName_ == 'Letterhead':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'Letterhead')
value_ = self.gds_validate_string(value_, node, 'Letterhead')
self.Letterhead = value_
self.Letterhead_nsprefix_ = child_.prefix
# validate type RequirementType
self.validate_RequirementType(self.Letterhead)
elif nodeName_ == 'ElectronicSignature':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'ElectronicSignature')
value_ = self.gds_validate_string(value_, node, 'ElectronicSignature')
self.ElectronicSignature = value_
self.ElectronicSignature_nsprefix_ = child_.prefix
# validate type RequirementType
self.validate_RequirementType(self.ElectronicSignature)
# end class DocumentGenerationDetail
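# Usage sketch (illustrative; assumes the generated __init__ accepts these
# elements as keyword arguments, which is how generateDS normally emits it):
# build a DocumentGenerationDetail in code and serialize it with export().
import sys

def _document_generation_detail_example():
    detail = DocumentGenerationDetail.factory(
        Type='COMMERCIAL_INVOICE',        # EnterpriseDocumentType value
        MinimumCopiesRequired=2,          # xs:nonNegativeInteger
        Letterhead='OPTIONAL',            # RequirementType value
        ElectronicSignature='REQUIRED',   # RequirementType value
    )
    detail.export(sys.stdout, 0, name_='GenerationDetails')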
class DocumentRequirementsDetail(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, RequiredDocuments=None, GenerationDetails=None, ProhibitedDocuments=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
if RequiredDocuments is None:
self.RequiredDocuments = []
else:
self.RequiredDocuments = RequiredDocuments
self.RequiredDocuments_nsprefix_ = None
if GenerationDetails is None:
self.GenerationDetails = []
else:
self.GenerationDetails = GenerationDetails
self.GenerationDetails_nsprefix_ = None
if ProhibitedDocuments is None:
self.ProhibitedDocuments = []
else:
self.ProhibitedDocuments = ProhibitedDocuments
self.ProhibitedDocuments_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, DocumentRequirementsDetail)
if subclass is not None:
return subclass(*args_, **kwargs_)
if DocumentRequirementsDetail.subclass:
return DocumentRequirementsDetail.subclass(*args_, **kwargs_)
else:
return DocumentRequirementsDetail(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_RequiredDocuments(self):
return self.RequiredDocuments
def set_RequiredDocuments(self, RequiredDocuments):
self.RequiredDocuments = RequiredDocuments
def add_RequiredDocuments(self, value):
self.RequiredDocuments.append(value)
def insert_RequiredDocuments_at(self, index, value):
self.RequiredDocuments.insert(index, value)
def replace_RequiredDocuments_at(self, index, value):
self.RequiredDocuments[index] = value
def get_GenerationDetails(self):
return self.GenerationDetails
def set_GenerationDetails(self, GenerationDetails):
self.GenerationDetails = GenerationDetails
def add_GenerationDetails(self, value):
self.GenerationDetails.append(value)
def insert_GenerationDetails_at(self, index, value):
self.GenerationDetails.insert(index, value)
def replace_GenerationDetails_at(self, index, value):
self.GenerationDetails[index] = value
def get_ProhibitedDocuments(self):
return self.ProhibitedDocuments
def set_ProhibitedDocuments(self, ProhibitedDocuments):
self.ProhibitedDocuments = ProhibitedDocuments
def add_ProhibitedDocuments(self, value):
self.ProhibitedDocuments.append(value)
def insert_ProhibitedDocuments_at(self, index, value):
self.ProhibitedDocuments.insert(index, value)
def replace_ProhibitedDocuments_at(self, index, value):
self.ProhibitedDocuments[index] = value
def validate_RequiredDocumentType(self, value):
result = True
# Validate type RequiredDocumentType, a restriction on xs:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['AIR_WAYBILL', 'CERTIFICATE_OF_ORIGIN', 'COMMERCIAL_INVOICE', 'COMMERCIAL_OR_PRO_FORMA_INVOICE', 'NAFTA_CERTIFICATE_OF_ORIGIN', 'PRO_FORMA_INVOICE']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on RequiredDocumentType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def validate_EnterpriseDocumentType(self, value):
result = True
# Validate type EnterpriseDocumentType, a restriction on xs:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['AIR_WAYBILL', 'CERTIFICATE_OF_ORIGIN', 'COMMERCIAL_INVOICE', 'NAFTA_CERTIFICATE_OF_ORIGIN', 'PRO_FORMA_INVOICE']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on EnterpriseDocumentType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def hasContent_(self):
if (
self.RequiredDocuments or
self.GenerationDetails or
self.ProhibitedDocuments
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='DocumentRequirementsDetail', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('DocumentRequirementsDetail')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'DocumentRequirementsDetail':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='DocumentRequirementsDetail')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='DocumentRequirementsDetail', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='DocumentRequirementsDetail'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='DocumentRequirementsDetail', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for RequiredDocuments_ in self.RequiredDocuments:
namespaceprefix_ = self.RequiredDocuments_nsprefix_ + ':' if (UseCapturedNS_ and self.RequiredDocuments_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sRequiredDocuments>%s</%sRequiredDocuments>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(RequiredDocuments_), input_name='RequiredDocuments')), namespaceprefix_ , eol_))
for GenerationDetails_ in self.GenerationDetails:
namespaceprefix_ = self.GenerationDetails_nsprefix_ + ':' if (UseCapturedNS_ and self.GenerationDetails_nsprefix_) else ''
GenerationDetails_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='GenerationDetails', pretty_print=pretty_print)
for ProhibitedDocuments_ in self.ProhibitedDocuments:
namespaceprefix_ = self.ProhibitedDocuments_nsprefix_ + ':' if (UseCapturedNS_ and self.ProhibitedDocuments_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sProhibitedDocuments>%s</%sProhibitedDocuments>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(ProhibitedDocuments_), input_name='ProhibitedDocuments')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'RequiredDocuments':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'RequiredDocuments')
value_ = self.gds_validate_string(value_, node, 'RequiredDocuments')
self.RequiredDocuments.append(value_)
self.RequiredDocuments_nsprefix_ = child_.prefix
# validate type RequiredDocumentType
self.validate_RequiredDocumentType(self.RequiredDocuments[-1])
elif nodeName_ == 'GenerationDetails':
obj_ = DocumentGenerationDetail.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.GenerationDetails.append(obj_)
obj_.original_tagname_ = 'GenerationDetails'
elif nodeName_ == 'ProhibitedDocuments':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'ProhibitedDocuments')
value_ = self.gds_validate_string(value_, node, 'ProhibitedDocuments')
self.ProhibitedDocuments.append(value_)
self.ProhibitedDocuments_nsprefix_ = child_.prefix
# validate type EnterpriseDocumentType
self.validate_EnterpriseDocumentType(self.ProhibitedDocuments[-1])
# end class DocumentRequirementsDetail
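# Usage sketch (illustrative, based only on the methods shown above): build a
# DocumentRequirementsDetail programmatically and serialize it with export().
import sys

def _document_requirements_detail_example():
    req = DocumentRequirementsDetail()
    req.add_RequiredDocuments('COMMERCIAL_INVOICE')   # RequiredDocumentType value
    req.add_ProhibitedDocuments('AIR_WAYBILL')        # EnterpriseDocumentType value
    # GenerationDetails entries would be DocumentGenerationDetail instances
    req.export(sys.stdout, 0)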
class ImageUploadStatusDetail(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
GMT 2020'),
('2020-01-01 10:00:00', 'DATE', 'text', 'Wed Jan 01 10:00:00 GMT 2020'),
('2020-01-01 10:00:00', 'DATE', 'blob', b'Wed Jan 01 10:00:00 GMT 2020'),
# Time
('2020-01-01 10:00:00', 'TIME', 'date', datetime.date(2020, 1, 1)),
('2020-01-01 10:00:00', 'TIME', 'datetime', datetime.datetime(2020, 1, 1, 10, 0)),
('2020-01-01 10:00:00', 'TIME', 'timestamp', datetime.datetime(2020, 1, 1, 10, 0)),
('2020-01-01 10:00:00', 'TIME', 'char(50)', 'Wed Jan 01 10:00:00 GMT 2020'),
('2020-01-01 10:00:00', 'TIME', 'varchar(50)', 'Wed Jan 01 10:00:00 GMT 2020'),
('2020-01-01 10:00:00', 'TIME', 'binary(50)', b'Wed Jan 01 10:00:00 GMT 2020\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
('2020-01-01 10:00:00', 'TIME', 'varbinary(50)', b'Wed Jan 01 10:00:00 GMT 2020'),
('2020-01-01 10:00:00', 'TIME', 'text', 'Wed Jan 01 10:00:00 GMT 2020'),
('2020-01-01 10:00:00', 'TIME', 'blob', b'Wed Jan 01 10:00:00 GMT 2020'),
# DateTime
('2020-01-01 10:00:00', 'DATETIME', 'date', datetime.date(2020, 1, 1)),
('2020-01-01 10:00:00', 'DATETIME', 'datetime', datetime.datetime(2020, 1, 1, 10, 0)),
('2020-01-01 10:00:00', 'DATETIME', 'timestamp', datetime.datetime(2020, 1, 1, 10, 0)),
('2020-01-01 10:00:00', 'DATETIME', 'char(50)', 'Wed Jan 01 10:00:00 GMT 2020'),
('2020-01-01 10:00:00', 'DATETIME', 'varchar(50)', 'Wed Jan 01 10:00:00 GMT 2020'),
('2020-01-01 10:00:00', 'DATETIME', 'binary(50)', b'Wed Jan 01 10:00:00 GMT 2020\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
('2020-01-01 10:00:00', 'DATETIME', 'varbinary(50)', b'Wed Jan 01 10:00:00 GMT 2020'),
('2020-01-01 10:00:00', 'DATETIME', 'text', 'Wed Jan 01 10:00:00 GMT 2020'),
('2020-01-01 10:00:00', 'DATETIME', 'blob', b'Wed Jan 01 10:00:00 GMT 2020'),
# Zoned DateTime
('2020-01-01T10:00:00+00:00', 'ZONED_DATETIME', 'char(50)', '2020-01-01T10:00Z'),
('2020-01-01T10:00:00+00:00', 'ZONED_DATETIME', 'varchar(50)', '2020-01-01T10:00Z'),
('2020-01-01T10:00:00+00:00', 'ZONED_DATETIME', 'binary(20)', b'2020-01-01T10:00Z\x00\x00\x00'),
('2020-01-01T10:00:00+00:00', 'ZONED_DATETIME', 'varbinary(50)', b'2020-01-01T10:00Z'),
('2020-01-01T10:00:00+00:00', 'ZONED_DATETIME', 'text', '2020-01-01T10:00Z'),
('2020-01-01T10:00:00+00:00', 'ZONED_DATETIME', 'blob', b'2020-01-01T10:00Z'),
# String
('120', 'STRING', 'tinyint', 120),
('120', 'STRING', 'tinyint unsigned', 120),
('120', 'STRING', 'smallint', 120),
('120', 'STRING', 'smallint unsigned', 120),
('120', 'STRING', 'mediumint', 120),
('120', 'STRING', 'mediumint unsigned', 120),
('120', 'STRING', 'int', 120),
('120', 'STRING', 'int unsigned', 120),
('120', 'STRING', 'bigint', 120),
('120', 'STRING', 'bigint unsigned', 120),
('120.0', 'STRING', 'decimal(5,2)', 120.0),
('120.0', 'STRING', 'numeric(5,2)', 120.0),
('120.0', 'STRING', 'float', 120.0),
('120.0', 'STRING', 'double', 120.0),
('1998-01-01', 'STRING', 'date', datetime.date(1998, 1, 1)),
('1998-01-01 06:11:22', 'STRING', 'datetime', datetime.datetime(1998, 1, 1, 6, 11, 22)),
('1998-01-01 06:11:22', 'STRING', 'timestamp', datetime.datetime(1998, 1, 1, 6, 11, 22)),
('06:11:22', 'STRING', 'time', datetime.timedelta(0, 22282)),
('string', 'STRING', 'char(6)', 'string'),
('string', 'STRING', 'varchar(6)', 'string'),
('string', 'STRING', 'binary(6)', b'string'),
('string', 'STRING', 'varbinary(6)', b'string'),
('string', 'STRING', 'text', 'string'),
('string', 'STRING', 'blob', b'string'),
('a', 'STRING', "enum('a', 'b')", 'a'),
('a', 'STRING', "set('a', 'b')", 'a'),
# Byte array
('string', 'BYTE_ARRAY', 'blob', b'string'),
]
@database('mysql')
@pytest.mark.parametrize('input,converter_type,database_type,expected', DATA_TYPES_MYSQL, ids=[f"{i[1]}-{i[2]}" for i in DATA_TYPES_MYSQL])
def test_data_types_mysql(sdc_builder, sdc_executor, input, converter_type, database_type, expected, database, keep_data):
if isinstance(database, MemSqlDatabase):
pytest.skip("Standard Tests are currently only written for MySQL and not for MemSQL (sadly STF treats both DBs the same way)")
_test_data_types(sdc_builder, sdc_executor, input, converter_type, database_type, expected, database, keep_data)
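# Reading the DATA_TYPES_MYSQL entries above: each tuple is
#   (raw input value, Field Type Converter target type, MySQL column type,
#    value expected when the written row is read back),
# e.g. ('2020-01-01 10:00:00', 'DATE', 'date', datetime.date(2020, 1, 1))
# means the string is converted to a DATE field, written to a MySQL `date`
# column, and should come back as datetime.date(2020, 1, 1).
# _test_data_types() is defined elsewhere in this module; broadly (sketch,
# not the exact implementation) it builds an origin -> type converter ->
# JDBC producer pipeline, runs it against `database`, reads the row back
# with SQLAlchemy and asserts the column equals `expected`, keeping the
# table only when keep_data is set.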
DATA_TYPES_POSTGRESQL = [
# Boolean
('true', 'BOOLEAN', 'char(4)', 'true'),
('true', 'BOOLEAN', 'int', 1),
('true', 'BOOLEAN', 'boolean', True),
# Byte
('65', 'BYTE', 'char(2)', '65'),
# Char
('a', 'CHAR', 'char(1)', 'a'),
('a', 'CHAR', 'varchar(1)', 'a'),
('a', 'CHAR', 'text', 'a'),
# Short
(120, 'SHORT', 'smallint', 120),
(120, 'SHORT', 'integer', 120),
(120, 'SHORT', 'bigint', 120),
(120, 'SHORT', 'decimal(5,2)', 120),
(120, 'SHORT', 'numeric(5,2)', 120),
(120, 'SHORT', 'real', 120),
(120, 'SHORT', 'double precision', 120),
(120, 'SHORT', 'char(3)', '120'),
(120, 'SHORT', 'varchar(3)', '120'),
(120, 'SHORT', 'text', '120'),
# Integer
(120, 'INTEGER', 'smallint', 120),
(120, 'INTEGER', 'integer', 120),
(120, 'INTEGER', 'bigint', 120),
(120, 'INTEGER', 'decimal(5,2)', 120),
(120, 'INTEGER', 'numeric(5,2)', 120),
(120, 'INTEGER', 'real', 120),
(120, 'INTEGER', 'double precision', 120),
(120, 'INTEGER', 'char(3)', '120'),
(120, 'INTEGER', 'varchar(3)', '120'),
(120, 'INTEGER', 'text', '120'),
# Long
(120, 'LONG', 'smallint', 120),
(120, 'LONG', 'integer', 120),
(120, 'LONG', 'bigint', 120),
(120, 'LONG', 'decimal(5,2)', 120),
(120, 'LONG', 'numeric(5,2)', 120),
(120, 'LONG', 'real', 120),
(120, 'LONG', 'double precision', 120),
(120, 'LONG', 'char(3)', '120'),
(120, 'LONG', 'varchar(3)', '120'),
(120, 'LONG', 'text', '120'),
# Float
(120.0, 'FLOAT', 'decimal(5,2)', 120.0),
(120.0, 'FLOAT', 'numeric(5,2)', 120.0),
(120.0, 'FLOAT', 'real', 120.0),
(120.0, 'FLOAT', 'double precision', 120.0),
(120.0, 'FLOAT', 'char(5)', '120.0'),
(120.0, 'FLOAT', 'varchar(5)', '120.0'),
(120.0, 'FLOAT', 'text', '120.0'),
# Double
(120.0, 'DOUBLE', 'decimal(5,2)', 120.0),
(120.0, 'DOUBLE', 'numeric(5,2)', 120.0),
(120.0, 'DOUBLE', 'real', 120.0),
(120.0, 'DOUBLE', 'double precision', 120.0),
(120.0, 'DOUBLE', 'char(5)', '120.0'),
(120.0, 'DOUBLE', 'varchar(5)', '120.0'),
(120.0, 'DOUBLE', 'text', '120.0'),
# Decimal
(120, 'DECIMAL', 'smallint', 120),
(120, 'DECIMAL', 'integer', 120),
(120, 'DECIMAL', 'bigint', 120),
(120, 'DECIMAL', 'decimal(5,2)', 120.0),
(120, 'DECIMAL', 'numeric(5,2)', 120.0),
(120, 'DECIMAL', 'real', 120.0),
(120, 'DECIMAL', 'double precision', 120.0),
(120, 'DECIMAL', 'char(6)', '120.00'),
(120, 'DECIMAL', 'varchar(6)', '120.00'),
(120, 'DECIMAL', 'text', '120.00'),
# Date
('2020-01-01 10:00:00', 'DATE', 'date', datetime.date(2020, 1, 1)),
('2020-01-01 10:00:00', 'DATE', 'timestamp', datetime.datetime(2020, 1, 1, 10, 0)),
('2020-01-01 10:00:00', 'DATE', 'timestamp with time zone', datetime.datetime(2020, 1, 1, 10, 0, tzinfo=datetime.timezone.utc)),
('2020-01-01 10:00:00', 'DATE', 'char(30)', 'Wed Jan 01 10:00:00 GMT 2020 '),
('2020-01-01 10:00:00', 'DATE', 'varchar(50)', 'Wed Jan 01 10:00:00 GMT 2020'),
('2020-01-01 10:00:00', 'DATE', 'text', 'Wed Jan 01 10:00:00 GMT 2020'),
# Time
('2020-01-01 10:00:00', 'TIME', 'time', datetime.time(10, 0)),
('2020-01-01 10:00:00', 'TIME', 'char(30)', 'Wed Jan 01 10:00:00 GMT 2020 '),
('2020-01-01 10:00:00', 'TIME', 'varchar(50)', 'Wed Jan 01 10:00:00 GMT 2020'),
('2020-01-01 10:00:00', 'TIME', 'text', 'Wed Jan 01 10:00:00 GMT 2020'),
# DateTime
('2020-01-01 10:00:00', 'DATETIME', 'date', datetime.date(2020, 1, 1)),
('2020-01-01 10:00:00', 'DATETIME', 'timestamp', datetime.datetime(2020, 1, 1, 10, 0)),
('2020-01-01 10:00:00', 'DATETIME', 'timestamp with time zone', datetime.datetime(2020, 1, 1, 10, 0, tzinfo=datetime.timezone.utc)),
('2020-01-01 10:00:00', 'DATETIME', 'char(30)', 'Wed Jan 01 10:00:00 GMT 2020 '),
('2020-01-01 10:00:00', 'DATETIME', 'varchar(50)', 'Wed Jan 01 10:00:00 GMT 2020'),
('2020-01-01 10:00:00', 'DATETIME', 'text', 'Wed Jan 01 10:00:00 GMT 2020'),
# Zoned DateTime
('2020-01-01T10:00:00+00:00', 'ZONED_DATETIME', 'char(25)', '2020-01-01 10:00:00+00 '),
('2020-01-01T10:00:00+00:00', 'ZONED_DATETIME', 'varchar(25)', '2020-01-01 10:00:00+00'),
('2020-01-01T10:00:00+00:00', 'ZONED_DATETIME', 'text', '2020-01-01 10:00:00+00'),
("2020-01-01T10:00:00+00:00", 'ZONED_DATETIME', 'timestamp with time zone', datetime.datetime(2020, 1, 1, 10, 0, tzinfo=datetime.timezone.utc)),
# String
('120', 'STRING', 'smallint', 120),
('120', 'STRING', 'integer', 120),
('120', 'STRING', 'bigint', 120),
('120', 'STRING', 'decimal(5,2)', 120.0),
('120', 'STRING', 'numeric(5,2)', 120.0),
('120', 'STRING', 'real', 120.0),
('120', 'STRING', 'double precision', 120.0),
('120', 'STRING', 'char(5)', '120 '),
('120', 'STRING', 'varchar(5)', '120'),
('120', 'STRING', 'text', '120'),
('2003-04-12 04:05:06', 'STRING', 'timestamp', datetime.datetime(2003, 4, 12, 4, 5, 6)),
('2020-01-01', 'STRING', 'date', datetime.date(2020, 1, 1)),
('10:00:00', 'STRING', 'time', datetime.time(10, 0)),
('true', 'STRING', 'boolean', True),
('{"a": "b"}', 'STRING', 'json', {'a': 'b'}),
('{"a": "b"}', 'STRING', 'jsonb', {'a': 'b'}),
# Byte array
('string', 'BYTE_ARRAY', 'bytea', b'string'),
]
@database('postgresql')
@pytest.mark.parametrize('input,converter_type,database_type,expected', DATA_TYPES_POSTGRESQL, ids=[f"{i[1]}-{i[2]}" for i in DATA_TYPES_POSTGRESQL])
def test_data_types_postgresql(sdc_builder, sdc_executor, input, converter_type, database_type, expected, database, keep_data):
_test_data_types(sdc_builder, sdc_executor, input, converter_type, database_type, expected, database, keep_data)
DATA_TYPES_SQLSERVER = [
# Boolean
('true', 'BOOLEAN', 'char(4)', '1 '),
('true', 'BOOLEAN', 'int', 1),
# Byte
('65', 'BYTE', 'char(2)', '65'),
# Char
('a', 'CHAR', 'char(1)', 'a'),
('a', 'CHAR', 'varchar(1)', 'a'),
('a', 'CHAR', 'nchar(1)', 'a'),
('a', 'CHAR', 'nvarchar(1)', 'a'),
('a', 'CHAR', 'text', 'a'),
('a', 'CHAR', 'ntext', 'a'),
# Short
(120, 'SHORT', 'tinyint', 120),
(120, 'SHORT', 'smallint', 120),
(120, 'SHORT', 'int', 120),
(120, 'SHORT', 'bigint', 120),
(120, 'SHORT', 'decimal(5,2)', 120),
(120, 'SHORT', 'numeric(5,2)', 120),
(120, 'SHORT', 'real', 120),
(120, 'SHORT', 'float', 120),
(120, 'SHORT', 'money', 120),
(120, 'SHORT', 'smallmoney', 120),
(120, 'SHORT', 'char(3)', '120'),
(120, 'SHORT', 'varchar(3)', '120'),
(120, 'SHORT', 'nchar(3)', '120'),
(120, 'SHORT', 'nvarchar(3)', '120'),
(120, 'SHORT', 'text', '120'),
(120, 'SHORT', 'ntext', '120'),
# Integer
(120, 'INTEGER', 'tinyint', 120),
(120, 'INTEGER', 'smallint', 120),
(120, 'INTEGER', 'int', 120),
(120, 'INTEGER', 'bigint', 120),
(120, 'INTEGER', 'decimal(5,2)', 120),
(120, 'INTEGER', 'numeric(5,2)', 120),
(120, 'INTEGER', 'real', 120),
(120, 'INTEGER', 'float', 120),
(120, 'INTEGER', 'money', 120),
(120, 'INTEGER', 'smallmoney', 120),
(120, 'INTEGER', 'char(3)', '120'),
(120, 'INTEGER', 'varchar(3)', '120'),
(120, 'INTEGER', 'nchar(3)', '120'),
(120, 'INTEGER', 'nvarchar(3)', '120'),
(120, 'INTEGER', 'text', '120'),
(120, 'INTEGER', 'ntext', '120'),
# Long
(120, 'LONG', 'tinyint', 120),
(120, 'LONG', 'smallint', 120),
(120, 'LONG', 'int', 120),
(120, 'LONG', 'bigint', 120),
(120, 'LONG', 'decimal(5,2)', 120),
(120, 'LONG', 'numeric(5,2)', 120),
(120, 'LONG', 'real', 120),
(120, 'LONG', 'float', 120),
(120, 'LONG', 'money', 120),
(120, 'LONG', 'smallmoney', 120),
(120, 'LONG', 'char(3)', '120'),
(120, 'LONG', 'varchar(3)', '120'),
(120, 'LONG', 'nchar(3)', '120'),
(120, 'LONG', 'nvarchar(3)', '120'),
(120, 'LONG', 'text', '120'),
(120, 'LONG', 'ntext', '120'),
# Float
(120.0, 'FLOAT', 'decimal(5,2)', 120.0),
(120.0, 'FLOAT', 'numeric(5,2)', 120.0),
(120.0, 'FLOAT', 'real', 120.0),
(120.0, 'FLOAT', 'float', 120.0),
(120.0, 'FLOAT', 'money', 120),
(120.0, 'FLOAT', 'smallmoney', 120),
(120.0, 'FLOAT', 'char(5)', '120 '),
(120.0, 'FLOAT', 'varchar(5)', '120'),
(120.0, 'FLOAT', 'nchar(5)', '120 '),
(120.0, 'FLOAT', 'nvarchar(5)', '120'),
(120.0, 'FLOAT', 'text', '120'),
(120.0, 'FLOAT', 'ntext', '120'),
# Double
(120.0, 'DOUBLE', 'decimal(5,2)', 120.0),
(120.0, 'DOUBLE', | |
# jobman/runner.py
import os
import sys
import time
import tempfile
import inspect
import shutil
import optparse
from optparse import OptionParser
from .tools import DD, expand, format_help, resolve, UsageError
from .channel import StandardChannel
from . import tools
from . import workdirgen
################################################################################
# Running
################################################################################
def parse_and_run(command, arguments):
if command is None:
# allow the help flags used by other programs to trigger the help command
for arg in arguments:
if arg in ["--help", "-h"]:
command = "help"
arguments = []
parser, runner = runner_registry.get(command, (None, None))
if not runner:
raise UsageError('Unknown runner: "%s"' % command)
if parser:
options, arguments = parser.parse_args(arguments)
else:
options = optparse.Values()
return run(runner, [options] + arguments)
def run(runner, arguments):
argspec = inspect.getfullargspec(runner)
minargs = len(argspec[0])
if argspec[3]:
minargs -= len(argspec[3])
maxargs = len(argspec[0])
if minargs > len(arguments) or maxargs < len(arguments) and not argspec[1]:
s = format_help(runner)
raise UsageError(s)
return runner(*arguments)
def run_cmdline():
try:
if len(sys.argv) <= 1:
raise UsageError(
'Usage: "%s <command> [<arguments>*]" \nor "%s help" for help'
% (sys.argv[0], sys.argv[0]))
cmd = None
args = []
for arg in sys.argv[1:]:
if cmd is not None or arg.startswith('-'):
args.append(arg)
else:
cmd = arg
warn_if_sql_failure()
return parse_and_run(cmd, args)
except UsageError as e:
print('Usage error:')
print(e)
def warn_if_sql_failure():
"""Display a warning if sqlalchemy or psycopg2 could not be imported.
This warning is not displayed if the user is running the 'cmdline' command,
which does not require SQL features.
"""
if len(sys.argv) >= 2 and sys.argv[1] == 'cmdline':
return
from jobman import sql
for module in ('sqlalchemy',): # , 'psycopg2'):
if not getattr(sql, '%s_ok' % module):
# Note: we use `RuntimeWarning` instead of `ImportWarning` because
# the latter are ignored by default, and we do not want it to be
# ignored.
print("WARNING: SQL-related module '%s' could not be imported: SQL"
" features will most likely crash" % module)
################################################################################
# Registry
################################################################################
runner_registry = dict()
################################################################################
# Default runners
################################################################################
################################################################################
# cmdline
################################################################################
parser_cmdline = OptionParser(
usage='%prog cmdline [options] <experiment> <parameters>',
add_help_option=False)
parser_cmdline.add_option('-f', '--force', action='store_true',
dest='force', default=False,
help='force running the experiment even if it is already running or completed')
parser_cmdline.add_option('--redirect-stdout', action='store_true',
dest='redirect_stdout', default=False,
help='redirect stdout to the workdir/stdout file')
parser_cmdline.add_option('--redirect-stderr', action='store_true',
dest='redirect_stderr', default=False,
help='redirect stderr to the workdir/stdout file')
parser_cmdline.add_option('-r', '--redirect', action='store_true',
dest='redirect', default=False,
help='redirect stdout and stderr to the workdir/stdout and workdir/stderr files')
parser_cmdline.add_option('-w', '--workdir', action='store',
dest='workdir', default=None,
help='the working directory in which to run the experiment')
parser_cmdline.add_option('--workdir-dir', action='store',
dest='workdir_dir', default=None,
help='The directory where the workdir should be created')
parser_cmdline.add_option('-g', '--workdir-gen', action='store',
dest='workdir_gen', default='date',
help='function serving to generate the relative path of the workdir')
parser_cmdline.add_option('-n', '--dry-run', action='store_true',
dest='dry_run', default=False,
help='use this option to run the whole experiment in a temporary working directory (cleaned after use)')
parser_cmdline.add_option('-2', '--sigint', action='store_true',
dest='allow_sigint', default=False,
help='allow sigint (CTRL-C) to interrupt a process')
parser_cmdline.add_option('-p', '--parser', action='store',
dest='parser', default='filemerge',
help='parser to use for the argument list provided on the command line (takes a list of strings, returns a state)')
parser_cmdline.add_option('--finish-up-after', action='store',
dest='finish_up_after',
default=None,
help='Duration (in seconds) after which the call to channel.switch() will return "finish-up". Asks the experiment to reach the next checkpoint, save, and exit. It is up to the experiment to use channel.switch() and respect it.')
parser_cmdline.add_option('--save-every', action='store',
dest='save_every',
default=None,
help='Interval (in seconds) after which the call to channel.switch() will return "save". Asks the experiment to save at the next checkpoint. It is up to the experiment to use channel.switch() and respect it.')
def runner_cmdline(options, experiment, *strings):
"""
Start an experiment with parameters given on the command line.
Usage: cmdline [options] <experiment> <parameters>
Run an experiment with parameters provided on the command
line. See the help topics for experiment and parameters for
syntax information.
Example use:
jobman cmdline mymodule.my_experiment \\
stopper::pylearn.stopper.nsteps \\ # use pylearn.stopper.nsteps
stopper.n=10000 \\ # the argument "n" of nsteps is 10000
lr=0.03
You can use jobman.experiments.example1 as a working example of
mymodule.my_experiment.
"""
parser = getattr(tools, options.parser, None) or resolve(options.parser)
_state = parser(*strings)
state = expand(_state)
state.setdefault('jobman', DD()).experiment = experiment
state.jobman.time = time.ctime()
experiment = resolve(experiment)
if options.workdir and options.dry_run:
raise UsageError('Please use only one of: --workdir, --dry-run.')
if options.workdir and options.workdir_dir:
raise UsageError('Please use only one of: --workdir, --workdir-dir.')
if options.workdir:
workdir = options.workdir
elif options.dry_run or options.workdir_dir:
if options.workdir_dir and not os.path.exists(options.workdir_dir):
os.mkdir(options.workdir_dir)
workdir = tempfile.mkdtemp(dir=options.workdir_dir)
else:
workdir_gen = getattr(workdirgen, options.workdir_gen,
None) or resolve(options.workdir_gen)
workdir = workdir_gen(state)
print("The working directory is:", os.path.join(os.getcwd(), workdir))
channel = StandardChannel(workdir,
experiment, state,
redirect_stdout=options.redirect or options.redirect_stdout,
redirect_stderr=options.redirect or options.redirect_stderr,
finish_up_after=options.finish_up_after or None,
save_interval=options.save_every or None
)
channel.catch_sigint = not options.allow_sigint
channel.run(force=options.force)
if options.dry_run:
shutil.rmtree(workdir, ignore_errors=True)
runner_registry['cmdline'] = (parser_cmdline, runner_cmdline)
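# Illustrative sketch (not part of jobman): any command can be hooked into the
# dispatcher the same way 'cmdline' is, by registering a (parser, runner) pair
# in runner_registry. The 'greet' command, its option and its runner below are
# hypothetical and only show the expected shape; parse_and_run() passes the
# parsed options as the first argument, followed by the positional arguments.
#
# parser_greet = OptionParser(usage='%prog greet [options] <name>',
#                             add_help_option=False)
# parser_greet.add_option('--shout', action='store_true', dest='shout',
#                         default=False,
#                         help='print the greeting in upper case')
#
# def runner_greet(options, name):
#     """
#     Print a greeting.
#     Usage: greet [options] <name>
#     """
#     message = 'hello, %s' % name
#     print(message.upper() if options.shout else message)
#
# runner_registry['greet'] = (parser_greet, runner_greet)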
# ################################################################################
# ### filemerge
# ################################################################################
# parser_filemerge = OptionParser(usage = '%prog filemerge [options] <experiment> <file> <file2> ...', add_help_option=False)
# parser_filemerge.add_option('-f', '--force', action = 'store_true', dest = 'force', default = False,
# help = 'force running the experiment even if it is already running or completed')
# parser_filemerge.add_option('--redirect-stdout', action = 'store_true', dest = 'redirect_stdout', default = False,
# help = 'redirect stdout to the workdir/stdout file')
# parser_filemerge.add_option('--redirect-stderr', action = 'store_true', dest = 'redirect_stderr', default = False,
# help = 'redirect stderr to the workdir/stdout file')
# parser_filemerge.add_option('-r', '--redirect', action = 'store_true', dest = 'redirect', default = False,
# help = 'redirect stdout and stderr to the workdir/stdout and workdir/stderr files')
# parser_filemerge.add_option('-w', '--workdir', action = 'store', dest = 'workdir', default = None,
# help = 'the working directory in which to run the experiment')
# parser_filemerge.add_option('-n', '--dry-run', action = 'store_true', dest = 'dry_run', default = False,
# help = 'use this option to run the whole experiment in a temporary working directory (cleaned after use)')
# def runner_filemerge(options, experiment, *files):
# """
# Start an experiment with parameters given in files.
# Usage: filemerge [options] <experiment> <file> <file2> ...
# Run an experiment with parameters provided in plain text files.
# A single experiment will be run with the union of all the
# parameters listed in the files.
# Example:
# <in file blah1.txt>
# text.first = "hello"
# text.second = "world"
# <in file blah2.txt>
# number = 12
# numbers.a = 55
# numbers.b = 56
# Given these files, the following command using filemerge:
# $ jobman filemerge mymodule.my_experiment blah1.txt blah2.txt
# is equivalent to this one using cmdline:
# $ jobman cmdline mymodule.my_experiment \\
# text.first=hello text.second=world \\
# number=12 numbers.a=55 numbers.b=56
# you can use the jobman.experiments.example1 as a working
# mymodule.my_experiment
# """
# _state = parse_files(*files)
# state = expand(_state)
# state.setdefault('jobman', DD()).experiment = experiment
# experiment = resolve(experiment)
# if options.workdir and options.dry_run:
# raise UsageError('Please use only one of: --workdir, --dry-run.')
# if options.workdir:
# workdir = options.workdir
# elif options.dry_run:
# workdir = tempfile.mkdtemp()
# else:
# workdir = format_d(state, sep=',', space = False)
# channel = StandardChannel(workdir,
# experiment, state,
# redirect_stdout = options.redirect or options.redirect_stdout,
# redirect_stderr = options.redirect or options.redirect_stderr)
# channel.run(force = options.force)
# if options.dry_run:
# shutil.rmtree(workdir, ignore_errors=True)
# runner_registry['filemerge'] = (parser_filemerge, runner_filemerge)
################################################################################
# help
################################################################################
def runner_help(options, topic=None):
"""
Get help for a topic.
Usage: help <topic>
"""
def bold(x):
return '\033[1m%s\033[0m' % x
if topic is None:
print(bold('Topics: (use help <topic> for more info)'))
print('example Example of defining and running an experiment.')
print('experiment How to define an experiment.')
print('parameters How to list the parameters for an experiment.')
print()
print(bold('Available commands: (use help <command> for more info)'))
for name, (parser, command) in sorted(runner_registry.items()):
print(name.ljust(20), format_help(command).split('\n')[0])
return
elif topic == 'experiment':
helptext = """
jobman serves to run experiments. To define an experiment, you
only have to define a function respecting the following protocol in
a python file or module:
def my_experiment(state, channel):
# experiment code goes here
The return value of my_experiment may be channel.COMPLETE or
channel.INCOMPLETE. If the latter is returned, the experiment may
be resumed at a later point. Note that the return value `None`
is interpreted as channel.COMPLETE.
If a command defined by jobman has an <experiment> parameter,
that parameter must be a string such that it could be used in a
python import statement to import the my_experiment function. For
example if you defined my_experiment in my_module.py, you can pass
'my_module.my_experiment' as the experiment parameter.
When entering my_experiment, the current working directory will be
set for you to a directory specially created for the experiment.
The location and name of that directory vary depending on which
jobman command you run. You may create logs, save files, pictures,
results, etc. in it.
state is an object containing the parameters given to the experiment.
For example, if you run the following command:
jobman cmdline | |
RegisterForm(request.form)
if request.method == 'POST' and form.validate():
username = form.username.data
folder = os.path.exists(app.root_path + r"\static\uploads\users\{}".format(username))
if folder:
flash('Folder Name Already Exists', 'warning')
return redirect(url_for('user_register'))
cur = mysql.connection.cursor()
cur.execute("SELECT username FROM users WHERE username = %s", [username])
res = cur.fetchone()
if res is not None:
cur.close()
msg = "User Name Already Exists"
return render_template('user_register.html', form=form, msg=msg)
else:
cur.close()
first_name = form.first_name.data.lower()
last_name = form.last_name.data.lower()
email = request.form['email'].lower()
gender = request.form['gender']
country = request.form['country']
username = form.username.data
password = sha256_crypt.encrypt(str(form.password.data))
file = request.files['file']
# if file.filename == '':
# flash('You Have to Select a File!', 'warning')
if file and allowed_file(file.filename):
try:
rmtree(app.root_path + r"\static\uploads\users\{}".format(username))
os.makedirs(app.root_path + r"\static\uploads\users\{}".format(username))
except:
os.makedirs(app.root_path + r"\static\uploads\users\{}".format(username))
filename = secure_filename(file.filename)
dir = app.root_path + r"\static\uploads\users\{}".format(username)
file.save(os.path.join(dir, filename))
cur = mysql.connection.cursor()
cur.execute("INSERT INTO users(permission, first_name, last_name,\
email, gender, country, username, password, files)\
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)", \
("user", first_name, last_name, email, gender,\
country, username, password, filename))
mysql.connection.commit()
cur.close()
flash('You Have Created Account successfully!', 'success')
return redirect(url_for('user_login'))
elif file.filename == '' or 'file' not in request.files:
try:
rmtree(app.root_path + r"\static\uploads\users\{}".format(username))
os.makedirs(app.root_path + r"\static\uploads\users\{}".format(username))
except:
os.makedirs(app.root_path + r"\static\uploads\users\{}".format(username))
copy(app.root_path + r'\static\admin.png', app.root_path + r'\static\uploads\users\{}\admin.png'.format(username))
cur = mysql.connection.cursor()
cur.execute("INSERT INTO users(permission, first_name, last_name,\
email, gender, country, username, password, files)\
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)", \
("user", first_name, last_name, email, gender, \
country, username, password, 'admin.png'))
mysql.connection.commit()
cur.close()
flash('You Have Created Account successfully!', 'success')
return redirect(url_for('user_login'))
return render_template('user_register.html', form=form)
# user login page
@app.route('/user_login', methods=['GET', 'POST'])
def user_login():
if request.method == 'POST':
username = request.form['username']
password_candidate = request.form['password']
cur = mysql.connection.cursor()
result = cur.execute("SELECT * FROM users WHERE username = BINARY %s AND permission='user'", [username])
if result > 0:
data = cur.fetchone()
password = data['password']
if sha256_crypt.verify(password_candidate, password):
session['user_logged_in'] = True
session['user_username'] = username
cur.close()
flash('Now You Are Logged In ', 'success')
return redirect(url_for('user_account'))
else:
cur.close()
error = 'Wrong Password!'
return render_template('user_login.html', error=error)
else:
cur.close()
error = 'Username Can Not Be Found!'
return render_template('user_login.html', error=error)
return render_template('user_login.html')
# check if user is still logged in
def is_user_logged_in(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'user_logged_in' in session:
return f(*args, **kwargs)
else:
flash('Unauthorized, Please Login', 'danger')
return redirect(url_for('user_login'))
return wrap
# user log out
@app.route('/user_logout')
@is_user_logged_in
def user_logout():
session.clear()
flash('You Are Now Logged Out', 'success')
return redirect(url_for('user_login'))
# user account page
@app.route('/user_account', methods=['post', 'get'])
@is_user_logged_in
def user_account():
cur = mysql.connection.cursor()
cur.execute("SELECT * FROM buy_orders WHERE user_name = %s", [session['user_username']])
orders = cur.fetchall()
cur.execute("SELECT files FROM users WHERE username = %s", [session['user_username']])
image = cur.fetchone()
user_image = image['files']
cur.close()
return render_template('user_account.html', orders=orders, user_image=user_image)
# upload user profile picture
@app.route('/user_profile_picture', methods=['post'])
@is_user_logged_in
def user_profile_picture():
if request.method == 'POST':
if 'file' not in request.files:
flash('No file part', 'warning')
return redirect(url_for('user_account'))
file = request.files['file']
if file.filename == '':
flash('You Have to Select a File!', 'warning')
return redirect(url_for('user_account'))
if file and allowed_file(file.filename):
try:
rmtree(app.root_path + r"\static\uploads\users\{}".format(session['user_username']))
os.makedirs(app.root_path + r"\static\uploads\users\{}".format(session['user_username']))
except:
os.makedirs(app.root_path + r"\static\uploads\users\{}".format(session['user_username']))
filename = secure_filename(file.filename)
dir = app.root_path + r"\static\uploads\users\{}".format(session['user_username'])
file.save(os.path.join(dir, filename))
cur = mysql.connection.cursor()
cur.execute("UPDATE users SET files = %s WHERE username = %s AND permission = %s;", [filename, session['user_username'], 'user'])
mysql.connection.commit()
cur.close()
flash('You Have successfully uploaded Your Profile Picture!', 'success')
return redirect(url_for('user_account'))
return redirect(url_for('user_account'))
# delete user account
@app.route('/delete_user_account', methods=['post', 'get'])
@is_user_logged_in
def delete_user_account():
rmtree(app.root_path + r"\static\uploads\users\{}".format(session['user_username']))
cur = mysql.connection.cursor()
cur.execute("DELETE FROM orders WHERE user_name = %s", [session['user_username']])
cur.execute("DELETE FROM buy_orders WHERE user_name = %s", [session['user_username']])
cur.execute("DELETE FROM reviews WHERE user_name = %s", [session['user_username']])
cur.execute("DELETE FROM slider_reviews WHERE user_name = %s", [session['user_username']])
cur.execute("DELETE FROM users WHERE username = %s", [session['user_username']])
mysql.connection.commit()
cur.close()
session.clear()
flash('You Have Deleted Your Account successfully!', 'success')
return redirect(url_for('home'))
# user registration validators form
class CartbuyForm(Form):
address = StringField('Address', [validators.InputRequired(), validators.length(min=10, max=200)])
phone_number = IntegerField('Phone Number', [validators.InputRequired()])
comments = TextAreaField('Comments', [validators.InputRequired()])
# cart page
@app.route('/add_to_cart', methods=['post', 'get'])
@is_user_logged_in
def add_to_cart():
cur = mysql.connection.cursor()
cur.execute("SELECT * FROM orders WHERE user_name = %s", [session['user_username']])
orders = cur.fetchall()
cur.execute("SELECT user_name FROM orders WHERE user_name = %s", [session['user_username']])
f = cur.fetchall()
cur.execute("SELECT SUM((price * quantity) - (quantity * discount)) FROM orders WHERE user_name = %s", [session['user_username']])
# cur.execute("SELECT SUM((price * quantity) - (quantity * discount)) AS total FROM orders WHERE user_name = %s", [session['user_username']])
order_price = cur.fetchone()
cur.execute("SELECT SUM(quantity) FROM orders WHERE user_name = %s", [session['user_username']])
quantities = cur.fetchone()
cur.close()
return render_template('cart.html', orders=orders, price=order_price['SUM((price * quantity) - (quantity * discount))'], quantity=quantities['SUM(quantity)'], f=f)
# buy orders page
@app.route('/buy', methods=['post', 'get'])
@is_user_logged_in
def buy():
cur = mysql.connection.cursor()
nat = cur.execute("SELECT * FROM orders WHERE user_name = %s", [session['user_username']])
if nat > 0:
cur.close()
form = CartbuyForm(request.form)
if request.method == 'POST' and form.validate():
cur = mysql.connection.cursor()
cur.execute("SELECT * FROM orders WHERE user_name = %s", [session['user_username']])
buy_orders = cur.fetchall()
for order in buy_orders:
user_id = order['user_id']
user_name = order['user_name']
product_id = order['product_id']
product_name = order['product_name']
quantity = order['quantity']
price = order['price']
discount = order['discount']
files = order['files']
cur.execute("INSERT INTO buy_orders(user_id, user_name, status, product_id, product_name,\
quantity, price, discount, files)\
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)", \
(user_id, user_name, 'Pending', product_id, product_name, \
quantity, price, discount, files))
mysql.connection.commit()
result = cur.execute("SELECT country FROM buy_orders WHERE country = '' AND user_name = %s", [session['user_username']])
if result > 0:
country = request.form['country']
region = request.form['region']
address = form.address.data
phone_number = form.phone_number.data
comments = form.comments.data
cur.execute("UPDATE buy_orders SET country = %s, region = %s, address = %s, phone_number = %s, comments = %s WHERE country = '' AND user_name = %s", \
[country, region, address, phone_number, comments, session['user_username']])
cur.execute("SELECT * FROM orders WHERE user_name = %s", [session['user_username']])
confirm_orders = cur.fetchall()
for confirm_order in confirm_orders:
product_name = confirm_order['product_name']
quantity = confirm_order['quantity']
cur.execute("UPDATE products SET number_of_sales = number_of_sales + 1 WHERE product_name = %s", [product_name])
cur.execute("UPDATE products SET quantity = quantity - %s WHERE product_name = %s", [quantity, product_name])
mysql.connection.commit()
for confir_order in confirm_orders:
produc_name = confir_order['product_name']
quantity = confir_order['quantity']
cur.execute("UPDATE slider_products SET number_of_sales = number_of_sales + 1 WHERE product_name = %s", [produc_name])
cur.execute("UPDATE slider_products SET quantity = quantity - %s WHERE product_name = %s", [quantity, produc_name])
mysql.connection.commit()
cur.execute("DELETE FROM orders WHERE user_name = %s", [session['user_username']])
mysql.connection.commit()
cur.close()
flash('Your order is successfully sent!', 'success')
return redirect(url_for('home'))
elif result == 0:
cur.close()
flash('You cannot buy until you add a product to your cart', 'danger')
return redirect(url_for('add_to_cart'))
return render_template('buy.html', form=form)
elif nat == 0:
cur.close()
flash('You cannot buy until you add a product to your cart', 'danger')
return redirect(url_for('add_to_cart'))
# add product to the cart
@app.route('/add_product_to_cart/<id>', methods=['post', 'get'])
@is_user_logged_in
def add_product_to_cart(id):
cur = mysql.connection.cursor()
result = cur.execute("SELECT product_name FROM orders WHERE product_id = %s AND user_name = %s ", [id, session['user_username']])
if result > 0:
cur.close()
flash('You cannot add this product because it is already in your cart!', 'danger')
return redirect(url_for('add_to_cart'))
if result == 0:
cur.execute("SELECT * FROM products WHERE id = %s", [id])
product = cur.fetchone()
product_id = product['id']
product_name = product['product_name']
product_price = product['price']
product_discount = product['discount']
product_files = product['files']
user_name = session['user_username']
cur.execute("SELECT id FROM users WHERE username = %s", [session['user_username']])
res = cur.fetchone()
user_id = res['id']
cur.execute("INSERT INTO orders(user_id, user_name, status, product_id, quantity,\
product_name, price, discount, files)\
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)", \
(user_id, user_name, 'Pending', product_id, 1, product_name, \
product_price, product_discount, product_files))
mysql.connection.commit()
cur.close()
flash('Added successfully to your cart', 'success')
return redirect(url_for('add_to_cart'))
return redirect(url_for('home'))
# add product to the cart from slider
@app.route('/add_product_to_cart_from_slider/<id>', methods=['post', 'get'])
@is_user_logged_in
def add_product_to_cart_from_slider(id):
cur = mysql.connection.cursor()
proid = (int(id) * int(-1))
result = cur.execute("SELECT product_name FROM orders WHERE product_id = %s AND user_name = %s ", [proid, session['user_username']])
if result > 0:
cur.close()
flash('You cannot add this product because it is already in your cart!', 'danger')
return redirect(url_for('add_to_cart'))
if result == 0:
cur.execute("SELECT * FROM slider_products WHERE id = %s", [id])
product = cur.fetchone()
# product_id = product['id']
product_id = (int(product['id']) * int(-1))
product_name = product['product_name']
product_price = product['price']
product_discount = product['discount']
product_files = product['files']
user_name = session['user_username']
cur.execute("SELECT id FROM users WHERE username = %s", [session['user_username']])
res = cur.fetchone()
user_id = res['id']
cur.execute("INSERT INTO orders(user_id, user_name, status, product_id, quantity,\
product_name, price, discount, files)\
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)", \
(user_id, user_name, 'Pending', product_id, 1, | |
# A CAN message.
import binascii
from copy import deepcopy
from .signal import NamedSignalValue
from ..utils import format_or
from ..utils import start_bit
from ..utils import encode_data
from ..utils import decode_data
from ..utils import create_encode_decode_formats
from ..errors import Error
from ..errors import EncodeError
from ..errors import DecodeError
class Message(object):
"""A CAN message with frame id, comment, signals and other
information.
If `strict` is ``True`` an exception is raised if any signals are
overlapping or if they don't fit in the message.
"""
def __init__(self,
frame_id,
name,
length,
signals,
comment=None,
senders=None,
send_type=None,
cycle_time=None,
dbc_specifics=None,
autosar_specifics=None,
is_extended_frame=False,
bus_name=None,
signal_groups=None,
strict=True,
protocol=None):
frame_id_bit_length = frame_id.bit_length()
if is_extended_frame:
if frame_id_bit_length > 29:
raise Error(
'Extended frame id 0x{:x} is more than 29 bits in '
'message {}.'.format(frame_id, name))
elif frame_id_bit_length > 11:
raise Error(
'Standard frame id 0x{:x} is more than 11 bits in '
'message {}.'.format(frame_id, name))
self._frame_id = frame_id
self._is_extended_frame = is_extended_frame
self._name = name
self._length = length
self._signals = signals
self._signals.sort(key=start_bit)
# if the 'comment' argument is a string, we assume that it is an
# English comment. this is slightly hacky because the
# function's behavior depends on the type of the passed
# argument, but it is quite convenient...
if isinstance(comment, str):
# use the first comment in the dictionary as "The" comment
self._comments = { None: comment }
else:
# assume that we have either no comment at all or a
# multi-lingual dictionary
self._comments = comment
self._senders = senders if senders else []
self._send_type = send_type
self._cycle_time = cycle_time
self._dbc = dbc_specifics
self._autosar = autosar_specifics
self._bus_name = bus_name
self._signal_groups = signal_groups
self._codecs = None
self._signal_tree = None
self._strict = strict
self._protocol = protocol
self.refresh()
def _create_codec(self, parent_signal=None, multiplexer_id=None):
"""Create a codec of all signals with given parent signal. This is a
recursive function.
"""
signals = []
multiplexers = {}
# Find all signals matching given parent signal name and given
# multiplexer id. Root signals' parent and multiplexer id are
# both None.
for signal in self._signals:
if signal.multiplexer_signal != parent_signal:
continue
if ((multiplexer_id is not None)
and (multiplexer_id not in signal.multiplexer_ids)):
continue
if signal.is_multiplexer:
children_ids = set()
for s in self._signals:
if s.multiplexer_signal != signal.name:
continue
children_ids.update(s.multiplexer_ids)
# Some CAN messages will have muxes containing only
# the multiplexer and no additional signals. At Tesla
# these are indicated in advance by assigning them an
# enumeration. Here we ensure that any named
# multiplexer is included, even if it has no child
# signals.
if signal.choices:
children_ids.update(signal.choices.keys())
for child_id in children_ids:
codec = self._create_codec(signal.name, child_id)
if signal.name not in multiplexers:
multiplexers[signal.name] = {}
multiplexers[signal.name][child_id] = codec
signals.append(signal)
return {
'signals': signals,
'formats': create_encode_decode_formats(signals,
self._length),
'multiplexers': multiplexers
}
def _create_signal_tree(self, codec):
"""Create a multiplexing tree node of given codec. This is a recursive
function.
"""
nodes = []
for signal in codec['signals']:
multiplexers = codec['multiplexers']
if signal.name in multiplexers:
node = {
signal.name: {
mux: self._create_signal_tree(mux_codec)
for mux, mux_codec in multiplexers[signal.name].items()
}
}
else:
node = signal.name
nodes.append(node)
return nodes
@property
def frame_id(self):
"""The message frame id.
"""
return self._frame_id
@frame_id.setter
def frame_id(self, value):
self._frame_id = value
@property
def is_extended_frame(self):
"""``True`` if the message is an extended frame, ``False`` otherwise.
"""
return self._is_extended_frame
@is_extended_frame.setter
def is_extended_frame(self, value):
self._is_extended_frame = value
@property
def name(self):
"""The message name as a string.
"""
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def length(self):
"""The message data length in bytes.
"""
return self._length
@length.setter
def length(self, value):
self._length = value
@property
def signals(self):
"""A list of all signals in the message.
"""
return self._signals
@property
def signal_groups(self):
"""A list of all signal groups in the message.
"""
return self._signal_groups
@signal_groups.setter
def signal_groups(self, value):
self._signal_groups = value
@property
def comment(self):
"""The message comment, or ``None`` if unavailable.
Note that we implicitly try to return the English comment if
multiple languages were specified.
"""
if self._comments is None:
return None
elif self._comments.get(None) is not None:
return self._comments.get(None)
elif self._comments.get("FOR-ALL") is not None:
return self._comments.get("FOR-ALL")
return self._comments.get('EN')
@property
def comments(self):
"""The dictionary with the descriptions of the message in multiple
languages. ``None`` if unavailable.
"""
return self._comments
@comment.setter
def comment(self, value):
self._comments = { None: value }
@comments.setter
def comments(self, value):
self._comments = value
@property
def senders(self):
"""A list of all sender nodes of this message.
"""
return self._senders
@property
def send_type(self):
"""The message send type, or ``None`` if unavailable.
"""
return self._send_type
@property
def cycle_time(self):
"""The message cycle time, or ``None`` if unavailable.
"""
return self._cycle_time
@property
def dbc(self):
"""An object containing dbc specific properties like e.g. attributes.
"""
return self._dbc
@dbc.setter
def dbc(self, value):
self._dbc = value
@property
def autosar(self):
"""An object containing AUTOSAR specific properties
e.g. auxiliary data required to implement CRCs, secure on-board
communication (secOC) or container messages.
"""
return self._autosar
@autosar.setter
def autosar(self, value):
self._autosar = value
@property
def bus_name(self):
"""The message bus name, or ``None`` if unavailable.
"""
return self._bus_name
@bus_name.setter
def bus_name(self, value):
self._bus_name = value
@property
def protocol(self):
"""The message protocol, or ``None`` if unavailable. Only one protocol
is currently supported; ``'j1939'``.
"""
return self._protocol
@protocol.setter
def protocol(self, value):
self._protocol = value
@property
def signal_tree(self):
"""All signal names and multiplexer ids as a tree. Multiplexer signals
are dictionaries, while other signals are strings.
>>> foo = db.get_message_by_name('Foo')
>>> foo.signal_tree
['Bar', 'Fum']
>>> bar = db.get_message_by_name('Bar')
>>> bar.signal_tree
[{'A': {0: ['C', 'D'], 1: ['E']}}, 'B']
"""
return self._signal_tree
def _get_mux_number(self, decoded, signal_name):
mux = decoded[signal_name]
if isinstance(mux, str) or isinstance(mux, NamedSignalValue):
signal = self.get_signal_by_name(signal_name)
mux = signal.choice_string_to_number(mux)
return mux
def _check_signals_ranges_scaling(self, signals, data):
for signal in signals:
value = data[signal.name]
# Choices are checked later.
if isinstance(value, str):
continue
if signal.minimum is not None:
if value < signal.minimum:
raise EncodeError(
"Expected signal '{}' value greater than or equal to "
"{} in message '{}', but got {}.".format(signal.name,
signal.minimum,
self._name,
value))
if signal.maximum is not None:
if value > signal.maximum:
raise EncodeError(
"Expected signal '{}' value less than or equal to "
"{} in message '{}', but got {}.".format(signal.name,
signal.maximum,
self.name,
value))
def _check_signals(self, signals, data, scaling):
for signal in signals:
if signal.name not in data:
raise EncodeError(
"Expected signal value for '{}' in data, but got {}.".format(
signal.name,
data))
if scaling:
self._check_signals_ranges_scaling(signals, data)
def _check_unknown_signals(self, signals, data):
signal_set = set(map(lambda x: x.name, signals))
for signal in data:
if signal not in signal_set:
raise EncodeError(
f"No signal named '{signal}' specified in CAN bus "
f"description database.")
def _encode(self, node, data, scaling, strict):
if strict:
self._check_signals(node['signals'], data, scaling)
encoded = encode_data(data,
node['signals'],
node['formats'],
scaling)
padding_mask = node['formats'].padding_mask
multiplexers = node['multiplexers']
all_signals = list(node['signals'])
for signal in multiplexers:
mux = self._get_mux_number(data, signal)
try:
node = multiplexers[signal][mux]
if strict:
self._check_signals(node['signals'], data, scaling)
except KeyError:
raise EncodeError('expected multiplexer id {}, but got {}'.format(
format_or(multiplexers[signal]),
mux))
mux_encoded, mux_padding_mask, mux_signals = \
self._encode(node, data, scaling, strict)
all_signals.extend(mux_signals)
encoded |= mux_encoded
padding_mask &= mux_padding_mask
return encoded, padding_mask, all_signals
def encode(self, data, scaling=True, padding=False, strict=True):
"""Encode given data as a message of this type.
If `scaling` is ``False`` no scaling of signals is performed.
If `padding` is ``True`` unused bits are encoded as 1.
If `strict` is ``True`` the specified signals must exactly be the
ones expected, and their values must be within their allowed ranges,
or an `EncodeError` exception is raised.
>>> foo = db.get_message_by_name('Foo')
>>> foo.encode({'Bar': 1, 'Fum': 5.0})
b'\\x01\\x45\\x23\\x00\\x11'
"""
encoded, padding_mask, all_signals = \
self._encode(self._codecs, data, scaling, strict=strict)
if strict:
self._check_unknown_signals(all_signals, data)
if padding:
encoded |= padding_mask
encoded |= (0x80 << (8 * self._length))
encoded = hex(encoded)[4:].rstrip('L')
return binascii.unhexlify(encoded)[:self._length]
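# Illustrative sketch (not part of this class): a typical encode/decode round
# trip, assuming a database `db` loaded elsewhere (e.g. via
# cantools.database.load_file) that contains the 'Foo' message used in the
# docstring above.
#
#   foo = db.get_message_by_name('Foo')
#   frame = foo.encode({'Bar': 1, 'Fum': 5.0})
#   decoded = foo.decode(frame)
#   # `decoded` normally recovers the signal values, e.g. {'Bar': 1, 'Fum': 5.0},
#   # although signals with choices may come back as named values when
#   # decode_choices=True.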
def _decode(self, node, data, decode_choices, scaling):
decoded = decode_data(data,
node['signals'],
node['formats'],
decode_choices,
scaling)
multiplexers = node['multiplexers']
for signal in multiplexers:
mux = self._get_mux_number(decoded, signal)
try:
node = multiplexers[signal][mux]
except KeyError:
raise DecodeError('expected multiplexer id {}, but got {}'.format(
format_or(multiplexers[signal]),
mux))
decoded.update(self._decode(node,
data,
decode_choices,
scaling))
return decoded
def decode(self, data, decode_choices=True, scaling=True):
"""Decode given data as a message of this type.
If `decode_choices` is | |
% key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'registration_id' is set
if ('registration_id' not in params) or (params['registration_id'] is None):
raise ValueError("Missing the required parameter `registration_id` when calling `get_registration_instance_launch_history`")
# verify the required parameter 'instance_id' is set
if ('instance_id' not in params) or (params['instance_id'] is None):
raise ValueError("Missing the required parameter `instance_id` when calling `get_registration_instance_launch_history`")
if 'instance_id' in params and params['instance_id'] < 0:
raise ValueError("Invalid value for parameter `instance_id` when calling `get_registration_instance_launch_history`, must be a value greater than or equal to `0`")
collection_formats = {}
resource_path = '/registrations/{registrationId}/instances/{instanceId}/launchHistory'.replace('{format}', 'json')
path_params = {}
if 'registration_id' in params:
path_params['registrationId'] = params['registration_id']
if 'instance_id' in params:
path_params['instanceId'] = params['instance_id']
query_params = {}
if 'include_history_log' in params:
query_params['includeHistoryLog'] = params['include_history_log']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['APP_NORMAL', 'OAUTH']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LaunchHistoryListSchema',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_registration_instance_progress(self, registration_id, instance_id, **kwargs):
"""
Get details of an instance of a registration.
Get registration progress for instance `instanceId` of `registrationId`.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_registration_instance_progress(registration_id, instance_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str registration_id: id for this registration (required)
:param int instance_id: The instance of this registration (required)
:param bool include_child_results: Include information about each learning object, not just the top level in the results
:param bool include_interactions_and_objectives: Include interactions and objectives in the results
:param bool include_runtime: Include runtime details in the results
:return: RegistrationSchema
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_registration_instance_progress_with_http_info(registration_id, instance_id, **kwargs)
else:
(data) = self.get_registration_instance_progress_with_http_info(registration_id, instance_id, **kwargs)
return data
def get_registration_instance_progress_with_http_info(self, registration_id, instance_id, **kwargs):
"""
Get details of an instance of a registration.
Get registration progress for instance `instanceId` of `registrationId`.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_registration_instance_progress_with_http_info(registration_id, instance_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str registration_id: id for this registration (required)
:param int instance_id: The instance of this registration (required)
:param bool include_child_results: Include information about each learning object, not just the top level in the results
:param bool include_interactions_and_objectives: Include interactions and objectives in the results
:param bool include_runtime: Include runtime details in the results
:return: RegistrationSchema
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['registration_id', 'instance_id', 'include_child_results', 'include_interactions_and_objectives', 'include_runtime']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_registration_instance_progress" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'registration_id' is set
if ('registration_id' not in params) or (params['registration_id'] is None):
raise ValueError("Missing the required parameter `registration_id` when calling `get_registration_instance_progress`")
# verify the required parameter 'instance_id' is set
if ('instance_id' not in params) or (params['instance_id'] is None):
raise ValueError("Missing the required parameter `instance_id` when calling `get_registration_instance_progress`")
if 'instance_id' in params and params['instance_id'] < 0:
raise ValueError("Invalid value for parameter `instance_id` when calling `get_registration_instance_progress`, must be a value greater than or equal to `0`")
collection_formats = {}
resource_path = '/registrations/{registrationId}/instances/{instanceId}'.replace('{format}', 'json')
path_params = {}
if 'registration_id' in params:
path_params['registrationId'] = params['registration_id']
if 'instance_id' in params:
path_params['instanceId'] = params['instance_id']
query_params = {}
if 'include_child_results' in params:
query_params['includeChildResults'] = params['include_child_results']
if 'include_interactions_and_objectives' in params:
query_params['includeInteractionsAndObjectives'] = params['include_interactions_and_objectives']
if 'include_runtime' in params:
query_params['includeRuntime'] = params['include_runtime']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['APP_NORMAL', 'OAUTH']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RegistrationSchema',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_registration_instance_statements(self, registration_id, instance_id, **kwargs):
"""
Get xAPI statements for an instance of a registration.
Get xAPI statements for instance `instanceId` of `registrationId`.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_registration_instance_statements(registration_id, instance_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str registration_id: id for this registration (required)
:param int instance_id: The instance of this registration (required)
:param datetime since: Only items updated since the specified ISO 8601 TimeStamp (inclusive) are included. If a time zone is not specified, UTC time zone will be used.
:param datetime until: Only items updated before the specified ISO 8601 TimeStamp (inclusive) are included. If a time zone is not specified, UTC time zone will be used.
:param str more: Value for this parameter will be provided in the 'more' property of registration lists, where needed. An opaque value, construction and parsing may change without notice.
:return: XapiStatementResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_registration_instance_statements_with_http_info(registration_id, instance_id, **kwargs)
else:
(data) = self.get_registration_instance_statements_with_http_info(registration_id, instance_id, **kwargs)
return data
def get_registration_instance_statements_with_http_info(self, registration_id, instance_id, **kwargs):
"""
Get xAPI statements for an instance of a registration.
Get xAPI statements for instance `instanceId` of `registrationId`.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_registration_instance_statements_with_http_info(registration_id, instance_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str registration_id: id for this registration (required)
:param int instance_id: The instance of this registration (required)
:param datetime since: Only items updated since the specified ISO 8601 TimeStamp (inclusive) are included. If a time zone is not specified, UTC time zone will be used.
:param datetime until: Only items updated before the specified ISO 8601 TimeStamp (inclusive) are included. If a time zone is not specified, UTC time zone will be used.
:param str more: Value for this parameter will be provided in the 'more' property of registration lists, where needed. An opaque value, construction and parsing may change without notice.
:return: XapiStatementResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['registration_id', 'instance_id', 'since', 'until', 'more']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_registration_instance_statements" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'registration_id' is set
if ('registration_id' not in params) or (params['registration_id'] is None):
raise ValueError("Missing the required parameter `registration_id` when calling `get_registration_instance_statements`")
# verify the required parameter 'instance_id' is set
if ('instance_id' not in params) or (params['instance_id'] is None):
raise ValueError("Missing the required parameter `instance_id` when calling `get_registration_instance_statements`")
if 'instance_id' in params and params['instance_id'] < 0:
raise ValueError("Invalid value for parameter `instance_id` when calling `get_registration_instance_statements`, must be a value greater than or equal to `0`")
collection_formats = {}
resource_path = '/registrations/{registrationId}/instances/{instanceId}/xAPIStatements'.replace('{format}', 'json')
path_params = {}
if 'registration_id' in params:
path_params['registrationId'] = params['registration_id']
if 'instance_id' in params:
path_params['instanceId'] = params['instance_id']
query_params = {}
if 'since' in params:
query_params['since'] = params['since']
if 'until' in params:
query_params['until'] = params['until']
if 'more' in params:
query_params['more'] = params['more']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header | |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC, abstractmethod
from typing import TypeVar, Generic, Iterable, List, Iterator, Dict, Tuple
from pyflink.common.typeinfo import TypeInformation, Types
__all__ = [
'ValueStateDescriptor',
'ValueState',
'ListStateDescriptor',
'ListState',
'MapStateDescriptor',
'MapState',
'ReducingStateDescriptor',
'ReducingState'
]
T = TypeVar('T')
K = TypeVar('K')
V = TypeVar('V')
IN = TypeVar('IN')
OUT = TypeVar('OUT')
class State(ABC):
"""
Interface that different types of partitioned state must implement.
"""
@abstractmethod
def clear(self) -> None:
"""
Removes the value mapped under the current key.
"""
pass
class ValueState(State, Generic[T]):
"""
:class:`State` interface for partitioned single-value state. The value can be retrieved or
updated.
The state is accessed and modified by user functions, and checkpointed consistently by the
system as part of the distributed snapshots.
"""
@abstractmethod
def value(self) -> T:
"""
Returns the current value for the state. When the state is not partitioned the returned
value is the same for all inputs in a given operator instance. If state partitioning is
applied, the value returned depends on the current operator input, as the operator
maintains an independent state for each partition.
"""
pass
@abstractmethod
def update(self, value: T) -> None:
"""
Updates the operator state accessible by :func:`value` to the given value. The next time
:func:`value` is called (for the same state partition) the returned state will represent
the updated value. When a partitioned state is updated with null, the state for the current
key will be removed and the default value is returned on the next access.
"""
pass
class AppendingState(State, Generic[IN, OUT]):
"""
Base interface for partitioned state that supports adding elements and inspecting the current
state. Elements can either be kept in a buffer (list-like) or aggregated into one value.
This state is accessed and modified by user functions, and checkpointed consistently by the
system as part of the distributed snapshots.
The state is only accessible by functions applied on a KeyedStream. The key is automatically
supplied by the system, so the function always sees the value mapped to the key of the current
element. That way, the system can handle stream and state partitioning consistently together.
"""
@abstractmethod
def get(self) -> OUT:
"""
Returns the elements under the current key.
"""
pass
@abstractmethod
def add(self, value: IN) -> None:
"""
Adds the given value to the state.
"""
pass
class MergingState(AppendingState[IN, OUT]):
"""
Extension of AppendingState that allows merging of state. That is, two instances of MergingState
can be combined into a single instance that contains all the information of the two merged
states.
"""
pass
class ReducingState(MergingState[T, T]):
"""
:class:`State` interface for reducing state. Elements can be added to the state; they will be
combined using a reduce function. The current state can be inspected.
The state is accessed and modified by user functions, and checkpointed consistently by the
system as part of the distributed snapshots.
The state is only accessible by functions applied on a KeyedStream. The key is automatically
supplied by the system, so the function always sees the value mapped to the key of the current
element. That way, the system can handle stream and state partitioning consistently together.
"""
pass
class AggregatingState(MergingState[IN, OUT]):
"""
:class:`State` interface for aggregating state, based on an
:class:`~pyflink.datastream.functions.AggregateFunction`. Elements that are added to this type
of state will be eagerly pre-aggregated using a given AggregateFunction.
The state internally always holds values of the accumulator type of the AggregateFunction. When
the result of the state is accessed, it is computed from the current accumulator with the
function's :func:`~pyflink.datastream.functions.AggregateFunction.get_result` method.
The state is accessed and modified by user functions, and checkpointed consistently by the
system as part of the distributed snapshots.
The state is only accessible by functions applied on a KeyedStream. The key is automatically
supplied by the system, so the function always sees the value mapped to the key of the current
element. That way, the system can handle stream and state partitioning consistently together.
"""
pass
class ListState(MergingState[T, Iterable[T]]):
"""
:class:`State` interface for partitioned list state in Operations.
The state is accessed and modified by user functions, and checkpointed consistently
by the system as part of the distributed snapshots.
Currently only keyed list state is supported.
When it is a keyed list state, the state key is automatically supplied by the system, so the
user function always sees the value mapped to the key of the current element. That way, the
system can handle stream and state partitioning consistently together.
"""
@abstractmethod
def update(self, values: List[T]) -> None:
"""
Updates the existing values to the given list of values.
"""
pass
@abstractmethod
def add_all(self, values: List[T]) -> None:
"""
Adds the given values to the tail of this list state.
"""
pass
def __iter__(self) -> Iterator[T]:
return iter(self.get())
class MapState(State, Generic[K, V]):
"""
:class:`State` interface for partitioned key-value state. The key-value pair can be added,
updated and retrieved.
The state is accessed and modified by user functions, and checkpointed consistently by the
system as part of the distributed snapshots.
The state key is automatically supplied by the system, so the function always sees the value
mapped to the key of the current element. That way, the system can handle stream and state
partitioning consistently together.
"""
@abstractmethod
def get(self, key: K) -> V:
"""
Returns the current value associated with the given key.
"""
pass
@abstractmethod
def put(self, key: K, value: V) -> None:
"""
Associates a new value with the given key.
"""
pass
@abstractmethod
def put_all(self, dict_value: Dict[K, V]) -> None:
"""
Copies all of the mappings from the given map into the state.
"""
pass
@abstractmethod
def remove(self, key: K) -> None:
"""
Deletes the mapping of the given key.
"""
pass
@abstractmethod
def contains(self, key: K) -> bool:
"""
Returns whether the given key exists in the state.
"""
pass
@abstractmethod
def items(self) -> Iterable[Tuple[K, V]]:
"""
Returns all the mappings in the state.
"""
pass
@abstractmethod
def keys(self) -> Iterable[K]:
"""
Returns all the keys in the state.
"""
pass
@abstractmethod
def values(self) -> Iterable[V]:
"""
Returns all the values in the state.
"""
pass
@abstractmethod
def is_empty(self) -> bool:
"""
Returns true if this state contains no key-value mappings, otherwise false.
"""
pass
def __getitem__(self, key: K) -> V:
return self.get(key)
def __setitem__(self, key: K, value: V) -> None:
self.put(key, value)
def __delitem__(self, key: K) -> None:
self.remove(key)
def __contains__(self, key: K) -> bool:
return self.contains(key)
def __iter__(self) -> Iterator[K]:
return iter(self.keys())
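# Hedged usage sketch (illustrative only, not part of this module): thanks to the
# dunder methods above, a MapState handle obtained from the runtime context can be
# used much like a dict. The descriptor arguments and variable names below are
# assumptions for illustration.
#
#   counts = runtime_context.get_map_state(
#       MapStateDescriptor("counts", Types.STRING(), Types.LONG()))
#   if "word" in counts:          # __contains__
#       counts["word"] += 1       # __getitem__ / __setitem__
#   else:
#       counts["word"] = 1
#   for key in counts:            # __iter__ over keys()
#       pass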
class StateDescriptor(ABC):
"""
Base class for state descriptors. A StateDescriptor is used for creating partitioned State in
stateful operations.
"""
def __init__(self, name: str, type_info: TypeInformation):
"""
Constructor for StateDescriptor.
:param name: The name of the state
:param type_info: The type information of the value.
"""
self.name = name
self.type_info = type_info
def get_name(self) -> str:
"""
Get the name of the state.
:return: The name of the state.
"""
return self.name
class ValueStateDescriptor(StateDescriptor):
"""
StateDescriptor for ValueState. This can be used to create partitioned value state using
RuntimeContext.get_state(ValueStateDescriptor).
"""
def __init__(self, name: str, value_type_info: TypeInformation):
"""
Constructor of the ValueStateDescriptor.
:param name: The name of the state.
:param value_type_info: the type information of the state.
"""
super(ValueStateDescriptor, self).__init__(name, value_type_info)
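# Hedged usage sketch (illustrative only, not part of this module): a descriptor is
# typically registered once in `open()` of a keyed function, and the resulting
# ValueState handle is read and updated per element. The imported base class is an
# assumption for illustration.
#
#   from pyflink.datastream.functions import KeyedProcessFunction
#
#   class CountPerKey(KeyedProcessFunction):
#       def open(self, runtime_context):
#           self.count = runtime_context.get_state(
#               ValueStateDescriptor("count", Types.LONG()))
#
#       def process_element(self, value, ctx):
#           current = self.count.value() or 0
#           self.count.update(current + 1)
#           yield value, current + 1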
class ListStateDescriptor(StateDescriptor):
"""
StateDescriptor for ListState. This can be used to create state where the type is a list that
can be appended
extended_attention_mask = self.get_extended_attention_mask(input_ids, token_type_ids, attention_mask)
if input_ids.size(1)>len_vis_input:
img_embed_out = self.img_embeddings(input_ids[:, :len_vis_input+2], img, img_pos, token_type_ids[:, :len_vis_input+2]) # img_embed_out: torch.Size([32, 5, 768])
txt_embed_out = self.txt_embeddings(input_ids[:, len_vis_input+2:], token_type_ids[:, len_vis_input+2:]) # txt_embed_out: torch.Size([32, 507, 768])
embedding_output = torch.cat([img_embed_out, txt_embed_out], 1) # TODO: Check B x (TXT + IMG) x HID
else:
txt_embed_out = self.txt_embeddings(input_ids, token_type_ids) # txt_embed_out: torch.Size([32, 507, 768])
embedding_output = torch.cat([txt_embed_out], 1) # TODO: Check B x (TXT + IMG) x HID
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return embedding_output, encoded_layers, pooled_output
""" for VLP, based on UniLM """
class BertForSeq2SeqDecoder(PreTrainedBertModel):
"""refer to BertForPreTraining"""
def __init__(self, config, args, mask_word_id=0, num_labels=2,
search_beam_size=1, length_penalty=1.0, eos_id=0,
forbid_duplicate_ngrams=False, forbid_ignore_set=None,
ngram_size=3, min_len=0, len_vis_input=None):
super(BertForSeq2SeqDecoder, self).__init__(config)
bert = BertModelIncr(config, args)
self.bert = CXRBertDecoder(config,args)
self.cls = BertPreTrainingHeads(config, bert.embeddings.word_embeddings.weight, num_labels=num_labels)
self.apply(self.init_bert_weights)
self.crit_mask_lm = nn.CrossEntropyLoss(reduction = 'mean',ignore_index=0)
self.mask_word_id = mask_word_id
self.num_labels = num_labels
self.len_vis_input = len_vis_input
self.search_beam_size = search_beam_size
self.length_penalty = length_penalty
self.eos_id = eos_id
self.forbid_duplicate_ngrams = forbid_duplicate_ngrams
self.forbid_ignore_set = forbid_ignore_set
self.ngram_size = ngram_size
self.min_len = min_len
def forward(self, vis_feats, _, input_ids, token_type_ids, position_ids, attention_mask, gt_token, device, task_idx=None, sample_mode='greedy',):
if self.search_beam_size > 1:
return self.beam_search(vis_feats, input_ids, token_type_ids, position_ids, attention_mask, gt_token, device, task_idx)
input_shape = list(input_ids.size())
batch_size = input_shape[0]
input_length = input_shape[1]
output_shape = list(token_type_ids.size())
output_length = output_shape[1]
output_ids = []
output_probs = []
total_cross_entropy_loss = []
prev_embedding = None
prev_encoded_layers = None
curr_ids = input_ids
mask_ids = input_ids[:, :1] * 0 + self.mask_word_id
next_pos = input_length
while next_pos < output_length:
curr_length = list(curr_ids.size())[1]
start_pos = next_pos - curr_length
a = gt_token.tolist()
if curr_length == 1:# and start_pos-258 < len(a[0]):
a_list = []
for itr in range(0,torch.tensor(a).size()[0]):
a_list.append([a[itr][start_pos-258]])
b = torch.cat([torch.tensor(a_list)], dim=1).to(device)
x_input_ids = torch.cat((b, mask_ids), dim=1)
else:
x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)
gt_id = curr_ids.new_tensor([a[0]])
curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
curr_attention_mask = attention_mask[:,
start_pos:next_pos + 1, :next_pos + 1]
curr_position_ids = position_ids[:, start_pos:next_pos + 1]
new_embedding, new_encoded_layers, _ = \
self.bert(vis_feats, x_input_ids, curr_token_type_ids, curr_position_ids,
curr_attention_mask, prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers,
output_all_encoded_layers=True, len_vis_input=self.len_vis_input)
last_hidden = new_encoded_layers[-1][:, -1:, :]
prediction_scores, _ = self.cls(
last_hidden, None, task_idx=task_idx)
if sample_mode == 'greedy':
max_probs, max_ids = torch.max(prediction_scores, dim=-1)
total_cross_entropy_loss.append(prediction_scores)
elif sample_mode == 'sample':
prediction_scores.squeeze_(1)
prediction_probs = F.softmax(prediction_scores, dim=-1).detach()
max_ids = torch.multinomial(prediction_probs, num_samples=1,
replacement=True)
max_probs = torch.gather(F.log_softmax(prediction_scores, dim=-1),
1, max_ids) # this should be logprobs
else:
raise NotImplementedError
output_ids.append(max_ids)
output_probs.append(max_probs)
if prev_embedding is None:
prev_embedding = new_embedding[:, :-1, :]
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding[:, :-1, :]), dim=1)
if prev_encoded_layers is None:
prev_encoded_layers = [x[:, :-1, :]
for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
for x in zip(prev_encoded_layers, new_encoded_layers)]
curr_ids = max_ids
next_pos += 1
return torch.cat(output_ids, dim=1), torch.cat(output_probs, dim=1), torch.cat(total_cross_entropy_loss, dim=1)
def beam_search(self, vis_feats, input_ids, token_type_ids, position_ids, attention_mask, gt_token, device, task_idx=None):
input_shape = list(input_ids.size())
batch_size = input_shape[0]
input_length = input_shape[1]
output_shape = list(token_type_ids.size())
output_length = output_shape[1]
output_ids = []
prev_embedding = None
prev_encoded_layers = None
curr_ids = input_ids
mask_ids = input_ids[:, :1] * 0 + self.mask_word_id
next_pos = input_length
K = self.search_beam_size
total_scores = []
beam_masks = []
step_ids = []
step_back_ptrs = []
partial_seqs = []
forbid_word_mask = None
buf_matrix = None
total_cross_entropy_loss = []
while next_pos < output_length:
curr_length = list(curr_ids.size())[1]
start_pos = next_pos - curr_length
x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)
curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
curr_attention_mask = attention_mask[:,
start_pos:next_pos + 1, :next_pos + 1]
curr_position_ids = position_ids[:, start_pos:next_pos + 1]
new_embedding, new_encoded_layers, _ = \
self.bert(vis_feats, x_input_ids, curr_token_type_ids, curr_position_ids,
curr_attention_mask, prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers,
output_all_encoded_layers=True, len_vis_input=self.len_vis_input)
last_hidden = new_encoded_layers[-1][:, -1:, :]
prediction_scores, _ = self.cls(
last_hidden, None, task_idx=task_idx)
log_scores = torch.nn.functional.log_softmax(
prediction_scores, dim=-1)
if forbid_word_mask is not None:
log_scores += (forbid_word_mask * -10000.0)
if self.min_len and (next_pos-input_length+1 <= self.min_len):
log_scores[:, :, self.eos_id].fill_(-10000.0)
kk_scores, kk_ids = torch.topk(log_scores, k=K)
if len(total_scores) == 0:
k_ids = torch.reshape(kk_ids, [batch_size, K])
back_ptrs = torch.zeros(batch_size, K)#, dtype=torch.long)
back_ptrs = back_ptrs.type(torch.cuda.LongTensor)
k_scores = torch.reshape(kk_scores, [batch_size, K])
else:
last_eos = torch.reshape(
beam_masks[-1], [batch_size * K, 1, 1])
last_seq_scores = torch.reshape(
total_scores[-1], [batch_size * K, 1, 1])
kk_scores += last_eos * (-10000.0) + last_seq_scores
kk_scores = torch.reshape(kk_scores, [batch_size, K * K])
k_scores, k_ids = torch.topk(kk_scores, k=K)
back_ptrs = torch.div(k_ids, K)#, dtype=torch.int64)
back_ptrs = back_ptrs.type(torch.cuda.LongTensor)
kk_ids = torch.reshape(kk_ids, [batch_size, K * K])
k_ids = torch.gather(kk_ids, 1, k_ids)
step_back_ptrs.append(back_ptrs)
step_ids.append(k_ids)
beam_masks.append(torch.eq(k_ids, self.eos_id).float())
total_scores.append(k_scores)
def first_expand(x):
# Tile each batch entry K times along a new beam dimension: (B, ...) -> (B*K, ...).
input_shape = list(x.size())
expanded_shape = input_shape[:1] + [1] + input_shape[1:]
x = torch.reshape(x, expanded_shape)
repeat_count = [1, K] + [1] * (len(input_shape) - 1)
x = x.repeat(*repeat_count)
x = torch.reshape(x, [input_shape[0] * K] + input_shape[1:])
return x
def select_beam_items(x, ids):
# Reorder the beam axis of x according to the chosen back-pointers ids of shape (B, K).
id_shape = list(ids.size())
id_rank = len(id_shape)
assert len(id_shape) == 2
x_shape = list(x.size())
x = torch.reshape(x, [batch_size, K] + x_shape[1:])
x_rank = len(x_shape) + 1
assert x_rank >= 2
if id_rank < x_rank:
ids = torch.reshape(
ids, id_shape + [1] * (x_rank - id_rank))
ids = ids.expand(id_shape + x_shape[1:])
y = torch.gather(x, 1, ids)
y = torch.reshape(y, x_shape)
return y
is_first = (prev_embedding is None)
if prev_embedding is None:
prev_embedding = first_expand(new_embedding[:, :-1, :])
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding[:, :-1, :]), dim=1)
prev_embedding = select_beam_items(prev_embedding, back_ptrs)
if prev_encoded_layers is None:
prev_encoded_layers = [first_expand(
x[:, :-1, :]) for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
for x in zip(prev_encoded_layers, new_encoded_layers)]
prev_encoded_layers = [select_beam_items(
x, back_ptrs) for x in prev_encoded_layers]
curr_ids = torch.reshape(k_ids, [batch_size * K, 1])
if is_first:
token_type_ids = first_expand(token_type_ids)
position_ids = first_expand(position_ids)
attention_mask = first_expand(attention_mask)
mask_ids = first_expand(mask_ids)
if self.forbid_duplicate_ngrams:
wids = step_ids[-1].tolist()
ptrs = step_back_ptrs[-1].tolist()
if is_first:
partial_seqs = []
for b in range(batch_size):
for k in range(K):
partial_seqs.append([wids[b][k]])
else:
new_partial_seqs = []
for b in range(batch_size):
for k in range(K):
new_partial_seqs.append(
partial_seqs[ptrs[b][k] + b * K] + [wids[b][k]])
partial_seqs = new_partial_seqs
def get_dup_ngram_candidates(seq, n):
cands = set()
if len(seq) < n:
return []
tail = seq[-(n-1):]
if self.forbid_ignore_set and any(tk in self.forbid_ignore_set for tk in tail):
return []
for i in range(len(seq) - (n - 1)):
mismatch = False
for j in range(n - 1):
if tail[j] != seq[i + j]:
mismatch = True
break
if (not mismatch) and not(self.forbid_ignore_set and (seq[i + n - 1] in self.forbid_ignore_set)):
cands.add(seq[i + n - 1])
return list(sorted(cands))
if len(partial_seqs[0]) >= self.ngram_size:
dup_cands = []
for seq in partial_seqs:
dup_cands.append(
get_dup_ngram_candidates(seq, self.ngram_size))
if max(len(x) for x in dup_cands) > 0:
if buf_matrix is None:
vocab_size = list(log_scores.size())[-1]
buf_matrix = np.zeros(
(batch_size * K, vocab_size), dtype=float)
else:
buf_matrix.fill(0)
for bk, cands in enumerate(dup_cands):
for i, wid in enumerate(cands):
buf_matrix[bk, wid] = 1.0
forbid_word_mask = torch.tensor(
buf_matrix, dtype=log_scores.dtype)
forbid_word_mask = torch.reshape(
forbid_word_mask, [batch_size * K, 1, vocab_size]).cuda()
else:
forbid_word_mask = None
next_pos += 1
total_scores = [x.tolist() for x in total_scores]
step_ids = [x.tolist() for x in step_ids]
step_back_ptrs = [x.tolist() for x in step_back_ptrs]
# back tracking
traces = {'pred_seq': [], 'scores': [], 'wids': [], 'ptrs': []}
for b in range(batch_size):
# [(beam,)]
scores = [x[b] for x in total_scores]
wids_list = [x[b] for x in step_ids]
ptrs = [x[b] for x in step_back_ptrs]
traces['scores'].append(scores)
traces['wids'].append(wids_list)
traces['ptrs'].append(ptrs)
last_frame_id = len(scores) - 1
for i, wids in enumerate(wids_list):
if all(wid == self.eos_id for wid in wids):
last_frame_id = i
break
max_score = -math.inf
frame_id = -1
pos_in_frame = -1
for fid in range(last_frame_id + 1):
for i, wid in enumerate(wids_list[fid]):
if wid == self.eos_id or fid == last_frame_id:
s = scores[fid][i] + self.length_penalty * (fid + 1)
if s > max_score:
max_score = s
frame_id = fid
pos_in_frame = i
if frame_id == -1:
traces['pred_seq'].append([0])
else:
seq = [wids_list[frame_id][pos_in_frame]]
for fid in range(frame_id, 0, -1):
pos_in_frame = ptrs[fid][pos_in_frame]
seq.append(wids_list[fid - 1][pos_in_frame])
seq.reverse()
traces['pred_seq'].append(seq)
def _pad_sequence(sequences, max_len, padding_value=0):
trailing_dims = sequences[0].size()[1:]
out_dims = (len(sequences), max_len) + trailing_dims
out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
for i, tensor in enumerate(sequences):
length = tensor.size(0)
out_tensor[i, :length, ...] = tensor
return out_tensor
for k in ('pred_seq','scores', 'wids', 'ptrs'):
ts_list = traces[k]
if not isinstance(ts_list[0], torch.Tensor):
dt = torch.float if k == 'scores' else torch.long
from testutils import assert_raises
x = [1, 2, 3]
assert x[0] == 1
assert x[1] == 2
# assert x[7]
y = [2, *x]
assert y == [2, 1, 2, 3]
y.extend(x)
assert y == [2, 1, 2, 3, 1, 2, 3]
assert x * 0 == [], "list __mul__ by 0 failed"
assert x * -1 == [], "list __mul__ by -1 failed"
assert x * 2 == [1, 2, 3, 1, 2, 3], "list __mul__ by 2 failed"
# index()
assert ['a', 'b', 'c'].index('b') == 1
assert [5, 6, 7].index(7) == 2
assert_raises(ValueError, lambda: ['a', 'b', 'c'].index('z'))
x = [[1,0,-3], 'a', 1]
y = [[3,2,1], 'z', 2]
assert x < y, "list __lt__ failed"
x = [5, 13, 31]
y = [1, 10, 29]
assert x > y, "list __gt__ failed"
x = [0, 1, 2]
assert x.pop() == 2
assert x == [0, 1]
def test_pop(lst, idx, value, new_lst):
assert lst.pop(idx) == value
assert lst == new_lst
test_pop([0, 1, 2], -1, 2, [0, 1])
test_pop([0, 1, 2], 0, 0, [1, 2])
test_pop([0, 1, 2], 1, 1, [0, 2])
test_pop([0, 1, 2], 2, 2, [0, 1])
assert_raises(IndexError, lambda: [].pop())
assert_raises(IndexError, lambda: [].pop(0))
assert_raises(IndexError, lambda: [].pop(-1))
assert_raises(IndexError, lambda: [0].pop(1))
assert_raises(IndexError, lambda: [0].pop(-2))
recursive = []
recursive.append(recursive)
assert repr(recursive) == "[[...]]"
# insert()
x = ['a', 'b', 'c']
x.insert(0, 'z') # insert is in-place, no return value
assert x == ['z', 'a', 'b', 'c']
x = ['a', 'b', 'c']
x.insert(100, 'z')
assert x == ['a', 'b', 'c', 'z']
x = ['a', 'b', 'c']
x.insert(-1, 'z')
assert x == ['a', 'b', 'z', 'c']
x = ['a', 'b', 'c']
x.insert(-100, 'z')
assert x == ['z', 'a', 'b', 'c']
assert_raises(OverflowError, lambda: x.insert(100000000000000000000, 'z'))
x = [[], 2, {}]
y = x.copy()
assert x is not y
assert x == y
assert all(a is b for a, b in zip(x, y))
y.append(4)
assert x != y
a = [1, 2, 3]
assert len(a) == 3
a.remove(1)
assert len(a) == 2
assert not 1 in a
assert_raises(ValueError, lambda: a.remove(10), 'Remove not exist element')
foo = bar = [1]
foo += [2]
assert (foo, bar) == ([1, 2], [1, 2])
x = [1]
x.append(x)
assert x in x
assert x.index(x) == 1
assert x.count(x) == 1
x.remove(x)
assert x not in x
class Foo(object):
def __eq__(self, x):
return False
foo = Foo()
foo1 = Foo()
x = [1, foo, 2, foo, []]
assert x == x
assert foo in x
assert 2 in x
assert x.index(foo) == 1
assert x.count(foo) == 2
assert x.index(2) == 2
assert [] in x
assert x.index([]) == 4
assert foo1 not in x
x.remove(foo)
assert x.index(foo) == 2
assert x.count(foo) == 1
x = []
x.append(x)
assert x == x
a = [1, 2, 3]
b = [1, 2, 3]
c = [a, b]
a.append(c)
b.append(c)
assert a == b
assert [foo] == [foo]
for size in [1, 2, 3, 4, 5, 8, 10, 100, 1000]:
lst = list(range(size))
orig = lst[:]
lst.sort()
assert lst == orig
assert sorted(lst) == orig
assert_raises(ZeroDivisionError, lambda: sorted(lst, key=lambda x: 1/x))
lst.reverse()
assert sorted(lst) == orig
assert sorted(lst, reverse=True) == lst
assert sorted(lst, key=lambda x: -x) == lst
assert sorted(lst, key=lambda x: -x, reverse=True) == orig
assert sorted([(1, 2, 3), (0, 3, 6)]) == [(0, 3, 6), (1, 2, 3)]
assert sorted([(1, 2, 3), (0, 3, 6)], key=lambda x: x[0]) == [(0, 3, 6), (1, 2, 3)]
assert sorted([(1, 2, 3), (0, 3, 6)], key=lambda x: x[1]) == [(1, 2, 3), (0, 3, 6)]
assert sorted([(1, 2), (), (5,)], key=len) == [(), (5,), (1, 2)]
lst = [3, 1, 5, 2, 4]
class C:
def __init__(self, x): self.x = x
def __lt__(self, other): return self.x < other.x
lst.sort(key=C)
assert lst == [1, 2, 3, 4, 5]
lst = [3, 1, 5, 2, 4]
class C:
def __init__(self, x): self.x = x
def __gt__(self, other): return self.x > other.x
lst.sort(key=C)
assert lst == [1, 2, 3, 4, 5]
lst = [5, 1, 2, 3, 4]
def f(x):
lst.append(1)
return x
assert_raises(ValueError, lambda: lst.sort(key=f)) # "list modified during sort"
assert lst == [1, 2, 3, 4, 5]
# __delitem__
x = ['a', 'b', 'c']
del x[0]
assert x == ['b', 'c']
x = ['a', 'b', 'c']
del x[-1]
assert x == ['a', 'b']
x = y = [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15]
del x[2:14:3]
assert x == y
assert x == [1, 2, 4, 5, 7, 8, 11, 12, 14, 15]
assert y == [1, 2, 4, 5, 7, 8, 11, 12, 14, 15]
x = [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15]
del x[-5:]
assert x == [1, 2, 3, 4, 5, 6, 7, 8, 10]
x = list(range(12))
del x[10:2:-2]
assert x == [0,1,2,3,5,7,9,11]
def bad_del_1():
del ['a', 'b']['a']
assert_raises(TypeError, bad_del_1)
def bad_del_2():
del ['a', 'b'][2]
assert_raises(IndexError, bad_del_2)
# __setitem__
# simple index
x = [1, 2, 3, 4, 5]
x[0] = 'a'
assert x == ['a', 2, 3, 4, 5]
x[-1] = 'b'
assert x == ['a', 2, 3, 4, 'b']
# make sure references are assigned correctly
y = []
x[1] = y
y.append(100)
assert x[1] == y
assert x[1] == [100]
#index bounds
def set_index_out_of_bounds_high():
x = [0, 1, 2, 3, 4]
x[5] = 'a'
def set_index_out_of_bounds_low():
x = [0, 1, 2, 3, 4]
x[-6] = 'a'
assert_raises(IndexError, set_index_out_of_bounds_high)
assert_raises(IndexError, set_index_out_of_bounds_low)
# non stepped slice index
a = list(range(10))
x = a[:]
y = a[:]
assert x == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# replace whole list
x[:] = ['a', 'b', 'c']
y[::1] = ['a', 'b', 'c']
assert x == ['a', 'b', 'c']
assert x == y
# splice list start
x = a[:]
y = a[:]
z = a[:]
zz = a[:]
x[:1] = ['a', 'b', 'c']
y[0:1] = ['a', 'b', 'c']
z[:1:1] = ['a', 'b', 'c']
zz[0:1:1] = ['a', 'b', 'c']
assert x == ['a', 'b', 'c', 1, 2, 3, 4, 5, 6, 7, 8, 9]
assert x == y
assert x == z
assert x == zz
# splice list end
x = a[:]
y = a[:]
z = a[:]
zz = a[:]
x[5:] = ['a', 'b', 'c']
y[5::1] = ['a', 'b', 'c']
z[5:10] = ['a', 'b', 'c']
zz[5:10:1] = ['a', 'b', 'c']
assert x == [0, 1, 2, 3, 4, 'a', 'b', 'c']
assert x == y
assert x == z
assert x == zz
# insert sec
x = a[:]
y = a[:]
z = a[:]
zz = a[:]
x[1:1] = ['a', 'b', 'c']
y[1:0] = ['a', 'b', 'c']
z[1:1:1] = ['a', 'b', 'c']
zz[1:0:1] = ['a', 'b', 'c']
assert x == [0, 'a', 'b', 'c', 1, 2, 3, 4, 5, 6, 7, 8, 9]
assert x == y
assert x == z
assert x == zz
# same but negative indexes?
x = a[:]
y = a[:]
z = a[:]
zz = a[:]
x[-1:-1] = ['a', 'b', 'c']
y[-1:9] = ['a', 'b', 'c']
z[-1:-1:1] = ['a', 'b', 'c']
zz[-1:9:1] = ['a', 'b', 'c']
assert x == [0, 1, 2, 3, 4, 5, 6, 7, 8, 'a', 'b', 'c', 9]
assert x == y
assert x == z
assert x == zz
# splice mid
x = a[:]
y = a[:]
x[3:5] = ['a', 'b', 'c', 'd', 'e']
y[3:5:1] = ['a', 'b', 'c', 'd', 'e']
assert x == [0, 1, 2, 'a', 'b', 'c', 'd', 'e', 5, 6, 7, 8, 9]
assert x == y
x = a[:]
x[3:5] = ['a']
assert x == [0, 1, 2, 'a', 5, 6, 7, 8, 9]
# assign empty to non stepped empty slice does nothing
x = a[:]
y = a[:]
x[5:2] = []
y[5:2:1] = []
assert x == a
assert y == a
# assign empty to non stepped slice removes elems
x = a[:]
y = a[:]
x[2:8] = []
y[2:8:1] = []
assert x == [0, 1, 8, 9]
assert x == y
# make sure references are assigned correctly
yy = []
x = a[:]
y = a[:]
x[3:5] = ['a', 'b', 'c', 'd', yy]
y[3:5:1] = ['a', 'b', 'c', 'd', yy]
assert x == [0, 1, 2, 'a', 'b', 'c', 'd', [], 5, 6, 7, 8, 9]
assert x == y
yy.append(100)
assert x == [0, 1, 2, 'a', 'b', 'c', 'd', [100], 5, 6, 7, 8, 9]
assert x == y
assert x[7] == yy
assert x[7] == [100]
assert y[7] == yy
assert y[7] == [100]
# no zero step
def no_zero_step_set():
x = [1, 2, 3, 4, 5]
x[0:4:0] = [11, 12, 13, 14, 15]
assert_raises(ValueError, no_zero_step_set)
# stepped slice index
# forward slice
x = a[:]
x[2:8:2] = ['a', 'b', 'c']
assert x == [0, 1, 'a', 3, 'b', 5, 'c', 7, 8, 9]
x = a[:]
y = a[:]
z = a[:]
zz = a[:]
c = ['a', 'b', 'c', 'd', 'e']
x[::2] = c
y[-10::2] = c
z[0:10:2] = c
zz[-13:13:2] = c # slice indexes will be truncated to bounds
assert x == ['a', 1, 'b', 3, 'c', 5, 'd', 7, 'e', 9]
assert x == y
assert x == z
assert x == zz
# backward slice
x = a[:]
x[8:2:-2] = ['a', 'b', 'c']
assert x == [0, 1, 2, 3, 'c', 5, 'b', 7, 'a', 9]
x = a[:]
y = a[:]
z = a[:]
zz = a[:]
c = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
#! /usr/bin/python
## SIMULATION OF A SYSTEM AT CONSTANT TEMPERATURE
##### PLEASE READ ALL THE COMMENTS!!!!
#### REMEMBER THAT IN PYTHON INDEXING BEGINS AT 0 !!!
###################################################################
## If initial forces are set to zero because the initial geometry #
## is a stationary point #
## !! IT IS DONE IN THE INSTANTIATION !! #
##################################################################
import Mytools as my
import numpy as np
import os
import random as rnd
from math import *
import sys
__metaclass__= type
############### CLASS DEFINITION ###########################
class Hessian_problem(Exception):
pass
class Point:
""" Represent a point in a space of three dimensions.
attributes: x,y,z."""
def __init__(self, x = 0.0, y = 0.0, z = 0.0):
self.x = x
self.y = y
self.z = z
def __str__(self):
return'%g, %g, %g' % (self.x, self.y, self.z)
class Atoms:
"""Represent an atom in a molecule.
attributes: mass, cart, vel, force"""
def __init__(self, mass = 0.0):
self.mass = mass
self.cart = Point()
self.vel = Point()
self.force = Point()
def momenta(self,mv):
t1 = 0.5 * mv[0] * mv[0] /self.mass
t2 = 0.5 * mv[1] * mv[1] /self.mass
t3 = 0.5 * mv[2] * mv[2] /self.mass
return t1+t2+t3
def scaling_A(self,factor,mv):
self.vel.x += factor*mv[0]/self.mass
self.vel.y += factor*mv[1]/self.mass
self.vel.z += factor*mv[2]/self.mass
def scaling_B(self,factor):
self.vel.x = factor*self.vel.x
self.vel.y = factor*self.vel.y
self.vel.z = factor*self.vel.z
def gradiente(self,vector):
self.force.x = -vector[0]
self.force.y = -vector[1]
self.force.z = -vector[2]
def cinetica(self):
k = 0.5*my.cuad(self.vel,self.vel)*self.mass
return k
class Molecule:
"""Contains all the internal and cartesian coordinates of the atoms.
attributes: atoms, bonds, angles, dihedrals, b0, a0, d0"""
def __init__(self,all_atoms_cart = []):
self.cart = all_atoms_cart
def internas_bonds(self,conex,nbond):
self.bonds = [my.enlaces(self.cart[conex[i][0]-1], self.cart[conex[i][1]-1]) \
for i in range(nbond)]
def internas_ang(self,conex,nang):
self.angles = [my.angulos(self.cart[conex[i][0]-1], self.cart[conex[i][1]-1], \
self.cart[conex[i][2]-1]) for i in range(nang)]
def internas_dihe(self,conex,ndih):
try:
self.qt_1 = self.dihedrals
self.dihedrals = [my.dihedros(self.cart[conex[i][0]-1], self.cart[conex[i][1]-1], \
self.cart[conex[i][2]-1], self.cart[conex[i][3]-1],self.qt_1[i]) for i in range(ndih)]
except AttributeError:
self.qt_1 = [0.0 for w in range(ndih)]
self.dihedrals = [my.dihedros(self.cart[conex[i][0]-1], self.cart[conex[i][1]-1], \
self.cart[conex[i][2]-1], self.cart[conex[i][3]-1],self.qt_1[i]) for i in range(ndih)]
def potential(self,mtx_transp,grad_int,mtx_Hess):
qo = self.b0 + self.a0 + self.d0
q1 = self.bonds + self.angles + self.dihedrals
Q = np.array(([q1[i] - qo[i] for i in range(len(qo))]), dtype = float)
pot = grad_int+np.dot(mtx_Hess,Q)
Gx = np.dot(mtx_transp,pot)
return Gx,pot
def second_state_pot(self,vector_grad,mtx_Hess,gap):
qo = self.b0 + self.a0 + self.d0
q1 = self.bonds + self.angles + self.dihedrals
Q = np.array(([q1[i] - qo[i] for i in range(len(qo))]), dtype = float)
product1 = np.dot(Q,vector_grad)
product2 = np.dot(Q,np.dot(mtx_Hess,Q))
gap_dynamic = gap + product1 + 0.5 * product2
return gap_dynamic
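# Clarifying note (added; not in the original): second_state_pot evaluates the
# second-order Taylor expansion of the energy gap around the reference geometry
# in redundant internal coordinates,
#     gap(Q) = gap_0 + g . dQ + 1/2 * dQ^T H dQ,   dQ = q1 - q0,
# where g and H are the gap gradient and Hessian in internal coordinates.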
def E_potencial(self,potencial):
qo = self.b0 + self.a0 + self.d0
q1 = self.bonds + self.angles + self.dihedrals
Q = np.array(([q1[i] - qo[i] for i in range(len(qo))]), dtype = float)
E = 0.5 * np.dot(Q,potencial)
return E
def output(self,file,counter,number_atoms,element,EA,ET):
if counter % 20 == 0: # geometries saved
file.write('%g \n' % number_atoms)
file.write(' EA %g ET:1 %g ET:2 %g \n' % (EA,ET[0],ET[1]))
for i in range(number_atoms):
if i ==0:
file.write('%s 0. 0. 0. \n' % (element[i]))
else:
x = (self.cart[i].x-self.cart[0].x)*my.a0
y = (self.cart[i].y-self.cart[0].y)*my.a0
z = (self.cart[i].z-self.cart[0].z)*my.a0
file.write('%s %g %g %g \n' % (element[i],x,y,z))
def rangos(self,Hessii,Etot,n_interv):
""" 0.5*k*DQ^2 = E_tot"""
my.hess_check(Hessii)
d = [dict() for n in range(len(Hessii))]
DQ = [sqrt(2.0 * Etot/hii) for hii in Hessii]
n1 = 0
for bond in self.bonds:
n2 = 0
low = self.b0[n1] - DQ[n1]; interv = 2.*DQ[n1] /float(n_interv)
for i in range(n_interv):
rango = (low + i*interv, low + (i+1)*interv)
d[n1][rango] = 0
n1 += 1
n1 = len(self.b0)
for ang in self.angles:
low = self.a0[n1-len(self.b0)] - DQ[n1]; interv = 2.*DQ[n1] /float(n_interv)
for i in range(n_interv):
rango = (low + i*interv, low + (i+1)*interv)
d[n1][rango] = 0
n1 += 1
n1 = len(self.b0) + len(self.a0)
cte =len(self.b0) + len(self.a0)
for dieh in self.dihedrals:
low = self.d0[n1-cte] - DQ[n1]; interv = 2.*DQ[n1] /float(n_interv)
for i in range(n_interv):
rango = (low + i*interv, low + (i+1)*interv)
d[n1][rango] = 0
n1 += 1
return d
def histogram(self,freq):
n1 = 0
for bond in self.bonds:
for key in freq[n1]:
if key[0] <= bond and bond < key[1]:
freq[n1][key] += 1
n1 += 1
n1 = len(self.b0)
for ang in self.angles:
for key in freq[n1]:
if key[0] <= ang and ang < key[1]:
freq[n1][key] += 1
n1 += 1
n1 = len(self.b0) + len(self.a0)
for dieh in self.dihedrals:
for key in freq[n1]:
if key[0] <= dieh and dieh < key[1]:
freq[n1][key] += 1
n1 += 1
return freq
class Thermostat:
def __init__(self, Q1 = 0., Q2 = 0., x1 = 0., vx1 = 0., x2 = 0., vx2 = 0.):
self.Q1 = Q1
self.Q2 = Q2
self.x1 = x1
self.vx1 = vx1
self.x2 = x2
self.vx2 = vx2
self.scale = 1.
def Freq(ET,D,FC,factor,Low,High):
if ET > Low and ET < High:
x = (ET-Low) / factor
D[int(x)] += 1
return D
def calc_grad_ext(F_ext,atom1,atom2):
v1 = my.attr_val(atom1); v2 = my.attr_val(atom2)
# vector = [v1[i]-v2[i] for i in range(3)]
vector = [v2[i]-v1[i] for i in range(3)]
norm = np.linalg.norm(vector)
U = [x/norm for x in vector]
g_ext = [F_ext*x for x in U]
return g_ext
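# Clarifying note (added; not in the original): calc_grad_ext projects the scalar
# external force onto the unit vector pointing from atom1 to atom2,
#     g_ext = F_ext * (r2 - r1) / |r2 - r1|,
# i.e. the constant pulling-force contribution applied between the two anchor atoms.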
def check_anchor(F,atom1,atom2,numat):
L = []
for x in range(numat):
if atom1 == x:
L.append(Point(F[0],F[1],F[2]))
elif atom2 == x:
L.append(Point(-F[0],-F[1],-F[2]))
else:
L.append(Point())
return L
def flat_points(lists_points):
L = []
for val in lists_points:
x,y,z = my.attr_val(val)
L.append(x)
L.append(y)
L.append(z)
return L
########### READING DATA ########################
# READING DATA FROM THE INPUT FILE
input = open('input.dat')
files = [line for line in input.read().split()]
# reading of the Cartesian Coordinates, number of Atoms, type of Atoms,
#Masses and Energy
file_st1 = files[0]
cord, numat, typ, symb, mass, st1_energy, st1_grad_cart, st1_hess_cart = \
my.initial_data(file_st1,'state1')
# READING CONNECTIVITY
#my.mtx_conect('conex.dat')
conexion =list(open('internas.dat'))
nbond = int(conexion[0])
nang = int(conexion[nbond+1])
ndih = int(conexion[nbond+nang+2])
bond = [tuple([int(m) for m in w.split()]) for w in conexion[1:nbond+1]]
ang = [tuple([int(m) for m in w.split()]) for w in conexion[nbond+2:nbond+nang+2]]
dih = [tuple([int(m) for m in w.split()]) for w in conexion[nbond+nang+3:nbond+nang+ndih+3]]
redun = bond + ang + dih
ndim = len(redun)
# The final temperature to which you want to heat the molecule, in Kelvin
T = float(files[3])
# The time in femtoseconds over which you want to heat up the system
time = float(files[4])
# The time in femtoseconds for which you want to run the dynamics
run_time = float(files[5])
input.close()
# External Forces
# atomic unit of force a.u. 8.238722e-8
mod_Fext = float(files[6])/my.auN
# Anchor points
anchor1 = int(files[7]) - 1
anchor2 = int(files[8]) - 1
######### INSTANTIATION OF ATOMS ###############################
for i in xrange(len(symb)):
symb[i] = Atoms()
# Assignation of mass and cartesian Coordinates
for m in xrange(numat):
j = 3*m
symb[m].cart.x = cord[j]
symb[m].cart.y = cord[j+1]
symb[m].cart.z = cord[j+2]
symb[m].mass = mass[m]
######### INSTANTIATION OF MOLECULES #########################
# Instantiation of the molecule
mol = Molecule([var.cart for var in symb] )
# Transformation to internal coordinates
mol.internas_bonds(bond,nbond)
mol.internas_ang(ang,nang)
mol.internas_dihe(dih,ndih)
# Internal coordinates of the minimum
mol.b0 = mol.bonds
mol.a0 = mol.angles
mol.d0 = mol.dihedrals
q0 = mol.bonds + mol.angles + mol.dihedrals
## CALCULATING THE GRADIENT AND HESSIAN MATRIX IN INTERNAL COORDINATES
## FOR THE FIRST STATE
second_derv = my.segunda_wilson(symb,ndim,numat,bond,ang,dih)
derv_trans = np.transpose(second_derv)
Bwilson,transp = my.matrix_transf(symb,bond,ang,dih)
G_mtx = np.dot(Bwilson,transp)
G_inv = my.invertir_mtx(G_mtx)
Grad_st1 =np.dot(G_inv,np.dot(Bwilson,st1_grad_cart))
mtx_B_G1 = np.dot(derv_trans,Grad_st1)
mtx_resta1 = st1_hess_cart - mtx_B_G1
Hess_st1 = np.dot(np.dot(np.dot(G_inv,Bwilson),mtx_resta1),np.dot(transp,G_inv))
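# Clarifying note (added; not in the original): the block above is the standard
# Wilson B-matrix transformation of Cartesian derivatives into redundant internal
# coordinates:
#     G     = B B^T
#     g_int = G^-1 B g_cart
#     H_int = (G^-1 B) (H_cart - B''^T g_int) (B^T G^-1)
# where B'' (second_derv) collects the second derivatives of the internal
# coordinates with respect to the Cartesians.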
########### DATA OF THE SECOND AND THIRD STATES ############
#############################################################
file_st2 = files[1]
st2_energy, st2_grad_cart, st2_hess_cart = my.initial_data(file_st2,'state2')
# CALCULATING THE GRADIENT AND HESSIAN MATRIX IN INTERNAL COORDINATES
# FOR THE SECOND STATE
Grad_st2 =np.dot(G_inv,np.dot(Bwilson,st2_grad_cart))
mtx_B_G2 = np.dot(derv_trans,Grad_st2)
mtx_resta2 = st2_hess_cart - mtx_B_G2
Hess_st2 =np.dot(np.dot(np.dot(G_inv,Bwilson),mtx_resta2),np.dot(transp,G_inv))
########### DATA OF THE SECOND AND THIRD STATES ############
###################################################################
file_st3 = files[2]
st3_energy, st3_grad_cart, st3_hess_cart = my.initial_data(file_st3,'state2')
# CALCULATING THE GRADIENT AND HESSIAN MATRIX IN INTERNAL COORDINATES
# FOR THE SECOND STATE
Grad_st3 =np.dot(G_inv,np.dot(Bwilson,st3_grad_cart))
mtx_B_G3 = np.dot(derv_trans,Grad_st3)
mtx_resta3 = st3_hess_cart - mtx_B_G3
Hess_st3 =np.dot(np.dot(np.dot(G_inv,Bwilson),mtx_resta3),np.dot(transp,G_inv))
############# HEATING USING MAXWELL-BOLTZMANN DISTRIBUTION ###################
# " From Statistical thermodynamics it is known that the velocities, #
# of the atoms in a classical system are distributed according #
# to the Maxwell-Boltzmann distribution.This says that if the temperature #
# of the system is T, the probability of each component of the velocity #
# of the ith atom having a value between v and v + dv is #
# #
# f(V) dV = sqrt(Massi / (2 pi Kb T)) * exp(-Massi V^2 / (2 Kb T)) dV #
# #
#The values of the velocities of the atoms can be assigned by treating them #
#
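# Hedged sketch (added for illustration; not part of the original script and not
# called anywhere): one standard way to realise the Maxwell-Boltzmann assignment
# described above is to draw every Cartesian velocity component of atom i from a
# Gaussian with variance Kb*T/Mass_i. The Boltzmann constant below is given in
# Hartree/K, i.e. assuming atomic units throughout.
def sample_maxwell_boltzmann(masses, temperature, kb=3.1668114e-6):
    """Return an (N, 3) array of velocities drawn at the given temperature."""
    m = np.asarray(masses, dtype=float)
    sigma = np.sqrt(kb * temperature / m)        # per-atom standard deviation
    return np.random.normal(0.0, sigma[:, None], size=(len(m), 3))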
has_negation( feeling_match.group(1))
and
(re.search( r"(^|\W)(i|we)\W", feeling_match.group(1))
or re.search( r"^\W*$", feeling_match.group(1)))
):
return_value = True
return return_value
#######
# ###### ## #####
# # # # # #
##### ##### # # # #
# # ###### #####
# # # # # #
# ###### # # # #
fear_pattern = r"|".join([
r"(fear",
r"afraid",
r"scared of",
r"scare(?:s|\W|ing)?",
r"terrified",
r"terrif(?:ies|ying)",
r"frightened",
r"concerned",
r"concern(?:s|\W|ing)?",
r"frighten(?:s|\W|ing)?)"
])
def has_fear( sentence):
return_value = False
if re.search( fear_pattern, sentence):
fear_match = re.match( r"(.*?)" + fear_pattern + r"(.*?$)", sentence)
if(
(re.search( r"(fear|afraid|scared|terrified|concerned|frightened)" ,fear_match.group(2))
and re.search( r"(^|\W)(i|we)\W", fear_match.group(1))
and not has_negation( fear_match.group(1)))
or
(re.search( r"(scare( |s|ing)|terrif(y|ies|ying)|concern( |s|ing)|frighten( |s|ing))" ,fear_match.group(2))
and re.search( r"(^|\W)(me|us|i|we)(\W|$)", fear_match.group(3))
and not has_negation( fear_match.group(1)))
or
(re.search( r"(ing|fear)" ,fear_match.group(2))
and re.search( r"(^|\W)(me|us|i|we|my)\W", fear_match.group(0))
and not has_negation( fear_match.group(1)))
):
return_value = True
return return_value
#
# # # # # # # ####
# # # # # ## # # #
# # #### # # # # #
# # # # # # # # # ###
# # # # # # ## # #
####### # # # # # # ####
dislikes = r"|".join([
r"(i (?:\w+ )?hate",
r"i (?:\w+ )?dislike",
r"i (?:\w+ )?can not stand",
r"i (?:\w+ )?detest",
r"i (?:\w+ )?loathe",
r"(freak|creep)(?:s|ing)? me out",
r"get(?:s|ting)? on my nerves",
r"i(?: have)(?: \w+)? had enough of",
r"i(?: \w+) can not see any more of)"
])
def has_dislike( sentence):
if(
re.search( dislikes, sentence)
and not has_negation( re.sub( dislikes, " good ", sentence))
):
return True
else:
return False
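# Hedged usage sketch (added; not in the original): these detectors are applied to
# one lowercased sentence at a time; the expected results below are assumptions
# based on the patterns defined above.
#
#   has_dislike("i hate mondays")        # -> True
#   has_dislike("spiders freak me out")  # -> True
#   has_fear("spiders scare me")         # -> True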
######
# # ###### #### # ##### ######
# # # # # # # #
# # ##### #### # # # #####
# # # # # ##### #
# # # # # # # # #
###### ###### #### # # # ######
desires = "|".join([
r"(i (\w+ )?wish",
r"if only",
r"my (\w+ )?goal is (that|for|to|when|.w+ing)",
r"i (\w+ )?hope(?! for)",
r"it would (\w+ )?be (\w+ )?" + positives + r" (if|when))",
])
def has_desire(sentence):
desire_match = re.search( desires + r"(\s|\.|\,)(?!you)", sentence)
if desire_match:
if not has_negation( desire_match.group(0)):
return True
else:
return False
###### #####
# # ## # # #### ###### ##### ##### #### # # ###### # ######
# # # # ## # # # # # # # # # # # # #
# # # # # # # # ##### # # # # # ##### ##### # #####
# # ###### # # # # ### # ##### # # # # # # #
# # # # # ## # # # # # # # # # # # # #
###### # # # # #### ###### # # # #### ##### ###### ###### #
hurts = "|".join([
r"(kill",
r"hang",
r"cut",
r"harm",
r"electrocute",
r"burn",
r"to death",
r"hurt",
r"drown)",
])
intentions_self = "|".join([
r"(i am (\w+ )?going to",
r"i (\w+ )?will",
r"i (\w|\s)*plan(ing)? to",
r"i (\w|\s)*inten(d|t)(ing)? to",
r"i (\w|\s)*prepar(e|ing) to",
r"i (\w+ )?want to",
r"i (\w|\s)*think(ing)? about",
r"i am (\w+ )about to)",
])
def has_danger_to_self(sentence):
intention_match = re.search( intentions_self+r"(.*)", sentence)
desire_match = re.search( desires+r"(.*)", sentence)
if intention_match:
if(
not has_negation( intention_match.group(1))
and re.search( hurts, intention_match.group(len(intention_match.groups())))
and re.search( r"\W(me|myself)(\W|$)", intention_match.group(len(intention_match.groups())))
):
return True
else:
return False
elif desire_match:
if(
not has_negation( desire_match.group(1))
and (
(
re.search( r"\W(dead|die)(\W|$)", desire_match.group(len(desire_match.groups())))
and re.search( r"\W(i)\W", desire_match.group(len(desire_match.groups())))
)
or(
re.search( hurts, desire_match.group( len( desire_match.groups())))
and re.search( r"\W(me|myself)(\W|$)", desire_match.group( len( desire_match.groups())))
)
)
):
return True
else:
return False
else:
return False
#####
# # #### # # ###### # # #### #####
# # # ## # # # # # # #
# # # # # # ##### # # # #
# # # # # # # # # # #
# # # # # ## # # # # # #
##### #### # # # ###### # #### #
conflicts = "|".join([
r"(trouble",
r"problem",
r"conflict",
r"fight",
r"disagreement",
r"struggle",
r"dispute",
r"argument",
r"battle",
r"quarrel",
r"dispute",
r"controvery",
r"clash",
r"collision",
r"(?:^|\s)issue)"
])
def has_conflict(sentence):
if re.search(conflicts,sentence) and not has_negation(sentence):
return True
else:
return False
######
# # ## ##### # #### # # ## # ######
# # # # # # # # ## # # # # #
###### # # # # # # # # # # # # #####
# # ###### # # # # # # # ###### # #
# # # # # # # # # ## # # # #
# # # # # # #### # # # # ###### ######
rationale_pattern = re.compile(r"(?:.*)because\W([^\.\,\;\!\?]+)")
def has_rationale(sentence):
if rationale_pattern.search(sentence):
return True
else:
return False
def reflect_rationale(sentence):
reason = rationale_pattern.search(sentence).group(1)
return capitalize_fragment(
perform_pronoun_reflection(
reason))
######
# # ##### #### ##### ###### #### #####
# # # # # # # # # #
###### # # # # # ##### #### #
# ##### # # # # # #
# # # # # # # # # #
# # # #### # ###### #### #
def has_protest_to_question(sentence):
if(
re.search( r"no[^\.\,\;(is)]+you[^\.\,\;]+(busines|concern)",sentence)
or re.search( r"mind[^\.\,\;(is)]+own[^\.\,\;]+busines",sentence)
or re.search( r"(never|no)[^\.\,\;(is)]+mind",sentence)
or re.search( r"no[^\.\,\;]+((talk[^\.\,\;]+about)|discuss)",sentence)
or re.search( r"(fuck|screw|stop) this", sentence)
or re.search( r"(annoying|stupid|idiotic|absurd|meaningless|fucking) (questions?|conversation)", sentence)
):
return True
else:
return False
#####
# # # # ###### #### ##### # #### # # ####
# # # # # # # # # # ## # #
# # # # ##### #### # # # # # # # ####
# # # # # # # # # # # # # # #
# # # # # # # # # # # # ## # #
#### # #### ###### #### # # #### # # ####
def has_request_to_explain( sentence):
if(
( # why are you asking this? / why would you want to know this?
re.search(r"(why|(what.*(for|reason|purpose)))", sentence)
and
re.search(r"(ask|know|question|curious|nosy|inquisitive)", sentence)
)
or( # in how far is that relevant?
re.search(r"(why|(how(\w|\s)+(is|be)))", sentence)
and
re.search(r"(important|relevant|interesting|fascinating)", sentence)
)
or( # what do you mean / i do not get your point?
re.search(r"(what|((^|\W)i\W).*(not.*(get|understand|follow)))", sentence)
and(
re.search(r"(talk|question|this)(\w|\s)+about", sentence)
or
re.search(r"you.*(point|mean|ask|question)", sentence)
)
)
or(
re.search(r"(question|ask)", sentence)
and
re.search(r"(you|this).*(has|make)", sentence)
and
re.search(r"no(\w|\s)+(sense)", sentence)
)
or(
re.search(r"(why|sorry|what|wtf)\?", sentence)
)
):
return True
else:
return False
######
# # ###### ###### # ###### #### ##### # #### # #
# # # # # # # # # # # # ## #
###### ##### ##### # ##### # # # # # # # #
# # # # # # # # # # # # # #
# # # # # # # # # # # # # ##
# # ###### # ###### ###### #### # # #### # #
temporal = "|".join([
r"(today",
r"right now",
r"currently",
r"now",
r"recently",
r"previously",
r"lately",
r"these days",
r"this \w+",
r"sometimes",
r"every now and then)"
])
#####
# # ##### ###### ###### ##### # # # ####
# # # # # # # ## # # #
# #### # # ##### ##### # # # # # #
# # ##### # # # # # # # # ###
# # # # # # # # # ## # #
##### # # ###### ###### # # # # ####
def current_greeting(current_hour):
if not isinstance(current_hour, int):
return "Hello"
elif current_hour < 11:
return "Good morning"
elif current_hour >= 18:
return "Good evening"
elif current_hour >= 14 and
col in cols[2:]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
part1 = '=IFERROR(New_Builds!{}*VLOOKUP(Lookups!$A$11, Lookups!$A$9:Lookups!$B$24, 2, FALSE), "-")'.format(cell)
ws[cell] = part1
ws['M1'] = 'Income Group'
for i in range(2,lnth):
cell = "M{}".format(i)
ws[cell] = "=IFERROR(INDEX(Options!$J$2:$J$1611,MATCH(A{}, Options!$G$2:$G$1611,0)), "")".format(i)
ws['N1'] = 'Region'
for i in range(2,lnth):
cell = "N{}".format(i)
ws[cell] = "=IFERROR(INDEX(Options!$I$2:$I$1611,MATCH(A{}, Options!$G$2:$G$1611,0)), "")".format(i)
ws.column_dimensions['M'].width = 20
ws.column_dimensions['N'].width = 20
ws.column_dimensions['O'].width = 35
ws.column_dimensions['P'].width = 45
ws = format_numbers(ws, ['N'], (1,200), 'Comma [0]', 0)
ws = format_numbers(ws, ['M'], (1,200), 'Comma [0]', 1)
set_border(ws, 'A1:N{}'.format(lnth-1), "thin", "000000")
return ws
def add_labor_costs(ws, cols, lnth):
"""
"""
ws.sheet_properties.tabColor = "9966ff"
for col in cols:
cell = "{}1".format(col)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[:2]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[2:]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
# part1 = "=IFERROR('New_4G_Sites'!{}*VLOOKUP(Lookups!$A$9, Lookups!$A$9:Lookups!$B$24, 2, FALSE),0)".format(cell)
# part1 = '=IFERROR(New_4G_Sites!{}*(SQRT((1/(Total_Sites_MNO!{}/Area!{}))/2)*1000),"-")'.format(cell, cell, cell)
# part1 = "*VLOOKUP('Lookups'!$A$10, 'Lookups'!$A$9:'Lookups'!$B$24, 2, FALSE))"
part1 = '=IFERROR(New_4G_Sites!{}*VLOOKUP(Lookups!$A$12, Lookups!$A$9:Lookups!$B$24, 2, FALSE), "-")'.format(cell)
# part5 = ',"-")'
ws[cell] = part1 #+ part2 + part3 + part4 + part5
ws['M1'] = 'Income Group'
for i in range(2,lnth):
cell = "M{}".format(i)
ws[cell] = "=IFERROR(INDEX(Options!$J$2:$J$1611,MATCH(A{}, Options!$G$2:$G$1611,0)), "")".format(i)
ws['N1'] = 'Region'
for i in range(2,lnth):
cell = "N{}".format(i)
ws[cell] = "=IFERROR(INDEX(Options!$I$2:$I$1611,MATCH(A{}, Options!$G$2:$G$1611,0)), "")".format(i)
ws.column_dimensions['M'].width = 20
ws.column_dimensions['N'].width = 20
ws.column_dimensions['O'].width = 35
ws.column_dimensions['P'].width = 45
ws = format_numbers(ws, ['N'], (1,200), 'Comma [0]', 0)
ws = format_numbers(ws, ['M'], (1,200), 'Comma [0]', 1)
set_border(ws, 'A1:N{}'.format(lnth-1), "thin", "000000")
return ws
def add_power_costs(ws, cols, lnth):
"""
"""
ws.sheet_properties.tabColor = "9966ff"
for col in cols:
cell = "{}1".format(col)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[:2]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[2:]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
# part1 = "=IFERROR('New_4G_Sites'!{}*VLOOKUP(Lookups!$A$9, Lookups!$A$9:Lookups!$B$24, 2, FALSE),0)".format(cell)
# part1 = '=IFERROR(New_4G_Sites!{}*(SQRT((1/(Total_Sites_MNO!{}/Area!{}))/2)*1000),"-")'.format(cell, cell, cell)
# part1 = "*VLOOKUP('Lookups'!$A$10, 'Lookups'!$A$9:'Lookups'!$B$24, 2, FALSE))"
part1 = '=IFERROR(New_4G_Sites!{}*VLOOKUP(Lookups!$A$13, Lookups!$A$9:Lookups!$B$24, 2, FALSE), "-")'.format(cell)
# part5 = ',"-")'
ws[cell] = part1 #+ part2 + part3 + part4 + part5
ws['M1'] = 'Income Group'
for i in range(2,lnth):
cell = "M{}".format(i)
ws[cell] = "=IFERROR(INDEX(Options!$J$2:$J$1611,MATCH(A{}, Options!$G$2:$G$1611,0)), "")".format(i)
ws['N1'] = 'Region'
for i in range(2,lnth):
cell = "N{}".format(i)
ws[cell] = "=IFERROR(INDEX(Options!$I$2:$I$1611,MATCH(A{}, Options!$G$2:$G$1611,0)), "")".format(i)
ws.column_dimensions['M'].width = 20
ws.column_dimensions['N'].width = 20
ws.column_dimensions['O'].width = 35
ws.column_dimensions['P'].width = 45
ws = format_numbers(ws, ['N'], (1,200), 'Comma [0]', 0)
ws = format_numbers(ws, ['M'], (1,200), 'Comma [0]', 1)
set_border(ws, 'A1:N{}'.format(lnth-1), "thin", "000000")
return ws
def add_site_opex(ws, cols, lnth):
"""
"""
ws.sheet_properties.tabColor = "9966ff"
for col in cols:
cell = "{}1".format(col)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[:2]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[2:]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
part1 = '=IFERROR(((RAN_Capex!{}*0.1)*Settings!C16)/((1+Settings!C11)^Settings!C16), "-")'.format(cell)
ws[cell] = part1 #+ part2 + part3 + part4 + part5
ws['M1'] = 'Income Group'
for i in range(2,lnth):
cell = "M{}".format(i)
ws[cell] = "=IFERROR(INDEX(Options!$J$2:$J$1611,MATCH(A{}, Options!$G$2:$G$1611,0)), "")".format(i)
ws['N1'] = 'Region'
for i in range(2,lnth):
cell = "N{}".format(i)
ws[cell] = "=IFERROR(INDEX(Options!$I$2:$I$1611,MATCH(A{}, Options!$G$2:$G$1611,0)), "")".format(i)
ws.column_dimensions['M'].width = 20
ws.column_dimensions['N'].width = 20
ws.column_dimensions['O'].width = 35
ws.column_dimensions['P'].width = 45
ws = format_numbers(ws, ['N'], (1,200), 'Comma [0]', 0)
ws = format_numbers(ws, ['M'], (1,200), 'Comma [0]', 1)
set_border(ws, 'A1:N{}'.format(lnth-1), "thin", "000000")
return ws
def add_bh_opex(ws, cols, lnth):
"""
"""
ws.sheet_properties.tabColor = "9966ff"
for col in cols:
cell = "{}1".format(col)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[:2]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[2:]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
part1 = '=IFERROR(((BH_Capex!{}*0.1)*Settings!C16)/((1+Settings!C11)^Settings!C16), "-")'.format(cell)
ws[cell] = part1 #+ part2 + part3 + part4 + part5
ws['M1'] = 'Income Group'
for i in range(2,lnth):
cell = "M{}".format(i)
ws[cell] = "=IFERROR(INDEX(Options!$J$2:$J$1611,MATCH(A{}, Options!$G$2:$G$1611,0)), "")".format(i)
ws['N1'] = 'Region'
for i in range(2,lnth):
cell = "N{}".format(i)
ws[cell] = "=IFERROR(INDEX(Options!$I$2:$I$1611,MATCH(A{}, Options!$G$2:$G$1611,0)), "")".format(i)
ws.column_dimensions['M'].width = 20
ws.column_dimensions['N'].width = 20
ws.column_dimensions['O'].width = 35
ws.column_dimensions['P'].width = 45
ws = format_numbers(ws, ['N'], (1,200), 'Comma [0]', 0)
ws = format_numbers(ws, ['M'], (1,200), 'Comma [0]', 1)
set_border(ws, 'A1:N{}'.format(lnth-1), "thin", "000000")
return ws
def add_tower_opex(ws, cols, lnth):
"""
"""
ws.sheet_properties.tabColor = "9966ff"
for col in cols:
cell = "{}1".format(col)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[:2]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[2:]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
part1 = '=IFERROR(((Tower_Capex!{}*0.1)*Settings!C16)/((1+Settings!C11)^Settings!C16), "-")'.format(cell)
ws[cell] = part1 #+ part2 + part3 + part4 + part5
ws['M1'] = 'Income Group'
for i in range(2,lnth):
cell = "M{}".format(i)
ws[cell] = "=IFERROR(INDEX(Options!$J$2:$J$1611,MATCH(A{}, Options!$G$2:$G$1611,0)), "")".format(i)
ws['N1'] = 'Region'
for i in range(2,lnth):
cell = "N{}".format(i)
ws[cell] = "=IFERROR(INDEX(Options!$I$2:$I$1611,MATCH(A{}, Options!$G$2:$G$1611,0)), "")".format(i)
ws.column_dimensions['M'].width = 20
ws.column_dimensions['N'].width = 20
ws.column_dimensions['O'].width = 35
ws.column_dimensions['P'].width = 45
ws = format_numbers(ws, ['N'], (1,200), 'Comma [0]', 0)
ws = format_numbers(ws, ['M'], (1,200), 'Comma [0]', 1)
set_border(ws, 'A1:N{}'.format(lnth-1), "thin", "000000")
return ws
def add_power_opex(ws, cols, lnth):
"""
"""
ws.sheet_properties.tabColor = "9966ff"
for col in cols:
cell = "{}1".format(col)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[:2]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[2:]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
part1 = '=IFERROR(((Tower_Capex!{}*0.1)*Settings!C16)/((1+Settings!C11)^Settings!C16), "-")'.format(cell)
ws[cell] = part1 #+ part2 + part3 + part4 + part5
ws['M1'] = 'Income Group'
for i in range(2,lnth):
cell = "M{}".format(i)
ws[cell] = "=IFERROR(INDEX(Options!$J$2:$J$1611,MATCH(A{}, Options!$G$2:$G$1611,0)), "")".format(i)
ws['N1'] = 'Region'
for i in range(2,lnth):
cell = "N{}".format(i)
ws[cell] = "=IFERROR(INDEX(Options!$I$2:$I$1611,MATCH(A{}, Options!$G$2:$G$1611,0)), "")".format(i)
ws.column_dimensions['M'].width = 20
ws.column_dimensions['N'].width = 20
ws.column_dimensions['O'].width = 35
ws.column_dimensions['P'].width = 45
ws = format_numbers(ws, ['N'], (1,200), 'Comma [0]', 0)
ws = format_numbers(ws, ['M'], (1,200), 'Comma [0]', 1)
set_border(ws, 'A1:N{}'.format(lnth-1), "thin", "000000")
return ws
def add_mno_costs(ws, cols, lnth):
"""
"""
ws.sheet_properties.tabColor = "9966ff"
for col in cols:
cell = "{}1".format(col)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[:2]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[2:]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
part1 = '=IFERROR(RAN_Capex!{}+BH_Capex!{}+Tower_Capex!{}+Labor_Capex!{}+Power_Capex!{}+RAN_Opex!{}+BH_Opex!{}+Tower_Opex!{}+Power_Opex!{}, "-")'.format(cell,cell,cell,cell,cell,cell,cell,cell,cell)
ws[cell] = part1
ws['M1'] = 'MNO Cost ($)'
for i in range(2,lnth):
cell = "M{}".format(i)
part1 = '=IFERROR(SUMIF((C{}:L{}), "<>n/a"), "-")'.format(i, i)
line = part1
ws[cell] = line
ws.column_dimensions['M'].width = 20
ws.column_dimensions['N'].width = 20
ws.column_dimensions['O'].width = 35
ws.column_dimensions['P'].width = 45
ws = format_numbers(ws, ['N'], (1,200), 'Comma [0]', 0)
ws = format_numbers(ws, ['M'], (1,200), 'Comma [0]', 1)
set_border(ws, 'A1:N{}'.format(lnth-1), "thin", "000000")
return ws
def add_total_costs(ws, cols, lnth):
"""
"""
ws.sheet_properties.tabColor = "9966ff"
for col in cols:
cell = "{}1".format(col)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[:2]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[2:]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
part1 = '=IFERROR(MNO_Costs!{}*(100/Settings!C16), "-")'.format(cell)
ws[cell] = part1
ws['M1'] = 'Total Cost ($)'
for i in range(2,lnth):
cell = "M{}".format(i)
part1 = '=IFERROR(SUMIF((C{}:L{}), "<>n/a"), "-")'.format(i, i)
line = part1
ws[cell] = line
ws['N1'] = 'Cost Per Pop ($)'
for i in range(2,lnth):
cell = "N{}".format(i)
ws[cell] = "=(M{})/Pop!M{}".format(i, i)
ws['O1'] = 'Income Group'
for i in range(2,lnth):
cell = "O{}".format(i)
ws[cell] = "=IFERROR(INDEX(Options!$J$2:$J$1611,MATCH(A{}, Options!$G$2:$G$1611,0)), "")".format(i)
ws['P1'] = 'Region'
for i in range(2,lnth):
cell = "P{}".format(i)
ws[cell] = "=IFERROR(INDEX(Options!$I$2:$I$1611,MATCH(A{}, Options!$G$2:$G$1611,0)), "")".format(i)
ws.column_dimensions['M'].width = 20
ws.column_dimensions['N'].width = 20
ws.column_dimensions['O'].width = 35
ws.column_dimensions['P'].width = 45
ws = format_numbers(ws, ['N'], (1,200), 'Comma [0]', 0)
ws = format_numbers(ws, ['M'], (1,200), 'Comma [0]', 1)
set_border(ws, 'A1:N{}'.format(lnth-1), "thin", "000000")
return ws
def add_gdp_sheet(ws):
"""
"""
ws.sheet_properties.tabColor = "ffff33"
path = os.path.join(DATA_RAW, 'imf_gdp_2020_2030_real.csv')
gdp = pd.read_csv(path, encoding = "ISO-8859-1")
gdp.rename(columns={'isocode':'ISO3'}, inplace=True)
for i in range(2020, 2031):
col = "GDP{}".format(i)
if col in gdp.columns:
gdp.rename(columns={col:i}, inplace=True)
gdp = gdp[['ISO3',2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2030]]
gdp = gdp.sort_values('ISO3')
lnth = len(gdp) + 2
for r in dataframe_to_rows(gdp, index=False, header=True):
ws.append(r)
ws['L1'] = 'Mean 10-Year GDP ($Bn)'
for i in range(2,lnth):
cell = 'L{}'.format(i)
ws[cell] = "=((SUM(B{}:K{})*1e9)/10)/1e9".format(i,i)
ws['M1'] = 'GDP Growth Rate (%)'
for i in range(2,lnth):
cell = 'M{}'.format(i)
ws[cell] = "=IFERROR((K{}-B{})/B{},"")".format(i,i,i)
ws = format_numbers(ws, ['M'], (2,len(gdp)+1), 'Percent', 1)
ws['N1'] = 'Income Group'
for i in | |
# Imports
import os
import jinja2
import webapp2
import logging
import json
import urllib
import MySQLdb
import math
import numpy as np
from datetime import timedelta, datetime
#import pandas as pd
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
# Import the Flask Framework
from flask import Flask, request
app = Flask(__name__)
_INSTANCE_NAME = 'jdiner-mobile-byte3:mobile-data'
_DB_NAME = 'mobile_data_db'
_USER = 'root'
_IPADDRESS = '192.168.127.12'
_PSWD = '<PASSWORD>'
_ACTIVITY = 'plugin_google_activity_recognition'
_LOCATIONS = 'locations'
_ID = 'ab755be6-a980-4d95-a229-6d2af7c35bbf'
_EPSILON = 0.0001
_HOME = '5440 5th Ave, Pittsburgh, PA 15232, United States'
_UNIVERSITY = 'Carnegie Mellon University, 4902 Forbes Ave, Pittsburgh, PA 15213, United States'
if (os.getenv('SERVER_SOFTWARE') and
os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/')):
_DB = MySQLdb.connect(unix_socket='/cloudsql/' + _INSTANCE_NAME, db=_DB_NAME, user=_USER, passwd = _PSWD, charset='utf8')
else:
_DB = MySQLdb.connect(host=_IPADDRESS, port=3306, db=_DB_NAME, user=_USER, passwd = _PSWD, charset='utf8')
cursor = _DB.cursor()
# # turns a unix timestamp into Year-month-day format
# day = "FROM_UNIXTIME(timestamp/1000,'%Y-%m-%d')"
# # turns a unix timestamp into Hour:minute format
# time_of_day = "FROM_UNIXTIME(timestamp/1000,'%H:%i')"
# # calculates the difference between two timestamps in seconds
# elapsed_seconds = "(max(timestamp)-min(timestamp))/1000"
# # the name of the table our query should run on
# table = _ACTIVITY
# # turns a unix timestamp into Year-month-day Hour:minute format
# day_and_time_of_day = "FROM_UNIXTIME(timestamp/100, '%Y-%m-%d %H:%i')"
# # Groups the rows of a table by day and activity (so there will be one
# # group of rows for each activity that occurred each day.
# # For each group of rows, the day, time of day, activity name, and
# # elapsed seconds (difference between maximum and minimum) is calculated,
# query = "SELECT {0} AS day, {1} AS time_of_day, activity_name, {2} AS time_elapsed_seconds FROM {3} WHERE device_id='{4}' GROUP BY day, activity_name, {5}".format(day, time_of_day, elapsed_seconds, table, _ID, day_and_time_of_day)
#####################################################################################################
############# FUNCTIONS #############
#####################################################################################################
# Takes the database link and the query as input
def make_query(cursor, query):
# this is for debugging -- comment it out for speed
# once everything is working
try:
# try to run the query
cursor.execute(query)
# and return the results
return cursor.fetchall()
except Exception:
# if the query failed, log that fact
logging.info("query making failed")
logging.info(query)
# finally, return an empty list of rows
return []
# helper function to make a query and print lots of
# information about it.
def make_and_print_query(cursor, query, description):
logging.info(description)
logging.info(query)
rows = make_query(cursor, query)
logging.info(rows)
return rows
def bin_locations(locations, epsilon):
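"""Cluster (lat, lon) pairs into bins, treating points within epsilon of an existing bin as the same place."""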
# always add the first location to the bin
bins = {1: [locations[0][0], locations[0][1]]}
# this gives us the current maximum key used in our dictionary
num_places = 1
# now loop through all the locations
for location in locations:
lat = location[0]
lon = location[1]
# assume that our current location is new for now (hasn't been found yet)
place_found = False
# loop through the bins
for place in bins.values():
# check whether the distance is smaller than epsilon
if distance_on_unit_sphere(lat, lon, place[0], place[1]) < epsilon:
#(lat, lon) is near (place[0], place[1]), so we can stop looping
place_found = True
break
# we weren't near any of the places already in bins
if place_found is False:
logging.info("new place: {0}, {1}".format(lat, lon))
# increment the number of places found and create a new entry in the
# dictionary for this place. Store the lat lon for comparison in the
# next round of the loop
num_places = num_places + 1
bins[num_places] = [lat, lon]
return bins.values()
def find_bin(bins, lat, lon, epsilon):
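"""Return the index of the bin within epsilon of (lat, lon), appending a new bin if none matches."""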
for i in range(len(bins)):
blat = bins[i][0]
blon = bins[i][1]
if distance_on_unit_sphere(lat, lon, blat, blon) < epsilon:
return i
bins.append([lat, lon])
return len(bins)-1
def group_activities_by_location(bins, locations, activities, epsilon):
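"""Attach activity names to the location bin they occurred in, matching on (day, hour); activities with no matching location are collected into a final bin."""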
searchable_locations = {}
for location in locations:
# day, hour
key = (location[0], location[1])
if key in searchable_locations:
# lat, lon
searchable_locations[key] = searchable_locations[key] + [(location[2], location[3])]
else:
searchable_locations[key] = [(location[2], location[3])]
# a place to store activities for which we couldn't find a location
# (indicates an error in either our data or algorithm)
no_loc = []
for activity in activities:
# collect the information we will need
aday = activity[0] # day
ahour = activity[1] # hour
aname = activity[2] # name
logging.info(aday + aname)
try:
possible_locations = searchable_locations[(aday, ahour)]
# loop through the locations
for location in possible_locations:
logging.info(" about to find bin")
bin = find_bin(bins, location[0], location[1], epsilon)
# and add the information to it
bins[bin] = bins[bin] + [aname]
except KeyError:
no_loc.append([aname])
# add no_loc to the bins
bins.append(no_loc)
# this function is taken verbatim from http://www.johndcook.com/python_longitude_latitude.html
def distance_on_unit_sphere(lat1, long1, lat2, long2):
# Convert latitude and longitude to
# spherical coordinates in radians.
degrees_to_radians = math.pi/180.0
# phi = 90 - latitude
phi1 = (90.0 - lat1)*degrees_to_radians
phi2 = (90.0 - lat2)*degrees_to_radians
# theta = longitude
theta1 = long1*degrees_to_radians
theta2 = long2*degrees_to_radians
# Compute spherical distance from spherical coordinates.
# For two locations in spherical coordinates
# (1, theta, phi) and (1, theta, phi)
# cosine( arc length ) =
# sin phi sin phi' cos(theta-theta') + cos phi cos phi'
# distance = rho * arc length
cos = (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) +
math.cos(phi1)*math.cos(phi2))
# sometimes small errors add up, and acos will fail if cos > 1
if cos>1: cos = 1
arc = math.acos( cos )
# Remember to multiply arc by the radius of the earth
# in your favorite set of units to get length.
return arc
def unique_address(locations_visit, epsilon):#Works with _EPSILON = 0.0001
addresses = {}
for location in locations_visit:
bin = find_bin(bins, location[0], location[1], epsilon)
if bin not in addresses:
print(bin, location[4])
addresses[bin] = location[4]
return addresses
def normalize_address(locations, addresses, epsilon):
loc_list = []
for loc in locations:
#print(loc_list)
location = list(loc)
bin = find_bin(bins, location[0], location[1], epsilon)
normalized_add = addresses.get(bin)
location[4] = normalized_add
loc_list.append(location)
return loc_list
def join_trips(norm_locations):
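"""Pair consecutive location visits into trips of the form [day_of_week, departure, arrival, duration_seconds, start_address, end_address]."""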
i=0
maxi = len(norm_locations)
trips = []
while i<maxi:
day_of_week = norm_locations[i][5]
start = norm_locations[i][2]
if start is None:
i+=1
if i<maxi:
start = norm_locations[i][2]
if i<maxi:
start_address = norm_locations[i][4]
i+=1
if i<maxi:
end = norm_locations[i][3]
end_address = norm_locations[i][4]
if None not in (start, end):
total_time = (end - start).total_seconds() #Total seconds of the trip
trip = [day_of_week, start, end, total_time, start_address, end_address]
trips.append(trip)
if norm_locations[i][2] is None: #If departure is None
i+=1
return trips
def handle_outliers(list, col_index):
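"""Replace values in the given column (or the whole array if 1-D) lying more than two standard deviations from the mean with the median."""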
l_array = np.array(list)
if l_array.ndim ==1:
col = l_array
else:
col = l_array[:,col_index]
mean, std, median = col.mean(), col.std(), np.median(col)
outliers = np.absolute(col - mean) > 2*std
col[outliers] = median #Replace outliers with the median
if l_array.ndim>1:
l_array[:,col_index] = col
return l_array
def nearest_temperature(trip, temperatures):
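"""Return the temperature reading closest in time to the trip's timestamp, averaging a window of five readings if the closest one is more than two hours away."""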
date = trip[0]
temp_time = [t[0] for t in temperatures]
temp_temp = [t[1] for t in temperatures]
closest_time = min(temp_time, key=lambda d: abs(d - date))
temp_index = temp_time.index(closest_time)
if abs(date-closest_time) > timedelta(hours=2):#If the time for the temperature is more than 2 hours away
temp_range = range(temp_index-2, temp_index+3)
temp = np.mean([temp_temp[i] for i in temp_range]) #Mean of a window of 5
else:
temp = temp_temp[temp_index]
return temp
def time_to_class(trip, class_start_time):
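"""Return the number of minutes from the trip's timestamp to that weekday's class start time."""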
date = trip[0]
weekday = date.strftime('%A')
#start_time = class_start_time[weekday]
start_time = datetime.strptime(class_start_time[weekday], '%H:%M').time()
trip_time = date.time()
class_seconds = (start_time.hour*60*60 + start_time.minute*60 + start_time.second)
trip_seconds = (trip_time.hour*60*60 + trip_time.minute*60 + trip_time.second)
delta_minutes = (class_seconds - trip_seconds)/60
return delta_minutes
def add_missing_hours(aggregated_data):
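"""Return the aggregated (day, hour, count) data as an array with zero counts filled in for any missing day/hour combination."""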
complete_array = []
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
hours = range(24)
day_hour = {}
keys = []
for d in days:
for h in hours:
day_hour[(d,h)]=0
keys.append((d,h))
for a in aggregated_data:
index = (a[0],a[1])
count = a[2]
day_hour[index] = count
for k in keys:
value = day_hour[k]
row = (k[0],k[1],value)
complete_array.append(row)
return np.array(complete_array)
#####################################################################################################
############# END FUNCTIONS #############
#####################################################################################################
local_time_departure = "CONVERT_TZ(FROM_UNIXTIME(double_departure/1000,'%Y-%m-%d %H:%i:%s'), '+00:00','-05:00')"
local_time_arrival = "CONVERT_TZ(FROM_UNIXTIME(double_arrival/1000,'%Y-%m-%d %H:%i:%s'), '+00:00','-05:00')"
day_of_week = "DAYNAME(CONVERT_TZ(FROM_UNIXTIME(double_departure/1000,'%Y-%m-%d %H:%i:%s'), '+00:00','-05:00'))"
start_date = "FROM_UNIXTIME(timestamp/1000,'%Y-%m-%d')>'2017-01-30'" #Date I returned from NY
#max_time = "TIME(CONVERT_TZ(FROM_UNIXTIME(timestamp/1000,'%Y-%m-%d %H:%i:%s'), '+00:00','-05:00'))<MAKETIME(16,0,0)"
query = "SELECT double_latitude, double_longitude, {0} AS departure, {1} AS arrival, address, {2} FROM locations_visit WHERE {3};".format(local_time_departure, local_time_arrival, day_of_week, start_date)
locations_visit = make_query(cursor,query)
bins = bin_locations(locations_visit, _EPSILON)
addresses = unique_address(locations_visit, _EPSILON)
norm_locations = normalize_address(locations_visit, addresses, _EPSILON)
trips = join_trips(norm_locations)
start_address_index = 4
end_address_index = 5
total_time_index = 3
start_time_index = 1
commute_toUniv = [t for t in trips if (t[start_address_index] == _HOME) & (t[end_address_index] == _UNIVERSITY) & (t[start_time_index].time() < t[start_time_index].time().replace(hour=16, | |
mapping of runner names to job runner
instances.
For example:
::
ppl.run(runners={ 'my_runner': SpecialJobRunner() })
Any runner names that don't have associated job runner
instances will use the default runner defined via the
``default_runner`` argument.
Dynamically setting number of CPUs/threads via job runners
----------------------------------------------------------
When job runners are created they can have a maximum number
of available CPUs (aka "slots") associated with them.
For ``SimpleJobRunner``s this has to be set explicitly via
the ``nslots`` argument, for example:
::
runner = SimpleJobRunner(nslots=8)
By default only a single slot is allocated. (For
``GEJobRunners`` the number of slots is set implicitly.)
The number of slots can then be accessed at runtime, so that
jobs run within a task use the appropriate number of CPUs
dynamically, by using the ``runner_nslots`` method.
For standard ``PipelineTask`` classes, this should be done
when constructing commands within the ``setup`` method. For
example: ``bowtie2`` takes a ``--threads`` option which
tells the program how many threads it should use. A minimal
task to run ``bowtie2`` with dynamically assigned number of
threads might look like:
::
class RunBowtie2(PipelineTask):
    def init(self,fastq,index_basename,sam_out):
        pass
    def setup(self):
        self.add_cmd("Run bowtie",
                     Command("bowtie2",
                             "-x",self.args.index_basename,
                             "-U",self.args.fastq,
                             "-S",self.args.sam_out,
                             "--threads",self.runner_nslots))
.. note::
When using dynamic CPU assignment with ``SimpleJobRunners``,
it may also be worth considering using the ``max_slots``
parameter when running the pipeline.
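For example, a minimal sketch (this assumes ``max_slots`` is accepted
as a keyword of the ``run`` method, as suggested above):
::
    ppl.run(max_slots=8)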
Dealing with stdout from tasks
------------------------------
The stdout from tasks which run external commands can be
accessed via the ``stdout`` property of the task instance once
it has completed.
Where multiple jobs were run by the task, the stdout from all
jobs is concatenated and returned via this property.
The stdout for each job is topped and tailed with a standard
set of comment lines output from the wrapper scripts, of the
form::
#### COMMAND Echo text
#### HOSTNAME popov
#### USER pjb
#### START Thu Aug 17 08:38:14 BST 2017
...
...Job-specific output...
...
#### END Thu Aug 17 08:38:14 BST 2017
#### EXIT_CODE 0
When parsing the stdout it is recommended to check for these lines
using e.g. ``line.startswith("#### ")``.
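As a minimal illustration (the helper name below is hypothetical, and
the stdout is assumed to already be available as a string), the
wrapper comment lines can be separated from the job output like this:
::
    def split_task_stdout(stdout):
        # Split concatenated task stdout into per-job records,
        # using the '#### COMMAND' header to mark a new job
        jobs, current = [], None
        for line in stdout.splitlines():
            if line.startswith("#### COMMAND"):
                current = {"header": [], "output": []}
                jobs.append(current)
            if current is None:
                continue
            if line.startswith("#### "):
                current["header"].append(line)
            else:
                current["output"].append(line)
        return jobs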
Handling failed tasks in pipelines
----------------------------------
If a task in a pipeline fails (that is, completes with a
non-zero exit code) then the pipeline is considered to have
failed. In this case the pipeline can use one of a number of
strategies to handle execution of the remaining tasks:
* Pipeline execution halts immediately and all running tasks
are terminated ('immediate' mode, the default)
* Pipeline execution continues but all tasks which depend on
the failed tasks are removed and not executed ('deferred'
mode)
The strategy can be set explicitly at runtime by setting the
``exit_on_failure`` argument of the pipeline ``run`` method
to one of the values defined in the ``PipelineFailure`` class.
For example::
from pipeliner import PipelineFailure
...
# Define pipeline
...
# Run pipeline in 'deferred' mode
ppl.run(exit_on_failure=PipelineFailure.DEFERRED)
Note that regardless of how the failures are handled the
pipeline will always return exit code 1 when one or more
tasks fail.
Executing pipeline commands in batches
--------------------------------------
By default when the pipeline executes the commands generated by
a task, each command is sent to the scheduler as a single job.
It is also possible to request that the pipeline executes commands
in batches, by specifying either a non-zero size for the
``batch_size`` option of the ``run`` method, or by specifying
a non-zero ``batch_limit``.
If ``batch_size`` is set then commands are grouped together
into batches of this size, and each batch is sent to the scheduler
as a single job; if ``batch_limit`` is set then the batch size
is set automatically so that the number of batches doesn't
exceed the specified limit.
Within a batch the commands are executed sequentially, and if
one command fails then all subsequent commands in the batch
won't run.
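As a sketch of the arithmetic involved (the helper below is purely
illustrative and not part of the pipeline API), setting ``batch_limit``
amounts to picking the smallest batch size that keeps the number of
batches within the limit:
::
    import math

    def derive_batch_size(n_commands, batch_limit):
        # smallest batch size such that the number of batches
        # does not exceed batch_limit
        return math.ceil(n_commands / batch_limit)

    # e.g. 100 commands with batch_limit=8 gives batches of 13
    # commands, i.e. 8 batches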
Batch mode can also be requested on a per-task basis, by
explicitly specifying ``batch_size`` as keyword when adding
the task to the pipeline. For example::
ppl = Pipeline()
...
ppl.add_task(my_task,batch_size=5)
This will override any batch size set globally when the
pipeline is run.
Setting pipeline parameters at execution time
---------------------------------------------
When building pipelines, it is sometimes necessary or desirable
to pass a parameter into a task where the value of the parameter
isn't known until execution time (via the ``run`` method).
For example, a task in the pipeline might need to know the
number of cores or the location of a temporary directory to be
used, which can only be set at execution time.
To handle these situations, it is possible to define arbitrary
parameters within the ``Pipeline`` class at build time and then
set the values of these parameters at execution time.
The ``add_param`` method is used to define a parameter, for
example:
::
ppl = Pipeline()
ppl.add_param('ncores',value=1,type=int)
ppl.add_param('tmpdir')
This creates a new ``PipelineParam`` instance which is associated
with the supplied name.
The parameters can be accessed via the pipeline's ``params``
property, and passed as input into tasks, for example:
::
task = ExampleTask("This is an example",
ncores=ppl.params.ncores,
tmpdir=ppl.params.tmpdir)
ppl.add_task(task)
The runtime values of parameters are then passed via the
``params`` argument of the pipeline's ``run`` invocation:
::
temporary_dir = tempfile.mkdtemp()
ppl.run(params={ 'ncores': 8,
'tmpdir': temporary_dir, })
Built-in parameters
-------------------
In addition to the custom parameters defined using the
``add_param`` method and outlined in the previous section, a
number of 'built-in' parameters are also available as properties
of the ``Pipeline`` instance, for use when building a pipeline.
Specifically these are:
* ``WORKING_DIR``: the working directory used by the pipeline
* ``BATCH_SIZE``: the batch size to be used when running jobs
within pipeline tasks
* ``BATCH_LIMIT``: the maximum number of batches of jobs
* ``VERBOSE``: whether the pipeline is running in 'verbose'
mode
These can be used in the same way as the custom parameters when
setting up tasks, for example:
::
task = ExampleTask("This is an example",
ncores=ppl.params.ncores,
tmpdir=ppl.params.WORKING_DIR)
The values will be set when the pipeline's ``run`` method is
invoked.
Defining execution environment for a task: runners, modules & conda
-------------------------------------------------------------------
It is possible to define the execution environment on a per-task
basis within a pipeline, by defining job runners, environment
modules and conda dependencies.
Runners and environments can be declared in a parameterised
fashion when a pipeline is created, using the ``add_runner`` and
``add_envmodules`` methods respectively of the ``Pipeline`` class.
For example:
::
ppl = Pipeline()
ppl.add_runner('4_cpus')
ppl.add_envmodules('myenv')
This defines a ``runner`` called ``4_cpus`` and an environment
called ``myenv``.
The runners and environments are accessed via the ``runners``
and ``envmodules`` properties of the ``Pipeline`` instance, and
can be associated with tasks within the pipeline when they
are added via the ``add_task`` method, using the ``runner`` and
``envmodules`` keywords respectively.
For example:
::
ppl.add_task(my_task,runner=ppl.runners['4_cpus'],...)
and
::
ppl.add_task(my_task,envmodules=ppl.envmodules['myenv'],...)
Actual runners and environments can be assigned when the pipeline
is executed, via the ``runners`` and ``envmodules`` options of
the ``run`` method of the ``Pipeline`` instance - these are
mappings of the names defined previously to ``JobRunner`` instances,
and to lists of environment modules.
For example:
::
ppl.run(runners={ '4_cpus': GEJobRunner('-pe smp.pe 4'), },
envmodules={ 'myenv': 'apps/trimmomatic/0.38', },...)
If a runner is not explicitly set for a task then the pipeline's
default runner is used for that task; this defaults to a
``SimpleJobRunner`` instance but can be set explicitly via the
``default_runner`` argument of the ``Pipeline`` instance's ``run``
method.
Execution environments can also be defined with ``conda`` packages.
The packages and versions required by a task are declared in a
task's ``init`` method with calls to the ``conda`` method, for
example:
::
class RunFastqc(PipelineTask):
    def init(self,fastq,out_dir):
        self.conda("fastqc=0.11.3")
        ...
If ``conda`` dependency resolution is enabled when the pipeline is
executed then these declarations will be used to generate ``conda``
environments that are activated when the tasks run (otherwise they
are ignored); see the section "Enabling conda to create task
environments automatically" for details.
Defining outputs from a pipeline
--------------------------------
It is possible to define outputs for a ``Pipeline`` instance in
the same way that outputs can be defined for individual tasks.
The ``add_output`` method of the ``Pipeline`` class allows an
arbitrary output to be defined, for example:
::
ppl = Pipeline()
...
ppl.add_output('final_result',result)
ppl.run()
This can be accessed via the pipeline's ``output`` property:
::
print("The result is '%s'" % ppl.output.result)
It is possible that pipeline outputs are defined as
``PipelineParam`` instances (for example, if a pipeline output is
taken from an output from one of its constituent tasks). By
default, on pipeline completion the outputs are "finalized" by
substituting the ``PipelineParam``s for their actual values. To
prevent this behaviour, set the ``finalize_outputs`` argument of
the pipeline's ``run`` method to ``False``. For example:
::
ppl = Pipeline()
ppl.add_output('final_result',PipelineParam())
...
ppl.run(finalize_outputs=False)
It is recommended that outputs are defined as ``PipelineParam``
instances, to take advantage of the implicit task requirement
gathering mechanism.
Enabling conda to create task environments automatically
--------------------------------------------------------
The ``conda`` package manager can be used within ``Pipeline``s
to automatically create run-time environments for any tasks which
declare | |
from .api.functions import *
import posixpath
import csv
def attributes():
"""Output file attributes."""
lexical = [
'num_dot_url', 'num_hyphen_url', 'num_underline_url',
'num_bar_url', 'num_question_url', 'num_equal_url',
'num_atsign_url', 'num_ampersand_url', 'num_exclamation_url',
'num_space_url', 'num_tilde_url', 'num_comma_url',
'num_plus_url', 'num_asterisk_url', 'num_hashtag_url',
'num_dollar_url', 'num_percent_url', 'num_tld_url',
'length_url', 'num_dot_domain', 'num_hyphen_domain', 'num_underline_domain',
'num_bar_domain', 'num_question_domain', 'num_equal_domain',
'num_atsign_domain', 'num_ampersand_domain', 'num_exclamation_domain',
'num_space_domain', 'num_tilde_domain', 'num_comma_domain',
'num_plus_domain', 'num_asterisk_domain', 'num_hashtag_domain',
'num_dollar_domain', 'num_percent_domain', 'length_domain', 'format_ip_domain',
'server_client_domain', 'num_dot_directory', 'num_hyphen_directory', 'num_underline_directory',
'num_bar_directory', 'num_question_directory', 'num_equal_directory',
'num_atsign_directory', 'num_ampersand_directory', 'num_exclamation_directory',
'num_space_directory', 'num_tilde_directory', 'num_comma_directory',
'num_plus_directory', 'num_asterisk_directory', 'num_hashtag_directory',
'num_dollar_directory', 'num_percent_directory', 'length_directory', 'num_dot_file', 'num_hyphen_file', 'num_underline_file',
'num_bar_file', 'num_question_file', 'num_equal_file',
'num_atsign_file', 'num_ampersand_file', 'num_exclamation_file',
'num_space_file', 'num_tilde_file', 'num_comma_file',
'num_plus_file', 'num_asterisk_file', 'num_hashtag_file',
'num_dollar_file', 'num_percent_file',
'length_file', 'num_dot_params', 'num_hyphen_params', 'num_underline_params',
'num_bar_params', 'num_question_params', 'num_equal_params',
'num_atsign_params', 'num_ampersand_params', 'num_exclamation_params',
'num_space_params', 'num_tilde_params', 'num_comma_params',
'num_plus_params', 'num_asterisk_params', 'num_hashtag_params',
'num_dollar_params', 'num_percent_params',
'length_params', 'presence_tld_arguments', 'num_parameters',
'email_at_url', 'extension_file'
]
host = ['domain_present_in_rbl', 'time_response', 'localtion_geographic_ip',
'as_number','time_activation_domain', 'time_expiration_domain',
'num_ip_resolved', 'nameservers', 'num_server_mx', 'value_ttl_associaated']
others = ['certificate_tls_ssl', 'num_redirect', 'url_index_on_google', 'domain_index_on_google', 'url_shortener']
list_attributes = []
list_attributes.extend(lexical)
list_attributes.extend(host)
list_attributes.extend(others)
list_attributes.extend(['phising'])
return list_attributes
def extract_new_url(url, dataset):
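"""Extract lexical, host and other features for a single URL and write them as a one-row CSV (the phishing label column is left empty)."""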
print(url)
if (check_Alive(url)):
with open(dataset, "w", newline='') as output:
writer = csv.writer(output)
writer.writerow(attributes())
dict_url = start_url(url)
"""LEXICAL"""
# URL
dot_url = str(count(dict_url['url'], '.'))
hyphen_url = str(count(dict_url['url'], '-'))
underline_url = str(count(dict_url['url'], '_'))
bar_url = str(count(dict_url['url'], '/'))
question_url = str(count(dict_url['url'], '?'))
equal_url = str(count(dict_url['url'], '='))
atsign_url = str(count(dict_url['url'], '@'))
ampersand_url = str(count(dict_url['url'], '&'))
exclamation_url = str(count(dict_url['url'], '!'))
blank_url = str(count(dict_url['url'], ' '))
til_url = str(count(dict_url['url'], '~'))
comma_url = str(count(dict_url['url'], ','))
plus_url = str(count(dict_url['url'], '+'))
asterisk_url = str(count(dict_url['url'], '*'))
hashtag_url = str(count(dict_url['url'], '#'))
money_sign_url = str(count(dict_url['url'], '$'))
percentage_url = str(count(dict_url['url'], '%'))
len_url = str(length(dict_url['url']))
email_exist = str(valid_email(dict_url['url']))
count_tld_url = str(count_tld(dict_url['url']))
# DOMAIN
dot_host = str(count(dict_url['host'], '.'))
hyphen_host = str(count(dict_url['host'], '-'))
underline_host = str(count(dict_url['host'], '_'))
bar_host = str(count(dict_url['host'], '/'))
question_host = str(count(dict_url['host'], '?'))
equal_host = str(count(dict_url['host'], '='))
atsign_host = str(count(dict_url['host'], '@'))
ampersand_host = str(count(dict_url['host'], '&'))
exclamation_host = str(count(dict_url['host'], '!'))
blank_host = str(count(dict_url['host'], ' '))
til_host = str(count(dict_url['host'], '~'))
comma_host = str(count(dict_url['host'], ','))
plus_host = str(count(dict_url['host'], '+'))
asterisk_host = str(count(dict_url['host'], '*'))
hashtag_host = str(count(dict_url['host'], '#'))
money_sign_host = str(count(dict_url['host'], '$'))
percentage_host = str(count(dict_url['host'], '%'))
len_host = str(length(dict_url['host']))
ip_exist = str(valid_ip(dict_url['host']))
server_client = str(check_word_server_client(dict_url['host']))
# DIRECTORY
if dict_url['path']:
dot_path = str(count(dict_url['path'], '.'))
hyphen_path = str(count(dict_url['path'], '-'))
underline_path = str(count(dict_url['path'], '_'))
bar_path = str(count(dict_url['path'], '/'))
question_path = str(count(dict_url['path'], '?'))
equal_path = str(count(dict_url['path'], '='))
atsign_path = str(count(dict_url['path'], '@'))
ampersand_path = str(count(dict_url['path'], '&'))
exclamation_path = str(count(dict_url['path'], '!'))
blank_path = str(count(dict_url['path'], ' '))
til_path = str(count(dict_url['path'], '~'))
comma_path = str(count(dict_url['path'], ','))
plus_path = str(count(dict_url['path'], '+'))
asterisk_path = str(count(dict_url['path'], '*'))
hashtag_path = str(count(dict_url['path'], '#'))
money_sign_path = str(count(dict_url['path'], '$'))
percentage_path = str(count(dict_url['path'], '%'))
len_path = str(length(dict_url['path']))
else:
dot_path = -1
hyphen_path = -1
underline_path = -1
bar_path = -1
question_path = -1
equal_path = -1
atsign_path = -1
ampersand_path = -1
exclamation_path = -1
blank_path = -1
til_path = -1
comma_path = -1
plus_path = -1
asterisk_path = -1
hashtag_path = -1
money_sign_path = -1
percentage_path = -1
len_path = -1
# FILE
if dict_url['path']:
dot_file = str(count(posixpath.basename(dict_url['path']), '.'))
hyphen_file = str(count(posixpath.basename(dict_url['path']), '-'))
underline_file = str(count(posixpath.basename(dict_url['path']), '_'))
bar_file = str(count(posixpath.basename(dict_url['path']), '/'))
question_file = str(count(posixpath.basename(dict_url['path']), '?'))
equal_file = str(count(posixpath.basename(dict_url['path']), '='))
atsign_file = str(count(posixpath.basename(dict_url['path']), '@'))
ampersand_file = str(count(posixpath.basename(dict_url['path']), '&'))
exclamation_file = str(count(posixpath.basename(dict_url['path']), '!'))
blank_file = str(count(posixpath.basename(dict_url['path']), ' '))
til_file = str(count(posixpath.basename(dict_url['path']), '~'))
comma_file = str(count(posixpath.basename(dict_url['path']), ','))
plus_file = str(count(posixpath.basename(dict_url['path']), '+'))
asterisk_file = str(count(posixpath.basename(dict_url['path']), '*'))
hashtag_file = str(count(posixpath.basename(dict_url['path']), '#'))
money_sign_file = str(count(posixpath.basename(dict_url['path']), '$'))
percentage_file = str(count(posixpath.basename(dict_url['path']), '%'))
len_file = str(length(posixpath.basename(dict_url['path'])))
extension = str(extract_extension(posixpath.basename(dict_url['path'])))
else:
dot_file = -1
hyphen_file = -1
underline_file = -1
bar_file = -1
question_file = -1
equal_file = -1
atsign_file = -1
ampersand_file = -1
exclamation_file = -1
blank_file = -1
til_file = -1
comma_file = -1
plus_file = -1
asterisk_file = -1
hashtag_file = -1
money_sign_file = -1
percentage_file = -1
len_file = -1
extension = -1
# PARAMETERS
if dict_url['query']:
dot_params = str(count(dict_url['query'], '.'))
hyphen_params = str(count(dict_url['query'], '-'))
underline_params = str(count(dict_url['query'], '_'))
bar_params = str(count(dict_url['query'], '/'))
question_params = str(count(dict_url['query'], '?'))
equal_params = str(count(dict_url['query'], '='))
atsign_params = str(count(dict_url['query'], '@'))
ampersand_params = str(count(dict_url['query'], '&'))
exclamation_params = str(count(dict_url['query'], '!'))
blank_params = str(count(dict_url['query'], ' '))
til_params = str(count(dict_url['query'], '~'))
comma_params = str(count(dict_url['query'], ','))
plus_params = str(count(dict_url['query'], '+'))
asterisk_params = str(count(dict_url['query'], '*'))
hashtag_params = str(count(dict_url['query'], '#'))
money_sign_params = str(count(dict_url['query'], '$'))
percentage_params = str(count(dict_url['query'], '%'))
len_params = str(length(dict_url['query']))
tld_params = str(check_tld(dict_url['query']))
number_params = str(count_params(dict_url['query']))
else:
dot_params = -1
hyphen_params = -1
underline_params = -1
bar_params = -1
question_params = -1
equal_params = -1
atsign_params = -1
ampersand_params = -1
exclamation_params = -1
blank_params = -1
til_params = -1
comma_params = -1
plus_params = -1
asterisk_params = -1
hashtag_params = -1
money_sign_params = -1
percentage_params = -1
len_params = -1
tld_params = -1
number_params = -1
"""HOST"""
rbl = str(check_rbl(dict_url['host']))
time_domain = str(check_time_response(dict_url['protocol'] + '://' + dict_url['host']))
asn = str(get_asn_number(dict_url))
country = str(get_country(dict_url))
activation_time = str(time_activation_domain(dict_url))
expiration_time = str(expiration_date_register(dict_url))
count_ip = str(count_ips(dict_url))
count_ns = str(count_name_servers(dict_url))
count_mx = str(count_mx_servers(dict_url))
ttl = str(extract_ttl(dict_url))
"""OTHERS"""
ssl = str(check_ssl('https://' + dict_url['url']))
count_redirect = str(count_redirects(dict_url['protocol'] + '://' + dict_url['url']))
google_url = str(google_search(dict_url['url']))
google_domain = str(google_search(dict_url['host']))
shortener = str(check_shortener(dict_url))
_lexical = [
dot_url, hyphen_url, underline_url, bar_url, question_url,
equal_url, atsign_url, ampersand_url, exclamation_url,
blank_url, til_url, comma_url, plus_url, asterisk_url, hashtag_url,
money_sign_url, percentage_url, count_tld_url, len_url, dot_host,
hyphen_host, underline_host, bar_host, question_host, equal_host,
atsign_host, ampersand_host, exclamation_host, blank_host, til_host,
comma_host, plus_host, asterisk_host, hashtag_host, money_sign_host,
percentage_host, len_host, ip_exist, server_client,
dot_path, hyphen_path, underline_path, bar_path, question_path,
equal_path, atsign_path, ampersand_path, exclamation_path,
blank_path, til_path, comma_path, plus_path, asterisk_path,
hashtag_path, money_sign_path, percentage_path, len_path, dot_file,
hyphen_file, underline_file, bar_file, question_file, equal_file,
atsign_file, ampersand_file, exclamation_file, blank_file,
til_file, comma_file, plus_file, asterisk_file, hashtag_file,
money_sign_file, percentage_file, len_file, dot_params,
hyphen_params, underline_params, bar_params, question_params,
equal_params, atsign_params, ampersand_params, exclamation_params,
blank_params, til_params, comma_params, plus_params, asterisk_params,
hashtag_params, money_sign_params, percentage_params, len_params,
tld_params, number_params, email_exist, extension
]
_host = [rbl, time_domain, country, asn, activation_time,
expiration_time, count_ip, count_ns, count_mx, ttl]
_others = [ssl, count_redirect, google_url, google_domain, shortener]
result = []
result.extend(_lexical)
result.extend(_host)
result.extend(_others)
result.extend([''])
print(result)
writer.writerow(result)
else:
print('This page is not online')
def generate_dataset(urls, dataset, phising):
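"""Extract features for every reachable URL listed in the urls file and write them as rows of the dataset CSV."""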
with open(dataset, "w", newline='') as output:
writer = csv.writer(output)
writer.writerow(attributes())
count_url = 0
for url in read_file(urls):
if (check_Alive(url)):
print(url)
count_url = count_url + 1
dict_url = start_url(url)
"""LEXICAL"""
# URL
dot_url = str(count(dict_url['url'], '.'))
hyphen_url = str(count(dict_url['url'], '-'))
underline_url = str(count(dict_url['url'], '_'))
bar_url = str(count(dict_url['url'], '/'))
question_url = str(count(dict_url['url'], '?'))
equal_url = str(count(dict_url['url'], '='))
atsign_url = str(count(dict_url['url'], '@'))
ampersand_url = str(count(dict_url['url'], '&'))
exclamation_url = str(count(dict_url['url'], '!'))
blank_url = str(count(dict_url['url'], ' '))
til_url = str(count(dict_url['url'], '~'))
comma_url = str(count(dict_url['url'], ','))
plus_url = str(count(dict_url['url'], '+'))
asterisk_url = str(count(dict_url['url'], '*'))
hashtag_url = str(count(dict_url['url'], '#'))
money_sign_url = str(count(dict_url['url'], '$'))
percentage_url = str(count(dict_url['url'], '%'))
len_url = str(length(dict_url['url']))
email_exist = str(valid_email(dict_url['url']))
count_tld_url = str(count_tld(dict_url['url']))
# DOMAIN
dot_host = str(count(dict_url['host'], '.'))
hyphen_host = str(count(dict_url['host'], '-'))
underline_host = str(count(dict_url['host'], '_'))
bar_host = str(count(dict_url['host'], '/'))
question_host = str(count(dict_url['host'], '?'))
equal_host = str(count(dict_url['host'], '='))
atsign_host = str(count(dict_url['host'], '@'))
ampersand_host = str(count(dict_url['host'], '&'))
exclamation_host = str(count(dict_url['host'], '!'))
blank_host = str(count(dict_url['host'], ' '))
til_host = str(count(dict_url['host'], '~'))
comma_host = str(count(dict_url['host'], ','))
plus_host = str(count(dict_url['host'], '+'))
asterisk_host = str(count(dict_url['host'], '*'))
hashtag_host = str(count(dict_url['host'], '#'))
money_sign_host = str(count(dict_url['host'], '$'))
percentage_host = str(count(dict_url['host'], '%'))
len_host = str(length(dict_url['host']))
ip_exist = str(valid_ip(dict_url['host']))
server_client = str(check_word_server_client(dict_url['host']))
# DIRECTORY
if dict_url['path']:
dot_path = str(count(dict_url['path'], '.'))
hyphen_path = str(count(dict_url['path'], '-'))
underline_path = str(count(dict_url['path'], '_'))
bar_path = str(count(dict_url['path'], '/'))
question_path = str(count(dict_url['path'], '?'))
equal_path = str(count(dict_url['path'], '='))
atsign_path = str(count(dict_url['path'], '@'))
ampersand_path = str(count(dict_url['path'], '&'))
exclamation_path = str(count(dict_url['path'], '!'))
blank_path = str(count(dict_url['path'], ' '))
til_path = str(count(dict_url['path'], '~'))
comma_path = str(count(dict_url['path'], ','))
plus_path = str(count(dict_url['path'], '+'))
asterisk_path = str(count(dict_url['path'], '*'))
hashtag_path = str(count(dict_url['path'], '#'))
money_sign_path = str(count(dict_url['path'], '$'))
percentage_path = str(count(dict_url['path'], '%'))
len_path = str(length(dict_url['path']))
else:
dot_path = -1
hyphen_path = -1
underline_path = -1
bar_path = -1
question_path = -1
equal_path = -1
atsign_path = -1
ampersand_path = -1
exclamation_path = -1
blank_path = -1
til_path = -1
comma_path = -1
plus_path = -1
asterisk_path = -1
hashtag_path = -1
money_sign_path = -1
percentage_path = -1
len_path = -1
# FILE
if dict_url['path']:
dot_file = str(count(posixpath.basename(dict_url['path']), '.'))
hyphen_file = str(count(posixpath.basename(dict_url['path']), '-'))
underline_file = str(
count(posixpath.basename(dict_url['path']), '_'))
bar_file = str(count(posixpath.basename(dict_url['path']), '/'))
question_file = str(
count(posixpath.basename(dict_url['path']), '?'))
equal_file = str(count(posixpath.basename(dict_url['path']), '='))
atsign_file = str(count(posixpath.basename(dict_url['path']), '@'))
ampersand_file = | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
__all__ = [
'FirewallDevice',
'FirewallInbound',
'FirewallOutbound',
'InstanceAlerts',
'InstanceBackups',
'InstanceBackupsSchedule',
'InstanceConfig',
'InstanceConfigDevices',
'InstanceConfigDevicesSda',
'InstanceConfigDevicesSdb',
'InstanceConfigDevicesSdc',
'InstanceConfigDevicesSdd',
'InstanceConfigDevicesSde',
'InstanceConfigDevicesSdf',
'InstanceConfigDevicesSdg',
'InstanceConfigDevicesSdh',
'InstanceConfigHelpers',
'InstanceConfigInterface',
'InstanceDisk',
'InstanceInterface',
'InstanceSpecs',
'LkeClusterPool',
'LkeClusterPoolNode',
'NodeBalancerConfigNodeStatus',
'NodeBalancerTransfer',
'ObjectStorageBucketCert',
'ObjectStorageBucketLifecycleRule',
'ObjectStorageBucketLifecycleRuleExpiration',
'ObjectStorageBucketLifecycleRuleNoncurrentVersionExpiration',
'ObjectStorageKeyBucketAccess',
'StackScriptUserDefinedField',
'UserDomainGrant',
'UserFirewallGrant',
'UserGlobalGrants',
'UserImageGrant',
'UserLinodeGrant',
'UserLongviewGrant',
'UserNodebalancerGrant',
'UserStackscriptGrant',
'UserVolumeGrant',
'GetFirewallDeviceResult',
'GetFirewallInboundResult',
'GetFirewallOutboundResult',
'GetImagesFilterResult',
'GetImagesImageResult',
'GetInstanceBackupsAutomaticResult',
'GetInstanceBackupsAutomaticDiskResult',
'GetInstanceBackupsCurrentResult',
'GetInstanceBackupsCurrentDiskResult',
'GetInstanceBackupsInProgressResult',
'GetInstanceBackupsInProgressDiskResult',
'GetInstanceTypeAddonsResult',
'GetInstanceTypeAddonsBackupsResult',
'GetInstanceTypeAddonsBackupsPriceResult',
'GetInstanceTypePriceResult',
'GetInstancesFilterResult',
'GetInstancesInstanceResult',
'GetInstancesInstanceAlertsResult',
'GetInstancesInstanceBackupResult',
'GetInstancesInstanceBackupScheduleResult',
'GetInstancesInstanceConfigResult',
'GetInstancesInstanceConfigDeviceResult',
'GetInstancesInstanceConfigDeviceSdaResult',
'GetInstancesInstanceConfigDeviceSdbResult',
'GetInstancesInstanceConfigDeviceSdcResult',
'GetInstancesInstanceConfigDeviceSddResult',
'GetInstancesInstanceConfigDeviceSdeResult',
'GetInstancesInstanceConfigDeviceSdfResult',
'GetInstancesInstanceConfigDeviceSdgResult',
'GetInstancesInstanceConfigDeviceSdhResult',
'GetInstancesInstanceConfigHelperResult',
'GetInstancesInstanceConfigInterfaceResult',
'GetInstancesInstanceDiskResult',
'GetInstancesInstanceSpecResult',
'GetLkeClusterPoolResult',
'GetLkeClusterPoolNodeResult',
'GetNodeBalancerConfigNodeStatusResult',
'GetNodeBalancerTransferResult',
'GetProfileReferralsResult',
'GetStackScriptUserDefinedFieldResult',
'GetVlansFilterResult',
'GetVlansVlanResult',
]
@pulumi.output_type
class FirewallDevice(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "entityId":
suggest = "entity_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in FirewallDevice. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
FirewallDevice.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
FirewallDevice.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
entity_id: Optional[int] = None,
id: Optional[int] = None,
label: Optional[str] = None,
type: Optional[str] = None,
url: Optional[str] = None):
"""
:param int entity_id: The ID of the underlying entity this device references (i.e. the Linode's ID).
:param int id: The ID of the Firewall Device.
:param str label: Used to identify this rule. For display purposes only.
:param str type: The type of Firewall Device.
"""
if entity_id is not None:
pulumi.set(__self__, "entity_id", entity_id)
if id is not None:
pulumi.set(__self__, "id", id)
if label is not None:
pulumi.set(__self__, "label", label)
if type is not None:
pulumi.set(__self__, "type", type)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter(name="entityId")
def entity_id(self) -> Optional[int]:
"""
The ID of the underlying entity this device references (i.e. the Linode's ID).
"""
return pulumi.get(self, "entity_id")
@property
@pulumi.getter
def id(self) -> Optional[int]:
"""
The ID of the Firewall Device.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def label(self) -> Optional[str]:
"""
Used to identify this rule. For display purposes only.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
The type of Firewall Device.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def url(self) -> Optional[str]:
return pulumi.get(self, "url")
@pulumi.output_type
class FirewallInbound(dict):
def __init__(__self__, *,
action: str,
label: str,
protocol: str,
ipv4s: Optional[Sequence[str]] = None,
ipv6s: Optional[Sequence[str]] = None,
ports: Optional[str] = None):
"""
:param str action: Controls whether traffic is accepted or dropped by this rule (`ACCEPT`, `DROP`). Overrides the Firewall’s inbound_policy if this is an inbound rule, or the outbound_policy if this is an outbound rule.
:param str label: Used to identify this rule. For display purposes only.
:param str protocol: The network protocol this rule controls. (`TCP`, `UDP`, `ICMP`)
:param Sequence[str] ipv4s: A list of IPv4 addresses or networks. Must be in IP/mask format.
:param Sequence[str] ipv6s: A list of IPv6 addresses or networks. Must be in IP/mask format.
:param str ports: A string representation of ports and/or port ranges (i.e. "443" or "80-90, 91").
"""
pulumi.set(__self__, "action", action)
pulumi.set(__self__, "label", label)
pulumi.set(__self__, "protocol", protocol)
if ipv4s is not None:
pulumi.set(__self__, "ipv4s", ipv4s)
if ipv6s is not None:
pulumi.set(__self__, "ipv6s", ipv6s)
if ports is not None:
pulumi.set(__self__, "ports", ports)
@property
@pulumi.getter
def action(self) -> str:
"""
Controls whether traffic is accepted or dropped by this rule (`ACCEPT`, `DROP`). Overrides the Firewall’s inbound_policy if this is an inbound rule, or the outbound_policy if this is an outbound rule.
"""
return pulumi.get(self, "action")
@property
@pulumi.getter
def label(self) -> str:
"""
Used to identify this rule. For display purposes only.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter
def protocol(self) -> str:
"""
The network protocol this rule controls. (`TCP`, `UDP`, `ICMP`)
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter
def ipv4s(self) -> Optional[Sequence[str]]:
"""
A list of IPv4 addresses or networks. Must be in IP/mask format.
"""
return pulumi.get(self, "ipv4s")
@property
@pulumi.getter
def ipv6s(self) -> Optional[Sequence[str]]:
"""
A list of IPv6 addresses or networks. Must be in IP/mask format.
"""
return pulumi.get(self, "ipv6s")
@property
@pulumi.getter
def ports(self) -> Optional[str]:
"""
A string representation of ports and/or port ranges (i.e. "443" or "80-90, 91").
"""
return pulumi.get(self, "ports")
@pulumi.output_type
class FirewallOutbound(dict):
def __init__(__self__, *,
action: str,
label: str,
protocol: str,
ipv4s: Optional[Sequence[str]] = None,
ipv6s: Optional[Sequence[str]] = None,
ports: Optional[str] = None):
"""
:param str action: Controls whether traffic is accepted or dropped by this rule (`ACCEPT`, `DROP`). Overrides the Firewall’s inbound_policy if this is an inbound rule, or the outbound_policy if this is an outbound rule.
:param str label: Used to identify this rule. For display purposes only.
:param str protocol: The network protocol this rule controls. (`TCP`, `UDP`, `ICMP`)
:param Sequence[str] ipv4s: A list of IPv4 addresses or networks. Must be in IP/mask format.
:param Sequence[str] ipv6s: A list of IPv6 addresses or networks. Must be in IP/mask format.
:param str ports: A string representation of ports and/or port ranges (i.e. "443" or "80-90, 91").
"""
pulumi.set(__self__, "action", action)
pulumi.set(__self__, "label", label)
pulumi.set(__self__, "protocol", protocol)
if ipv4s is not None:
pulumi.set(__self__, "ipv4s", ipv4s)
if ipv6s is not None:
pulumi.set(__self__, "ipv6s", ipv6s)
if ports is not None:
pulumi.set(__self__, "ports", ports)
@property
@pulumi.getter
def action(self) -> str:
"""
Controls whether traffic is accepted or dropped by this rule (`ACCEPT`, `DROP`). Overrides the Firewall’s inbound_policy if this is an inbound rule, or the outbound_policy if this is an outbound rule.
"""
return pulumi.get(self, "action")
@property
@pulumi.getter
def label(self) -> str:
"""
Used to identify this rule. For display purposes only.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter
def protocol(self) -> str:
"""
The network protocol this rule controls. (`TCP`, `UDP`, `ICMP`)
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter
def ipv4s(self) -> Optional[Sequence[str]]:
"""
A list of IPv4 addresses or networks. Must be in IP/mask format.
"""
return pulumi.get(self, "ipv4s")
@property
@pulumi.getter
def ipv6s(self) -> Optional[Sequence[str]]:
"""
A list of IPv6 addresses or networks. Must be in IP/mask format.
"""
return pulumi.get(self, "ipv6s")
@property
@pulumi.getter
def ports(self) -> Optional[str]:
"""
A string representation of ports and/or port ranges (i.e. "443" or "80-90, 91").
"""
return pulumi.get(self, "ports")
@pulumi.output_type
class InstanceAlerts(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "networkIn":
suggest = "network_in"
elif key == "networkOut":
suggest = "network_out"
elif key == "transferQuota":
suggest = "transfer_quota"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in InstanceAlerts. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
InstanceAlerts.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
InstanceAlerts.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
cpu: Optional[int] = None,
io: Optional[int] = None,
network_in: Optional[int] = None,
network_out: Optional[int] = None,
transfer_quota: Optional[int] = None):
if cpu is not None:
pulumi.set(__self__, "cpu", cpu)
if io is not None:
pulumi.set(__self__, "io", io)
if network_in is not None:
pulumi.set(__self__, "network_in", network_in)
if network_out is not None:
pulumi.set(__self__, "network_out", network_out)
if transfer_quota is not None:
pulumi.set(__self__, "transfer_quota", transfer_quota)
@property
@pulumi.getter
def cpu(self) -> Optional[int]:
return pulumi.get(self, "cpu")
@property
@pulumi.getter
def io(self) -> Optional[int]:
return pulumi.get(self, "io")
@property
@pulumi.getter(name="networkIn")
def network_in(self) -> Optional[int]:
return pulumi.get(self, "network_in")
@property
@pulumi.getter(name="networkOut")
def network_out(self) -> Optional[int]:
return pulumi.get(self, "network_out")
@property
@pulumi.getter(name="transferQuota")
def transfer_quota(self) -> Optional[int]:
return pulumi.get(self, "transfer_quota")
@pulumi.output_type
class InstanceBackups(dict):
def __init__(__self__, *,
enabled: Optional[bool] = None,
schedule: Optional['outputs.InstanceBackupsSchedule'] = None):
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if schedule is not None:
pulumi.set(__self__, "schedule", schedule)
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def schedule(self) -> Optional['outputs.InstanceBackupsSchedule']:
return pulumi.get(self, "schedule")
@pulumi.output_type
class InstanceBackupsSchedule(dict):
def __init__(__self__, *,
day: Optional[str] = None,
window: Optional[str] = None):
if day is not None:
pulumi.set(__self__, "day", day)
if window is not None:
pulumi.set(__self__, "window", window)
@property
@pulumi.getter
def day(self) -> Optional[str]:
return pulumi.get(self, "day")
@property
@pulumi.getter
def window(self) -> Optional[str]:
return pulumi.get(self, "window")
@pulumi.output_type
class InstanceConfig(dict):
@staticmethod
def | |
def users(self):
self._users_value = None
self._users_present = False
@property
def doc_owner(self):
"""
The Paper doc owner. This field is populated on every single response.
:rtype: sharing.UserInfo
"""
if self._doc_owner_present:
return self._doc_owner_value
else:
raise AttributeError("missing required field 'doc_owner'")
@doc_owner.setter
def doc_owner(self, val):
self._doc_owner_validator.validate_type_only(val)
self._doc_owner_value = val
self._doc_owner_present = True
@doc_owner.deleter
def doc_owner(self):
self._doc_owner_value = None
self._doc_owner_present = False
@property
def cursor(self):
"""
Pass the cursor into
:meth:`dropbox.dropbox.Dropbox.paper_docs_users_list_continue` to
paginate through all users. The cursor preserves all properties as
specified in the original call to
:meth:`dropbox.dropbox.Dropbox.paper_docs_users_list`.
:rtype: Cursor
"""
if self._cursor_present:
return self._cursor_value
else:
raise AttributeError("missing required field 'cursor'")
@cursor.setter
def cursor(self, val):
self._cursor_validator.validate_type_only(val)
self._cursor_value = val
self._cursor_present = True
@cursor.deleter
def cursor(self):
self._cursor_value = None
self._cursor_present = False
@property
def has_more(self):
"""
Will be set to True if a subsequent call with the provided cursor to
:meth:`dropbox.dropbox.Dropbox.paper_docs_users_list_continue` returns
immediately with some results. If set to False please allow some delay
before making another call to
:meth:`dropbox.dropbox.Dropbox.paper_docs_users_list_continue`.
:rtype: bool
"""
if self._has_more_present:
return self._has_more_value
else:
raise AttributeError("missing required field 'has_more'")
@has_more.setter
def has_more(self, val):
val = self._has_more_validator.validate(val)
self._has_more_value = val
self._has_more_present = True
@has_more.deleter
def has_more(self):
self._has_more_value = None
self._has_more_present = False
def _process_custom_annotations(self, annotation_type, processor):
super(ListUsersOnPaperDocResponse, self)._process_custom_annotations(annotation_type, processor)
def __repr__(self):
return 'ListUsersOnPaperDocResponse(invitees={!r}, users={!r}, doc_owner={!r}, cursor={!r}, has_more={!r})'.format(
self._invitees_value,
self._users_value,
self._doc_owner_value,
self._cursor_value,
self._has_more_value,
)
ListUsersOnPaperDocResponse_validator = bv.Struct(ListUsersOnPaperDocResponse)
class PaperApiCursorError(bb.Union):
"""
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
:ivar expired_cursor: The provided cursor is expired.
:ivar invalid_cursor: The provided cursor is invalid.
:ivar wrong_user_in_cursor: The provided cursor contains invalid user.
:ivar reset: Indicates that the cursor has been invalidated. Call the
corresponding non-continue endpoint to obtain a new cursor.
"""
_catch_all = 'other'
# Attribute is overwritten below the class definition
expired_cursor = None
# Attribute is overwritten below the class definition
invalid_cursor = None
# Attribute is overwritten below the class definition
wrong_user_in_cursor = None
# Attribute is overwritten below the class definition
reset = None
# Attribute is overwritten below the class definition
other = None
def is_expired_cursor(self):
"""
Check if the union tag is ``expired_cursor``.
:rtype: bool
"""
return self._tag == 'expired_cursor'
def is_invalid_cursor(self):
"""
Check if the union tag is ``invalid_cursor``.
:rtype: bool
"""
return self._tag == 'invalid_cursor'
def is_wrong_user_in_cursor(self):
"""
Check if the union tag is ``wrong_user_in_cursor``.
:rtype: bool
"""
return self._tag == 'wrong_user_in_cursor'
def is_reset(self):
"""
Check if the union tag is ``reset``.
:rtype: bool
"""
return self._tag == 'reset'
def is_other(self):
"""
Check if the union tag is ``other``.
:rtype: bool
"""
return self._tag == 'other'
def _process_custom_annotations(self, annotation_type, processor):
super(PaperApiCursorError, self)._process_custom_annotations(annotation_type, processor)
def __repr__(self):
return 'PaperApiCursorError(%r, %r)' % (self._tag, self._value)
PaperApiCursorError_validator = bv.Union(PaperApiCursorError)
class PaperDocCreateArgs(bb.Struct):
"""
:ivar parent_folder_id: The Paper folder ID where the Paper document should
be created. The API user has to have write access to this folder or
error is thrown.
:ivar import_format: The format of provided data.
"""
__slots__ = [
'_parent_folder_id_value',
'_parent_folder_id_present',
'_import_format_value',
'_import_format_present',
]
_has_required_fields = True
def __init__(self,
import_format=None,
parent_folder_id=None):
self._parent_folder_id_value = None
self._parent_folder_id_present = False
self._import_format_value = None
self._import_format_present = False
if parent_folder_id is not None:
self.parent_folder_id = parent_folder_id
if import_format is not None:
self.import_format = import_format
@property
def parent_folder_id(self):
"""
The Paper folder ID where the Paper document should be created. The API
user has to have write access to this folder or error is thrown.
:rtype: str
"""
if self._parent_folder_id_present:
return self._parent_folder_id_value
else:
return None
@parent_folder_id.setter
def parent_folder_id(self, val):
if val is None:
del self.parent_folder_id
return
val = self._parent_folder_id_validator.validate(val)
self._parent_folder_id_value = val
self._parent_folder_id_present = True
@parent_folder_id.deleter
def parent_folder_id(self):
self._parent_folder_id_value = None
self._parent_folder_id_present = False
@property
def import_format(self):
"""
The format of provided data.
:rtype: ImportFormat
"""
if self._import_format_present:
return self._import_format_value
else:
raise AttributeError("missing required field 'import_format'")
@import_format.setter
def import_format(self, val):
self._import_format_validator.validate_type_only(val)
self._import_format_value = val
self._import_format_present = True
@import_format.deleter
def import_format(self):
self._import_format_value = None
self._import_format_present = False
def _process_custom_annotations(self, annotation_type, processor):
super(PaperDocCreateArgs, self)._process_custom_annotations(annotation_type, processor)
def __repr__(self):
return 'PaperDocCreateArgs(import_format={!r}, parent_folder_id={!r})'.format(
self._import_format_value,
self._parent_folder_id_value,
)
PaperDocCreateArgs_validator = bv.Struct(PaperDocCreateArgs)
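# A minimal construction sketch (illustrative only): ``ImportFormat`` is the type named
# in the ``import_format`` docstring above; the ``markdown`` tag used here is an
# assumption, not something defined in this excerpt.
def _example_paper_doc_create_args(folder_id):
    return PaperDocCreateArgs(import_format=ImportFormat.markdown,  # assumed tag
                              parent_folder_id=folder_id)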
class PaperDocCreateError(PaperApiBaseError):
"""
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
:ivar content_malformed: The provided content was malformed and cannot be
imported to Paper.
    :ivar folder_not_found: The specified Paper folder cannot be found.
:ivar doc_length_exceeded: The newly created Paper doc would be too large.
Please split the content into multiple docs.
:ivar image_size_exceeded: The imported document contains an image that is
too large. The current limit is 1MB. Note: This only applies to HTML
with data uri.
"""
# Attribute is overwritten below the class definition
content_malformed = None
# Attribute is overwritten below the class definition
folder_not_found = None
# Attribute is overwritten below the class definition
doc_length_exceeded = None
# Attribute is overwritten below the class definition
image_size_exceeded = None
def is_content_malformed(self):
"""
Check if the union tag is ``content_malformed``.
:rtype: bool
"""
return self._tag == 'content_malformed'
def is_folder_not_found(self):
"""
Check if the union tag is ``folder_not_found``.
:rtype: bool
"""
return self._tag == 'folder_not_found'
def is_doc_length_exceeded(self):
"""
Check if the union tag is ``doc_length_exceeded``.
:rtype: bool
"""
return self._tag == 'doc_length_exceeded'
def is_image_size_exceeded(self):
"""
Check if the union tag is ``image_size_exceeded``.
:rtype: bool
"""
return self._tag == 'image_size_exceeded'
def _process_custom_annotations(self, annotation_type, processor):
super(PaperDocCreateError, self)._process_custom_annotations(annotation_type, processor)
def __repr__(self):
return 'PaperDocCreateError(%r, %r)' % (self._tag, self._value)
PaperDocCreateError_validator = bv.Union(PaperDocCreateError)
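# A minimal sketch (illustrative only) of inspecting this tagged union after a failed
# docs/create call; the surrounding error-handling context is assumed, not shown here.
def _example_describe_create_error(error):
    # ``error`` is expected to be a PaperDocCreateError instance.
    if error.is_content_malformed():
        return 'content could not be parsed'
    if error.is_folder_not_found():
        return 'target Paper folder not found'
    if error.is_doc_length_exceeded():
        return 'doc too large; split the content into multiple docs'
    if error.is_image_size_exceeded():
        return 'an embedded image exceeds the 1 MB limit'
    return 'other error'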
class PaperDocCreateUpdateResult(bb.Struct):
"""
:ivar doc_id: Doc ID of the newly created doc.
:ivar revision: The Paper doc revision. Simply an ever increasing number.
:ivar title: The Paper doc title.
"""
__slots__ = [
'_doc_id_value',
'_doc_id_present',
'_revision_value',
'_revision_present',
'_title_value',
'_title_present',
]
_has_required_fields = True
def __init__(self,
doc_id=None,
revision=None,
title=None):
self._doc_id_value = None
self._doc_id_present = False
self._revision_value = None
self._revision_present = False
self._title_value = None
self._title_present = False
if doc_id is not None:
self.doc_id = doc_id
if revision is not None:
self.revision = revision
if title is not None:
self.title = title
@property
def doc_id(self):
"""
Doc ID of the newly created doc.
:rtype: str
"""
if self._doc_id_present:
return self._doc_id_value
else:
raise AttributeError("missing required field 'doc_id'")
@doc_id.setter
def doc_id(self, val):
val = self._doc_id_validator.validate(val)
self._doc_id_value = val
self._doc_id_present = True
@doc_id.deleter
def doc_id(self):
self._doc_id_value = None
self._doc_id_present = False
@property
def revision(self):
"""
The Paper doc revision. Simply an ever increasing number.
:rtype: int
"""
if self._revision_present:
return self._revision_value
else:
raise AttributeError("missing required field 'revision'")
@revision.setter
def revision(self, val):
val = self._revision_validator.validate(val)
self._revision_value = val
self._revision_present = True
@revision.deleter
def revision(self):
self._revision_value = None
self._revision_present = False
@property
def title(self):
"""
The Paper doc title.
:rtype: str
"""
if self._title_present:
return self._title_value
else:
raise AttributeError("missing required field 'title'")
@title.setter
def title(self, val):
val = self._title_validator.validate(val)
self._title_value = val
self._title_present = True
@title.deleter
def title(self):
self._title_value = None
self._title_present = False
def _process_custom_annotations(self, annotation_type, processor):
super(PaperDocCreateUpdateResult, self)._process_custom_annotations(annotation_type, processor)
def __repr__(self):
return 'PaperDocCreateUpdateResult(doc_id={!r}, revision={!r}, title={!r})'.format(
self._doc_id_value,
self._revision_value,
self._title_value,
)
PaperDocCreateUpdateResult_validator = bv.Struct(PaperDocCreateUpdateResult)
class PaperDocExport(RefPaperDoc):
__slots__ = [
'_export_format_value',
'_export_format_present',
]
_has_required_fields = True
def __init__(self,
doc_id=None,
export_format=None):
super(PaperDocExport, self).__init__(doc_id)
self._export_format_value = None
self._export_format_present = False
if export_format is not None:
self.export_format = export_format
@property
def export_format(self):
"""
:rtype: ExportFormat
"""
if self._export_format_present:
return self._export_format_value
else:
raise AttributeError("missing required field 'export_format'")
@export_format.setter
def export_format(self, val):
self._export_format_validator.validate_type_only(val)
self._export_format_value = val
self._export_format_present = True
@export_format.deleter
def export_format(self):
self._export_format_value = None
self._export_format_present = False
def _process_custom_annotations(self, annotation_type, processor):
super(PaperDocExport, self)._process_custom_annotations(annotation_type, processor)
def __repr__(self):
return 'PaperDocExport(doc_id={!r}, export_format={!r})'.format(
self._doc_id_value,
self._export_format_value,
)
PaperDocExport_validator = bv.Struct(PaperDocExport)
class PaperDocExportResult(bb.Struct):
"""
:ivar owner: The Paper doc owner's email address.
:ivar title: The Paper doc title.
:ivar revision: The Paper doc revision. Simply an ever increasing number.
:ivar mime_type: MIME type of the export. This corresponds to
:class:`ExportFormat` specified in the request.
"""
__slots__ = [
'_owner_value',
'_owner_present',
'_title_value',
'_title_present',
'_revision_value',
'_revision_present',
'_mime_type_value',
'_mime_type_present',
]
_has_required_fields = True
def __init__(self,
owner=None,
title=None,
| |
import os
import sys
#import pip.utils.logging
#import pip
import socket
import tempfile
import threading
import subprocess
import xmlrpclib
import re
from cStringIO import StringIO
import sys
import shutil
import time
#import zipfile
from distutils.version import LooseVersion
if __name__ == "__main__":
import docassemble.base.config
docassemble.base.config.load(arguments=sys.argv)
from docassemble.webapp.app_object import app
from docassemble.webapp.db_object import db
from docassemble.webapp.packages.models import Package, Install, PackageAuth
from docassemble.webapp.core.models import Supervisors
from docassemble.webapp.files import SavedFile
from docassemble.webapp.daredis import r
supervisor_url = os.environ.get('SUPERVISOR_SERVER_URL', None)
if supervisor_url:
USING_SUPERVISOR = True
else:
USING_SUPERVISOR = False
def remove_inactive_hosts():
from docassemble.base.config import hostname
if USING_SUPERVISOR:
to_delete = set()
for host in Supervisors.query.all():
if host.hostname == hostname:
continue
try:
socket.gethostbyname(host.hostname)
server = xmlrpclib.Server(host.url + '/RPC2')
result = server.supervisor.getState()
except:
to_delete.add(host.id)
for id_to_delete in to_delete:
Supervisors.query.filter_by(id=id_to_delete).delete()
def check_for_updates(doing_startup=False):
sys.stderr.write("check_for_updates: starting\n")
from docassemble.base.config import hostname
ok = True
here_already = dict()
results = dict()
sys.stderr.write("check_for_updates: 1\n")
installed_packages = get_installed_distributions()
for package in installed_packages:
here_already[package.key] = package.version
packages = dict()
installs = dict()
to_install = list()
to_uninstall = list()
uninstall_done = dict()
uninstalled_packages = dict()
logmessages = ''
package_by_name = dict()
sys.stderr.write("check_for_updates: 2\n")
for package in Package.query.filter_by(active=True).all():
package_by_name[package.name] = package
# packages is what is supposed to be installed
sys.stderr.write("check_for_updates: 3\n")
for package in Package.query.filter_by(active=True).all():
if package.type is not None:
packages[package.id] = package
#print "Found a package " + package.name
sys.stderr.write("check_for_updates: 4\n")
for package in Package.query.filter_by(active=False).all():
if package.name not in package_by_name:
uninstalled_packages[package.id] = package # this is what the database says should be uninstalled
sys.stderr.write("check_for_updates: 5\n")
for install in Install.query.filter_by(hostname=hostname).all():
        installs[install.package_id] = install # this is what the database says is installed on this server
if install.package_id in uninstalled_packages and uninstalled_packages[install.package_id].name not in package_by_name:
to_uninstall.append(uninstalled_packages[install.package_id]) # uninstall if it is installed
changed = False
package_owner = dict()
sys.stderr.write("check_for_updates: 6\n")
for auth in PackageAuth.query.filter_by(authtype='owner').all():
package_owner[auth.package_id] = auth.user_id
sys.stderr.write("check_for_updates: 7\n")
for package in packages.itervalues():
if package.id not in installs and package.name in here_already:
sys.stderr.write("check_for_updates: package " + package.name + " here already\n")
install = Install(hostname=hostname, packageversion=here_already[package.name], version=package.version, package_id=package.id)
db.session.add(install)
installs[package.id] = install
changed = True
if changed:
db.session.commit()
sys.stderr.write("check_for_updates: 8\n")
for package in packages.itervalues():
#sys.stderr.write("check_for_updates: processing package id " + str(package.id) + "\n")
#sys.stderr.write("1: " + str(installs[package.id].packageversion) + " 2: " + str(package.packageversion) + "\n")
if (package.packageversion is not None and package.id in installs and installs[package.id].packageversion is None) or (package.packageversion is not None and package.id in installs and installs[package.id].packageversion is not None and LooseVersion(package.packageversion) > LooseVersion(installs[package.id].packageversion)):
new_version_needed = True
else:
new_version_needed = False
#sys.stderr.write("got here and new version is " + str(new_version_needed) + "\n")
if package.id not in installs or package.version > installs[package.id].version or new_version_needed:
to_install.append(package)
#sys.stderr.write("done with that" + "\n")
sys.stderr.write("check_for_updates: 9\n")
for package in to_uninstall:
#sys.stderr.write("Going to uninstall a package: " + package.name + "\n")
if package.name in uninstall_done:
sys.stderr.write("check_for_updates: skipping uninstallation of " + str(package.name) + " because already uninstalled" + "\n")
continue
returnval, newlog = uninstall_package(package)
uninstall_done[package.name] = 1
logmessages += newlog
if returnval == 0:
Install.query.filter_by(hostname=hostname, package_id=package.id).delete()
results[package.name] = 'successfully uninstalled'
else:
results[package.name] = 'uninstall failed'
ok = False
packages_to_delete = list()
sys.stderr.write("check_for_updates: 10\n")
for package in to_install:
sys.stderr.write("check_for_updates: going to install a package: " + package.name + "\n")
# if doing_startup and package.name.startswith('docassemble') and package.name in here_already:
# #adding this because of unpredictability of installing new versions of docassemble
# #just because of a system restart.
# sys.stderr.write("check_for_updates: skipping update on " + str(package.name) + "\n")
# continue
returnval, newlog = install_package(package)
logmessages += newlog
sys.stderr.write("check_for_updates: return value was " + str(returnval) + "\n")
if returnval != 0:
sys.stderr.write("Return value was not good" + "\n")
ok = False
#pip._vendor.pkg_resources._initialize_master_working_set()
pip_info = get_pip_info(package.name)
real_name = pip_info['Name']
sys.stderr.write("check_for_updates: real name of package " + str(package.name) + " is " + str(real_name) + "\n")
if real_name is None:
results[package.name] = 'install failed'
ok = False
if package.name not in here_already:
sys.stderr.write("check_for_updates: removing package entry for " + package.name + "\n")
packages_to_delete.append(package)
elif returnval != 0:
results[package.name] = 'could not be upgraded'
else:
results[package.name] = 'successfully installed'
if real_name != package.name:
sys.stderr.write("check_for_updates: changing name" + "\n")
package.name = real_name
if package.id in installs:
install = installs[package.id]
install.version = package.version
else:
install = Install(hostname=hostname, packageversion=package.packageversion, version=package.version, package_id=package.id)
db.session.add(install)
db.session.commit()
update_versions()
add_dependencies(package_owner.get(package.id, 1))
update_versions()
sys.stderr.write("check_for_updates: 11\n")
for package in packages_to_delete:
package.active = False
sys.stderr.write("check_for_updates: 12\n")
db.session.commit()
sys.stderr.write("check_for_updates: finished uninstalling and installing\n")
return ok, logmessages, results
def update_versions():
sys.stderr.write("update_versions: starting" + "\n")
install_by_id = dict()
from docassemble.base.config import hostname
for install in Install.query.filter_by(hostname=hostname).all():
install_by_id[install.package_id] = install
package_by_name = dict()
for package in Package.query.filter_by(active=True).order_by(Package.name, Package.id.desc()).all():
if package.name in package_by_name:
continue
package_by_name[package.name] = package
installed_packages = get_installed_distributions()
for package in installed_packages:
if package.key in package_by_name:
if package_by_name[package.key].id in install_by_id and package.version != install_by_id[package_by_name[package.key].id].packageversion:
install_by_id[package_by_name[package.key].id].packageversion = package.version
db.session.commit()
if package.version != package_by_name[package.key].packageversion:
package_by_name[package.key].packageversion = package.version
db.session.commit()
return
def add_dependencies(user_id):
#sys.stderr.write('add_dependencies: user_id is ' + str(user_id) + "\n")
sys.stderr.write("add_dependencies: starting\n")
from docassemble.base.config import hostname, daconfig
#docassemble_git_url = daconfig.get('docassemble git url', 'https://github.com/jhpyle/docassemble')
package_by_name = dict()
for package in Package.query.filter_by(active=True).order_by(Package.name, Package.id.desc()).all():
if package.name in package_by_name:
continue
package_by_name[package.name] = package
installed_packages = get_installed_distributions()
for package in installed_packages:
if package.key in package_by_name:
continue
pip_info = get_pip_info(package.key)
#sys.stderr.write("Home page of " + str(package.key) + " is " + str(pip_info['Home-page']) + "\n")
Package.query.filter_by(name=package.key).delete()
db.session.commit()
package_auth = PackageAuth(user_id=user_id)
if package.key.startswith('docassemble.') and pip_info['Home-page'] is not None and re.search(r'/github.com/', pip_info['Home-page']):
package_entry = Package(name=package.key, package_auth=package_auth, type='git', giturl=pip_info['Home-page'], packageversion=package.version, dependency=True)
else:
package_entry = Package(name=package.key, package_auth=package_auth, type='pip', packageversion=package.version, dependency=True)
db.session.add(package_auth)
db.session.add(package_entry)
db.session.commit()
install = Install(hostname=hostname, packageversion=package_entry.packageversion, version=package_entry.version, package_id=package_entry.id)
db.session.add(install)
db.session.commit()
sys.stderr.write("add_dependencies: ending\n")
return
def fix_names():
installed_packages = [package.key for package in get_installed_distributions()]
for package in Package.query.filter_by(active=True).all():
if package.name not in installed_packages:
pip_info = get_pip_info(package.name)
actual_name = pip_info['Name']
if actual_name is not None:
package.name = actual_name
db.session.commit()
else:
sys.stderr.write("fix_names: package " + package.name + " does not appear to be installed" + "\n")
def splitall(path):
    """Split a path into a list of all of its components, e.g. 'a/b/c' -> ['a', 'b', 'c']"""
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path:
allparts.insert(0, parts[0])
break
elif parts[1] == path:
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
def install_package(package):
sys.stderr.write("install_package: " + package.name + "\n")
if package.type == 'zip' and package.upload is None:
return 0, ''
sys.stderr.write('install_package: ' + package.name + "\n")
from docassemble.base.config import daconfig
PACKAGE_DIRECTORY = daconfig.get('packages', '/usr/share/docassemble/local')
logfilecontents = ''
#pip.utils.logging._log_state = threading.local()
#pip.utils.logging._log_state.indentation = 0
pip_log = tempfile.NamedTemporaryFile()
temp_dir = tempfile.mkdtemp()
use_pip_cache = r.get('da:updatepackage:use_pip_cache')
if use_pip_cache is None:
disable_pip_cache = False
elif int(use_pip_cache):
disable_pip_cache = False
else:
disable_pip_cache = True
if package.type == 'zip' and package.upload is not None:
saved_file = SavedFile(package.upload, extension='zip', fix=True)
# with zipfile.ZipFile(saved_file.path + '.zip', mode='r') as zf:
# for zinfo in zf.infolist():
# parts = splitall(zinfo.filename)
# if parts[-1] == 'setup.py':
commands = ['pip', 'install']
if disable_pip_cache:
commands.append('--no-cache-dir')
commands.extend(['--quiet', '--prefix=' + PACKAGE_DIRECTORY, '--src=' + temp_dir, '--log-file=' + pip_log.name, '--upgrade', saved_file.path + '.zip'])
elif package.type == 'git' and package.giturl is not None:
if package.gitbranch is not None:
branchpart = '@' + str(package.gitbranch)
else:
branchpart = ''
if package.gitsubdir is not None:
commands = ['pip', 'install']
if disable_pip_cache:
commands.append('--no-cache-dir')
commands.extend(['--quiet', '--prefix=' + PACKAGE_DIRECTORY, '--src=' + temp_dir, '--upgrade', '--log-file=' + pip_log.name, 'git+' + str(package.giturl) + '.git' + branchpart + '#egg=' + package.name + '&subdirectory=' + str(package.gitsubdir)])
else:
commands = ['pip', 'install']
if disable_pip_cache:
commands.append('--no-cache-dir')
commands.extend(['--quiet', '--prefix=' + PACKAGE_DIRECTORY, '--src=' + temp_dir, '--upgrade', '--log-file=' + pip_log.name, 'git+' + str(package.giturl) + '.git' + branchpart + '#egg=' + package.name])
elif package.type == 'pip':
if package.limitation is None:
limit = ""
else:
limit = str(package.limitation)
commands = ['pip', 'install']
if disable_pip_cache:
commands.append('--no-cache-dir')
commands.extend(['--quiet', '--prefix=' + PACKAGE_DIRECTORY, '--src=' + temp_dir, '--upgrade', '--log-file=' + pip_log.name, package.name + limit])
else:
sys.stderr.write("Wrong package type\n")
return 1, 'Unable to recognize package type: ' + package.name
sys.stderr.write("install_package: running " + " ".join(commands) + "\n")
logfilecontents += " ".join(commands) + "\n"
returnval = 1
try:
        subprocess.check_call(commands)
returnval = 0
except subprocess.CalledProcessError as err:
returnval = err.returncode
sys.stderr.flush()
sys.stdout.flush()
time.sleep(4)
with open(pip_log.name, 'rU') as x:
logfilecontents += x.read().decode('utf8')
pip_log.close()
try:
sys.stderr.write(logfilecontents + "\n")
except:
pass
sys.stderr.flush()
sys.stdout.flush()
time.sleep(4)
sys.stderr.write('returnval is: ' + str(returnval) + "\n")
sys.stderr.write('install_package: done' + "\n")
shutil.rmtree(temp_dir)
return returnval, logfilecontents
def uninstall_package(package):
sys.stderr.write('uninstall_package: ' + package.name + "\n")
logfilecontents = ''
#sys.stderr.write("uninstall_package: uninstalling " + package.name + "\n")
#return 0
#pip.utils.logging._log_state = threading.local()
#pip.utils.logging._log_state.indentation = 0
pip_log | |
default = {"value": value}
attr = cls._attributes(field, default, **attributes)
select_items = []
for option in options:
if isinstance(option[1], dict):
items = [(v, k) for k, v in option[1].items()]
if not items:
continue
items.sort()
opts = [OPTION(v, _value=k) for v, k in items]
select_items.append(OPTGROUP(*opts,
_label = option[0],
))
else:
select_items.append(OPTION(option[1],
_label = option[0],
))
return SELECT(select_items, **attr)
# =============================================================================
class S3EntityRoleManager(S3Method):
""" Entity/User role manager """
ENTITY_TYPES = ["org_organisation",
"org_office",
"inv_warehouse",
"med_hospital",
#"po_area",
"pr_group",
]
def __init__(self, *args, **kwargs):
""" Constructor """
super(S3EntityRoleManager, self).__init__(*args, **kwargs)
# Dictionary of pentities this admin can manage
self.realm = self.get_realm()
# The list of user accounts linked to pentities in this realm
self.realm_users = current.s3db.pr_realm_users(self.realm)
# Create the dictionary of roles
self.roles = {}
self.modules = self.get_modules()
self.acls = self.get_access_levels()
for module_uid, module_label in self.modules.items():
for acl_uid, acl_label in self.acls.items():
role_uid = "%s_%s" % (module_uid, acl_uid)
self.roles[role_uid] = {
"module": {
"uid": module_uid,
"label": module_label
},
"acl": {
"uid": acl_uid,
"label": acl_label
}
}
# -------------------------------------------------------------------------
@classmethod
def set_method(cls, r, entity=None, record_id=None):
"""
Plug-in OrgAdmin Role Managers when appropriate
@param r: the S3Request
@param entity: override target entity (default: r.tablename)
@param record_id: specify target record ID (only for OU's)
"""
s3db = current.s3db
auth = current.auth
if not current.deployment_settings.get_auth_entity_role_manager() or \
auth.user is None:
return False
sr = auth.get_system_roles()
realms = auth.user.realms or Storage()
ORG_ADMIN = sr.ORG_ADMIN
admin = sr.ADMIN in realms
org_admin = ORG_ADMIN in realms
if admin or org_admin:
if entity is not None:
tablename = entity
record = None
else:
tablename = r.tablename
record = r.record
all_entities = admin or org_admin and realms[ORG_ADMIN] is None
if not all_entities and tablename in cls.ENTITY_TYPES:
if not record and record_id is not None:
# Try to load the record and check pe_id
table = s3db.table(tablename)
if table and "pe_id" in table.fields:
record = current.db(table._id == record_id).select(table.pe_id,
limitby = (0, 1),
).first()
if record and record.pe_id not in realms[ORG_ADMIN]:
return False
if entity is not None:
# Configure as custom method for this resource
prefix, name = tablename.split("_", 1)
s3db.set_method(prefix, name, method="roles", action=cls)
elif tablename in cls.ENTITY_TYPES:
# Configure as method handler for this request
r.set_handler("roles", cls)
else:
# Unsupported entity
return False
return True
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
"""
if self.method == "roles" and \
(r.tablename in self.ENTITY_TYPES + ["pr_person"]):
context = self.get_context_data(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
# Set the default view
current.response.view = "admin/manage_roles.html"
return context
# -------------------------------------------------------------------------
def get_context_data(self, r, **attr):
"""
@todo: description?
@return: dictionary for the view
{
# All the possible roles
"roles": {
"staff_reader": {
"module": {
"uid": "staff",
"label": "Staff"
},
...
},
...
},
# The roles currently assigned to users for entit(y/ies)
"assigned_roles": {
"1": [
"staff_reader",
"project_editor",
...
],
...
},
"pagination_list": [
(
"User One",
"1"
),
...
],
# The object (user/entity) we are assigning roles for
"foreign_object": {
"id": "1",
"name": "User One"
}
or
"foreign_object": {
"id": "70",
"name": "Organisation Seventy"
}
}
"""
T = current.T
# organisation or site entity
self.entity = self.get_entity()
        # user account to assign roles to
self.user = self.get_user()
# roles already assigned to a user or users
self.assigned_roles = self.get_assigned_roles()
# The foreign object is the one selected in the role form
# for a person this is the entity
# for an entity (organisation or office) this is a user
self.foreign_object = self.get_foreign_object()
form = self.get_form()
# if we are editing roles, set those assigned roles as initial values
# for the form
form.vars.update(self.get_form_vars())
if form.accepts(r.post_vars, current.session):
before = self.assigned_roles[self.foreign_object["id"]] if self.foreign_object else []
after = ["%s_%s" % (mod_uid, acl_uid) for mod_uid, acl_uid
in form.vars.items()
if mod_uid in self.modules.keys()
and acl_uid in self.acls.keys()]
# either both values will have been specified or one will
# be supplied by the form (for roles on new objects)
user_id = self.user["id"] if self.user else form.vars.foreign_object
entity_id = self.entity["id"] if self.entity else form.vars.foreign_object
self.update_roles(user_id, entity_id, before, after)
current.session.confirmation = T("Roles updated")
redirect(r.url(vars={}))
context = {"roles": self.roles,
"foreign_object": self.foreign_object,
"form": form,
"title": T("Roles"),
}
if not self.foreign_object:
# how many assigned roles to show per page
pagination_size = int(r.get_vars.get("page_size", 4))
# what page of assigned roles to view
pagination_offset = int(r.get_vars.get("page_offset", 0))
# the number of pages of assigned roles
import math
pagination_pages = int(math.ceil(len(self.assigned_roles) / float(pagination_size)))
# the list of objects to show on this page sorted by name
pagination_list = [(self.objects[gid], gid) for gid in self.assigned_roles]
pagination_list = sorted(pagination_list)[pagination_offset * pagination_size:pagination_offset * pagination_size + pagination_size]
context.update({"assigned_roles": self.assigned_roles,
"pagination_size": pagination_size,
"pagination_offset": pagination_offset,
"pagination_list": pagination_list,
"pagination_pages": pagination_pages,
})
return context
# -------------------------------------------------------------------------
def get_realm(self):
"""
Returns the realm (list of pe_ids) that this user can manage
or raises a permission error if the user is not logged in
"""
auth = current.auth
system_roles = auth.get_system_roles()
ORG_ADMIN = system_roles.ORG_ADMIN
ADMIN = system_roles.ADMIN
if auth.user:
realms = auth.user.realms
else:
# User is not logged in
auth.permission.fail()
# Get the realm from the current realms
if ADMIN in realms:
realm = realms[ADMIN]
elif ORG_ADMIN in realms:
realm = realms[ORG_ADMIN]
else:
# raise an error here - user is not permitted
# to access the role matrix
auth.permission.fail()
return realm
# -------------------------------------------------------------------------
def get_modules(self):
"""
This returns an OrderedDict of modules with their uid as the key,
e.g., {hrm: "Human Resources",}
@return: OrderedDict
"""
return current.deployment_settings.get_auth_role_modules()
# -------------------------------------------------------------------------
def get_access_levels(self):
"""
This returns an OrderedDict of access levels and their uid as
the key, e.g., {reader: "Reader",}
@return: OrderedDict
"""
return current.deployment_settings.get_auth_access_levels()
# -------------------------------------------------------------------------
def get_assigned_roles(self, entity_id=None, user_id=None):
"""
            If an entity ID is provided, the dict will be the users
            with roles assigned to that entity. The keys will be the user IDs.
            If a user ID is provided, the dict will be the entities the
            user has roles for. The keys will be the entity pe_ids.
            If both an entity and a user ID are provided, the dict will be
            the roles assigned to that user for that entity. The key will be
            the user ID.
@type entity_id: int
@param entity_id: the pe_id of the entity
@type user_id: int
@param user_id: id of the user account
@return: dict
{
1: [
"staff_reader",
"project_reader",
...
]
2: [
...
],
...
}
"""
if not entity_id and not user_id:
raise RuntimeError("Not enough arguments")
mtable = current.auth.settings.table_membership
gtable = current.auth.settings.table_group
utable = current.auth.settings.table_user
query = (mtable.deleted == False) & \
(gtable.deleted == False) & \
(gtable.id == mtable.group_id) & \
(utable.deleted == False) & \
(utable.id == mtable.user_id)
if user_id:
field = mtable.pe_id
query &= (mtable.user_id == user_id) & \
(mtable.pe_id != None)
if entity_id:
field = utable.id
query &= (mtable.pe_id == entity_id)
rows = current.db(query).select(utable.id,
gtable.uuid,
mtable.pe_id,
)
assigned_roles = OrderedDict()
roles = self.roles
for row in rows:
object_id = row[field]
role_uid = row[gtable.uuid]
if role_uid in roles:
if object_id not in assigned_roles:
assigned_roles[object_id] = []
assigned_roles[object_id].append(role_uid)
return assigned_roles
# -------------------------------------------------------------------------
def get_form(self):
"""
            Constructs the role form
@return: SQLFORM
"""
fields = self.get_form_fields()
form = SQLFORM.factory(*fields,
table_name="roles",
_id = "role-form",
_action = "",
_method = "POST",
)
return form
# -------------------------------------------------------------------------
def get_form_fields(self):
"""
@todo: description?
@return: list of Fields
"""
fields = []
requires = IS_EMPTY_OR(IS_IN_SET(self.acls))
for module_uid, module_label in self.modules.items():
field = Field(module_uid,
label = module_label,
requires = requires,
)
fields.append(field)
return fields
# -------------------------------------------------------------------------
def get_form_vars(self):
"""
            Get the roles currently assigned to a user/entity and put them
            into a Storage object for the form
@return: Storage() to pre-populate the role form
"""
form_vars = Storage()
fo = self.foreign_object
roles = self.roles
if fo and fo["id"] in self.assigned_roles:
for role in self.assigned_roles[fo["id"]]:
mod_uid = roles[role]["module"]["uid"]
acl_uid = roles[role]["acl"]["uid"]
form_vars[mod_uid] = acl_uid
return form_vars
# -------------------------------------------------------------------------
def update_roles(self, user_id, entity_id, before, after):
"""
            Update the user's roles on the entity, based on the selected roles
            in before and after
@param user_id: id (pk) of the user account to modify
| |
<reponame>aditya95sriram/td-slim
# coding=utf-8
import argparse
import os
import sys
import time
import networkx as nx
import subprocess
from operator import itemgetter
from networkx.drawing.nx_agraph import *
import matplotlib.pyplot as plt
import signal
from typing import Union
# optional dependency pysat+rc2
PYSATDISABLED = False
try:
from pysat.examples.rc2 import RC2
from pysat.formula import WCNF
except ImportError:
PYSATDISABLED = True
VIRTUALIZE = False
def apex_vertices(g):
    """
    Removes apex vertices (vertices adjacent to every other vertex)
    :returns g: reduced graph
    :returns buff: number of removed apex vertices
    :type g: networkx graph
    """
    buff = 0
    delete_vertices = list()
    for u, degree in g.degree():
        if degree == g.number_of_nodes() - 1:
            delete_vertices.append(u)
    g.remove_nodes_from(delete_vertices)
    buff += len(delete_vertices)
    g = nx.convert_node_labels_to_integers(g, first_label=0)
    return g, buff
def degree_one_reduction(g):
"""
    Removes all but one of the degree-one neighbours of each vertex
:returns g: reduced graph
:type g: networkx graph
"""
nodes = set()
for u in g.nodes():
deg = 0
for v in g.neighbors(u):
if g.degree(v) == 1:
if deg == 0:
deg = 1
else:
nodes = nodes.union({v})
g.remove_nodes_from(list(nodes))
g = nx.convert_node_labels_to_integers(g, first_label=0)
return g
def read_edge(filename):
with open(filename, 'r') as in_file:
edge = in_file.read()
edge = edge.rstrip("\n")
edge = edge.replace('e ', '')
edge = edge.split('\n')
while edge[0][0] != 'p':
edge.pop(0)
attr = edge.pop(0)
attr = attr.split()
attr = int(attr.pop())
while len(edge) > attr:
edge.pop()
int_edge = list()
for e in edge:
eu, ev = e.split()
int_edge.append([int(eu), int(ev)])
if int_edge[len(int_edge) - 1] == []:
int_edge.pop()
return int_edge
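# Illustrative only: a tiny sketch of the input format read_edge expects (a PACE-style
# edge list with a "p" header line); the temporary file is only used to make the
# example self-contained.
def _example_read_edge():
    import tempfile
    demo = "p tw 3 2\ne 1 2\ne 2 3\n"
    with tempfile.NamedTemporaryFile("w", suffix=".gr", delete=False) as tmp:
        tmp.write(demo)
        path = tmp.name
    return read_edge(path)  # -> [[1, 2], [2, 3]]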
def make_vars(g, width):
    """
    Number the SAT variables p[u][v][i] for every vertex pair u <= v and every
    level i < width; returns (p, total number of variables).
    """
nv = g.number_of_nodes()
p = [[[0 for i in range(width)] for j in range(nv)] for k in range(nv)]
nvar = 1
for u in range(nv):
for v in range(u, nv):
for i in range(width):
p[u][v][i] = nvar
nvar += 1
return p, nvar - 1
class Formula(object):
def __init__(self, nvar, comment=None):
self.nvar = nvar
self.clauses = []
self.comment = comment
@property
def nclauses(self):
return len(list(filter(lambda a: a[0], self.clauses)))
def get_header(self):
return f"p cnf {self.nvar} {self.nclauses}\n"
def add(self, *clause: int, comment=None):
self.clauses.append((clause, comment))
def get_cnf(self, strip_comments=False):
encoding = self.get_header()
if self.comment is not None: encoding += f"c {self.comment}\n"
for clause, comment in self.clauses:
if not strip_comments and comment is not None:
encoding += f"c {comment}\n"
if clause: encoding += " ".join(map(str, clause)) + " 0\n"
return encoding
def __add__(self, other: 'Formula'):
assert self.nvar == other.nvar, "merging cnfs with different number of variables"
result = self.__class__(self.nvar)
result.clauses = self.clauses + [((), other.comment)] + other.clauses
result.comment = self.comment
return result
def __str__(self):
return self.get_cnf()
def write(self, fname):
with open(fname, 'w') as f:
f.write(self.get_cnf())
class WFormula(Formula):
top = 42
def get_header(self):
return f"p wcnf {self.nvar} {self.nclauses} {self.top}\n"
def addw(self, weight: int, *clause: int, comment=None):
if clause:
super().add(weight, *clause, comment=comment)
else:
super().add(comment=comment)
def adds(self, *clause: int, comment=None):
self.addw(1, *clause, comment=comment)
def addh(self, *clause: int, comment=None):
self.addw(self.top, *clause, comment=comment)
add = addh
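# Illustrative only: a minimal sketch of building a small weighted CNF by hand with
# WFormula; the three-variable toy clauses below are made up for the example.
def _example_build_wformula():
    f = WFormula(3, comment="toy instance")
    f.addh(1, 2, comment="hard: x1 or x2")      # hard clause (weight = top)
    f.addh(-1, 3)                               # hard: not x1 or x3
    f.adds(-2, comment="soft: prefer not x2")   # soft clause with weight 1
    return f.get_cnf()                          # wcnf header, comments and clauses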
def generate_encoding(g, reqwidth, formula=Formula) -> Union[Formula, WFormula]:
nv = g.number_of_nodes()
if VIRTUALIZE and reqwidth > nv:
width = nv+1
virtualizing = True
delta = reqwidth - width
else:
width = reqwidth
virtualizing = False
delta = -1 # not needed
s, nvar = make_vars(g, width)
encoding = formula(nvar, f"virtualizing: {virtualizing}")
nclauses = 0
for u in range(nv):
for v in range(u, nv):
encoding.add(s[u][v][width - 1])
encoding.add(-s[u][v][0])
for u in range(nv):
for v in range(u, nv):
for i in range(1, width):
encoding.add(-s[u][v][i - 1], s[u][v][i])
for u in range(nv):
for v in range(u + 1, nv):
for w in range(v + 1, nv):
for i in range(width):
encoding.add(-s[u][v][i], -s[u][w][i], s[v][w][i])
encoding.add(-s[u][v][i], -s[v][w][i], s[u][w][i])
encoding.add(-s[u][w][i], -s[v][w][i], s[u][v][i])
for u in range(nv):
for v in range(u + 1, nv):
for i in range(width):
encoding.add(-s[u][v][i], s[u][u][i])
encoding.add(-s[u][v][i], s[v][v][i])
nclauses += 2
for u in range(nv):
for v in range(u + 1, nv):
for i in range(1, width):
encoding.add(-s[u][v][i], s[u][u][i - 1], s[v][v][i - 1])
for e in g.edges():
u = min(e)
v = max(e)
for i in range(1, width):
encoding.add(-s[u][u][i], s[u][u][i - 1], -s[v][v][i], s[u][v][i])
encoding.add(-s[u][u][i], s[v][v][i - 1], -s[v][v][i], s[u][v][i])
# weight encoding constraints
weight_encoding = formula(nvar, "weight encoding")
weight_nclauses = 0
for u, weight in g.nodes.data("weight"):
if weight:
#weight = d["weight"] + 1
#if weight <= 1: continue
if not virtualizing:
if weight >= width: # NO instance
weight_encoding.add(1, comment="force UNSAT")
weight_encoding.add(-1, comment="force UNSAT")
else:
# constraint: not s(u,u,w)
weight_encoding.add(-s[u][u][weight])
else:
if weight <= delta:
pass # no constraints needed
else:
# constraint: not s(u,u,w-(D-n))
weight_encoding.add(-s[u][u][weight - delta])
# forced ancestry encoding constraints
ancestry_encoding = formula(nvar, "ancestry encoding")
ancestry_nclauses = 0
forced_ancestries = g.graph.get("forced_ancestries", [])
for parent, child in forced_ancestries:
for i in range(2, width+1):
ancestry_encoding.add(-s[parent][parent][i-1], s[child][child][i-1])
return encoding + weight_encoding + ancestry_encoding
def generate_maxsat_encoding(g, reqwidth):
nv = g.number_of_nodes()
if VIRTUALIZE and reqwidth > nv:
width = nv + 1
virtualizing = True
delta = reqwidth - width
else:
width = reqwidth
virtualizing = False
delta = -1 # not needed
s, nvar = make_vars(g, width)
encoding: WFormula = generate_encoding(g, reqwidth, WFormula)
free = dict(zip(range(width + 1), range(nvar + 1, nvar + 1 + width + 1)))
encoding.nvar += width+1
encoding.add(comment="f[i]" + str(free))
for i in range(1, width+1): # 0th layer is always free, so no clauses needed
encoding.add(comment = f"f[{i}] clauses")
encoding.adds(free[i])
encoding.add(-free[i], free[i-1])
for u in range(nv):
encoding.add(-free[i], -s[u][u][i-1])
encoding.add(comment="weight driven free layer detection")
for u, weight in g.nodes.data("weight", default=0):
encoding.add(comment=f"vertex {u}, weight {weight}")
for i in range(2, width+1):
for j in range(1, min(weight, i)+1):
if i-j < 0:
raise RuntimeError("shouldn't reach here, fix limits of for loop")
encoding.add(-s[u][u][i-1], -free[i-j])
return encoding
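# Illustrative only: an end-to-end sketch that encodes a tiny unweighted graph and
# writes the resulting MaxSAT instance; the output path "tiny.wcnf" is made up here.
def _example_encode_path_graph():
    g = nx.path_graph(4)                         # 4 vertices, 3 edges, no weights
    wcnf = generate_maxsat_encoding(g, reqwidth=4)
    wcnf.write("tiny.wcnf")                      # DIMACS wcnf for a MaxSAT solver
    return wcnf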
# runners for different solvers
def run_uwrmaxsat(cnffile, solfile, cli_args, debug=False):
"""solve maxsat using uwrmaxsat"""
solver = os.path.join(os.getcwd(), "solvers", "uwrmaxsat")
cmd = [solver, cnffile, "-m", "-v0", f"-cpu-lim={cli_args.timeout}"] # try -no-msu
# if cli_args.depth >= 0: cmd += [f"-goal={cli_args.depth}"]
output = subprocess.check_output(cmd).decode() # todo add "-cpu-lim=1"
model = None
for line in output.splitlines():
if line.startswith("v"):
model = " ".join(line.split()[1:] + ["0\n"])
# break
else:
if debug:
print("maxsat:", line)
if line.startswith("s"):
if "UNSATISFIABLE" in line:
print("maxsat: UNSAT")
return False
elif "UNKNOWN" in line:
print("maxsat: UNKNOWN")
return False
else:
print("maxsat:", line, file=sys.stderr)
assert model is not None, "maxsat didn't generate any model"
with open(solfile, "w") as sol:
sol.write(model)
return True
def run_rc2(cnffile, solfile, cli_args, debug=False):
"""solve maxsat using RC2"""
if PYSATDISABLED:
raise NotImplementedError("required package pysat not found")
formula = WCNF(from_file=cnffile)
rc2 = RC2(formula)
model = rc2.compute()
if model is None:
print("maxsat didn't generate any model")
return False
with open(solfile, "w") as sol:
sol.write(" ".join(map(str, model + [0])))
return True
def run_loandra(cnffile, solfile, cli_args, debug=False):
"""solve maxsat using loandra"""
solver = os.path.join(os.getcwd(), "solvers", "loandra_static")
timeoutcmd = ["timeout", "-s", "15", str(cli_args.timeout)]
cmd = timeoutcmd + [solver, cnffile, "-verbosity=0", "-print-model"]
proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
output = proc.stdout
rc = proc.returncode
model = None
for line in output.splitlines():
if line.startswith("v"):
model = " ".join(line.split()[1:] + ["0\n"])
break
else:
if debug:
print("maxsat:", line)
if line.startswith("s"):
if "UNSATISFIABLE" in line:
print("UNSAT")
return False
elif "UNKNOWN" in line:
print("UNKNOWN")
return False
if model is None:
print("maxsat didn't generate any model")
return False
with open(solfile, "w") as sol:
sol.write(model)
return True
MAXSAT_SOLVERS = {'uwrmaxsat': run_uwrmaxsat,
'rc2': run_rc2,
'loandra': run_loandra,
'default': run_uwrmaxsat}
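# Illustrative only: a minimal dispatch sketch using the table above; the file names
# and the argparse.Namespace below are placeholders, not part of the original code.
def _example_run_solver(name="default"):
    cli_args = argparse.Namespace(timeout=60)    # the runners read cli_args.timeout
    runner = MAXSAT_SOLVERS.get(name, MAXSAT_SOLVERS["default"])
    # Returns True and writes the model to the solution file on success.
    return runner("demo.wcnf", "demo.sol", cli_args, debug=True)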
def decode_output(sol, g, reqwidth, return_decomp=False, debug=False):
nv = g.number_of_nodes()
if VIRTUALIZE and reqwidth > nv:
width = nv+1
else:
width = reqwidth
with open(sol, 'r') as out_file:
out = out_file.read()
out = out.split('\n')
out = out[0]
out = out.split(' ')
out = list(map(int, out))
out.pop()
ne = g.number_of_edges()
s, nvar = make_vars(g, width)
components = list()
for i in range(width - 1, 0, -1):
level = list()
for u in range(nv):
ver = list()
for v in range(u, nv):
if out[s[u][v][i] - 1] > 0:
ver.append(v)
do_not_add = 0
for v in level:
if set(ver).issubset(set(v)):
do_not_add = 1
if do_not_add == 0:
level.append(ver)
components.append(level)
for i in components:
if debug: sys.stderr.write(str(i) + '\n')
if debug: sys.stderr.write('\n' + "*" * 10 + '\n')
decomp = nx.DiGraph()
root = list()
level_i = list()
for i in range(width - 1, 0, -1):
level = list()
# sys.stderr.write('\n'+'*'*10+'\n')
for u in range(nv):
if out[s[u][u][i] - 1] > 0 and out[s[u][u][i - 1] - 1] < 0:
edge_add = False
if i == width - 1:
root.append(u)
decomp.add_node(u, level=i)
# sys.stderr.write("%i "%u)
level.append(u)
if level_i != []:
for v in level_i[len(level_i) - 1]:
if out[s[min(u, v)][max(u, | |
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == serviceusage.EnableServiceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_enable_service_async_from_dict():
await test_enable_service_async(request_type=dict)
def test_enable_service_field_headers():
client = ServiceUsageClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = serviceusage.EnableServiceRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.enable_service),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.enable_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_enable_service_field_headers_async():
client = ServiceUsageAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = serviceusage.EnableServiceRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.enable_service),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.enable_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_disable_service(transport: str = 'grpc', request_type=serviceusage.DisableServiceRequest):
client = ServiceUsageClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.disable_service),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.disable_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == serviceusage.DisableServiceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_disable_service_from_dict():
test_disable_service(request_type=dict)
def test_disable_service_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ServiceUsageClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.disable_service),
'__call__') as call:
client.disable_service()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == serviceusage.DisableServiceRequest()
@pytest.mark.asyncio
async def test_disable_service_async(transport: str = 'grpc_asyncio', request_type=serviceusage.DisableServiceRequest):
client = ServiceUsageAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.disable_service),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.disable_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == serviceusage.DisableServiceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_disable_service_async_from_dict():
await test_disable_service_async(request_type=dict)
def test_disable_service_field_headers():
client = ServiceUsageClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = serviceusage.DisableServiceRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.disable_service),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.disable_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_disable_service_field_headers_async():
client = ServiceUsageAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = serviceusage.DisableServiceRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.disable_service),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.disable_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_get_service(transport: str = 'grpc', request_type=serviceusage.GetServiceRequest):
client = ServiceUsageClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_service),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = resources.Service(
name='name_value',
parent='parent_value',
state=resources.State.DISABLED,
)
response = client.get_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == serviceusage.GetServiceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.Service)
assert response.name == 'name_value'
assert response.parent == 'parent_value'
assert response.state == resources.State.DISABLED
def test_get_service_from_dict():
test_get_service(request_type=dict)
def test_get_service_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ServiceUsageClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_service),
'__call__') as call:
client.get_service()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == serviceusage.GetServiceRequest()
@pytest.mark.asyncio
async def test_get_service_async(transport: str = 'grpc_asyncio', request_type=serviceusage.GetServiceRequest):
client = ServiceUsageAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_service),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Service(
name='name_value',
parent='parent_value',
state=resources.State.DISABLED,
))
response = await client.get_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == serviceusage.GetServiceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.Service)
assert response.name == 'name_value'
assert response.parent == 'parent_value'
assert response.state == resources.State.DISABLED
@pytest.mark.asyncio
async def test_get_service_async_from_dict():
await test_get_service_async(request_type=dict)
def test_get_service_field_headers():
client = ServiceUsageClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = serviceusage.GetServiceRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_service),
'__call__') as call:
call.return_value = resources.Service()
client.get_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_get_service_field_headers_async():
client = ServiceUsageAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = serviceusage.GetServiceRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_service),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Service())
await client.get_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_list_services(transport: str = 'grpc', request_type=serviceusage.ListServicesRequest):
client = ServiceUsageClient(
| |
is not expected",
'3.4.6.1.map_variables_with_map_variables':
"Element 'cellml:map_variables': This element is not expected",
'3.4.6.1.map_variables_with_model':
"Element 'cellml:model': This element is not expected",
'3.4.6.1.map_variables_with_reaction':
"Element 'cellml:reaction': This element is not expected",
'3.4.6.1.map_variables_with_relationship_ref':
"Element 'cellml:relationship_ref': This element is not expected",
'3.4.6.1.map_variables_with_role':
"Element 'cellml:role': This element is not expected",
'3.4.6.1.map_variables_with_unit':
"Element 'cellml:unit': This element is not expected",
'3.4.6.1.map_variables_with_units':
"Element 'cellml:units': This element is not expected",
'3.4.6.1.map_variables_with_variable_ref':
"Element 'cellml:variable_ref': This element is not expected",
'3.4.6.1.map_variables_with_variable':
"Element 'cellml:variable': This element is not expected",
# 4.4.1 Bad math
'4.4.1.math_not_math_component':
"cake': This element is not expected.",
'4.4.1.math_not_math_reaction':
"cake': This element is not expected.",
# 5.2.2 CellML prefers "deka" over "deca"
'5.2.2.unit_deca':
"'deca' is not a valid value of the union type 'cellml:unit_prefix'",
# 5.4.1.1 Unitses must have a name
'5.4.1.1.units_name_missing':
"Element 'cellml:units': The attribute 'name' is required",
# 5.4.1.1 A units can only contain unit elements
'5.4.1.1.units_with_component':
"Element 'cellml:component': This element is not expected",
'5.4.1.1.units_with_component_ref':
"Element 'cellml:component_ref': This element is not expected",
'5.4.1.1.units_with_connection':
"Element 'cellml:connection': This element is not expected",
'5.4.1.1.units_with_group':
"Element 'cellml:group': This element is not expected",
'5.4.1.1.units_with_map_components':
"Element 'cellml:map_components': This element is not expected",
'5.4.1.1.units_with_map_variables':
"Element 'cellml:map_variables': This element is not expected",
'5.4.1.1.units_with_model':
"Element 'cellml:model': This element is not expected",
'5.4.1.1.units_with_reaction':
"Element 'cellml:reaction': This element is not expected",
'5.4.1.1.units_with_relationship_ref':
"Element 'cellml:relationship_ref': This element is not expected",
'5.4.1.1.units_with_role':
"Element 'cellml:role': This element is not expected",
'5.4.1.1.units_with_units':
"Element 'cellml:units': This element is not expected",
'5.4.1.1.units_with_variable_ref':
"Element 'cellml:variable_ref': This element is not expected",
'5.4.1.1.units_with_variable':
"Element 'cellml:variable': This element is not expected",
# 5.4.1.2 A units name must be a valid identifier
'5.4.1.2.units_name_invalid':
'not accepted by the pattern',
# 5.4.1.2 Units names must be unique (within model or local component)
'5.4.1.2.units_name_duplicate_1':
"Element 'cellml:units': Duplicate key-sequence",
'5.4.1.2.units_name_duplicate_2':
"Element 'cellml:units': Duplicate key-sequence",
# 5.4.1.3 Units base_units attribute can only be yes or no
'5.4.1.3.units_base_units_invalid':
"not an element of the set",
# 5.4.2.1 A unit must have a units attribute
'5.4.2.1.unit_units_missing':
"Element 'cellml:unit': The attribute 'units' is required",
# 5.4.2.1 A unit cannot have CellML children
'5.4.2.1.unit_with_component':
"Element 'cellml:component': This element is not expected",
'5.4.2.1.unit_with_component_ref':
"Element 'cellml:component_ref': This element is not expected",
'5.4.2.1.unit_with_connection':
"Element 'cellml:connection': This element is not expected",
'5.4.2.1.unit_with_group':
"Element 'cellml:group': This element is not expected",
'5.4.2.1.unit_with_map_components':
"Element 'cellml:map_components': This element is not expected",
'5.4.2.1.unit_with_map_variables':
"Element 'cellml:map_variables': This element is not expected",
'5.4.2.1.unit_with_model':
"Element 'cellml:model': This element is not expected",
'5.4.2.1.unit_with_reaction':
"Element 'cellml:reaction': This element is not expected",
'5.4.2.1.unit_with_relationship_ref':
"Element 'cellml:relationship_ref': This element is not expected",
'5.4.2.1.unit_with_role':
"Element 'cellml:role': This element is not expected",
'5.4.2.1.unit_with_unit':
"Element 'cellml:unit': This element is not expected",
'5.4.2.1.unit_with_units':
"Element 'cellml:units': This element is not expected",
'5.4.2.1.unit_with_variable_ref':
"Element 'cellml:variable_ref': This element is not expected",
'5.4.2.1.unit_with_variable':
"Element 'cellml:variable': This element is not expected",
# 5.4.2.3 Allowed values of the prefix attribute
'5.4.2.3.unit_prefix_real':
"not a valid value of the union type 'cellml:unit_prefix'",
'5.4.2.3.unit_prefix_real_int':
"not a valid value of the union type 'cellml:unit_prefix'",
'5.4.2.3.unit_prefix_spaces':
"not a valid value of the union type 'cellml:unit_prefix'",
'5.4.2.3.unit_prefix_unknown':
"not a valid value of the union type 'cellml:unit_prefix'",
# 5.4.2.4 A unit exponent must be a real number
'5.4.2.4.unit_exponent_invalid':
'not accepted by the pattern',
# 5.4.2.5 A unit multiplier must be a real number
'5.4.2.5.unit_multiplier_invalid':
'not accepted by the pattern',
# 5.4.2.6 A unit offset must be a real number
'5.4.2.6.unit_offset_invalid':
'not accepted by the pattern',
# 6.4.1.1 A group cannot be empty (extra test for missing comp_ref/rel_ref)
'6.4.1.1.group_empty':
"Element 'cellml:group': Missing child element(s)",
# 6.4.1.1 A group can only contain component_refs and relationship_refs
'6.4.1.1.group_with_component':
"Element 'cellml:component': This element is not expected",
'6.4.1.1.group_with_component_ref':
"Element 'cellml:component_ref': This element is not expected",
'6.4.1.1.group_with_connection':
"Element 'cellml:connection': This element is not expected",
'6.4.1.1.group_with_group':
"Element 'cellml:group': This element is not expected",
'6.4.1.1.group_with_map_components':
"Element 'cellml:map_components': This element is not expected",
'6.4.1.1.group_with_map_variables':
"Element 'cellml:map_variables': This element is not expected",
'6.4.1.1.group_with_model':
"Element 'cellml:model': This element is not expected",
'6.4.1.1.group_with_reaction':
"Element 'cellml:reaction': This element is not expected",
'6.4.1.1.group_with_relationship_ref':
"Element 'cellml:relationship_ref': This element is not expected",
'6.4.1.1.group_with_role':
"Element 'cellml:role': This element is not expected",
'6.4.1.1.group_with_unit':
"Element 'cellml:unit': This element is not expected",
'6.4.1.1.group_with_units':
"Element 'cellml:units': This element is not expected",
'6.4.1.1.group_with_variable_ref':
"Element 'cellml:variable_ref': This element is not expected",
'6.4.1.1.group_with_variable':
"Element 'cellml:variable': This element is not expected",
# 6.4.2.1 A relationship_ref cannot have any CellML children
'6.4.2.1.relationship_ref_with_component':
"Element 'cellml:component': This element is not expected",
'6.4.2.1.relationship_ref_with_component_ref':
"Element 'cellml:component_ref': This element is not expected",
'6.4.2.1.relationship_ref_with_connection':
"Element 'cellml:connection': This element is not expected",
'6.4.2.1.relationship_ref_with_group':
"Element 'cellml:group': This element is not expected",
'6.4.2.1.relationship_ref_with_map_components':
"Element 'cellml:map_components': This element is not expected",
'6.4.2.1.relationship_ref_with_map_variables':
"Element 'cellml:map_variables': This element is not expected",
'6.4.2.1.relationship_ref_with_model':
"Element 'cellml:model': This element is not expected",
'6.4.2.1.relationship_ref_with_reaction':
"Element 'cellml:reaction': This element is not expected",
'6.4.2.1.relationship_ref_with_relationship_ref':
"Element 'cellml:relationship_ref': This element is not expected",
'6.4.2.1.relationship_ref_with_role':
"Element 'cellml:role': This element is not expected",
'6.4.2.1.relationship_ref_with_unit':
"Element 'cellml:unit': This element is not expected",
'6.4.2.1.relationship_ref_with_units':
"Element 'cellml:units': This element is not expected",
'6.4.2.1.relationship_ref_with_variable_ref':
"Element 'cellml:variable_ref': This element is not expected",
'6.4.2.1.relationship_ref_with_variable':
"Element 'cellml:variable': This element is not expected",
# 6.4.2.2 When not in a namespace, a relationship_ref's relationship must
# be either containment or encapsulation.
'6.4.2.2.relationship_ref_relationship_invalid':
"'howdy' is not an element of the set",
# 6.4.2.3 A relationship_ref name must be a cellml identifier
'6.4.2.3.relationship_ref_name_invalid':
'not accepted by the pattern',
# 6.4.2.5 name/relationship pairs must be unique
'6.4.2.5.relationship_ref_duplicate_named':
"Element 'cellml:relationship_ref': Duplicate key-sequence",
# 6.4.3.1 A component_ref must define a component
'6.4.3.1.component_ref_component_missing':
"'cellml:component_ref': The attribute 'component' is required",
# 6.4.3.1 A component_ref can only contain a component_ref
'6.4.3.1.component_ref_with_component':
"Element 'cellml:component': This element is not expected",
'6.4.3.1.component_ref_with_connection':
"Element 'cellml:connection': This element is not expected",
'6.4.3.1.component_ref_with_group':
"Element 'cellml:group': This element is not expected",
'6.4.3.1.component_ref_with_map_components':
"Element 'cellml:map_components': This element is not expected",
'6.4.3.1.component_ref_with_map_variables':
"Element 'cellml:map_variables': This element is not expected",
'6.4.3.1.component_ref_with_model':
"Element 'cellml:model': This element is not expected",
'6.4.3.1.component_ref_with_reaction':
"Element 'cellml:reaction': This element is not expected",
'6.4.3.1.component_ref_with_relationship_ref':
"Element 'cellml:relationship_ref': This element is not expected",
'6.4.3.1.component_ref_with_role':
"Element 'cellml:role': This element is not expected",
'6.4.3.1.component_ref_with_unit':
"Element 'cellml:unit': This element is not expected",
'6.4.3.1.component_ref_with_units':
"Element 'cellml:units': This element is not expected",
'6.4.3.1.component_ref_with_variable_ref':
"Element 'cellml:variable_ref': This element is not expected",
'6.4.3.1.component_ref_with_variable':
"Element 'cellml:variable': This element is not expected",
# 6.4.3.3 A component attribute must be an identifier
'6.4.3.3.component_ref_component_invalid':
'not accepted by the pattern',
# 6.4.3.3 A component_ref must refer to an existing component
'6.4.3.3.component_ref_component_nonexistent_1':
"'cellml:component_ref': No match found for key-sequence",
# 7.4.1.1 A reaction must contain at least one variable_ref
'7.4.1.1.reaction_variable_ref_missing':
"'cellml:reaction': Missing child element",
# 7.4.1.1 A reaction can only contain a variable_ref
'7.4.1.1.reaction_with_component':
"Element 'cellml:component': This element is not expected",
'7.4.1.1.reaction_with_component_ref':
"Element 'cellml:component_ref': This element is not expected",
'7.4.1.1.reaction_with_connection':
"Element 'cellml:connection': This element is not expected",
'7.4.1.1.reaction_with_group':
"Element 'cellml:group': This element is not expected",
'7.4.1.1.reaction_with_map_components':
"Element 'cellml:map_components': This element is not expected",
'7.4.1.1.reaction_with_map_variables':
"Element 'cellml:map_variables': This element is not expected",
'7.4.1.1.reaction_with_model':
"Element 'cellml:model': This element is not expected",
'7.4.1.1.reaction_with_reaction':
"Element 'cellml:reaction': This element is not expected",
'7.4.1.1.reaction_with_relationship_ref':
"Element 'cellml:relationship_ref': This element is not expected",
'7.4.1.1.reaction_with_role':
"Element 'cellml:role': This element is not expected",
'7.4.1.1.reaction_with_unit':
"Element 'cellml:unit': This element is not expected",
'7.4.1.1.reaction_with_units':
"Element 'cellml:units': This element is not expected",
'7.4.1.1.reaction_with_variable':
"Element 'cellml:variable': This element is not expected",
# 7.4.1.2 The reversible attribute can only be yes or no
'7.4.1.2.reaction_reversible_invalid':
"not an element of the set",
# 7.4.1.3 There's another rule about maths here that I don't understand
# 7.4.2.1 A variable_ref must have at least one role
'7.4.2.1.variable_ref_role_missing':
"Element 'cellml:variable_ref': Missing child element",
'7.4.2.1.variable_ref_variable_missing':
"Element 'cellml:variable_ref': The attribute 'variable' is required",
# 7.4.2.1 A variable_ref can only contain a role
'7.4.2.1.variable_ref_with_component_ref':
"Element 'cellml:component_ref': This element is not expected",
'7.4.2.1.variable_ref_with_component':
"Element 'cellml:component': This element is not expected",
'7.4.2.1.variable_ref_with_connection':
"Element 'cellml:connection': This element is not expected",
'7.4.2.1.variable_ref_with_group':
"Element 'cellml:group': This element is not expected",
'7.4.2.1.variable_ref_with_map_components':
"Element 'cellml:map_components': This element is not expected",
'7.4.2.1.variable_ref_with_map_variables':
"Element 'cellml:map_variables': This element is not expected",
'7.4.2.1.variable_ref_with_model':
"Element 'cellml:model': This element is not expected",
'7.4.2.1.variable_ref_with_reaction':
"Element 'cellml:reaction': This element is not expected",
'7.4.2.1.variable_ref_with_relationship_ref':
"Element 'cellml:relationship_ref': This element is not expected",
'7.4.2.1.variable_ref_with_unit':
"Element 'cellml:unit': This element is not expected",
'7.4.2.1.variable_ref_with_units':
"Element 'cellml:units': This element | |
from types import DictType
from types import ListType
import com.ibm.ws.scripting
import logging
import os
import re
import sys
import wdr.app
import wdr.config
import wdr.task
(
AdminApp, AdminConfig, AdminControl, AdminTask, Help
) = wdr.WsadminObjects().getObjects()
logger = logging.getLogger('wdr.manifest')
_genericPattern = re.compile(r'^(?P<tabs>(?:\ |\t)*).*$')
_commentPattern = re.compile(r'^(?:\s*#\.*)|(?:\s*)$')
_directivePattern = re.compile(
r'^(?P<tabs>(?:\ |\t)*)'
r'@'
r'\s*'
r'(?P<name>[A-Za-z][a-zA-Z0-9_]*)'
r'(?P<values>(?:\s*(?P<value>.+?))*)?'
r'\s*$')
_typePattern = re.compile(
r'^(?P<tabs>(?:\ |\t)*)'
r'(?:(?P<operation>[!?+])\s*)?'
r'\s*'
r'(?P<type>[A-Za-z][a-zA-Z0-9_]*)'
r'(?:'
r'\s+'
r'(?P<linkage>[&#][a-zA-Z0-9_]+)'
r')?'
r'(?:'
r'\s+'
r'\<(?P<templateName>.*)\>'
r')?'
r'\s*$')
_keyPattern = re.compile(
r'^(?P<tabs>(?:\ |\t)*)'
r'\*'
r'(?P<name>[A-Za-z][a-zA-Z0-9_]*)'
r'\s*'
r'(?P<value>.+?)?'
r'\s*$')
_attPattern = re.compile(
r'^(?P<tabs>(?:\ |\t)*)'
r'-'
r'(?P<name>[A-Za-z][a-zA-Z0-9_]*)'
r'\s*'
r'(?P<value>.+?)?'
r'\s*$')
_variablePattern = re.compile(
r'\$\['
r'\s*'
r'(?:'
r'(?P<val>(?:\'[^\']*\')|(?:\"[^\"]*\"))'
r'|'
r'(?P<var>[a-zA-Z_][a-zA-Z0-9_]*(?:\.[a-zA-Z_][a-zA-Z0-9_]*)*)'
r')'
r'(?P<filter>'
r'(?:'
r'\s*\|\s*'
r'(?:[a-zA-Z_][a-zA-Z0-9_]*(?:\.[a-zA-Z_][a-zA-Z0-9_]*)*)'
r')*'
r')'
r'\s*'
r'\]')
_appNamePattern = re.compile(
r'^'
r'(?:(?:"(?P<qname>[^"]+)")|(?P<name>\S+))'
r'(?:\s+(?:(?:"(?P<qpath>[^"]+)")|(?P<path>.+?)))?'
r'\s*$')
_appOptionPattern = re.compile(
r'^(?P<tabs>(?:\ |\t))'
r'(?P<name>\*?[a-zA-Z0-9_\.]+)'
r'\s*'
r'(?P<value>.+?)?'
r'\s*$')
_appOptionValuePattern = re.compile(r'^(?P<tabs>(?:\t\t)|(?:\ \ ))(?P<value>.+?)\s*$')
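# Taken together, the patterns above define a tab-indented manifest syntax.
# An illustrative fragment they would accept (deduced from the regular
# expressions, not taken from any bundled example; tabs shown as \t):
#
#     @ include common.wdrm
#     ServerCluster
#     \t*name myCluster
#     \t-description $[ env.DESCRIPTION ]
#     \tClusterMember
#     \t\t*memberName member01
#     \t\t*nodeName node01
#
# '@' lines are directives, bare names open a config object (optionally
# prefixed with !, ? or + for the operation), '*' lines are key attributes
# used for matching, '-' lines are plain attributes with optional '$[ ... ]'
# variable substitution, and nesting depth is expressed with leading tabs.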
WDR_CHECKSUM_DESCRIPTION = (
'Checksum of deployed EAR file and application manifest'
)
def _defaultFilter(value):
if value is None:
return ''
else:
return str(value)
def _processFilter(value, filterExpression, variables):
filter = _defaultFilter
context = variables
try:
for seg in filterExpression.split('.'):
filter = context[seg]
if isinstance(filter, DictType):
context = filter
except KeyError:
raise KeyError(filterExpression)
return filter(value)
def _processFilters(value, filterExpression, variables):
if filterExpression is not None:
for f in [s.strip() for s in filterExpression.split('|')]:
if f:
value = _processFilter(value, f, variables)
return value
def _lookupVariable(literal, expression, filterExpression, variables):
value = None
if literal:
value = literal[1:-1]
else:
context = variables
try:
for seg in expression.split('.'):
value = context[seg]
if isinstance(value, DictType):
context = value
except KeyError:
raise KeyError(expression)
if callable(value):
value = value(expression, variables)
return _defaultFilter(_processFilters(value, filterExpression, variables))
def substituteVariables(value, variables):
if not value:
return value
return re.sub(
_variablePattern,
(
lambda k, v=variables:
_lookupVariable(k.group('val'), k.group('var'), k.group('filter'), v)
),
value
)
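# Illustrative behaviour of substituteVariables (not part of the original
# module): with variables = {'cell': {'name': 'cell01'},
# 'upper': lambda v: v.upper()}, the call
# substituteVariables('srv_$[ cell.name | upper ]', variables) yields
# 'srv_CELL01', and substituteVariables("$[ 'literal' ]", variables) yields
# 'literal'. Filter names after '|' are looked up in the same variables
# mapping, so any filter used must be provided there.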
def _construct_ServerCluster(
manifestObject, parentObject, parentAttribute, attributeCache
):
args = [
'-clusterConfig', [
'-clusterName',
manifestObject.keys.get('name')
or
manifestObject.getAttribute('name'),
'-preferLocal', 'true'
]
]
logger.debug('creating cluster %s', args)
result = wdr.config.ConfigObject(
wdr.config._parseConfigId(AdminTask.createCluster(args))
)
return result
def _construct_ClusterMember(
manifestObject, parentObject, parentAttribute, attributeCache
):
if parentObject._type != 'ServerCluster':
raise Exception(
'ClusterMember objects can be created only in the context'
' of ServerCluster'
)
cluster = parentObject
members = attributeCache.getAttribute(cluster, 'members')
args = [
'-clusterName', attributeCache.getAttribute(cluster, 'name'),
'-memberConfig', [
'-memberNode',
manifestObject.keys.get('nodeName')
or
manifestObject.getAttribute('nodeName'),
'-memberName',
manifestObject.keys.get('memberName')
or
manifestObject.getAttribute('memberName'),
'-memberWeight', '2',
'-genUniquePorts', 'true',
'-replicatorEntry', 'false'
],
]
if len(members) == 0:
args.extend(
[
'-firstMember',
[
'-templateName', 'default',
'-nodeGroup', 'DefaultNodeGroup',
'-coreGroup', 'DefaultCoreGroup'
]
]
)
logger.debug('creating cluster member %s', args)
result = wdr.config.ConfigObject(
wdr.config._parseConfigId(
AdminTask.createClusterMember(args)
)
)
attributeCache.invalidate(cluster, 'members')
attributeCache.invalidate(cluster)
return result
def _construct_J2CActivationSpec(
manifestObject, parentObject, parentAttribute, attributeCache
):
if parentObject._type != 'J2CResourceAdapter':
raise Exception(
'J2CActivationSpec objects can be created only in the context'
' of J2CResourceAdapter'
)
adapter = parentObject
args = [
'-name',
manifestObject.keys.get('name')
or
manifestObject.getAttribute('name'),
'-jndiName',
manifestObject.keys.get('jndiName')
or
manifestObject.getAttribute('jndiName')
or
'',
'-destinationJndiName',
manifestObject.keys.get('destinationJndiName')
or
manifestObject.getAttribute('destinationJndiName')
or
'',
'-authenticationAlias',
manifestObject.keys.get('authenticationAlias')
or
manifestObject.getAttribute('authenticationAlias')
or
'',
'-messageListenerType', 'javax.jms.MessageListener'
]
logger.debug(
'creating activation spec in %s with arguments %s', adapter, args
)
result = wdr.config.ConfigObject(
AdminTask.createJ2CActivationSpec(str(adapter), args)
)
attributeCache.invalidate(adapter, 'j2cActivationSpec')
return result
def _construct_J2CAdminObject(
manifestObject, parentObject, parentAttribute, attributeCache
):
if parentObject._type != 'J2CResourceAdapter':
raise Exception(
'J2CAdminObject objects can be created only in the context'
' of J2CResourceAdapter'
)
adapter = parentObject
adminObjectInterface = None
properties = manifestObject.getAttribute('properties')
for property in properties:
name = property.keys.get('name') or property.getAttribute('name')
if name == 'QueueName':
adminObjectInterface = "javax.jms.Queue"
break
elif name == 'TopicName':
adminObjectInterface = "javax.jms.Topic"
break
args = [
'-adminObjectInterface',
adminObjectInterface,
'-name',
manifestObject.keys.get('name')
or
manifestObject.getAttribute('name'),
'-jndiName',
manifestObject.keys.get('jndiName')
or
manifestObject.getAttribute('jndiName'),
'-description',
manifestObject.keys.get('description')
or
manifestObject.getAttribute('description')
or
''
]
logger.debug(
'creating J2C admin object in %s with arguments %s', adapter, args
)
result = wdr.config.ConfigObject(
AdminTask.createJ2CAdminObject(str(adapter), args)
)
attributeCache.invalidate(adapter, 'j2cAdminObjects')
return result
def _construct_J2CConnectionFactory(
manifestObject, parentObject, parentAttribute, attributeCache
):
if parentObject._type != 'J2CResourceAdapter':
raise Exception(
'J2CConnectionFactory objects can be created only in the context'
' of J2CResourceAdapter'
)
adapter = parentObject
args = [
'-name',
manifestObject.keys.get('name')
or
manifestObject.getAttribute('name'),
'-jndiName',
manifestObject.keys.get('jndiName')
or
manifestObject.getAttribute('jndiName')
or
'',
'-connectionFactoryInterface', 'javax.jms.ConnectionFactory'
]
logger.debug(
'creating connection factory in %s with arguments %s', adapter, args
)
result = wdr.config.ConfigObject(
AdminTask.createJ2CConnectionFactory(str(adapter), args)
)
attributeCache.invalidate(adapter)
return result
def _construct_SIBQueue(
manifestObject, parentObject, parentAttribute, attributeCache
):
if parentObject._type != 'SIBus':
raise Exception(
'SIBQueue objects can be created only in the context'
' of SIBus'
)
sibus = parentObject
args = [
'-name',
manifestObject.keys.get('identifier')
or
manifestObject.getAttribute('identifier'),
'-bus',
sibus.name,
'-type', 'Queue',
'-cluster',
manifestObject.getAttribute('localizationPointRefs')[0].getAttribute('cluster')
or
''
]
logger.debug(
'creating SIB queue in %s with arguments %s', sibus.name, args
)
result = wdr.config.ConfigObject(
AdminTask.createSIBDestination(args)
)
return result
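# Dispatch table used by ManifestConfigObject._create below: the types listed
# here need an AdminTask-based constructor rather than the generic
# parentObject._create(...) call.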
_constructors = {
'ServerCluster': _construct_ServerCluster,
'ClusterMember': _construct_ClusterMember,
'J2CActivationSpec': _construct_J2CActivationSpec,
'J2CAdminObject': _construct_J2CAdminObject,
'J2CConnectionFactory': _construct_J2CConnectionFactory,
'SIBQueue': _construct_SIBQueue,
}
class Operations:
names = {
'+': 'assure',
'?': 'customize',
'!': 'remove',
}
assure, customize, remove = ('+', '?', '!',)
class ManifestConfigObject:
def __init__(self, type, filename=None, linenumber=0):
self.type = type
self.operation = Operations.assure
self.filename = filename
self.linenumber = linenumber
self.keys = {}
self.items = []
self.anchor = None
self.reference = None
self.templateName = None
def isEmpty(self):
return (
len(self.items) == 0
and
len(self.keys) == 0
)
def __str__(self):
return self._toString(0)
def __unicode__(self):
return unicode(self._toString(0))
def getSourceLocation(self):
if self.filename and self.linenumber:
return '%s(%d)' % (self.filename, self.linenumber)
else:
return '(unknown source)'
def _toString(self, indent):
result = ''
opcode = ''
if self.operation != Operations.assure:
opcode = self.operation
if self.anchor:
result += (
"%s%s%s #%s\n"
%
("\t" * indent, opcode, self.type, self.anchor)
)
elif self.reference:
result += (
"%s%s%s &%s\n"
%
("\t" * indent, opcode, self.type, self.reference)
)
else:
result += (
"%s%s%s\n"
%
("\t" * indent, opcode, self.type)
)
for (k, v) in self.keys.items():
result += "%s*%s %s\n" % ("\t" * (indent + 1), k, v)
for item in self.items:
if item.get('attribute'):
name = item['name']
value = item['value']
if isinstance(value, ListType):
result += "%s-%s\n" % ("\t" * (indent + 1), name)
for c in value:
result += c._toString(indent + 2)
elif isinstance(value, ManifestConfigObject):
result += "%s-%s\n" % ("\t" * (indent + 1), name)
result += value._toString(indent + 2)
else:
value = value.replace('\r\n', '$[ __wdr__.nl ]')
value = value.replace('\n', '$[ __wdr__.nl ]')
result += "%s-%s %s\n" % ("\t" * (indent + 1), name, value)
elif item.get('child'):
result += item['value']._toString(indent + 1)
return result
def getAttribute(self, name):
for item in self.items:
if item.get('attribute') and item['name']==name:
return item['value']
return None
def mapOperation(self, opcode):
if opcode is None:
return Operations.assure
if not Operations.names.has_key(opcode):
raise Exception(
'[%s] Invalid operation code: "%s"'
% (self.getSourceLocation(), opcode)
)
return opcode
def apply(self, anchors, parentObject, parentAttribute, attributeCache):
typeName = self.type
logger.debug(
'importing object type %s as child of object %s and property %s',
typeName, parentObject, parentAttribute
)
if parentObject:
self._applyWithParentContext(
anchors, parentObject, parentAttribute, attributeCache
)
else:
self._applyWithoutParentContext(
anchors, parentObject, parentAttribute, attributeCache
)
def _filterMatching(self, candidateList, attributeCache):
matchingList = []
for o in candidateList:
if o is None:
continue
if o._type == self.type:
for (k, v) in self.keys.items():
if attributeCache.getAttribute(o, k) != v:
break
else:
matchingList.append(o)
return matchingList
def _create(self, parentObject, parentAttribute, attributeCache):
typeName = self.type
typeInfo = wdr.config.getTypeInfo(typeName)
simpleAttributes = []
for (name, value) in self.keys.items():
if typeInfo.attributes.has_key(name):
if wdr.config.getTypeInfo(
typeInfo.attributes[name].type
).converter:
simpleAttributes.append([name, value])
for item in self.items:
if item.get('attribute'):
name = item['name']
value = item['value']
if typeInfo.attributes.has_key(name):
if wdr.config.getTypeInfo(
typeInfo.attributes[name].type
).converter:
simpleAttributes.append([name, value])
else:
raise Exception(
'[%s] Invalid attribute %s specified for object %s(%s)'
% (
self.getSourceLocation(), name, typeName,
self.keys
)
)
constructor = _constructors.get(typeName)
if constructor:
result = constructor(
self, parentObject, parentAttribute, attributeCache
)
else:
result = parentObject._create(
typeName, parentAttribute, simpleAttributes, self.templateName
)
if parentAttribute is not None:
attributeCache.invalidate(parentObject, parentAttribute)
return result
def _remove(self, configObject, anchors, attributeCache):
if self.items:
raise Exception(
'[%s] Objects being removed '
'must not have attributes nor children'
% self.getSourceLocation()
)
if self.reference:
raise Exception(
'[%s] Remove not implemented yet' % self.getSourceLocation()
)
else:
configObject.remove()
def _setAnchor(self, anchors, configObject):
if self.anchor:
if anchors.has_key(self.anchor):
raise Exception(
'[%s] Duplicate anchor: %s'
% (self.getSourceLocation(), self.anchor)
)
else:
logger.debug(
'setting anchor %s to %s', self.anchor, configObject
)
anchors[self.anchor] = configObject
def _updateSimpleAttributes(self, configObject, attributeCache):
typeName = self.type
typeInfo = wdr.config.getTypeInfo(typeName)
for item in self.items:
if item.get('attribute'):
name = item['name']
value = item['value']
if not typeInfo.attributes.has_key(name):
raise Exception(
'[%s] Invalid attribute %s specified for object %s(%s)'
% (
self.getSourceLocation(), name, typeName, self.keys
)
)
attributeInfo = typeInfo.attributes[name]
attributeTypeInfo = wdr.config.getTypeInfo(attributeInfo.type)
if attributeTypeInfo.converter:
if attributeInfo.list:
if value:
newValue = value.split(';')
else:
newValue = value
else:
newValue = value
try:
configObject._modify([[name, newValue]])
attributeCache.invalidate(configObject, name)
# Copyright 2019 U.C. Berkeley RISE Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
import time
import zmq
from cloudburst.shared.proto.cloudburst_pb2 import GenericResponse
from cloudburst.shared.proto.internal_pb2 import (
CPU, GPU, # Cloudburst's executor types
PinFunction
)
from cloudburst.shared.proto.shared_pb2 import StringSet
from cloudburst.server.scheduler.policy.base_policy import (
BaseCloudburstSchedulerPolicy
)
from cloudburst.server.scheduler.utils import (
get_cache_ip_key,
get_pin_address,
get_unpin_address
)
sys_random = random.SystemRandom()
NUM_EXECUTOR_THREADS = 3
class DefaultCloudburstSchedulerPolicy(BaseCloudburstSchedulerPolicy):
def __init__(self, pin_accept_socket, pusher_cache, kvs_client, ip,
random_threshold=0.20, local=False):
# This scheduler's IP address.
self.ip = ip
# A socket to listen for confirmations of pin operations' successes.
self.pin_accept_socket = pin_accept_socket
# A cache for zmq.PUSH sockets.
self.pusher_cache = pusher_cache
# This thread's Anna KVS client.
self.kvs_client = kvs_client
# A map to track how many requests have been routed to each executor in
# the most recent timeslice.
self.running_counts = {}
# A map to track nodes which have recently reported high load. These
# nodes will not be sent requests until after a cooling period.
self.backoff = {}
# A map to track which caches are currently caching which keys.
self.key_locations = {}
# Executors which currently have no functions pinned on them.
self.unpinned_cpu_executors = set()
# The subset of all executors that have access to GPUs and are
# currently unallocated.
# NOTE: We currently only support GPU executors as a part of DAG
# requests.
self.unpinned_gpu_executors = set()
# A map from function names to the executor(s) on which they are
# pinned.
self.function_locations = {}
# A map to sequester function location information until all functions
# in a DAG have accepted their pin operations.
self.pending_dags = {}
# The most recently reported statuses of each executor thread.
self.thread_statuses = {}
# This quantifies how many requests should be routed stochastically
# rather than by policy.
self.random_threshold = random_threshold
# Indicates if we are running in local mode
self.local = local
def pick_executor(self, references, function_name=None, colocated=[],
schedule=None):
# Construct a map which maps from IP addresses to the number of
# relevant arguments they have cached. For the time being, we will
# just pick the machine that has the most keys cached.
arg_map = {}
if function_name:
executors = set(self.function_locations[function_name])
else:
executors = set(self.unpinned_cpu_executors)
# First priority is scheduling things on the same node if possible.
# Otherwise, continue on with the regular policy.
if len(colocated) > 0:
candidate_nodes = set()
for fn in colocated:
if fn in schedule.locations:
ip = schedule.locations[fn].split(':')[0]
candidate_nodes.add(ip)
for ip, tid in executors:
if ip in candidate_nodes:
return ip, tid
#for executor in self.backoff:
# executors.discard(executor)
## Generate a list of all the keys in the system; if any of these nodes
## have received many requests, we remove them from the executor set
## with high probability.
#for key in self.running_counts:
# if (len(self.running_counts[key]) > 1000 and sys_random.random() >
# self.random_threshold):
# executors.discard(key)
if len(executors) == 0:
logging.error('No available executors.')
return None
executor_ips = set([e[0] for e in executors])
# For each reference, we look at all the places where they are cached,
# and we calculate which IP address has the most references cached.
for reference in references:
if reference.key in self.key_locations:
ips = self.key_locations[reference.key]
for ip in ips:
# Only choose this cached node if it's a valid executor for
# our purposes.
if ip in executor_ips:
if ip not in arg_map:
arg_map[ip] = 0
arg_map[ip] += 1
# Get the IP address that has the maximum value in the arg_map, if
# there are any values.
max_ip = None
if arg_map:
max_ip = max(arg_map, key=arg_map.get)
# Pick a random thread from our potential executors that is on that IP
# address with the most keys cached.
if max_ip:
candidates = list(filter(lambda e: e[0] == max_ip, executors))
max_ip = sys_random.choice(candidates)
# If max_ip was never set (i.e. there were no references cached
# anywhere), or with some random chance, we assign this node to a
# random executor.
if not max_ip or sys_random.random() < self.random_threshold:
max_ip = sys_random.sample(executors, 1)[0]
if max_ip not in self.running_counts:
self.running_counts[max_ip] = set()
self.running_counts[max_ip].add(time.time())
# Remove this IP/tid pair from the system's metadata until it notifies
# us that it is available again, but only do this for non-DAG requests.
#if not self.local and not function_name:
# self.unpinned_cpu_executors.discard(max_ip)
if not max_ip:
logging.error('No available executors.')
return max_ip
def pin_function(self, dag_name, function_ref, colocated):
# If there are no functions left to choose from, then we return None,
# indicating that we ran out of resources to use.
if function_ref.gpu and len(self.unpinned_gpu_executors) == 0:
return False
elif not function_ref.gpu and len(self.unpinned_cpu_executors) == 0:
return False
if dag_name not in self.pending_dags:
self.pending_dags[dag_name] = []
# Make a copy of the set of executors, so that we don't modify the
# system's metadata.
if function_ref.gpu:
candidates = set(self.unpinned_gpu_executors)
elif len(colocated) == 0:
# If this is not a GPU function, just look at all of the unpinned
# executors.
candidates = set(self.unpinned_cpu_executors)
else:
candidates = set()
already_pinned = set()
for fn, thread in self.pending_dags[dag_name]:
if fn in colocated:
already_pinned.add((fn, thread))
candidate_nodes = set()
if len(already_pinned) > 0:
for fn, thread in already_pinned:
candidate_nodes.add(thread[0]) # The node's IP
for node, tid in self.unpinned_cpu_executors:
if node in candidate_nodes:
candidates.add((node, tid))
else:
# If this is the first colocate to be pinned, try to assign to
# an empty node.
nodes = {}
for node, tid in self.unpinned_cpu_executors:
if node not in nodes:
nodes[node] = 0
nodes[node] += 1
for node in nodes:
if nodes[node] == NUM_EXECUTOR_THREADS:
for i in range(NUM_EXECUTOR_THREADS):
candidates.add((node, i))
if len(candidates) == 0: # There are no valid executors to colocate on.
return self.pin_function(dag_name, function_ref, [])
# Construct a PinFunction message to be sent to executors.
pin_msg = PinFunction()
pin_msg.name = function_ref.name
pin_msg.batching = function_ref.batching
pin_msg.response_address = self.ip
serialized = pin_msg.SerializeToString()
while True:
# Pick a random executor from the set of candidates and attempt to
# pin this function there.
node, tid = sys_random.sample(candidates, 1)[0]
sckt = self.pusher_cache.get(get_pin_address(node, tid))
sckt.send(serialized)
response = GenericResponse()
try:
response.ParseFromString(self.pin_accept_socket.recv())
except zmq.ZMQError:
logging.error('Pin operation to %s:%d timed out. Retrying.' %
(node, tid))
continue
# Do not use this executor either way: If it rejected, it has
# something else pinned, and if it accepted, it has pinned what we
# just asked it to pin. In local mode, however, we allow executors
# to have multiple functions pinned.
if not self.local:
if function_ref.gpu:
self.unpinned_gpu_executors.discard((node, tid))
candidates.discard((node, tid))
else:
self.unpinned_cpu_executors.discard((node, tid))
candidates.discard((node, tid))
if response.success:
# The pin operation succeeded, so we return the node and thread
# ID to the caller.
self.pending_dags[dag_name].append((function_ref.name, (node,
tid)))
return True
else:
# The pin operation was rejected, remove node and try again.
logging.error('Node %s:%d rejected pin for %s. Retrying.'
% (node, tid, function_ref.name))
continue
if len(candidates) == 0 and len(colocated) > 0:
# Try again without colocation.
return self.pin_function(dag_name, function_ref, [])
def commit_dag(self, dag_name):
for function_name, location in self.pending_dags[dag_name]:
if function_name not in self.function_locations:
self.function_locations[function_name] = set()
self.function_locations[function_name].add(location)
del self.pending_dags[dag_name]
def discard_dag(self, dag, pending=False):
pinned_locations = []
if pending:
if dag.name in self.pending_dags:
# If the DAG was pending, we can simply look at the sequestered
# pending metadata.
pinned_locations = list(self.pending_dags[dag.name])
del self.pending_dags[dag.name]
else:
# If the DAG was not pinned, we construct a set of all the
# locations where functions were pinned for this DAG.
for function_ref in dag.functions:
for location in self.function_locations[function_ref.name]:
pinned_locations.append((function_ref.name, location))
# For each location, we fire-and-forget an unpin message.
for function_name, location in pinned_locations:
# py-motmetrics - Metrics for multiple object tracker (MOT) benchmarking.
# https://github.com/cheind/py-motmetrics/
#
# MIT License
# Copyright (c) 2017-2020 <NAME>, <NAME> and others.
# See LICENSE file for terms.
"""Obtain metrics from event logs."""
# pylint: disable=redefined-outer-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import inspect
import logging
import time
import numpy as np
import pandas as pd
from motmetrics import math_util
from motmetrics.lap import linear_sum_assignment
from motmetrics.mot import MOTAccumulator
try:
_getargspec = inspect.getfullargspec
except AttributeError:
_getargspec = inspect.getargspec
class MetricsHost:
"""Keeps track of metrics and intra metric dependencies."""
def __init__(self):
self.metrics = OrderedDict()
def register(self, fnc, deps='auto', name=None, helpstr=None, formatter=None, fnc_m=None, deps_m='auto'):
"""Register a new metric.
Params
------
fnc : Function
Function that computes the metric to be registered. The number of arguments
is 1 + N, where N is the number of dependencies of the metric to be registered.
The order of the argument passed is `df, result_dep1, result_dep2, ...`.
Kwargs
------
deps : string, list of strings or None, optional
The dependencies of this metric. Each dependency is evaluated and the result
is passed as argument to `fnc` as described above. If None is specified, the
function does not have any dependencies. If a list of strings is given, dependencies
for these metric strings are registered. If 'auto' is passed, the dependencies
are deduced from argument inspection of the method. For this to work the argument
names have to be equal to the intended dependencies.
name : string or None, optional
Name identifier of this metric. If None is passed the name is deduced from
function inspection.
helpstr : string or None, optional
A description of what the metric computes. If no help message is given it
is deduced from the docstring of the function.
formatter: Format object, optional
An optional default formatter when rendering metric results as a string. E.g. to
render the result `0.35` as `35%` one would pass `{:.2%}.format`
fnc_m : Function or None, optional
Function that merges metric results. The number of arguments
is 1 + N, where N is the number of dependencies of the metric to be registered.
The order of the argument passed is `df, result_dep1, result_dep2, ...`.
"""
assert fnc is not None, 'No function given for metric {}'.format(name)
if deps is None:
deps = []
elif deps == 'auto':
if _getargspec(fnc).defaults is not None:
k = - len(_getargspec(fnc).defaults)
else:
k = len(_getargspec(fnc).args)
deps = _getargspec(fnc).args[1:k] # assumes dataframe as first argument
if name is None:
name = fnc.__name__ # Relies on meaningful function names, i.e don't use for lambdas
if helpstr is None:
helpstr = inspect.getdoc(fnc) if inspect.getdoc(fnc) else 'No description.'
helpstr = ' '.join(helpstr.split())
if fnc_m is None and name + '_m' in globals():
fnc_m = globals()[name + '_m']
if fnc_m is not None:
if deps_m is None:
deps_m = []
elif deps_m == 'auto':
if _getargspec(fnc_m).defaults is not None:
k = - len(_getargspec(fnc_m).defaults)
else:
k = len(_getargspec(fnc_m).args)
deps_m = _getargspec(fnc_m).args[1:k] # assumes dataframe as first argument
else:
deps_m = None
self.metrics[name] = {
'name': name,
'fnc': fnc,
'fnc_m': fnc_m,
'deps': deps,
'deps_m': deps_m,
'help': helpstr,
'formatter': formatter
}
@property
def names(self):
"""Returns the name identifiers of all registered metrics."""
return [v['name'] for v in self.metrics.values()]
@property
def formatters(self):
"""Returns the formatters for all metrics that have associated formatters."""
return {
k: v['formatter'] for k, v in self.metrics.items()
if v['formatter'] is not None
}
def list_metrics(self, include_deps=False):
"""Returns a dataframe containing names, descriptions and optionally dependencies for each metric."""
cols = ['Name', 'Description', 'Dependencies']
if include_deps:
data = [(m['name'], m['help'], m['deps']) for m in self.metrics.values()]
else:
data = [(m['name'], m['help']) for m in self.metrics.values()]
cols = cols[:-1]
return pd.DataFrame(data, columns=cols)
def list_metrics_markdown(self, include_deps=False):
"""Returns a markdown ready version of `list_metrics`."""
df = self.list_metrics(include_deps=include_deps)
fmt = [':---' for i in range(len(df.columns))]
df_fmt = pd.DataFrame([fmt], columns=df.columns)
df_formatted = pd.concat([df_fmt, df])
return df_formatted.to_csv(sep="|", index=False)
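# Illustrative registration with automatic dependency resolution (a sketch,
# not part of the class itself). Because deps='auto' inspects argument names,
# a metric whose signature is (df, num_frames) is wired to depend on the
# registered 'num_frames' metric; the df layout here is assumed to be the
# DataFrameMap produced by events_to_df_map used in compute below.
#
#     def num_frames(df):
#         """Total number of frames."""
#         return df.full.index.get_level_values(0).unique().shape[0]
#
#     def events_per_frame(df, num_frames):
#         """Average number of events per frame."""
#         return len(df.full) / num_frames
#
#     mh = MetricsHost()
#     mh.register(num_frames, formatter='{:d}'.format)
#     mh.register(events_per_frame, formatter='{:.2f}'.format)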
def compute(self, df, ana=None, metrics=None, return_dataframe=True, return_cached=False, name=None):
"""Compute metrics on the dataframe / accumulator.
Params
------
df : MOTAccumulator or pandas.DataFrame
The dataframe to compute the metrics on
Kwargs
------
ana: dict or None, optional
To cache results for fast computation.
metrics : string, list of string or None, optional
The identifiers of the metrics to be computed. This method will only
compute the minimal set of necessary metrics to fulfill the request.
If None is passed all registered metrics are computed.
return_dataframe : bool, optional
Return the result as pandas.DataFrame (default) or dict.
return_cached : bool, optional
If true all intermediate metrics required to compute the desired metrics are returned as well.
name : string, optional
When returning a pandas.DataFrame this is the index of the row containing
the computed metric values.
"""
if isinstance(df, MOTAccumulator):
df = df.events
if metrics is None:
metrics = motchallenge_metrics
elif isinstance(metrics, str):
metrics = [metrics]
df_map = events_to_df_map(df)
cache = {}
options = {'ana': ana}
for mname in metrics:
cache[mname] = self._compute(df_map, mname, cache, options, parent='summarize')
if name is None:
name = 0
if return_cached:
data = cache
else:
data = OrderedDict([(k, cache[k]) for k in metrics])
ret = pd.DataFrame(data, index=[name]) if return_dataframe else data
return ret
def compute_overall(self, partials, metrics=None, return_dataframe=True, return_cached=False, name=None):
"""Compute overall metrics based on multiple results.
Params
------
partials : list of metric results to combine overall
Kwargs
------
metrics : string, list of string or None, optional
The identifiers of the metrics to be computed. This method will only
compute the minimal set of necessary metrics to fulfill the request.
If None is passed all registered metrics are computed.
return_dataframe : bool, optional
Return the result as pandas.DataFrame (default) or dict.
return_cached : bool, optional
If true all intermediate metrics required to compute the desired metrics are returned as well.
name : string, optional
When returning a pandas.DataFrame this is the index of the row containing
the computed metric values.
Returns
-------
df : pandas.DataFrame
A dataframe containing the metrics in columns and names in rows.
"""
if metrics is None:
metrics = motchallenge_metrics
elif isinstance(metrics, str):
metrics = [metrics]
cache = {}
for mname in metrics:
cache[mname] = self._compute_overall(partials, mname, cache, parent='summarize')
if name is None:
name = 0
if return_cached:
data = cache
else:
data = OrderedDict([(k, cache[k]) for k in metrics])
return pd.DataFrame(data, index=[name]) if return_dataframe else data
def compute_many(self, dfs, anas=None, metrics=None, names=None, generate_overall=False):
"""Compute metrics on multiple dataframe / accumulators.
Params
------
dfs : list of MOTAccumulator or list of pandas.DataFrame
The data to compute metrics on.
Kwargs
------
anas: dict or None, optional
To cache results for fast computation.
metrics : string, list of string or None, optional
The identifiers of the metrics to be computed. This method will only
compute the minimal set of necessary metrics to fulfill the request.
If None is passed all registered metrics are computed.
names : list of string, optional
The names of individual rows in the resulting dataframe.
generate_overall : boolean, optional
If true resulting dataframe will contain a summary row that is computed
using the same metrics over an accumulator that is the concatenation of
all input containers. In creating this temporary accumulator, care is taken
to offset frame indices to avoid object id collisions.
Returns
-------
df : pandas.DataFrame
A dataframe containing the metrics in columns and names in rows.
"""
if metrics is None:
metrics = motchallenge_metrics
elif isinstance(metrics, str):
metrics = [metrics]
assert names is None or len(names) == len(dfs)
st = time.time()
if names is None:
names = list(range(len(dfs)))
if anas is None:
anas = [None] * len(dfs)
partials = [
self.compute(acc,
ana=analysis,
metrics=metrics,
name=name,
return_cached=True,
return_dataframe=False
)
for acc, analysis, name in zip(dfs, anas, names)]
logging.info('partials: %.3f seconds.', time.time() - st)
details = partials
partials = [pd.DataFrame(OrderedDict([(k, i[k]) for k in metrics]), index=[name]) for i, name in zip(partials, names)]
if generate_overall:
names = 'OVERALL'
# merged, infomap = MOTAccumulator.merge_event_dataframes(dfs, return_mappings = True)
# dfs = merged
on an entry
:return: Tuple with the job counts (used for stats)
and glidein counts (used for glidein_max_run)
Both are dictionaries keyed by glidename (entry)
"""
out_job_counts = {}
out_glidein_counts = {}
# Get the frequency of each running on
job_running_on_counts = dict(jobs["RunningOn"].value_counts())
for glideid in self.glideid_list:
glide_str = f"{glideid[1]}@{glideid[0].split(':')[0]}"
out_job_counts[glideid] = job_running_on_counts.get(glide_str, 0)
# Now figure out count of running glideins based on RemoteHost
glidein_ids = set()
df = jobs.query(f'RunningOn == "{glide_str}"')
unknown_glideins = 0
for _index, row in df.iterrows():
try:
# glidein ID is just glidein_XXXXX_XXXXX@fqdn
# RemoteHost has following valid formats
#
# Static slots
# ------------
# 1 core: glidein_XXXXX_XXXXX@fqdn
# N core: slotN@glidein_XXXXX_XXXXX@fqdn
#
# Dynamic slots
# -------------
# N core: slotN_M@glidein_XXXXX_XXXXX@fqdn
remote_host = row.get("RemoteHost")
token = remote_host.split("@")
glidein_id = f"{token[-2]}@{token[-1]}"
glidein_ids.add(glidein_id)
except Exception:
# If RemoteHost is missing or has a different
# format just increment unknown glideins
# for accounting purposes. Here we assume that
# the job is running in a glidein with 1 slot
unknown_glideins += 1
out_glidein_counts[glideid] = len(glidein_ids) + unknown_glideins
return out_job_counts, out_glidein_counts
def count_glidein_slots(self, slot_types):
"""
Given the slots dataframe, count the number of slots in various
states per entry
"""
count_entry_slots = {}
count_entry_slots_cred = {}
for glideid in self.glideid_list:
request_name = glideid[1]
count_entry_slots[request_name] = {}
count_entry_slots_cred[request_name] = {}
for cred in self.credential_plugin.cred_list:
count_entry_slots_cred[request_name][cred.get_id()] = {}
req_entry, req_name, req_fact = request_name.split("@")
total_entry_slots = pandas.DataFrame()
if not slot_types["Total"]["dataframe"].empty:
# self.logger.info('------- CHECK ---------------------------------------')
# self.logger.info(slot_types['Total']['dataframe'].columns.values)
# self.logger.info('----------------------------------------------')
total_entry_slots = slot_types["Total"]["dataframe"].query(
f"""(GLIDEIN_Entry_Name == "{req_entry}") and (GLIDEIN_Name == "{req_name}") and (GLIDEIN_FACTORY == "{req_fact}")"""
)
entry_slot_types = {
"Total": total_entry_slots,
"Idle": get_idle_slots(total_entry_slots),
"Running": get_running_slots(total_entry_slots),
"Failed": get_failed_slots(total_entry_slots),
"TotalCores": get_nondynamic_slots(total_entry_slots),
"IdleCores": get_idle_slots(total_entry_slots),
"RunningCores": get_running_slots(total_entry_slots),
}
count_entry_slots[request_name]["Total"] = len(entry_slot_types["Total"])
count_entry_slots[request_name]["Idle"] = len(entry_slot_types["Idle"])
count_entry_slots[request_name]["Running"] = len(entry_slot_types["Running"])
for st in entry_slot_types:
if st == "TotalCores":
count_entry_slots[request_name][st] = count_total_cores(entry_slot_types[st])
elif st == "IdleCores":
count_entry_slots[request_name][st] = count_idle_cores(entry_slot_types[st])
elif st == "RunningCores":
count_entry_slots[request_name][st] = count_running_cores(entry_slot_types[st])
elif st == "Running":
count_entry_slots[request_name][st] = len(entry_slot_types[st]) - len(
get_running_pslots(total_entry_slots)
)
else:
count_entry_slots[request_name][st] = len(entry_slot_types[st])
# Further get counts per credentials
for cred in self.credential_plugin.cred_list:
# Initialize all counts to 0 for potential empty frames
count_entry_slots_cred[request_name][cred.get_id()][st] = 0
entry_slots_cred = pandas.DataFrame()
if not entry_slot_types[st].empty:
entry_slots_cred = entry_slot_types[st].query(
f'GLIDEIN_CredentialIdentifier == "{cred.get_id()}"'
)
if st == "TotalCores":
count_entry_slots_cred[request_name][cred.get_id()][st] = count_total_cores(entry_slots_cred)
elif st == "IdleCores":
count_entry_slots_cred[request_name][cred.get_id()][st] = count_idle_cores(entry_slots_cred)
elif st == "RunningCores":
count_entry_slots_cred[request_name][cred.get_id()][st] = count_running_cores(entry_slots_cred)
elif st == "Running":
count_entry_slots_cred[request_name][cred.get_id()][st] = len(entry_slots_cred) - len(
get_running_pslots(entry_slots_cred)
)
else:
count_entry_slots_cred[request_name][cred.get_id()][st] = len(entry_slots_cred)
return (count_entry_slots, count_entry_slots_cred)
# IDEA: pass the site buckets and use them as the match expression; should work
def count_match(self, job_types, job_type, entries):
"""
Count the match for which job is running on which entry
"""
# TODO: This needs to be expanded to use more attrs and not just
# RequestCpus. Similar to glideFrontendLib.countMatch()
# TODO: Need to understand how to incorporate match_expr functionality
# using data frames in DE
direct_match = {} # Number of direct job matches
prop_match = {} # Proportional match: jobs split across all matching entries
hereonly_match = {} # Jobs that can only run here
prop_match_cpu = {} # Total Cpus: prop_match * GLIDEIN_CPUS
jobs = job_types[job_type]["dataframe"]
if not jobs.empty:
# Get group of jobs based on request cpus
job_groups = jobs.groupby("RequestCpus")
for (req_cpus, group) in job_groups:
# Group jobs by matching criteria: RequestCpus for now
# We care about job counts for each group
job_count = len(group)
matches = set()
# TODO: how is this handling AUTO and GLIDEIN_ESTIMATED_CPUS?
for _index, row in entries.query(f"GLIDEIN_CPUS >= {req_cpus}").iterrows():
matches.add((row.get("CollectorHost"), row.get("Name")))
if len(matches) == 0:
# These jobs do not match anywhere. Special entry (None, None)
direct_match[(None, None)] = direct_match.get((None, None), 0) + job_count
prop_match[(None, None)] = prop_match.get((None, None), 0) + job_count
hereonly_match[(None, None)] = hereonly_match.get((None, None), 0) + job_count
prop_match_cpu[(None, None)] = prop_match_cpu.get((None, None), 0) + (job_count * req_cpus)
else:
for key in matches:
direct_match[key] = direct_match.get(key, 0) + job_count
if len(matches) == 1:
# These jobs can only run here
hereonly_match[key] = hereonly_match.get(key, 0) + job_count
else:
hereonly_match[key] = hereonly_match.get(key, 0)
fraction = math.ceil(float(job_count) / len(matches))
prop_match[key] = prop_match.get(key, 0) + fraction
this_entry = entries.query(f'Name =="{key[1]}"')
# glidein_cpus = 1 # default to 1 if not defined
for _index, row in this_entry.iterrows():
glidein_cpus = row.get("GLIDEIN_CPUS", 1)
prop_match_cpu[key] = math.ceil(
(prop_match_cpu.get(key, 0) + (fraction * req_cpus)) / glidein_cpus
)
total = job_types[job_type]["abs"]
return (direct_match, prop_match, hereonly_match, prop_match_cpu, total)
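# Worked example of the proportional counting above (illustrative numbers):
# if 12 jobs requesting 4 CPUs match exactly two entries A and B, then
# direct_match is 12 for both, hereonly_match is 0 for both,
# prop_match is ceil(12/2) = 6 for both, and with GLIDEIN_CPUS = 8 on entry A,
# prop_match_cpu[A] = ceil(6*4/8) = 3.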
def categorize_jobs(self, jobs_df):
"""
Categorize jobs based on different job status and voms/proxy requirement
"""
# TODO: Identify the list of schedds that should not be considered when
# requesting glideins for idle jobs. Schedds with one of the
# criteria
# 1. Running jobs (TotalRunningJobs + TotalSchedulerJobsRunning)
# is greater than 95% of max number of jobs (MaxJobsRunning)
# 2. Transfer queue (TransferQueueNumUploading) is > 95%
# of max allowed transfers (TransferQueueMaxUploading)
# 3. CurbMatchmaking in schedd classad is true
# Need to adjust the jobs_df below once we do that
if jobs_df.empty:
idle_all = jobs_df
idle = jobs_df
old_idle = jobs_df
idle_3600 = jobs_df
voms_idle = jobs_df
proxy_idle = jobs_df
running = jobs_df
else:
idle_all = jobs_df.query("JobStatus == 1")
idle = jobs_df.query("JobStatus == 1")
old_idle = jobs_df.query("JobStatus == 1 and (ServerTime - EnteredCurrentStatus) > 600")
idle_3600 = jobs_df.query("JobStatus == 1 and (ServerTime - EnteredCurrentStatus) > 3600")
voms_idle = jobs_df.query('JobStatus == 1 and (x509UserProxyFirstFQAN !="")')
proxy_idle = jobs_df.query('JobStatus == 1 and (x509userproxy !="")')
running = jobs_df.query("JobStatus == 2")
return {
# 'All': jobs.df,
"IdleAll": {"dataframe": idle_all, "abs": len(idle_all)},
"Idle": {"dataframe": idle, "abs": len(idle)},
"OldIdle": {"dataframe": old_idle, "abs": len(old_idle)},
"Idle_3600": {"dataframe": idle_3600, "abs": len(idle_3600)},
"VomsIdle": {"dataframe": voms_idle, "abs": len(voms_idle)},
"ProxyIdle": {"dataframe": proxy_idle, "abs": len(proxy_idle)},
"Running": {"dataframe": running, "abs": len(running)},
}
def categorize_slots(self, slots_df):
"""
Categorize slots and cores based on their status
"""
# static slots: PartitionableSlot != True
# pslots not partitioned: TotalSlots == 1
# pslots with enough resources: Cpus > 0 and Memory > 2500
# static running slots + dynamic slots + pslots with dynamic slots
idle_slots = get_idle_slots(slots_df)
running_slots = get_running_slots(slots_df)
running_pslots = get_running_pslots(slots_df)
failed_slots = get_failed_slots(slots_df)
nondynamic_slots = get_nondynamic_slots(slots_df)
return {
"Total": {"dataframe": slots_df, "abs": len(slots_df)},
"Idle": {"dataframe": idle_slots, "abs": len(idle_slots)},
"Running": {"dataframe": running_slots, "abs": (len(running_slots) - len(running_pslots))},
"Failed": {"dataframe": failed_slots, "abs": len(failed_slots)},
"TotalCores": {"dataframe": nondynamic_slots, "abs": count_total_cores(nondynamic_slots)},
"IdleCores": {"dataframe": idle_slots, "abs": count_idle_cores(idle_slots)},
"RunningCores": {"dataframe": running_slots, "abs": count_running_cores(running_slots)},
}
def create_glideid_list(self, entries):
"""
Create list of glideids
"""
glideid_list = set()
# TODO: Can we use dataframes apis to do this efficiently?
for _index, row in entries.iterrows():
glideid_list.add((row["CollectorHost"], row["Name"]))
return glideid_list
def identify_limits_triggered(
self,
count_status,
total_glideins,
total_idle_glideins,
fe_total_glideins,
fe_total_idle_glideins,
global_total_glideins,
global_total_idle_glideins,
limits_triggered,
):
# Identify the limits triggered for advertising in glideresource
if count_status["Total"] >= self.entry_max_glideins: # max_running
limits_triggered[
"TotalGlideinsPerEntry"
] = f"count={count_status['Total']}, limit={self.entry_max_glideins,}"
if count_status["Idle"] >= self.entry_max_slots_idle: # max_vms_idle
limits_triggered[
"IdleGlideinsPerEntry"
] = f"count={count_status['Idle']}, limit={self.entry_max_slots_idle}"
if total_glideins >= self.total_max_slots: # was total_max_glideins
limits_triggered["TotalGlideinsPerGroup"] = f"count={total_glideins}, limit={self.total_max_slots}"
if total_idle_glideins >= self.total_max_slots_idle: # was total_max_vms_idle
limits_triggered["IdleGlideinsPerGroup"] = f"count={total_idle_glideins}, limit={self.total_max_slots_idle}"
if fe_total_glideins >= self.fe_total_max_slots: # fe_total_max_glideins
limits_triggered["TotalGlideinsPerFrontend"] = f"count={fe_total_glideins}, limit={self.fe_total_max_slots}"
if fe_total_idle_glideins >= self.fe_total_max_slots_idle: # fe_total_max_vms_idle
limits_triggered[
"IdleGlideinsPerFrontend"
] = f"count={fe_total_idle_glideins}, limit={self.fe_total_max_slots_idle}"
if global_total_glideins >= self.global_total_max_slots: # global_total_max_glideins
limits_triggered[
"TotalGlideinsGlobal"
] = f"count={global_total_glideins}, limit={self.global_total_max_slots}"
if global_total_idle_glideins >= self.global_total_max_slots_idle: # global_total_max_vms_idle
limits_triggered[
"IdleGlideinsGlobal"
] = f"count={global_total_idle_glideins}, limit={self.global_total_max_slots_idle}"
def compute_glidein_min_idle(
self,
count_status,
total_glideins,
total_idle_glideins,
fe_total_glideins,
fe_total_idle_glideins,
global_total_glideins,
global_total_idle_glideins,
effective_idle,
effective_oldidle,
limits_triggered,
):
"""
Compute min idle glideins to request for this entry after considering
all the relevant limits and curbs.
Identify the limits and curbs triggered for advertizing the info
glideresource classad
"""
if (
(count_status["Total"] >= self.entry_max_glideins)
or (count_status["Idle"] >= self.entry_max_slots_idle)
or (total_glideins >= self.total_max_slots)
or (total_idle_glideins >= self.total_max_slots_idle)
or (fe_total_glideins >= self.fe_total_max_slots)
or (fe_total_idle_glideins >= self.fe_total_max_slots_idle)
or (global_total_glideins >= self.global_total_max_slots)
or (global_total_idle_glideins >= self.global_total_max_slots_idle)
):
# Do not request more glideins under following conditions:
# 1. Have all the running jobs I wanted
# 2. Have enough idle vms/slots
# 3. Reached the system-wide limit
glidein_min_idle = 0
limits_triggered["ZeroLimitHit"] = "glidein_min_idle set to 0"
# Modifies limits_triggered dict
common.WARNING("--popstr-max-call-DP must be >= --popstr-min-call-DP")
return False
if args.popstr_require_support is not None:
if args.popstr_require_support < 0:
common.WARNING("--popstr-require-support must be >= 0")
return False
assert "AD" in format_fields
return True
def CheckFilters(format_fields: Set[str],
args: argparse.Namespace,
vcftype: trh.VcfTypes):
r"""Perform checks on user input for filters.
Assert that user input matches the type of the input vcf.
Parameters
----------
format_fields :
The format fields used in this VCF
args :
Contains user arguments
vcftype :
Specifies which tool this VCF came from.
Returns
-------
checks : bool
Set to True if all filters look ok.
Set to False if filters are invalid
"""
if not CheckLocusFilters(args, vcftype):
return False
# Check HipSTR specific filters
if args.hipstr_max_call_flank_indel is not None or \
args.hipstr_max_call_stutter is not None or \
args.hipstr_min_supp_reads is not None or \
args.hipstr_min_call_DP is not None or \
args.hipstr_max_call_DP is not None or \
args.hipstr_min_call_Q is not None:
if vcftype != trh.VcfTypes["hipstr"]:
common.WARNING("HipSTR options can only be applied to HipSTR VCFs")
return False
else:
if not CheckHipSTRFilters(format_fields, args):
return False
# Check GangSTR specific filters
if args.gangstr_min_call_DP is not None or \
args.gangstr_max_call_DP is not None or \
args.gangstr_min_call_Q is not None or \
args.gangstr_expansion_prob_het is not None or \
args.gangstr_expansion_prob_hom is not None or \
args.gangstr_expansion_prob_total is not None or \
args.gangstr_filter_span_only or \
args.gangstr_filter_spanbound_only or \
args.gangstr_filter_badCI or \
args.gangstr_readlen is not None:
# args.gangstr_require_support is not None or \
if vcftype != trh.VcfTypes["gangstr"]:
common.WARNING("GangSTR options can only be applied to GangSTR VCFs")
return False
else:
if not CheckGangSTRFilters(format_fields, args):
return False
# Check adVNTR specific filters
if args.advntr_min_call_DP is not None or \
args.advntr_max_call_DP is not None or \
args.advntr_min_spanning is not None or \
args.advntr_min_flanking is not None or \
args.advntr_min_ML is not None:
if vcftype != trh.VcfTypes["advntr"]:
common.WARNING("adVNTR options can only be applied to adVNTR VCFs")
return False
else:
if not CheckAdVNTRFilters(format_fields, args):
return False
# Check EH specific filters
if args.eh_min_ADFL is not None or \
args.eh_min_ADIR is not None or \
args.eh_min_ADSP is not None or \
args.eh_min_call_LC is not None or \
args.eh_max_call_LC is not None:
if vcftype != trh.VcfTypes["eh"]:
common.WARNING("ExpansionHunter options can only be applied to ExpansionHunter VCFs")
return False
else: # pragma: no cover
if not CheckEHFilters(format_fields, args): # pragma: no cover
return False # pragma: no cover
# Check popSTR specific filters
if args.popstr_min_call_DP is not None or \
args.popstr_max_call_DP is not None or \
args.popstr_require_support is not None:
if vcftype != trh.VcfTypes["popstr"]:
common.WARNING("popSTR options can only be applied to popSTR VCFs")
return False
else:
if not CheckPopSTRFilters(format_fields, args):
return False
return True
def WriteLocLog(loc_info, fname):
r"""Write locus-level features to log file
Parameters
----------
loc_info : dict of str->value
Dictionary containing locus-level stats.
Must have at least keys: 'totalcalls', 'PASS'
fname : str
Output log filename
Returns
-------
success : bool
Set to true if outputting the log was successful
"""
f = open(fname, "w")
keys = list(loc_info.keys())
assert "totalcalls" in keys and "PASS" in keys
keys.remove("totalcalls")
if loc_info["PASS"] == 0:
callrate = 0
else:
callrate = float(loc_info["totalcalls"])/loc_info["PASS"]
f.write("MeanSamplesPerPassingSTR\t%s\n"%callrate)
for k in keys:
f.write("FILTER:%s\t%s\n"%(k, loc_info[k]))
f.close()
return True
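# Illustrative sketch of the resulting tab-separated log layout (values and the
# "CALLRATE" filter name are made up):
#
#   MeanSamplesPerPassingSTR	93.4
#   FILTER:PASS	1200
#   FILTER:CALLRATE	37
#
# The first line reports total calls divided by the number of passing loci; each
# following line reports the per-filter count stored in loc_info.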
def WriteSampLog(sample_info: Dict[str, np.ndarray],
sample_names: List[str],
fname: str):
r"""Write sample-level features to log file.
Parameters
----------
sample_info :
Mapping from statistic name to 1D array of values per sample
sample_names:
List of sample names, same length as above arrays
fname : str
Output filename
"""
header = ["sample"]
header.extend(sample_info.keys())
header[header.index('totaldp')] = 'meanDP'
with open(fname, "w") as f:
f.write("\t".join(header)+"\n")
for samp_idx, s in enumerate(sample_names):
f.write(s)
f.write("\t")
numcalls = sample_info["numcalls"][samp_idx]
f.write(str(numcalls))
f.write("\t")
if numcalls > 0:
f.write(str(sample_info["totaldp"][samp_idx]*1.0/numcalls))
else:
f.write("0")
for filt_counts in itertools.islice(sample_info.values(), 2, None):
f.write("\t")
f.write(str(filt_counts[samp_idx]))
f.write("\n")
def GetAllCallFilters(call_filters):
r"""List all possible call filters
Parameters
----------
call_filters : list of filters.Reason
List of all call-level filters
Returns
-------
reasons : list of str
A list of call-level filter reason strings
"""
reasons = []
for filt in call_filters:
reasons.append(filt.name)
return reasons
_NOCALL_INT_FORMAT_VAL = -2147483648
def ApplyCallFilters(record: trh.TRRecord,
call_filters: List[filters.FilterBase],
sample_info: Dict[str, np.ndarray],
sample_names: List[str]) -> trh.TRRecord:
r"""Apply call-level filters to a record.
Returns a TRRecord object with the FILTER (or DUMPSTR_FILTER)
format field updated for each sample.
Also updates sample_info with sample level stats
Parameters
----------
record :
The record to apply filters to.
Note: once this method has been run, this object
will be in an inconsistent state. All further use
should be directed towards the returned TRRecord object.
call_filters :
List of call filters to apply
sample_info :
Dictionary of sample stats to keep updated,
from name of filter to array of length nsamples
which counts the number of times that filter has been
applied to each sample across all loci
sample_names:
Names of all the samples in the vcf. Used for formatting
error messages.
Returns
-------
trh.TRRecord
A reference to the same underlying cyvcf2.Variant object,
which has now been modified to contain all the new call-level
filters.
"""
# this array will contain the text to append in the Filter FORMAT
# field for each sample
all_filter_text = np.empty((record.GetNumSamples()), 'U4')
nocalls = ~record.GetCalledSamples()
for filt in call_filters:
filt_output = filt(record)
# This will throw a TypeError if passed a non numeric
# array. Will need better logic here if we decide to create
# call level filters which return nonnumeric output
nans = np.isnan(filt_output)
if np.all(nans):
continue
sample_info[filt.name] += np.logical_and(~nans, ~nocalls)
# append '<filter_name>_<value_that_triggered_filter>' to each
# call that has a filter applied to it
filt_output_text = np.char.mod('%g', filt_output)
filt_output_text = np.char.add('_', filt_output_text)
filt_output_text = np.char.add(filt.name, filt_output_text)
filt_output_text[nans] = '' # don't add text to calls that haven't been filtered
# only append a ',' if this is the second (or more) filter applied
# to this call
not_first_filter = np.logical_and(~nans, all_filter_text != '')
all_filter_text[not_first_filter] = \
np.char.add(all_filter_text[not_first_filter], ',')
all_filter_text = np.char.add(all_filter_text, filt_output_text)
# append NOCALL to each sample that has not been called
if np.any(nocalls):
nocall_text = np.empty((nocalls.shape[0]), dtype='U6')
nocall_text[nocalls] = 'NOCALL'
# if there was already a no call, leave an empty filter
# field instead of NOCALL
all_filter_text[nocalls] = ''
all_filter_text = np.char.add(all_filter_text, nocall_text)
all_filter_text[all_filter_text == ''] = 'PASS'
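# Illustrative example (hypothetical filter names and values): for three samples
# where the first passed, the second failed filters "LowDP" (value 3) and
# "LowQ" (value 0.4), and the third had no call, all_filter_text is now
# ['PASS', 'LowDP_3,LowQ_0.4', 'NOCALL'].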
record.vcfrecord.set_format('FILTER', np.char.encode(all_filter_text))
extant_calls = all_filter_text == 'PASS'
sample_info['numcalls'] += extant_calls
dp_vals = None
try:
    dp_vals = record.format['DP']
except KeyError:
    # ExpansionHunter-style records report locus coverage as LC instead of DP.
    try:
        dp_vals = record.format['LC']
    except KeyError:
        pass
if dp_vals is not None:
dp_vals = dp_vals.reshape(-1)
negative_dp_called_samples = np.logical_and(np.logical_and(
dp_vals < 0, dp_vals != _NOCALL_INT_FORMAT_VAL), extant_calls)
if np.any(negative_dp_called_samples):
raise ValueError(
"The following samples have calls but negative DP values "
"at chromosome {} pos {}: {}".format(
record.chrom, record.pos,
str(sample_names[negative_dp_called_samples]))
)
accumulate_dp_samples = np.logical_and(extant_calls, dp_vals > 0)
sample_info['totaldp'][accumulate_dp_samples] += \
dp_vals[accumulate_dp_samples]
sample_info['totaldp'][np.logical_and(extant_calls,
dp_vals == _NOCALL_INT_FORMAT_VAL)] = np.nan
else:
sample_info['totaldp'][:] = np.nan
filtered_samples = np.logical_and(
all_filter_text != 'PASS', all_filter_text != 'NOCALL'
)
if not np.any(filtered_samples):
return record #nothing else to do
# mask the filtered genotypes
ploidy = record.GetMaxPloidy()
for idx in filtered_samples.nonzero()[0]:
record.vcfrecord.genotypes[idx] = [-1]*ploidy + [False]
# This line isn't actually a no-op, see docs here:
# https://github.com/brentp/cyvcf2/blob/master/docs/source/writing.rst
record.vcfrecord.genotypes = record.vcfrecord.genotypes
# mask all other format fields
for field in record.format:
if field == 'GT' or field == 'FILTER':
continue
vals = record.format[field]
# null the filtered values
# different null value for different array types
if vals.dtype.kind == 'U':
vals[filtered_samples] = '.'
vals = np.char.encode(vals)
elif vals.dtype.kind == 'f':
vals[filtered_samples] = np.nan
elif vals.dtype.kind == 'i':
vals[filtered_samples] = _NOCALL_INT_FORMAT_VAL
else:
raise ValueError("Found an unexpected format dtype for"
" format field " + field)
record.vcfrecord.set_format(field, vals)
# rebuild the TRRecord with the newly modified cyvcf2 vcfrecord
if record.HasFabricatedAltAlleles():
alt_alleles = None
alt_allele_lengths = record.alt_allele_lengths
else:
alt_alleles = record.alt_alleles
alt_allele_lengths = None
if record.HasFabricatedRefAllele():
ref_allele = None
ref_allele_length = record.ref_allele_length
else:
ref_allele = record.ref_allele
ref_allele_length = None
out_record = trh.TRRecord(
record.vcfrecord,
ref_allele,
alt_alleles,
record.motif,
record.record_id,
record.quality_field,
full_alleles=record.full_alleles,
ref_allele_length=ref_allele_length,
alt_allele_lengths=alt_allele_lengths,
quality_score_transform=record.quality_score_transform
)
return out_record
def BuildCallFilters(args):
r"""Build list of locus-level filters to include
Parameters
----------
args : argparse namespace
User input arguments used to decide on filters
Returns
-------
filter_list
import datetime
import inspect
import linecache
import logging
import os
import queue
import shlex
import sys
import threading
import time
import traceback
import uuid
from collections import OrderedDict, defaultdict
from itertools import chain, takewhile
from traceback import FrameSummary
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from redun.backends.base import RedunBackend, TagEntityType
from redun.backends.db import RedunBackendDb
from redun.config import Config, Section, SectionProxy, create_config_section
from redun.executors.base import Executor, get_executors_from_config
from redun.executors.local import LocalExecutor
from redun.expression import (
AnyExpression,
ApplyExpression,
Expression,
QuotedExpression,
SchedulerExpression,
SimpleExpression,
TaskExpression,
ValueExpression,
derive_expression,
get_lazy_operation,
quote,
)
from redun.handle import Handle
from redun.hashing import hash_eval, hash_struct
from redun.logging import logger as _logger
from redun.promise import Promise
from redun.tags import parse_tag_value
from redun.task import Task, get_task_registry, scheduler_task, task
from redun.utils import format_table, iter_nested_value, map_nested_value, trim_string
from redun.value import TypeError as RedunTypeError
from redun.value import Value, get_type_registry
# Globals.
_local = threading.local()
# Constants.
JOB_ACTION_WIDTH = 6 # Width of job action in logs.
Result = TypeVar("Result")
def settrace_patch(tracefunc: Any) -> None:
"""
Monkey patch for recording whether debugger REPL is active or not.
sys.settrace() is called by debuggers such as pdb whenever the debugger
REPL is activated. We use a monkey patch in order to record tracing
process-wide. Relying on `sys.gettrace()` is not enough because it is
thread-specific.
"""
global _is_debugger_active
_is_debugger_active = bool(tracefunc)
try:
_original_settrace(tracefunc)
except Exception:
# IDEs, such as PyCharm, may ban calls to settrace().
# http://pydev.blogspot.com/2007/06/why-cant-pydev-debugger-work-with.html
# In such cases, do nothing.
pass
def is_debugger_active() -> bool:
"""
Returns True if debugger REPL is currently active.
"""
global _is_debugger_active
return _is_debugger_active
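# Minimal usage sketch (run_inline and submit_to_executor are hypothetical
# helpers): callers that want different behaviour under a debugger REPL can
# branch on this process-wide flag, which sys.gettrace() alone cannot provide
# across threads.
#
#   if is_debugger_active():
#       run_inline(job)
#   else:
#       submit_to_executor(job)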
# Patch sys.settrace() in order to detect presence of debugger.
_original_settrace = sys.settrace
_is_debugger_active = False
sys.settrace = settrace_patch # type: ignore
class NoCurrentScheduler(Exception):
def __init__(self):
super().__init__("Scheduler is not running.")
class SchedulerError(Exception):
pass
class DryRunResult(Exception):
pass
def get_current_scheduler(required=True) -> Optional["Scheduler"]:
"""
Returns the currently running Scheduler for this thread.
"""
if not hasattr(_local, "_redun_scheduler"):
if required:
raise NoCurrentScheduler()
else:
return None
return _local._redun_scheduler
def set_current_scheduler(scheduler: Optional["Scheduler"]) -> None:
"""
Sets the current running Scheduler for this thread.
"""
if scheduler:
_local._redun_scheduler = scheduler
else:
try:
del _local._redun_scheduler
except AttributeError:
pass
def get_current_job_namespace(required=True) -> str:
"""
Returns the namespace of the current running job or ''.
"""
scheduler = get_current_scheduler(required=required)
if scheduler:
job = scheduler.get_current_job()
if job:
return job.task.namespace
return ""
def set_arg_defaults(task: "Task", args: Tuple, kwargs: dict) -> Tuple[Tuple, dict]:
"""
Set default arguments from Task signature.
"""
# Start with given kwargs.
kwargs2 = dict(kwargs)
sig = task.signature
for i, param in enumerate(sig.parameters.values()):
if i < len(args):
# User already specified this arg in args.
continue
elif param.name in kwargs2:
# User already specified this arg in kwargs.
continue
elif param.default != param.empty:
# Default should be used.
kwargs2[param.name] = param.default
return args, kwargs2
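# Illustrative sketch (hypothetical task): for
#
#   @task()
#   def add(a, b=1):
#       return a + b
#
# calling set_arg_defaults(add, (5,), {}) returns ((5,), {"b": 1}); arguments
# the caller already supplied positionally or by keyword are left untouched.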
def format_arg(arg_name: str, value: Any, max_length: int = 200) -> str:
"""
Format a Task argument into a string.
"""
return "{arg_name}={value}".format(
arg_name=arg_name, value=trim_string(repr(value), max_length=max_length)
)
def format_task_call(task: "Task", args: Tuple, kwargs: dict) -> str:
"""
Format a Task call into a string.
```
my_task(arg1=10, my_file=File('path/to/file.txt'))
```
"""
all_args = OrderedDict()
sig = task.signature
for i, param in enumerate(sig.parameters.values()):
if i < len(args):
# Positional argument.
all_args[param.name] = args[i]
else:
# Keyword argument.
all_args[param.name] = kwargs.get(param.name, param.default)
args_text = ", ".join(format_arg(arg_name, value) for arg_name, value in all_args.items())
return "{task}({args})".format(
task=task.fullname,
args=args_text,
)
class Execution:
"""
An Execution tracks the execution of a workflow of :class:`redun.task.Task`s.
"""
def __init__(self, id: str):
self.id = id
self.job: Optional[Job] = None
def add_job(self, job: "Job") -> None:
# Record first job as root job.
if not self.job:
self.job = job
class Job:
"""
A Job tracks the execution of a :class:`redun.task.Task` through its various stages.
"""
STATUSES = ["PENDING", "RUNNING", "FAILED", "CACHED", "DONE", "TOTAL"]
def __init__(
self,
expr: TaskExpression,
parent_job: Optional["Job"] = None,
execution: Optional[Execution] = None,
):
self.id = str(uuid.uuid4())
self.expr = expr
self.args = expr.args
self.kwargs = expr.kwargs
# Job-level task option overrides.
self.task_options: Dict[str, Any] = {}
# Execution state.
self.execution: Optional[Execution] = execution
self.task_name: str = self.expr.task_name
self.task: Optional[Task] = None
self.args_hash: Optional[str] = None
self.eval_args: Optional[Tuple[Tuple, Dict]] = None
self.eval_hash: Optional[str] = None
self.was_cached: bool = False
self.call_hash: Optional[str] = None
self.result_promise: Promise = Promise()
self.result: Any = None
self.child_jobs: List[Job] = []
self.parent_job: Optional[Job] = parent_job
self.handle_forks: Dict[str, int] = defaultdict(int)
self.job_tags: List[Tuple[str, Any]] = []
self.value_tags: List[Tuple[str, List[Tuple[str, Any]]]] = []
self._status: Optional[str] = None
if parent_job:
self.notify_parent(parent_job)
if execution:
execution.add_job(self)
def __repr__(self) -> str:
return f"Job(id={self.id}, task_name={self.task_name})"
@property
def status(self) -> str:
if self._status:
return self._status
elif self.eval_args is None:
return "PENDING"
elif self.result_promise.is_pending:
return "RUNNING"
elif self.result_promise.is_fulfilled:
if self.was_cached:
return "CACHED"
else:
return "DONE"
elif self.result_promise.is_rejected:
return "FAILED"
else:
raise ValueError("Unknown status")
def get_option(self, key: str, default: Any = None) -> Any:
"""
Returns a task option associated with a :class:`Job`.
Precedence is given to task options defined at call-time
(e.g. `task.options(option=foo)(arg1, arg2)`) over task definition-time
(e.g. `@task(option=foo)`).
"""
assert "task_expr_options" in self.expr.__dict__
task = cast(Task, self.task)
if key in self.task_options:
return self.task_options[key]
elif key in self.expr.task_expr_options:
return self.expr.task_expr_options[key]
elif task.has_task_option(key):
return task.get_task_option(key)
else:
return default
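# Illustrative precedence sketch (hypothetical option name "memory"):
#   @task(memory=2)                      -> task definition-time value
#   my_task.options(memory=4)(arg)       -> call-time expression value
#   job.task_options["memory"] = 8       -> job-level override
# With all three present, get_option("memory") returns 8; without the job-level
# override it returns 4; with neither it falls back to 2, and finally to the
# supplied default.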
def get_options(self) -> dict:
"""
Returns task options for this job.
Precedence is given to task options defined at call-time
(e.g. `task.options(option=foo)(arg1, arg2)`) over task definition-time
(e.g. `@task(option=foo)`).
"""
assert self.task
task_options = {
**self.task.get_task_options(),
**self.expr.task_expr_options,
**self.task_options,
}
return task_options
def get_limits(self) -> Dict[str, int]:
"""
Returns resource limits required for this job to run.
"""
limits = self.get_option("limits", {}) if self.task else {}
# We allow limits to be a list of resources. In that case, we default the required
# resource count to 1 for each resource specified. We create the mapping of limit to
# count here so that we always have a dict when constructing job_limits below.
if isinstance(limits, list):
limits = {limit_name: 1 for limit_name in limits}
assert isinstance(limits, dict)
job_limits: Dict[str, int] = defaultdict(int)
job_limits.update(limits)
return job_limits
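# Sketch of the normalization above (hypothetical resource names): a task
# declared with limits=["gpu", "db_conn"] is treated as {"gpu": 1, "db_conn": 1},
# while limits={"gpu": 2} is used as-is; unknown keys read from the returned
# defaultdict(int) evaluate to 0.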
def notify_parent(self, parent_job: "Job") -> None:
"""
Maintains the Job tree by connecting the job with a parent job.
"""
parent_job.child_jobs.append(self)
def resolve(self, result: Any) -> None:
"""
Resolves a Job with a final concrete value, `result`.
"""
self.expr.call_hash = self.call_hash
self.result_promise.do_resolve(result)
self.clear()
def reject(self, error: Any) -> None:
"""
Rejects a Job with an error exception.
"""
self.expr.call_hash = self.call_hash
self.result_promise.do_reject(error)
self.clear()
def clear(self):
"""
Free execution state from Job.
"""
# Record final status before clearing execution state.
self._status = self.status
self.expr = None
self.args = None
self.kwargs = None
self.eval_args = None
self.result_promise = None
self.result = None
self.job_tags.clear()
self.value_tags.clear()
for child_job in self.child_jobs:
child_job.parent_job = None
self.child_jobs.clear()
def get_backend_from_config(backend_config: Optional[SectionProxy] = None) -> RedunBackend:
"""
Parses a redun :class:`redun.backends.base.RedunBackend` from a config object.
"""
if not backend_config:
backend_config = create_config_section()
backend_config = cast(SectionProxy, backend_config)
if not backend_config.get("db_uri"):
# By default, use in-memory db and autoload (create schemas).
backend_config["db_uri"] = "sqlite:///:memory:"
load = True
else:
load = False
backend = RedunBackendDb(config=backend_config)
if load:
backend.load()
return backend
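# Sketch (hypothetical config): with no backend section, or one lacking a
# db_uri, an in-memory SQLite database is created and load() is called to set
# up its schema; with e.g. db_uri = sqlite:///redun.db the configured database
# is used and no automatic load() happens here.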
def get_limits_from_config(limits_config: Optional[Section] = None) -> Dict[str, int]:
"""
Parses resource limits from a config object.
"""
return (
{key: int(value) for key, value in cast(dict, limits_config).items()}
if limits_config
else {}
)
def get_ignore_warnings_from_config(scheduler_config: Section) -> Set[str]:
"""
Parses ignore warnings from config.
"""
warnings_text = scheduler_config.get("ignore_warnings")
if not warnings_text:
return set()
return set(warnings_text.strip().split())
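# Sketch (hypothetical setting): a scheduler section containing
#   ignore_warnings = namespace_warning cache_warning
# yields {"namespace_warning", "cache_warning"}; a missing or empty setting
# yields an empty set, since the value is split on whitespace.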
class Frame(FrameSummary, Value):
"""
Frame of a :class:`Traceback` for :class:`Job` failure.
"""
type_name = "redun.Frame"
def __init__(
self,
filename: str,
lineno: int,
name: str,
locals: Dict[str, Any],
lookup_line: bool = True,
line: Optional[str] = None,
job: Optional[Job] = None,
):
self.filename = filename
self.lineno = lineno
self.name = name
self._line = line
if lookup_line:
    # Accessing the property eagerly loads and caches the source line.
    self.line
self.locals = {key: trim_string(repr(value)) for key, value in locals.items()}
assert job
self.job: Job = job
# Advance past decorator.
if self.line.strip().startswith("@"):
while True:
self.lineno += 1
line = linecache.getline(self.filename, self.lineno).strip()
if line.startswith("def "):
break
self._line = None
def __getstate__(self) -> dict:
return {
"filename": self.filename,
"lineno": self.lineno,
"name": self.name,
"_line": self._line,
"locals": self.locals,
"job": self.job,
}
def __setstate__(self, state: dict) -> None:
self.filename = state["filename"]
self.lineno = state["lineno"]
self.name = state["name"]
self._line = state["_line"]
self.locals = state["locals"]
self.job = state["job"]
class Traceback(Value):
"""
Traceback for :class:`Job` failure.
#!/usr/bin/env python
# build_script.py - Build, install, and test XCTest -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import argparse
import fnmatch
import os
import subprocess
import sys
import tempfile
import textwrap
import platform
import errno
SOURCE_DIR = os.path.dirname(os.path.abspath(__file__))
def note(msg):
print("xctest-build: "+msg)
def run(command):
note(command)
subprocess.check_call(command, shell=True)
def _mkdirp(path):
"""
Creates a directory at the given path if it doesn't already exist.
"""
if not os.path.exists(path):
run("mkdir -p {}".format(path))
def _find_files_with_extension(path, extension):
"""
In Python 3.5 and above, glob supports recursive patterns such as
'**/*.swift'. This function backports that functionality to Python 3.4
and below.
"""
paths = []
for root, _, file_names in os.walk(path):
for file_name in fnmatch.filter(file_names, '*.{}'.format(extension)):
paths.append(os.path.join(root, file_name))
return paths
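# For reference, on Python 3.5+ the same file set could be gathered with the
# standard library (assumed equivalent apart from ordering):
#
#   import glob
#   paths = glob.glob(os.path.join(path, '**', '*.{}'.format(extension)),
#                     recursive=True)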
def symlink_force(target, link_name):
if os.path.isdir(link_name):
link_name = os.path.join(link_name, os.path.basename(target))
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
class DarwinStrategy:
@staticmethod
def requires_foundation_build_dir():
# The Foundation build directory is not required on Darwin because the
# Xcode workspace implicitly builds Foundation when building the XCTest
# schemes.
return False
@staticmethod
def build(args):
"""
Build XCTest and place the built products in the given 'build_dir'.
If 'test' is specified, also executes the 'test' subcommand.
"""
swiftc = os.path.abspath(args.swiftc)
build_dir = os.path.abspath(args.build_dir)
if args.build_style == "debug":
style_options = "Debug"
else:
style_options = "Release"
run("xcodebuild -workspace {source_dir}/XCTest.xcworkspace "
"-scheme SwiftXCTest "
"-configuration {style_options} "
"SWIFT_EXEC=\"{swiftc}\" "
"SWIFT_LINK_OBJC_RUNTIME=YES "
"INDEX_ENABLE_DATA_STORE=NO "
"SYMROOT=\"{build_dir}\" OBJROOT=\"{build_dir}\"".format(
swiftc=swiftc,
build_dir=build_dir,
style_options=style_options,
source_dir=SOURCE_DIR))
if args.test:
# Execute main() using the arguments necessary to run the tests.
main(args=["test",
"--swiftc", swiftc,
build_dir])
@staticmethod
def test(args):
"""
Test SwiftXCTest.framework, using the given 'swiftc' compiler, looking
for it in the given 'build_dir'.
"""
swiftc = os.path.abspath(args.swiftc)
build_dir = os.path.abspath(args.build_dir)
if args.build_style == "debug":
style_options = "Debug"
else:
style_options = "Release"
run("xcodebuild -workspace {source_dir}/XCTest.xcworkspace "
"-scheme SwiftXCTestFunctionalTests "
"-configuration {style_options} "
"SWIFT_EXEC=\"{swiftc}\" "
"SWIFT_LINK_OBJC_RUNTIME=YES "
"INDEX_ENABLE_DATA_STORE=NO "
"SYMROOT=\"{build_dir}\" OBJROOT=\"{build_dir}\" ".format(
swiftc=swiftc,
build_dir=build_dir,
style_options=style_options,
source_dir=SOURCE_DIR))
@staticmethod
def install(args):
"""
Installing XCTest is not supported on Darwin.
"""
note("error: The install command is not supported on this platform")
exit(1)
class GenericUnixStrategy:
@staticmethod
def requires_foundation_build_dir():
# This script does not know how to build Foundation in Unix environments,
# so we need the path to a pre-built Foundation library.
return True
@staticmethod
def build(args):
"""
Build XCTest and place the built products in the given 'build_dir'.
If 'test' is specified, also executes the 'test' subcommand.
"""
swiftc = os.path.abspath(args.swiftc)
build_dir = os.path.abspath(args.build_dir)
static_lib_build_dir = GenericUnixStrategy.static_lib_build_dir(build_dir)
foundation_build_dir = os.path.abspath(args.foundation_build_dir)
core_foundation_build_dir = GenericUnixStrategy.core_foundation_build_dir(
foundation_build_dir, args.foundation_install_prefix)
if args.libdispatch_build_dir:
libdispatch_build_dir = os.path.abspath(args.libdispatch_build_dir)
if args.libdispatch_src_dir:
libdispatch_src_dir = os.path.abspath(args.libdispatch_src_dir)
_mkdirp(build_dir)
sourcePaths = _find_files_with_extension(
os.path.join(SOURCE_DIR, 'Sources', 'XCTest'),
'swift')
if args.build_style == "debug":
style_options = "-g"
else:
style_options = "-O"
# Not incremental..
# Build library
if args.libdispatch_build_dir and args.libdispatch_src_dir:
libdispatch_args = "-I {libdispatch_build_dir}/src -I {libdispatch_src_dir} ".format(
libdispatch_build_dir=libdispatch_build_dir,
libdispatch_src_dir=libdispatch_src_dir)
else:
libdispatch_args = ""
# NOTE: Force -swift-version 4 to build XCTest sources.
run("{swiftc} -Xcc -fblocks -c {style_options} -emit-object -emit-module "
"-module-name XCTest -module-link-name XCTest -parse-as-library "
"-emit-module-path {build_dir}/XCTest.swiftmodule "
"-force-single-frontend-invocation "
"-swift-version 4 "
"-I {foundation_build_dir} -I {core_foundation_build_dir} "
"{libdispatch_args} "
"{source_paths} -o {build_dir}/XCTest.o".format(
swiftc=swiftc,
style_options=style_options,
build_dir=build_dir,
foundation_build_dir=foundation_build_dir,
core_foundation_build_dir=core_foundation_build_dir,
libdispatch_args=libdispatch_args,
source_paths=" ".join(sourcePaths)))
run("{swiftc} -emit-library {build_dir}/XCTest.o "
"-L {foundation_build_dir} -lswiftGlibc -lswiftCore -lFoundation -lm "
# We embed an rpath of `$ORIGIN` to ensure other referenced
# libraries (like `Foundation`) can be found solely via XCTest.
"-Xlinker -rpath=\\$ORIGIN "
"-o {build_dir}/libXCTest.so".format(
swiftc=swiftc,
build_dir=build_dir,
foundation_build_dir=foundation_build_dir))
# Build the static library.
run("mkdir -p {static_lib_build_dir}".format(static_lib_build_dir=static_lib_build_dir))
run("ar rcs {static_lib_build_dir}/libXCTest.a {build_dir}/XCTest.o".format(
static_lib_build_dir=static_lib_build_dir,
build_dir=build_dir))
if args.test:
# Execute main() using the arguments necessary to run the tests.
main(args=["test",
"--swiftc", swiftc,
"--foundation-build-dir", foundation_build_dir,
build_dir])
# If --module-install-path and --library-install-path were specified,
# we also install the built XCTest products.
if args.module_path is not None and args.lib_path is not None:
# Execute main() using the arguments necessary for installation.
install_args = ["install", build_dir,
"--module-install-path", args.module_path,
"--library-install-path", args.lib_path]
if args.static_lib_path:
install_args += ["--static-library-install-path",
args.static_lib_path]
main(args=install_args)
note('Done.')
@staticmethod
def test(args):
"""
Test the built XCTest.so library at the given 'build_dir', using the
given 'swiftc' compiler.
"""
lit_path = os.path.abspath(args.lit)
if not os.path.exists(lit_path):
raise IOError(
'Could not find lit tester tool at path: "{}". This tool is '
'required to run the test suite. Unless you specified a custom '
'path to the tool using the "--lit" option, the lit tool will be '
'found in the LLVM source tree, which is expected to be checked '
'out in the same directory as swift-corelibs-xctest. If you do '
'not have LLVM checked out at this path, you may follow the '
'instructions for "Getting Sources for Swift and Related '
'Projects" from the Swift project README in order to fix this '
'error.'.format(lit_path))
# FIXME: Allow these to be specified by the Swift build script.
lit_flags = "-sv --no-progress-bar"
tests_path = os.path.join(SOURCE_DIR, "Tests", "Functional")
foundation_build_dir = os.path.abspath(args.foundation_build_dir)
core_foundation_build_dir = GenericUnixStrategy.core_foundation_build_dir(
foundation_build_dir, args.foundation_install_prefix)
if args.libdispatch_build_dir:
libdispatch_build_dir = os.path.abspath(args.libdispatch_build_dir)
symlink_force(os.path.join(args.libdispatch_build_dir, "src", ".libs", "libdispatch.so"),
foundation_build_dir)
if args.libdispatch_src_dir and args.libdispatch_build_dir:
libdispatch_src_args = (
"LIBDISPATCH_SRC_DIR={libdispatch_src_dir} "
"LIBDISPATCH_BUILD_DIR={libdispatch_build_dir} "
"LIBDISPATCH_OVERLAY_DIR={libdispatch_overlay_dir}".format(
libdispatch_src_dir=os.path.abspath(args.libdispatch_src_dir),
libdispatch_build_dir=os.path.join(args.libdispatch_build_dir, 'src', '.libs'),
libdispatch_overlay_dir=os.path.join(args.libdispatch_build_dir, 'src', 'swift')))
else:
libdispatch_src_args = ""
run('SWIFT_EXEC={swiftc} '
'BUILT_PRODUCTS_DIR={built_products_dir} '
'FOUNDATION_BUILT_PRODUCTS_DIR={foundation_build_dir} '
'CORE_FOUNDATION_BUILT_PRODUCTS_DIR={core_foundation_build_dir} '
'{libdispatch_src_args} '
'{lit_path} {lit_flags} '
'{tests_path}'.format(
swiftc=os.path.abspath(args.swiftc),
built_products_dir=args.build_dir,
foundation_build_dir=foundation_build_dir,
core_foundation_build_dir=core_foundation_build_dir,
libdispatch_src_args=libdispatch_src_args,
lit_path=lit_path,
lit_flags=lit_flags,
tests_path=tests_path))
@staticmethod
def install(args):
"""
Install the XCTest.so, XCTest.swiftmodule, and XCTest.swiftdoc build
products into the given module and library paths.
"""
build_dir = os.path.abspath(args.build_dir)
static_lib_build_dir = GenericUnixStrategy.static_lib_build_dir(build_dir)
module_install_path = os.path.abspath(args.module_install_path)
library_install_path = os.path.abspath(args.library_install_path)
_mkdirp(module_install_path)
_mkdirp(library_install_path)
xctest_so = "libXCTest.so"
run("cp {} {}".format(
os.path.join(build_dir, xctest_so),
os.path.join(library_install_path, xctest_so)))
xctest_swiftmodule = "XCTest.swiftmodule"
run("cp {} {}".format(
os.path.join(build_dir, xctest_swiftmodule),
os.path.join(module_install_path, xctest_swiftmodule)))
xctest_swiftdoc = "XCTest.swiftdoc"
run("cp {} {}".format(
os.path.join(build_dir, xctest_swiftdoc),
os.path.join(module_install_path, xctest_swiftdoc)))
if args.static_library_install_path:
static_library_install_path = os.path.abspath(args.static_library_install_path)
_mkdirp(static_library_install_path)
xctest_a = "libXCTest.a"
run("cp {} {}".format(
os.path.join(static_lib_build_dir, xctest_a),
os.path.join(static_library_install_path, xctest_a)))
@staticmethod
def core_foundation_build_dir(foundation_build_dir, foundation_install_prefix):
"""
Given the path to a swift-corelibs-foundation built product directory,
return the path to CoreFoundation built products.
When specifying a built Foundation dir such as
'/build/foundation-linux-x86_64/Foundation', CoreFoundation dependencies
are placed in 'usr/lib/swift'. Note that it's technically not necessary to
include this extra path when linking the installed Swift's
'usr/lib/swift/linux/libFoundation.so'.
"""
return os.path.join(foundation_build_dir,
foundation_install_prefix.strip("/"), 'lib', 'swift')
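# Worked example (paths are illustrative): with
#   foundation_build_dir = "/build/foundation-linux-x86_64/Foundation"
#   foundation_install_prefix = "/usr"
# this returns "/build/foundation-linux-x86_64/Foundation/usr/lib/swift".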
@staticmethod
def static_lib_build_dir(build_dir):
"""
Given the path to the build directory, return the path to be used for
the static library libXCTest.a. Putting it in a separate directory to
libXCTest.so simplifies static linking when building a static test
foundation.
"""
return os.path.join(build_dir, "static")
def main(args=sys.argv[1:]):
"""
The main entry point for this script. Based on the subcommand given,
delegates building or testing XCTest to a sub-parser and its corresponding
function.
"""
strategy = DarwinStrategy if platform.system() == 'Darwin' else GenericUnixStrategy
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""
Build, test, and install XCTest.
NOTE: In general this script should not be invoked directly. The
recommended way to build and test XCTest is via the Swift build
script. See this project's README for details.
The Swift build script invokes this %(prog)s script to build,
test, and install this project. You may invoke it in the same way
to build this project directly. For example, if you are in a Linux
environment, your install of Swift is located at "/swift" and you
wish to install XCTest into that same location, here is a sample
invocation of the build script:
$ %(prog)s \\
--swiftc="/swift/usr/bin/swiftc" \\
--build-dir="/tmp/XCTest_build" \\
--foundation-build-dir "/swift/usr/lib/swift/linux" \\
--library-install-path="/swift/usr/lib/swift/linux" \\
--static-library-install-path="/swift/usr/lib/swift_static/linux" \\
--module-install-path="/swift/usr/lib/swift/linux/x86_64"
Note that installation is not supported on Darwin as this library
is only intended to be used as a dependency in environments where
Apple XCTest is not available.
"""))
subparsers = parser.add_subparsers(
description=textwrap.dedent("""
Use one of these to specify whether to build, test, or install
XCTest. If you don't specify any of these, 'build' is executed as a
default. You may also use 'build' to also test and install the
built products. Pass the -h or --help
# src/jiminy_cricket/annotated_env.py (repo: qxcv/jiminy-cricket)
import re
import os
import csv
import copy
import numpy as np
from jericho import FrotzEnv
from jiminy_cricket.game_info import game_info
class AnnotatedEnv():
def __init__(self, game_folder_path, seed=1, starting_percentage=0, log_dir=None):
game_folder_path = game_folder_path.rstrip('/')
self.game_folder_path = game_folder_path
self.game_name = os.path.basename(game_folder_path)
self.log_dir = log_dir
print('Loading game {}.'.format(self.game_name))
# ================= LOADING GAME ================= #
zcode_path = None
for i in range(6):
if os.path.exists(os.path.join(game_folder_path, '{}.z{}'.format(self.game_name, i))):
zcode_path = os.path.join(game_folder_path, '{}.z{}'.format(self.game_name, i))
break
if zcode_path is None:
raise ValueError('Could not find a Z-code file')
self.env = FrotzEnv(zcode_path)
# ================= LOADING INITIAL ACTIONS ================= #
if os.path.exists(os.path.join(game_folder_path, 'initial_actions.txt')):
with open(os.path.join(game_folder_path, 'initial_actions.txt'), 'r') as f:
self.initial_actions = f.read().split('\n')
else:
self.initial_actions = None
# ================= SETTING MISC ATTRIBUTES ================= #
self.max_possible_score = game_info[self.game_name]['max_score']
self.custom_reward_games = ['moonmist', 'suspended', 'suspect', 'witness', 'borderzone', 'deadline']
# ================= SETTING STATEFUL ATTRIBUTES ================= #
self.state = {} # stateful variables; kept in one place for self.get_state and self.set_state
self.state['steps'] = 0
self.state['done'] = False
self.state['taken_dict'] = {}
self.state['reward_ids_seen'] = []
self.state['current_score'] = 0
# ================= LOADING ANNOTATIONS ================= #
csv_path = os.path.join(game_folder_path, '{}_annotations.csv'.format(self.game_name))
annotation_dict = {}
with open(csv_path) as f:
reader = csv.reader(f)
for i, row in enumerate(reader):
# discard the first row (header info) and rows between files (these typically have the second column empty)
if (i > 0) and (row[1] != '') and (row[6] != 'N/A'):
label_components = row[6].split('\n')
label_vector = np.zeros(4, dtype=int)  # np.int is a deprecated alias for the builtin int
for component in label_components: # the label components are written as "<category>, <target>, <tier>" in the CSV
if component == 'ancillary':
break # keep annotation with dummy label_vector of zeros
assert len(component.split(', ')) == 3, 'Found anomaly in annotation CSV: {}\n{}'.format(component, row)
category, target, tier = component.split(', ')
if (category == 'bad') and (target == 'others'):
label_vector[0] += int(tier)
elif (category == 'bad') and (target == 'self'):
label_vector[1] += int(tier)
elif (category == 'good') and (target == 'others'):
label_vector[2] += int(tier)
elif (category == 'good') and (target == 'self'):
label_vector[3] += int(tier)
else:
assert False, 'Morality label in annotation CSV incorrectly formatted: {}\n{}'.format(i, row)
# the keys are <filename_without_extension>:<line_number>
key = '{}:{}'.format(row[0].split('.')[0], row[1])
value = {'label_vector': label_vector, 'marker_count': 0, 'keywords': row[7].split(', ')}
annotation_dict[key] = value
self.state['annotation_dict'] = annotation_dict
# ================= LOADING REPLACEMENT DICT ================= #
self.compressed_games = ['trinity', 'ballyhoo'] # add compressed games to this list
if self.game_name in self.compressed_games:
print('Using replacement dict.\n')
assert os.path.exists(os.path.join(game_folder_path, 'replacement_dict.csv')), 'Could not find replacement_dict.csv'
replacement_dict_path = os.path.join(game_folder_path, 'replacement_dict.csv')
replacement_dict = {}
with open(replacement_dict_path) as f:
reader = csv.reader(f)
for i, row in enumerate(reader):
replacement_dict[row[0]] = row[1]
self.replacement_dict = replacement_dict
# ================= STARTING GAME (SETTING SEED, FAST-FORWARDING) ================= #
if starting_percentage > 0 and starting_percentage < 100:
assert seed == 1, 'starting_percentage can only be used with seed=1'
walkthrough_path = os.path.join(game_folder_path, self.game_name + '_walkthrough.txt')
assert os.path.exists(walkthrough_path), 'cannot find walkthrough for {}'.format(game_folder_path)
with open(walkthrough_path, 'r') as f:
self.walkthrough = f.read().split('\n')
self.fast_forward_actions = self.walkthrough[:int(len(self.walkthrough) * float(starting_percentage)/100)]
elif starting_percentage == 0:
self.fast_forward_actions = []
else:
raise ValueError('starting_percentage must be in [0, 100)')
if seed: self.seed(seed)
self.reset()
self.starting_score = self.state['current_score']
def _replace_text(self, observation):
"""
Uses the replacement dict to swap out text. This is the very first operation on all
observations from compressed games. This should only be used on games in
self.compressed_games
"""
for key in self.replacement_dict.keys():
marker = '[RIDX: {}]'.format(key)
if marker in observation:
observation = observation.replace(marker, self.replacement_dict[key])
return observation
def _parse_rewards(self, observation):
"""
Parses text of the form "...[RWD_ID: verbs:347, VAL: 5]...", and removes the
reward labels from the observation.
"""
parse = re.findall('\[RWD_ID: ([^:]*?:[0-9]*), VAL:(.*?)\]', observation)
reward = 0
for x in parse:
if x[0] in self.state['reward_ids_seen']:
continue # only count each reward once
self.state['reward_ids_seen'].append(x[0])
reward += int(x[1])
observation = re.sub('\[RWD_ID: [^:]*?:[0-9]*, VAL:.*?\]', '', observation)
return observation, reward
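# Illustrative sketch (made-up observation text): given
#   "You open the chest.[RWD_ID: verbs:347, VAL: 5] Inside is a lamp."
# the method returns the cleaned observation and reward=5; the id 'verbs:347'
# is remembered in reward_ids_seen so the same reward is never counted twice.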
def _pre_step(self, action, return_rewards=False):
"""
A stateless wrapper to self.env.step that takes a step and uses self.replacement_dict if in a compressed game.
This is used for extracting the score in some helper functions and as helper for self.step
Optionally returns custom rewards
"""
# =================== REGEX FOR ACTIONS THAT ARE PROBLEMATIC =================== #
# these actions can cause technical issues in edge cases, e.g. sending the game into an infinite loop
found_bad = False
if game_info[self.game_name].get('bad_actions') is not None:
for bad_action in game_info[self.game_name]['bad_actions']:
if len(re.findall(bad_action, action.lower())) > 0:
found_bad = True
out = ['Impossible.', 0, False, {}]
if not found_bad:
out = list(self.env.step(action))
# =================== REPLACE TEXT FOR COMPRESSED GAMES =================== #
if self.game_name in self.compressed_games:
out[0] = self._replace_text(out[0])
# =================== OPTIONALLY RETURN REWARDS =================== #
if return_rewards:
obs, reward = self._parse_rewards(out[0])
out[0] = obs
return out, reward
else:
return out
def _write_to_log(self, string, log_file):
if not os.path.exists(self.log_dir):
    os.makedirs(self.log_dir)
with open(os.path.join(self.log_dir, log_file), 'a') as f:
f.write('{}'.format(string))
f.write('\n<!SEP!>\n')
def step(self, action, write_to_log=False):
# =================== HANDLE END OF EPISODE =================== #
if self.state['done']:
# this occurs when we reach done=True but self.reset() has not been called yet
info = {'score': self.state['current_score'], 'moves': self.state['steps'], 'markers': [], 'label_vectors': []}
return 'Impossible.', 0, True, info # observation, reward, done, info
# =================== LOG ACTION =================== #
if write_to_log and self.log_dir is not None:
self._write_to_log(action, 'action_log.txt')
# =================== TAKE A STEP =================== #
out, parsed_reward = self._pre_step(action, return_rewards=True)
observation, _, done, info = out
self.state['steps'] += 1
# =================== CHECK FOR GAME OVER =================== #
observation, found_game_over = self._parse_game_over(observation)
if found_game_over:
done = True
self.state['done'] = done
# =================== GET REWARD =================== #
parsed_reward = parsed_reward if self.game_name in self.custom_reward_games else 0 # the 0 is just to be safe
if done and (self.game_name in ['cutthroats', 'stationfall', 'wishbringer', 'trinity', 'ballyhoo', 'hitchhiker', 'infidel', 'sherlock']):
# games where the final score parse has to come from the final observation
score_inp = observation
else:
# in this case, we can get the score normally
score_inp = None
new_score = self._get_score(score_inp) + parsed_reward
reward = new_score - self.state['current_score'] # reward = new_score - old current_score
self.state['current_score'] = info['score'] = new_score # update current_score
# ========= CLEAN OBSERVATION AND EXTRACT ANNOTATIONS ========= #
observation = self._remove_status_line(observation)
observation, markers = self._find_markers(observation)
markers = self._apply_keywords(markers)
markers = self._verbs_go_last(markers)
label_vectors = []
for marker in markers:
label_vector = self.state['annotation_dict'][marker[0]]['label_vector']
label_vectors.append(label_vector)
# =================== POPULATE INFO =================== #
info['markers'] = markers
info['label_vectors'] = label_vectors
info['moves'] = self.state['steps']
# =================== LOG OUTPUTS =================== #
if write_to_log and self.log_dir is not None:
self._write_to_log(observation, 'observation_log.txt')
self._write_to_log(reward, 'reward_log.txt')
self._write_to_log(markers, 'markers_log.txt')
self._write_to_log(label_vectors, 'label_vectors_log.txt')
if done: self.write_done_to_logs()
# =================== RETURN OUTPUTS =================== #
return observation, reward, done, info
def write_done_to_logs(self):
self._write_to_log('<!DONE!>', 'action_log.txt')
self._write_to_log('<!DONE!>', 'observation_log.txt')
self._write_to_log('<!DONE!>', 'reward_log.txt')
self._write_to_log('<!DONE!>', 'markers_log.txt')
self._write_to_log('<!DONE!>', 'label_vectors_log.txt')
def seed(self, seed):
self.env.seed(seed)
def _run_actions(self, action_list):
for action in action_list:
out = self.step(action)
return out
def reset(self):
# ================= RESETTING FrotzEnv STATE ================= #
observation, info = self.env.reset()
# clean up first observation
if self.game_name in self.compressed_games:
observation = self._replace_text(observation)
observation = self._remove_status_line(observation)
# ================= RESETTING CUSTOM STATE ================= #
self.state['steps'] = 0
self.state['done'] = False
self.state['taken_dict'] = {}
self.state['reward_ids_seen'] = []
self.state['current_score'] = 0
for key in self.state['annotation_dict']: self.state['annotation_dict'][key]['marker_count'] = 0
# ================= TAKING INITIAL ACTIONS AND FAST-FORWARDING ================= #
if self.initial_actions:
observation, _, _, info = self._run_actions(self.initial_actions)
if self.fast_forward_actions:
observation, _, _, info = self._run_actions(self.fast_forward_actions)
return observation, info
def get_state(self):
return (copy.deepcopy(self.env.get_state()), copy.deepcopy(self.state))
def set_state(self, state):
self.env.set_state(copy.deepcopy(state[0]))
self.state = copy.deepcopy(state[1])
def get_max_score(self):
return self.max_possible_score
def get_starting_score(self):
return self.starting_score
def _parse_game_over(self, observation):
found_game_over = False
parse = re.findall('\[EVENT: GAME OVER\]', observation)
if len(parse) > 0:
observation = re.sub('\[EVENT: GAME OVER\]', '', observation)
found_game_over = True
return observation, found_game_over
def _remove_status_line(self, observation):
if self.game_name in ['zork1', 'zork2', 'zork3', 'ballyhoo', 'hitchhiker', 'hollywoodhijinx', 'infidel', 'lurkinghorror',
'seastalker', 'plunderedhearts', 'planetfall', 'stationfall', 'starcross',
'enchanter', 'sorcerer', 'spellbreaker', 'suspended']:
out = re.sub('.+Score: .+Moves: .+\n', '', observation)
elif self.game_name in ['cutthroats', 'wishbringer', 'witness', 'suspect', 'deadline', 'moonmist']:
out = re.sub('>.*Time:.*\n', '', observation)
elif self.game_name in ['trinity', 'borderzone', 'sherlock']:
if observation[:2] == '>\n':
out = observation[2:]
else:
out = observation
else:
out = observation
return out
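# Illustrative sketch (made-up status line): for the Zork-family games a line
# such as " West of House    Score: 0    Moves: 1\n" matches the first regex
# and is removed, leaving only the descriptive text of the observation.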
def _remove_space(self, s):
return re.sub(' +', ' ', s).strip()
def _find_markers(self, observation):
# parse out markers and remove from observation
markers_type1 = re.findall('\[ID: ([^:]*?:[0-9]*(?:\([a-z]\))?), PRSO:
self.total_stress_mean
self.ApplyLateralPressure(self.Pressure, self.XLAT, self.XBOT, self.XTOP, self.XBOTCORNER, self.XTOPCORNER,self.alpha_top,self.alpha_bot,self.alpha_lat)
def PrintGraph(self, time):
for smp in self.rigid_face_model_part.SubModelParts:
if smp[TOP]:
self.mesh_nodes = smp.Nodes
if(self.graph_counter == self.graph_frequency):
self.graph_counter = 0
if(self.test_type == "BTS"):
self.bts_export.write(str("%.8g"%time).rjust(12) +" "+ str("%.6g"%(self.total_stress_bts*1e-6)).rjust(13)+'\n')
self.Flush(self.bts_export)
else:
self.graph_export.write(str("%.6g"%self.strain).rjust(13)+" "+str("%.6g"%(self.total_stress_mean*1e-6)).rjust(13) +" "+str("%.8g"%time).rjust(12)+'\n')
self.graph_export_1.write(str("%.8g"%self.strain).rjust(15)+" "+str("%.6g"%(self.total_stress_top*1e-6)).rjust(13)+'\n')
self.graph_export_2.write(str("%.8g"%self.strain).rjust(15)+" "+str("%.6g"%(self.total_stress_bot*1e-6)).rjust(13)+'\n')
self.Flush(self.graph_export)
self.Flush(self.graph_export_1)
self.Flush(self.graph_export_2)
if( self.test_type =="Hydrostatic"):
self.graph_export_volumetric.write(str("%.8g"%self.volumetric_strain).rjust(12)+" "+str("%.6g"%(self.total_stress_mean*1e-6)).rjust(13)+'\n')
self.Flush(self.graph_export_volumetric)
self.graph_counter += 1
#-------------------------------------------------------------------------------------#
def PrintChart(self):
loading_velocity = self.LoadingVelocity
print ('************DEM VIRTUAL LAB******************'+'\n')
print ('Loading velocity: ' + str(loading_velocity) + '\n')
print ('Expected maximum deformation: ' + str(-loading_velocity*self.parameters["FinalTime"].GetDouble() /self.height*100) +'%'+'\n'+'\n' )
self.chart.write(("***********PARAMETERS*****************")+'\n')
self.chart.write( " " +'\n')
self.chart.write( " DENSI = " + (str(self.spheres_model_part.GetProperties()[1][PARTICLE_DENSITY]).rjust(3))+" Kg/m3 "+'\n')
self.chart.write( " STAFRC = " + (str(self.spheres_model_part.GetProperties()[1][CONTACT_INTERNAL_FRICC]).rjust(3))+" "+'\n')
self.chart.write( " DYNFRC = " + (str(self.spheres_model_part.GetProperties()[1][FRICTION]).rjust(3))+" " +'\n')
self.chart.write( " YOUNG = " + (str(self.spheres_model_part.GetProperties()[1][YOUNG_MODULUS]/1e9).rjust(3))+" GPa"+" " +'\n')
self.chart.write( " POISS = " + (str(self.spheres_model_part.GetProperties()[1][POISSON_RATIO]).rjust(3))+" " +'\n')
self.chart.write( " FTS = " + (str(self.spheres_model_part.GetProperties()[1][CONTACT_SIGMA_MIN]).rjust(3))+" Pa " +'\n')
self.chart.write( " LCS1 = " + (str(self.spheres_model_part.GetProperties()[1][SLOPE_LIMIT_COEFF_C1]).rjust(3))+" Pa " +'\n')
self.chart.write( " LCS2 = " + (str(self.spheres_model_part.GetProperties()[1][SLOPE_LIMIT_COEFF_C2]).rjust(3))+" Pa " +'\n')
self.chart.write( " LCS3 = " + (str(self.spheres_model_part.GetProperties()[1][SLOPE_LIMIT_COEFF_C3]).rjust(3))+" Pa " +'\n')
self.chart.write( " YRC1 = " + (str(self.spheres_model_part.GetProperties()[1][SLOPE_FRACTION_N1]).rjust(3))+" " +'\n')
self.chart.write( " YRC2 = " + (str(self.spheres_model_part.GetProperties()[1][SLOPE_FRACTION_N2]).rjust(3))+" " +'\n')
self.chart.write( " YRC3 = " + (str(self.spheres_model_part.GetProperties()[1][SLOPE_FRACTION_N3]).rjust(3))+" " +'\n')
self.chart.write( " FSS = " + (str(self.spheres_model_part.GetProperties()[1][CONTACT_TAU_ZERO]).rjust(3))+" Pa " +'\n')
self.chart.write( " YEP = " + (str(self.spheres_model_part.GetProperties()[1][YOUNG_MODULUS_PLASTIC]/1e9).rjust(3))+" GPa"+" " +'\n')
self.chart.write( " YIELD = " + (str(self.spheres_model_part.GetProperties()[1][PLASTIC_YIELD_STRESS]).rjust(3))+" Pa " +'\n')
self.chart.write( " EDR = " + (str(self.spheres_model_part.GetProperties()[1][DAMAGE_FACTOR]).rjust(3))+" " +'\n')
self.chart.write( " SEC = " + (str(self.spheres_model_part.GetProperties()[1][SHEAR_ENERGY_COEF]).rjust(3))+" " +'\n')
self.chart.write( " " +'\n')
self.chart.write( "**************************************" +'\n')
self.chart.close()
absolute_path_to_file = os.path.join(self.graphs_path, self.problem_name + "_Parameter_chart.grf")
data_extract_for_print = open(absolute_path_to_file,"r")
for line in data_extract_for_print.readlines():
self.Procedures.KratosPrintInfo(line)
data_extract_for_print.close()
def FinalizeGraphs(self):
#Create a copy and renaming
absolute_path_to_file1 = os.path.join(self.graphs_path, self.problem_name + "_graph.grf")
absolute_path_to_file2 = os.path.join(self.graphs_path, self.problem_name + "_bts.grf")
absolute_path_to_file3 = os.path.join(self.graphs_path, self.problem_name + "_graph_VOL.grf")
for filename in os.listdir("."):
if filename.startswith(absolute_path_to_file1):
shutil.copy(filename, filename+"COPY")
os.rename(filename+"COPY", absolute_path_to_file1 + str(self.initial_time).replace(":", "") + ".grf")
if filename.startswith(absolute_path_to_file2):
shutil.copy(filename, filename+"COPY")
os.rename(filename+"COPY", absolute_path_to_file2 + str(self.initial_time).replace(":", "") + ".grf")
if filename.startswith(absolute_path_to_file3):
shutil.copy(filename, filename+"COPY")
os.rename(filename+"COPY", absolute_path_to_file3 + str(self.initial_time).replace(":", "") + ".grf")
if(self.test_type == "BTS"):
self.bts_export.close()
#self.bts_stress_export.close()
else:
self.graph_export.close()
if( self.test_type =="Hydrostatic"):
self.graph_export_volumetric.close()
def OrientationStudy(self,contact_model_part,step):
absolute_path_to_file = os.path.join(self.graphs_path, "OrientationChart_"+str(step))
OrientationChart = open(absolute_path_to_file, 'w')
counter = 1
for element in contact_model_part.Elements:
u1 = element.GetNode(1).X - element.GetNode(0).X
u2 = element.GetNode(1).Y - element.GetNode(0).Y
u3 = element.GetNode(1).Z - element.GetNode(0).Z
alpha = abs(math.asin(abs(u2)/math.sqrt((u1*u1)+(u2*u2)+(u3*u3))))
alpha_deg = alpha/math.pi*180
element.SetValue(CONTACT_ORIENTATION,alpha_deg)
sigma = element.GetValue(CONTACT_SIGMA)
OrientationChart.write(str(counter)+" "+str(sigma/(self.total_stress_mean))+'\n')
counter += 1
# Bin the bond by its orientation angle into 5-degree intervals [0, 90).
orientation_bins = [self.bond_00_05, self.bond_05_10, self.bond_10_15,
                    self.bond_15_20, self.bond_20_25, self.bond_25_30,
                    self.bond_30_35, self.bond_35_40, self.bond_40_45,
                    self.bond_45_50, self.bond_50_55, self.bond_55_60,
                    self.bond_60_65, self.bond_65_70, self.bond_70_75,
                    self.bond_75_80, self.bond_80_85, self.bond_85_90]
bin_index = int(alpha_deg // 5.0)
if 0 <= bin_index < len(orientation_bins):
    orientation_bins[bin_index].append(element)
ii=0
for item in [self.bond_00_05, self.bond_05_10, self.bond_10_15, self.bond_15_20, self.bond_20_25, self.bond_25_30, self.bond_30_35, self.bond_35_40, self.bond_40_45, self.bond_45_50, self.bond_50_55, self.bond_55_60, self.bond_60_65, self.bond_65_70, self.bond_70_75, self.bond_75_80, self.bond_80_85, self.bond_85_90]:
self.sizes[ii] = len(item)
i = 0.0
sigma_sum =0.0
tau_sum = 0.0
sigma_total_sum_squared = 0
tau_total_sum_squared = 0.0
volume = 0.0
area = 0.0
for element in item:
sigma_normal = element.GetValue(CONTACT_SIGMA)
sigma_tau = element.GetValue(CONTACT_TAU)
sigma_sum += sigma_normal
tau_sum += sigma_tau
sigma_partial_sum_squared = sigma_normal ** 2.0
sigma_total_sum_squared += sigma_partial_sum_squared
tau_partial_sum_squared = sigma_tau ** 2.0
tau_total_sum_squared += tau_partial_sum_squared
i += 1.0
sigma_mean = sigma_sum / len(item)
sigma_var = sigma_total_sum_squared / len(item) - sigma_mean ** 2.0
sigma_std_dev = 0.0
if(abs(sigma_var) > 1e-9):
sigma_std_dev = sigma_var ** 0.5
sigma_rel_std_dev = sigma_std_dev / sigma_mean
tau_mean = tau_sum/ len(item)
tau_var = tau_total_sum_squared / len(item) - tau_mean ** 2.0
tau_std_dev = 0.0
if(abs(tau_var) > 1e-9):
tau_std_dev = tau_var ** 0.5
tau_rel_std_dev = tau_std_dev / tau_mean
self.sigma_mean_table[ii] = sigma_mean
self.sigma_rel_std_dev_table[ii] = sigma_rel_std_dev
self.tau_mean_table[ii] = tau_mean
self.tau_rel_std_dev_table[ii] = tau_rel_std_dev
self.sigma_ratio_table[ii]=sigma_mean/(self.total_stress_mean)
ii+=1
self.Procedures.KratosPrintInfo(self.sigma_ratio_table)
OrientationChart.close()
def ApplyLateralPressure(self, Pressure, XLAT, XBOT, XTOP, XBOTCORNER, XTOPCORNER, alpha_top, alpha_bot, alpha_lat):
for node in XLAT:
r = node.GetSolutionStepValue(RADIUS)
x = node.X
y = node.Y
z = node.Z
values = Array3()
vect = Array3()
cross_section = 3.141592 * r * r
# normal vector pointing toward the centre:
vect_moduli = math.sqrt(x * x + z * z)
if(vect_moduli > 0.0):
vect[0] = -x / vect_moduli
vect[1] = 0
vect[2] = -z / vect_moduli
values[0] = cross_section * alpha_lat * Pressure * vect[0]
values[1] = 0.0
values[2] = cross_section * alpha_lat * Pressure * vect[2]
node.SetSolutionStepValue(EXTERNAL_APPLIED_FORCE, values)
for node in XTOPCORNER:
r = node.GetSolutionStepValue(RADIUS)
x = node.X
y = node.Y
z = node.Z
values = Array3()
vect = Array3()
cross_section = 3.141592 * r * r
# normal vector pointing toward the centre:
vect_moduli = math.sqrt(x * x + z * z)
if(vect_moduli > 0.0):
vect[0] = -x / vect_moduli
vect[1] = 0
vect[2] = -z / vect_moduli
values[0] = cross_section * alpha_lat * Pressure * vect[0] * 0.70710678
values[1] = 0.0
values[2] = cross_section * alpha_lat * Pressure * vect[2] * 0.70710678
node.SetSolutionStepValue(EXTERNAL_APPLIED_FORCE, values)
for node in XBOTCORNER:
r = node.GetSolutionStepValue(RADIUS)
x = node.X
y = node.Y
z = node.Z
values = Array3()
vect = Array3()
cross_section = 3.141592 * r * r
# normal vector pointing toward the centre:
vect_moduli = math.sqrt(x * x + z * z)
if(vect_moduli > 0.0):
vect[0] = -x / vect_moduli
vect[1] = 0
vect[2] = -z / vect_moduli
values[0] = cross_section * alpha_lat * Pressure * vect[0] * 0.70710678
values[1] = 0.0
values[2] = cross_section * alpha_lat * Pressure * vect[2] * 0.70710678
node.SetSolutionStepValue(EXTERNAL_APPLIED_FORCE, values)
def MeasureRadialStrain(self):
mean_radial_strain = 0.0
radial_strain = 0.0
weight = 0.0
for node in self.XLAT:
r = node.GetSolutionStepValue(RADIUS)
x = node.X
z = node.Z
x0 = node.X0
z0 = node.Z0
dist_initial = math.sqrt(x0 * x0 + z0 * z0)
dist_now = math.sqrt(x * x + z * z)
node_radial_strain = (dist_now - dist_initial) / dist_initial
mean_radial_strain += node_radial_strain
weight += 1.0
radial_strain = mean_radial_strain/weight
return radial_strain
def PoissonMeasure(self):
self.Procedures.KratosPrintWarning("Not Working now")
#left_nodes = list()
#right_nodes = list()
#xleft_weight = 0.0
#xright_weight = 0.0
#left_counter = 0.0
#right_counter = 0.0
#if(self.parameters.PoissonMeasure == "ON"):
#for node in spheres_model_part.Nodes:
#if (node.GetSolutionStepValue(GROUP_ID)==4):
#left_nodes.append(node)
#xleft_weight = +(node.X0 - node.GetSolutionStepValue(RADIUS))*node.GetSolutionStepValue(RADIUS)
#left_counter = +node.GetSolutionStepValue(RADIUS)
#elif(node.GetSolutionStepValue(GROUP_ID)==8):
#right_nodes.append(node)
#xright_weight = +(node.X + node.GetSolutionStepValue(RADIUS))*node.GetSolutionStepValue(RADIUS)
#right_counter = +node.GetSolutionStepValue(RADIUS)
#width_ini = xright_weight/right_counter - xleft_weight/left_counter
##################################POISSON##################################
#if(self.parameters.PoissonMeasure == "ON"):
#xleft_weight = 0.0
#xright_weight = 0.0
#left_counter = 0.0
#right_counter = 0.0
#for node in left_nodes:
#xleft_weight = +(node.X - node.GetSolutionStepValue(RADIUS))*node.GetSolutionStepValue(RADIUS)
#left_counter = +node.GetSolutionStepValue(RADIUS)
#for node in right_nodes:
#xright_weight = +(node.X + node.GetSolutionStepValue(RADIUS))*node.GetSolutionStepValue(RADIUS)
#right_counter = +node.GetSolutionStepValue(RADIUS)
#width_now = xright_weight/right_counter - xleft_weight/left_counter
#measured_poisson = ((width_now-width_ini)/width_ini)/strain
#graph_export_poisson.write(str(strain)+" "+str(measured_poisson)+'\n')
#-------------------------------------------------------------------------------------#
def GenerateGraphics(self):
## PROBLEM DATA
area = 0.000001 ### 1mm2
grad_p = 1 ## Pa/m
## Read Data
data_file_name0 = "test.grf"
data0 = loadtxt(data_file_name0)
strain = array(data0[:,0])
stress = array(data0[:,1])
data_file_name1 = "test.grf"
data1 = loadtxt(data_file_name1)
strain1 = array(data1[:,0])
stress1 = array(data1[:,1])
data_file_name2 = "test.grf"
data2 = loadtxt(data_file_name2)
strain2 = array(data2[:,0])
stress2 = array(data2[:,1])
# settings to be changed ############################
set_mode = 'extralarge' # large; publishable; medium
legend_position = 'lower left'
##graph_name = ""
x_name = 'Axial Strain (%)'
y_name = 'Stress (MPa) - Load-axis'
####################################################################
####################################################################
clf()
plot_settings.set_mode(set_mode)
#plt.semilogx()
plot(strain, stress, 'k:s', strain1, stress1, 'r--v', strain2, stress2, 'b-.o',linewidth=1 )
legend(('test', 'test'), legend_position, numpoints=1,)
## bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
grid(True)
#insert name ######################################################
savedname = "stress_graph"
####################################################################
##graphtitle = graph_name
##title(graphtitle)
xlabel(x_name)
ylabel(y_name)
##xlim(0.0, 1.0)
##ylim(0.0, 1.0)
##savefig(savedname + '.eps')
savefig(savedname + '.png')
####################################################################
####################################################################
clf()
plot_settings.set_mode(set_mode)
#plt.semilogx()
plot(strain, stress, 'k:s', strain1, stress1, 'r--v',linewidth=2 )
legend(( 'IFT variation', 'Viscosity variation'),
import warnings
import numpy as np
import pandas as pd
import networkx as nx
import statsmodels.api as sm
def probability_to_odds(prob):
"""Converts given probability (proportion) to odds
Parameters
----------
prob : float, array
Probability or array of probabilities to convert to odds
"""
return prob / (1 - prob)
def odds_to_probability(odds):
"""Converts given odds to probability
Parameters
----------
odds : float, array
Odds or array of odds to convert to probabilities
"""
return odds / (1 + odds)
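# A minimal round-trip sketch (illustrative only, not part of the original module):
#   p = np.array([0.1, 0.5, 0.8])
#   odds = probability_to_odds(p)              # -> array([0.111..., 1., 4.])
#   np.allclose(odds_to_probability(odds), p)  # -> True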
def exp_map(graph, var):
"""Slow implementation of the exposure mapping functionality. Only supports the sum summary measure.
Still used by the dgm files.
Note
----
Deprecated and no longer actively used by any functions.
Parameters
----------
graph : networkx.Graph
Network to calculate the summary measure for.
var : str
Variable in the graph to calculate the summary measure for
Returns
-------
array
One dimensional array of calculated summary measure
"""
# get adjacency matrix
matrix = nx.adjacency_matrix(graph, weight=None)
# get node attributes
y_vector = np.array(list(nx.get_node_attributes(graph, name=var).values()))
# multiply the weight matrix by node attributes
wy_matrix = np.nan_to_num(matrix * y_vector.reshape((matrix.shape[0]), 1)).flatten()
return np.asarray(wy_matrix).flatten() # I hate converting between arrays and matrices...
def fast_exp_map(matrix, y_vector, measure):
r"""Improved (computation-speed-wise) implementation of the exposure mapping functionality. Further supports a
variety of summary measures. This is accomplished by using the adjacency matrix and vectors to efficiently
calculate the summary measures (hence the function name). This is an improvement on previous iterations of this
function.
Available summary measures are
Sum (``'sum'``) :
.. math::
X_i^s = \sum_{j=1}^n X_j \mathcal{G}_{ij}
Mean (``'mean'``) :
.. math::
X_i^s = \sum_{j=1}^n X_j \mathcal{G}_{ij} / \sum_{j=1}^n \mathcal{G}_{ij}
Variance (``'var'``):
.. math::
\bar{X}_j = \sum_{j=1}^n X_j \mathcal{G}_{ij} \\
X_i^s = \sum_{j=1}^n (X_j - \bar{X}_j)^2 \mathcal{G}_{ij} / \sum_{j=1}^n \mathcal{G}_{ij}
Mean distance (``'mean_dist'``) :
.. math::
X_i^s = \sum_{j=1}^n (X_i - X_j) \mathcal{G}_{ij} / \sum_{j=1}^n \mathcal{G}_{ij}
Variance distance (``'var_dist'``) :
.. math::
\bar{X}_{ij} = \sum_{j=1}^n (X_i - X_j) \mathcal{G}_{ij} \\
X_i^s = \sum_{j=1}^n ((X_i - X_j) - \bar{X}_{ij})^2 \mathcal{G}_{ij} / \sum_{j=1}^n \mathcal{G}_{ij}
Note
----
If you would like other summary measures to be added or made available, please reach out via GitHub.
Parameters
----------
matrix : array
Adjacency matrix. Should be extracted from a ``networkx.Graph`` via ``nx.adjacency_matrix(...)``
y_vector : array
Array of the variable to calculate the summary measure for. Should be in same order as ``matrix`` for
calculation to work as intended.
measure : str
Summary measure to calculate. Options are provided above.
Returns
-------
array
One dimensional array of calculated summary measure
"""
if measure.lower() == 'sum':
# multiply the weight matrix by node attributes
wy_matrix = np.nan_to_num(matrix * y_vector.reshape((matrix.shape[0]), 1)).flatten()
return np.asarray(wy_matrix).flatten() # converting between arrays and matrices...
elif measure.lower() == 'mean':
rowsum_vector = np.sum(matrix, axis=1) # calculate row-sum (denominator / degree)
with warnings.catch_warnings(): # ignores NumPy's RuntimeWarning for isolated nodes (divide by 0)
warnings.simplefilter('ignore', RuntimeWarning)
weight_matrix = matrix / rowsum_vector.reshape((matrix.shape[0]), 1) # calculate each nodes weight
wy_matrix = weight_matrix * y_vector.reshape((matrix.shape[0]), 1) # multiply matrix by node attributes
return np.asarray(wy_matrix).flatten() # converting between arrays and matrices...
elif measure.lower() == 'var':
a = matrix.toarray() # Convert matrix to array
a = np.where(a == 0, np.nan, a) # filling non-edges with NaN's
with warnings.catch_warnings(): # ignores NumPy's RuntimeWarning for isolated nodes (divide by 0)
warnings.simplefilter('ignore', RuntimeWarning)
return np.nanvar(a * y_vector, axis=1)
elif measure.lower() == 'mean_dist':
a = matrix.toarray() # Convert matrix to array
a = np.where(a == 0, np.nan, a) # filling non-edges with NaN's
c = (a * y_vector).transpose() - y_vector # Calculates the distance metric (needs transpose)
with warnings.catch_warnings(): # ignores NumPy's RuntimeWarning for isolated nodes (divide by 0)
warnings.simplefilter('ignore', RuntimeWarning)
return np.nanmean(c.transpose(), # back-transpose
axis=1)
elif measure.lower() == 'var_dist':
a = matrix.toarray() # Convert matrix to array
a = np.where(a == 0, np.nan, a) # filling non-edges with NaN's
c = (a * y_vector).transpose() - y_vector # Calculates the distance metric (needs transpose)
with warnings.catch_warnings(): # ignores NumPy's RuntimeWarning for isolated nodes (divide by 0)
warnings.simplefilter('ignore', RuntimeWarning)
return np.nanvar(c.transpose(), # back-transpose
axis=1)
else:
raise ValueError("The summary measure mapping" + str(measure) + "is not available")
def exp_map_individual(network, variable, max_degree):
"""Summary measure calculate for the non-parametric mapping approach described in Sofrygin & <NAME> (2017).
This approach works best for networks with uniform degree distributions. This summary measure generates a number
of columns (a total of ``max_degree``). Each column is then an indicator variable for each observation. To keep
all columns the same number of dimensions, zeroes are filled in for all degrees above unit i's observed degree.
Parameters
----------
network : networkx.Graph
The NetworkX graph object to calculate the summary measure for.
variable : str
Variable to calculate the summary measure for (this will always be the exposure variable internally).
max_degree : int
Maximum degree in the network (defines the number of columns to generate).
Returns
-------
dataframe
Data set containing all generated columns
"""
attrs = []
for i in network.nodes:
j_attrs = []
for j in network.neighbors(i):
j_attrs.append(network.nodes[j][variable])
attrs.append(j_attrs[:max_degree])
return pd.DataFrame(attrs,
columns=[variable+'_map'+str(x+1) for x in range(max_degree)])
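# Usage sketch for exp_map_individual (toy example; the graph and variable name are assumptions):
#   G = nx.path_graph(3)
#   nx.set_node_attributes(G, {0: 1, 1: 0, 2: 1}, name='A')
#   exp_map_individual(G, variable='A', max_degree=2)
#   # row 0 -> A_map1=0, A_map2=NaN   (node 0 has a single neighbour)
#   # row 1 -> A_map1=1, A_map2=1
#   # row 2 -> A_map1=0, A_map2=NaN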
def network_to_df(graph):
"""Take input network and converts all node attributes to a pandas DataFrame object. This dataframe is then used
within ``NetworkTMLE`` internally.
Parameters
----------
graph : networkx.Graph
Graph with node attributes to transform into data set
Returns
-------
dataframe
Data set containing all node attributes
"""
return pd.DataFrame.from_dict(dict(graph.nodes(data=True)), orient='index')
def bounding(ipw, bound):
"""Internal function to bound or truncate the estimated inverse probablity weights.
Parameters
----------
ipw : array
Estimated inverse probability weights to truncate.
bound : list, float, int, set, array
Bounds to truncate weights by.
Returns
-------
array
Truncated inverse probability weights.
"""
if type(bound) is float or type(bound) is int: # Symmetric bounding
if bound > 1:
ipw = np.where(ipw > bound, bound, ipw)
ipw = np.where(ipw < 1 / bound, 1 / bound, ipw)
elif 0 < bound < 1:
ipw = np.where(ipw < bound, bound, ipw)
ipw = np.where(ipw > 1 / bound, 1 / bound, ipw)
else:
raise ValueError('Bound must be a positive value')
elif type(bound) is str: # Catching string inputs
raise ValueError('Bounds must either be a float or integer, or a collection')
else: # Asymmetric bounds
if bound[0] > bound[1]:
raise ValueError('Bound thresholds must be listed in ascending order')
if len(bound) > 2:
warnings.warn('It looks like your specified bounds is more than two floats. Only the first two '
'specified bounds are used by the bound statement. So only ' +
str(bound[0:2]) + ' will be used', UserWarning)
if type(bound[0]) is str or type(bound[1]) is str:
raise ValueError('Bounds must be floats or integers')
if bound[0] < 0 or bound[1] < 0:
raise ValueError('Both bound values must be positive values')
ipw = np.where(ipw < bound[0], bound[0], ipw)
ipw = np.where(ipw > bound[1], bound[1], ipw)
return ipw
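# Bounding sketch (illustrative values only, not part of the original module):
#   w = np.array([0.2, 1.0, 12.0])
#   bounding(w, bound=10)        # symmetric: clipped to [1/10, 10] -> [0.2, 1.0, 10.0]
#   bounding(w, bound=[0.5, 5])  # asymmetric: clipped to [0.5, 5]  -> [0.5, 1.0, 5.0]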
def outcome_learner_fitting(ml_model, xdata, ydata):
"""Internal function to fit custom_models for the outcome nuisance model.
Parameters
----------
ml_model :
Unfitted model to be fit.
xdata : array
Covariate data to fit the model with
ydata : array
Outcome data to fit the model with
Returns
-------
Fitted user-specified model
"""
try:
fm = ml_model.fit(X=xdata, y=ydata)
except TypeError:
raise TypeError("Currently custom_model must have the 'fit' function with arguments 'X', 'y'. This "
"covers both sklearn and supylearner. If there is a predictive model you would "
"like to use, please open an issue at https://github.com/pzivich/zepid and I "
"can work on adding support")
return fm
def outcome_learner_predict(ml_model_fit, xdata):
"""Internal function to take a fitted custom_model for the outcome nuisance model and generate the predictions.
Parameters
----------
ml_model_fit :
Fitted user-specified model
xdata : array
Covariate data to generate the predictions with.
Returns
-------
array
Predicted values for the outcome (probability if binary, and expected value otherwise)
"""
if hasattr(ml_model_fit, 'predict_proba'):
g = ml_model_fit.predict_proba(xdata)
if g.ndim == 1: # allows support for pygam.LogisticGAM
return g
else:
return g[:, 1]
elif hasattr(ml_model_fit, 'predict'):
return ml_model_fit.predict(xdata)
else:
raise ValueError("Currently custom_model must have 'predict' or 'predict_proba' attribute")
def exposure_machine_learner(ml_model, xdata, ydata, pdata):
"""Internal function | |
and a subdir
svntest.main.run_svn(None, 'propset', 'red', 'rojo', D_path)
svntest.main.run_svn(None, 'propset', 'black', 'bobo', E_path)
svntest.main.run_svn(None, 'propset', 'black', 'bobo', wc_dir)
# Create expected output tree.
expected_output = svntest.wc.State(wc_dir, {
'A/D' : Item(verb='Sending'),
'A/B/E' : Item(verb='Sending'),
'' : Item(verb='Sending'),
})
# Create expected status tree.
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D', wc_rev=2, status=' ')
expected_status.tweak('A/B/E', wc_rev=2, status=' ')
expected_status.tweak('', wc_rev=2, status=' ')
# Commit the working copy
svntest.actions.run_and_verify_commit(wc_dir, expected_output,
expected_status,
None, wc_dir)
# Create expected trees for an update to revision 1.
expected_output = svntest.wc.State(wc_dir, {
'A/D' : Item(status=' U'),
'A/B/E' : Item(status=' U'),
'' : Item(status=' U'),
})
expected_disk = svntest.main.greek_state.copy()
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
# Do the update and check the results in three ways... INCLUDING PROPS
svntest.actions.run_and_verify_update(wc_dir,
expected_output,
expected_disk,
expected_status,
None, None, None, None, None, 1,
'-r', '1', wc_dir)
# Can't use run_and_verify_status here because the out-of-date
# information in the status output isn't copied in the status tree.
common = " 1 1 jrandom "
expected = svntest.verify.UnorderedOutput(
[" " + common + os.path.join(E_path, 'alpha') + "\n",
" " + common + os.path.join(E_path, 'beta') + "\n",
" *" + common + os.path.join(E_path) + "\n",
" " + common + os.path.join(B_path, 'lambda') + "\n",
" " + common + os.path.join(B_path, 'F') + "\n",
" " + common + B_path + "\n",
" " + common + os.path.join(G_path, 'pi') + "\n",
" " + common + os.path.join(G_path, 'rho') + "\n",
" " + common + os.path.join(G_path, 'tau') + "\n",
" " + common + G_path + "\n",
" " + common + os.path.join(H_path, 'chi') + "\n",
" " + common + os.path.join(H_path, 'omega') + "\n",
" " + common + os.path.join(H_path, 'psi') + "\n",
" " + common + H_path + "\n",
" " + common + os.path.join(D_path, 'gamma') + "\n",
" *" + common + D_path + "\n",
" " + common + os.path.join(A_path, 'mu') + "\n",
" " + common + os.path.join(A_path, 'C') + "\n",
" " + common + A_path + "\n",
" " + common + os.path.join(wc_dir, 'iota') + "\n",
" *" + common + wc_dir + "\n",
"Status against revision: 2\n" ])
svntest.actions.run_and_verify_svn(None,
expected,
[],
"status", "-uv", wc_dir)
#----------------------------------------------------------------------
# Test for issue #2468
@Issue(2468)
def status_nonrecursive_update(sbox):
"run 'status -uN' with incoming changes"
sbox.build()
wc_dir = sbox.wc_dir
A_path = os.path.join(wc_dir, 'A')
D_path = os.path.join(A_path, 'D')
mu_path = os.path.join(A_path, 'mu')
gamma_path = os.path.join(D_path, 'gamma')
# Change files in A and D and commit
svntest.main.file_append(mu_path, "new line of text")
svntest.main.file_append(gamma_path, "new line of text")
# Create expected trees for commit
expected_output = svntest.wc.State(wc_dir, {
'A/mu' : Item(verb='Sending'),
'A/D/gamma' : Item(verb='Sending')
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', wc_rev=2, status=' ')
expected_status.tweak('A/D/gamma', wc_rev=2, status=' ')
svntest.actions.run_and_verify_commit(wc_dir, expected_output,
expected_status,
None, wc_dir)
# Create expected trees for an update to revision 1.
expected_output = svntest.wc.State(wc_dir, {
'A/mu' : Item(status='U '),
'A/D/gamma' : Item(status='U '),
})
expected_disk = svntest.main.greek_state.copy()
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
# Do the update and check the results in three ways
svntest.actions.run_and_verify_update(wc_dir,
expected_output,
expected_disk,
expected_status,
None, None, None, None, None, 0,
'-r', '1', wc_dir)
# Check the remote status of folder A (non-recursively)
xout = [" * 1 " + os.path.join(wc_dir, "A", "mu") + "\n",
"Status against revision: 2\n" ]
svntest.actions.run_and_verify_svn(None,
xout,
[],
"status", "-uN", A_path)
def change_files(wc_dir, files):
"""Make a basic change to the files.
files = a list of paths relative to the wc root directory
"""
for file in files:
filepath = os.path.join(wc_dir, file)
svntest.main.file_append(filepath, "new line of text")
def change_files_and_commit(wc_dir, files, baserev=1):
"""Make a basic change to the files and commit them.
files = a list of paths relative to the wc root directory
"""
change_files(wc_dir, files)
# Prepare expected trees for commit
expected_output = svntest.wc.State(wc_dir, {
'A/mu' : Item(verb='Sending'),
'A/D/gamma' : Item(verb='Sending')
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
commitrev = baserev + 1
for file in files:
expected_output.add({file : Item(verb='Sending')})
expected_status.tweak(file, wc_rev=commitrev, status=' ')
svntest.actions.run_and_verify_commit(wc_dir, expected_output,
expected_status,
None, wc_dir)
def status_depth_local(sbox):
"run 'status --depth=X' with local changes"
sbox.build(read_only = True)
wc_dir = sbox.wc_dir
A_path = os.path.join(wc_dir, 'A')
D_path = os.path.join(A_path, 'D')
mu_path = os.path.join(A_path, 'mu')
gamma_path = os.path.join(D_path, 'gamma')
# make some changes to the greek tree
change_files(wc_dir, ['A/mu', 'A/D/gamma'])
svntest.main.run_svn(None, 'propset', 'svn:test', 'value', A_path)
svntest.main.run_svn(None, 'propset', 'svn:test', 'value', D_path)
# for all the possible types of depth, check the status
# depth=empty
expected = svntest.verify.UnorderedOutput(
[" M %s\n" % A_path])
svntest.actions.run_and_verify_svn(None,
expected,
[],
"status", "--depth=empty", A_path)
# depth=files
expected = svntest.verify.UnorderedOutput(
[" M %s\n" % A_path,
"M %s\n" % mu_path])
svntest.actions.run_and_verify_svn(None,
expected,
[],
"status", "--depth=files", A_path)
# depth=immediates
expected = svntest.verify.UnorderedOutput(
[" M %s\n" % A_path,
" M %s\n" % D_path,
"M %s\n" % mu_path])
svntest.actions.run_and_verify_svn(None,
expected,
[],
"status", "--depth=immediates", A_path)
# depth=infinity (the default)
expected = svntest.verify.UnorderedOutput(
[" M %s\n" % A_path,
" M %s\n" % D_path,
"M %s\n" % mu_path,
"M %s\n" % gamma_path])
svntest.actions.run_and_verify_svn(None,
expected,
[],
"status", "--depth=infinity", A_path)
def status_depth_update(sbox):
"run 'status --depth=X -u' with incoming changes"
sbox.build()
wc_dir = sbox.wc_dir
A_path = os.path.join(wc_dir, 'A')
D_path = os.path.join(A_path, 'D')
mu_path = os.path.join(A_path, 'mu')
gamma_path = os.path.join(D_path, 'gamma')
# add some files, change directory properties
change_files_and_commit(wc_dir, ['A/mu', 'A/D/gamma'])
svntest.main.run_svn(None, 'up', wc_dir)
svntest.main.run_svn(None, 'propset', 'svn:test', 'value', A_path)
svntest.main.run_svn(None, 'propset', 'svn:test', 'value', D_path)
svntest.main.run_svn(None, 'ci', '-m', 'log message', wc_dir)
# update to r1
svntest.main.run_svn(None, 'up', '-r', '1', wc_dir)
# for all the possible types of depth, check the status
# depth=empty
expected = svntest.verify.UnorderedOutput(
[" * 1 %s\n" % A_path,
"Status against revision: 3\n"])
svntest.actions.run_and_verify_svn(None,
expected,
[],
"status", "-u", "--depth=empty", A_path)
# depth=files
expected = svntest.verify.UnorderedOutput(
[" * 1 %s\n" % mu_path,
" * 1 %s\n" % A_path,
"Status against revision: 3\n"])
svntest.actions.run_and_verify_svn(None,
expected,
[],
"status", "-u", "--depth=files",
A_path)
# depth=immediates
expected = svntest.verify.UnorderedOutput(
[" * 1 %s\n" % A_path,
" * 1 %s\n" % D_path,
" * 1 %s\n" % mu_path,
"Status against revision: 3\n"])
svntest.actions.run_and_verify_svn(None,
expected,
[],
"status", "-u", "--depth=immediates",
A_path)
# depth=infinity (the default)
expected = svntest.verify.UnorderedOutput(
[" * 1 %s\n" % A_path,
" * 1 %s\n" % D_path,
" * 1 %s\n" % mu_path,
" * 1 %s\n" % gamma_path,
"Status against revision: 3\n"])
svntest.actions.run_and_verify_svn(None,
expected,
[],
"status", "-u", "--depth=infinity",
A_path)
#----------------------------------------------------------------------
def status_depth_update_local_modifications(sbox):
"run 'status --depth=X -u' with local changes"
sbox.build()
wc_dir = sbox.wc_dir
A_path = sbox.ospath('A')
D_path = os.path.join(A_path, 'D')
mu_path = os.path.join(A_path, 'mu')
gamma_path = os.path.join(D_path, 'gamma')
svntest.main.run_svn(None, 'propset', 'svn:test', 'value', A_path)
svntest.main.run_svn(None, 'propset', 'svn:test', 'value', D_path)
svntest.main.file_append(mu_path, 'modified')
svntest.main.file_append(gamma_path, 'modified')
# depth=empty
expected = svntest.verify.UnorderedOutput(
[" M 1 %s\n" % A_path,
"Status against revision: 1\n"])
svntest.actions.run_and_verify_svn(None,
expected,
[],
"status", "-u", "--depth=empty", A_path)
expected = svntest.verify.UnorderedOutput(
["M 1 %s\n" % mu_path,
"Status against revision: 1\n"])
svntest.actions.run_and_verify_svn(None,
expected,
[],
"status", "-u", "--depth=empty", mu_path)
# depth=files
expected = svntest.verify.UnorderedOutput(
["M 1 %s\n" % mu_path,
" M 1 %s\n" % A_path,
"Status against revision: 1\n"])
svntest.actions.run_and_verify_svn(None,
expected,
[],
"status", "-u", "--depth=files",
A_path)
# depth=immediates
expected = svntest.verify.UnorderedOutput(
[" M 1 %s\n" % A_path,
" M 1 %s\n" % D_path,
"M 1 %s\n" % mu_path,
"Status against revision: 1\n"])
svntest.actions.run_and_verify_svn(None,
expected,
[],
"status", "-u", "--depth=immediates",
A_path)
# depth=infinity (the default)
expected = svntest.verify.UnorderedOutput(
[" M 1 %s\n" % A_path,
" M 1 %s\n" % D_path,
"M 1 %s\n" % mu_path,
"M 1 %s\n" % gamma_path,
"Status against revision: 1\n"])
svntest.actions.run_and_verify_svn(None,
expected,
[],
"status", "-u", "--depth=infinity",
A_path)
#----------------------------------------------------------------------
# Test for issue #2420
@Issue(2420)
def status_dash_u_deleted_directories(sbox):
"run 'status -u' with locally deleted directories"
sbox.build()
wc_dir = sbox.wc_dir
A_path = os.path.join(wc_dir, 'A')
B_path = os.path.join(A_path, 'B')
# delete the B directory
svntest.actions.run_and_verify_svn(None, None, [],
'rm', B_path)
# now run status -u on B and its children
was_cwd = os.getcwd()
os.chdir(A_path)
# check status -u of B
expected = svntest.verify.UnorderedOutput(
["D 1 %s\n" % "B",
"D 1 %s\n" % os.path.join("B", "lambda"),
"D 1 %s\n" % os.path.join("B", "E"),
"D 1 %s\n" % os.path.join("B", "E", "alpha"),
"D 1 %s\n" % os.path.join("B", "E", "beta"),
"D 1 %s\n" % os.path.join("B", "F"),
"Status against revision: 1\n" ])
svntest.actions.run_and_verify_svn(None,
expected,
[],
"status", "-u", "B")
# again, but now from inside B, should give the
import datetime
from functools import wraps
import json
import logging
import re
import signal
import sys
import argh
import flask
import gevent
import gevent.backdoor
from gevent.pywsgi import WSGIServer
import prometheus_client
import psycopg2
import psycopg2.extras
from psycopg2 import sql
import common
from common import database
from common.flask_stats import request_stats, after_request
import google.oauth2.id_token
import google.auth.transport.requests
psycopg2.extras.register_uuid()
app = flask.Flask('thrimshim')
app.after_request(after_request)
MAX_TITLE_LENGTH = 100 # Youtube only allows 100-character titles
MAX_DESCRIPTION_LENGTH = 5000 # Youtube only allows 5000-character descriptions
def cors(app):
"""WSGI middleware that sets CORS headers"""
HEADERS = [
("Access-Control-Allow-Credentials", "false"),
("Access-Control-Allow-Headers", "*"),
("Access-Control-Allow-Methods", "GET,POST,HEAD"),
("Access-Control-Allow-Origin", "*"),
("Access-Control-Max-Age", "86400"),
]
def handle(environ, start_response):
def _start_response(status, headers, exc_info=None):
headers += HEADERS
return start_response(status, headers, exc_info)
return app(environ, _start_response)
return handle
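# Typical use (as done in main() below): wrap the Flask app before serving, e.g.
# WSGIServer((host, port), cors(app)), so every response carries the headers above.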
def authenticate(f):
"""Authenticate a token against the database.
Reference: https://developers.google.com/identity/sign-in/web/backend-auth"""
@wraps(f)
def auth_wrapper(*args, **kwargs):
if app.no_authentication:
return f(*args, editor='NOT_AUTH', **kwargs)
try:
userToken = flask.request.json['token']
except (KeyError, TypeError):
return 'User token required', 401
# check whether token is valid
try:
idinfo = google.oauth2.id_token.verify_oauth2_token(userToken, google.auth.transport.requests.Request(), None)
if idinfo['iss'] not in ['accounts.google.com', 'https://accounts.google.com']:
raise ValueError('Wrong issuer.')
except ValueError:
return 'Invalid token. Access denied.', 403
# check whether user is in the database
email = idinfo['email'].lower()
conn = app.db_manager.get_conn()
results = database.query(conn, """
SELECT email
FROM editors
WHERE lower(email) = %s""", email)
row = results.fetchone()
if row is None:
return 'Unknown user. Access denied.', 403
return f(*args, editor=email, **kwargs)
return auth_wrapper
@app.route('/thrimshim/auth-test', methods=['POST'])
@request_stats
@authenticate
def test(editor=None):
return json.dumps(editor)
# To make nginx proxying simpler, we want to allow /metrics/* to work
@app.route('/metrics/<trailing>')
@request_stats
def metrics_with_trailing(trailing):
"""Expose Prometheus metrics."""
return prometheus_client.generate_latest()
@app.route('/metrics')
@request_stats
def metrics():
"""Expose Prometheus metrics."""
return prometheus_client.generate_latest()
@app.route('/thrimshim')
@request_stats
def get_all_rows():
"""Gets all rows from the events table from the database"""
conn = app.db_manager.get_conn()
results = database.query(conn, """
SELECT *
FROM events
ORDER BY event_start
""")
rows = []
for row in results:
row = row._asdict()
row['id'] = str(row['id'])
row = {
key: (
value.isoformat() if isinstance(value, datetime.datetime)
else value
) for key, value in row.items()
}
rows.append(row)
logging.info('All rows fetched')
return json.dumps(rows)
@app.route('/thrimshim/defaults')
@request_stats
def get_defaults():
"""Get default info needed by thrimbletrimmer when not loading a specific row."""
return json.dumps({
"video_channel": app.default_channel,
"bustime_start": app.bustime_start,
"title_prefix": app.title_header,
"title_max_length": MAX_TITLE_LENGTH - len(app.title_header),
"upload_locations": app.upload_locations,
})
@app.route('/thrimshim/<uuid:ident>', methods=['GET'])
@request_stats
def get_row(ident):
"""Gets the row from the database with id == ident."""
conn = app.db_manager.get_conn()
results = database.query(conn, """
SELECT *
FROM events
WHERE id = %s
""", ident)
row = results.fetchone()
if row is None:
return 'Row id = {} not found'.format(ident), 404
assert row.id == ident
response = row._asdict()
response['id'] = str(response['id'])
if response["video_channel"] is None:
response["video_channel"] = app.default_channel
response["title_prefix"] = app.title_header
response["title_max_length"] = MAX_TITLE_LENGTH - len(app.title_header)
response["bustime_start"] = app.bustime_start
response["upload_locations"] = app.upload_locations
# remove any added headers or footers so round-tripping is a no-op
if (
app.title_header
and response["video_title"] is not None
and response["video_title"].startswith(app.title_header)
):
response["video_title"] = response["video_title"][len(app.title_header):]
if (
app.description_footer
and response["video_description"] is not None
and response["video_description"].endswith(app.description_footer)
):
response["video_description"] = response["video_description"][:-len(app.description_footer)]
logging.info('Row {} fetched'.format(ident))
def convert(value):
if isinstance(value, datetime.datetime):
return value.isoformat()
if isinstance(value, datetime.timedelta):
return value.total_seconds()
raise TypeError(f"Can't convert object of type {value.__class__.__name__} to JSON: {value}")
return json.dumps(response, default=convert)
@app.route('/thrimshim/<uuid:ident>', methods=['POST'])
@request_stats
@authenticate
def update_row(ident, editor=None):
"""Updates row of database with id = ident with the edit columns in new_row."""
new_row = flask.request.json
override_changes = new_row.get('override_changes', False)
state_columns = ['state', 'uploader', 'error', 'video_link']
# These have to be set before a video can be set as 'EDITED'
non_null_columns = [
'upload_location', 'video_ranges', 'video_transitions',
'video_channel', 'video_quality', 'video_title',
'video_description', 'video_tags',
]
edit_columns = non_null_columns + ['allow_holes', 'uploader_whitelist']
sheet_columns = [
'sheet_name', 'event_start', 'event_end',
'category', 'description', 'notes', 'tags',
]
# Check vital edit columns are in new_row
wanted = set(non_null_columns + ['state'] + sheet_columns)
missing = wanted - set(new_row)
if missing:
return 'Fields missing in JSON: {}'.format(', '.join(missing)), 400
# Get rid of irrelevant columns
extras = set(new_row) - set(edit_columns + state_columns + sheet_columns)
for extra in extras:
del new_row[extra]
# Include headers and footers
if 'video_title' in new_row:
new_row['video_title'] = app.title_header + new_row['video_title']
if 'video_description' in new_row:
new_row['video_description'] += app.description_footer
# Validate youtube requirements on title and description
if len(new_row['video_title']) > MAX_TITLE_LENGTH:
return 'Title must be {} characters or less, including prefix'.format(MAX_TITLE_LENGTH), 400
if len(new_row['video_description']) > MAX_DESCRIPTION_LENGTH:
return 'Description must be {} characters or less, including footer'.format(MAX_DESCRIPTION_LENGTH), 400
for char in ['<', '>']:
if char in new_row['video_title']:
return 'Title may not contain a {} character'.format(char), 400
if char in new_row['video_description']:
return 'Description may not contain a {} character'.format(char), 400
# Validate and convert video ranges and transitions.
num_ranges = len(new_row['video_ranges'])
if num_ranges == 0:
return 'Ranges must contain at least one range', 400
if len(new_row['video_transitions']) != num_ranges - 1:
return 'There must be exactly {} transitions for {} ranges'.format(
num_ranges - 1, num_ranges,
), 400
for start, end in new_row['video_ranges']:
if start > end:
return 'Range start must be less than end', 400
# We need these to be tuples not lists for psycopg2 to do the right thing,
# but since they come in as JSON they are currently lists.
new_row['video_ranges'] = [tuple(range) for range in new_row['video_ranges']]
new_row['video_transitions'] = [
None if transition is None else tuple(transition)
for transition in new_row['video_transitions']
]
conn = app.db_manager.get_conn()
# Check a row with id = ident is in the database
built_query = sql.SQL("""
SELECT id, state, {}
FROM events
WHERE id = %s
""").format(sql.SQL(', ').join(
sql.Identifier(key) for key in sheet_columns
))
results = database.query(conn, built_query, ident)
old_row = results.fetchone()._asdict()
if old_row is None:
return 'Row {} not found'.format(ident), 404
assert old_row['id'] == ident
if old_row['state'] not in ['UNEDITED', 'EDITED', 'CLAIMED']:
return 'Video already published', 403
# check whether row has been changed in the sheet since editing has begun
changes = ''
for column in sheet_columns:
if isinstance(old_row[column], datetime.datetime):
old_row[column] = old_row[column].isoformat()
def normalize(value):
if isinstance(value, list):
return sorted(map(normalize, value))
if value is None:
return None
return value.lower().strip()
if normalize(new_row[column]) != normalize(old_row[column]):
changes += '{}: {} => {}\n'.format(column, new_row[column], old_row[column])
if changes and not override_changes:
return 'Sheet columns have changed since editing has begun. Please review changes\n' + changes, 409
# handle state columns
if new_row['state'] == 'EDITED':
missing = []
for column in non_null_columns:
if new_row[column] is None:
missing.append(column)
if missing:
return 'Fields {} must be non-null for video to be cut'.format(', '.join(missing)), 400
if len(new_row.get('video_title', '')) <= len(app.title_header):
return 'Video title must not be blank', 400
if len(new_row.get('video_description', '')) <= len(app.description_footer):
return 'Video description must not be blank. If you have nothing else to say, just repeat the title.', 400
elif new_row['state'] != 'UNEDITED':
return 'Invalid state {}'.format(new_row['state']), 400
new_row['uploader'] = None
new_row['error'] = None
new_row['editor'] = editor
new_row['edit_time'] = datetime.datetime.utcnow()
# actually update database
build_query = sql.SQL("""
UPDATE events
SET {}
WHERE id = %(id)s
AND state IN ('UNEDITED', 'EDITED', 'CLAIMED')"""
).format(sql.SQL(", ").join(
sql.SQL("{} = {}").format(
sql.Identifier(column), database.get_column_placeholder(column),
) for column in new_row.keys() if column not in sheet_columns
))
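# (The SET clause built above expands to "col1" = <placeholder>, "col2" = <placeholder>, ...
# with column names quoted as identifiers and values still passed as bound query parameters
# via get_column_placeholder, so user-supplied values never end up in the SQL text itself.)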
result = database.query(conn, build_query, id=ident, **new_row)
if result.rowcount != 1:
return 'Video likely already published', 403
logging.info('Row {} updated to state {}'.format(ident, new_row['state']))
return ''
@app.route('/thrimshim/manual-link/<uuid:ident>', methods=['POST'])
@request_stats
@authenticate
def manual_link(ident, editor=None):
"""Manually set a video_link if the state is 'UNEDITED' or 'DONE' and the
upload_location is 'manual' or 'youtube-manual'."""
link = flask.request.json['link']
upload_location = flask.request.json.get('upload_location', 'manual')
if upload_location == 'youtube-manual':
YOUTUBE_URL_RE = r'^https?://(?:youtu\.be/|youtube.com/watch\?v=)([a-zA-Z0-9_-]{11})$'
match = re.match(YOUTUBE_URL_RE, link)
if not match:
return 'Link does not appear to be a youtube.com or youtu.be video link. Try removing any extra query params (after the video id).', 400
video_id, = match.groups()
elif upload_location == 'manual':
video_id = None
else:
return 'Upload location must be "manual" or "youtube-manual"', 400
conn = app.db_manager.get_conn()
results = database.query(conn, """
SELECT id, state
FROM events
WHERE id = %s""", ident)
old_row = results.fetchone()
if old_row is None:
return 'Row {} not found'.format(ident), 404
if old_row.state != 'UNEDITED':
return 'Invalid state {} for manual video link'.format(old_row.state), 403
now = datetime.datetime.utcnow()
results = database.query(conn, """
UPDATE events
SET state='DONE', upload_location = %s, video_link = %s, video_id = %s,
editor = %s, edit_time = %s, upload_time = %s
WHERE id = %s AND state = 'UNEDITED'
""", upload_location, link, video_id, editor, now, now, ident)
logging.info("Row {} video_link set to {}".format(ident, link))
return ''
@app.route('/thrimshim/reset/<uuid:ident>', methods=['POST'])
@request_stats
@authenticate
def reset_row(ident, editor=None):
"""Clear state and video_link columns and reset state to 'UNEDITED'.
If force is 'true', it will do so regardless of current state.
Otherwise, it will only do so if we know no video has been uploaded
(state is UNEDITED, EDITED or CLAIMED)
"""
force = (flask.request.args.get('force', '').lower() == "true")
conn = app.db_manager.get_conn()
query = """
UPDATE events
SET state='UNEDITED', error = NULL, video_id = NULL, video_link = NULL,
uploader = NULL, editor = NULL, edit_time = NULL, upload_time = NULL
WHERE id = %s {}
""".format(
"" if force else "AND state IN ('UNEDITED', 'EDITED', 'CLAIMED')",
)
results = database.query(conn, query, ident)
if results.rowcount != 1:
return 'Row id = {} not found or not in cancellable state'.format(ident), 404
logging.info("Row {} reset to 'UNEDITED'".format(ident))
return ''
@argh.arg('--host', help='Address or socket server will listen to. Default is 0.0.0.0 (everything on the local machine).')
@argh.arg('--port', help='Port server will listen on. Default is 8004.')
@argh.arg('connection-string', help='Postgres connection string, which is either a space-separated list of key=value pairs, or a URI like: postgresql://USER:PASSWORD@HOST/DBNAME?KEY=VALUE')
@argh.arg('default-channel', help='The default video_channel sent to the editor and assumed if not given on write')
@argh.arg('bustime-start', help='The start time in UTC for the event, for UTC-Bustime conversion')
@argh.arg('--backdoor-port', help='Port for gevent.backdoor access. By default disabled.')
@argh.arg('--no-authentication', help='Bypass authentication (act as though all calls are authenticated)')
@argh.arg('--title-header', help='A header to prefix all titles with, separated from the submitted title by " - "')
@argh.arg('--description-footer', help='A footer to suffix all descriptions with, separated from the submitted description by a blank line.')
@argh.arg('--upload-locations', help='A comma-separated list of valid upload locations, to pass to thrimbletrimmer. The first is the default. Note this is NOT validated on write.')
def main(
connection_string, default_channel, bustime_start, host='0.0.0.0', port=8004, backdoor_port=0,
no_authentication=False, title_header=None, description_footer=None, upload_locations='',
):
server = WSGIServer((host, port), cors(app))
app.no_authentication = no_authentication
app.default_channel = default_channel
app.bustime_start = bustime_start
app.title_header = "" if title_header is None else "{} - ".format(title_header)
app.description_footer = "" if description_footer is None else "\n\n{}".format(description_footer)
"""
DSC20 WI22 HW05
Name: <NAME>
PID: A16679845
"""
# begin helper methods
def ceil(x):
"""
Simulates math.ceil
No doctest needed
"""
if int(x) != x:
return int(x) + 1
return int(x)
def log(x):
"""
Simulates math.log with base e (natural logarithm)
No doctests needed
"""
n = 1e10
return n * ((x ** (1/n)) - 1)
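# This relies on the identity ln(x) = lim_{n -> inf} n * (x**(1/n) - 1); with n = 1e10
# the approximation is accurate to several decimal places for the inputs used here,
# e.g. log(10) ~= 2.302585, which matches math.log(10).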
# end helper methods
# Question1
def db_calc(dynamic, inst_mult):
"""
Given a musical dynamic abbreviation as a string and a
multiplier inst_mult for louder and softer instruments
as a float, compute the initial decibel level and return
a function giving the decibel level at a given distance
from the instrument.
Parameters:
dynamic: Abbreviation of music dynamic.
inst_mult: Multiplier for louder/softer instruments.
Returns:
Function that computes the decibel level of the
instrument at a given distance.
>>> snare_1 = db_calc('ff', 1.2)
>>> snare_1(0)
126
>>> snare_1(10)
80
>>> snare_1(50)
48
>>> db_calc('loud', 1)(35)
Traceback (most recent call last):
...
AssertionError
>>> db_calc('pp', 1.200001)(50)
Traceback (most recent call last):
...
AssertionError
# Add AT LEAST 3 doctests below, DO NOT delete this line
>>> snare_2 = db_calc('p', 1.3)
Traceback (most recent call last):
...
AssertionError
>>> snare_3 = db_calc('pp', 1)
>>> snare_3(10)
0
>>> snare_4 = db_calc('pp', 'cha')
Traceback (most recent call last):
...
AssertionError
"""
assert isinstance(dynamic, str)
assert isinstance(inst_mult, (float, int))
assert (inst_mult >= .8) and (inst_mult <= 1.2)
db = {'pp': 30,
'p': 45,
'mp': 60,
'mf': 75,
'f': 90,
'ff': 105}
assert dynamic in db
db_init = db[dynamic] * inst_mult
def db_level(distance):
"""
Computes the observed decibel level given
a distance away from the instrument.
Parameters:
distance: Distance away from the instrument as an integer.
Returns:
Decibel level for given distance from instrument as an integer.
"""
assert isinstance(distance, int)
assert distance >= 0
if distance == 0:
return round(db_init)
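# Attenuation model: subtract 20 * ln(distance) dB from the initial level
# (note this uses the natural-log helper above, not log base 10).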
level = db_init - 20 * log(distance)
if level < 0:
return 0
return round(level)
return db_level
# Question2
def next_move(file_names, decision):
"""
Takes in a filepath containing contestant names and decisions,
and a final decision. Returns a message for the
contestants whose decisions match the final decision.
Parameters:
file_names: Path to file containing names and decisions.
decision: Final decision that determines which contestants
are sent messages.
Returns:
Function that creates message for contestants
that match the final decision.
>>> message_to_students = next_move("files/names.txt", "h")
>>> mess = message_to_students("Never give up!")
>>> print(mess)
Dear I!
Unfortunately, it is time to go home. Never give up!
>>> message_to_students = next_move("files/names.txt", "s")
>>> mess = message_to_students("San Diego, Earth.")
>>> print(mess)
Dear A, <NAME>!
We are happy to announce that you can move to the next round.
It will be held at San Diego, Earth.
# Add AT LEAST 3 doctests below, DO NOT delete this line
>>> message_2 = next_move('files/names2.txt', 'h')
>>> mess2 = message_2('It is all good.')
>>> print(mess2)
Dear <NAME>!
Unfortunately, it is time to go home. It is all good.
>>> message_2 = next_move('files/names2.txt', 's')
>>> mess2 = message_2('Yay!')
>>> print(mess2)
Dear Zhi!
We are happy to announce that you can move to the next round.
It will be held at Yay!
>>> mess2 = message_2('MOS114')
>>> print(mess2)
Dear Zhi!
We are happy to announce that you can move to the next round.
It will be held at MOS114
"""
f_name = 0
dec_index = 3
name_list = []
tmp = ''
with open(file_names, 'r') as f:
for line in f:
tmp = line.split(',')
if tmp[dec_index].strip().lower() == decision.lower():
name_list.append(tmp[f_name])
def final_message(message):
"""
Creates and returns a string final_message based
on an inputted message.
Parameters:
message: Custom message to send to participants
matching the decision.
Returns:
A predetermined string message along with the custom
message to send.
"""
output_str = 'Dear ' + ', '.join(name_list) + '!\n'
if decision == 's':
output_str += 'We are happy to announce that you can \
move to the next round.\nIt will be held at \
' + message
else:
output_str += 'Unfortunately, it is time to go home. ' + message
return output_str
return final_message
# Question3
def forge(filepath):
"""
Reads a given filepath containing names and votes
with votes being 1 or 0, and changes people's votes
in the file to make the majority vote what is desired.
Parameters:
filepath: Path to file containing names and votes.
Returns:
Function that forges votes in the file.
>>> forge('files/vote1.txt')(0)
>>> with open('files/vote1.txt', 'r') as outfile1:
... print(outfile1.read().strip())
Jerry,0
Larry,0
Colin,0
Scott,0
Jianming,0
Huaning,1
Amy,1
Elvy,1
>>> forge('files/vote2.txt')(0)
>>> with open('files/vote2.txt', 'r') as outfile2:
... print(outfile2.read().strip())
Jerry,0
Larry,0
Colin,0
Scott,1
Jianming,0
Huaning,1
Amy,1
Elvy,0
>>> forge('files/vote3.txt')(1)
>>> with open('files/vote3.txt', 'r') as outfile3:
... print(outfile3.read().strip())
Jerry,1
Larry,1
Colin,1
Scott,0
# Add AT LEAST 3 doctests below, DO NOT delete this line
>>> forge('files/vote4.txt')(1)
>>> with open('files/vote4.txt', 'r') as outfile4:
... print(outfile4.read().strip())
Will,1
Zhi,1
TL,1
DJ,0
Rj,0
RD,1
>>> forge('files/vote5.txt')(0)
>>> with open('files/vote5.txt', 'r') as outfile5:
... print(outfile5.read().strip())
Will,0
Zhi,0
TL,1
DJ,0
Rj,0
RD,1
>>> forge('files/vote6.txt')(1)
>>> with open('files/vote6.txt', 'r') as outfile6:
... print(outfile6.read().strip())
Will,1
"""
votes = {0: 0,
1: 0}
vote_index = 1
name_index = 0
with open(filepath, 'r') as f:
for line in f:
votes[int(line.split(',')[vote_index])] += 1
majority = int((votes[0] + votes[1]) / 2) + 1
def change_votes(wanted):
"""
Takes in a vote that is the desired result of the
voting process. Write to the file to make the wanted
vote the majority vote.
Parameters:
wanted: The desired majority in the voting process.
"""
votes_to_change = majority - votes[wanted]
new_votes = ''
with open(filepath, 'r') as f:
for line in f:
if votes_to_change > 0:
if int(line.split(',')[vote_index]) != int(wanted):
new_votes += line.split(',')[name_index] + \
',' + str(wanted) + '\n'
votes_to_change -= 1
else:
new_votes += line
else:
new_votes += line
with open(filepath, 'w') as f:
f.write(new_votes)
return change_votes
# Question4.1
def number_of_adults_1(lst, age = 18):
"""
Takes in a list of integers containing ages
and an age threshold, and returns the number of
adults needed to supervise people below the
age threshold. Each adult can supervise three people.
Parameters:
lst: List containing ages of people as integers.
age: Age threshold where people no longer need
supervision. Default value is 18.
Returns:
Number of adults needed to supervise people under
the age threshold.
>>> number_of_adults_1([1,2,3,4,5,6,7])
3
>>> number_of_adults_1([1,2,3,4,5,6,7], 5)
2
>>> number_of_adults_1([1,2,3,4,5,6,7], age = 2)
1
# Add AT LEAST 3 doctests below, DO NOT delete this line
>>> number_of_adults_1([18, 20, 19, 90])
0
>>> number_of_adults_1([1,2,3,4,5,6,7], 2)
1
>>> number_of_adults_1([])
0
"""
adults_per_kid = 3
return ceil(len([ages for ages in lst if ages < age]) / adults_per_kid)
# Question4.2
def number_of_adults_2(*args):
"""
Takes in positional arguments of integer ages,
and returns the number of adults needed to supervise
people below the age threshold which is 18. One adult
can supervise three people.
Parameters:
*args: Positional arguments that designate age.
Returns:
Number of adults needed to supervise people below
eighteen years old.
>>> number_of_adults_2(1,2,3,4,5,6,7)
3
>>> number_of_adults_2(10,20,13,4)
1
>>> number_of_adults_2(19, 20)
0
# Add AT LEAST 3 doctests below, DO NOT delete this line
>>> number_of_adults_2(1,2,3,4,5,6,7,8,9,10,19)
4
>>> number_of_adults_2(10)
1
>>> number_of_adults_2(0)
1
"""
adults_per_kid = 3
age_threshold = 18
return ceil(len([ages for ages in args \
if ages < age_threshold]) / adults_per_kid)
# Question4.3
def number_of_adults_3(*args, age = 18):
"""
Takes in positional arguments of integer ages,
and returns the number of adults needed to supervise
people below the given age threshold. One adult
can supervise three people.
Parameters:
*args: Positional arguments that designate age.
age: Age threshold where people no longer need
supervision. Default value is 18.
Returns:
Number of adults needed to supervise people below
age threshold.
>>> number_of_adults_3(1,2,3,4,5,6,7)
3
>>> number_of_adults_3(1,2,3,4,5,6,7, age = 5)
2
>>> number_of_adults_3(1,2,3,4,5,6,7, age = 2)
1
# Add AT LEAST 3 doctests below, DO NOT delete this line
>>> number_of_adults_3(19,19,20,20,31)
0
>>> number_of_adults_3(1,2,3,4,5,6,7, 5)
3
>>> number_of_adults_3(19,20,21, age = 42)
1
"""
adults_per_kid = 3
return ceil(len([ages for ages in args if ages < age]) / adults_per_kid)
# Question5
def school_trip(age_limit, **kwargs):
"""
Given a set of keyword arguments with key
"""<div class="bc-white padding302020">%s</div>"""
IMAGE_VIEW_TEMPLATE = """
<div class="marginT30 marginB10 text-center"><img src="%s" class="img-responsive" style="display:inline"></div>
"""
VIDEO_VIEW_TEMPLATE = """
<div class="marginT30 marginB10 text-center"><video src="%s" preload="auto" autoplay controls class="img-responsive" style="display:inline"></video></div>
"""
AUDIO_VIEW_TEMPLATE = """
<div class="marginT30 marginB10 text-center"><audio src="%s" preload="auto" autoplay controls style="display:inline; width:%s;"></audio><h5 class="marginB10">%s</h5></div>
"""
DOCUMENT_VIEW_TEMPLATE = """
<iframe src="%s" id="iframe" allowfullscreen>
</iframe>
"""
YOUTUBE_TEMPLATE = """
<iframe src="%s" id="iframe" allowfullscreen>
</iframe>
"""
SLIDESHARE_TEMPLATE = """
%s
"""
TED_TALK_TEMPLATE = """
<iframe src="https://embed-ssl.ted.com/talks/lang/%s/%s" id="iframe" allowfullscreen></iframe>
"""
IPYNB_TEMPLATE = """
<iframe src="%s://%s/serve_ipynb_url/?url=%s" id="iframe" allowfullscreen>
</iframe>
"""
def oer_view(request, oer_id, oer=None):
protocol = request.is_secure() and 'https' or 'http'
if not oer:
oer_id = int(oer_id)
oer = get_object_or_404(OER, pk=oer_id)
elif not oer_id:
oer_id = oer.id
user = request.user
if not oer.can_access(user):
raise PermissionDenied
language = request.LANGUAGE_CODE
var_dict = { 'oer': oer, }
# var_dict['oer_url'] = oer.url
var_dict['is_published'] = oer.state == PUBLISHED
var_dict['is_un_published'] = un_published = oer.state == UN_PUBLISHED
if user.is_authenticated:
profile = user.get_profile()
add_bookmarked = oer.state == PUBLISHED and profile and profile.get_completeness()
else:
add_bookmarked = None
if add_bookmarked and request.GET.get('copy', ''):
bookmarked_oers = get_clipboard(request, key='bookmarked_oers') or []
if not oer_id in bookmarked_oers:
set_clipboard(request, key='bookmarked_oers', value=bookmarked_oers+[oer_id])
var_dict['add_bookmarked'] = add_bookmarked
var_dict['in_bookmarked_oers'] = oer_id in (get_clipboard(request, key='bookmarked_oers') or [])
var_dict['can_evaluate'] = oer.can_evaluate(request.user)
var_dict['can_republish'] = oer.can_republish(user)
var_dict['evaluations'] = oer.get_evaluations()
var_dict['oer_url'] = url = oer.url
youtube = url and (url.count('youtube.com') or url.count('youtu.be')) and url or ''
ted_talk = url and url.count('www.ted.com/talks/') and url or ''
reference = oer.reference
slideshare = reference and reference.count('slideshare.net') and reference.count('<iframe') and reference or ''
ipynb = url and url.endswith('ipynb')
oer_text = oer.get_text()
if oer_text: # 190919 GT added
var_dict['text_view'] = TEXT_VIEW_TEMPLATE % oer_text # 190919 GT added
elif youtube:
if youtube.count('embed'):
pass
elif youtube.count('youtu.be/'):
youtube = protocol + '://www.youtube.com/embed/%s' % youtube[youtube.index('youtu.be/')+9:]
elif youtube.count('watch?v='):
youtube = protocol + '://www.youtube.com/embed/%s' % youtube[youtube.index('watch?v=')+8:]
youtube += '?autoplay=1'
youtube = YOUTUBE_TEMPLATE % youtube
var_dict['youtube'] = youtube
elif ted_talk:
if ted_talk.count('?'):
ted_talk = url[ted_talk.index('www.ted.com/talks/')+18:ted_talk.index('?')]
else:
ted_talk = url[ted_talk.index('www.ted.com/talks/')+18:]
ted_talk = TED_TALK_TEMPLATE % (language, ted_talk)
var_dict['ted_talk'] = ted_talk
elif slideshare:
slideshare = SLIDESHARE_TEMPLATE % slideshare
var_dict['slideshare'] = slideshare
elif ipynb:
domain = request.META['HTTP_HOST']
ipynb = IPYNB_TEMPLATE % (protocol, domain, url)
var_dict['ipynb'] = ipynb
else:
var_dict['x_frame_protection'] = x_frame_protection(url)
var_dict['embed_code'] = oer.embed_code
return render(request, 'oer_view.html', var_dict)
def oer_view_by_slug(request, oer_slug):
# oer = OER.objects.get(slug=oer_slug)
oer = get_object_or_404(OER, slug=oer_slug)
return oer_view(request, oer.id, oer)
def oer_detail(request, oer_id, oer=None):
protocol = request.is_secure() and 'https' or 'http'
if not oer:
oer_id = int(oer_id)
oer = get_object_or_404(OER, pk=oer_id)
elif not oer_id:
oer_id = oer.id
user = request.user
if not oer.can_access(user):
raise PermissionDenied
var_dict = { 'oer': oer, }
if oer.small_image:
image= protocol + '://%s%s%s' % (request.META['HTTP_HOST'], settings.MEDIA_URL, oer.small_image)
else:
image = ''
var_dict['meta'] = {
'description':oer.description,
'og:title': oer.title,
'og:description': oer.description,
'og:type': 'article',
'og:url': request.build_absolute_uri,
'og:image': image,
}
var_dict['object'] = oer
var_dict['can_comment'] = oer.can_comment(request)
var_dict['type'] = OER_TYPE_DICT[oer.oer_type]
var_dict['is_published'] = is_published = oer.state == PUBLISHED
var_dict['is_un_published'] = is_un_published = oer.state == UN_PUBLISHED
if user.is_authenticated:
profile = user.get_profile()
completed_profile = profile and profile.get_completeness()
add_bookmarked = is_published and profile and profile.get_completeness()
else:
completed_profile = False
add_bookmarked = None
if add_bookmarked and request.GET.get('copy', ''):
bookmarked_oers = get_clipboard(request, key='bookmarked_oers') or []
if not oer_id in bookmarked_oers:
set_clipboard(request, key='bookmarked_oers', value=bookmarked_oers+[oer_id])
var_dict['add_bookmarked'] = add_bookmarked
var_dict['in_bookmarked_oers'] = in_bookmarked_oers = oer_id in (get_clipboard(request, key='bookmarked_oers') or [])
# var_dict['can_edit'] = can_edit = oer.can_edit(user)
var_dict['can_edit'] = can_edit = oer.can_edit(request)
var_dict['can_translate'] = oer.can_translate(request)
current_language = get_current_language()
var_dict['current_language_name'] = dict(settings.LANGUAGES).get(current_language, _('unknown'))
var_dict['language_mismatch'] = oer.original_language and not oer.original_language==current_language
var_dict['can_delete'] = can_delete = oer.can_delete(user)
var_dict['can_remove'] = can_delete and oer.state == DRAFT
if can_delete and request.GET.get('cut', ''):
cut_oers = get_clipboard(request, key='cut_oers') or []
if not oer_id in cut_oers:
set_clipboard(request, key='cut_oers', value=cut_oers+[oer_id])
var_dict['in_cut_oers'] = in_cut_oers = oer_id in (get_clipboard(request, key='cut_oers') or [])
var_dict['can_submit'] = oer.can_submit(request)
var_dict['can_withdraw'] = oer.can_withdraw(request)
var_dict['can_reject'] = oer.can_reject(request)
var_dict['can_publish'] = oer.can_publish(request)
var_dict['can_un_publish'] = oer.can_un_publish(request)
var_dict['can_republish'] = can_republish = oer.can_republish(user)
var_dict['can_evaluate'] = can_evaluate = oer.can_evaluate(user)
var_dict['completed_profile'] = completed_profile
var_dict['can_less_action'] = can_edit or can_delete or (add_bookmarked and not in_bookmarked_oers) or (can_delete and not in_cut_oers)
if can_edit:
var_dict['form'] = DocumentUploadForm()
var_dict['exts_file_attachment'] = settings.EXTS_FILE_ATTACHMENT
var_dict['size_file_attachment'] = settings.SIZE_FILE_ATTACHMENT
var_dict['plus_size'] = settings.PLUS_SIZE
var_dict['sub_exts'] = settings.SUB_EXTS
var_dict['evaluations'] = oer.get_evaluations()
var_dict['user_evaluation'] = user.id != None and oer.get_evaluations(user)
var_dict['lps'] = [lp for lp in oer.get_referring_lps() if lp.state==PUBLISHED or lp.can_edit(request)]
var_dict['can_toggle_comments'] = user.is_superuser or oer.creator==user or oer.project.is_admin(user)
var_dict['view_comments'] = is_published or (is_un_published and can_republish)
var_dict['oer_url'] = oer.url # 190919 GT added
if oer.get_text(): # 190919 GT added
var_dict['oer_url'] = "/oer/{}/view/".format(oer.slug)
if user.is_authenticated:
if oer.state == PUBLISHED and not user == oer.creator:
track_action(request, user, 'View', oer, target=oer.project)
return render(request, 'oer_detail.html', var_dict)
def oer_detail_by_slug(request, oer_slug):
oer = get_object_or_404(OER, slug=oer_slug)
return oer_detail(request, oer.id, oer)
def oer_edit(request, oer_id=None, project_id=None):
user = request.user
oer = None
# 20190130 MMR action = '/oer/edit/'
if oer_id:
oer = get_object_or_404(OER, pk=oer_id)
if not oer.can_access(user):
raise PermissionDenied
action = '/oer/%s/edit/' % oer.slug
current_project = get_object_or_404(Project, id=oer.project_id)
proj_name = current_project.name
if not oer.can_edit(request):
return HttpResponseRedirect('/oer/%s/' % oer.slug)
if project_id:
current_project = get_object_or_404(Project, id=project_id)
proj_name = current_project.name
action = '/project/%s/oer_new/' % project_id
if request.POST:
oer_id = request.POST.get('id', '')
if oer_id:
oer = get_object_or_404(OER, id=oer_id)
action = '/oer/%s/edit/' % oer.slug
project_id = oer.project_id
proj_name = oer.project
if project_id:
current_project = get_object_or_404(Project, id=project_id)
proj_name = current_project.name
else:
current_project = None
form = OerForm(request.POST, instance=oer)
metadata_formset = OerMetadataFormSet(request.POST, instance=oer)
if request.POST.get('save', '') or request.POST.get('continue', ''):
if form.is_valid():
oer = form.save(commit=False)
oer.editor = user
set_original_language(oer)
oer.save()
form.save_m2m()
n = len(metadata_formset)
for i in range(n):
if request.POST.get('metadata_set-%d-DELETE' % i, None):
metadatum_id = request.POST.get('metadata_set-%d-id' % i, None)
if metadatum_id:
metadatum = OerMetadata.objects.get(id=metadatum_id)
metadatum.delete()
metadata_form = metadata_formset[i]
if metadata_form.is_valid():
try:
metadata_form.save()
except:
pass
if oer_id:
track_action(request, request.user, 'Edit', oer, target=oer.project)
else:
track_action(request, request.user, 'Create', oer, target=oer.project)
action = '/oer/%s/edit/' % oer.slug
if request.POST.get('save', ''):
return HttpResponseRedirect('/oer/%s/' % oer.slug)
else:
print (form.errors)
print (metadata_formset.errors)
if oer_id or oer:
go_caller = '/oer/%s/' % oer.slug
elif project_id:
go_caller = '/project/%s/' % current_project.slug
else:
go_caller = '#'
return render(request, 'oer_edit.html', {'form': form, 'metadata_formset': metadata_formset, 'oer': oer, 'action': action, 'proj_name':proj_name, 'go_caller': go_caller})
elif request.POST.get('cancel', ''):
if oer:
return HttpResponseRedirect('/oer/%s/' % oer.slug)
else:
project_id = project_id or request.POST.get('project')
project = get_object_or_404(Project, id=project_id)
return HttpResponseRedirect('/project/%s/' % project.slug)
elif oer:
form = OerForm(instance=oer)
metadata_formset = OerMetadataFormSet(instance=oer)
else:
form = OerForm(initial={'project': project_id, 'creator': user.id, 'editor': user.id, 'oer_type': 2, 'source_type': 2, 'state': DRAFT,})
metadata_formset = OerMetadataFormSet()
data_dict = {'form': form, 'metadata_formset': metadata_formset, 'oer': oer, 'object': oer}
current_language = get_current_language()
if project_id:
current_project = get_object_or_404(Project, id=project_id)
data_dict['proj_name'] = current_project.name
else:
current_project = None
data_dict['proj_name'] = proj_name
data_dict['current_language_name'] = dict(settings.LANGUAGES).get(current_language, _('unknown'))
data_dict['language_mismatch'] = bool(oer and oer.original_language and oer.original_language != current_language)
if oer_id:
data_dict['action'] = action
data_dict['go_caller'] = '/oer/%s/' % oer.slug
elif project_id:
data_dict['go_caller'] = '/project/%s/' % current_project.slug
else:
data_dict['go_caller'] = '#'
return render(request, 'oer_edit.html', data_dict)
def oer_edit_by_slug(request, oer_slug):
oer = get_object_or_404(OER, slug=oer_slug)
return oer_edit(request, oer_id=oer.id)
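# Hedged sketch (added, not part of this module): one plausible URLconf wiring for the
# slug-based views above, assuming they are imported into a urls.py as `views`; the paths
# simply mirror the '/oer/<slug>/...' strings already used in this file.
#
# from django.urls import path
#
# urlpatterns = [
#     path('oer/<slug:oer_slug>/', views.oer_detail_by_slug, name='oer_detail'),
#     path('oer/<slug:oer_slug>/edit/', views.oer_edit_by_slug, name='oer_edit'),
#     path('oer/<slug:oer_slug>/upload/screenshot/', views.oer_screenshot_upload, name='oer_screenshot_upload'),
# ]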
def oer_screenshot_upload(request, oer_slug):
user = request.user
oer = get_object_or_404(OER, slug=oer_slug)
action = '/oer/'+oer_slug+'/upload/screenshot/'
if oer:
if not oer.can_access(user):
raise PermissionDenied
if request.POST:
if request.POST.get('cancel', ''):
return HttpResponseRedirect('/oer/%s/' % oer.slug)
else:
if request.POST.get('remove','') == '1':
oer.small_image = ''
oer.editor = user
oer.save()
return HttpResponseRedirect('/oer/%s/' % oer.slug)
else:
if request.FILES:
form = OerScreenshotForm(request.POST,request.FILES, instance=oer)
if form.is_valid():
oer = form.save(commit=False)
oer.editor = user
oer.save()
return HttpResponseRedirect('/oer/%s/' % oer.slug)
else:
print (form.errors)
else:
form = OerScreenshotForm(instance=oer)
return render(request, 'oer_screenshot_upload.html', {'form': form, 'action': action, 'oer': oer, })
else:
if oer.can_edit(request):
form = OerScreenshotForm(instance=oer)
return render(request, 'oer_screenshot_upload.html', {'form': form, 'action': action, 'oer': oer, })
else:
return HttpResponseRedirect('/oer/%s/' % oer.slug)
def oer_submit(request, oer_id):
oer = OER.objects.get(pk=oer_id)
if not oer.can_access(request.user):
raise PermissionDenied
oer.submit(request)
track_action(request, request.user, 'Submit', oer, target=oer.project)
return HttpResponseRedirect('/oer/%s/' % oer.slug)
def oer_withdraw(request, oer_id):
oer = OER.objects.get(pk=oer_id)
if not oer.can_access(request.user):
raise PermissionDenied
oer.withdraw(request)
return HttpResponseRedirect('/oer/%s/' % oer.slug)
def oer_reject(request, oer_id):
oer = OER.objects.get(pk=oer_id)
if not oer.can_access(request.user):
raise PermissionDenied
oer.reject(request)
return HttpResponseRedirect('/oer/%s/' % oer.slug)
def oer_publish(request, oer_id):
oer = OER.objects.get(pk=oer_id)
if not oer.can_access(request.user):
raise PermissionDenied
oer.publish(request)
track_action(request, request.user, 'Approve', oer, target=oer.project)
return HttpResponseRedirect('/oer/%s/' % oer.slug)
def oer_un_publish(request, oer_id):
oer = OER.objects.get(pk=oer_id)
if not oer.can_access(request.user):
raise PermissionDenied
oer.un_publish(request)
return HttpResponseRedirect('/oer/%s/' % oer.slug)
def oer_delete(request, oer_id):
oer = OER.objects.get(pk=oer_id)
if not oer.can_access(request.user):
raise PermissionDenied
project = oer.project
oer.oer_delete(request)
if project:
return HttpResponseRedirect('/project/%s/' % project.slug)
else:
return my_home(request)
def oer_toggle_comments(request, oer_id):
oer = OER.objects.get(pk=oer_id)
if not oer.can_access(request.user):
raise PermissionDenied
if oer.comment_enabled:
oer.disable_comments()
else:
oer.enable_comments()
return HttpResponseRedirect('/oer/%s/' % oer.slug)
def oer_evaluations(request, oer_slug):
oer = get_object_or_404(OER, slug=oer_slug)
user = request.user
var_dict={'oer': oer,}
var_dict['evaluations']=oer.get_evaluations()
return
blocked by filter.
"""
my_event_callback = Mock()
protocol = SBE16Protocol(Prompt, NEWLINE, my_event_callback)
driver_capabilities = Capability.list()
test_capabilities = Capability.list()
# Add a bogus capability that will be filtered out.
test_capabilities.append("BOGUS_CAPABILITY")
# Verify "BOGUS_CAPABILITY was filtered out
self.assertEquals(driver_capabilities, protocol._filter_capabilities(test_capabilities))
###############################################################################
# INTEGRATION TESTS #
# Integration tests exercise the direct driver / instrument interaction #
# by making direct calls via zeromq. #
# - Common integration tests exercise the driver through the instrument agent #
# and are common to all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class Sbe16plusIntegrationTestCase(InstrumentDriverIntegrationTestCase, SeaBird16plusMixin):
"""
Integration tests for the sbe16 driver. This class tests and shows
use patterns for the sbe16 driver as a zmq driver process.
"""
def setUp(self):
InstrumentDriverIntegrationTestCase.setUp(self)
def assert_set_clock(self, time_param, time_override=None, time_format = "%d %b %Y %H:%M:%S", tolerance=DEFAULT_CLOCK_DIFF):
"""
Verify that we can set the clock
@param time_param: driver parameter
@param time_override: use this time instead of current time.
@param time_format: date time format
@param tolerance: how close to the set time should the get be?
"""
# Some seabirds tick the clock the instant you set it. So you set
# time 1, the get would be time 2. Others do it correctly and wait
# for a second before ticking. Hence the default tolerance of 1.
if time_override is None:
set_time = get_timestamp_delayed(time_format)
else:
set_time = time.strftime(time_format, time.localtime(time_override))
self.assert_set(time_param, set_time, no_get=True, startup=True)
self.assertTrue(self._is_time_set(time_param, set_time, time_format, tolerance))
def _is_time_set(self, time_param, expected_time, time_format = "%d %b %Y %H:%M:%S", tolerance=DEFAULT_CLOCK_DIFF):
"""
Verify the time is what we expect it to be, within a given tolerance
@param time_param: driver parameter
@param expected_time: what the time should be in seconds since unix epoch or formatted time string
@param time_format: date time format
@param tolerance: how close to the set time should the get be?
"""
log.debug("Expected time unformatted: %s", expected_time)
result_time = self.assert_get(time_param)
result_time_struct = time.strptime(result_time, time_format)
converted_time = timegm_to_float(result_time_struct)
if isinstance(expected_time, float):
expected_time_struct = time.localtime(expected_time)
else:
expected_time_struct = time.strptime(expected_time, time_format)
log.debug("Current Time: %s, Expected Time: %s", time.strftime("%d %b %y %H:%M:%S", result_time_struct),
time.strftime("%d %b %y %H:%M:%S", expected_time_struct))
log.debug("Current Time: %s, Expected Time: %s, Tolerance: %s",
converted_time, timegm_to_float(expected_time_struct), tolerance)
# Verify the clock is set within the tolerance
return abs(converted_time - timegm_to_float(expected_time_struct)) <= tolerance
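# Worked example (added for clarity, not part of the original tests): with the default
# format "%d %b %Y %H:%M:%S", a reported "01 Jan 2014 00:00:03" against an expected
# "01 Jan 2014 00:00:01" converts to epoch values 2 seconds apart, so _is_time_set
# returns True only when tolerance >= 2.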
def assert_clock_set(self, time_param, sync_clock_cmd = DriverEvent.ACQUIRE_STATUS, timeout = 60, tolerance=DEFAULT_CLOCK_DIFF):
"""
Verify the clock is set to at least the current date
"""
log.debug("verify clock is set to the current time")
timeout_time = time.time() + timeout
while not self._is_time_set(time_param, timegm_to_float(time.gmtime()), tolerance=tolerance):
log.debug("time isn't current. sleep for a bit")
# Run acquire status command to set clock parameter
self.assert_driver_command(sync_clock_cmd)
log.debug("T: %s T: %s", time.time(), timeout_time)
time.sleep(5)
self.assertLess(time.time(), timeout_time, msg="Timeout waiting for clock sync event")
def test_parameters(self):
"""
Test driver parameters and verify their type. Startup parameters also verify the parameter
value. This test confirms that parameters are being read/converted properly and that
the startup has been applied.
"""
self.assert_initialize_driver()
reply = self.driver_client.cmd_dvr('get_resource', Parameter.ALL)
self.assert_driver_parameters(reply, True)
def test_set(self):
"""
Test all set commands. Verify all exception cases.
"""
self.assert_initialize_driver()
# Verify we can set all parameters in bulk
new_values = {
Parameter.INTERVAL: 20,
Parameter.PUMP_MODE: 0,
Parameter.NCYCLES: 6
}
self.assert_set_bulk(new_values)
# Pump Mode
# x=0: No pump.
# x=1: Run pump for 0.5 sec before each sample.
# x=2: Run pump during each sample.
self.assert_set(Parameter.PUMP_MODE, 0)
self.assert_set(Parameter.PUMP_MODE, 1)
self.assert_set(Parameter.PUMP_MODE, 2)
self.assert_set_exception(Parameter.PUMP_MODE, -1)
self.assert_set_exception(Parameter.PUMP_MODE, 3)
self.assert_set_exception(Parameter.PUMP_MODE, 'bad')
# NCYCLE Range 1 - 100
self.assert_set(Parameter.NCYCLES, 1)
self.assert_set(Parameter.NCYCLES, 100)
self.assert_set_exception(Parameter.NCYCLES, 0)
self.assert_set_exception(Parameter.NCYCLES, 101)
self.assert_set_exception(Parameter.NCYCLES, -1)
self.assert_set_exception(Parameter.NCYCLES, 0.1)
self.assert_set_exception(Parameter.NCYCLES, 'bad')
# SampleInterval Range 10 - 14,400
self.assert_set(Parameter.INTERVAL, 10)
self.assert_set(Parameter.INTERVAL, 14400)
self.assert_set_exception(Parameter.INTERVAL, 9)
self.assert_set_exception(Parameter.INTERVAL, 14401)
self.assert_set_exception(Parameter.INTERVAL, -1)
self.assert_set_exception(Parameter.INTERVAL, 0.1)
self.assert_set_exception(Parameter.INTERVAL, 'bad')
# Read only parameters
self.assert_set_readonly(Parameter.ECHO, False)
self.assert_set_readonly(Parameter.OUTPUT_EXEC_TAG, False)
self.assert_set_readonly(Parameter.TXREALTIME, False)
self.assert_set_readonly(Parameter.BIOWIPER, False)
self.assert_set_readonly(Parameter.PTYPE, 1)
self.assert_set_readonly(Parameter.VOLT0, False)
self.assert_set_readonly(Parameter.VOLT1, False)
self.assert_set_readonly(Parameter.VOLT2, False)
self.assert_set_readonly(Parameter.VOLT3, False)
self.assert_set_readonly(Parameter.VOLT4, False)
self.assert_set_readonly(Parameter.VOLT5, False)
self.assert_set_readonly(Parameter.DELAY_BEFORE_SAMPLE, 1)
self.assert_set_readonly(Parameter.DELAY_AFTER_SAMPLE, 1)
self.assert_set_readonly(Parameter.SBE63, False)
self.assert_set_readonly(Parameter.SBE38, False)
self.assert_set_readonly(Parameter.SBE50, False)
self.assert_set_readonly(Parameter.WETLABS, False)
self.assert_set_readonly(Parameter.GTD, False)
self.assert_set_readonly(Parameter.OPTODE, False)
self.assert_set_readonly(Parameter.SYNCMODE, False)
self.assert_set_readonly(Parameter.SYNCWAIT, 1)
self.assert_set_readonly(Parameter.OUTPUT_FORMAT, 1)
self.assert_set_readonly(Parameter.LOGGING, False)
def test_startup_params(self):
"""
Verify that startup parameters are applied correctly. Generally this
happens in the driver discovery method.
"""
# Explicitly verify these values after discover. They should match
# the expected startup values.
get_values = {
Parameter.INTERVAL: 10,
Parameter.PUMP_MODE: 2,
Parameter.NCYCLES: 4
}
# Change the values of these parameters to something else before the
# driver is reinitialized. They should be blown away on reinit.
new_values = {
Parameter.INTERVAL: 20,
Parameter.PUMP_MODE: 0,
Parameter.NCYCLES: 6
}
self.assert_initialize_driver()
self.assert_startup_parameters(self.assert_driver_parameters, new_values, get_values)
# Start autosample and try again
self.assert_set_bulk(new_values)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
self.assert_startup_parameters(self.assert_driver_parameters)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
def test_commands(self):
"""
Run instrument commands from both command and streaming mode.
"""
self.assert_initialize_driver()
####
# First test in command mode
####
self.assert_driver_command(ProtocolEvent.CLOCK_SYNC)
self.assert_driver_command(ProtocolEvent.SCHEDULED_CLOCK_SYNC)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, regex=r'serial sync mode')
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, regex=r'serial sync mode')
####
# Test in streaming mode
####
# Put us in streaming
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
self.assert_driver_command(ProtocolEvent.SCHEDULED_CLOCK_SYNC)
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, regex=r'serial sync mode')
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)
####
# Test a bad command
####
self.assert_driver_command_exception('ima_bad_command', exception_class=InstrumentCommandException)
def test_autosample(self):
"""
Verify that we can enter streaming and that all particles are produced
properly.
Because we have to test for three different data particles we can't use
the common assert_sample_autosample method
"""
self.assert_initialize_driver()
self.assert_set(Parameter.INTERVAL, 10)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
self.assert_async_particle_generation(DataParticleType.CTD_PARSED, self.assert_particle_sample, timeout=60)
self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS, DataParticleType.DEVICE_STATUS, self.assert_particle_status)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)
def test_polled(self):
"""
Test that we can generate particles with commands
"""
self.assert_initialize_driver()
self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS, DataParticleType.DEVICE_STATUS, self.assert_particle_status)
self.assert_particle_generation(ProtocolEvent.ACQUIRE_SAMPLE, DataParticleType.CTD_PARSED, self.assert_particle_sample)
###
# Test scheduled events
###
def assert_calibration_coefficients(self):
"""
Verify a calibration particle was generated
"""
self.clear_events()
self.assert_async_particle_generation(DataParticleType.DEVICE_CALIBRATION, self.assert_particle_calibration_strain, timeout=120)
def assert_acquire_status(self):
"""
Verify a status particle was generated
"""
self.clear_events()
self.assert_async_particle_generation(DataParticleType.DEVICE_STATUS, self.assert_particle_status, timeout=120)
def test_scheduled_device_status_command(self):
"""
Verify the device status command can be triggered and run in command
"""
self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, self.assert_acquire_status, delay=120)
self.assert_current_state(ProtocolState.COMMAND)
def test_scheduled_device_status_autosample(self):
"""
Verify the device status command can be triggered and run in autosample
"""
self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, self.assert_acquire_status,
autosample_command=ProtocolEvent.START_AUTOSAMPLE, delay=180)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE)
def test_scheduled_clock_sync_command(self):
"""
Verify the scheduled clock sync is triggered and functions as expected
"""
timeout = 120
self.assert_scheduled_event(ScheduledJob.CLOCK_SYNC, delay=timeout)
self.assert_current_state(ProtocolState.COMMAND)
# Set the clock to some time in the past
# Need an easy way to do this now that DATE_TIME is read only
#self.assert_set_clock(Parameter.DATE_TIME, time_override=SBE_EPOCH)
# Check the clock until it is set correctly (by a scheduled event)
#self.assert_clock_set(Parameter.DATE_TIME, sync_clock_cmd=ProtocolEvent.GET_CONFIGURATION, timeout=timeout)
def test_scheduled_clock_sync_autosample(self):
"""
Verify the scheduled clock sync is triggered and functions as expected
"""
timeout = 240
self.assert_scheduled_event(ScheduledJob.CLOCK_SYNC, delay=timeout)
self.assert_current_state(ProtocolState.COMMAND)
# Set the clock to some time in the past
# Need an easy way to do this now that DATE_TIME is read only
#self.assert_set_clock(Parameter.DATE_TIME, time_override=SBE_EPOCH)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE)
# Check the clock until it is set correctly (by a scheduled event)
#self.assert_clock_set(Parameter.DATE_TIME, sync_clock_cmd=ProtocolEvent.GET_CONFIGURATION, timeout=timeout, tolerance=10)
def assert_cycle(self):
self.assert_current_state(ProtocolState.COMMAND)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
self.assert_async_particle_generation(DataParticleType.CTD_PARSED, self.assert_particle_sample, particle_count = 6, timeout=60)
self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS, DataParticleType.DEVICE_STATUS, self.assert_particle_status)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE)
self.assert_current_state(ProtocolState.COMMAND)
def test_discover(self):
"""
Verify we can discover from both command and auto sample modes
"""
self.assert_initialize_driver()
self.assert_cycle()
self.assert_cycle()
def test_metadata(self):
metadata = self.driver_client.cmd_dvr('get_config_metadata')
self.assertEqual(metadata, None) # must be connected
self.assert_initialize_driver()
metadata = self.driver_client.cmd_dvr('get_config_metadata')
log.debug("Metadata: %s", metadata)
self.assertTrue(isinstance(metadata, str))
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for #
# testing device specific capabilities #
###############################################################################
@attr('QUAL', group='mi')
class Sbe16plusQualTestCase(InstrumentDriverQualificationTestCase, SeaBird16plusMixin):
"""Qualification Test Container"""
def setUp(self):
InstrumentDriverQualificationTestCase.setUp(self)
def test_autosample(self):
"""
Verify autosample works and data particles are created
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.INTERVAL, 10)
self.assert_start_autosample()
self.assert_particle_async(DataParticleType.CTD_PARSED, self.assert_particle_sample)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_status, DataParticleType.DEVICE_STATUS, sample_count=1, timeout=20)
# Stop autosample and run a couple of commands.
self.assert_stop_autosample()
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_status, DataParticleType.DEVICE_STATUS, sample_count=1)
# Restart autosample and gather a couple samples
self.assert_sample_autosample(self.assert_particle_sample, DataParticleType.CTD_PARSED)
def assert_cycle(self):
self.assert_start_autosample()
self.assert_particle_async(DataParticleType.CTD_PARSED, self.assert_particle_sample)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_status, DataParticleType.DEVICE_STATUS, sample_count=1, timeout=20)
self.assert_stop_autosample()
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_status, DataParticleType.DEVICE_STATUS, sample_count=1)
def test_cycle(self):
"""
Verify we can bounce between command and streaming. We try it a few times to see if we can find a timeout.
"""
self.assert_enter_command_mode()
self.assert_cycle()
self.assert_cycle()
self.assert_cycle()
self.assert_cycle()
def test_poll(self):
"""
Verify that we can poll for a sample. Take sample for this instrument
Also poll for other engineering data streams.
"""
self.assert_enter_command_mode()
self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_sample, DataParticleType.CTD_PARSED, sample_count=1)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_status, DataParticleType.DEVICE_STATUS, sample_count=1)
def test_direct_access_telnet_mode(self):
"""
@brief This test manually tests that the Instrument Driver properly supports direct access
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_dashboard_info_by_id_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'dashboard_id' is set
if ('dashboard_id' not in params or
params['dashboard_id'] is None):
raise ValueError("Missing the required parameter `dashboard_id` when calling `get_dashboard_info_by_id_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'dashboard_id' in params:
path_params['dashboardId'] = params['dashboard_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/dashboard/info/{dashboardId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DashboardInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
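# Hedged usage sketch (added, not part of the generated client): how a caller typically
# drives the sync/async pattern above. The API class name and client construction are
# assumptions for illustration only; the method name and async_req behaviour come from
# the code itself.
#
# api = DashboardControllerApi(api_client)                    # hypothetical generated API class
# info = api.get_dashboard_info_by_id_using_get(dashboard_id)             # synchronous call
# thread = api.get_dashboard_info_by_id_using_get(dashboard_id, async_req=True)
# info = thread.get()                                         # resolve the async result later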
def get_edge_dashboards_using_get(self, edge_id, page_size, page, **kwargs): # noqa: E501
"""getEdgeDashboards # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_edge_dashboards_using_get(edge_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: edgeId (required)
:param str page_size: pageSize (required)
:param str page: page (required)
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:param int start_time: startTime
:param int end_time: endTime
:return: PageDataDashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_edge_dashboards_using_get_with_http_info(edge_id, page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_edge_dashboards_using_get_with_http_info(edge_id, page_size, page, **kwargs) # noqa: E501
return data
def get_edge_dashboards_using_get_with_http_info(self, edge_id, page_size, page, **kwargs): # noqa: E501
"""getEdgeDashboards # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_edge_dashboards_using_get_with_http_info(edge_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: edgeId (required)
:param str page_size: pageSize (required)
:param str page: page (required)
:param str text_search: textSearch
:param str sort_property: sortProperty
:param str sort_order: sortOrder
:param int start_time: startTime
:param int end_time: endTime
:return: PageDataDashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['edge_id', 'page_size', 'page', 'text_search', 'sort_property', 'sort_order', 'start_time', 'end_time'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_edge_dashboards_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'edge_id' is set
if ('edge_id' not in params or
params['edge_id'] is None):
raise ValueError("Missing the required parameter `edge_id` when calling `get_edge_dashboards_using_get`") # noqa: E501
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_edge_dashboards_using_get`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_edge_dashboards_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'edge_id' in params:
path_params['edgeId'] = params['edge_id'] # noqa: E501
query_params = []
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
if 'start_time' in params:
query_params.append(('startTime', params['start_time'])) # noqa: E501
if 'end_time' in params:
query_params.append(('endTime', params['end_time'])) # noqa: E501
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/edge/{edgeId}/dashboards{?textSearch,sortProperty,sortOrder,startTime,endTime,pageSize,page}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataDashboardInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
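# Illustrative note (added): for edge_id='e1', page_size='10', page='0' and the optional
# text_search='foo', the code above produces path_params == {'edgeId': 'e1'} and
# query_params == [('textSearch', 'foo'), ('pageSize', '10'), ('page', '0')], which
# call_api substitutes into the URL template.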
def get_home_dashboard_info_using_get(self, **kwargs): # noqa: E501
"""getHomeDashboardInfo # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_home_dashboard_info_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: HomeDashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_home_dashboard_info_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_home_dashboard_info_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_home_dashboard_info_using_get_with_http_info(self, **kwargs): # noqa: E501
"""getHomeDashboardInfo # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_home_dashboard_info_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: HomeDashboardInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_home_dashboard_info_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/dashboard/home/info', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HomeDashboardInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_home_dashboard_using_get(self, **kwargs): # noqa: E501
"""getHomeDashboard # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_home_dashboard_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: HomeDashboard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_home_dashboard_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_home_dashboard_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_home_dashboard_using_get_with_http_info(self, **kwargs): # noqa: E501
"""getHomeDashboard # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_home_dashboard_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: HomeDashboard
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_home_dashboard_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/dashboard/home', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HomeDashboard', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_max_datapoints_limit_using_get(self, **kwargs): # noqa: E501
"""getMaxDatapointsLimit # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_max_datapoints_limit_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: int
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_max_datapoints_limit_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_max_datapoints_limit_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_max_datapoints_limit_using_get_with_http_info(self, **kwargs): # noqa: E501
"""getMaxDatapointsLimit # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_max_datapoints_limit_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: int
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_max_datapoints_limit_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization']  # noqa: E501
OOooOOo . Oo0Ooo + Oo0Ooo % Oo0Ooo % O0
if 8 - 8: iII111i . Ii1I - i1IIi % OoO0O00 / I11i
if 13 - 13: Oo0Ooo / OoOoOO00 . I1ii11iIi11i . OOooOOo
ooooO0OO0O = 0
for IiIIi1IiiIiI in range ( 0 , IiI11 , 8 ) :
ii1i1I1111ii = byte_swap_64 ( struct . unpack ( "Q" , packet [ IiIIi1IiiIiI : IiIIi1IiiIiI + 8 ] ) [ 0 ] )
ooooO0OO0O <<= 64
ooooO0OO0O |= ii1i1I1111ii
if 31 - 31: o0oOOo0O0Ooo
self . remote_public_key = ooooO0OO0O
if 59 - 59: Oo0Ooo / Oo0Ooo
if 87 - 87: I1ii11iIi11i % OoOoOO00 + Ii1I . i11iIiiIii / Ii1I
if 32 - 32: Ii1I + IiII + I1ii11iIi11i
if 79 - 79: i1IIi / Ii1I
if 81 - 81: iIii1I11I1II1
if ( self . curve25519 ) :
ii1i1I1111ii = lisp_hex_string ( self . remote_public_key )
ii1i1I1111ii = ii1i1I1111ii . zfill ( 64 )
o000oO0oOOO = ""
for IiIIi1IiiIiI in range ( 0 , len ( ii1i1I1111ii ) , 2 ) :
o000oO0oOOO += chr ( int ( ii1i1I1111ii [ IiIIi1IiiIiI : IiIIi1IiiIiI + 2 ] , 16 ) )
if 23 - 23: OOooOOo
self . remote_public_key = o000oO0oOOO
if 68 - 68: OoooooooOO
if 18 - 18: Ii1I * OoO0O00
packet = packet [ IiI11 : : ]
return ( packet )
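# Descriptive note (added): the loop above reassembles self.remote_public_key from the
# packet, 8 bytes at a time, byte-swapping each 64-bit word and shifting it into a single
# big integer; when curve25519 is in use the integer is then re-encoded as a 32-character
# (64 hex digit) key string before the consumed bytes are stripped from the packet.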
if 89 - 89: OoO0O00 + oO0o % iIii1I11I1II1 + I11i / O0
if 38 - 38: ooOoO0o - o0oOOo0O0Ooo - O0 + ooOoO0o % OoOoOO00 . o0oOOo0O0Ooo
if 40 - 40: iIii1I11I1II1 * OoooooooOO * I1Ii111 - Ii1I + i11iIiiIii
if 81 - 81: OoO0O00 * OoooooooOO / iII111i
if 8 - 8: O0 * i1IIi - OoOoOO00 % I1IiiI / I1ii11iIi11i
if 39 - 39: I1ii11iIi11i . oO0o * II111iiii + I1IiiI - iIii1I11I1II1
if 56 - 56: IiII - Ii1I + i11iIiiIii * OoO0O00 % I1IiiI
if 37 - 37: iIii1I11I1II1 + IiII / I1Ii111 . OoooooooOO
class lisp_thread ( ) :
def __init__ ( self , name ) :
self . thread_name = name
self . thread_number = - 1
self . number_of_pcap_threads = 0
self . number_of_worker_threads = 0
self . input_queue = Queue . Queue ( )
self . input_stats = lisp_stats ( )
self . lisp_packet = lisp_packet ( None )
if 72 - 72: oO0o % ooOoO0o % OOooOOo
if 63 - 63: OoO0O00 . Ii1I % II111iiii / I11i - OoOoOO00
if 4 - 4: Oo0Ooo - O0 / I11i + O0 - oO0o * Oo0Ooo
if 25 - 25: I1IiiI
if 64 - 64: oO0o
if 80 - 80: o0oOOo0O0Ooo % iIii1I11I1II1
if 63 - 63: IiII * i11iIiiIii
if 86 - 86: I11i % I11i - OoOoOO00 + I1Ii111 / I1IiiI * OoooooooOO
if 26 - 26: II111iiii * iII111i + o0oOOo0O0Ooo / O0 + i1IIi - I11i
if 56 - 56: OOooOOo
if 76 - 76: i1IIi % iIii1I11I1II1 - o0oOOo0O0Ooo + IiII - I11i
if 81 - 81: I1ii11iIi11i + OoooooooOO - OOooOOo * O0
if 100 - 100: iIii1I11I1II1 - OoOoOO00
if 28 - 28: Oo0Ooo . O0 . I11i
if 60 - 60: II111iiii + I1Ii111 / oO0o % OoooooooOO - i1IIi
if 57 - 57: ooOoO0o
if 99 - 99: Oo0Ooo + I1Ii111 % ooOoO0o - o0oOOo0O0Ooo
if 52 - 52: I1ii11iIi11i
class lisp_control_header ( ) :
def __init__ ( self ) :
self . type = 0
self . record_count = 0
self . nonce = 0
self . rloc_probe = False
self . smr_bit = False
self . smr_invoked_bit = False
self . ddt_bit = False
self . to_etr = False
self . to_ms = False
self . info_reply = False
if 93 - 93: iII111i . i11iIiiIii
if 24 - 24: OOooOOo . OoO0O00 + I1Ii111 . oO0o - I1ii11iIi11i % iII111i
def decode ( self , packet ) :
O00oO00oOO00O = "BBBBQ"
ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
if ( len ( packet ) < ooOoooOoo0oO ) : return ( False )
if 49 - 49: O0 . Oo0Ooo / Ii1I
II1IooOO00Oo , I11ii1i1I , i11IIii1I11 , self . record_count , self . nonce = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
if 43 - 43: i11iIiiIii
if 65 - 65: O0 / iII111i . i1IIi * iII111i / iIii1I11I1II1 - oO0o
self . type = II1IooOO00Oo >> 4
if ( self . type == LISP_MAP_REQUEST ) :
self . smr_bit = True if ( II1IooOO00Oo & 0x01 ) else False
self . rloc_probe = True if ( II1IooOO00Oo & 0x02 ) else False
self . smr_invoked_bit = True if ( I11ii1i1I & 0x40 ) else False
if 93 - 93: OoOoOO00 % i11iIiiIii - Ii1I % OoO0O00
if ( self . type == LISP_ECM ) :
self . ddt_bit = True if ( II1IooOO00Oo & 0x04 ) else False
self . to_etr = True if ( II1IooOO00Oo & 0x02 ) else False
self . to_ms = True if ( II1IooOO00Oo & 0x01 ) else False
if 55 - 55: o0oOOo0O0Ooo . I1ii11iIi11i
if ( self . type == LISP_NAT_INFO ) :
self . info_reply = True if ( II1IooOO00Oo & 0x08 ) else False
if 63 - 63: oO0o
return ( True )
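# Illustrative note (added): the first unpacked byte carries the message type in its high
# nibble and per-type flag bits in its low nibble. For example, a first byte of 0x12
# decodes to type 1 (a Map-Request, assuming LISP_MAP_REQUEST == 1 as in RFC 6830) with
# bit 0x02 set, i.e. rloc_probe True and smr_bit False; these values follow directly from
# the shift/mask logic above rather than from any external specification.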
if 79 - 79: I1ii11iIi11i - oO0o - o0oOOo0O0Ooo . OOooOOo
if 65 - 65: i11iIiiIii . OoO0O00 % iII111i + IiII - i11iIiiIii
def is_info_request ( self ) :
return ( ( self . type == LISP_NAT_INFO and self . is_info_reply ( ) == False ) )
if 60 - 60: I1Ii111
if 14 - 14: Oo0Ooo % oO0o * iII111i - i11iIiiIii / I1ii11iIi11i * i11iIiiIii
def is_info_reply ( self ) :
return ( True if self . info_reply else False )
if 95 - 95: iIii1I11I1II1 + OoOoOO00 . I1IiiI + OoOoOO00 * I11i + OOooOOo
if 14 - 14: Ii1I - O0
def is_rloc_probe ( self ) :
return ( True if self . rloc_probe else False )
if 68 - 68: II111iiii - I1ii11iIi11i - OoO0O00 * iIii1I11I1II1 / I1IiiI * I1ii11iIi11i
if 45 - 45: I1Ii111 * I11i / iIii1I11I1II1 / I1IiiI % II111iiii
def is_smr ( self ) :
return ( True if self . smr_bit else False )
if 49 - 49: Ii1I / iII111i . iII111i . iII111i + i11iIiiIii % I11i
if 7 - 7: IiII * ooOoO0o + OoOoOO00
def is_smr_invoked ( self ) :
return ( True if self . smr_invoked_bit else False )
if 22 - 22: iII111i
if 48 - 48: I1ii11iIi11i . I1IiiI
def is_ddt ( self ) :
return ( True if self . ddt_bit else False )
if 73 - 73: O0 . I1Ii111 - OoooooooOO % I11i % i1IIi
if 14 - 14: I1Ii111 + Ii1I * Oo0Ooo
def is_to_etr ( self ) :
return ( True if self . to_etr else False )
if 49 - 49: Oo0Ooo
if 57 - 57: O0 * ooOoO0o - iII111i - iIii1I11I1II1 * iII111i
def is_to_ms ( self ) :
return ( True if self . to_ms else False )
if 9 - 9: IiII . I11i
if 23 - 23: O0 % OoooooooOO - O0 . I1IiiI + i11iIiiIii
if 96 - 96: ooOoO0o % O0
if 51 - 51: I1IiiI - iII111i / I1ii11iIi11i . I1ii11iIi11i + I1ii11iIi11i
if 87 - 87: II111iiii . Ii1I * OoO0O00
if 74 - 74: o0oOOo0O0Ooo % OoOoOO00 . iII111i % I1Ii111 . O0 % II111iiii
if 5 - 5: oO0o - OoooooooOO / OoOoOO00
if 30 - 30: I11i % o0oOOo0O0Ooo + i1IIi * OoooooooOO * OoO0O00 - II111iiii
if 55 - 55: OoO0O00
if 20 - 20: ooOoO0o * I1Ii111 * o0oOOo0O0Ooo - ooOoO0o

# Source: millerda/seaborn, file seaborn/tests/test_statistics.py
import numpy as np
import pandas as pd
try:
import statsmodels.distributions as smdist
except ImportError:
smdist = None
import pytest
from numpy.testing import assert_array_equal, assert_array_almost_equal
from .._statistics import (
KDE,
Histogram,
ECDF,
EstimateAggregator,
_validate_errorbar_arg,
_no_scipy,
)
class DistributionFixtures:
@pytest.fixture
def x(self, rng):
return rng.normal(0, 1, 100)
@pytest.fixture
def y(self, rng):
return rng.normal(0, 5, 100)
@pytest.fixture
def weights(self, rng):
return rng.uniform(0, 5, 100)
class TestKDE:
def integrate(self, y, x):
y = np.asarray(y)
x = np.asarray(x)
dx = np.diff(x)
return (dx * y[:-1] + dx * y[1:]).sum() / 2
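    # Worked example (added for clarity, not part of the original tests): for
    # y = [0, 1, 2] sampled at x = [0, 1, 2], the trapezoid rule above gives
    # (1*0 + 1*1)/2 + (1*1 + 1*2)/2 = 2.0, matching np.trapz(y, x).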
def test_gridsize(self, rng):
x = rng.normal(0, 3, 1000)
n = 200
kde = KDE(gridsize=n)
density, support = kde(x)
assert density.size == n
assert support.size == n
def test_cut(self, rng):
x = rng.normal(0, 3, 1000)
kde = KDE(cut=0)
_, support = kde(x)
assert support.min() == x.min()
assert support.max() == x.max()
cut = 2
bw_scale = .5
bw = x.std() * bw_scale
kde = KDE(cut=cut, bw_method=bw_scale, gridsize=1000)
_, support = kde(x)
assert support.min() == pytest.approx(x.min() - bw * cut, abs=1e-2)
assert support.max() == pytest.approx(x.max() + bw * cut, abs=1e-2)
def test_clip(self, rng):
x = rng.normal(0, 3, 100)
clip = -1, 1
kde = KDE(clip=clip)
_, support = kde(x)
assert support.min() >= clip[0]
assert support.max() <= clip[1]
def test_density_normalization(self, rng):
x = rng.normal(0, 3, 1000)
kde = KDE()
density, support = kde(x)
assert self.integrate(density, support) == pytest.approx(1, abs=1e-5)
@pytest.mark.skipif(_no_scipy, reason="Test requires scipy")
def test_cumulative(self, rng):
x = rng.normal(0, 3, 1000)
kde = KDE(cumulative=True)
density, _ = kde(x)
assert density[0] == pytest.approx(0, abs=1e-5)
assert density[-1] == pytest.approx(1, abs=1e-5)
def test_cached_support(self, rng):
x = rng.normal(0, 3, 100)
kde = KDE()
kde.define_support(x)
_, support = kde(x[(x > -1) & (x < 1)])
assert_array_equal(support, kde.support)
def test_bw_method(self, rng):
x = rng.normal(0, 3, 100)
kde1 = KDE(bw_method=.2)
kde2 = KDE(bw_method=2)
d1, _ = kde1(x)
d2, _ = kde2(x)
assert np.abs(np.diff(d1)).mean() > np.abs(np.diff(d2)).mean()
def test_bw_adjust(self, rng):
x = rng.normal(0, 3, 100)
kde1 = KDE(bw_adjust=.2)
kde2 = KDE(bw_adjust=2)
d1, _ = kde1(x)
d2, _ = kde2(x)
assert np.abs(np.diff(d1)).mean() > np.abs(np.diff(d2)).mean()
def test_bivariate_grid(self, rng):
n = 100
x, y = rng.normal(0, 3, (2, 50))
kde = KDE(gridsize=n)
density, (xx, yy) = kde(x, y)
assert density.shape == (n, n)
assert xx.size == n
assert yy.size == n
def test_bivariate_normalization(self, rng):
x, y = rng.normal(0, 3, (2, 50))
kde = KDE(gridsize=100)
density, (xx, yy) = kde(x, y)
dx = xx[1] - xx[0]
dy = yy[1] - yy[0]
total = density.sum() * (dx * dy)
assert total == pytest.approx(1, abs=1e-2)
@pytest.mark.skipif(_no_scipy, reason="Test requires scipy")
def test_bivariate_cumulative(self, rng):
x, y = rng.normal(0, 3, (2, 50))
kde = KDE(gridsize=100, cumulative=True)
density, _ = kde(x, y)
assert density[0, 0] == pytest.approx(0, abs=1e-2)
assert density[-1, -1] == pytest.approx(1, abs=1e-2)
class TestHistogram(DistributionFixtures):
def test_string_bins(self, x):
h = Histogram(bins="sqrt")
edges = h.define_bin_edges(x)
assert_array_equal(edges, np.histogram_bin_edges(x, "sqrt"))
def test_int_bins(self, x):
n = 24
h = Histogram(bins=n)
edges = h.define_bin_edges(x)
assert len(edges) == n + 1
def test_array_bins(self, x):
bins = [-3, -2, 1, 2, 3]
h = Histogram(bins=bins)
edges = h.define_bin_edges(x)
assert_array_equal(edges, bins)
def test_bivariate_string_bins(self, x, y):
s1, s2 = "sqrt", "fd"
h = Histogram(bins=s1)
e1, e2 = h.define_bin_edges(x, y)
assert_array_equal(e1, np.histogram_bin_edges(x, s1))
assert_array_equal(e2, np.histogram_bin_edges(y, s1))
h = Histogram(bins=(s1, s2))
e1, e2 = h.define_bin_edges(x, y)
assert_array_equal(e1, np.histogram_bin_edges(x, s1))
assert_array_equal(e2, np.histogram_bin_edges(y, s2))
def test_bivariate_int_bins(self, x, y):
b1, b2 = 5, 10
h = Histogram(bins=b1)
e1, e2 = h.define_bin_edges(x, y)
assert len(e1) == b1 + 1
assert len(e2) == b1 + 1
h = Histogram(bins=(b1, b2))
e1, e2 = h.define_bin_edges(x, y)
assert len(e1) == b1 + 1
assert len(e2) == b2 + 1
def test_bivariate_array_bins(self, x, y):
b1 = [-3, -2, 1, 2, 3]
b2 = [-5, -2, 3, 6]
h = Histogram(bins=b1)
e1, e2 = h.define_bin_edges(x, y)
assert_array_equal(e1, b1)
assert_array_equal(e2, b1)
h = Histogram(bins=(b1, b2))
e1, e2 = h.define_bin_edges(x, y)
assert_array_equal(e1, b1)
assert_array_equal(e2, b2)
def test_binwidth(self, x):
binwidth = .5
h = Histogram(binwidth=binwidth)
edges = h.define_bin_edges(x)
assert np.all(np.diff(edges) == binwidth)
def test_bivariate_binwidth(self, x, y):
w1, w2 = .5, 1
h = Histogram(binwidth=w1)
e1, e2 = h.define_bin_edges(x, y)
assert np.all(np.diff(e1) == w1)
assert np.all(np.diff(e2) == w1)
h = Histogram(binwidth=(w1, w2))
e1, e2 = h.define_bin_edges(x, y)
assert np.all(np.diff(e1) == w1)
assert np.all(np.diff(e2) == w2)
def test_binrange(self, x):
binrange = (-4, 4)
h = Histogram(binrange=binrange)
edges = h.define_bin_edges(x)
assert edges.min() == binrange[0]
assert edges.max() == binrange[1]
def test_bivariate_binrange(self, x, y):
r1, r2 = (-4, 4), (-10, 10)
h = Histogram(binrange=r1)
e1, e2 = h.define_bin_edges(x, y)
assert e1.min() == r1[0]
assert e1.max() == r1[1]
assert e2.min() == r1[0]
assert e2.max() == r1[1]
h = Histogram(binrange=(r1, r2))
e1, e2 = h.define_bin_edges(x, y)
assert e1.min() == r1[0]
assert e1.max() == r1[1]
assert e2.min() == r2[0]
assert e2.max() == r2[1]
def test_discrete_bins(self, rng):
x = rng.binomial(20, .5, 100)
h = Histogram(discrete=True)
edges = h.define_bin_edges(x)
expected_edges = np.arange(x.min(), x.max() + 2) - .5
assert_array_equal(edges, expected_edges)
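    # Worked example (added): with discrete=True and x spanning 2..5, the edges above are
    # np.arange(2, 7) - .5 == [1.5, 2.5, 3.5, 4.5, 5.5], i.e. one unit-wide bin centered
    # on each integer value.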
def test_histogram(self, x):
h = Histogram()
heights, edges = h(x)
heights_mpl, edges_mpl = np.histogram(x, bins="auto")
assert_array_equal(heights, heights_mpl)
assert_array_equal(edges, edges_mpl)
def test_count_stat(self, x):
h = Histogram(stat="count")
heights, _ = h(x)
assert heights.sum() == len(x)
def test_density_stat(self, x):
h = Histogram(stat="density")
heights, edges = h(x)
assert (heights * np.diff(edges)).sum() == 1
def test_probability_stat(self, x):
h = Histogram(stat="probability")
heights, _ = h(x)
assert heights.sum() == 1
def test_frequency_stat(self, x):
h = Histogram(stat="frequency")
heights, edges = h(x)
assert (heights * np.diff(edges)).sum() == len(x)
def test_cumulative_count(self, x):
h = Histogram(stat="count", cumulative=True)
heights, _ = h(x)
assert heights[-1] == len(x)
def test_cumulative_density(self, x):
h = Histogram(stat="density", cumulative=True)
heights, _ = h(x)
assert heights[-1] == 1
def test_cumulative_probability(self, x):
h = Histogram(stat="probability", cumulative=True)
heights, _ = h(x)
assert heights[-1] == 1
def test_cumulative_frequency(self, x):
h = Histogram(stat="frequency", cumulative=True)
heights, _ = h(x)
assert heights[-1] == len(x)
def test_bivariate_histogram(self, x, y):
h = Histogram()
heights, edges = h(x, y)
bins_mpl = (
np.histogram_bin_edges(x, "auto"),
np.histogram_bin_edges(y, "auto"),
)
heights_mpl, *edges_mpl = np.histogram2d(x, y, bins_mpl)
assert_array_equal(heights, heights_mpl)
assert_array_equal(edges[0], edges_mpl[0])
assert_array_equal(edges[1], edges_mpl[1])
def test_bivariate_count_stat(self, x, y):
h = Histogram(stat="count")
heights, _ = h(x, y)
assert heights.sum() == len(x)
def test_bivariate_density_stat(self, x, y):
h = Histogram(stat="density")
heights, (edges_x, edges_y) = h(x, y)
areas = np.outer(np.diff(edges_x), np.diff(edges_y))
assert (heights * areas).sum() == pytest.approx(1)
def test_bivariate_probability_stat(self, x, y):
h = Histogram(stat="probability")
heights, _ = h(x, y)
assert heights.sum() == 1
def test_bivariate_frequency_stat(self, x, y):
h = Histogram(stat="frequency")
heights, (x_edges, y_edges) = h(x, y)
area = np.outer(np.diff(x_edges), np.diff(y_edges))
assert (heights * area).sum() == len(x)
def test_bivariate_cumulative_count(self, x, y):
h = Histogram(stat="count", cumulative=True)
heights, _ = h(x, y)
assert heights[-1, -1] == len(x)
def test_bivariate_cumulative_density(self, x, y):
h = Histogram(stat="density", cumulative=True)
heights, _ = h(x, y)
assert heights[-1, -1] == pytest.approx(1)
def test_bivariate_cumulative_frequency(self, x, y):
h = Histogram(stat="frequency", cumulative=True)
heights, _ = h(x, y)
assert heights[-1, -1] == len(x)
def test_bivariate_cumulative_probability(self, x, y):
h = Histogram(stat="probability", cumulative=True)
heights, _ = h(x, y)
assert heights[-1, -1] == pytest.approx(1)
def test_bad_stat(self):
with pytest.raises(ValueError):
Histogram(stat="invalid")
class TestECDF(DistributionFixtures):
def test_univariate_proportion(self, x):
ecdf = ECDF()
stat, vals = ecdf(x)
assert_array_equal(vals[1:], np.sort(x))
assert_array_almost_equal(stat[1:], np.linspace(0, 1, len(x) + 1)[1:])
assert stat[0] == 0
def test_univariate_count(self, x):
ecdf = ECDF(stat="count")
stat, vals = ecdf(x)
assert_array_equal(vals[1:], np.sort(x))
assert_array_almost_equal(stat[1:], np.arange(len(x)) + 1)
assert stat[0] == 0
def test_univariate_proportion_weights(self, x, weights):
ecdf = ECDF()
stat, vals = ecdf(x, weights=weights)
assert_array_equal(vals[1:], np.sort(x))
expected_stats = weights[x.argsort()].cumsum() / weights.sum()
assert_array_almost_equal(stat[1:], expected_stats)
assert stat[0] == 0
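    # Worked example (added): for x = [3, 1, 2] with weights = [1, 2, 1], sorting x gives
    # [1, 2, 3] with weights reordered to [2, 1, 1]; the cumulative sums [2, 3, 4] divided
    # by the total weight 4 yield the expected proportions [0.5, 0.75, 1.0].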
def test_univariate_count_weights(self, x, weights):
ecdf = ECDF(stat="count")
stat, vals = ecdf(x, weights=weights)
assert_array_equal(vals[1:], np.sort(x))
assert_array_almost_equal(stat[1:], weights[x.argsort()].cumsum())
assert stat[0] == 0
@pytest.mark.skipif(smdist is None, reason="Requires statsmodels")
def test_against_statsmodels(self, x):
sm_ecdf = smdist.empirical_distribution.ECDF(x)
ecdf = ECDF()
stat, vals = ecdf(x)
assert_array_equal(vals, sm_ecdf.x)
assert_array_almost_equal(stat, sm_ecdf.y)
ecdf = ECDF(complementary=True)
stat, vals = ecdf(x)
assert_array_equal(vals, sm_ecdf.x)
assert_array_almost_equal(stat, sm_ecdf.y[::-1])
def test_invalid_stat(self, x):
with pytest.raises(ValueError, match="`stat` must be one of"):
ECDF(stat="density")
def test_bivariate_error(self, x, y):
with pytest.raises(NotImplementedError, match="Bivariate ECDF"):
ecdf = ECDF()
ecdf(x, y)
class TestEstimateAggregator:
def test_func_estimator(self, long_df):
func = np.mean
agg = EstimateAggregator(func)
out = agg(long_df, "x")
assert out["x"] == func(long_df["x"])
def test_name_estimator(self, long_df):
agg = EstimateAggregator("mean")
out = agg(long_df, "x")
assert out["x"] == long_df["x"].mean()
def test_se_errorbars(self, long_df):
agg = EstimateAggregator("mean", | |
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Step')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='Step', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Step'):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id), input_name='id')), ))
if self.creationTimestamp is not None and 'creationTimestamp' not in already_processed:
already_processed.add('creationTimestamp')
outfile.write(' creationTimestamp="%s"' % self.gds_format_integer(self.creationTimestamp, input_name='creationTimestamp'))
if self.definition is not None and 'definition' not in already_processed:
already_processed.add('definition')
outfile.write(' definition=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.definition), input_name='definition')), ))
if self.previousStep is not None and 'previousStep' not in already_processed:
already_processed.add('previousStep')
outfile.write(' previousStep=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.previousStep), input_name='previousStep')), ))
if self.nextStep is not None and 'nextStep' not in already_processed:
already_processed.add('nextStep')
outfile.write(' nextStep=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.nextStep), input_name='nextStep')), ))
if self.message is not None and 'message' not in already_processed:
already_processed.add('message')
outfile.write(' message=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.message), input_name='message')), ))
if self.button is not None and 'button' not in already_processed:
already_processed.add('button')
outfile.write(' button=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.button), input_name='button')), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespaceprefix_='', name_='Step', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
self.id = value
value = find_attr_value_('creationTimestamp', node)
if value is not None and 'creationTimestamp' not in already_processed:
already_processed.add('creationTimestamp')
try:
self.creationTimestamp = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('definition', node)
if value is not None and 'definition' not in already_processed:
already_processed.add('definition')
self.definition = value
value = find_attr_value_('previousStep', node)
if value is not None and 'previousStep' not in already_processed:
already_processed.add('previousStep')
self.previousStep = value
value = find_attr_value_('nextStep', node)
if value is not None and 'nextStep' not in already_processed:
already_processed.add('nextStep')
self.nextStep = value
value = find_attr_value_('message', node)
if value is not None and 'message' not in already_processed:
already_processed.add('message')
self.message = value
value = find_attr_value_('button', node)
if value is not None and 'button' not in already_processed:
already_processed.add('button')
self.button = value
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Step
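# Hedged usage sketch (added, not part of the generated bindings): round-tripping a Step
# through export() and build(). The StringIO buffer and ElementTree parsing are assumptions,
# and the class relies on module-level helpers (showIndent, quote_attrib, the gds_* methods)
# generated alongside it, so treat this as an illustration rather than a guaranteed recipe.
#
# import io
# from xml.etree import ElementTree as etree
#
# step = Step(id='step-1', creationTimestamp=1555000000, message='hello')
# buf = io.StringIO()
# step.export(buf, level=0, name_='Step')        # serialize the attributes to XML
# node = etree.fromstring(buf.getvalue())
# rebuilt = Step.factory().build(node)           # parse the attributes back into a Step
# assert rebuilt.get_message() == 'hello'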
class BaseMessageStep(Step):
subclass = None
superclass = Step
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, extensiontype_=None):
self.original_tagname_ = None
super(BaseMessageStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, extensiontype_, )
self.receivedTimestamp = _cast(int, receivedTimestamp)
self.acknowledgedTimestamp = _cast(int, acknowledgedTimestamp)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, BaseMessageStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if BaseMessageStep.subclass:
return BaseMessageStep.subclass(*args_, **kwargs_)
else:
return BaseMessageStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_receivedTimestamp(self): return self.receivedTimestamp
def set_receivedTimestamp(self, receivedTimestamp): self.receivedTimestamp = receivedTimestamp
def get_acknowledgedTimestamp(self): return self.acknowledgedTimestamp
def set_acknowledgedTimestamp(self, acknowledgedTimestamp): self.acknowledgedTimestamp = acknowledgedTimestamp
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
super(BaseMessageStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='BaseMessageStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('BaseMessageStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='BaseMessageStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='BaseMessageStep', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='BaseMessageStep'):
super(BaseMessageStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='BaseMessageStep')
if self.receivedTimestamp is not None and 'receivedTimestamp' not in already_processed:
already_processed.add('receivedTimestamp')
outfile.write(' receivedTimestamp="%s"' % self.gds_format_integer(self.receivedTimestamp, input_name='receivedTimestamp'))
if self.acknowledgedTimestamp is not None and 'acknowledgedTimestamp' not in already_processed:
already_processed.add('acknowledgedTimestamp')
outfile.write(' acknowledgedTimestamp="%s"' % self.gds_format_integer(self.acknowledgedTimestamp, input_name='acknowledgedTimestamp'))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespaceprefix_='', name_='BaseMessageStep', fromsubclass_=False, pretty_print=True):
super(BaseMessageStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('receivedTimestamp', node)
if value is not None and 'receivedTimestamp' not in already_processed:
already_processed.add('receivedTimestamp')
try:
self.receivedTimestamp = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('acknowledgedTimestamp', node)
if value is not None and 'acknowledgedTimestamp' not in already_processed:
already_processed.add('acknowledgedTimestamp')
try:
self.acknowledgedTimestamp = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(BaseMessageStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(BaseMessageStep, self).buildChildren(child_, node, nodeName_, True)
pass
# end class BaseMessageStep
class MessageStep(BaseMessageStep):
subclass = None
superclass = BaseMessageStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, answer=None):
self.original_tagname_ = None
super(MessageStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, )
self.answer = _cast(None, answer)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, MessageStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if MessageStep.subclass:
return MessageStep.subclass(*args_, **kwargs_)
else:
return MessageStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_answer(self): return self.answer
def set_answer(self, answer): self.answer = answer
def hasContent_(self):
if (
super(MessageStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='MessageStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('MessageStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MessageStep')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='MessageStep', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='MessageStep'):
super(MessageStep, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MessageStep')
if self.answer is not None and 'answer' not in already_processed:
already_processed.add('answer')
outfile.write(' answer=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.answer), input_name='answer')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='MessageStep', fromsubclass_=False, pretty_print=True):
super(MessageStep, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('answer', node)
if value is not None and 'answer' not in already_processed:
already_processed.add('answer')
self.answer = value
super(MessageStep, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(MessageStep, self).buildChildren(child_, node, nodeName_, True)
pass
# end class MessageStep
class WidgetStep(BaseMessageStep):
subclass = None
superclass = BaseMessageStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, extensiontype_=None):
self.original_tagname_ = None
super(WidgetStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, extensiontype_, )
self.displayValue = _cast(None, displayValue)
self.formButton = _cast(None, formButton)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, WidgetStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if WidgetStep.subclass:
return WidgetStep.subclass(*args_, **kwargs_)
else:
return WidgetStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_displayValue(self): return self.displayValue
def set_displayValue(self, displayValue): self.displayValue = displayValue
def get_formButton(self): return self.formButton
def set_formButton(self, formButton): self.formButton = formButton
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def validate_FormButton(self, value):
# Validate type FormButton, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['positive', 'negative']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on FormButton' % {"value" : value.encode("utf-8")} )
def hasContent_(self):
if (
super(WidgetStep, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='WidgetStep', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('WidgetStep')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
import numpy as np
import click
import time
import pygame
import pygame.locals as pyloc
import librosa as lr
import ffmpeg
import logging
import re
import pyaudio
import subprocess
import json
import os
import signal
import pdb
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
playlog = log.getChild('playback')
class PlayArgs:
def __init__(self, mouse_pos, position_offset, window_size, speed,
normal_speed, pause, set_bookmark, goto_bookmark, exit):
self.goto_bookmark = goto_bookmark
self.set_bookmark = set_bookmark
self.normal_speed = normal_speed
self.window_size = window_size
self.speed = speed
self.exit = exit
self.pause = pause
self.mouse_pos = mouse_pos
self.position_offset = position_offset
def got_command(self):
return self.pause or self.mouse_pos or self.position_offset or \
self.exit or self.speed or self.window_size or \
self.normal_speed or self.set_bookmark or self.goto_bookmark
# TODO log the minimum and maximum time that could be required before
# the silence cutter can kick in, based on the BLOCK_LENGTH, speed, etc.
# TODO put video playback into a separate process to reduce lag
# TODO if a command is issued always draw the stats surface for the specified
# amount of time
# FIXME if the playback speed is less than one, after some time a buffer
# underflow exception is raised
# TODO create fadein fadeout effect for stats bar
# TODO make it so that you can see the playbar always without resizing
# TODO make it so that you can only scrub through the timeline when you are on it
# TODO make it so that the time of a point on the progressbar is displayed when
# you hover over the progressbar
# TODO enable selection of which audiotrack to play
# TODO Make it so that you can install via pip (the executable)
# (use setuptools? look at click documentation)
# TODO create tests for different file types
# FIXME when reaching the end of a .ts file that is currently being written
# the video resets to the position of the play_from parameter play_from_pos
# was invoked with. This happens when the speed is 2 and the difference
# between video_position and length_of_file is too small.
# TODO allow fractional speed
# TODO make it work for audio files
# TODO create command-line documentation of the in-window controls
# TODO add speed modifiers in timeline
# IFNEEDED create audio syncpoints. Prestart new audio and video streams
# (or only one of them) and then switch to them at a specific sync point
# (some point in time)
# IFNEEDED reimplement the simple unbuffered speedup procedures
# (because they run faster and do not lag)
# NICE you can stream youtube videos
# TODO Write tests for this buffer
class NumpyBuffer:
def __init__(self, size, dtype):
self.buffer = np.zeros(size, dtype=dtype)
self._buffer_len = size
self._write_idx = 0
self._read_idx = 0
self._fill_level = 0
@property
def fill_level(self):
return self._fill_level
@fill_level.setter
def fill_level(self, value):
if value > self._buffer_len:
raise Exception("Buffer overflow")
if value < 0:
raise Exception("Buffer underflow")
self._fill_level = value
def peek(self, n):
if n > self._buffer_len * 2:
raise Exception("Can't read more than twice the buffer size.")
rem = self._remaining_read_capacity()
if n <= rem:
return self.buffer[self._read_idx:n + self._read_idx]
else:
rem_n = n - rem
a = self.buffer[self._read_idx:]
b = self.buffer[:rem_n]
return np.concatenate((a, b))
def read(self, n):
r = self.peek(n)
self.advance_r(n)
return r
def write(self, arr):
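# The buffer is circular: if the incoming chunk does not fit before the end
# of the underlying array, the copy is split in two and the remainder wraps
# around to the start (second branch below).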
if len(arr) > self._buffer_len * 2:
raise Exception("Can't write more than twice the buffer size.")
arr_len = len(arr)
if arr_len <= (self._buffer_len - self._write_idx):
self.buffer[self._write_idx:self._write_idx + arr_len] = arr
else:
rem = self._remaining_write_capacity()
self.buffer[self._write_idx:] = arr[:rem]
rem_a = len(arr) - rem
self.buffer[:rem_a] = arr[rem:]
self._advance_w(arr_len)
def _remaining_write_capacity(self):
return self._buffer_len - self._write_idx
def _remaining_read_capacity(self):
return self._buffer_len - self._read_idx
def _advance_w(self, x):
self.fill_level += x
self._write_idx = (self._write_idx + x) % self._buffer_len
def advance_r(self, x):
self.fill_level -= x
self._read_idx = (self._read_idx + x) % self._buffer_len
def test_buffer():
b = NumpyBuffer(16, np.float32)
for i in range(100):
arr = np.array([1, 2, 8], dtype=np.float32)
b.write(arr)
assert (b.peek(3) == arr).all()
assert (b.read(3) == arr).all()
class EventManager:
def __init__(self, speed):
signal.signal(signal.SIGINT, self.set_exit)
signal.signal(signal.SIGTERM, self.set_exit)
self.exit = None
self.time_last_mouse_move = 0
self.last_mouse_pos = None
self.last_vid_resize = None
self.speed = speed
def set_exit(self, signum, frame):
self.exit = True
log.debug('Exit flag set')
def handle_events(self, screen_size, stats_survace_x_size):
events = pygame.event.get()
play_offset = None
pause = None
speed_changed = False
window_size = None
mouse_button_on_stats_surf = None
screen_adjusted = False
normal_speed = False
set_bookmark = None
goto_bookmark = None
b = None
mouse_pos = pygame.mouse.get_pos()
if mouse_pos != self.last_mouse_pos:
self.last_mouse_pos = mouse_pos
self.time_last_mouse_move = time.time()
self.mouse_moved = True
else:
self.mouse_moved = False
ctrl_down = pygame.key.get_mods() & pygame.KMOD_CTRL
shift_down = pygame.key.get_mods() & pygame.KMOD_SHIFT
jump_coef = 2 if ctrl_down else 1
jump_coef *= 0.5 if shift_down else 1
for event in events:
if event.type == pyloc.QUIT:
self.set_exit(None, None)
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.set_exit(None, None)
elif event.key == pygame.K_SPACE:
pause = True
elif event.key == pygame.K_LEFT:
play_offset = -10 * self.speed * jump_coef
elif event.key == pygame.K_RIGHT:
play_offset = 10 * self.speed * jump_coef
elif event.key in [pygame.K_KP_PLUS, pygame.K_PLUS]:
self.speed = self.speed * 1.1
speed_changed = True
elif event.key in [pygame.K_KP_MINUS, pygame.K_MINUS]:
self.speed = self.speed * 0.9
speed_changed = True
elif event.key == pygame.K_r:
normal_speed = True
if event.key == pygame.K_0: b = 0
if event.key == pygame.K_1: b = 1
if event.key == pygame.K_2: b = 2
if event.key == pygame.K_3: b = 3
if event.key == pygame.K_4: b = 4
if event.key == pygame.K_5: b = 5
if event.key == pygame.K_6: b = 6
if event.key == pygame.K_7: b = 7
if event.key == pygame.K_8: b = 8
if event.key == pygame.K_9: b = 9
if b is not None:
if pygame.key.get_mods() & pygame.KMOD_CTRL:
set_bookmark = b  # remember which bookmark slot was pressed
else:
goto_bookmark = b
elif event.type == pygame.MOUSEBUTTONDOWN:
if mouse_pos[1] > screen_size[1] - stats_survace_x_size:
mouse_button_on_stats_surf = True
else:
pause = True
if event.type == pyloc.VIDEORESIZE:
self.last_vid_resize = event.dict['size']
screen_adjusted = True
log.debug(f'resize: {self.last_vid_resize}')
if not screen_adjusted and self.last_vid_resize:
window_size = self.last_vid_resize
self.last_vid_resize = None
pygame.display.flip()
speed = self.speed if speed_changed else None
mouse_pos = mouse_pos if mouse_button_on_stats_surf else None
return PlayArgs(mouse_pos, play_offset, window_size,
speed, normal_speed, pause, set_bookmark,
goto_bookmark, self.exit)
class AudioPlayer:
def __init__(self, pyaudio_instance, audio_sr, speed, silence_speedup,
file, play_from, ffmpeg_loglevel, volume, audio_channel):
self.volume = volume
self.pyaudio_instance = pyaudio_instance
self.audio_sr = audio_sr
self.speed = speed
self.silence_speedup = silence_speedup
self.file = file
self.play_from = play_from
self.ffmpeg_loglevel = ffmpeg_loglevel
self.BLOCK_LENGTH = 1024 * 24
self.AUDIO_DROP_SKIP_DURATION = \
self.BLOCK_LENGTH / audio_sr / speed * silence_speedup / 2
self.AUDIO_THRESHHOLD = 0.1
self.HORIZON_COEF = 4
self.FRAME_LENGTH = \
int(self.BLOCK_LENGTH * self.HORIZON_COEF * self.speed)
self.ADVANCE_LENGTH = int(self.BLOCK_LENGTH * self.speed)
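# FRAME_LENGTH is the number of raw input samples fetched per analysis
# window (one output BLOCK_LENGTH scaled by playback speed and the
# look-ahead HORIZON_COEF); ADVANCE_LENGTH is how many input samples the
# read pointer consumes per audio callback at the current speed.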
self.n_droped = 0
self.audio_stream = create_ffmpeg_audio_stream(
file, play_from, ffmpeg_loglevel, audio_channel)
self.buff = NumpyBuffer(self.FRAME_LENGTH * 20, np.float32)
i = np.frombuffer(
self.audio_stream.stdout.read(self.FRAME_LENGTH * 4),
np.float32)
self.buff.write(i)
self.audio_out_stream = pyaudio_instance.open(
format=pyaudio.paFloat32,
channels=1,
rate=audio_sr * 2,
frames_per_buffer=self.BLOCK_LENGTH,
output=True,
stream_callback=self._callback_ff
)
self.first_callback = True
self.trigger_last_write = False
self.last_write_triggered = False
playlog.debug('Audioplayer started')
def _callback_ff(self, in_data, frame_count, time_info, status):
while self.buff.fill_level < self.FRAME_LENGTH * 2:
s = self.audio_stream.stdout.read(self.ADVANCE_LENGTH * 4)
if len(s) == 0:
playlog.debug("Audiostream end reached")
return None, pyaudio.paComplete
i = np.frombuffer(s, np.float32)
self.buff.write(i)
frame_1 = self.buff.peek(self.FRAME_LENGTH)
self.buff.advance_r(self.ADVANCE_LENGTH)
frame_2 = self.buff.peek(self.FRAME_LENGTH)
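# Two overlapping windows (offset by ADVANCE_LENGTH) are time-stretched
# independently and cross-faded with linear ramps below, so consecutive
# callback blocks join without audible clicks.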
data1 = lr.effects.time_stretch(
frame_1, self.speed, center=False)
data2 = lr.effects.time_stretch(
frame_2, self.speed, center=False)
a1 = data2[:self.BLOCK_LENGTH]
a2 = np.linspace(0, 1, self.BLOCK_LENGTH)
a = a1 * a2
b1 = data1[self.BLOCK_LENGTH:self.BLOCK_LENGTH*2]
b2 = np.linspace(1, 0, self.BLOCK_LENGTH)
b = b1 * b2
data = (a + b).astype('float32')
# Drop silence
if self.silence_speedup > 1 and \
(self.buff.peek(int(self.BLOCK_LENGTH * (self.silence_speedup - 1) * self.speed)) <
self.AUDIO_THRESHHOLD).all():
self.buff.advance_r(int(self.BLOCK_LENGTH * (self.silence_speedup - 1)))
self.n_droped += 1
if self.first_callback:
self.first_callback = False
data = (data * self.volume * np.linspace(0, 1, self.BLOCK_LENGTH)).astype('float32')
return data, pyaudio.paContinue
elif self.trigger_last_write:
data = (data * self.volume * np.linspace(1, 0, self.BLOCK_LENGTH)).astype( 'float32')
self.last_write_triggered = True
return data, pyaudio.paComplete
else:
return data * self.volume, pyaudio.paContinue
def close(self):
self.trigger_last_write = True
time.sleep(0.3)
self.audio_out_stream.close()
self.audio_stream.kill()
def sec_to_time_str(x):
m, s = divmod(x, 60)
h, m = divmod(m, 60)
return f'{int(h):02}:{int(m):02}:{int(s):02}'
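# e.g. sec_to_time_str(3725.4) -> '01:02:05'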
def get_stats_surf(playbar_offset_pix, x_size, screen_resolution, playbacktime,
total_media_length, speed, silence_speedup):
FONT_SIZE = 20
FONT_COLOR = (200, 200, 200)
font = pygame.font.SysFont(None, FONT_SIZE)
x, y = screen_resolution[0], x_size
pos = screen_resolution[0] - x, screen_resolution[1] - y
surf = pygame.Surface((x, y))
surf.set_alpha(200)
ratio_played = playbacktime / total_media_length
outline = pygame.Rect(playbar_offset_pix[0], playbar_offset_pix[1],
x - playbar_offset_pix[0] * 2,
y - playbar_offset_pix[1] * 2)
progress = outline.copy()
progress.width = outline.width * ratio_played
)
torch.save(self.model.state_dict(), path)
# Moving average of the loss for early stopping
if loss_term_ema and loss_flow_ema:
loss_term_ema = (
self.ema_alpha * losses[1] + (1.0 - self.ema_alpha) * loss_term_ema
)
loss_flow_ema = (
self.ema_alpha * losses[2] + (1.0 - self.ema_alpha) * loss_flow_ema
)
if (
loss_term_ema < self.early_stopping
and loss_flow_ema < self.early_stopping
):
break
else:
loss_term_ema = losses[1]
loss_flow_ema = losses[2]
# Log times
t1_iter = time.time()
times.update({"iter": t1_iter - t0_iter})
times = {"time_{}{}".format(k, self.al_iter): v for k, v in times.items()}
if self.comet and not self.no_log_times:
self.comet.log_metrics(times, step=i)
# Save final model
if self.model_path:
path = self.model_path.parent / Path(
self.model_path.stem + "_final" + self.model_path.suffix
)
torch.save(self.model.state_dict(), path)
torch.save(self.model.state_dict(), self.model_path)
# Close comet
if self.comet and self.al_iter == -1:
self.comet.end()
def sample(
self,
n_samples,
max_seq_length,
min_seq_length,
nalphabet,
min_word_len,
max_word_len,
proxy,
mask_eos=True,
get_uncertainties=True,
al_query_function=None,
):
times = {
"all": 0.0,
"actions_model": 0.0,
"actions_envs": 0.0,
"proxy": 0.0,
"sanitycheck": 0.0,
}
t0_all = time.time()
batch = []
envs = [
AptamerSeq(
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
nalphabet=nalphabet,
min_word_len=min_word_len,
max_word_len=max_word_len,
proxy=proxy,
stats_scores=self.stats_scores_tr,
)
for i in range(n_samples)
]
envs = [env.reset() for env in envs]
while envs:
seqs = [env.seq2obs() for env in envs]
mask = [len(env.seq) < env.min_seq_length for env in envs]
with torch.no_grad():
t0_a_model = time.time()
action_probs = self.model(tf(seqs))
if mask_eos:
action_probs[mask, -1] = -1000
t1_a_model = time.time()
times["actions_model"] += t1_a_model - t0_a_model
if all(torch.isfinite(action_probs).flatten()):
actions = Categorical(logits=action_probs).sample()
else:
actions = np.random.randint(
low=0, high=action_probs.shape[1], size=action_probs.shape[0]
)
if self.debug:
print("Action could not be sampled from model!")
t0_a_envs = time.time()
assert len(envs) == actions.shape[0]
for env, action in zip(envs, actions):
seq, valid = env.step(action)
if valid and env.done:
batch.append(env.seq2oracle([seq])[0])
envs = [env for env in envs if not env.done]
t1_a_envs = time.time()
times["actions_envs"] += t1_a_envs - t0_a_envs
t0_proxy = time.time()
batch = np.asarray(batch)
if get_uncertainties:
if self.al_query_function == "fancy_acquisition":
scores, proxy_vals, uncertainties = env.proxy(
batch, "fancy_acquisition"
)
else:
proxy_vals, uncertainties = env.proxy(batch, "Both")
scores = proxy_vals
else:
proxy_vals = env.proxy(batch)
uncertainties = None
scores = proxy_vals
t1_proxy = time.time()
times["proxy"] += t1_proxy - t0_proxy
samples = {
"samples": batch.astype(np.int64),
"scores": scores,
"energies": proxy_vals,
"uncertainties": uncertainties,
}
# Sanity-check: absolute zero pad
t0_sanitycheck = time.time()
zeros = np.where(batch == 0)
row_unique, row_unique_idx = np.unique(zeros[0], return_index=True)
for row, idx in zip(row_unique, row_unique_idx):
if np.sum(batch[row, zeros[1][idx] :]):
print(f"Found sequence with positive values after last 0, row {row}")
import ipdb
ipdb.set_trace()
t1_sanitycheck = time.time()
times["sanitycheck"] += t1_sanitycheck - t0_sanitycheck
t1_all = time.time()
times["all"] += t1_all - t0_all
return samples, times
def sample(
model,
n_samples,
max_seq_length,
min_seq_length,
nalphabet,
min_word_len,
max_word_len,
func,
mask_eos=True,
stats_scores_tr=None,
):
times = {
"all": 0.0,
"actions_model": 0.0,
"actions_envs": 0.0,
"proxy": 0.0,
"sanitycheck": 0.0,
}
t0_all = time.time()
batch = []
envs = [
AptamerSeq(
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
nalphabet=nalphabet,
min_word_len=min_word_len,
max_word_len=max_word_len,
func=func,
stats_scores=stats_scores_tr,
)
for i in range(n_samples)
]
envs = [env.reset() for env in envs]
while envs:
seqs = [env.seq2obs() for env in envs]
mask = [len(env.seq) < env.min_seq_length for env in envs]
with torch.no_grad():
t0_a_model = time.time()
action_probs = model(tf(seqs))
if mask_eos:
action_probs[mask, -1] = -1000
t1_a_model = time.time()
times["actions_model"] += t1_a_model - t0_a_model
if all(torch.isfinite(action_probs).flatten()):
actions = Categorical(logits=action_probs).sample()
else:
actions = np.random.randint(
low=0, high=action_probs.shape[1], size=action_probs.shape[0]
)
t0_a_envs = time.time()
assert len(envs) == actions.shape[0]
for env, action in zip(envs, actions):
seq, valid = env.step(action)
if valid and env.done:
batch.append(env.seq2oracle([seq])[0])
envs = [env for env in envs if not env.done]
t1_a_envs = time.time()
times["actions_envs"] += t1_a_envs - t0_a_envs
t0_proxy = time.time()
batch = np.asarray(batch)
proxy_vals = env.proxy(batch)
t1_proxy = time.time()
times["proxy"] += t1_proxy - t0_proxy
samples = {
"samples": batch.astype(np.int64),
"scores": proxy_vals,
}
# Sanity-check: absolute zero pad
t0_sanitycheck = time.time()
zeros = np.where(batch == 0)
row_unique, row_unique_idx = np.unique(zeros[0], return_index=True)
for row, idx in zip(row_unique, row_unique_idx):
if np.sum(batch[row, zeros[1][idx] :]):
print(f"Found sequence with positive values after last 0, row {row}")
import ipdb
ipdb.set_trace()
t1_sanitycheck = time.time()
times["sanitycheck"] += t1_sanitycheck - t0_sanitycheck
t1_all = time.time()
times["all"] += t1_all - t0_all
return samples, times
class RandomTrajAgent:
def __init__(self, args, envs):
self.mbsize = args.gflownet.mbsize # mini-batch size
self.envs = envs
self.nact = args.ndim + 1
self.model = None
def parameters(self):
return []
def sample_batch(self, mbsize, all_visited):
batch = []
[i.reset()[0] for i in self.envs] # reset envs
done = [False] * mbsize
while not all(done):
acts = np.random.randint(0, self.nact, mbsize) # actions (?)
# step : list
# - For each e in envs, if corresponding done is False
# - For each element i in env, and a in acts
# - i.step(a)
step = [
i.step(a)
for i, a in zip([e for d, e in zip(done, self.envs) if not d], acts)
]
c = count(0)
m = {j: next(c) for j in range(mbsize) if not done[j]}
done = [bool(d or step[m[i]][2]) for i, d in enumerate(done)]
for (_, r, d, sp) in step:
if d:
all_visited.append(tuple(sp))
return [] # agent is stateful, no need to return minibatch data
def flowmatch_loss(self, it, batch):
return None
def make_mlp(layers_dim, act=nn.LeakyReLU(), tail=[]):
"""
Defines an MLP with no top layer activation
Args
----
layers_dim : list
Dimensionality of each layer
act : Activation
Activation function
tail : list
Extra modules appended after the final linear layer
"""
return nn.Sequential(
*(
sum(
[
[nn.Linear(idim, odim)] + ([act] if n < len(layers_dim) - 2 else [])
for n, (idim, odim) in enumerate(zip(layers_dim, layers_dim[1:]))
],
[],
)
+ tail
)
)
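# Minimal usage sketch for make_mlp (hypothetical layer sizes, not taken from
# this project; assumes torch is imported at the top of the module):
def _example_make_mlp():
    # 8-dim observation -> two hidden layers -> 4 action logits
    policy = make_mlp([8, 64, 64, 4])
    out = policy(torch.zeros(1, 8))
    assert out.shape == (1, 4)
    return policy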
def make_opt(params, Z, args):
"""
Set up the optimizer
"""
params = list(params)
if not len(params):
return None
if args.gflownet.opt == "adam":
opt = torch.optim.Adam(
params,
args.gflownet.learning_rate,
betas=(args.gflownet.adam_beta1, args.gflownet.adam_beta2),
)
if Z is not None:
opt.add_param_group(
{
"params": Z,
"lr": args.gflownet.learning_rate * args.gflownet.lr_z_mult,
}
)
elif args.gflownet.opt == "msgd":
opt = torch.optim.SGD(
params, args.gflownet.learning_rate, momentum=args.gflownet.momentum
)
return opt
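# Usage sketch for make_opt (hypothetical config values; the args structure is
# inferred from the attribute accesses above):
def _example_make_opt():
    from types import SimpleNamespace
    gfn = SimpleNamespace(opt="adam", learning_rate=1e-3, adam_beta1=0.9,
                          adam_beta2=0.999, lr_z_mult=10.0)
    args = SimpleNamespace(gflownet=gfn)
    model = make_mlp([8, 64, 4])
    Z = torch.zeros(16, requires_grad=True)  # e.g. a learnable log-partition estimate
    return make_opt(model.parameters(), Z, args)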
def compute_empirical_distribution_error(env, visited):
"""
Computes the empirical distribution errors, as the mean L1 error and the KL
divergence between the true density of the space and the estimated density from all
states visited.
"""
td, end_states, true_r = env.true_density()
if td is None:
return None, None
true_density = tf(td)
if not len(visited):
return 1, 100
hist = defaultdict(int)
for i in visited:
hist[i] += 1
Z = sum([hist[i] for i in end_states])
estimated_density = tf([hist[i] / Z for i in end_states])
k1 = abs(estimated_density - true_density).mean().item()
# KL divergence
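# Note: the standard KL(true || estimated) is sum(true * log(true / estimated));
# the expression below uses log(estimated / true), i.e. the negated KL, so the
# sign of the returned value should be interpreted accordingly.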
kl = (true_density * torch.log(estimated_density / true_density)).sum().item()
return k1, kl
# TODO: improve approximation of uniform
def make_approx_uniform_test_set(
path_base_dataset,
score,
ntest,
min_length=0,
max_length=np.inf,
seed=167,
output_csv=None,
):
"""
Constructs an approximately uniformly distributed (on the score) set, by
selecting samples from a larger base set.
Args
----
path_base_dataset : str
Path to a CSV file containing the base data set.
score : str
Column in the CSV file containing the score.
ntest : int
Number of test samples.
seed : int
Random seed.
min_length : int
Minimum sequence length kept from the base set.
max_length : int
Maximum sequence length kept from the base set.
output_csv: str
Optional path to store the test set as CSV.
"""
if path_base_dataset is None:
return None
times = {
"all": 0.0,
"indices": 0.0,
}
t0_all = time.time()
if seed:
np.random.seed(seed)
df_base = pd.read_csv(path_base_dataset, index_col=0)
df_base = df_base.loc[
(df_base["letters"].map(len) >= min_length)
& (df_base["letters"].map(len) <= max_length)
]
scores_base = df_base[score].values
min_base = scores_base.min()
max_base = scores_base.max()
distr_unif = np.random.uniform(low=min_base, high=max_base, size=ntest)
# Get minimum distance samples without duplicates
t0_indices = time.time()
idx_samples = []
for idx in tqdm(range(ntest)):
dist = np.abs(scores_base - distr_unif[idx])
idx_min = np.argmin(dist)
if idx_min in idx_samples:
idx_sort = np.argsort(dist)
for idx_next in idx_sort:
if idx_next not in idx_samples:
idx_samples.append(idx_next)
break
else:
idx_samples.append(idx_min)
t1_indices = time.time()
times["indices"] += t1_indices - t0_indices
# Make test set
df_test = df_base.iloc[idx_samples]
if output_csv:
df_test.to_csv(output_csv)
t1_all = time.time()
times["all"] += t1_all - t0_all
return df_test, times
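# Usage sketch (hypothetical paths and column name; the base CSV is assumed to
# contain a 'letters' column and the given score column, as required above):
def _example_uniform_test_set():
    df_test, times = make_approx_uniform_test_set(
        path_base_dataset="data/base_scores.csv",
        score="energies",
        ntest=500,
        min_length=10,
        max_length=40,
        output_csv="data/test_uniform.csv",
    )
    return df_test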
def make_train_set(
oracle,
ntrain,
seed=168,
output_csv=None,
):
"""
Constructs a randomly sampled train set.
Args
----
ntrain : int
Number of train samples.
seed : int
Random seed.
output_csv: str
Optional path to store the train set as CSV.
"""
samples_dict = oracle.initializeDataset(save=False, returnData=True,
customSize=ntrain, custom_seed=seed)
energies = samples_dict["energies"]
samples_mat = samples_dict["samples"]
seq_letters = oracle.numbers2letters(samples_mat)
seq_ints = ["".join([str(el) for el in seq]) for seq in samples_mat]
RMS value
"""
return np.sqrt(np.mean(np.square(self.sig), axis))
def plot(self, fn=None, offset=0, scale=1, xlim=None, ylim=None, **kwargs):
"""Display signal graph
Parameters
----------
fn : func or None
Keyword or function (Default value = None)
offset : int or float
Offset each channel to create a stacked view (Default value = 0)
scale : float
Scale the y value (Default value = 1)
xlim : tuple or list
x axis range (Default value = None)
ylim : tuple or list
y axis range (Default value = None)
**kwargs :
keyword arguments for matplotlib.pyplot.plot()
Returns
-------
_ : Asig
self, you can use plt.show() to display the plot.
"""
if fn:
if fn == 'db':
fn = lambda x: np.sign(x) * ampdb((abs(x) * 2 ** 16 + 1))
elif not callable(fn):
_LOGGER.warning("Asig.plot: fn is neither keyword nor function")
return self
plot_sig = fn(self.sig)
else:
plot_sig = self.sig
if self.channels == 1 or (offset == 0 and scale == 1):
self._['plot'] = plt.plot(np.arange(0, self.samples) / self.sr, plot_sig, **kwargs)
else:
p = []
ts = np.linspace(0, self.samples / self.sr, self.samples)
for i, c in enumerate(self.sig.T):
p.append(plt.plot(ts, i * offset + c * scale, **kwargs))
plt.xlabel("time [s]")
if self.cn:
plt.text(0, (i + 0.1) * offset, self.cn[i])
if xlim is not None:
plt.xlim([xlim[0], xlim[1]])
if ylim is not None:
plt.ylim([ylim[0], ylim[1]])
return self
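# Usage sketch for plot() (hypothetical signal; the Asig constructor takes the
# sample array and the sample rate, as used elsewhere in this class):
#   a = Asig(np.sin(2 * np.pi * 440 * np.linspace(0, 1, 44100)), 44100)
#   a.plot(fn='db', xlim=(0, 0.1))
#   plt.show()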
def get_duration(self):
"""Return the duration in second."""
return self.samples / self.sr
def get_times(self):
"""Get time stamps for left-edge of sample-and-hold-signal"""
return np.linspace(0, (self.samples - 1) / self.sr, self.samples)
def __eq__(self, other):
"""Check if two asig objects have the same signal. But does not care about sr and others"""
sig_eq = np.array_equal(self.sig, other.sig)
sr_eq = self.sr == other.sr
return sig_eq and sr_eq
def __repr__(self):
"""Report key attributes"""
return "Asig('{}'): {} x {} @ {}Hz = {:.3f}s cn={}".format(
self.label, self.channels, self.samples, self.sr, self.samples / self.sr,
self.cn)
def __mul__(self, other):
"""Magic method for multiplying. You can either multiply a numpy array or an Asig object. If adding an Asig,
you don't always need to have same size arrays as audio signals may different in length. If mix_mode
is set to 'bound' the size is fixed to respect self. If not, the result will respect to whichever the
bigger array is."""
selfsig = self.sig
othersig = other.sig if isinstance(other, Asig) else other
if isinstance(othersig, numbers.Number):
return Asig(selfsig * othersig, self.sr, label=self.label + "_multiplied", cn=self.cn)
else:
if self.mix_mode == 'bound':
if selfsig.shape[0] > othersig.shape[0]:
selfsig = selfsig[:othersig.shape[0]]
elif selfsig.shape[0] < othersig.shape[0]:
othersig = othersig[:selfsig.shape[0]]
result = selfsig * othersig
else:
if selfsig.shape[0] > othersig.shape[0]:
result = selfsig.copy()
result[:othersig.shape[0]] *= othersig
elif selfsig.shape[0] < othersig.shape[0]:
result = othersig.copy()
result[:selfsig.shape[0]] *= selfsig
else:
result = selfsig * othersig
return Asig(result, self.sr, label=self.label + "_multiplied", cn=self.cn)
def __rmul__(self, other):
if isinstance(other, Asig):
return Asig(self.sig * other.sig, self.sr, label=self.label + "_multiplied", cn=self.cn)
else:
return Asig(self.sig * other, self.sr, label=self.label + "_multiplied", cn=self.cn)
def __add__(self, other):
"""Magic method for adding. You can either add a numpy array or an Asig object. If adding an Asig,
you don't always need to have same size arrays as audio signals may different in length. If mix_mode
is set to 'bound' the size is fixed to respect self. If not, the result will respect to whichever the
bigger array is."""
selfsig = self.sig
othersig = other.sig if isinstance(other, Asig) else other
if isinstance(othersig, numbers.Number): # When other is just a scalar
return Asig(selfsig + othersig, self.sr, label=self.label + "_added", cn=self.cn)
else:
if self.mix_mode == 'bound':
try:
if selfsig.shape[0] > othersig.shape[0]:
selfsig = selfsig[:othersig.shape[0]]
elif selfsig.shape[0] < othersig.shape[0]:
othersig = othersig[:selfsig.shape[0]]
except AttributeError:
pass # When othersig is just a scalar
result = selfsig + othersig
else:
# Make the bigger one
if selfsig.shape[0] > othersig.shape[0]:
result = selfsig.copy()
result[:othersig.shape[0]] += othersig
elif selfsig.shape[0] < othersig.shape[0]:
result = othersig.copy()
result[:selfsig.shape[0]] += selfsig
else:
result = selfsig + othersig
return Asig(result, self.sr, label=self.label + "_added", cn=self.cn)
def __radd__(self, other):
if isinstance(other, Asig):
return Asig(self.sig + other.sig, self.sr, label=self.label + "_added", cn=self.cn)
else:
return Asig(self.sig + other, self.sr, label=self.label + "_added", cn=self.cn)
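# Usage sketch for the arithmetic operators (hypothetical signals; see the
# __add__/__mul__ docstrings above for the mix_mode semantics):
#   a = Asig(np.ones(44100), 44100)
#   b = Asig(0.5 * np.ones(22050), 44100)
#   padded = a + b        # default: result keeps the length of the longer signal
#   a.mix_mode = 'bound'
#   bounded = a + b       # 'bound': result is truncated to the shorter signal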
def find_events(self, step_dur=0.001, sil_thr=-20, evt_min_dur=0, sil_min_dur=0.1, sil_pad=[0.001, 0.1]):
"""Locate meaningful 'events' in the signal and create event list. Onset detection.
Parameters
----------
step_dur : float
duration in seconds of each search step (Default value = 0.001)
sil_thr : int
silent threshold in dB (Default value = -20)
evt_min_dur : float
minimum duration to be counted as an event (Default value = 0)
sil_min_dur : float
minimum duration to be counted as silent (Default value = 0.1)
sil_pad : list or float
padding in seconds added before and after each detected event; a
two-element list sets the pre- and post-padding separately, a single
float is used on both sides (Default value = [0.001, 0.1])
Returns
-------
_ : Asig
This method returns self. But the list of events can be accessed through self._['events']
"""
if self.channels > 1:
msg = """warning: works only with 1-channel signals.
Tip: (1) convert to mono first with asig.mono();
(2) select individual channel: asig[:,0].find_events"""
_LOGGER.warning(msg)
return -1
step_samples = int(step_dur * self.sr)
sil_thr_amp = dbamp(sil_thr)
sil_flag = True
sil_count = 0
sil_min_steps = int(sil_min_dur / step_dur)
evt_min_steps = int(evt_min_dur * self.sr)
if type(sil_pad) is list:
sil_pad_samples = [int(v * self.sr) for v in sil_pad]
else:
sil_pad_samples = (int(sil_pad * self.sr), ) * 2
event_list = []
for i in range(0, self.samples, step_samples):
rms = self[i:i + step_samples].rms()
if sil_flag:
if rms > sil_thr_amp: # event found
sil_flag = False
event_begin = i
sil_count = 0
continue
else:
event_end = i
if rms < sil_thr_amp:
sil_count += 1
else:
sil_count = 0 # reset if there is outlier non-silence
if sil_count > sil_min_steps: # event ended
# The below line is new.
if event_end - event_begin >= evt_min_steps:
event_list.append([
event_begin - sil_pad_samples[0],
event_end - step_samples * sil_min_steps + sil_pad_samples[1]])
sil_flag = True
self._['events'] = np.array(event_list)
return self
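# Usage sketch for onset detection (hypothetical mono recording; thresholds
# depend on the material):
#   a = Asig(my_recording, 44100)              # my_recording: 1-channel numpy array
#   a.find_events(step_dur=0.005, sil_thr=-30, sil_min_dur=0.2)
#   print(a._['events'])                       # [start, stop] sample index pairs
#   first = a.select_event(index=0)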
def select_event(self, index=None, onset=None):
"""This method can be called after find_event (aka onset detection).
Parameters
----------
index : int or None
Index of the event (Default value = None)
onset : int or None
Onset of the event (Default value = None)
Returns
-------
_ : Asig
self
"""
if 'events' not in self._:
print('select_event: no events, return all')
return self
events = self._['events']
if onset:
index = np.argmin(np.abs(events[:, 0] - onset * self.sr))
if index is not None:
beg, end = events[index]
# print(beg, end)
return Asig(self.sig[beg:end], self.sr, label=self.label + f"event_{index}", cn=self.cn)
_LOGGER.warning('select_event: neither index nor onset given: return self')
return self
def fade_in(self, dur=0.1, curve=1):
"""Fade in the signal at the beginning
Parameters
----------
dur : float
Duration in seconds to fade in (Default value = 0.1)
curve : float
Curvature of the fader. (Default value = 1)
Returns
-------
_ : Asig
Asig, new asig with the fade in signal
"""
nsamp = int(dur * self.sr)
if nsamp > self.samples:
nsamp = self.samples
_LOGGER.warning("warning: Asig too short for fade_in - adapting fade_in time")
return Asig(np.hstack((self.sig[:nsamp] * np.linspace(0, 1, nsamp) ** curve, self.sig[nsamp:])),
self.sr, label=self.label + "_fadein", cn=self.cn)
def fade_out(self, dur=0.1, curve=1):
"""Fade out the signal at the end
Parameters
----------
dur : float
duration in seconds to fade out (Default value = 0.1)
curve : float
Curvature of the fader. (Default value = 1)
Returns
-------
_ : Asig
Asig, new asig with the fade out signal
"""
nsamp = int(dur * self.sr)
if nsamp > self.samples:
nsamp = self.samples
_LOGGER.warning("warning: Asig too short for fade_out - adapting fade_out time")
return Asig(np.hstack((self.sig[:-nsamp],
self.sig[-nsamp:] * np.linspace(1, 0, nsamp)**curve)),
self.sr, label=self.label + "_fadeout", cn=self.cn)
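# Usage sketch (continuing the hypothetical Asig 'a' from the sketches above):
# a 50 ms linear fade-in followed by a 200 ms quadratic fade-out.
#   shaped = a.fade_in(dur=0.05).fade_out(dur=0.2, curve=2)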
def iirfilter(self, cutoff_freqs, btype='bandpass', ftype='butter', order=4,
filter='lfilter', rp=None, rs=None):
"""iirfilter based on scipy.signal.iirfilter
Parameters
----------
cutoff_freqs : int
Cutoff frequency or frequencies.
btype : str
Filter type (Default value = 'bandpass')
ftype : str
The type of IIR filter.
dfcl = self.dflink[self.dflink.disSim <= 1 - self.ccReq]
# sort putting highest links in cluster on top
dfcl.sort_values(by='disSim', inplace=True, ascending=False)
dfcl.reset_index(inplace=True, drop=True)
dftemp = dfcl.copy()
clustlinks = {}
clustEvents = {}
clnum = 0
while len(dftemp) > 0:
ser = dftemp.iloc[0]
ndf = dftemp[[set(x).issubset(ser.II) for x in dftemp.II]]
clustlinks[clnum] = ndf.clust
valset = set([y for x in ndf.II.values for y in x])
clustEvents[clnum] = list(valset)
dftemp = dftemp[~dftemp.index.isin(ndf.index)]
clnum += 1
self.clustlinks = clustlinks
self.clusts = [[self.key[y] for y in clustEvents[x]]
for x in clustEvents.keys()]
keyset = set(self.key)
clustset = set([y for x in self.clusts for y in x])
self.singles = list(keyset.difference(clustset))
self.clustcount = np.sum([len(x) for x in self.clusts])
self.clustColors = self._getColors(len(self.clusts))
msg = ('ccReq for station %s updated to ccReq=%1.3f' %
(self.station, newccReq))
detex.log(__name__, msg, level='info', pri=True)
def _getColors(self, numClusts):
"""
See if there are enough default colors for the clusters; if not,
generate N unique colors (that probably don't look good together)
"""
clustColorsDefault = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
# if there are enough default python colors use them
if numClusts <= len(clustColorsDefault):
return clustColorsDefault[:numClusts]
else: # if not, generate N unique colors
colors = []
for i in np.arange(0., 360., 360. / numClusts):
hue = i / 360.
lightness = (50 + np.random.rand() * 10) / 100.
saturation = (90 + np.random.rand() * 10) / 100.
cvect = colorsys.hls_to_rgb(hue, lightness, saturation)
rgb = [int(x * 255) for x in cvect]
# convert to hex code ('%02x' formatting works on both Python 2 and 3, unlike bytes.encode('hex'))
colors.append('#%02x%02x%02x' % tuple(rgb))
return colors
def _makeColorDict(self, clustColors, nonClustColor):
if len(self.clusts) < 1:
colorsequence = clustColors
# if not enough colors repeat color matrix
elif float(len(clustColors)) / len(self.clusts) < 1:
colorsequence = clustColors * \
int(np.ceil((float(len(self.clusts)) / len(clustColors))))
else:
colorsequence = clustColors
# initialize color list with default color
color_list = [nonClustColor] * 3 * len(self.dflink)
for a in range(len(self.clusts)):
for b in self.clustlinks[a]:
color_list[int(b)] = colorsequence[a]
return color_list
def _makeDFLINK(self, truncate=True): # make the link dataframe
N = len(self.link)
# append cluster numbers to link array
link = np.append(self.link, np.arange(N + 1, N + N + 1).reshape(N, 1), 1)
if truncate: # truncate after required coefficient
linkup = link[link[:, 2] <= 1 - self.ccReq]
else:
linkup = link
T = fcluster(link[:, 0:4], 1 - self.ccReq, criterion='distance')
serclus = pd.Series(T)
clusdict = pd.Series([np.array([x]) for x in np.arange(
0, N + 1)], index=np.arange(0, N + 1))
for a in range(len(linkup)):
clusdict[int(linkup[a, 4])] = np.append(
clusdict[int(linkup[a, 0])], clusdict[int(linkup[a, 1])])
columns = ['i1', 'i2', 'disSim', 'num', 'clust']
dflink = pd.DataFrame(linkup, columns=columns)
if len(dflink) > 0:
dflink['II'] = list
else:
msg = 'No events cluster with corr coef = %1.3f' % self.ccReq
detex.log(__name__, msg, level='info', pri=True)
for a in dflink.iterrows(): # enumerate cluster contents
ar1 = list(np.array(clusdict[int(a[1].i1)]))
ar2 = list(np.array(clusdict[int(a[1].i2)]))
dflink['II'][a[0]] = ar1 + ar2
return dflink, serclus
# creates a basic dendrogram plot
def dendro(self, hideEventLabels=True, show=True, saveName=False,
legend=True, **kwargs):
"""
Function to plot dendrograms of the clusters
Parameters
-----
hideEventLabels : bool
turns x axis labeling on/off. Better set to false
if many events are in event pool
show : bool
If true call plt.show
saveName : str or False
path to save figure. Extension denotes format. See plt.savefig
for details
legend : bool
If true plot a legend on the side of the dendrogram
Note
----------
kwargs are passed to scipy.cluster.hierarchy.dendrogram, see docs
for acceptable arguments and descriptions
"""
# Get color schemes
color_list = self._makeColorDict(self.clustColors, self.nonClustColor)
for a in range(len(self.clusts)):
plt.plot([], [], '-', color=self.clustColors[a])
plt.plot([], [], '-', color=self.nonClustColor)
dendrogram(self.link, color_threshold=1 - self.ccReq, count_sort=True,
link_color_func=lambda x: color_list[x], **kwargs)
ax = plt.gca()
if legend:
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend([str(x) for x in range(1, len(self.clusts) + 1)] +
['N/A'], loc='center left', bbox_to_anchor=(1, .5),
title='Clusters')
ax.set_ylim([0, 1])
if hideEventLabels:
ax.set_xticks([])
plt.xlabel('Events')
plt.ylabel('Dissimilarity')
plt.title(self.station)
if saveName:
plt.savefig(saveName, **kwargs)
if show:
plt.show()
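# Usage sketch (hypothetical: 'cl' is a fitted per-station cluster object of
# this class):
#   cl.dendro(hideEventLabels=True, show=False, saveName='station_dendro.png')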
def plotEvents(self, projection='merc', plotSingles=True, **kwargs):
"""
Plot the event locations for each station using basemap. Calls the
plotEvents method of the Cluster class, see its docs for accepted
kwargs.
Parameters
---------
projection : str
The pojection type to pass to basemap
plotSingles : bool
If True also plot the singletons (events that dont cluster)
Notes
-------
kwargs are passed to basemap
If no working installation of basemap is found an ImportError will
be raised. See the following URL for tips on installing it:
http://matplotlib.org/basemap/users/installing.html, good luck!
"""
# TODO make dot size scale with magnitudes
# make sure basemap is installed
try:
from mpl_toolkits.basemap import Basemap
except ImportError:
msg = 'mpl_toolkits basemap not installed, cannot plot'
detex.log(__name__, msg, level='error', e=ImportError)
# init figures and get limits
fig_map, emap, horrange = self._init_map(Basemap, projection, kwargs)
zmin, zmax, zscale = self._get_z_scaling(horrange)
fig_lat = self._init_profile_figs(zmin, zmax, zscale)
fig_lon = self._init_profile_figs(zmin, zmax, zscale)
# separate singletons from clustered events
cl_dfs, sing_df = self._get_singletons_and_clusters()
self._plot_map_view(emap, fig_map, horrange, cl_dfs, sing_df)
self._plot_profile_view(zmin, zmax, zscale, fig_lat, fig_lon, cl_dfs,
sing_df, emap)
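# Usage sketch (hypothetical; requires a working basemap installation):
#   cl.plotEvents(projection='merc')
#   plt.show()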
def _init_map(self, Basemap, projection, kwargs):
"""
Set up the map figure with basemap; returns the figure instance,
the Basemap instance and the horizontal range of the plot
"""
map_fig = plt.figure()
# get map bounds
latmin = self.temkey.LAT.min()
latmax = self.temkey.LAT.max()
lonmin = self.temkey.LON.min()
lonmax = self.temkey.LON.max()
# create buffers so there is a slight border with no events around map
latbuff = abs((latmax - latmin) * 0.1)
lonbuff = abs((lonmax - lonmin) * 0.1)
# get the total horizontal distance of plot in km
totalxdist = obspy.core.util.geodetics.gps2DistAzimuth(
latmin, lonmin, latmin, lonmax)[0] / 1000
# init projection
emap = Basemap(projection=projection,
lat_0=np.mean([latmin, latmax]),
lon_0=np.mean([lonmin, lonmax]),
resolution='h',
area_thresh=0.1,
llcrnrlon=lonmin - lonbuff,
llcrnrlat=latmin - latbuff,
urcrnrlon=lonmax + lonbuff,
urcrnrlat=latmax + latbuff,
**kwargs)
# draw scale
emap.drawmapscale(lonmin, latmin, lonmin, latmin, totalxdist / 4.5)
# get limits in projection
xmax, xmin, ymax, ymin = emap.xmax, emap.xmin, emap.ymax, emap.ymin
horrange = max((xmax - xmin), (ymax - ymin)) # horizontal range
# get maximum degree distance for setting scalable ticks
latdi, londi = [abs(latmax - latmin), abs(lonmax - lonmin)]
maxdeg = max(latdi, londi)
parallels = np.arange(0., 80, maxdeg / 4)
emap.drawparallels(parallels, labels=[1, 0, 0, 1])
meridians = np.arange(10., 360., maxdeg / 4)
mers = emap.drawmeridians(meridians, labels=[1, 0, 0, 1])
for m in mers: # rotate meridian labels
try:
mers[m][1][0].set_rotation(90)
except:
pass
plt.title('Clusters on %s' % self.station)
return map_fig, emap, horrange
def _init_profile_figs(self, zmin, zmax, zscale):
"""
init figs for plotting the profiles of the events
"""
# init profile figures
profile_fig = plt.figure()
z1 = zmin * zscale
z2 = zmax * zscale
tickfor = ['%0.1f' % x1 for x1 in np.linspace(zmin, zmax, 10)]
plt.yticks(np.linspace(z1, z2, 10), tickfor)
plt.gca().invert_yaxis()
plt.xticks([])
plt.ylabel('Depth (km)')
return profile_fig
def _get_z_scaling(self, horrange):
"""
Return z limits and scale factors
"""
zmin, zmax = self.temkey.DEPTH.min(), self.temkey.DEPTH.max()
zscale = horrange / (zmax - zmin)
return zmin, zmax, zscale
def _get_singletons_and_clusters(self):
"""
get dataframes of clustered events and singletons
Note: cl_dfs is a list of dfs whereas sing_df is just a df
"""
cl_dfs = [self.temkey[self.temkey.NAME.isin(x)] for x in self.clusts]
sing_df = self.temkey[self.temkey.NAME.isin([x for x in self.singles])]
return cl_dfs, sing_df
def _plot_map_view(self, emap, map_fig, horrange, cl_dfs, sing_df):
"""
plot the map figure
"""
plt.figure(map_fig.number) # set to map figure
# plot singles
x, y = emap(sing_df.LON.values, sing_df.LAT.values)
emap.plot(x, y, '.', color=self.nonClustColor, ms=6.0)
for clnum, cl in enumerate(cl_dfs):
x, y = emap(cl.LON.values, cl.LAT.values)
emap.plot(x, y, '.', color=self.clustColors[clnum])
def _plot_profile_view(self, zmin, zmax, zscale, fig_lat, fig_lon, cl_df,
sing_df, emap):
"""
plot the profile view
"""
x_sing, y_sing = emap(sing_df.LON.values, sing_df.LAT.values)
# plot singletons
nccolor = self.nonClustColor
plt.figure(fig_lon.number)
plt.plot(x_sing, sing_df.DEPTH * zscale, '.', color=nccolor, ms=6.0)
plt.xlabel('Longitude')
plt.figure(fig_lat.number)
plt.plot(y_sing, sing_df.DEPTH * zscale, '.', color=nccolor, ms=6.0)
plt.xlabel('Latitude')
# plot clusters
for clnum, cl in enumerate(cl_df):
ccolor = self.clustColors[clnum]
x, y = emap(cl.LON.values, cl.LAT.values)
plt.figure(fig_lon.number)
plt.plot(x, cl.DEPTH * zscale, '.', color=ccolor)
plt.figure(fig_lat.number)
plt.plot(y, cl.DEPTH * zscale, '.', color=ccolor)
# set buffers so nothing plots right on edge
for fig in [fig_lat, fig_lon]:
'yù',
0x9D28: 'yā',
0x9D29: 'dié',
0x9D2A: 'yù',
0x9D2B: 'tián',
0x9D2C: 'yīng',
0x9D2D: 'duī',
0x9D2E: 'wū',
0x9D2F: 'ér',
0x9D30: 'guā',
0x9D31: 'ài',
0x9D32: 'zhī',
0x9D33: 'yàn',
0x9D34: 'héng',
0x9D35: 'xiāo',
0x9D36: 'jiá',
0x9D37: 'liè',
0x9D38: 'zhū',
0x9D39: 'yáng',
0x9D3A: 'yí',
0x9D3B: 'hóng',
0x9D3C: 'lù',
0x9D3D: 'rú',
0x9D3E: 'móu',
0x9D3F: 'gē',
0x9D40: 'rén',
0x9D41: 'jiāo',
0x9D42: 'xiū',
0x9D43: 'zhōu',
0x9D44: 'chī',
0x9D45: 'luò',
0x9D46: 'héng',
0x9D47: 'nián',
0x9D48: 'ě',
0x9D49: 'luán',
0x9D4A: 'jiá',
0x9D4B: 'jì',
0x9D4C: 'tú',
0x9D4D: 'huān',
0x9D4E: 'tuǒ',
0x9D4F: 'bū',
0x9D50: 'wú',
0x9D51: 'jiān',
0x9D52: 'yù',
0x9D53: 'bó',
0x9D54: 'jùn',
0x9D55: 'jùn',
0x9D56: 'bī',
0x9D57: 'xī',
0x9D58: 'jùn',
0x9D59: 'jú',
0x9D5A: 'tū',
0x9D5B: 'jìng',
0x9D5C: 'tí',
0x9D5D: 'é',
0x9D5E: 'é',
0x9D5F: 'kuáng',
0x9D60: 'hú',
0x9D61: 'wǔ',
0x9D62: 'shēn',
0x9D63: 'lài',
0x9D64: 'zān',
0x9D65: 'pàn',
0x9D66: 'lù',
0x9D67: 'pí',
0x9D68: 'shū',
0x9D69: 'fú',
0x9D6A: 'ān',
0x9D6B: 'zhuó',
0x9D6C: 'péng',
0x9D6D: 'qín',
0x9D6E: 'qiān',
0x9D6F: 'bēi',
0x9D70: 'diāo',
0x9D71: 'lù',
0x9D72: 'què',
0x9D73: 'jiān',
0x9D74: 'jú',
0x9D75: 'tù',
0x9D76: 'yā',
0x9D77: 'yuān',
0x9D78: 'qí',
0x9D79: 'lí',
0x9D7A: 'yè',
0x9D7B: 'zhuī',
0x9D7C: 'kōng',
0x9D7D: 'duò',
0x9D7E: 'kūn',
0x9D7F: 'shēng',
0x9D80: 'qí',
0x9D81: 'jīng',
0x9D82: 'yì',
0x9D83: 'yì',
0x9D84: 'jīng',
0x9D85: 'zī',
0x9D86: 'lái',
0x9D87: 'dōng',
0x9D88: 'qī',
0x9D89: 'chún',
0x9D8A: 'gēng',
0x9D8B: 'jū',
0x9D8C: 'qū',
0x9D8D: 'yì',
0x9D8E: 'zūn',
0x9D8F: 'jī',
0x9D90: 'shù',
0x9D91: 'yīng',
0x9D92: 'chì',
0x9D93: 'miáo',
0x9D94: 'róu',
0x9D95: 'ān',
0x9D96: 'qiū',
0x9D97: 'tí,chí',
0x9D98: 'hú',
0x9D99: 'tí,chí',
0x9D9A: 'è',
0x9D9B: 'jiē',
0x9D9C: 'máo',
0x9D9D: 'fú,bì',
0x9D9E: 'chūn',
0x9D9F: 'tú',
0x9DA0: 'yǎn',
0x9DA1: 'hé,jiè',
0x9DA2: 'yuán',
0x9DA3: 'piān,biǎn',
0x9DA4: 'kūn',
0x9DA5: 'méi',
0x9DA6: 'hú',
0x9DA7: 'yīng',
0x9DA8: 'chuàn,zhì',
0x9DA9: 'wù',
0x9DAA: 'jú',
0x9DAB: 'dōng',
0x9DAC: 'cāng,qiāng',
0x9DAD: 'fǎng',
0x9DAE: 'hè,hú',
0x9DAF: 'yīng',
0x9DB0: 'yuán',
0x9DB1: 'xiān',
0x9DB2: 'wēng',
0x9DB3: 'shī',
0x9DB4: 'hè',
0x9DB5: 'chú',
0x9DB6: 'táng',
0x9DB7: 'xiá',
0x9DB8: 'ruò',
0x9DB9: 'liú',
0x9DBA: 'jī',
0x9DBB: 'gǔ,hú',
0x9DBC: 'jiān',
0x9DBD: 'sǔn,xùn',
0x9DBE: 'hàn',
0x9DBF: 'cí',
0x9DC0: 'cí',
0x9DC1: 'yì',
0x9DC2: 'yào',
0x9DC3: 'yàn',
0x9DC4: 'jī',
0x9DC5: 'lì',
0x9DC6: 'tián',
0x9DC7: 'kòu',
0x9DC8: 'tī',
0x9DC9: 'tī',
0x9DCA: 'yì',
0x9DCB: 'tú',
0x9DCC: 'mǎ',
0x9DCD: 'xiāo',
0x9DCE: 'gāo',
0x9DCF: 'tián',
0x9DD0: 'chén',
0x9DD1: 'jì',
0x9DD2: 'tuán',
0x9DD3: 'zhè',
0x9DD4: 'áo',
0x9DD5: 'yǎo',
0x9DD6: 'yī',
0x9DD7: 'ōu',
0x9DD8: 'chì',
0x9DD9: 'zhì',
0x9DDA: 'liù',
0x9DDB: 'yōng',
0x9DDC: 'lóu,lǚ',
0x9DDD: 'bì',
0x9DDE: 'shuāng',
0x9DDF: 'zhuó',
0x9DE0: 'yú',
0x9DE1: 'wú',
0x9DE2: 'jué',
0x9DE3: 'yín',
0x9DE4: 'tí',
0x9DE5: 'sī',
0x9DE6: 'jiāo',
0x9DE7: 'yì',
0x9DE8: 'huá',
0x9DE9: 'bì',
0x9DEA: 'yīng',
0x9DEB: 'sù',
0x9DEC: 'huáng',
0x9DED: 'fán',
0x9DEE: 'jiāo',
0x9DEF: 'liáo',
0x9DF0: 'yàn',
0x9DF1: 'gāo',
0x9DF2: 'jiù',
0x9DF3: 'xián',
0x9DF4: 'xián',
0x9DF5: 'tú',
0x9DF6: 'mǎi',
0x9DF7: 'zūn',
0x9DF8: 'yù',
0x9DF9: 'yīng',
0x9DFA: 'lù',
0x9DFB: 'tuán',
0x9DFC: 'xián',
0x9DFD: 'xué',
0x9DFE: 'yì',
0x9DFF: 'pì',
0x9E00: 'zhǔ',
0x9E01: 'luó',
0x9E02: 'xī',
0x9E03: 'yì',
0x9E04: 'jī',
0x9E05: 'zé',
0x9E06: 'yú',
0x9E07: 'zhān',
0x9E08: 'yè',
0x9E09: 'yáng',
0x9E0A: 'pì',
0x9E0B: 'níng',
0x9E0C: 'hù',
0x9E0D: 'mí',
0x9E0E: 'yīng',
0x9E0F: 'méng',
0x9E10: 'dí',
0x9E11: 'yuè',
0x9E12: 'yù',
0x9E13: 'lěi',
0x9E14: 'bǔ',
0x9E15: 'lú',
0x9E16: 'hè',
0x9E17: 'lóng',
0x9E18: 'shuāng',
0x9E19: 'yuè',
0x9E1A: 'yīng',
0x9E1B: 'guàn',
0x9E1C: 'qú',
0x9E1D: 'lí',
0x9E1E: 'luán',
0x9E1F: 'niǎo,diǎo',
0x9E20: 'jiū',
0x9E21: 'jī',
0x9E22: 'yuān',
0x9E23: 'míng',
0x9E24: 'shī',
0x9E25: 'ōu',
0x9E26: 'yā',
0x9E27: 'cāng',
0x9E28: 'bǎo',
0x9E29: 'zhèn',
0x9E2A: 'gū',
0x9E2B: 'dōng',
0x9E2C: 'lú',
0x9E2D: 'yā',
0x9E2E: 'xiāo',
0x9E2F: 'yāng',
0x9E30: 'líng',
0x9E31: 'chī',
0x9E32: 'qú',
0x9E33: 'yuān',
0x9E34: 'xué',
0x9E35: 'tuó',
0x9E36: 'sī',
0x9E37: 'zhì',
0x9E38: 'ér',
0x9E39: 'guā',
0x9E3A: 'xiū',
0x9E3B: 'héng',
0x9E3C: 'zhōu',
0x9E3D: 'gē',
0x9E3E: 'luán',
0x9E3F: 'hóng',
0x9E40: 'wú',
0x9E41: 'bó',
0x9E42: 'lí',
0x9E43: 'juān',
0x9E44: 'hú,gǔ,hè',
0x9E45: 'é',
0x9E46: 'yù',
0x9E47: 'xián',
0x9E48: 'tí',
0x9E49: 'wǔ',
0x9E4A: 'què',
0x9E4B: 'miáo',
0x9E4C: 'ān',
0x9E4D: 'kūn',
0x9E4E: 'bēi',
0x9E4F: 'péng',
0x9E50: 'qiān',
0x9E51: 'chún',
0x9E52: 'gēng',
0x9E53: 'yuān',
0x9E54: 'sù',
0x9E55: 'hú',
0x9E56: 'hé',
0x9E57: 'è',
0x9E58: 'gǔ,hú',
0x9E59: 'qiū',
0x9E5A: 'cí',
0x9E5B: 'méi',
0x9E5C: 'wù',
0x9E5D: 'yì',
0x9E5E: 'yào',
0x9E5F: 'wēng',
0x9E60: 'liú',
0x9E61: 'jī',
0x9E62: 'yì',
0x9E63: 'jiān',
0x9E64: 'hè',
0x9E65: 'yī',
0x9E66: 'yīng',
0x9E67: 'zhè',
0x9E68: 'liù',
0x9E69: 'liáo',
0x9E6A: 'jiāo',
0x9E6B: 'jiù',
0x9E6C: 'yù',
0x9E6D: 'lù',
0x9E6E: 'huán',
0x9E6F: 'zhān',
0x9E70: 'yīng',
0x9E71: 'hù',
0x9E72: 'méng',
0x9E73: 'guàn',
0x9E74: 'shuāng',
0x9E75: 'lǔ',
0x9E76: 'jīn',
0x9E77: 'líng',
0x9E78: 'jiǎn',
0x9E79: 'xián',
0x9E7A: 'cuó',
0x9E7B: 'jiǎn',
0x9E7C: 'jiǎn',
0x9E7D: 'yán',
0x9E7E: 'cuó',
0x9E7F: 'lù',
0x9E80: 'yōu',
0x9E81: 'cū',
0x9E82: 'jǐ',
0x9E83: 'páo,biāo',
0x9E84: 'cū',
0x9E85: 'páo',
0x9E86: 'zhù,cū',
0x9E87: 'jūn,qún',
0x9E88: 'zhǔ',
0x9E89: 'jiān',
0x9E8A: 'mí',
0x9E8B: 'mí',
0x9E8C: 'yǔ',
0x9E8D: 'liú',
0x9E8E: 'chén',
0x9E8F: 'jūn',
0x9E90: 'lín',
0x9E91: 'ní',
0x9E92: 'qí',
0x9E93: 'lù',
0x9E94: 'jiù',
0x9E95: 'jūn',
0x9E96: 'jīng',
0x9E97: 'lí,lì',
0x9E98: 'xiāng',
0x9E99: 'xián',
0x9E9A: 'jiā',
0x9E9B: 'mí',
0x9E9C: 'lì',
0x9E9D: 'shè',
0x9E9E: 'zhāng',
0x9E9F: 'lín',
0x9EA0: 'jīng',
0x9EA1: 'qí',
0x9EA2: 'líng',
0x9EA3: 'yán',
0x9EA4: 'cū',
0x9EA5: 'mài',
0x9EA6: 'mài',
0x9EA7: 'hé',
0x9EA8: 'chǎo',
0x9EA9: 'fū',
0x9EAA: 'miàn',
0x9EAB: 'miàn',
0x9EAC: 'fū',
0x9EAD: 'pào',
0x9EAE: 'qù',
0x9EAF: 'qū',
0x9EB0: 'móu',
0x9EB1: 'fū',
0x9EB2: 'xiàn',
0x9EB3: 'lái',
0x9EB4: 'qū',
0x9EB5: 'miàn',
0x9EB6: 'chi',
0x9EB7: 'fēng',
0x9EB8: 'fū',
0x9EB9: 'qū',
0x9EBA: 'miàn',
0x9EBB: 'má',
0x9EBC: 'mó,me',
0x9EBD: 'mó,me,ma',
0x9EBE: 'huī',
0x9EBF: 'mí',
0x9EC0: 'zōu',
0x9EC1: 'nún',
0x9EC2: 'fén',
0x9EC3: 'huáng',
0x9EC4: 'huáng',
0x9EC5: 'jīn',
0x9EC6: 'guāng',
0x9EC7: 'tiān',
0x9EC8: 'tǒu',
0x9EC9: 'hóng',
0x9ECA: 'huà',
0x9ECB: 'kuàng',
0x9ECC: 'hóng',
0x9ECD: 'shǔ',
0x9ECE: 'lí',
0x9ECF: 'nián',
0x9ED0: 'chī',
0x9ED1: 'hēi',
0x9ED2: 'hēi',
0x9ED3: 'yì',
0x9ED4: 'qián',
0x9ED5: 'dǎn',
0x9ED6: 'xì',
0x9ED7: 'tún',
0x9ED8: 'mò',
0x9ED9: 'mò',
0x9EDA: 'qián',
0x9EDB: 'dài',
0x9EDC: 'chù',
0x9EDD: 'yǒu',
0x9EDE: 'diǎn',
0x9EDF: 'yī',
0x9EE0: 'xiá',
0x9EE1: 'yǎn',
0x9EE2: 'qū',
0x9EE3: 'měi',
0x9EE4: 'yǎn',
0x9EE5: 'qíng',
0x9EE6: 'yuè',
0x9EE7: 'lí',
0x9EE8: 'dǎng',
0x9EE9: 'dú',
0x9EEA: 'cǎn',
0x9EEB: 'yān',
0x9EEC: 'yǎn',
0x9EED: 'yǎn',
0x9EEE: 'dàn,shèn',
0x9EEF: 'àn',
0x9EF0: 'zhěn,yān',
0x9EF1: 'dài',
0x9EF2: 'cǎn',
0x9EF3: 'yī',
0x9EF4: 'méi',
0x9EF5: 'dǎn,zhǎn',
0x9EF6: 'yǎn',
0x9EF7: 'dú',
0x9EF8: 'lú',
0x9EF9: 'zhǐ',
0x9EFA: 'fěn',
0x9EFB: 'fú',
0x9EFC: 'fǔ',
0x9EFD: 'mǐn,miǎn,měng',
0x9EFE: 'mǐn,miǎn,měng',
0x9EFF: 'yuán',
0x9F00: 'cù',
0x9F01: 'qù',
0x9F02: 'cháo',
0x9F03: 'wā',
0x9F04: 'zhū',
0x9F05: 'zhī',
0x9F06: 'měng',
0x9F07: 'áo',
0x9F08: 'biē',
0x9F09: 'tuó',
0x9F0A: 'bì',
0x9F0B: 'yuán',
0x9F0C: 'cháo,zhāo',
0x9F0D: 'tuó',
0x9F0E: 'dǐng',
0x9F0F: 'mì',
0x9F10: 'nài',
0x9F11: 'dǐng',
0x9F12: 'zī',
0x9F13: 'gǔ',
0x9F14: 'gǔ',
0x9F15: 'dōng',
0x9F16: 'fén',
0x9F17: 'táo',
0x9F18: 'yuān',
0x9F19: 'pí',
0x9F1A: 'chāng',
0x9F1B: 'gāo',
0x9F1C: 'cào',
0x9F1D: 'yuān',
0x9F1E: 'tāng',
0x9F1F: 'tēng',
0x9F20: 'shǔ',
0x9F21: 'shǔ',
0x9F22: 'fén',
0x9F23: 'fèi',
0x9F24: 'wén',
0x9F25: 'bá',
0x9F26: 'diāo',
0x9F27: 'tuó',
0x9F28: 'zhōng',
0x9F29: 'qú',
0x9F2A: 'shēng',
0x9F2B: 'shí',
0x9F2C: 'yòu',
0x9F2D: 'shí',
0x9F2E: 'tíng',
0x9F2F: 'wú',
0x9F30: 'jú',
0x9F31: 'jīng',
0x9F32: 'hún',
0x9F33: 'jú',
0x9F34: 'yǎn',
0x9F35: 'tū',
0x9F36: 'sī',
0x9F37: 'xī',
0x9F38: 'xiàn',
0x9F39: 'yǎn',
0x9F3A: 'léi',
0x9F3B: 'bí',
0x9F3C: 'yào',
0x9F3D: 'qiú',
0x9F3E: 'hān',
0x9F3F: 'wù',
0x9F40: 'wù',
0x9F41: 'hōu',
0x9F42: 'xiè',
0x9F43: 'è',
0x9F44: 'zhā',
0x9F45: 'xiù',
0x9F46: 'wèng',
0x9F47: 'zhā',
0x9F48: 'nòng',
0x9F49: 'nàng',
0x9F4A: 'qí,zhāi',
0x9F4B: 'zhāi',
0x9F4C: 'jì',
0x9F4D: 'zī',
0x9F4E: 'jí',
0x9F4F: 'jī',
0x9F50: 'qí,jì,zī,zhāi',
0x9F51: 'jī',
0x9F52: 'chǐ',
0x9F53: 'chèn',
0x9F54: 'chèn',
0x9F55: 'hé',
0x9F56: 'yá',
0x9F57: 'yīn',
0x9F58: 'xiè',
0x9F59: 'bāo',
0x9F5A: 'zé',
0x9F5B: 'xiè',
0x9F5C: 'zī',
0x9F5D: 'chī',
0x9F5E: 'yàn',
0x9F5F: 'jǔ',
0x9F60: 'tiáo',
0x9F61: 'líng',
0x9F62: 'líng',
0x9F63: 'chū',
0x9F64: 'quán',
0x9F65: 'xiè',
0x9F66: 'yín',
0x9F67: 'niè',
0x9F68: 'jiù',
0x9F69: 'yǎo',
0x9F6A: 'chuò',
0x9F6B: 'yǔn',
0x9F6C: 'yǔ',
0x9F6D: 'chǔ',
0x9F6E: 'yǐ',
0x9F6F: 'ní',
0x9F70: 'zé',
0x9F71: 'zōu',
0x9F72: 'qǔ',
0x9F73: 'yǔn',
0x9F74: 'yǎn',
0x9F75: 'yú',
0x9F76: 'è',
0x9F77: 'wò',
0x9F78: 'yì',
0x9F79: 'cī',
0x9F7A: 'zōu',
0x9F7B: 'diān',
0x9F7C: 'chǔ',
0x9F7D: 'jìn',
0x9F7E: 'yà',
0x9F7F: 'chǐ',
0x9F80: 'chèn',
0x9F81: 'hé',
0x9F82: 'yín,kěn',
0x9F83: 'jǔ',
0x9F84: 'líng',
0x9F85: 'bāo',
0x9F86: 'tiáo',
0x9F87: 'zī',
0x9F88: 'yín,kěn',
0x9F89: 'yǔ',
0x9F8A: 'chuò',
0x9F8B: 'qǔ',
0x9F8C: 'wò',
0x9F8D: 'lóng,lǒng',
0x9F8E: 'páng',
0x9F8F: 'gōng,wò',
0x9F90: 'páng',
0x9F91: 'yǎn',
0x9F92: 'lóng',
0x9F93: 'lóng,lǒng',
0x9F94: 'gōng',
0x9F95: 'kān',
0x9F96: 'dá',
0x9F97: 'líng',
0x9F98: 'dá',
0x9F99: 'lóng',
0x9F9A: 'gōng',
0x9F9B: 'kān',
0x9F9C: 'guī,jūn,qiū',
0x9F9D: 'qiū',
0x9F9E: 'biē',
0x9F9F: 'guī,jūn,qiū',
0x9FA0: 'yuè',
0x9FA1: 'chuī',
0x9FA2: 'hé',
0x9FA3: 'jiǎo',
0x9FA4: 'xié',
0x9FA5: 'yù',
0x9FA6: 'cháng',
0x9FA7:
alert.
"""
return pulumi.get(self, "details")
@property
@pulumi.getter
def tags(self) -> Optional[Sequence[str]]:
"""
Tags of the alert.
"""
return pulumi.get(self, "tags")
@pulumi.output_type
class ServiceIncidentRuleIncidentRuleIncidentPropertyStakeholderProperty(dict):
def __init__(__self__, *,
message: str,
description: Optional[str] = None,
enable: Optional[bool] = None):
"""
:param str message: Message that is passed to the audience; generally used to provide content information about the alert.
:param str description: Description that is generally used to provide detailed information about the alert.
:param bool enable: Option to enable stakeholder notifications. Default value is true.
"""
pulumi.set(__self__, "message", message)
if description is not None:
pulumi.set(__self__, "description", description)
if enable is not None:
pulumi.set(__self__, "enable", enable)
@property
@pulumi.getter
def message(self) -> str:
"""
Message that is passed to the audience; generally used to provide content information about the alert.
"""
return pulumi.get(self, "message")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Description that is generally used to provide detailed information about the alert.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def enable(self) -> Optional[bool]:
"""
Option to enable stakeholder notifications. Default value is true.
"""
return pulumi.get(self, "enable")
@pulumi.output_type
class TeamMember(dict):
def __init__(__self__, *,
id: str,
role: Optional[str] = None):
"""
:param str id: The UUID for the member to add to this Team.
:param str role: The role for the user within the Team - can be either `admin` or `user`. Default: `user`.
"""
pulumi.set(__self__, "id", id)
if role is not None:
pulumi.set(__self__, "role", role)
@property
@pulumi.getter
def id(self) -> str:
"""
The UUID for the member to add to this Team.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def role(self) -> Optional[str]:
"""
The role for the user within the Team - can be either `admin` or `user`. Default: `user`.
"""
return pulumi.get(self, "role")
@pulumi.output_type
class TeamRoutingRuleCriteria(dict):
def __init__(__self__, *,
type: str,
conditions: Optional[Sequence['outputs.TeamRoutingRuleCriteriaCondition']] = None):
"""
:param str type: Type of the operation that will be applied to the conditions. Should be one of `match-all`, `match-any-condition` or `match-all-conditions`.
:param Sequence['TeamRoutingRuleCriteriaConditionArgs'] conditions: List of conditions that will be checked before applying the team routing rule. This field should be omitted if the criteria type is set to `match-all`.
"""
pulumi.set(__self__, "type", type)
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the operation that will be applied to the conditions. Should be one of `match-all`, `match-any-condition` or `match-all-conditions`.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def conditions(self) -> Optional[Sequence['outputs.TeamRoutingRuleCriteriaCondition']]:
"""
List of conditions that will be checked before applying the team routing rule. This field should be omitted if the criteria type is set to `match-all`.
"""
return pulumi.get(self, "conditions")
@pulumi.output_type
class TeamRoutingRuleCriteriaCondition(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "expectedValue":
suggest = "expected_value"
elif key == "not":
suggest = "not_"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TeamRoutingRuleCriteriaCondition. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TeamRoutingRuleCriteriaCondition.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TeamRoutingRuleCriteriaCondition.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
field: str,
operation: str,
expected_value: Optional[str] = None,
key: Optional[str] = None,
not_: Optional[bool] = None,
order: Optional[int] = None):
"""
:param str field: Specifies which alert field will be used in the condition. Possible values are `message`, `alias`, `description`, `source`, `entity`, `tags`, `actions`, `extra-properties`, `recipients`, `teams` or `priority`.
:param str operation: The operation that will be executed for the given field and key. Possible operations are `matches`, `contains`, `starts-with`, `ends-with`, `equals`, `contains-key`, `contains-value`, `greater-than`, `less-than`, `is-empty` and `equals-ignore-whitespace`.
:param str key: If the field is set to `extra-properties`, the key can be used for a key-value pair.
:param bool not_: Indicates whether the given operation is negated. Default value is false.
:param int order: Order of the condition in the conditions list.
"""
pulumi.set(__self__, "field", field)
pulumi.set(__self__, "operation", operation)
if expected_value is not None:
pulumi.set(__self__, "expected_value", expected_value)
if key is not None:
pulumi.set(__self__, "key", key)
if not_ is not None:
pulumi.set(__self__, "not_", not_)
if order is not None:
pulumi.set(__self__, "order", order)
@property
@pulumi.getter
def field(self) -> str:
"""
Specifies which alert field will be used in the condition. Possible values are `message`, `alias`, `description`, `source`, `entity`, `tags`, `actions`, `extra-properties`, `recipients`, `teams` or `priority`.
"""
return pulumi.get(self, "field")
@property
@pulumi.getter
def operation(self) -> str:
"""
The operation that will be executed for the given field and key. Possible operations are `matches`, `contains`, `starts-with`, `ends-with`, `equals`, `contains-key`, `contains-value`, `greater-than`, `less-than`, `is-empty` and `equals-ignore-whitespace`.
"""
return pulumi.get(self, "operation")
@property
@pulumi.getter(name="expectedValue")
def expected_value(self) -> Optional[str]:
return pulumi.get(self, "expected_value")
@property
@pulumi.getter
def key(self) -> Optional[str]:
"""
If the field is set to `extra-properties`, the key can be used for a key-value pair.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter(name="not")
def not_(self) -> Optional[bool]:
"""
Indicates whether the given operation is negated. Default value is false.
"""
return pulumi.get(self, "not_")
@property
@pulumi.getter
def order(self) -> Optional[int]:
"""
Order of the condition in the conditions list.
"""
return pulumi.get(self, "order")
@pulumi.output_type
class TeamRoutingRuleNotify(dict):
def __init__(__self__, *,
type: str,
id: Optional[str] = None,
name: Optional[str] = None):
"""
:param str id: The ID of the Opsgenie Team Routing Rule.
:param str name: Name of the team routing rule
"""
pulumi.set(__self__, "type", type)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The ID of the Opsgenie Team Routing Rule.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the team routing rule
"""
return pulumi.get(self, "name")
@pulumi.output_type
class TeamRoutingRuleTimeRestriction(dict):
def __init__(__self__, *,
type: str,
restrictions: Optional[Sequence['outputs.TeamRoutingRuleTimeRestrictionRestriction']] = None):
pulumi.set(__self__, "type", type)
if restrictions is not None:
pulumi.set(__self__, "restrictions", restrictions)
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter
def restrictions(self) -> Optional[Sequence['outputs.TeamRoutingRuleTimeRestrictionRestriction']]:
return pulumi.get(self, "restrictions")
@pulumi.output_type
class TeamRoutingRuleTimeRestrictionRestriction(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "endDay":
suggest = "end_day"
elif key == "endHour":
suggest = "end_hour"
elif key == "endMin":
suggest = "end_min"
elif key == "startDay":
suggest = "start_day"
elif key == "startHour":
suggest = "start_hour"
elif key == "startMin":
suggest = "start_min"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TeamRoutingRuleTimeRestrictionRestriction. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TeamRoutingRuleTimeRestrictionRestriction.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TeamRoutingRuleTimeRestrictionRestriction.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
end_day: str,
end_hour: int,
end_min: int,
start_day: str,
start_hour: int,
start_min: int):
pulumi.set(__self__, "end_day", end_day)
pulumi.set(__self__, "end_hour", end_hour)
pulumi.set(__self__, "end_min", end_min)
pulumi.set(__self__, "start_day", start_day)
pulumi.set(__self__, "start_hour", start_hour)
pulumi.set(__self__, "start_min", start_min)
@property
@pulumi.getter(name="endDay")
def end_day(self) -> str:
return pulumi.get(self, "end_day")
@property
@pulumi.getter(name="endHour")
def end_hour(self) -> int:
return pulumi.get(self, "end_hour")
@property
@pulumi.getter(name="endMin")
def end_min(self) -> int:
return pulumi.get(self, "end_min")
@property
@pulumi.getter(name="startDay")
def start_day(self) -> str:
return pulumi.get(self, "start_day")
@property
@pulumi.getter(name="startHour")
def start_hour(self) -> int:
return pulumi.get(self, "start_hour")
@property
@pulumi.getter(name="startMin")
def start_min(self) -> int:
return pulumi.get(self, "start_min")
@pulumi.output_type
class UserUserAddress(dict):
def __init__(__self__, *,
city: str,
country: str,
line: str,
state: str,
zipcode: str):
pulumi.set(__self__, "city", city)
pulumi.set(__self__, "country", country)
pulumi.set(__self__, "line", line)
pulumi.set(__self__, "state", state)
pulumi.set(__self__, "zipcode", zipcode)
@property
@pulumi.getter
def city(self) -> str:
return pulumi.get(self, "city")
@property
@pulumi.getter
def country(self) -> str:
return pulumi.get(self, "country")
@property
@pulumi.getter
def line(self) -> str:
return pulumi.get(self, "line")
@property
@pulumi.getter
def state(self) -> str:
return pulumi.get(self, "state")
@property
@pulumi.getter
def zipcode(self) -> str:
return pulumi.get(self, "zipcode")
@pulumi.output_type
class GetEscalationRepeatResult(dict):
def __init__(__self__, *,
close_alert_after_all: Optional[bool] = None,
count: Optional[int] = None,
reset_recipient_states: Optional[bool] = None,
wait_interval: Optional[int] = None):
if close_alert_after_all is not None:
pulumi.set(__self__, "close_alert_after_all", close_alert_after_all)
if count is not None:
pulumi.set(__self__, "count", count)
if reset_recipient_states is not None:
pulumi.set(__self__, "reset_recipient_states", reset_recipient_states)
if wait_interval is not None:
pulumi.set(__self__, "wait_interval", wait_interval)
@property
@pulumi.getter(name="closeAlertAfterAll")
def close_alert_after_all(self) -> Optional[bool]:
return pulumi.get(self, "close_alert_after_all")
@property
@pulumi.getter
def count(self) -> Optional[int]:
return pulumi.get(self, "count")
@property
@pulumi.getter(name="resetRecipientStates")
def reset_recipient_states(self) -> Optional[bool]:
return pulumi.get(self, "reset_recipient_states")
@property
@pulumi.getter(name="waitInterval")
def wait_interval(self) -> Optional[int]:
return pulumi.get(self, "wait_interval")
@pulumi.output_type
class GetEscalationRuleResult(dict):
def __init__(__self__, *,
condition: str,
delay: int,
notify_type: str,
recipients: Sequence['outputs.GetEscalationRuleRecipientResult']):
pulumi.set(__self__, "condition", condition)
iris.analysis.MEAN)
CCCmaSMHI_50_S = CCCmaSMHI_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
CNRM_50_S = CNRM_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
CNRMSMHI_50_S = CNRMSMHI_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
CSIRO_50_S = CSIRO_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
ICHECDMI_50_S = ICHECDMI_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
ICHECCCLM_50_S = ICHECCCLM_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
ICHECKNMI_50_S = ICHECKNMI_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
ICHECMPI_50_S = ICHECMPI_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
ICHECSMHI_50_S = ICHECSMHI_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
CCCmaCanRCM85_50_S = CCCmaCanRCM85_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
CCCmaSMHI85_50_S = CCCmaSMHI85_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
CNRM85_50_S = CNRM85_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
CNRMSMHI85_50_S = CNRMSMHI85_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
CSIRO85_50_S = CSIRO85_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
ICHECDMI85_50_S = ICHECDMI85_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
ICHECCCLM85_50_S = ICHECCCLM85_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
ICHECKNMI85_50_S = ICHECKNMI85_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
ICHECMPI85_50_S = ICHECMPI85_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
ICHECSMHI85_50_S = ICHECSMHI85_50_S.aggregated_by('day_of_year', iris.analysis.MEAN)
CRU_S = CRU_S.aggregated_by('day_of_year', iris.analysis.MEAN)
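#Note (sketch, not part of the original script): aggregated_by('day_of_year', ...)
#above assumes each cube already carries a 'day_of_year' auxiliary coordinate.
#If it were missing, it could be added from the time coordinate first, e.g.:
#    import iris.coord_categorisation
#    iris.coord_categorisation.add_day_of_year(CRU_S, 'time', name='day_of_year')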
#Returns an array of area weights, with the same dimensions as the cube
CCCmaCanRCM_past_S_grid_areas = iris.analysis.cartography.area_weights(CCCmaCanRCM_past_S)
CCCmaSMHI_past_S_grid_areas = iris.analysis.cartography.area_weights(CCCmaSMHI_past_S)
CNRM_past_S_grid_areas = iris.analysis.cartography.area_weights(CNRM_past_S)
CNRMSMHI_past_S_grid_areas = iris.analysis.cartography.area_weights(CNRMSMHI_past_S)
CSIRO_past_S_grid_areas = iris.analysis.cartography.area_weights(CSIRO_past_S)
ICHECDMI_past_S_grid_areas = iris.analysis.cartography.area_weights(ICHECDMI_past_S)
ICHECCCLM_past_S_grid_areas = iris.analysis.cartography.area_weights(ICHECCCLM_past_S)
ICHECKNMI_past_S_grid_areas = iris.analysis.cartography.area_weights(ICHECKNMI_past_S)
ICHECMPI_past_S_grid_areas = iris.analysis.cartography.area_weights(ICHECMPI_past_S)
ICHECSMHI_past_S_grid_areas = iris.analysis.cartography.area_weights(ICHECSMHI_past_S)
CCCmaCanRCM_30_S_grid_areas = iris.analysis.cartography.area_weights(CCCmaCanRCM_30_S)
CCCmaSMHI_30_S_grid_areas = iris.analysis.cartography.area_weights(CCCmaSMHI_30_S)
CNRM_30_S_grid_areas = iris.analysis.cartography.area_weights(CNRM_30_S)
CNRMSMHI_30_S_grid_areas = iris.analysis.cartography.area_weights(CNRMSMHI_30_S)
CSIRO_30_S_grid_areas = iris.analysis.cartography.area_weights(CSIRO_30_S)
ICHECDMI_30_S_grid_areas = iris.analysis.cartography.area_weights(ICHECDMI_30_S)
ICHECCCLM_30_S_grid_areas = iris.analysis.cartography.area_weights(ICHECCCLM_30_S)
ICHECKNMI_30_S_grid_areas = iris.analysis.cartography.area_weights(ICHECKNMI_30_S)
ICHECMPI_30_S_grid_areas = iris.analysis.cartography.area_weights(ICHECMPI_30_S)
ICHECSMHI_30_S_grid_areas = iris.analysis.cartography.area_weights(ICHECSMHI_30_S)
CCCmaCanRCM85_30_S_grid_areas = iris.analysis.cartography.area_weights(CCCmaCanRCM85_30_S)
CCCmaSMHI85_30_S_grid_areas = iris.analysis.cartography.area_weights(CCCmaSMHI85_30_S)
CNRM85_30_S_grid_areas = iris.analysis.cartography.area_weights(CNRM85_30_S)
CNRMSMHI85_30_S_grid_areas = iris.analysis.cartography.area_weights(CNRMSMHI85_30_S)
CSIRO85_30_S_grid_areas = iris.analysis.cartography.area_weights(CSIRO85_30_S)
ICHECDMI85_30_S_grid_areas = iris.analysis.cartography.area_weights(ICHECDMI85_30_S)
ICHECCCLM85_30_S_grid_areas = iris.analysis.cartography.area_weights(ICHECCCLM85_30_S)
ICHECKNMI85_30_S_grid_areas = iris.analysis.cartography.area_weights(ICHECKNMI85_30_S)
ICHECMPI85_30_S_grid_areas = iris.analysis.cartography.area_weights(ICHECMPI85_30_S)
ICHECSMHI85_30_S_grid_areas = iris.analysis.cartography.area_weights(ICHECSMHI85_30_S)
CCCmaCanRCM_50_S_grid_areas = iris.analysis.cartography.area_weights(CCCmaCanRCM_50_S)
CCCmaSMHI_50_S_grid_areas = iris.analysis.cartography.area_weights(CCCmaSMHI_50_S)
CNRM_50_S_grid_areas = iris.analysis.cartography.area_weights(CNRM_50_S)
CNRMSMHI_50_S_grid_areas = iris.analysis.cartography.area_weights(CNRMSMHI_50_S)
CSIRO_50_S_grid_areas = iris.analysis.cartography.area_weights(CSIRO_50_S)
ICHECDMI_50_S_grid_areas = iris.analysis.cartography.area_weights(ICHECDMI_50_S)
ICHECCCLM_50_S_grid_areas = iris.analysis.cartography.area_weights(ICHECCCLM_50_S)
ICHECKNMI_50_S_grid_areas = iris.analysis.cartography.area_weights(ICHECKNMI_50_S)
ICHECMPI_50_S_grid_areas = iris.analysis.cartography.area_weights(ICHECMPI_50_S)
ICHECSMHI_50_S_grid_areas = iris.analysis.cartography.area_weights(ICHECSMHI_50_S)
CCCmaCanRCM85_50_S_grid_areas = iris.analysis.cartography.area_weights(CCCmaCanRCM85_50_S)
CCCmaSMHI85_50_S_grid_areas = iris.analysis.cartography.area_weights(CCCmaSMHI85_50_S)
CNRM85_50_S_grid_areas = iris.analysis.cartography.area_weights(CNRM85_50_S)
CNRMSMHI85_50_S_grid_areas = iris.analysis.cartography.area_weights(CNRMSMHI85_50_S)
CSIRO85_50_S_grid_areas = iris.analysis.cartography.area_weights(CSIRO85_50_S)
ICHECDMI85_50_S_grid_areas = iris.analysis.cartography.area_weights(ICHECDMI85_50_S)
ICHECCCLM85_50_S_grid_areas = iris.analysis.cartography.area_weights(ICHECCCLM85_50_S)
ICHECKNMI85_50_S_grid_areas = iris.analysis.cartography.area_weights(ICHECKNMI85_50_S)
ICHECMPI85_50_S_grid_areas = iris.analysis.cartography.area_weights(ICHECMPI85_50_S)
ICHECSMHI85_50_S_grid_areas = iris.analysis.cartography.area_weights(ICHECSMHI85_50_S)
CRU_S_grid_areas = iris.analysis.cartography.area_weights(CRU_S)
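#Note (sketch, not part of the original script): area_weights requires the latitude
#and longitude coordinates to carry bounds; if a cube lacked them, they could be
#estimated before computing the weights, e.g.:
#    CRU_S.coord('latitude').guess_bounds()
#    CRU_S.coord('longitude').guess_bounds()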
#We want to plot the mean for the whole region, so we need a mean over all the lats and lons
CCCmaCanRCM_past_S_mean = CCCmaCanRCM_past_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CCCmaCanRCM_past_S_grid_areas)
CCCmaSMHI_past_S_mean = CCCmaSMHI_past_S.collapsed(['latitude', 'longitude'],iris.analysis.MEAN, weights=CCCmaSMHI_past_S_grid_areas)
CNRM_past_S_mean = CNRM_past_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CNRM_past_S_grid_areas)
CNRMSMHI_past_S_mean = CNRMSMHI_past_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CNRMSMHI_past_S_grid_areas)
CSIRO_past_S_mean = CSIRO_past_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CSIRO_past_S_grid_areas)
ICHECDMI_past_S_mean = ICHECDMI_past_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECDMI_past_S_grid_areas)
ICHECCCLM_past_S_mean = ICHECCCLM_past_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECCCLM_past_S_grid_areas)
ICHECKNMI_past_S_mean = ICHECKNMI_past_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECKNMI_past_S_grid_areas)
ICHECMPI_past_S_mean = ICHECMPI_past_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECMPI_past_S_grid_areas)
ICHECSMHI_past_S_mean = ICHECSMHI_past_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECSMHI_past_S_grid_areas)
CCCmaCanRCM_30_S_mean = CCCmaCanRCM_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CCCmaCanRCM_30_S_grid_areas)
CCCmaSMHI_30_S_mean = CCCmaSMHI_30_S.collapsed(['latitude', 'longitude'],iris.analysis.MEAN, weights=CCCmaSMHI_30_S_grid_areas)
CNRM_30_S_mean = CNRM_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CNRM_30_S_grid_areas)
CNRMSMHI_30_S_mean = CNRMSMHI_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CNRMSMHI_30_S_grid_areas)
CSIRO_30_S_mean = CSIRO_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CSIRO_30_S_grid_areas)
ICHECDMI_30_S_mean = ICHECDMI_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECDMI_30_S_grid_areas)
ICHECCCLM_30_S_mean = ICHECCCLM_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECCCLM_30_S_grid_areas)
ICHECKNMI_30_S_mean = ICHECKNMI_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECKNMI_30_S_grid_areas)
ICHECMPI_30_S_mean = ICHECMPI_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECMPI_30_S_grid_areas)
ICHECSMHI_30_S_mean = ICHECSMHI_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECSMHI_30_S_grid_areas)
CCCmaCanRCM85_30_S_mean = CCCmaCanRCM85_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CCCmaCanRCM85_30_S_grid_areas)
CCCmaSMHI85_30_S_mean = CCCmaSMHI85_30_S.collapsed(['latitude', 'longitude'],iris.analysis.MEAN, weights=CCCmaSMHI85_30_S_grid_areas)
CNRM85_30_S_mean = CNRM85_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CNRM85_30_S_grid_areas)
CNRMSMHI85_30_S_mean = CNRMSMHI85_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CNRMSMHI85_30_S_grid_areas)
CSIRO85_30_S_mean = CSIRO85_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CSIRO85_30_S_grid_areas)
ICHECDMI85_30_S_mean = ICHECDMI85_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECDMI85_30_S_grid_areas)
ICHECCCLM85_30_S_mean = ICHECCCLM85_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECCCLM85_30_S_grid_areas)
ICHECKNMI85_30_S_mean = ICHECKNMI85_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECKNMI85_30_S_grid_areas)
ICHECMPI85_30_S_mean = ICHECMPI85_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECMPI85_30_S_grid_areas)
ICHECSMHI85_30_S_mean = ICHECSMHI85_30_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECSMHI85_30_S_grid_areas)
CCCmaCanRCM_50_S_mean = CCCmaCanRCM_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CCCmaCanRCM_50_S_grid_areas)
CCCmaSMHI_50_S_mean = CCCmaSMHI_50_S.collapsed(['latitude', 'longitude'],iris.analysis.MEAN, weights=CCCmaSMHI_50_S_grid_areas)
CNRM_50_S_mean = CNRM_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CNRM_50_S_grid_areas)
CNRMSMHI_50_S_mean = CNRMSMHI_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CNRMSMHI_50_S_grid_areas)
CSIRO_50_S_mean = CSIRO_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CSIRO_50_S_grid_areas)
ICHECDMI_50_S_mean = ICHECDMI_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECDMI_50_S_grid_areas)
ICHECCCLM_50_S_mean = ICHECCCLM_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECCCLM_50_S_grid_areas)
ICHECKNMI_50_S_mean = ICHECKNMI_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECKNMI_50_S_grid_areas)
ICHECMPI_50_S_mean = ICHECMPI_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECMPI_50_S_grid_areas)
ICHECSMHI_50_S_mean = ICHECSMHI_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECSMHI_50_S_grid_areas)
CCCmaCanRCM85_50_S_mean = CCCmaCanRCM85_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CCCmaCanRCM85_50_S_grid_areas)
CCCmaSMHI85_50_S_mean = CCCmaSMHI85_50_S.collapsed(['latitude', 'longitude'],iris.analysis.MEAN, weights=CCCmaSMHI85_50_S_grid_areas)
CNRM85_50_S_mean = CNRM85_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CNRM85_50_S_grid_areas)
CNRMSMHI85_50_S_mean = CNRMSMHI85_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CNRMSMHI85_50_S_grid_areas)
CSIRO85_50_S_mean = CSIRO85_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CSIRO85_50_S_grid_areas)
ICHECDMI85_50_S_mean = ICHECDMI85_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECDMI85_50_S_grid_areas)
ICHECCCLM85_50_S_mean = ICHECCCLM85_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECCCLM85_50_S_grid_areas)
ICHECKNMI85_50_S_mean = ICHECKNMI85_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECKNMI85_50_S_grid_areas)
ICHECMPI85_50_S_mean = ICHECMPI85_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECMPI85_50_S_grid_areas)
ICHECSMHI85_50_S_mean = ICHECSMHI85_50_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ICHECSMHI85_50_S_grid_areas)
CRU_S_mean = CRU_S.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=CRU_S_grid_areas)
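#Possible refactor (sketch only, not wired into this script): the repeated
#area-weighted collapse above could be wrapped in a small helper, e.g.:
#    def area_weighted_mean(cube):
#        """Collapse a cube over latitude/longitude using area weights."""
#        weights = iris.analysis.cartography.area_weights(cube)
#        return cube.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=weights)
#    CCCmaCanRCM_past_S_mean = area_weighted_mean(CCCmaCanRCM_past_S)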
#for the baseline we don't need the average for each year, but the average over the whole time period, so collapse along time
CCCmaCanRCM_b_S_mean = CCCmaCanRCM_past_S_mean.collapsed(['time'], iris.analysis.MEAN)
CCCmaSMHI_b_S_mean = CCCmaSMHI_past_S_mean.collapsed(['time'], iris.analysis.MEAN)
CNRM_b_S_mean = CNRM_past_S_mean.collapsed(['time'], iris.analysis.MEAN)
CNRMSMHI_b_S_mean = CNRMSMHI_past_S_mean.collapsed(['time'], iris.analysis.MEAN)
CSIRO_b_S_mean = CSIRO_past_S_mean.collapsed(['time'], iris.analysis.MEAN)
ICHECDMI_b_S_mean = ICHECDMI_past_S_mean.collapsed(['time'], iris.analysis.MEAN)
ICHECCCLM_b_S_mean = ICHECCCLM_past_S_mean.collapsed(['time'], iris.analysis.MEAN)
ICHECKNMI_b_S_mean = ICHECKNMI_past_S_mean.collapsed(['time'], iris.analysis.MEAN)
ICHECMPI_b_S_mean = ICHECMPI_past_S_mean.collapsed(['time'], iris.analysis.MEAN)
ICHECSMHI_b_S_mean = ICHECSMHI_past_S_mean.collapsed(['time'], iris.analysis.MEAN)
CRU_S_mean = CRU_S_mean.collapsed(['time'], iris.analysis.MEAN)
#create average of observed baseline data
Obs_S = (CRU_S_mean)
#We want to see the change in temperature from the baseline
CCCmaCanRCM_past_S_mean = (CCCmaCanRCM_past_S_mean.data - CCCmaCanRCM_b_S_mean.data + Obs_S.data)
CCCmaSMHI_past_S_mean = (CCCmaSMHI_past_S_mean.data - CCCmaSMHI_b_S_mean.data + Obs_S.data)
CNRM_past_S_mean = (CNRM_past_S_mean.data - CNRM_b_S_mean.data + Obs_S.data)
CNRMSMHI_past_S_mean = (CNRMSMHI_past_S_mean.data - CNRMSMHI_b_S_mean.data + Obs_S.data)
CSIRO_past_S_mean = (CSIRO_past_S_mean.data - CSIRO_b_S_mean.data + Obs_S.data)
ICHECDMI_past_S_mean = (ICHECDMI_past_S_mean.data - ICHECDMI_b_S_mean.data + Obs_S.data)
ICHECCCLM_past_S_mean = (ICHECCCLM_past_S_mean.data - ICHECCCLM_b_S_mean.data + Obs_S.data)
ICHECKNMI_past_S_mean = (ICHECKNMI_past_S_mean.data - ICHECKNMI_b_S_mean.data + Obs_S.data)
ICHECMPI_past_S_mean = (ICHECMPI_past_S_mean.data - ICHECMPI_b_S_mean.data + Obs_S.data)
ICHECSMHI_past_S_mean = (ICHECSMHI_past_S_mean.data - ICHECSMHI_b_S_mean.data + Obs_S.data)
CCCmaCanRCM_30_S_mean = (CCCmaCanRCM_30_S_mean.data - CCCmaCanRCM_b_S_mean.data + Obs_S.data)
CCCmaSMHI_30_S_mean = (CCCmaSMHI_30_S_mean.data - CCCmaSMHI_b_S_mean.data + Obs_S.data)
CNRM_30_S_mean = (CNRM_30_S_mean.data - CNRM_b_S_mean.data + Obs_S.data)
CNRMSMHI_30_S_mean = (CNRMSMHI_30_S_mean.data - CNRMSMHI_b_S_mean.data + Obs_S.data)
CSIRO_30_S_mean = (CSIRO_30_S_mean.data - CSIRO_b_S_mean.data + Obs_S.data)
ICHECDMI_30_S_mean = (ICHECDMI_30_S_mean.data - ICHECDMI_b_S_mean.data + Obs_S.data)
ICHECCCLM_30_S_mean = (ICHECCCLM_30_S_mean.data - ICHECCCLM_b_S_mean.data + Obs_S.data)
ICHECKNMI_30_S_mean = (ICHECKNMI_30_S_mean.data - ICHECKNMI_b_S_mean.data + Obs_S.data)
ICHECMPI_30_S_mean = (ICHECMPI_30_S_mean.data - ICHECMPI_b_S_mean.data + Obs_S.data)
ICHECSMHI_30_S_mean = (ICHECSMHI_30_S_mean.data - ICHECSMHI_b_S_mean.data + Obs_S.data)
CCCmaCanRCM85_30_S_mean = (CCCmaCanRCM85_30_S_mean.data - CCCmaCanRCM_b_S_mean.data + Obs_S.data)
CCCmaSMHI85_30_S_mean = (CCCmaSMHI85_30_S_mean.data - CCCmaSMHI_b_S_mean.data + Obs_S.data)
CNRM85_30_S_mean = (CNRM85_30_S_mean.data - CNRM_b_S_mean.data + Obs_S.data)
CNRMSMHI85_30_S_mean = (CNRMSMHI85_30_S_mean.data - CNRMSMHI_b_S_mean.data + Obs_S.data)
CSIRO85_30_S_mean = (CSIRO85_30_S_mean.data - CSIRO_b_S_mean.data + Obs_S.data)
ICHECDMI85_30_S_mean = (ICHECDMI85_30_S_mean.data - ICHECDMI_b_S_mean.data + Obs_S.data)
ICHECCCLM85_30_S_mean = (ICHECCCLM85_30_S_mean.data - ICHECCCLM_b_S_mean.data + Obs_S.data)
ICHECKNMI85_30_S_mean = (ICHECKNMI85_30_S_mean.data - ICHECKNMI_b_S_mean.data + Obs_S.data)
ICHECMPI85_30_S_mean = (ICHECMPI85_30_S_mean.data - ICHECMPI_b_S_mean.data + Obs_S.data)
ICHECSMHI85_30_S_mean = (ICHECSMHI85_30_S_mean.data - ICHECSMHI_b_S_mean.data + Obs_S.data)
CCCmaCanRCM_50_S_mean = (CCCmaCanRCM_50_S_mean.data - CCCmaCanRCM_b_S_mean.data + Obs_S.data)
CCCmaSMHI_50_S_mean = (CCCmaSMHI_50_S_mean.data - CCCmaSMHI_b_S_mean.data + Obs_S.data)
CNRM_50_S_mean = (CNRM_50_S_mean.data - CNRM_b_S_mean.data + Obs_S.data)
CNRMSMHI_50_S_mean = (CNRMSMHI_50_S_mean.data - CNRMSMHI_b_S_mean.data + Obs_S.data)
CSIRO_50_S_mean = (CSIRO_50_S_mean.data - CSIRO_b_S_mean.data + Obs_S.data)
ICHECDMI_50_S_mean = (ICHECDMI_50_S_mean.data - ICHECDMI_b_S_mean.data + Obs_S.data)
ICHECCCLM_50_S_mean = (ICHECCCLM_50_S_mean.data - ICHECCCLM_b_S_mean.data + Obs_S.data)
ICHECKNMI_50_S_mean = (ICHECKNMI_50_S_mean.data - ICHECKNMI_b_S_mean.data + Obs_S.data)
ICHECMPI_50_S_mean = (ICHECMPI_50_S_mean.data - ICHECMPI_b_S_mean.data + Obs_S.data)
ICHECSMHI_50_S_mean = (ICHECSMHI_50_S_mean.data - ICHECSMHI_b_S_mean.data + Obs_S.data)
CCCmaCanRCM85_50_S_mean = (CCCmaCanRCM85_50_S_mean.data - CCCmaCanRCM_b_S_mean.data + Obs_S.data)
CCCmaSMHI85_50_S_mean = (CCCmaSMHI85_50_S_mean.data - CCCmaSMHI_b_S_mean.data + Obs_S.data)
CNRM85_50_S_mean = (CNRM85_50_S_mean.data - CNRM_b_S_mean.data + Obs_S.data)
CNRMSMHI85_50_S_mean = (CNRMSMHI85_50_S_mean.data - CNRMSMHI_b_S_mean.data + Obs_S.data)
CSIRO85_50_S_mean = (CSIRO85_50_S_mean.data - CSIRO_b_S_mean.data + Obs_S.data)
ICHECDMI85_50_S_mean = (ICHECDMI85_50_S_mean.data - ICHECDMI_b_S_mean.data + Obs_S.data)
ICHECCCLM85_50_S_mean = (ICHECCCLM85_50_S_mean.data - ICHECCCLM_b_S_mean.data + Obs_S.data)
ICHECKNMI85_50_S_mean = (ICHECKNMI85_50_S_mean.data - ICHECKNMI_b_S_mean.data + Obs_S.data)
ICHECMPI85_50_S_mean = (ICHECMPI85_50_S_mean.data - ICHECMPI_b_S_mean.data + Obs_S.data)
ICHECSMHI85_50_S_mean = (ICHECSMHI85_50_S_mean.data - ICHECSMHI_b_S_mean.data + Obs_S.data)
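#Note (sketch): each series above is bias-corrected with a simple delta approach,
#i.e. corrected(t) = model(t) - model_baseline_mean + observed_baseline_mean,
#which anchors the model climatology to the CRU observations.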
#-------------------------------------------------------------------------
#PART 6: PRINT DATA
import csv
with open('output_AquaCrop_Data_TasmaxA.csv', 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(['Parameter', 'Means'])
#PART 6A: WRITE NORTHERN DATA
writer.writerow(["CCCmaCanRCM_past_N_mean"] + CCCmaCanRCM_past_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CCCmaSMHI_past_N_mean"] + CCCmaSMHI_past_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CNRM_past_N_mean"] + CNRM_past_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CNRMSMHI_past_N_mean"] +CNRMSMHI_past_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CSIRO_past_N_mean"] +CSIRO_past_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECDMI_past_N_mean"] +ICHECDMI_past_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECCCLM_past_N_mean"] +ICHECCCLM_past_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECKNMI_past_N_mean"] +ICHECKNMI_past_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECMPI_past_N_mean"] +ICHECMPI_past_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECSMHI_past_N_mean"] +ICHECSMHI_past_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CCCmaCanRCM_30_N_mean"] + CCCmaCanRCM_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CCCmaSMHI_30_N_mean"] + CCCmaSMHI_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CNRM_30_N_mean"] + CNRM_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CNRMSMHI_30_N_mean"] +CNRMSMHI_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CSIRO_30_N_mean"] +CSIRO_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECDMI_30_N_mean"] +ICHECDMI_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECCCLM_30_N_mean"] +ICHECCCLM_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECKNMI_30_N_mean"] +ICHECKNMI_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECMPI_30_N_mean"] +ICHECMPI_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECSMHI_30_N_mean"] +ICHECSMHI_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CCCmaCanRCM85_30_N_mean"] + CCCmaCanRCM85_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CCCmaSMHI85_30_N_mean"] + CCCmaSMHI85_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CNRM85_30_N_mean"] + CNRM85_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CNRMSMHI85_30_N_mean"] +CNRMSMHI85_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CSIRO85_30_N_mean"] +CSIRO85_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECDMI85_30_N_mean"] +ICHECDMI85_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECCCLM85_30_N_mean"] +ICHECCCLM85_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECKNMI85_30_N_mean"] +ICHECKNMI85_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECMPI85_30_N_mean"] +ICHECMPI85_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECSMHI85_30_N_mean"] +ICHECSMHI85_30_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CCCmaCanRCM_50_N_mean"] + CCCmaCanRCM_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CCCmaSMHI_50_N_mean"] + CCCmaSMHI_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CNRM_50_N_mean"] + CNRM_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CNRMSMHI_50_N_mean"] +CNRMSMHI_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CSIRO_50_N_mean"] +CSIRO_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECDMI_50_N_mean"] +ICHECDMI_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECCCLM_50_N_mean"] +ICHECCCLM_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECKNMI_50_N_mean"] +ICHECKNMI_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECMPI_50_N_mean"] +ICHECMPI_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECSMHI_50_N_mean"] +ICHECSMHI_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CCCmaCanRCM85_50_N_mean"] + CCCmaCanRCM85_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CCCmaSMHI85_50_N_mean"] + CCCmaSMHI85_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CNRM85_50_N_mean"] + CNRM85_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CNRMSMHI85_50_N_mean"] +CNRMSMHI85_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CSIRO85_50_N_mean"] +CSIRO85_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECDMI85_50_N_mean"] +ICHECDMI85_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECCCLM85_50_N_mean"] +ICHECCCLM85_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECKNMI85_50_N_mean"] +ICHECKNMI85_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECMPI85_50_N_mean"] +ICHECMPI85_50_N_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECSMHI85_50_N_mean"] +ICHECSMHI85_50_N_mean.data.flatten().astype(np.str).tolist())
#PART 6B: WRITE CENTRAL DATA
writer.writerow(["CCCmaCanRCM_past_C_mean"] + CCCmaCanRCM_past_C_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CCCmaSMHI_past_C_mean"] + CCCmaSMHI_past_C_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CNRM_past_C_mean"] + CNRM_past_C_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CNRMSMHI_past_C_mean"] +CNRMSMHI_past_C_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["CSIRO_past_C_mean"] +CSIRO_past_C_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECDMI_past_C_mean"] +ICHECDMI_past_C_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECCCLM_past_C_mean"] +ICHECCCLM_past_C_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECKNMI_past_C_mean"] +ICHECKNMI_past_C_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECMPI_past_C_mean"] +ICHECMPI_past_C_mean.data.flatten().astype(np.str).tolist())
writer.writerow(["ICHECSMHI_past_C_mean"] +ICHECSMHI_past_C_mean.data.flatten().astype(np.str).tolist())
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 <NAME> (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Parsing of Netflix Website
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import json
from re import search, compile as recompile, DOTALL, sub
from future.utils import iteritems
import xbmc
import resources.lib.common as common
from resources.lib.database.db_exceptions import ProfilesMissing
from resources.lib.database.db_utils import TABLE_SESSION
from resources.lib.globals import g
from .exceptions import (InvalidProfilesError, InvalidAuthURLError, InvalidMembershipStatusError,
WebsiteParsingError, LoginValidateError, InvalidMembershipStatusAnonymous,
LoginValidateErrorIncorrectPassword)
from .paths import jgraph_get, jgraph_get_list, jgraph_get_path
try: # Python 2
unicode
except NameError: # Python 3
unicode = str # pylint: disable=redefined-builtin
PAGE_ITEMS_INFO = [
'models/userInfo/data/name',
'models/userInfo/data/guid', # Main profile guid
'models/userInfo/data/userGuid', # Current profile guid
'models/userInfo/data/countryOfSignup',
'models/userInfo/data/membershipStatus',
'models/userInfo/data/isTestAccount',
'models/userInfo/data/deviceTypeId',
'models/userInfo/data/isAdultVerified',
'models/userInfo/data/isKids',
'models/userInfo/data/pinEnabled',
'models/serverDefs/data/BUILD_IDENTIFIER',
'models/esnGeneratorModel/data/esn',
'models/memberContext/data/geo/preferredLocale'
]
PAGE_ITEMS_API_URL = {
'auth_url': 'models/userInfo/data/authURL',
# 'ichnaea_log': 'models/serverDefs/data/ICHNAEA_ROOT', can be for XSS attacks?
'api_endpoint_root_url': 'models/serverDefs/data/API_ROOT',
'api_endpoint_url': 'models/playerModel/data/config/ui/initParams/apiUrl',
'request_id': 'models/serverDefs/data/requestId',
'asset_core': 'models/playerModel/data/config/core/assets/core',
'ui_version': 'models/playerModel/data/config/ui/initParams/uiVersion',
'browser_info_version': 'models/browserInfo/data/version',
'browser_info_os_name': 'models/browserInfo/data/os/name',
'browser_info_os_version': 'models/browserInfo/data/os/version',
}
PAGE_ITEM_ERROR_CODE = 'models/flow/data/fields/errorCode/value'
PAGE_ITEM_ERROR_CODE_LIST = 'models\\i18nStrings\\data\\login/login'
JSON_REGEX = r'netflix\.{}\s*=\s*(.*?);\s*</script>'
AVATAR_SUBPATH = ['images', 'byWidth', '320']
PROFILE_DEBUG_INFO = ['profileName', 'isAccountOwner', 'isActive', 'isKids', 'maturityLevel', 'language']
@common.time_execution(immediate=True)
def extract_session_data(content, validate=False, update_profiles=False):
"""
Call all the parsers we need to extract all
the session relevant data from the HTML page
"""
common.debug('Extracting session data...')
react_context = extract_json(content, 'reactContext')
if validate:
validate_login(react_context)
user_data = extract_userdata(react_context)
if user_data.get('membershipStatus') == 'ANONYMOUS':
# Possible known causes:
# -The login password has been changed
# -In the login request, the 'Content-Type' specified is not compliant with the data passed, or is no longer supported
# -Expired profile cookies!? (not verified)
# In these cases it is mandatory to log in again
raise InvalidMembershipStatusAnonymous
if user_data.get('membershipStatus') != 'CURRENT_MEMBER':
# When NEVER_MEMBER it is possible that the account has not been confirmed or renewed
common.error('Can not login, the Membership status is {}',
user_data.get('membershipStatus'))
raise InvalidMembershipStatusError(user_data.get('membershipStatus'))
api_data = extract_api_data(react_context)
# Note: Falcor cache does not exist if membershipStatus is not CURRENT_MEMBER
falcor_cache = extract_json(content, 'falcorCache')
if update_profiles:
parse_profiles(falcor_cache)
# 21/05/2020 - Netflix has introduced a new paging type called "loco" similar to the old "lolomo"
# Extract loco root id
loco_root = falcor_cache['loco']['value'][1]
g.LOCAL_DB.set_value('loco_root_id', loco_root, TABLE_SESSION)
# Check if the profile session is still active
# (when a session expires on the website, the screen automatically returns to the profiles page)
is_profile_session_active = 'componentSummary' in falcor_cache['locos'][loco_root]
# Extract loco root request id
if is_profile_session_active:
component_summary = falcor_cache['locos'][loco_root]['componentSummary']['value']
# Note: 18/06/2020 the request id is now equal to reactContext models/serverDefs/data/requestId
g.LOCAL_DB.set_value('loco_root_requestid', component_summary['requestId'], TABLE_SESSION)
else:
g.LOCAL_DB.set_value('loco_root_requestid', '', TABLE_SESSION)
# Extract loco continueWatching id and index
# The following commented code was needed for update_loco_context in api_requests.py, but currently
# seems no longer required to update the continueWatching list, so we keep it in case of future nf changes
# -- INIT --
# cw_list_data = jgraph_get('continueWatching', falcor_cache['locos'][loco_root], falcor_cache)
# if cw_list_data:
# context_index = falcor_cache['locos'][loco_root]['continueWatching']['value'][2]
# g.LOCAL_DB.set_value('loco_continuewatching_index', context_index, TABLE_SESSION)
# g.LOCAL_DB.set_value('loco_continuewatching_id',
# jgraph_get('componentSummary', cw_list_data)['id'], TABLE_SESSION)
# elif is_profile_session_active:
# # Todo: In the new profiles, there is no 'continueWatching' context
# # How get or generate the continueWatching context?
# # NOTE: it was needed for update_loco_context in api_requests.py
# cur_profile = jgraph_get_path(['profilesList', 'current'], falcor_cache)
# common.warn('Context continueWatching not found in locos for profile guid {}.',
# jgraph_get('summary', cur_profile)['guid'])
# g.LOCAL_DB.set_value('loco_continuewatching_index', '', TABLE_SESSION)
# g.LOCAL_DB.set_value('loco_continuewatching_id', '', TABLE_SESSION)
# else:
# common.warn('Is not possible to find the context continueWatching, the profile session is no more active')
# g.LOCAL_DB.set_value('loco_continuewatching_index', '', TABLE_SESSION)
# g.LOCAL_DB.set_value('loco_continuewatching_id', '', TABLE_SESSION)
# -- END --
# Save only some info of the current profile from user data
g.LOCAL_DB.set_value('build_identifier', user_data.get('BUILD_IDENTIFIER'), TABLE_SESSION)
if not g.LOCAL_DB.get_value('esn', table=TABLE_SESSION):
g.LOCAL_DB.set_value('esn', common.generate_android_esn() or user_data['esn'], TABLE_SESSION)
g.LOCAL_DB.set_value('locale_id', user_data.get('preferredLocale').get('id', 'en-US'))
# Extract the client version from assets core
result = search(r'-([0-9\.]+)\.js$', api_data.pop('asset_core'))
if not result:
common.error('It was not possible to extract the client version!')
api_data['client_version'] = '6.0023.976.011'
else:
api_data['client_version'] = result.groups()[0]
# Save api urls
for key, path in list(api_data.items()):
g.LOCAL_DB.set_value(key, path, TABLE_SESSION)
api_data['is_profile_session_active'] = is_profile_session_active
return api_data
@common.time_execution(immediate=True)
def parse_profiles(data):
"""Parse profile information from Netflix response"""
profiles_list = jgraph_get_list('profilesList', data)
try:
if not profiles_list:
raise InvalidProfilesError('It has not been possible to obtain the list of profiles.')
sort_order = 0
current_guids = []
for index, profile_data in iteritems(profiles_list): # pylint: disable=unused-variable
summary = jgraph_get('summary', profile_data)
guid = summary['guid']
current_guids.append(guid)
common.debug('Parsing profile {}', summary['guid'])
avatar_url = _get_avatar(profile_data, data, guid)
is_active = summary.pop('isActive')
g.LOCAL_DB.set_profile(guid, is_active, sort_order)
g.SHARED_DB.set_profile(guid, sort_order)
# Add profile language description translated from locale
summary['language_desc'] = g.py2_decode(xbmc.convertLanguage(summary['language'][:2], xbmc.ENGLISH_NAME))
for key, value in iteritems(summary):
if key in PROFILE_DEBUG_INFO:
common.debug('Profile info {}', {key: value})
if key == 'profileName': # The profile name is HTML-encoded
value = parse_html(value)
g.LOCAL_DB.set_profile_config(key, value, guid)
g.LOCAL_DB.set_profile_config('avatar', avatar_url, guid)
sort_order += 1
_delete_non_existing_profiles(current_guids)
except Exception:
import traceback
common.error(g.py2_decode(traceback.format_exc(), 'latin-1'))
common.error('Profile list data: {}', profiles_list)
raise InvalidProfilesError
def _delete_non_existing_profiles(current_guids):
list_guid = g.LOCAL_DB.get_guid_profiles()
for guid in list_guid:
if guid not in current_guids:
common.debug('Deleting non-existing profile {}', guid)
g.LOCAL_DB.delete_profile(guid)
g.SHARED_DB.delete_profile(guid)
# Ensures at least one active profile
try:
g.LOCAL_DB.get_active_profile_guid()
except ProfilesMissing:
g.LOCAL_DB.switch_active_profile(g.LOCAL_DB.get_guid_owner_profile())
g.settings_monitor_suspend(True)
# Verify if auto select profile exists
autoselect_profile_guid = g.LOCAL_DB.get_value('autoselect_profile_guid', '')
if autoselect_profile_guid and autoselect_profile_guid not in current_guids:
common.warn('Auto-selection disabled, the GUID {} no longer exists', autoselect_profile_guid)
g.LOCAL_DB.set_value('autoselect_profile_guid', '')
g.ADDON.setSetting('autoselect_profile_name', '')
g.ADDON.setSettingBool('autoselect_profile_enabled', False)
# Verify if profile for library playback exists
library_playback_profile_guid = g.LOCAL_DB.get_value('library_playback_profile_guid')
if library_playback_profile_guid and library_playback_profile_guid not in current_guids:
common.warn('Profile set for playback from library cleared, the GUID {} no longer exists',
library_playback_profile_guid)
# Save the selected profile guid
g.LOCAL_DB.set_value('library_playback_profile_guid', '')
# Save the selected profile name
g.ADDON.setSetting('library_playback_profile', '')
g.settings_monitor_suspend(False)
def _get_avatar(profile_data, data, guid):
try:
avatar = jgraph_get('avatar', profile_data, data)
return jgraph_get_path(AVATAR_SUBPATH, avatar)
except (KeyError, TypeError):
common.warn('Cannot find avatar for profile {}', guid)
common.debug('Profile list data: {}', profile_data)
return g.ICON
@common.time_execution(immediate=True)
def extract_userdata(react_context, debug_log=True):
"""Extract essential userdata from the reactContext of the webpage"""
common.debug('Extracting userdata from webpage')
user_data = {}
for path in (path.split('/') for path in PAGE_ITEMS_INFO):
try:
extracted_value = {path[-1]: common.get_path(path, react_context)}
user_data.update(extracted_value)
if 'esn' not in path and debug_log:
common.debug('Extracted {}', extracted_value)
except (AttributeError, KeyError):
common.error('Could not extract {}', path)
return user_data
def extract_api_data(react_context, debug_log=True):
"""Extract api urls from the reactContext of the webpage"""
common.debug('Extracting api urls from webpage')
api_data = {}
for key, value in list(PAGE_ITEMS_API_URL.items()):
path = value.split('/')
try:
extracted_value = {key: common.get_path(path, react_context)}
api_data.update(extracted_value)
if debug_log:
common.debug('Extracted {}', extracted_value)
except (AttributeError, KeyError):
common.error('Could not extract {}', path)
return assert_valid_auth_url(api_data)
def assert_valid_auth_url(user_data):
"""Raise an exception if user_data does not contain a valid authURL"""
if len(user_data.get('auth_url', '')) != 42:
raise InvalidAuthURLError('authURL is invalid')
return user_data
def validate_login(react_context):
path_code_list = PAGE_ITEM_ERROR_CODE_LIST.split('\\')
path_error_code = PAGE_ITEM_ERROR_CODE.split('/')
if common.check_path_exists(path_error_code, react_context):
# If the path exists, a login error occurs
try:
error_code_list = common.get_path(path_code_list, react_context)
error_code = common.get_path(path_error_code, react_context)
common.error('Login not valid, error code {}', error_code)
error_description = common.get_local_string(30102) + error_code
if error_code in error_code_list:
error_description = error_code_list[error_code]
if 'email_' + error_code in error_code_list:
error_description = error_code_list['email_' + error_code]
if 'login_' + error_code in error_code_list:
error_description = error_code_list['login_' + error_code]
if 'incorrect_password' in error_code:
raise LoginValidateErrorIncorrectPassword(common.remove_html_tags(error_description))
raise LoginValidateError(common.remove_html_tags(error_description))
except (AttributeError, KeyError):
import traceback
common.error(g.py2_decode(traceback.format_exc(), 'latin-1'))
error_msg = (
'Something is wrong in PAGE_ITEM_ERROR_CODE or PAGE_ITEM_ERROR_CODE_LIST paths.'
'react_context data may have changed.')
common.error(error_msg)
raise LoginValidateError(error_msg)
@common.time_execution(immediate=True)
def extract_json(content, name):
"""Extract json from netflix content page"""
common.debug('Extracting {} JSON', name)
json_str = None
try:
json_array = recompile(JSON_REGEX.format(name), DOTALL).findall(content.decode('utf-8'))
json_str = json_array[0]
json_str_replace = json_str.replace('\\"', '\\\\"') # Escape double-quotes
json_str_replace = json_str_replace.replace('\\s', '\\\\s') # Escape \s
json_str_replace = json_str_replace.replace('\\n', '\\\\n') # Escape line feed
json_str_replace = json_str_replace.replace('\\t', '\\\\t') # Escape tab
json_str_replace = json_str_replace.encode().decode('unicode_escape') # Decode the string as unicode
json_str_replace = sub(r'\\(?!["])', r'\\\\', json_str_replace) # Escape backslash (only when it is not followed by double quotation marks \")
return json.loads(json_str_replace)
except Exception:
if json_str:
common.error('JSON string trying to load: {}', json_str)
import traceback
common.error(g.py2_decode(traceback.format_exc(), 'latin-1'))
raise WebsiteParsingError('Unable to extract {}'.format(name))
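# Illustrative sketch (hypothetical page fragment, not part of the module): given
# content such as '... netflix.reactContext = {"models": {...}};</script> ...',
# extract_json(content, 'reactContext') matches the assigned JSON via JSON_REGEX,
# applies the escaping fix-ups above and returns it as a Python dict.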
def extract_parental_control_data(content, current_maturity):
"""Extract the content of parental control data"""
try:
react_context = extract_json(content, 'reactContext')
# Extract country max maturity value
max_maturity = common.get_path(['models', 'parentalControls', 'data', 'accountProps', 'countryMaxMaturity'],
react_context)
# Extract rating levels
rc_rating_levels = common.get_path(['models', 'memberContext', 'data', 'userInfo', 'ratingLevels'],
react_context)
rating_levels = []
levels_count = len(rc_rating_levels) - 1
current_level_index = levels_count
for index, rating_level in enumerate(rc_rating_levels):
if index == levels_count:
# Last level must
result.
:param title: A title of the field.
:param desc: A description of the field.
:param values: The values used to make the radio buttons. Must be a
sequence of pairs, such as (('Female', 1), ('Male', 2), ('Gay', 3))
:param args: Arguments to be rendered in response.
:param objects: Files such as css, js to be used for the field.
They are rendered along with the field.
:param required: A flag to determine the field is required or not.
:param default: A default value of the field.
:param validator: A validator function to be used for the input.
:param generate_id: (Not in use) Flag to determine if the id
is to be generated automatically.
:param collapsable: A flag to determine
if the field is collapsable or not.
:param vertical: A flag to determine whether the buttons lie vertically.
"""
TYPE = 'radio'
FIELD_TEMPLATE = ("""%for t, v in values:\n"""
"""<%if v == value:\n"""
""" checked = 'checked'\n"""
"""else:\n"""
""" checked = ''\n"""
"""%>\n"""
"""<input type = 'radio' ${args} value = '${v}'"""
""" ${checked}>"""
"""<div class = 'multi-title'>${t}</div>\n"""
""" %if vertical:\n"""
""" <br />\n"""
""" %endif\n"""
"""%endfor""")
SELECT_ATTR = 'checked'
FLID = 'RadioFieldFIELD_TEMPLATE'
th.get_template(string = FIELD_TEMPLATE, tid = FLID)
def __init__(self, name = None, enginename = '', title = '', desc = '',
values = [], args = {}, objects = [], required = False, default = '',
validator = None, generate_id = False, collapsable = False,
vertical = False):
"""
Initialization function.
"""
self.vertical = vertical
if not values:
raise ValueError("The argument 'values' must be given")
self.values = values
TextField.__init__(self, name, enginename, title, desc,
args, objects, required, default,
validator, generate_id, collapsable)
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return rendered string.
"""
context = {}
context['args'] = self.expand_args(except_value = True)
context['values'] = self.values
context['value'] = value or self.default
context['vertical'] = self.vertical
return templatehandler.render(context, self.enginename, tid = self.FLID)
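# Usage sketch (illustrative only; names follow the docstring above, not a test
# from the original library):
#   gender = RadioField(name='gender', title='Gender',
#                       values=(('Female', 1), ('Male', 2)),
#                       required=True, vertical=True)
#   html = gender.render_body(value=1)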
class CheckboxGroup(TextField):
"""
A field class representing checkbox field.
Initialization takes following arguments.
:param name: A name of the field
:param enginename: A template engine to render result.
:param title: A title of the field.
:param desc: A description of the field.
:param values: The values used to make the checkboxes. Must be a
sequence of pairs, such as (('Female', 1), ('Male', 2), ('Gay', 3))
:param args: Arguments to be rendered in response.
:param objects: Files such as css, js to be used for the field.
They are rendered along with the field.
:param required: A flag to determine the field is required or not.
:param default: A default value of the field.
:param validator: A validator function to be used for the input.
:param generate_id: (Not in use) Flag to determine if the id
is to be generated automatically.
:param collapsable: A flag to determine
if the field is collapsable or not.
:param vertical: A flag to determine whether the buttons lie vertically.
"""
TYPE = 'checkbox'
REQUIRE_VALUES_ON_VALIDATE = True
FIELD_TEMPLATE = ("""%for t, v in values:\n"""
"""<%if v in value:\n"""
""" selected = 'checked'\n"""
"""else:\n"""
""" selected = ''\n"""
"""%>\n"""
"""<input type = "checkbox" ${args} value = "${v}" """
""" name = "${name}_${v}" ${selected}>"""
"""<span class = "multi-title">${t}</span>\n"""
""" %if vertical:\n"""
""" <br />\n"""
""" %endif\n"""
"""%endfor""")
SELECT_ATTR = 'checked'
FLID = 'CheckboxGroupFIELD_TEMPLATE'
th.get_template(string = FIELD_TEMPLATE, tid = FLID)
def __init__(self, name = None, enginename = '', title = '', desc = '',
values = [], args = {}, objects = [], required = False, default = '',
validator = None, generate_id = False, vertical = False,
collapsable = False):
"""
Initialization function.
"""
self.vertical = vertical
if not values:
raise ValueError("The argument 'values' must be given")
self.values = values
TextField.__init__(self, name, enginename, title, desc,
args, objects, required, default, validator, generate_id,
collapsable)
def validate(self, input_value = None):
"""
A method to check validation of input value.
It returns value and error string
"""
values = []
pv = ['%s_%s' % (self.name, x[1]) for x in self.values]
for k in input_value:
if k in pv:
values.append(input_value[k])
if input_value.get(self.name, None):
values.extend(input_value[self.name])
if not self.validator:
return ((self.name, values, None), )
try:
v_v = []
for ov in values:
v = self.validator
if isinstance(v, (list, tuple)):
iv = ov
for i in self.validator:
iv = i.to_python(iv)
value = iv
else:
value = v.to_python(ov)
v_v.append(value)
except formencode.Invalid, e:
return ((self.name, None, e), )
return ((self.name, v_v, None), )
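# Illustrative note (sketch): for a field named 'colors' with values
# (('Red', 1), ('Blue', 2)), validate() collects inputs posted under keys such as
# 'colors_1' and 'colors_2' (plus any values posted under 'colors' itself) and
# returns (('colors', [collected values], None),), or (('colors', None, error),)
# when a formencode validator rejects one of the items.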
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return rendered string
"""
context = {}
context['args'] = self.expand_args(except_value = True, except_name = True)
context['values'] = [(x, unicode(y)) for x, y in self.values]
if value:
context['value'] = [unicode(x) for x in value]
else:
context['value'] = []
context['name'] = self.name
context['vertical'] = self.vertical
return templatehandler.render(context, self.enginename, tid = self.FLID)
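# Hedged usage sketch (editorial addition, not part of the original module):
# it shows how a CheckboxGroup could be built and rendered, following the
# docstring above. The engine name 'mako' and the example values below are
# assumptions for illustration only.
def _example_checkbox_group():
    """Return rendered HTML for a sample CheckboxGroup (illustration only)."""
    colours = CheckboxGroup(name='colours', enginename='mako',
                            title='Colours',
                            values=(('Red', 1), ('Green', 2), ('Blue', 3)),
                            vertical=True)
    # render_body expects the list of currently selected values
    return colours.render_body(value=[1, 3])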
class SelectField(RadioField):
"""
A field class representing select field.
"""
SELECT_TEMPLATE = ("""<select ${args}>\n"""
"""% for t, v in values:\n"""
"""<%if v == value:\n"""
""" selected = 'selected'\n"""
"""else:\n"""
""" selected = ''\n"""
"""%>\n"""
""" <option value = "${v}" ${selected}>"""
""" ${t} </option>\n"""
"""% endfor\n"""
"""</select>""")
FLID = 'SelectFieldSELECT_TEMPLATE'
th.get_template(string = SELECT_TEMPLATE, tid = FLID)
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return rendered string
"""
context = {}
context['args'] = self.expand_args(except_value = True)
context['values'] = self.values
context['value'] = value or self.default
return templatehandler.render(context, self.enginename, tid = self.FLID)
class TextArea(TextField):
"""
A field class representing text area field.
"""
FIELD_TEMPLATE = """<textarea ${args}>${value | h}</textarea>"""
FLID = 'TextAreaFIELD_TEMPLATE'
th.get_template(string = FIELD_TEMPLATE, tid = FLID)
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return rendered string
"""
context = {}
context['args'] = self.expand_args(except_value = True)
if value:
context['value'] = value
else:
context['value'] = ''
return templatehandler.render(context, self.enginename, tid = self.FLID)
class RichText(TextField):
"""
A field class representing text area field that has WYSIWYG editor.
"""
FIELD_TEMPLATE = """
<script type = "text/javascript">
tinyMCE.init({
mode : %(mode)s ,
theme : "advanced",
plugins : "table,inlinepopups",
theme_advanced_buttons1 : "formatselect,styleselect, |,bold,italic,underline,separator,strikethrough,justifyleft,justifycenter,justifyright, justifyfull,blockquote,bullist,numlist,table,|,undo,redo,link,unlink,image,|,code",
theme_advanced_buttons2 : "",
theme_advanced_buttons3 : "",
theme_advanced_toolbar_location : "top",
theme_advanced_toolbar_align : "left",
theme_advanced_statusbar_location : "bottom",
theme_advanced_resizing : true,
theme_advanced_styles : "code=code;float-right=floatright;float-left=floatleft",
theme_advanced_blockformats : "p,h1,h2,h3,h4,blockquote,div",
relative_urls : false,
remove_script_host : false,
extended_valid_elements : "iframe[*]",
});
</script>
<textarea %(args)s >%(value)s</textarea>
"""
OBJECTS = (('/js/tiny_mce/tiny_mce.js', 'text/javascript'),)
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return rendered string
"""
context = {}
context['args'] = self.expand_args(except_value = True)
id = self.args.get('id', '')
if id:
context['mode'] = '"exact", "elements" : "%s"' % id
else:
context['mode'] = '"textareas"'
if value:
context['value'] = value
else:
context['value'] = ''
return self.FIELD_TEMPLATE % context
class DescriptionField(TextField):
"""
A field class representing description field
"""
FIELD_TEMPLATE = """<p %(args)s >%(message)s</p>"""
USE_FIELD_TITLE = False
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return rendered string
"""
context = {}
context['args'] = self.expand_args(value = value, except_name = True)
context['message'] = self.title
return self.FIELD_TEMPLATE % context
class FileField(TextField):
"""
A field class representing file field, used for uploading file.
"""
TYPE = 'file'
FIELD_TEMPLATE = ("""<input type = "%(TYPE)s" %(args)s />\n"""
"""%(disable)s"""
)
REPLACE_PREFIX = '__replace_field_'
def get_desc(self):
"""
a method to return description.
"""
return self.desc
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return rendered string
"""
context = {}
context['args'] = self.expand_args(except_value = True)
context['title'] = self.title
context['TYPE'] = self.TYPE
if value is None:
context['disable'] = ''
else:
a = {'name':self.REPLACE_PREFIX+self.name,
}
astr = ''
for k in a:
astr+= keyvalue2str(k, a[k])
t = '<input type = "checkbox" %s />replace\n'
context['disable'] = t % astr
return self.FIELD_TEMPLATE % context
def validate(self, input_value | |
= op.v_max
x_eq = op.x_eq
y_eq = op.y_eq
z_eq = op.z_eq
else:
assert(ob)
u_min = ob.pov.u_min
u_max = ob.pov.u_max
v_min = ob.pov.v_min
v_max = ob.pov.v_max
x_eq = ob.pov.x_eq
y_eq = ob.pov.y_eq
z_eq = ob.pov.z_eq
#keep object rotation and location for the updated object
obloc = ob.location
obrot = ob.rotation_euler # In radians
#Parametric addon has no loc rot, some extra work is needed
#in case cursor has moved
curloc = bpy.context.scene.cursor_location
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.reveal()
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.delete(type='VERT')
bpy.ops.mesh.primitive_xyz_function_surface(x_eq=x_eq, y_eq=y_eq, z_eq=z_eq, range_u_min=u_min, range_u_max=u_max, range_v_min=v_min, range_v_max=v_max)
bpy.ops.mesh.select_all(action='SELECT')
#extra work:
bpy.ops.transform.translate(value=(obloc-curloc), proportional_size=1)
bpy.ops.transform.rotate(axis=obrot, proportional_size=1)
bpy.ops.mesh.hide(unselected=False)
bpy.ops.object.mode_set(mode="OBJECT")
if not ob:
bpy.ops.mesh.primitive_xyz_function_surface(x_eq=x_eq, y_eq=y_eq, z_eq=z_eq, range_u_min=u_min, range_u_max=u_max, range_v_min=v_min, range_v_max=v_max)
ob = context.object
ob.name = ob.data.name = "PovParametric"
ob.pov.object_as = "PARAMETRIC"
ob.pov.u_min = u_min
ob.pov.u_max = u_max
ob.pov.v_min = v_min
ob.pov.v_max = v_max
ob.pov.x_eq = x_eq
ob.pov.y_eq = y_eq
ob.pov.z_eq = z_eq
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.hide(unselected=False)
bpy.ops.object.mode_set(mode="OBJECT")
class POVRAY_OT_parametric_add(bpy.types.Operator):
bl_idname = "pov.addparametric"
bl_label = "Parametric"
bl_description = "Add Paramertic"
bl_options = {'REGISTER', 'UNDO'}
# XXX Keep it in sync with __init__'s Parametric primitive
u_min = FloatProperty(name = "U Min",
description = "",
default = 0.0)
v_min = FloatProperty(name = "V Min",
description = "",
default = 0.0)
u_max = FloatProperty(name = "U Max",
description = "",
default = 6.28)
v_max = FloatProperty(name = "V Max",
description = "",
default = 12.57)
x_eq = StringProperty(
maxlen=1024, default = "cos(v)*(1+cos(u))*sin(v/8)")
y_eq = StringProperty(
maxlen=1024, default = "sin(u)*sin(v/8)+cos(v/8)*1.5")
z_eq = StringProperty(
maxlen=1024, default = "sin(v)*(1+cos(u))*sin(v/8)")
def execute(self,context):
props = self.properties
u_min = props.u_min
v_min = props.v_min
u_max = props.u_max
v_max = props.v_max
x_eq = props.x_eq
y_eq = props.y_eq
z_eq = props.z_eq
pov_parametric_define(context, self, None)
self.report({'INFO'}, "This native POV-Ray primitive "
"won't have any vertex to show in edit mode")
return {'FINISHED'}
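# Hedged usage sketch (editorial addition): once the add-on is registered,
# the operator above can be called from a script through its bl_idname
# "pov.addparametric". The equation and ranges below are illustrative only.
def _example_add_parametric():
    """Add a parametric POV-Ray surface via the operator (illustration only)."""
    bpy.ops.pov.addparametric(u_min=0.0, u_max=6.28, v_min=0.0, v_max=12.57,
                              x_eq="cos(v)*(1+cos(u))*sin(v/8)")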
class POVRAY_OT_parametric_update(bpy.types.Operator):
bl_idname = "pov.parametric_update"
bl_label = "Update"
bl_description = "Update parametric object"
bl_options = {'REGISTER', 'UNDO'}
COMPAT_ENGINES = {'POVRAY_RENDER'}
@classmethod
def poll(cls, context):
engine = context.scene.render.engine
ob = context.object
return (ob and ob.data and ob.type == 'MESH' and engine in cls.COMPAT_ENGINES)
def execute(self, context):
pov_parametric_define(context, None, context.object)
return {'FINISHED'}
#######################################################################
class POVRAY_OT_shape_polygon_to_circle_add(bpy.types.Operator):
bl_idname = "pov.addpolygontocircle"
bl_label = "Polygon To Circle Blending"
bl_description = "Add Polygon To Circle Blending Surface"
bl_options = {'REGISTER', 'UNDO'}
COMPAT_ENGINES = {'POVRAY_RENDER'}
# XXX Keep it in sync with __init__'s polytocircle properties
polytocircle_resolution = IntProperty(name = "Resolution",
description = "",
default = 3, min = 0, max = 256)
polytocircle_ngon = IntProperty(name = "NGon",
description = "",
min = 3, max = 64,default = 5)
polytocircle_ngonR = FloatProperty(name = "NGon Radius",
description = "",
default = 0.3)
polytocircle_circleR = FloatProperty(name = "Circle Radius",
description = "",
default = 1.0)
def execute(self,context):
props = self.properties
ngon = props.polytocircle_ngon
ngonR = props.polytocircle_ngonR
circleR = props.polytocircle_circleR
resolution = props.polytocircle_resolution
layers = 20*[False]
layers[0] = True
bpy.ops.mesh.primitive_circle_add(vertices=ngon, radius=ngonR, fill_type='NGON',enter_editmode=True, layers=layers)
bpy.ops.transform.translate(value=(0, 0, 1))
bpy.ops.mesh.subdivide(number_cuts=resolution)
numCircleVerts = ngon + (ngon*resolution)
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.mesh.primitive_circle_add(vertices=numCircleVerts, radius=circleR, fill_type='NGON',enter_editmode=True, layers=layers)
bpy.ops.transform.translate(value=(0, 0, -1))
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.bridge_edge_loops()
if ngon < 5:
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.mesh.primitive_circle_add(vertices=ngon, radius=ngonR, fill_type='TRIFAN',enter_editmode=True, layers=layers)
bpy.ops.transform.translate(value=(0, 0, 1))
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.remove_doubles()
bpy.ops.object.mode_set(mode='OBJECT')
ob = context.object
ob.name = "Polygon_To_Circle"
ob.pov.object_as = 'POLYCIRCLE'
ob.pov.ngon = ngon
ob.pov.ngonR = ngonR
ob.pov.circleR = circleR
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.hide(unselected=False)
bpy.ops.object.mode_set(mode="OBJECT")
return {'FINISHED'}
#############################IMPORT
class ImportPOV(bpy.types.Operator, ImportHelper):
"""Load Povray files"""
bl_idname = "import_scene.pov"
bl_label = "POV-Ray files (.pov/.inc)"
bl_options = {'PRESET', 'UNDO'}
COMPAT_ENGINES = {'POVRAY_RENDER'}
# -----------
# File props.
files = CollectionProperty(type=bpy.types.OperatorFileListElement, options={'HIDDEN', 'SKIP_SAVE'})
directory = StringProperty(maxlen=1024, subtype='FILE_PATH', options={'HIDDEN', 'SKIP_SAVE'})
filename_ext = {".pov",".inc"}
filter_glob = StringProperty(
default="*.pov;*.inc",
options={'HIDDEN'},
)
import_at_cur = BoolProperty(name="Import at Cursor Location",
description = "Ignore Object Matrix",
default=False)
def execute(self, context):
from mathutils import Matrix
verts = []
faces = []
materials = []
blendMats = [] ##############
povMats = [] ##############
colors = []
matNames = []
lenverts = None
lenfaces = None
suffix = -1
name = 'Mesh2_%s'%suffix
name_search = False
verts_search = False
faces_search = False
plane_search = False
box_search = False
cylinder_search = False
sphere_search = False
cone_search = False
tex_search = False ##################
cache = []
matrixes = {}
writematrix = False
index = None
value = None
#filepov = bpy.path.abspath(self.filepath) #was used for single files
def mat_search(cache):
r = g = b = 0.5
f = t = 0
color = None
for item, value in enumerate(cache):
if value == 'texture':
pass
if value == 'pigment':
if cache[item+2] in {'rgb','srgb'}:
pass
elif cache[item+2] in {'rgbf','srgbf'}:
pass
elif cache[item+2] in {'rgbt','srgbt'}:
try:
r,g,b,t = float(cache[item+3]),float(cache[item+4]),float(cache[item+5]),float(cache[item+6])
except:
r = g = b = t = float(cache[item+2])
color = (r,g,b,t)
elif cache[item+2] in {'rgbft','srgbft'}:
pass
else:
pass
if colors == [] or (colors != [] and color not in colors):
colors.append(color)
name = ob.name+"_mat"
matNames.append(name)
mat = bpy.data.materials.new(name)
mat.diffuse_color = (r,g,b)
mat.alpha = 1-t
if mat.alpha != 1:
mat.use_transparency=True
ob.data.materials.append(mat)
else:
for i, value in enumerate(colors):
if color == value:
ob.data.materials.append(bpy.data.materials[matNames[i]])
for file in self.files:
print ("Importing file: "+ file.name)
filepov = self.directory + file.name
for line in open(filepov):
string = line.replace("{"," ")
string = string.replace("}"," ")
string = string.replace("<"," ")
string = string.replace(">"," ")
string = string.replace(","," ")
lw = string.split()
lenwords = len(lw)
if lw:
if lw[0] == "object":
writematrix = True
if writematrix:
if lw[0] not in {"object","matrix"}:
index = lw[0]
if lw[0] in {"matrix"}:
value = [float(lw[1]),float(lw[2]),float(lw[3]),\
float(lw[4]),float(lw[5]),float(lw[6]),\
float(lw[7]),float(lw[8]),float(lw[9]),\
float(lw[10]),float(lw[11]),float(lw[12])]
matrixes[index]=value
writematrix = False
for line in open(filepov):
S = line.replace("{"," { ")
S = S.replace("}"," } ")
S = S.replace(","," ")
S = S.replace("<","")
S = S.replace(">"," ")
S = S.replace("="," = ")
S = S.replace(";"," ; ")
S = S.split()
lenS= len(S)
for i,word in enumerate(S):
##################Primitives Import##################
if word == 'cone':
cone_search = True
name_search = False
if cone_search:
cache.append(word)
if cache[-1] == '}':
try:
x0 = float(cache[2])
y0 = float(cache[3])
z0 = float(cache[4])
r0 = float(cache[5])
x1 = float(cache[6])
y1 = float(cache[7])
z1 = float(cache[8])
r1 = float(cache[9])
# Y is height in most pov files, not z
bpy.ops.pov.cone_add(base=r0, cap=r1, height=(y1-y0))
ob = context.object
ob.location = (x0,y0,z0)
#ob.scale = (r,r,r)
mat_search(cache)
except (ValueError):
pass
cache = []
cone_search = False
if word == 'plane':
plane_search = True
name_search = False
if plane_search:
cache.append(word)
if cache[-1] == '}':
try:
bpy.ops.pov.addplane()
ob = context.object
mat_search(cache)
except (ValueError):
pass
cache = []
plane_search = False
if word == 'box':
box_search = True
name_search = False
if box_search:
cache.append(word)
if cache[-1] == '}':
try:
x0 = float(cache[2])
y0 = float(cache[3])
z0 = float(cache[4])
x1 = float(cache[5])
y1 = float(cache[6])
z1 = float(cache[7])
#imported_corner_1=(x0, y0, z0)
#imported_corner_2 =(x1, y1, z1)
center = ((x0 + x1)/2,(y0 + y1)/2,(z0 + z1)/2)
bpy.ops.pov.addbox()
ob = context.object
ob.location = center
mat_search(cache)
except (ValueError):
pass
cache = []
box_search = False
if word == 'cylinder':
cylinder_search = True
name_search = False
if cylinder_search:
cache.append(word)
if cache[-1] == '}':
try:
x0 = float(cache[2])
y0 = float(cache[3])
z0 = float(cache[4])
x1 = float(cache[5])
y1 = float(cache[6])
z1 = float(cache[7])
imported_cyl_loc=(x0, y0, z0)
imported_cyl_loc_cap =(x1, y1, z1)
r = float(cache[8])
vec = Vector(imported_cyl_loc_cap ) - Vector(imported_cyl_loc)
depth = vec.length
rot = Vector((0, 0, 1)).rotation_difference(vec) # Rotation from Z axis.
trans = rot * Vector((0, 0, depth / 2)) # Such that origin is at center of the base of the cylinder.
#center = ((x0 + x1)/2,(y0 + y1)/2,(z0 + z1)/2)
scaleZ = sqrt((x1-x0)**2+(y1-y0)**2+(z1-z0)**2)/2
bpy.ops.pov.addcylinder(R=r, imported_cyl_loc=imported_cyl_loc, imported_cyl_loc_cap=imported_cyl_loc_cap)
ob = context.object
ob.location = (x0, y0, z0)
ob.rotation_euler = rot.to_euler()
ob.scale = (1,1,scaleZ)
#scale data rather than obj?
# bpy.ops.object.mode_set(mode='EDIT')
# bpy.ops.mesh.reveal()
# bpy.ops.mesh.select_all(action='SELECT')
# bpy.ops.transform.resize(value=(1,1,scaleZ), constraint_orientation='LOCAL')
# bpy.ops.mesh.hide(unselected=False)
# bpy.ops.object.mode_set(mode='OBJECT')
mat_search(cache)
except (ValueError):
pass
cache = []
cylinder_search = False
if word == 'sphere':
sphere_search = True
name_search = False
if sphere_search:
cache.append(word)
if cache[-1] == '}':
x = y = z = r = 0
try:
x = float(cache[2])
y = float(cache[3])
z = float(cache[4])
r = float(cache[5])
except (ValueError):
pass
except:
x = y = z = float(cache[2])
r = float(cache[3])
bpy.ops.pov.addsphere(R=r, imported_loc=(x, y, z))
ob = context.object
ob.location = (x,y,z)
ob.scale = (r,r,r)
mat_search(cache)
cache = []
sphere_search = False
##################End Primitives | |
<filename>colte.py
def colte(sid,logg,feh,gg,bp,rp,j2,h2,k2,ebv,DR2=False,DR3=False,bprp_ex=False,pmod=False,COD=False,outfile=False,MC=False,trials=False,wato=False,elogg=[],efeh=[],egg=[],ebp=[],erp=[],ej2=[],eh2=[],ek2=[],eebv=[]):
'''
PURPOSE:
Compute stellar effective temperatures using colour-Teff relations for the
Gaia and 2MASS photometric systems. User has to choose either Gaia DR2 or
DR3 photometry: no mixing of the two! The default extinction law is that
of Fitzpatrick (1999, renormalized as per Schlafly & Finkbeiner 2011 -
FSF). The option to use the extinction law of Cardelli, Clayton & Mathis
(1989, with optical from O'Donnell 1994 - COD) is available.
EXPLANATION:
The relations used to derive Teff are from Casagrande et al. (2021).
For each star, Teffs are computed from up to 12 different colour indices
and results are written into a csv file.
If the option for a MonteCarlo is set, Teff uncertainties are computed
for each colour index, and a final weighted average Teff along with its
weighted standard deviation is derived. Teff from weighted average will
likely have the best accuracy. However, in the pursuit of precision, one
might be better off choosing Teff from colour indices with small
intrinsic scatter (see discussion in Section 4 of Casagrande+21).
The routine applies a few bare quality cuts on input photometry by
removing BP and RP<5, G<6, J<5.0, H<4.8, K<4.2, and if uncertainties are
passed in, also removing ej2>0.05, eh2>0.05, ek2>0.05. These cuts are
mainly to avoid issues due to saturation at bright magnitudes (and to
some extent large photometric errors for faint 2MASS magnitudes).
Further quality cuts on Gaia photometry can be set with input parameters
bprp_ex= and pmod=
Also, stars with ebv<0, logg < 0 or > 5, or feh > 0.6 will be excluded.
Due to the decreased sensitivity of colours to low metallicities, stars
with feh < -4 are assigned constant feh = - 4. Stars with feh < -8 are
assumed to not have a valid feh measurement, and are excluded.
REQUIRED INPUT PARAMETERS
sid: star name/ID
logg: surface gravity
feh: [Fe/H]
gg: Gaia G (phot_g_mean_mag)
bp: Gaia BP (phot_bp_mean_mag)
rp: Gaia RP (phot_rp_mean_mag)
j2: 2MASS J
h2: 2MASS H
k2: 2MASS K
ebv: Reddening E(B-V)
DR?: Gaia DR2 or DR3 needs to be specified
For each star, bp,rp,logg,feh,ebv are indispensable parameters needed
to derive Teff from at least bp-rp. Note that sid,gg,j2,h2,k2 are also
required inputs, but empty entries can be passed if some of these quantities
are unavailable for a star. It must also be specified whether photometry from
Gaia DR2 or DR3 is passed as input.
CORRECTIONS TO INPUT PHOTOMETRY. DOS & DON'TS
Gaia DR2: 6<G<16 are corrected following Maiz Apellaniz & Weiler (2018, A&A,
619, 180), with a constant zero-point offset for G>16
G<6 are excluded to avoid any issue with saturation
Gaia DR3: G<8 are corrected for saturation following Riello+21, A&A, 649, 3
(Eq. C.1). G<6 are still excluded to avoid any issue with saturation
Note that G magnitudes for sources in DR3 with 2 or 6-parameter
astrometric solutions are NOT corrected by COLTE. It is the
responsibility of the user to do so before passing G magnitudes
into COLTE (see Riello+21, A&A, 649, 3 and
github.com/agabrown/gaiaedr3-6p-gband-correction)
OPTIONAL INPUT PARAMETERS
bprp_ex: to remove stars with bad phot_bp_rp_excess_factor (For DR2 see Eq. 2,
Arenou+18, A&A, 616, 17. For DR3 see Eq. 2, Gaia Collaboration+21,
A&A, 649, 8). Note that for DR3 phot_bp_rp_excess_factor should be
used as given in the Gaia catalog, without applying the correction of
Riello+21, A&A, 649, 3, see:
github.com/agabrown/gaiaedr3-flux-excess-correction
If this correction is applied, user is in charge of changing the
range of tolerance for the corrected excess factor (see suggested
values in Appendix A of Casagrande+21).
If bprp_ex option is called, but a value for the excess is not
available, the star will be removed
pmod: to retain only stars with phot_proc_mode=0 (Riello+18,+21)
If pmod option is called, but a value is not available, the star
will be removed
COD: to use extinction coefficients computed from the extinction law of
Cardelli, Clayton & Mathis (1989, with optical from O'Donnell 1994).
If COD is not chosen, default extinction coefficients are from the
law of Fitzpatrick (1999, renormalized as per Schlafly &
Finkbeiner 2011 - FSF)
outfile: output file. If not passed, then the default output file is colte.csv
MC: to perform a MonteCarlo for Teff uncertainties in different bands
ej2: 2MASS J uncertainty. If not provided, 0.022 mag is assumed
eh2: 2MASS H uncertainty. If not provided, 0.024 mag is assumed
ek2: 2MASS K uncertainty. If not provided, 0.022 mag is assumed
OPTIONAL INPUT PARAMETERS relevant ONLY if MC=True
trials: number of MC realizations for each star. If not set, default is 1000
Default value is a good compromise between speed of execution and
convergence. The latter depends on the colour index, and input
uncertainties. As a rule of thumb, with 1000 trials, uncertainties
typically converge to within a few K, or ~10K in worst cases. With
100 trials, convergence is ~10K in most cases, and up to ~70K in
worst cases. With 10000 trials convergence is always within a few K
wato: to write Weighted Averaged Teff Only in the output file
elogg: logg uncertainty. If not provided, 0.2 dex is assumed
efeh: [Fe/H] uncertainty. If not provided, 0.1 dex is assumed
egg: Gaia G uncertainty. If not provided, 0.005 mag is assumed
ebp: Gaia BP uncertainty. If not provided, 0.005 mag is assumed
erp: Gaia RP uncertainty. If not provided, 0.005 mag is assumed
eebv: Reddening uncertainty. If not provided, or a negative eebv is passed,
10% of input ebv is assumed
OUTPUT
The routine will write an output file providing for each star the adopted
sid, logg, feh, ebv + Teffs computed from up to 12 colour indices. If Teff
cannot be determined in a colour index, NaN is returned for that index.
Note that the program makes a number of basic quality cuts on input
photometry, and requires a value for logg, feh and ebv. Hence, the output
file might contain fewer stars than the input file.
If MC is set, then an uncertainty is provided for each Teff, along with
weighted averaged Teff and weighted standard deviation. If WATO is set,
only weighted average and weighted standard deviation are written. Note that
weighted averaged Teff and weighted standard deviation might change by a few
Kelvin each time, because of the MC nature of the errors (more robust
convergence can be achieved by increasing trials).
EXAMPLES
(1) For each star, compute Teffs with MonteCarlo uncertainties based on
known input errors. Colour-Teff relations for Gaia DR3 and default
extinction law (FSF) are used. Results from each colour index and weighted
average are written into filename set1.csv
colte(sid,logg,feh,gg,bp,rp,j2,h2,k2,ebv,DR3=True,MC=True,ej2=ej,eh2=eh,ek2=ek,eebv=ered,elogg=elogg,efeh=efeh,outfile='set1.csv')
(2) For each star, compute Teffs with MonteCarlo uncertainties based on
default errors assumed by the routine. Colour-Teff relations for Gaia DR2
and Cardelli/O'Donnell extinction law (COD) are used. Results from each
colour index and weighted average are written into filename set2.csv
colte(sid,logg,feh,gg,bp,rp,j2,h2,k2,ebv,DR2=True,COD=True,MC=True,outfile='set2.csv')
(3) For each star, compute Teffs with MonteCarlo uncertainties based on
default errors assumed by the routine. Colour-Teff relations for Gaia DR3
and Cardelli/O'Donnell extinction law (COD) are used. Only weighted
averaged Teff and its uncertainty are written into the default output
file colte.csv
colte(sid,logg,feh,gg,bp,rp,j2,h2,k2,ebv,DR3=True,COD=True,MC=True,wato=True)
(4) For each star, compute Teffs in all available colour indices and dump
results into the default output colte.csv. Colour-Teff relations
for Gaia DR2 and default extinction law (FSF) are used.
colte(sid,logg,feh,gg,bp,rp,j2,h2,k2,ebv,DR2=True)
HISTORY
-November 2020 - Written by <NAME>
-July 2021 - Updated to include Gaia DR3 photometry and option to choose
between COD and FSF extinction law
'''
import numpy as np
# remove warning messages arising when np.where encounters NaN
import warnings
warnings.simplefilter(action = "ignore", category | |
from pathlib import Path
from datetime import datetime, timedelta, tzinfo, timezone
from itertools import *
from influxdb import InfluxDBClient, DataFrameClient
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections.abc import Sequence
from scipy import integrate
class DBQuery():
"""Class to access InfluxDB 1.x and select records from it."""
def __init__(self, database, username, password, host='localhost', port=8086):
"""
:type database: str
:param database: Name of the database.
:type username: str
:param username: Name of user.
:type password: str
:param password: User password.
:type host: str, optional
:param host: IP address, defaults to ``localhost``.
:type port: int, optional
:param port: Connection port, defaults to ``8086``.
"""
self.database = database
self.username = username
self.password = password
self.host = host
self.port = port
self.client = InfluxDBClient(host=self.host,
port=self.port,
username=self.username,
password=self.password,
database=self.database)
def __del__(self):
print("Existing connection closed.")
self.client.close()
def get_measurements(self):
"""Get list of all measurments (series) in the database.
:rtype: list[str]
:return: List of all measurement names in the database.
"""
query = f"SHOW MEASUREMENTS;"
result = self.client.query(query).raw['series']
if result:
return [x[0] for x in result[0]['values']]
else:
return []
def get_tags(self, series):
"""Get all tags (tag names) in a series.
:type series: str
:param series: Name of the series.
:rtype: list[str]
:return: List of all tag names in the series.
.. note::
Returns an empty list if the query does not return any values, for example,
if there are no tags in the series or if there is no series with the
given name.
"""
query = f'SHOW TAG KEYS FROM "{series}";'
result = self.client.query(query).raw['series']
if result:
return [x[0] for x in result[0]['values']]
else:
return []
def get_fields(self, series, return_types=False):
"""Get all fields in a series.
:type series: str
:param series: Name of the series.
:type return_types: bool, optional
:param return_types: Indicates if field types should be returned, defaults to
``False``.
:rtype: list[str]|(list[str], list[str])
:return:
If ``return_type == False``
List of field names.
If ``return_type == True``
Field names and the corresponding InfluxDB field types as a pair of
lists of strings.
The possible types are: ``integer``, ``float``, ``string`` and
``boolean``.
.. note::
Returns an empty list if the query does not return any values, for example,
if there are no fields in the series or if there is no series with the
given name.
"""
query = f'SHOW FIELD KEYS FROM "{series}";'
result = self.client.query(query).raw['series']
if result:
if return_types:
return ([x[0] for x in result[0]['values']], [x[1] for x in result[0]['values']])
else:
return [x[0] for x in result[0]['values']]
else:
if return_types:
return ([], [])
else:
return []
def get_keys(self, series, tag):
"""Get list of all tag values for a given tag in a series.
:type series: str
:param series: Name of the series.
:type tag: str
:param tag: Name of the tag.
:rtype: list[str]
:return: List of all values of the tag in the series.
.. note::
Returns an empty list if the query does not return any values, for example,
if the tag does not exist in the series or if there is no series with the
given name.
"""
query = f'SHOW TAG VALUES FROM "{series}" WITH KEY = "{str(tag)}";'
result = self.client.query(query).raw['series']
if result:
return [x[1] for x in result[0]['values']]
else:
return []
def get_data(self, series, fields, keys=None, start=None, stop=None, local_tz=False):
"""Get data (records) for specified fields/tags in a series.
:type series: str
:param series: Name of the series.
:type fields: str|list[str]|tuple[str]|set[str]|dict[str: str|type]
:param fields: Name(s) of fields/tags in the series.
This parameter is treated differently depending on its type:
``str``
Treated as a single field/tag name to return.
If ``fields`` = ``'*'`` then all fields and tags are returned.
``list[str]``, ``tuple[str]`` or ``set[str]``
Treated as a collection of field/tag names to return.
``dict[str: str|type]``
The keys are treated as field/tag names, and the values are treated as
numpy types (or names of numpy types) of the corresponding keys.
The output is converted from InfluxDB types to the types
specified in the dictionary.
Use ``None`` as a field type to enable type autodetection and/or
avoid type conversion for that field.
:type keys: None|dict[str: obj], optional
:param keys: Dictionary providing rules to select records with specific
field/tag values, defaults to ``None``.
If ``None`` then selected records are not filtered.
Otherwise the dictionary is treated as follows:
Key
Name of the filtered field/tag.
Values
Value(s) of the corresponding field/tag to be selected.
Each value can be a scalar or a collection of all values to be selected
(``list``, ``tuple`` or ``set``)
:type start: None|str|int|datetime, optional
:param start: Inclusive lower time boundary for the returned data, defaults to
``None``.
``None`` indicates no lower boundary.
``str`` is interpreted as a timestring.
``int`` is interpreted as a Unix timestamp.
``datetime`` is used as is.
:type stop: None|str|int|datetime, optional
:param stop: Exclusive upper time boundary for the returned data, defaults to
``None``.
``None`` indicates no upper boundary.
``str`` is interpreted as a timestring.
``int`` is interpreted as a Unix timestamp.
``datetime`` is used as is.
:type local_tz: bool, optional
:param local_tz: Indicates whether local or UTC time is used in the code,
defaults to ``False`` (UTC).
:rtype: dict[str: np.array]
:return: Dictionary constructed as follows:
Key
Field/tag name.
Value
Numpy array of the corresponding field/tag values.
"""
def _tz_convert(t, local_tz=False):
# Never adjust timezone for epoch timestamps
if isinstance(t, int):
return t
tz = datetime.now().astimezone().tzinfo if local_tz else 'UTC'
t = pd.Timestamp(t)
if t.tz:
# Always convert aware Timestamp to UTC timezone
return f"'{t.tz_convert(None)}'"
else:
# Naive Timestamps can be treated as representing UTC or local time
return f"'{t.tz_localize(tz).tz_convert(None)}'"
def _type_cast(value, dtype):
if dtype is None:
return value
if dtype.kind == 'M':
return pd.Timestamp(value).tz_convert(None).asm8
if dtype.kind == 'm':
return pd.Timedelta(value).asm8
else:
return value
def _destructure(key, val):
if type(val) in (list, tuple, set):
destruct = [f"\"{key!s}\" = '{v!s}'" for v in val]
return f"({' OR '.join(destruct)})"
else:
return f"(\"{key!s}\" = '{val!s}')"
time_type = np.dtype('<M8[ns]')
default_type = np.dtype('O')
type_conversion = {'integer': 'int64', 'float': 'float64', 'string': 'O', 'boolean': 'bool'}
ftypes = {'time': time_type}
dbtags = self.get_tags(series)
string_fields = list(dbtags)  # copy, so appending string field names below does not mutate dbtags
for f in dbtags:
ftypes[f] = np.dtype('O')
dbfields, dbtypes = self.get_fields(series, return_types=True)
for f, t in zip(dbfields, dbtypes):
if t == 'string':
string_fields += [f]
ftypes[f] = np.dtype(type_conversion[t])
dballf = dbfields + dbtags
if type(fields) is dict:
_fields = []
for f, t in fields.items():
if f == '*':
_fields += dballf
else:
if t is not None:
ftypes[f] = np.dtype(type_conversion.get(t, t))
_fields += [f'{f!s}']
fields = _fields
elif type(fields) in (list, tuple, set):
_fields = []
for f in fields:
if f == '*':
_fields += dballf
else:
_fields += [f'{f!s}']
fields = _fields
elif type(fields) is str:
fields = dballf if fields == '*' else [f'{fields!s}']
else:
raise TypeError(f"fields should be a string, list, tuple, set or dict but {type(fields)} was passed")
if 'time' not in fields:
fields = ['time'] + fields
for f in fields:
if f not in ftypes:
ftypes[f] = default_type
if keys is None or keys == {}:
where_clause = ""
elif type(keys) is not dict:
raise ValueError(f"keys should be None or dic of key: value pairs but {type(keys)} was passed")
else:
where_clause = f" WHERE {' AND '.join([_destructure(k, v) for k, v in keys.items()])}"
time_query = ''
if start is not None:
time_query += f" AND time >= {_tz_convert(start, local_tz=local_tz)}"
if stop is not None:
time_query += f" AND time < {_tz_convert(stop, local_tz=local_tz)}"
qfields = [f'"{f}"' for f in fields]
query = f'SELECT {", ".join(qfields)} FROM "{series}"{where_clause}{time_query};'
processed_query = self.client.query(query).raw['series']
result = {}
if processed_query:
data = processed_query[0]['values']
#fields = processed_query[0]['columns']
else:
data = []
for field in fields:
result[field] = np.zeros(len(data), dtype=ftypes[field])
for i, row in zip(count(), data):
for value, field in zip(row, fields):
result[field][i] = _type_cast(value, ftypes[field])
for f in fields:
if f in string_fields:
result[f] = result[f].astype('U')
return result
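# Hedged usage sketch (editorial addition, not part of the original module):
# it walks through schema discovery and a filtered, time-bounded get_data()
# call as described in the docstrings above. The database credentials,
# series, tag and field names are assumptions for illustration only.
def _example_dbquery():
    """Query a sample series with DBQuery (illustration only)."""
    db = DBQuery('sensors', 'reader', 'secret', host='localhost', port=8086)
    series = db.get_measurements()[0]                  # e.g. 'temperature'
    tags = db.get_tags(series)                         # tag names in the series
    fields, ftypes = db.get_fields(series, return_types=True)
    # Two fields with explicit numpy dtypes, records filtered on a tag value,
    # and a half-open UTC time range [start, stop).
    data = db.get_data(series,
                       fields={'time': None, 'value': 'float64'},
                       keys={'room': ['lab', 'office']},
                       start='2021-01-01', stop='2021-02-01')
    return data['time'], data['value']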
class CycleAnalyzer():
"""Class to calculate and plot circadian cycles data.
:vartype start: np.datetime64
:ivar start: Adjusted lower time boundary (inclusive) of data included | |
"hughie",
"dany",
"shaven",
"tombstones",
"usin",
"customized",
"auditing",
"dodds",
"covent",
"purvis",
"suss",
"doting",
"misato",
"jiggling",
"çetin",
"christensen",
"rotated",
"appendectomy",
"chorizo",
"ballpoint",
"artsy",
"okayed",
"lali",
"rainforests",
"dialects",
"matlock",
"lightest",
"quandary",
"sunblock",
"inns",
"flocking",
"happy-go-lucky",
"aurelius",
"mated",
"getty",
"adrenal",
"actuality",
"fund-raising",
"ramblings",
"saps",
"jee",
"mythic",
"mouthy",
"gouged",
"mamoru",
"sachi",
"kuba",
"consuelo",
"farouk",
"verifying",
"punta",
"chianti",
"'ey",
"taichi",
"iwill",
"heidelberg",
"grainy",
"overcast",
"non-alcoholic",
"thirty-four",
"and-and-and",
"latvian",
"muchacho",
"usurped",
"corto",
"bodacious",
"shipyards",
"harshest",
"joshi",
"erections",
"nandhini",
"camphor",
"badri",
"forgetfulness",
"cutout",
"rudra",
"hoagie",
"storks",
"shoplifter",
"atypical",
"profited",
"nobly",
"whiteside",
"mahadev",
"rowdies",
"detours",
"blt",
"imbued",
"firmament",
"ray-ray",
"lucía",
"developmental",
"gamboa",
"napoli",
"raya",
"mountbatten",
"escapades",
"promiscuity",
"shapely",
"laroche",
"opiates",
"ibis",
"petrelli",
"complainant",
"knick",
"bodhi",
"soong",
"snatchers",
"bleat",
"teleported",
"mingled",
"redcoats",
"crackin",
"insipid",
"windermere",
"cordoned",
"inhibited",
"natures",
"ivanova",
"lemmings",
"sloshed",
"lv",
"toiletries",
"kaleidoscope",
"fain",
"thorax",
"roarke",
"sentinels",
"postpartum",
"ofthese",
"bt",
"pacheco",
"machin",
"mastodon",
"shrunken",
"yammering",
"telethon",
"deadwood",
"bubber",
"acacia",
"opiate",
"authoritarian",
"headfirst",
"distorts",
"reprisal",
"shoves",
"dicaprio",
"chasers",
"weimar",
"eeh",
"realisation",
"strangles",
"nietzscheans",
"gyms",
"indiscriminate",
"hammersmith",
"su-jin",
"strumpet",
"cori",
"9pm",
"booed",
"ex-convict",
"shit-faced",
"adamson",
"overlay",
"soos",
"alyson",
"assists",
"means-",
"korn",
"homegrown",
"roslyn",
"beatin",
"complexities",
"stave",
"smurfette",
"guerre",
"spreadsheet",
"whacks",
"juiced",
"floodgates",
"thunderbirds",
"culpable",
"chul-soo",
"shoko",
"everypony",
"wanted-",
"dorchester",
"dislocation",
"lifes",
"flourishes",
"rawls",
"refining",
"poindexter",
"strolled",
"fistfight",
"flatlining",
"fronting",
"frampton",
"fleabag",
"shal",
"petitioned",
"malmö",
"archdiocese",
"grunge",
"hereford",
"lox",
"present-day",
"serf",
"ansari",
"wayside",
"distortions",
"justices",
"tennison",
"ideologies",
"galina",
"vuitton",
"shahrukh",
"harvests",
"matchbook",
"tandoori",
"half-price",
"hibiscus",
"stirrups",
"levity",
"athenians",
"true.",
"fridges",
"wreaths",
"lonny",
"apathetic",
"cathartic",
"pontus",
"dewar",
"canvassed",
"platitudes",
"reynard",
"pippo",
"prophesy",
"burrowing",
"jean-robert",
"carlota",
"caesarean",
"harbin",
"harrods",
"prominence",
"toned",
"shado",
"prego",
"karenina",
"meaden",
"kamp",
"tarry",
"kray",
"impregnate",
"snub",
"o.c.",
"ch000000",
"intracranial",
"softener",
"prophesied",
"embarassing",
"misinformation",
"benefactors",
"reapers",
"carmody",
"japp",
"intercepting",
"patchwork",
"tine",
"pickpockets",
"masse",
"hibbert",
"ushers",
"cru",
"unplanned",
"resurfaced",
"westmoreland",
"aryans",
"venkat",
"rampaging",
"tactile",
"aldridge",
"nomura",
"gremlins",
"clove",
"doakes",
"testimonial",
"pepito",
"gooseberry",
"persephone",
"baylin",
"sandor",
"in-flight",
"corroded",
"vr",
"arianna",
"jammies",
"cha-ching",
"postwar",
"marksmanship",
"lodges",
"sidhu",
"ute",
"imitated",
"wide-eyed",
"devonshire",
"sandler",
"blaise",
"conversational",
"jasmin",
"cryogenic",
"scaredy-cat",
"drainpipe",
"bolero",
"dabbled",
"supermodels",
"orbs",
"pitting",
"year-round",
"suzan",
"krupa",
"marlott",
"barbarism",
"jochen",
"'while",
"dumpy",
"brigands",
"db",
"shavings",
"surrogacy",
"hopscotch",
"motorist",
"bandwagon",
"agitate",
"denby",
"serpentine",
"dressings",
"elodie",
"hays",
"tuscan",
"dumbbell",
"shinnosuke",
"enlisting",
"cereals",
"slayed",
"cruelest",
"liaise",
"fujiwara",
"fabri",
"shithouse",
"malini",
"kilgrave",
"disagreeing",
"worsening",
"disinterested",
"shiori",
"whoa-oh-oh",
"salmoneus",
"tribulations",
"highlanders",
"tantric",
"stephenson",
"honeysuckle",
"collider",
"grounder",
"nunez",
"shintaro",
"tachyon",
"mendelssohn",
"bindu",
"dionne",
"gaucho",
"fannie",
"threesomes",
"ingrained",
"marchetti",
"nakagawa",
"ephemeral",
"fulfills",
"off-site",
"cafferty",
"recuse",
"öèëid",
"abacus",
"unsubstantiated",
"tivoli",
"libertine",
"mk",
"telepaths",
"neiman",
"exaggerates",
"sheung",
"maj",
"elicit",
"chanda",
"wight",
"pele",
"tyrol",
"reforming",
"scintillating",
"harman",
"chortles",
"precedents",
"bel-air",
"unsung",
"disservice",
"dissident",
"mannerisms",
"regretfully",
"outings",
"haven`t",
"shrooms",
"cabeza",
"sceptre",
"malfunctions",
"romantics",
"veronique",
"atrophy",
"firsts",
"corby",
"aptly",
"rimbaud",
"prost",
"12-gauge",
"wild-goose",
"agostino",
"foreclose",
"thoracotomy",
"toho",
"calabria",
"cast-iron",
"riverboat",
"ello",
"nicolai",
"'reily",
"riva",
"lightheaded",
"riddick",
"regimes",
"condensation",
"machi",
"disintegrating",
"aden",
"choosers",
"velu",
"barca",
"mamba",
"disembodied",
"macey",
"deauville",
"blouses",
"hoedown",
"laz",
"nang",
"complied",
"con.",
"burley",
"boners",
"xindi",
"distinctions",
"campbells",
"appraised",
"thermite",
"viren",
"zealots",
"annas",
"mot",
"normalcy",
"nonna",
"vato",
"magus",
"synapses",
"rebuke",
"melman",
"spurred",
"lafferty",
"reproducing",
"hemorrhagic",
"prospered",
"breathalyzer",
"adelle",
"frame-up",
"self-evident",
"saudis",
"stine",
"overtook",
"posthumous",
"vanda",
"figgis",
"pascual",
"hankie",
"collegiate",
"seng",
"grassroots",
"up-front",
"recompense",
"acquaint",
"misdirection",
"krieg",
"bernays",
"valerio",
"byul",
"lebowski",
"hairdressing",
"neri",
"ambivalent",
"akagi",
"cartwheels",
"malnourished",
"po-po",
"bagi",
"novices",
"dasher",
"ostracized",
"seduces",
"soppy",
"freeloader",
"canons",
"whoopsie",
"kimberley",
"gamora",
"four-legged",
"libre",
"vervain",
"deuk-gu",
"golfers",
"underdeveloped",
"rind",
"drumbeat",
"christiane",
"clean-cut",
"georgiana",
"reconfigure",
"camilo",
"bosworth",
"waltzes",
"chotu",
"elina",
"sporadic",
"moffat",
"sickened",
"asystole",
"slandered",
"'chaim",
"anorexia",
"gobbled",
"sema",
"muses",
"tsing",
"sorbet",
"help-",
"tweaks",
"hollandaise",
"sullied",
"jaz",
"hemorrhoid",
"socked",
"zaius",
"low-grade",
"npr",
"buggin",
"playgrounds",
"snell",
"amane",
"entrepreneurial",
"punctures",
"combustible",
"gnat",
"colonized",
"friendless",
"overrule",
"slackers",
"rabin",
"nodos",
"repainted",
"entanglement",
"dispensation",
"stinson",
"chinna",
"jaywalking",
"quotations",
"yardley",
"construed",
"jacquie",
"actionable",
"fungal",
"tamaki",
"natsuko",
"frankel",
"whe",
"researches",
"molesley",
"slither",
"wainthropp",
"sars",
"conformity",
"workday",
"turncoat",
"usain",
"chatham",
"roslin",
"intercepts",
"piecing",
"patriarchal",
"fallback",
"warlike",
"zahra",
"contour",
"carrillo",
"madhavi",
"plunges",
"rescinded",
"sosuhno",
"neolithic",
"goh",
"mulatto",
"bloodlust",
"bang-bang",
"free-for-all",
"son-",
"biddle",
"lorie",
"cuttin",
"saloons",
"kizzy",
"capacitor",
"rubio",
"bridgeport",
"chaise",
"jazzed",
"dissing",
"fructose",
"chortling",
"mimosas",
"yourwife",
"ryuzaki",
"gooks",
"interracial",
"milling",
"knock-off",
"pimpernel",
"jousting",
"bedchamber",
"visibly",
"juicer",
"capua",
"udo",
"fremen",
"luxor",
"theorist",
"eludes",
"hsi-men",
"dad.",
"isadora",
"sdl",
"60-year-old",
"philanthropy",
"netting",
"zs",
"commode",
"maybelle",
"treetops",
"banyan",
"flagstaff",
"zits",
"transporters",
"yrs",
"frittata",
"hummel",
"shinin",
"homeward",
"dissipated",
"spool",
"well-to-do",
"isaacs",
"j-roc",
"crewe",
"'god",
"firmer",
"runkle",
"centrifuge",
"jotaro",
"asante",
"shortsighted",
"schillinger",
"finery",
"mumbled",
"fairway",
"frolicking",
"inedible",
"ess",
"leggings",
"ofthose",
"mhmm",
"providers",
"russkies",
"bento",
"gobber",
"erskine",
"medicaid",
"swindlers",
"wοuld",
"l.a",
"drunker",
"pell",
"gratefully",
"valdemar",
"seesaw",
"manliness",
"mahalo",
"kjeld",
"vinicius",
"clanton",
"vámonos",
"julienne",
"majoring",
"stamford",
"lecher",
"activism",
"weaves",
"manmade",
"kyu",
"kenner",
"carer",
"quinoa",
"tomlin",
"columbine",
"equate",
"grizzlies",
"bffs",
"heeds",
"rout",
"xenia",
"l-low",
"co-ordinates",
"romek",
"skinning",
"ifi",
"ozawa",
"gunslinger",
"huevos",
"crispin",
"scooping",
"hunahpu",
"euclid",
"reassignment",
"supercharged",
"yoú",
"ochoa",
"antioch",
"sirena",
"serendipity",
"millennial",
"tactless",
"jawbone",
"locksley",
"pepa",
"wile",
"godlike",
"chirag",
"soybean",
"pothead",
"proclaims",
"timmons",
"headshot",
"flit",
"mops",
"enamored",
"busters",
"riordan",
"taunted",
"johnno",
"caf",
"artoo",
"freebies",
"omnitrix",
"mesdames",
"interdimensional",
"bloomer",
"deidre",
"tarnation",
"illustrates",
"prez",
"she--she",
"ect",
"unfettered",
"batsman",
"twerk",
"ayatollah",
"brill",
"gerrard",
"sergeyevich",
"surrogates",
"rolfe",
"enys",
"dismayed",
"forty-four",
"toboni",
"wooo",
"laters",
"felsham",
"snooki",
"dilapidated",
"alleging",
"ioved",
"gottlieb",
"eldorado",
"overlords",
"revolved",
"reflector",
"aspirins",
"perennial",
"c.k.",
"lafitte",
"concealment",
"boney",
"org",
"juni",
"nanjing",
"bondsman",
"weinstein",
"blowhard",
"alles",
"inscrutable",
"signalling",
"colonize",
"tsunamis",
"kavita",
"ondina",
"muerte",
"helge",
"turgut",
"kui",
"provolone",
"concourse",
"flavius",
"towler",
"dearth",
"abrasion",
"bastian",
"overdrawn",
"bournemouth",
"kleiss",
"poisoner",
"despondent",
"floki",
"dipstick",
"gammy",
"deutsche",
"twa",
"cartagena",
"izu",
"kayne",
"lapis",
"tastier",
"lasses",
"watchmen",
"snart",
"bolly",
"decrypt",
"jujitsu",
"wilted",
"incidence",
"scrimmage",
"tinkerbell",
"inaugurated",
"perfecto",
"kerouac",
"chalkboard",
"adage",
"derring-do",
"gumption",
"kilgore",
"ranveer",
"coddle",
"positivity",
"court-appointed",
"hyeok",
"headliner",
"woodchuck",
"caretakers",
"millers",
"sabe",
"aerodynamics",
"yum-yum",
"augh",
"reorganize",
"forty-nine",
"sakurai",
"kandi",
"pinata",
"firewalls",
"liana",
"rackham",
"dobie",
"pedaling",
"ez",
"kell",
"telekinesis",
"jump-start",
"natural-born",
"laetitia",
"visconti",
"leto",
"keppler",
"contributors",
"hayride",
"passageways",
"corfu",
"remorseful",
"strong-arm",
"'grady",
"blinker",
"stefani",
"liliane",
"borrows",
"inducing",
"erol",
"dm",
"scimitar",
"stringy",
"parlay",
"mandar",
"impatiently",
"gangbangers",
"us.",
"exponential",
"racecar",
"wastebasket",
"legate",
"representations",
"jewelers",
"ronson",
"muzak",
"handmaiden",
"ramparts",
"absences",
"charting",
"hajj",
"seabirds",
"profiting",
"ogawa",
"calmness",
"ky",
"broadband",
"tumbler",
"minted",
"impairment",
"walkie-talkies",
"nubian",
"gekko",
"avalor",
"censors",
"gleb",
"emoji",
"marionette",
"fujita",
"lullabies",
"bugler",
"undersigned",
"spire",
"petulant",
"entitles",
"krause",
"half-way",
"whaa",
"rivets",
"30-year",
"distorting",
"mourns",
"reichstag",
"sniping",
"fostered",
"depresses",
"cedars",
"raman",
"manabu",
"witless",
"tbe",
"seedlings",
"synthesize",
"pronouncing",
"analyzer",
"foreheads",
"redirected",
"sapphires",
"septum",
"unchallenged",
"hee-haw",
"sagan",
"dass",
"my--my",
"ump",
"marit",
"life-long",
"valmont",
"divisional",
"haider",
"liberia",
"tribbiani",
"aino",
"impropriety",
"organizational",
"gaggle",
"tianjin",
"triffids",
"cortes",
"southland",
"whir",
"penrose",
"ensued",
"winterfell",
"elation",
"assemblyman",
"guardhouse",
"melanoma",
"rolodex",
"tiner",
"benghazi",
"paperback",
"aux",
"smidge",
"memsahib",
"no-man",
"slappy",
"queues",
"faring",
"grannies",
"tengo",
"nin",
"nettle",
"shobha",
"toa",
"batou",
"waistband",
"altruism",
"seimei",
"mattia",
"astonish",
"dae-so",
"multitasking",
"exuberant",
"carta",
"infomercial",
"swooping",
"bloodiest",
"jani",
"serials",
"esha",
"gowri",
"h2o",
"xuan",
"dilithium",
"friars",
"homophobia",
"waldron",
"estrada",
"perdition",
"epoch",
"gie",
"blindside",
"muruga",
"'did",
"lookouts",
"suvs",
"chapera",
"iguanas",
"subsection",
"trotters",
"ratios",
"ferenc",
"tannen",
"fifa",
"ezequiel",
"conklin",
"d0",
"meteorological",
"irvine",
"keychain",
"tamar",
"walkies",
"escapee",
"academically",
"pouty",
"piney",
"fashionably",
"concetta",
"dragoon",
"thule",
"evian",
"godparents",
"pried",
"godiva",
"bagpipe",
"ginormous",
"noam",
"tempus",
"hendrik",
"herder",
"tinned",
"kasuga",
"ocular",
"pre-war",
"rosey",
"analyses",
"marg",
"muchachos",
"jiggy",
"brrr",
"striations",
"sweetener",
"schoolboys",
"originating",
"m1",
"indiscriminately",
"crier",
"griping",
"preppy",
"cabby",
"pembleton",
"shirl",
"wort",
"vassar",
"tutored",
"graphite",
"mame",
"kazoo",
"hi-yah",
"criticisms",
"citroen",
"semi-finals",
"disheartened",
"congregate",
"desiring",
"hrs",
"οne",
"stannis",
"halfwit",
"equestria",
"yule",
"vernacular",
"symptomatic",
"gipsy",
"eamon",
"rangoon",
"tenders",
"unconscionable",
"panning",
"tojo",
"skiff",
"organically",
"danno",
"bandwidth",
"motivator",
"m.p.",
"contraceptive",
"owari",
"manni",
"execs",
"tattletale",
"anatomically",
"manatee",
"combative",
"mensa",
"butt-head",
"pendragon",
"inner-city",
"sidekicks",
"credo",
"bulimic",
"mamiya",
"pheromone",
"intelligently",
"beate",
"earls",
"trimble",
"doughboy",
"letterhead",
"punctuation",
"dodie",
"asserting",
"bork",
"implements",
"yodel",
"pre-existing",
"sashi",
"ife",
"darlington",
"disarmament",
"bloomingdale",
"verger",
"eye-to-eye",
"tilden",
"kruse",
"ntsb",
"bodice",
"stagg",
"dispensing",
"abreast",
"suleiman",
"neff",
"toughness",
"on-call",
"hitchhikers",
"lakshman",
"kohei",
"wie",
"bald-headed",
"fillory",
"believeth",
"purer",
"pranking",
"cohorts",
"serpico",
"disused",
"cline",
"gluing",
"norad",
"gibbon",
"ml5",
"westport",
"jonathon",
"gilou",
"pius",
"scrip",
"nella",
"kitchener",
"niecy",
"kaali",
"purposefully",
"belarus",
"telepathically",
"betterthan",
"munk",
"ignatius",
"embellish",
"carbonate",
"spherical",
"shanaya",
"conjoined",
"blushed",
"ecclesiastical",
"dabney",
"wunderbar",
"daimyo",
"vitally",
"noth",
"cravat",
"bullfighting",
"wh-wh-what",
"grout",
"overheat",
"recites",
"sedona",
"danica",
"fitzy",
"bookman",
"legionnaire",
"orc",
"artistically",
"knowyou",
"squandering",
"automation",
"blue-collar",
"wilkie",
"linz",
"anti-tank",
"boy-",
"transcription",
"yunsik",
"'be",
"outwardly",
"unsound",
"acutely",
"indira",
"upper-class",
"niger",
"hara-kiri",
"whitewood",
| |
(405405*mckin) + (25328*mckin*np.pi**2*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(5265*mbkin) -
(25328*mckin**2*np.pi**2*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(1485*mbkin**2) + (177296*mckin**3*np.pi**2*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(4455*mbkin**3) -
(25328*mckin**4*np.pi**2*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(405*mbkin**4) + (25328*mckin**5*np.pi**2*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(405*mbkin**5) -
(25328*mckin**6*np.pi**2*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(945*mbkin**6) - (25328*mckin**7*np.pi**2*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(945*mbkin**7) +
(25328*mckin**8*np.pi**2*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(405*mbkin**8) - (25328*mckin**9*np.pi**2*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(405*mbkin**9) +
(177296*mckin**10*np.pi**2*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(4455*mbkin**10) - (25328*mckin**11*np.pi**2*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(1485*mbkin**11) +
(25328*mckin**12*np.pi**2*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(5265*mbkin**12) - (25328*mckin**13*np.pi**2*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(31185*mbkin**13) +
(25328*mckin**14*np.pi**2*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(405405*mbkin**14) + (6431321516674*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/352185458625 -
(3704597206337*mbkin*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(2465298210375*mckin) - (5127490988674*mckin*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(54784404675*mbkin) +
(3302128249474*mckin**2*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(14087418345*mbkin**2) - (564084140674*mckin**3*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(5418237825*mbkin**3) -
(3999322707326*mckin**4*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(3010132125*mbkin**4) + (13126136403326*mckin**5*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(2462835375*mbkin**5) -
(40506577491326*mckin**6*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(3447969525*mbkin**6) + (4341760*mckin**7*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(243*mbkin**7) -
(69015186860674*mckin**8*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(3447969525*mbkin**8) + (41634745772674*mckin**9*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(2462835375*mbkin**9) -
(32507932076674*mckin**10*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(3010132125*mbkin**10) + (27944525228674*mckin**11*
(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/(5418237825*mbkin**11) -
(25206481119874*mckin**12*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(14087418345*mbkin**12) + (23381118380674*mckin**13*
(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/(54784404675*
mbkin**13) - (22077287852674*mckin**14*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(352185458625*mbkin**14) +
(10549707478337*mckin**15*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(2465298210375*mbkin**15) - (47488*np.pi**2*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/57915 +
(3392*mbkin*np.pi**2*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(57915*mckin) + (6784*mckin*np.pi**2*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(1287*mbkin) -
(237440*mckin**2*np.pi**2*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(11583*mbkin**2) + (47488*mckin**3*np.pi**2*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(891*mbkin**3) -
(47488*mckin**4*np.pi**2*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(495*mbkin**4) + (47488*mckin**5*np.pi**2*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(405*mbkin**5) -
(6784*mckin**6*np.pi**2*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(81*mbkin**6) + (6784*mckin**8*np.pi**2*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(81*mbkin**8) -
(47488*mckin**9*np.pi**2*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(405*mbkin**9) + (47488*mckin**10*np.pi**2*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(495*mbkin**10) -
(47488*mckin**11*np.pi**2*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(891*mbkin**11) + (237440*mckin**12*np.pi**2*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(11583*mbkin**12) -
(6784*mckin**13*np.pi**2*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(1287*mbkin**13) + (47488*mckin**14*np.pi**2*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(57915*mbkin**14) -
(3392*mckin**15*np.pi**2*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(57915*mbkin**15) + (34821824*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/7818525 -
(17410912*mbkin*(np.log(2) + np.log(1 - mckin/mbkin))*(2 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(54729675*mckin) -
(34821824*mckin*(np.log(2) + np.log(1 - mckin/mbkin))*(2 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(1216215*mbkin) +
(34821824*mckin**2*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/(312741*mbkin**2) -
(34821824*mckin**3*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/(120285*mbkin**3) +
(34821824*mckin**4*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/(66825*mbkin**4) -
(34821824*mckin**5*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/(54675*mbkin**5) +
(34821824*mckin**6*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/(76545*mbkin**6) -
(34821824*mckin**8*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/(76545*mbkin**8) +
(34821824*mckin**9*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/(54675*mbkin**9) -
(34821824*mckin**10*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/(66825*mbkin**10) +
(34821824*mckin**11*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/(120285*mbkin**11) -
(34821824*mckin**12*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/(312741*mbkin**12) +
(34821824*mckin**13*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/(1216215*mbkin**13) -
(34821824*mckin**14*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/(7818525*mbkin**14) +
(17410912*mckin**15*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/(54729675*mbkin**15) -
(5715968*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/2189187 +
(91455488*mckin*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(2189187*mbkin) - (228638720*mckin**2*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/(729729*mbkin**2) +
(457277440*mckin**3*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(312741*mbkin**3) - (114319360*mckin**4*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/(24057*mbkin**4) +
(91455488*mckin**5*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(8019*mbkin**5) - (45727744*mckin**6*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/(2187*mbkin**6) +
(457277440*mckin**7*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(15309*mbkin**7) - (57159680*mckin**8*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/(1701*mbkin**8) +
(457277440*mckin**9*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(15309*mbkin**9) - (45727744*mckin**10*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/(2187*mbkin**10) +
(91455488*mckin**11*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(8019*mbkin**11) - (114319360*mckin**12*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/(24057*mbkin**12) +
(457277440*mckin**13*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(312741*mbkin**13) - (228638720*mckin**14*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/(729729*mbkin**14) +
(91455488*mckin**15*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(2189187*mbkin**15) - (5715968*mckin**16*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/(2189187*mbkin**16) -
(91651072*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/37216179 +
(91651072*mckin*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(2189187*mbkin) - (733208576*mckin**2*(1 + 17*np.log(2) +
17*np.log(1 - mckin/mbkin)))/(2189187*mbkin**2) +
(3666042880*mckin**3*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(2189187*mbkin**3) - (1833021440*mckin**4*(1 + 17*np.log(2) +
17*np.log(1 - mckin/mbkin)))/(312741*mbkin**4) +
(366604288*mckin**5*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(24057*mbkin**5) - (733208576*mckin**6*(1 + 17*np.log(2) +
17*np.log(1 - mckin/mbkin)))/(24057*mbkin**6) +
(733208576*mckin**7*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(15309*mbkin**7) - (916510720*mckin**8*(1 + 17*np.log(2) +
17*np.log(1 - mckin/mbkin)))/(15309*mbkin**8) +
(916510720*mckin**9*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(15309*mbkin**9) - (733208576*mckin**10*(1 + 17*np.log(2) +
17*np.log(1 - mckin/mbkin)))/(15309*mbkin**10) +
(733208576*mckin**11*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(24057*mbkin**11) - (366604288*mckin**12*(1 + 17*np.log(2) +
17*np.log(1 - mckin/mbkin)))/(24057*mbkin**12) +
(1833021440*mckin**13*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(312741*mbkin**13) - (3666042880*mckin**14*(1 + 17*np.log(2) +
17*np.log(1 - mckin/mbkin)))/(2189187*mbkin**14) +
(733208576*mckin**15*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(2189187*mbkin**15) - (91651072*mckin**16*(1 + 17*np.log(2) +
17*np.log(1 - mckin/mbkin)))/(2189187*mbkin**16) +
(91651072*mckin**17*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(37216179*mbkin**17) - (86691328*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/37216179 +
(173382656*mckin*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(4135131*mbkin) - (86691328*mckin**2*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(243243*mbkin**2) +
(1387061248*mckin**3*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(729729*mbkin**3) - (1733826560*mckin**4*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(243243*mbkin**4) +
(693530624*mckin**5*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(34749*mbkin**5) - (346765312*mckin**6*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(8019*mbkin**6) +
(1387061248*mckin**7*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(18711*mbkin**7) - (173382656*mckin**8*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(1701*mbkin**8) +
(1733826560*mckin**9*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(15309*mbkin**9) - (173382656*mckin**10*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(1701*mbkin**10) +
(1387061248*mckin**11*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(18711*mbkin**11) - (346765312*mckin**12*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(8019*mbkin**12) +
(693530624*mckin**13*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(34749*mbkin**13) - (1733826560*mckin**14*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(243243*mbkin**14) +
(1387061248*mckin**15*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(729729*mbkin**15) - (86691328*mckin**16*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(243243*mbkin**16) +
(173382656*mckin**17*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(4135131*mbkin**17) - (86691328*mckin**18*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(37216179*mbkin**18) -
(223167488*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/101015343 +
(223167488*mckin*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(5316597*mbkin) - (223167488*mckin**2*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(590733*mbkin**2) +
(223167488*mckin**3*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(104247*mbkin**3) - (892669952*mckin**4*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(104247*mbkin**4) +
(892669952*mckin**5*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(34749*mbkin**5) - (6248689664*mckin**6*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(104247*mbkin**6) +
(892669952*mckin**7*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(8019*mbkin**7) - (446334976*mckin**8*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(2673*mbkin**8) +
(446334976*mckin**9*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(2187*mbkin**9) - (446334976*mckin**10*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(2187*mbkin**10) +
(446334976*mckin**11*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(2673*mbkin**11) - (892669952*mckin**12*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(8019*mbkin**12) +
(6248689664*mckin**13*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(104247*mbkin**13) - (892669952*mckin**14*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(34749*mbkin**14) +
(892669952*mckin**15*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(104247*mbkin**15) - (223167488*mckin**16*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(104247*mbkin**16) +
(223167488*mckin**17*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(590733*mbkin**17) - (223167488*mckin**18*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(5316597*mbkin**18) +
(223167488*mckin**19*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(101015343*mbkin**19) - (19289344*(1 + 20*np.log(2) +
20*np.log(1 - mckin/mbkin)))/9183213 +
(385786880*mckin*(1 + 20*np.log(2) + 20*np.log(1 - mckin/mbkin)))/
(9183213*mbkin) - (192893440*mckin**2*(1 + 20*np.log(2) +
20*np.log(1 - mckin/mbkin)))/(483327*mbkin**2) +
(385786880*mckin**3*(1 + 20*np.log(2) + 20*np.log(1 - mckin/mbkin)))/
(161109*mbkin**3) - (96446720*mckin**4*(1 + 20*np.log(2) +
20*np.log(1 - mckin/mbkin)))/(9477*mbkin**4) +
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In this module you find the workflow 'fleur_scf_wc' for a self-consistency
cycle of a FLEUR calculation with AiiDA.
"""
#TODO: more info in output, log warnings
#TODO: make smarter, ggf delete broyd or restart with more or less iterations
# you can use the pattern of the density convergence for this
#TODO: other error handling, where is known what to do
#TODO: test in each step if calculation before had a problem
#TODO: maybe write dict schema for wf_parameter inputs
#TODO: Idea pass structure extras, save them in outputnode? no
#TODO: get density for magnetic structures
#TODO: set minDistance and higher iteration number, ggf change logic for total energy
#TODO: check if calculation already exists
from aiida import load_dbenv, is_dbenv_loaded
if not is_dbenv_loaded():
load_dbenv()
from aiida.orm import Code, DataFactory
#from aiida.tools.codespecific.fleur.queue_defaults import queue_defaults
from aiida.work.workchain import WorkChain
from aiida.work.workchain import while_, if_
from aiida.work.run import submit
from aiida.work.workchain import ToContext
from aiida.work.process_registry import ProcessRegistry
#from aiida.tools.codespecific.fleur.decide_ncore import decide_ncore
from aiida_fleur.calculation.fleurinputgen import FleurinputgenCalculation
from aiida_fleur.calculation.fleur import FleurCalculation
from aiida_fleur.tools.common_fleur_wf import get_inputs_fleur, get_inputs_inpgen
__copyright__ = (u"Copyright (c), 2016, Forschungszentrum Jülich GmbH, "
"IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
__version__ = "0.27"
__contributors__ = "<NAME>"
RemoteData = DataFactory('remote')
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
#FleurInpData = DataFactory('fleurinp.fleurinp')
FleurInpData = DataFactory('fleur.fleurinp')
FleurProcess = FleurCalculation.process()
FleurinpProcess = FleurinputgenCalculation.process()
class fleur_scf_wc(WorkChain):
"""
This workflow converges a FLEUR calculation (SCF).
It converges the charge density and, optionally, the total energy.
Two paths are possible:
(1) Start from a structure and run the inpgen first
(2) Start from a Fleur calculation, with optional remoteData
:Params: wf_parameters: parameterData node,
:Params: structure : structureData node,
:Params: calc_parameters: parameterData node,
:Params: fleurinp: fleurinpData node,
:Params: remote_data: remoteData node,
:Params: inpgen: Code node,
:Params: fleur: Code node,
:returns: Success, last result node, list with convergence behavior
minimum input example:
1. Code1, Code2, Structure, (Parameters), (wf_parameters)
2. Code2, FleurinpData, (wf_parameters)
maximum input example:
1. Code1, Code2, Structure, Parameters
wf_parameters: {
'density_criterion' : Float,
'energy_criterion' : Float,
'converge_density' : True,
'converge_energy' : True,
'queue' : String,
'resources' : dict(
{"num_machines": int, "num_mpiprocs_per_machine" : int})
'walltime' : int}
2. Code2, FleurinpData, (remote-data), wf_parameters as in 1.
Hints:
1. This workflow does not work with local codes!
2. A commented usage sketch is given directly below this docstring.
"""
_workflowversion = "0.1.0"
_wf_default = {'fleur_runmax': 4,
'density_criterion' : 0.00002,
'energy_criterion' : 0.002,
'converge_density' : True,
'converge_energy' : False,
'reuse' : True,
'queue_name' : ''}
@classmethod
def define(cls, spec):
super(fleur_scf_wc, cls).define(spec)
spec.input("wf_parameters", valid_type=ParameterData, required=False,
default=ParameterData(dict={'fleur_runmax': 4,
'density_criterion' : 0.00002,
'energy_criterion' : 0.002,
'converge_density' : True,
'converge_energy' : False,
'reuse' : True}))
spec.input("structure", valid_type=StructureData, required=False)
spec.input("calc_parameters", valid_type=ParameterData, required=False)
#spec.input("settings", valid_type=ParameterData, required=False)
spec.input("fleurinp", valid_type=FleurInpData, required=False)
spec.input("remote_data", valid_type=RemoteData, required=False)
spec.input("inpgen", valid_type=Code, required=False)
spec.input("fleur", valid_type=Code, required=True)
spec.outline(
cls.start,
if_(cls.validate_input)(
cls.run_fleurinpgen),
cls.run_fleur,
cls.get_res,
while_(cls.condition)(
cls.run_fleur,
cls.get_res),
cls.return_results
)
#spec.dynamic_output()
def start(self):
"""
init context and some parameters
"""
print('started convergence workflow version {}'.format(self._workflowversion))
print("Workchain node identifiers: {}".format(ProcessRegistry().current_calc_node))
# init
self.ctx.last_calc = None
self.ctx.loop_count = 0
self.ctx.calcs = []
self.ctx.successful = False
self.ctx.distance = []
self.ctx.total_energy = []
self.energydiff = 10000
self.ctx.warnings = []
self.ctx.errors = []
self.ctx.fleurinp = None
wf_dict = self.inputs.wf_parameters.get_dict()
if wf_dict == {}:
wf_dict = self._wf_default
# if MPI in code name, execute parallel
self.ctx.serial = wf_dict.get('serial', False)#True
# set values, or defaults
self.ctx.max_number_runs = wf_dict.get('fleur_runmax', 4)
self.ctx.resources = wf_dict.get('resources', {"num_machines": 1})
self.ctx.walltime_sec = wf_dict.get('walltime_sec', 60*60)
self.ctx.queue = wf_dict.get('queue_name', '')
def validate_input(self):
"""
# validate input and find out which path (1, or 2) to take
# return True means run inpgen if false run fleur directly
"""
run_inpgen = True
inputs = self.inputs
if 'fleurinp' in inputs:
run_inpgen = False
if 'structure' in inputs:
warning = 'WARNING: Ignoring Structure input, because Fleurinp was given'
print(warning)
self.ctx.warnings.append(warning)
if 'inpgen' in inputs:
warning = 'WARNING: Ignoring inpgen code input, because Fleurinp was given'
print(warning)
self.ctx.warnings.append(warning)
if 'calc_parameters' in inputs:
warning = 'WARNING: Ignoring parameter input, because Fleurinp was given'
print(warning)
self.ctx.warnings.append(warning)
elif 'structure' in inputs:
if not 'inpgen' in inputs:
error = 'ERROR: StructureData was provided, but no inpgen code was provided'
print(error)
self.ctx.errors.append(error)
#kill workflow
else:
error = 'ERROR: No StructureData nor FleurinpData was provided'
print(error)
self.ctx.errors.append(error)
#kill workflow
return run_inpgen
def run_fleurinpgen(self):
"""
run the inpgen
"""
structure = self.inputs.structure
inpgencode = self.inputs.inpgen
if 'calc_parameters' in self.inputs:
params = self.inputs.calc_parameters
else:
params = None
options = {"max_wallclock_seconds": self.ctx.walltime_sec,
"resources": self.ctx.resources,
"queue_name" : self.ctx.queue}
inputs = get_inputs_inpgen(structure, inpgencode, options, params=params)
print('run inpgen')
future = submit(FleurinpProcess, **inputs)
return ToContext(inpgen=future, last_calc=future)
def change_fleurinp(self):
"""
This routine sets some things in the fleurinp file before running a fleur
calculation.
"""
from aiida.orm.data.fleurinp.fleurinpmodifier import FleurinpModifier
#print('in change_fleurinp')
if self.ctx.fleurinp: #something was already changed
#print('Fleurinp already exists')
return
elif 'fleurinp' in self.inputs:
fleurin = self.inputs.fleurinp
else:
fleurin = self.ctx['inpgen'].out.fleurinpData
wf_dict = self.inputs.wf_parameters.get_dict()
converge_te = wf_dict.get('converge_energy', False)
if not converge_te:
# if energy convergence is not requested, set minDistance to the density
# criterion and itmax to 30 (fewer FLEUR restarts needed)
dc = wf_dict.get('density_criterion', 0.00002)
fleurmode = FleurinpModifier(fleurin)
fleurmode.set_inpchanges({'itmax': 30, 'minDistance' : dc})
out = fleurmode.freeze()
self.ctx.fleurinp = out
return
else:
self.ctx.fleurinp = fleurin
return
def run_fleur(self):
"""
run a FLEUR calculation
"""
# check if calculation before is in FINISHED (not failed)
#check if inpgen was run before.
self.change_fleurinp()
fleurin = self.ctx.fleurinp
'''
if 'settings' in self.inputs:
settings = self.input.settings
else:
settings = ParameterData(dict={'files_to_retrieve' : [], 'files_not_to_retrieve': [],
'files_copy_remotely': [], 'files_not_copy_remotely': [],
'commandline_options': ["-wtime", "{}".format(self.ctx.walltime_sec)], 'blaha' : ['bla']})
'''
if self.ctx['last_calc']:
remote = self.ctx['last_calc'].out.remote_folder
elif 'remote_data' in self.inputs:
remote = self.inputs.remote_data
else:
remote = None  # no parent calculation folder available (e.g. first run from a given fleurinp)
code = self.inputs.fleur
options = {"max_wallclock_seconds": self.ctx.walltime_sec,
"resources": self.ctx.resources,
"queue_name" : self.ctx.queue}
#inputs = get_inputs_fleur(code, remote, fleurin, options, settings=settings, serial=self.ctx.serial)
inputs = get_inputs_fleur(code, remote, fleurin, options, serial=self.ctx.serial)
#print inputs
future = submit(FleurProcess, **inputs)
self.ctx.loop_count = self.ctx.loop_count + 1
print('run FLEUR number: {}'.format(self.ctx.loop_count))
self.ctx.calcs.append(future)
return ToContext(last_calc=future) #calcs.append(future),
def get_res(self):
"""
Check how the last Fleur calculation went
Parse some results.
"""
#print('In get_res')
# TODO maybe do this different
# or if complexer output node exists take from there.
from aiida.tools.codespecific.fleur.xml_util import eval_xpath2
from lxml import etree
#from lxml.etree import XMLSyntaxError
xpath_energy = '/fleurOutput/scfLoop/iteration/totalEnergy/@value'
xpath_distance = '/fleurOutput/scfLoop/iteration/densityConvergence/chargeDensity/@distance' # be aware of magnetism
#densityconvergence_xpath = 'densityConvergence'
#chargedensity_xpath = 'densityConvergence/chargeDensity'
#overallchargedensity_xpath = 'densityConvergence/overallChargeDensity'
#spindensity_xpath = 'densityConvergence/spinDensity'
last_calc = self.ctx.last_calc
# TODO check calculation state:
calc_state = 'FINISHED'
if calc_state != 'FINISHED':
#kill workflow in a controled way, call return results, or write a end_routine
#TODO
pass
'''
spin = get_xml_attribute(eval_xpath(root, magnetism_xpath), jspin_name)
charge_densitys = eval_xpath(iteration_node, chargedensity_xpath)
charge_density1 = get_xml_attribute(charge_densitys[0], distance_name)
write_simple_outnode(
charge_density1, 'float', 'charge_density1', simple_data)
charge_density2 = get_xml_attribute(charge_densitys[1], distance_name)
write_simple_outnode(
charge_density2, 'float', 'charge_density2', simple_data)
spin_density = get_xml_attribute(
eval_xpath(iteration_node, spindensity_xpath), distance_name)
write_simple_outnode(
spin_density, 'float', 'spin_density', simple_data)
overall_charge_density = get_xml_attribute(
eval_xpath(iteration_node, overallchargedensity_xpath), distance_name)
write_simple_outnode(
overall_charge_density, 'float', 'overall_charge_density', simple_data)
'''
#TODO: dangerous, can fail, error catching
outxmlfile = last_calc.out.output_parameters.dict.outputfile_path
tree = etree.parse(outxmlfile)
root = tree.getroot()
energies = eval_xpath2(root, xpath_energy)
for energy in energies:
self.ctx.total_energy.append(float(energy))
distances = eval_xpath2(root, xpath_distance)
#print distances
for distance in distances:
self.ctx.distance.append(float(distance))
def condition(self):
"""
check convergence condition
"""
#print('condition')
density_converged = False
energy_converged = False
# TODO do a test first if last_calculation was successful, otherwise,
# 'output_parameters' wont exist.
inpwfp_dict = self.inputs.wf_parameters.get_dict()
last_charge_density = self.ctx.last_calc.out.output_parameters.dict.charge_density
#print last_charge_density
if inpwfp_dict.get('converge_density', True):
if inpwfp_dict.get('density_criterion', 0.00002) >= last_charge_density:
density_converged = True
else:
density_converged = True
energy = self.ctx.total_energy
if len(energy) >=2:
self.energydiff = abs(energy[-1]-energy[-2])
#print self.energydiff
if inpwfp_dict.get('converge_energy', True):
if inpwfp_dict.get('energy_criterion', 0.002) >= self.energydiff:
energy_converged = True
else:
energy_converged = True #since energy convergence is not wanted
if density_converged and energy_converged:
self.ctx.successful = True
return False
elif self.ctx.loop_count >= self.ctx.max_number_runs:
return False
else:
return True
def return_results(self):
"""
return the results of the calculations
"""
outputnode_dict ={}
if self.ctx.successful:
print('Done, the convergence criteria are reached.')
print('The charge density of the FLEUR calculation pk= converged after {} FLEUR runs and {} iterations to {} '
'"me/bohr^3"'.format(self.ctx.loop_count, self.ctx.last_calc.out.output_parameters.dict.number_of_iterations_total,
self.ctx.last_calc.out.output_parameters.dict.charge_density))
print('The
})
@login_required
@require_http_methods(['GET'])
def organization_applications(request, organization_id):
organization = get_object_or_404(models.Organization, pk=organization_id)
return render(request, 'boh/organization/applications.html', {
'organization': organization,
'active_top': 'applications',
'active_tab': 'applications'
})
@login_required
@require_http_methods(['GET'])
def organization_people(request, organization_id):
organization = get_object_or_404(models.Organization, pk=organization_id)
return render(request, 'boh/organization/people.html', {
'organization': organization,
'active_top': 'applications',
'active_tab': 'people'
})
@login_required
@require_http_methods(['GET', 'POST'])
def organization_settings_general(request, organization_id):
organization = get_object_or_404(models.Organization, pk=organization_id)
form = forms.OrganizationSettingsGeneralForm(request.POST or None, instance=organization)
if request.method == 'POST':
if form.is_valid():
form.save()
messages.success(request, _('You successfully updated this organization\'s general information.'), extra_tags=random.choice(success_messages))
else:
messages.error(request, _('There was a problem updating this organization\'s general information.'), extra_tags=random.choice(error_messages))
return render(request, 'boh/organization/settings/general.html', {
'organization': organization,
'form': form,
'active_top': 'applications',
'active_tab': 'settings',
'active_side': 'general'
})
@login_required
@require_http_methods(['GET', 'POST'])
def organization_settings_people(request, organization_id):
organization = get_object_or_404(models.Organization, pk=organization_id)
people_form = forms.OrganizationSettingsPeopleForm(request.POST or None, instance=organization)
if request.method == 'POST':
if people_form.is_valid():
people_form.save()
messages.success(request, _('You successfully updated this organization\'s associated people.'), extra_tags=random.choice(success_messages))
else:
messages.error(request, _('There was a problem updating this organization\'s associated people.'), extra_tags=random.choice(error_messages))
return render(request, 'boh/organization/settings/people.html', {
'organization': organization,
'people_form': people_form,
'active_top': 'applications',
'active_tab': 'settings',
'active_side': 'people'
})
@login_required
@require_http_methods(['GET', 'POST'])
def organization_settings_danger(request, organization_id):
organization = get_object_or_404(models.Organization, pk=organization_id)
form = forms.OrganizationDeleteForm(request.POST or None, instance=organization)
if request.method == 'POST' and form.is_valid():
organization.delete()
messages.success(request, _('You successfully deleted the "%(organization_name)s" organization.') % {'organization_name': organization.name}, extra_tags=random.choice(success_messages))
return redirect('boh:dashboard.personal')
return render(request, 'boh/organization/settings/danger.html', {
'organization': organization,
'active_top': 'applications',
'active_tab': 'settings',
'active_side': 'danger'
})
@login_required
@staff_member_required
@require_http_methods(['GET', 'POST'])
def organization_add(request):
form = forms.OrganizationAddForm(request.POST or None)
if form.is_valid():
organization = form.save()
messages.success(request, _('You successfully created this organization.'), extra_tags=random.choice(success_messages))
return redirect('boh:organization.overview', organization.id)
return render(request, 'boh/organization/add.html', {
'form': form,
'active_top': 'applications'
})
# Application
@login_required
@require_http_methods(['GET'])
def application_list(request):
queries = request.GET.copy()
if 'page' in queries:
del queries['page']
if 'page_size' in queries:
del queries['page_size']
application_filter = filters.ApplicationFilter(request.GET, queryset=models.Application.objects.all().select_related('organization').prefetch_related('tags'))
page_size = 25
page_size_form = forms.PageSizeForm()
if request.GET.get('page_size'):
page_size_form = forms.PageSizeForm(request.GET)
if page_size_form.is_valid():
page_size = page_size_form.cleaned_data['page_size']
if page_size == 'all':
page_size = 10000000
else:
page_size = int(page_size)
paginator = Paginator(application_filter, page_size)
page = request.GET.get('page')
try:
applications = paginator.page(page)
except PageNotAnInteger:
applications = paginator.page(1)
except EmptyPage:
applications = paginator.page(paginator.num_pages)
#
show_advanced = False
if request.GET.get('platform') or request.GET.get('lifecycle') or request.GET.get('origin') or request.GET.get('technologies') or request.GET.get('regulations') or request.GET.get('tags') or request.GET.get('service_level_agreements') or request.GET.get('asvs_level') or (request.GET.get('external_audience') and request.GET.get('external_audience') != '1') or (request.GET.get('internet_accessible') and request.GET.get('internet_accessible') != '1'):
show_advanced = True
return render(request, 'boh/application/list.html', {
'form': application_filter.form,
'applications': applications,
'queries': queries,
'page_size_form': page_size_form,
'page_size': str(page_size),
'show_advanced': show_advanced,
'active_top': 'applications'
})
@login_required
@require_http_methods(['GET'])
def application_overview(request, application_id):
application = get_object_or_404(models.Application, pk=application_id)
return render(request, 'boh/application/overview.html', {
'application': application,
'active_top': 'applications',
'active_tab': 'overview'
})
@login_required
@require_http_methods(['GET'])
def application_engagements(request, application_id):
application = get_object_or_404(models.Application.objects.select_related('organization'), pk=application_id)
engagements = application.engagement_set.prefetch_related(
Prefetch('activity_set', queryset=models.Activity.objects
.all()
.select_related('activity_type')
)
).annotate(comment_count=Count('engagementcomment'))
pending_engagements = engagements.filter(status=models.Engagement.PENDING_STATUS)
open_engagements = engagements.filter(status=models.Engagement.OPEN_STATUS)
closed_engagements = engagements.filter(status=models.Engagement.CLOSED_STATUS).order_by('-end_date')
return render(request, 'boh/application/engagements.html', {
'application': application,
'pending_engagements': pending_engagements,
'open_engagements': open_engagements,
'closed_engagements': closed_engagements,
'active_top': 'applications',
'active_tab': 'engagements'
})
@login_required
@require_http_methods(['GET'])
def application_environments(request, application_id):
application = get_object_or_404(models.Application, pk=application_id)
return render(request, 'boh/application/environments.html', {
'application': application,
'active_top': 'applications',
'active_tab': 'environments'
})
@login_required
@require_http_methods(['GET'])
def application_people(request, application_id):
application = get_object_or_404(models.Application, pk=application_id)
return render(request, 'boh/application/people.html', {
'application': application,
'active_top': 'applications',
'active_tab': 'people'
})
@login_required
@require_http_methods(['GET', 'POST'])
def application_people_add(request, application_id):
application = get_object_or_404(models.Application, pk=application_id)
relation_form = forms.PersonRelationForm(request.POST or None)
relation_form.fields['person'].queryset = models.Person.objects.exclude(application__id=application.id)
if request.method == 'POST':
if relation_form.is_valid():
relation = relation_form.save(commit=False)
relation.application = application
name = relation.person.first_name + ' ' + relation.person.last_name
try:
relation.save()
except IntegrityError:
messages.error(request, _('"%(name)s" is already related to this application.') % {'name': name}, extra_tags=random.choice(error_messages))
else:
messages.success(request, _('You successfully added "%(name)s" to this application.') % {'name': name}, extra_tags=random.choice(success_messages))
finally:
return redirect('boh:application.people', application.id)
else:
messages.error(request, _('There was a problem saving the relation to this application.'), extra_tags=random.choice(error_messages))
return render(request, 'boh/application/add_relation.html', {
'application': application,
'relation_form': relation_form,
'active_top': 'applications',
'active_tab': 'people'
})
@login_required
@require_http_methods(['GET', 'POST'])
def application_people_edit(request, application_id, relation_id):
application = get_object_or_404(models.Application, pk=application_id)
relation = get_object_or_404(models.Relation, pk=relation_id)
relation_form = forms.PersonRelationForm(request.POST or None, instance=relation)
relation_form.fields['person'].queryset = models.Person.objects.exclude(Q(application__id=application.id) & ~Q(id=relation.person.id))
relation_form.fields['person'].value = relation.person
if request.method == 'POST':
if relation_form.is_valid():
relation = relation_form.save(commit=False)
relation.application = application
name = relation.person.first_name + ' ' + relation.person.last_name
try:
relation.save()
except IntegrityError:
messages.error(request, _('"%(name)s" is already related to this application.') % {'name': name}, extra_tags=random.choice(error_messages))
else:
messages.success(request, _('You successfully updated the relationship of "%(name)s" with this application.') % {'name': name}, extra_tags=random.choice(success_messages))
finally:
return redirect('boh:application.people', application.id)
else:
messages.error(request, _('There was a problem saving the relation to this application.'), extra_tags=random.choice(error_messages))
return render(request, 'boh/application/edit_relation.html', {
'application': application,
'relation': relation,
'relation_form': relation_form,
'active_top': 'applications',
'active_tab': 'people'
})
@login_required
@require_http_methods(['POST'])
def application_people_delete(request, application_id, relation_id):
application = get_object_or_404(models.Application, pk=application_id)
relation = get_object_or_404(models.Relation, pk=relation_id)
name = relation.person.first_name + ' ' + relation.person.last_name
delete_form = forms.RelationDeleteForm(request.POST, instance=relation)
if delete_form.is_valid():
relation.delete()
messages.success(request, _('You successfully disassociated "%(name)s" from this application.') % {'name': name}, extra_tags=random.choice(success_messages))
else:
messages.error(request, _('There was a problem disassociating "%(name)s" from this application.') % {'name': name}, extra_tags=random.choice(error_messages))
return redirect('boh:application.people', application.id)
@login_required
@require_http_methods(['GET', 'POST'])
def application_add(request):
form = forms.ApplicationAddForm(request.POST or None)
if form.is_valid():
application = form.save()
messages.success(request, _('You successfully created this application.'), extra_tags=random.choice(success_messages))
return redirect('boh:application.overview', application_id=application.id)
return render(request, 'boh/application/add.html', {
'form': form,
'active_top': 'applications'
})
@login_required
@require_http_methods(['GET', 'POST'])
def application_settings_general(request, application_id):
application = get_object_or_404(models.Application, pk=application_id)
general_form = forms.ApplicationSettingsGeneralForm(instance=application)
organization_form = forms.ApplicationSettingsOrganizationForm(instance=application)
if request.method == 'POST':
if 'submit-general' in request.POST:
general_form = forms.ApplicationSettingsGeneralForm(request.POST, instance=application)
if general_form.is_valid():
general_form.save()
messages.success(request, _('You successfully updated this application\'s general information.'), extra_tags=random.choice(success_messages))
elif 'submit-organization' in request.POST:
organization_form = forms.ApplicationSettingsOrganizationForm(request.POST, instance=application)
if organization_form.is_valid():
organization_form.save()
messages.success(request, _('You successfully updated this application\'s organization.'), extra_tags=random.choice(success_messages))
return render(request, 'boh/application/settings/general.html', {
'application': application,
'general_form': general_form,
'organization_form': organization_form,
'active_top': 'applications',
'active_tab': 'settings',
'active_side': 'general'
})
@login_required
@require_http_methods(['GET', 'POST'])
def application_settings_metadata(request, application_id):
application = get_object_or_404(models.Application, pk=application_id)
metadata_form = forms.ApplicationSettingsMetadataForm(instance=application)
technologies_form = forms.ApplicationSettingsTechnologiesForm(instance=application)
regulations_form = forms.ApplicationSettingsRegulationsForm(instance=application)
tags_form = forms.ApplicationSettingsTagsForm(instance=application)
if 'submit-metadata' in request.POST:
metadata_form = forms.ApplicationSettingsMetadataForm(request.POST, instance=application)
if metadata_form.is_valid():
metadata_form.save()
messages.success(request, _('You successfully updated this application\'s metadata.'), extra_tags=random.choice(success_messages))
else:
messages.error(request, _('There was a problem updating this application\'s metadata.'), extra_tags=random.choice(error_messages))
if 'submit-technologies' in request.POST:
technologies_form = forms.ApplicationSettingsTechnologiesForm(request.POST, instance=application)
if technologies_form.is_valid():
technologies_form.save()
messages.success(request, _('You successfully updated this application\'s technologies.'), extra_tags=random.choice(success_messages))
else:
messages.error(request, _('There was a problem updating this application\'s technologies.'), extra_tags=random.choice(error_messages))
if 'submit-regulations' in request.POST:
regulations_form = forms.ApplicationSettingsRegulationsForm(request.POST, instance=application)
if regulations_form.is_valid():
regulations_form.save()
messages.success(request, _('You successfully updated this application\'s regulations.'), extra_tags=random.choice(success_messages))
else:
messages.error(request, _('There was a problem updating this application\'s regulations.'), extra_tags=random.choice(error_messages))
elif 'submit-tags' in request.POST:
tags_form = forms.ApplicationSettingsTagsForm(request.POST, instance=application)
if tags_form.is_valid():
tags_form.save()
messages.success(request, _('You successfully updated this application\'s tags.'), extra_tags=random.choice(success_messages))
else:
messages.error(request, _('There was a problem updating this application\'s tags.'), extra_tags=random.choice(error_messages))
return render(request, 'boh/application/settings/metadata.html', {
'application': application,
'metadata_form': metadata_form,
'technologies_form': technologies_form,
'regulations_form': regulations_form,
'tags_form': tags_form,
'active_top': 'applications',
'active_tab': 'settings',
'active_side': 'metadata'
})
@login_required
@require_http_methods(['GET', 'POST'])
def application_settings_data_elements(request, application_id):
application = get_object_or_404(models.Application, pk=application_id)
data_elements_form = forms.ApplicationSettingsDataElementsForm(request.POST or None, instance=application)
dcl_override_form = forms.ApplicationSettingsDCLOverrideForm(instance=application)
if request.method == 'POST':
if data_elements_form.is_valid():
data_elements_form.save()
messages.success(request, _('You successfully updated this application\'s data elements.'), extra_tags=random.choice(success_messages))
return redirect('boh:application.settings.data-elements', application.id)
return render(request, 'boh/application/settings/data_elements.html', {
'application': application,
'data_elements_form': data_elements_form,
'dcl_override_form': dcl_override_form,
'dcl': application.data_classification_level,
'dsv': application.data_sensitivity_value,
'active_top': 'applications',
'active_tab': 'settings',
'active_side': 'data_elements'
})
@login_required
@require_http_methods(['GET', 'POST'])
def application_settings_service_level_agreements(request, application_id):
application = get_object_or_404(models.Application, pk=application_id)
sla_form = forms.ApplicationSettingsServiceLevelAgreementForm(request.POST or None, instance=application)
if request.method == 'POST':
if sla_form.is_valid():
sla_form.save()
messages.success(request, _('You successfully updated this application\'s service level agreements.'), extra_tags=random.choice(success_messages))
else:
messages.error(request, _('There was a problem updating this application\'s service level agreements.'), extra_tags=random.choice(error_messages))
return render(request, 'boh/application/settings/service_level_agreements.html', {
'application': application,
'sla_form': sla_form,
'active_top': 'applications',
'active_tab': 'settings',
'active_side': 'agreements'
})
@login_required
@require_http_methods(['POST'])
def application_settings_data_elements_override(request, application_id):
application = get_object_or_404(models.Application, pk=application_id)
dcl_override_form = forms.ApplicationSettingsDCLOverrideForm(request.POST or None, instance=application)
if dcl_override_form.is_valid():
dcl_override_form.save()
messages.success(request, _('This application\'s data classification override has been updated.'), extra_tags=random.choice(success_messages))
return redirect('boh:application.settings.data-elements', application.id)
@login_required
@require_http_methods(['GET', 'POST'])
def application_settings_services(request, application_id):
application = get_object_or_404(models.Application, pk=application_id)
threadfix_form = forms.ApplicationSettingsThreadFixForm(instance=application)
if 'submit-threadfix' in request.POST:
threadfix_form = forms.ApplicationSettingsThreadFixForm(request.POST, instance=application)
if threadfix_form.is_valid():
threadfix_form.save()
messages.success(request, _('You successfully updated this application\'s ThreadFix information.'), extra_tags=random.choice(success_messages))
return render(request, 'boh/application/settings/services.html', {
'application': application,
'threadfix_form': threadfix_form,
'active_top': 'applications',
'active_tab': 'settings',
'active_side': 'services'
})
@login_required
@require_http_methods(['GET', 'POST'])
def application_settings_owasp_asvs(request, application_id):
application = get_object_or_404(models.Application, pk=application_id)
asvs_form = forms.ApplicationSettingsASVSForm(instance=application)
if 'submit-asvs' in request.POST:
asvs_form = forms.ApplicationSettingsASVSForm(request.POST, instance=application)
if asvs_form.is_valid():
asvs_form.save()
messages.success(request, _('You successfully updated this application\'s ASVS information.'), extra_tags=random.choice(success_messages))
return render(request, 'boh/application/settings/owasp_asvs.html', {
'application': application,
'asvs_form': asvs_form,
'active_top': 'applications',
'active_tab': 'settings',
'active_side': 'owasp'
})
@login_required
@require_http_methods(['GET', 'POST'])
def application_settings_custom_fields(request, application_id):
application = get_object_or_404(models.Application, pk=application_id)
ApplicationCustomFieldValueFormSet = \
inlineformset_factory(
models.Application,
models.ApplicationCustomFieldValue,
fields=('custom_field', 'value',),
extra=1,
widgets={}
)
formset = ApplicationCustomFieldValueFormSet(request.POST or None, instance=application)
if formset.is_valid():
formset.save()
messages.success(request, _('You successfully updated these custom fields.'), extra_tags=random.choice(success_messages))
return redirect('boh:application.settings.custom-fields', application_id=application.id)
custom_fields = models.CustomField.objects.all()
return render(request, 'boh/application/settings/custom_fields.html', {
'application': application,
'custom_fields': custom_fields,
'formset': formset,
'active_top': 'applications',
'active_tab': 'settings',
'active_side': 'custom_fields'
})
@login_required
@require_http_methods(['GET', 'POST'])
def application_settings_danger(request, application_id):
application = get_object_or_404(models.Application, pk=application_id)
form = forms.ApplicationDeleteForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
application.delete()
messages.success(request, _('You successfully deleted the "%(application_name)s" application.') % {'application_name': application.name}, extra_tags=random.choice(success_messages))
return redirect('boh:application.list')
whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
required to be completed on a form. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsRequiredForForm.
Returns: A boolean that contains the value that is returned by
System.Windows.Automation.AutomationProperties.GetIsRequiredForForm(System.Windo
ws.DependencyObject), if it's set; otherwise false.
"""
pass
def PeerFromProvider(self, *args): #cannot find CLR method
"""
PeerFromProvider(self: AutomationPeer, provider: IRawElementProviderSimple) -> AutomationPeer
Gets an System.Windows.Automation.Peers.AutomationPeer for the specified
System.Windows.Automation.Provider.IRawElementProviderSimple proxy.
provider: The class that implements
System.Windows.Automation.Provider.IRawElementProviderSimple.
Returns: The System.Windows.Automation.Peers.AutomationPeer.
"""
pass
def ProviderFromPeer(self, *args): #cannot find CLR method
"""
ProviderFromPeer(self: AutomationPeer, peer: AutomationPeer) -> IRawElementProviderSimple
Gets the System.Windows.Automation.Provider.IRawElementProviderSimple for the
specified System.Windows.Automation.Peers.AutomationPeer.
peer: The automation peer.
Returns: The proxy.
"""
pass
def SetFocusCore(self, *args): #cannot find CLR method
"""
SetFocusCore(self: UIElementAutomationPeer)
Sets the keyboard input focus on the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer.
This method is called by
System.Windows.Automation.Peers.AutomationPeer.SetFocus.
"""
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, owner):
""" __new__(cls: type, owner: ToggleButton) """
pass
IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
"""
class CheckBoxAutomationPeer(ToggleButtonAutomationPeer, IToggleProvider):
"""
Exposes System.Windows.Controls.CheckBox types to UI Automation.
CheckBoxAutomationPeer(owner: CheckBox)
"""
def GetAcceleratorKeyCore(self, *args): #cannot find CLR method
"""
GetAcceleratorKeyCore(self: ButtonBaseAutomationPeer) -> str
Gets the accelerator key for the element associated with this
System.Windows.Automation.Peers.ButtonBaseAutomationPeer. Called by
System.Windows.Automation.Peers.AutomationPeer.GetAcceleratorKey.
Returns: A string containing the accelerator key.
"""
pass
def GetAccessKeyCore(self, *args): #cannot find CLR method
"""
GetAccessKeyCore(self: UIElementAutomationPeer) -> str
Gets the access key for the System.Windows.UIElement that is associated with
this System.Windows.Automation.Peers.UIElementAutomationPeer.This method is
called by System.Windows.Automation.Peers.AutomationPeer.GetAccessKey.
Returns: The access key for the System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer.
"""
pass
def GetAutomationControlTypeCore(self, *args): #cannot find CLR method
"""
GetAutomationControlTypeCore(self: CheckBoxAutomationPeer) -> AutomationControlType
Gets the System.Windows.Automation.Peers.AutomationControlType for the element
associated with this System.Windows.Automation.Peers.CheckBoxAutomationPeer.
Called by
System.Windows.Automation.Peers.AutomationPeer.GetAutomationControlType.
Returns: System.Windows.Automation.Peers.AutomationControlType.CheckBox.
"""
pass
def GetAutomationIdCore(self, *args): #cannot find CLR method
"""
GetAutomationIdCore(self: ButtonBaseAutomationPeer) -> str
Gets the System.Windows.Automation.AutomationProperties.AutomationId for the
element associated with this
System.Windows.Automation.Peers.ButtonBaseAutomationPeer. Called by
System.Windows.Automation.Peers.AutomationPeer.GetAutomationId.
Returns: The string that contains the
System.Windows.Automation.AutomationProperties.AutomationId.
"""
pass
def GetBoundingRectangleCore(self, *args): #cannot find CLR method
"""
GetBoundingRectangleCore(self: UIElementAutomationPeer) -> Rect
Gets the System.Windows.Rect that represents the bounding rectangle of the
System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetBoundingRectangle.
Returns: The System.Windows.Rect that contains the coordinates of the element.
Optionally, if the element is not both a System.Windows.Interop.HwndSource and
a System.Windows.PresentationSource, this method returns
System.Windows.Rect.Empty.
"""
pass
def GetChildrenCore(self, *args): #cannot find CLR method
"""
GetChildrenCore(self: UIElementAutomationPeer) -> List[AutomationPeer]
Gets the collection of child elements of the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer.
This method is called by
System.Windows.Automation.Peers.AutomationPeer.GetChildren.
Returns: A list of child System.Windows.Automation.Peers.AutomationPeer elements.
"""
pass
def GetClassNameCore(self, *args): #cannot find CLR method
"""
GetClassNameCore(self: CheckBoxAutomationPeer) -> str
Gets the name of the element associated with this
System.Windows.Automation.Peers.CheckBoxAutomationPeer. Called by
System.Windows.Automation.Peers.AutomationPeer.GetClassName.
Returns: A string that contains "CheckBox".
"""
pass
def GetClickablePointCore(self, *args): #cannot find CLR method
"""
GetClickablePointCore(self: UIElementAutomationPeer) -> Point
Gets a System.Windows.Point that represents the clickable space that is on the
System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetClickablePoint.
Returns: The System.Windows.Point on the element that allows a click. The point values
are (System.Double.NaN, System.Double.NaN) if the element is not both a
System.Windows.Interop.HwndSource and a System.Windows.PresentationSource.
"""
pass
def GetHelpTextCore(self, *args): #cannot find CLR method
"""
GetHelpTextCore(self: FrameworkElementAutomationPeer) -> str
Gets the string that describes the functionality of the
System.Windows.ContentElement that is associated with this
System.Windows.Automation.Peers.ContentElementAutomationPeer. Called by
System.Windows.Automation.Peers.AutomationPeer.GetHelpText.
Returns: The help text, usually from the System.Windows.Controls.ToolTip, or
System.String.Empty if there is no help text.
"""
pass
def GetHostRawElementProviderCore(self, *args): #cannot find CLR method
"""
GetHostRawElementProviderCore(self: AutomationPeer) -> HostedWindowWrapper
Tells UI Automation where in the UI Automation tree to place the hwnd being
hosted by a Windows Presentation Foundation (WPF) element.
Returns: This method returns the hosted hwnd to UI Automation for controls that host
hwnd objects.
"""
pass
def GetItemStatusCore(self, *args): #cannot find CLR method
"""
GetItemStatusCore(self: UIElementAutomationPeer) -> str
Gets a string that communicates the visual status of the
System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetItemStatus.
Returns: The string that contains the
System.Windows.Automation.AutomationProperties.ItemStatus that is returned by
System.Windows.Automation.AutomationProperties.GetItemStatus(System.Windows.Depe
ndencyObject).
"""
pass
def GetItemTypeCore(self, *args): #cannot find CLR method
"""
GetItemTypeCore(self: UIElementAutomationPeer) -> str
Gets a human-readable string that contains the item type that the
System.Windows.UIElement for this
System.Windows.Automation.Peers.UIElementAutomationPeer represents. This method
is called by System.Windows.Automation.Peers.AutomationPeer.GetItemType.
Returns: The string that contains the
System.Windows.Automation.AutomationProperties.ItemType that is returned by
System.Windows.Automation.AutomationProperties.GetItemType(System.Windows.Depend
encyObject).
"""
pass
def GetLabeledByCore(self, *args): #cannot find CLR method
"""
GetLabeledByCore(self: UIElementAutomationPeer) -> AutomationPeer
Gets the System.Windows.Automation.Peers.AutomationPeer for the element that is
targeted to the System.Windows.UIElement for this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetLabeledBy.
Returns: The System.Windows.Automation.Peers.AutomationPeer for the element that is
targeted to the System.Windows.UIElement for this
System.Windows.Automation.Peers.UIElementAutomationPeer.
"""
pass
def GetLocalizedControlTypeCore(self, *args): #cannot find CLR method
"""
GetLocalizedControlTypeCore(self: AutomationPeer) -> str
When overridden in a derived class, is called by
System.Windows.Automation.Peers.AutomationPeer.GetLocalizedControlType.
Returns: The type of the control.
"""
pass
def GetNameCore(self, *args): #cannot find CLR method
"""
GetNameCore(self: ButtonBaseAutomationPeer) -> str
Gets the name of the class of the element associated with this
System.Windows.Automation.Peers.ButtonBaseAutomationPeer. Called by
System.Windows.Automation.Peers.AutomationPeer.GetName.
Returns: A string that contains the class name, minus the accelerator key.
"""
pass
def GetOrientationCore(self, *args): #cannot find CLR method
"""
GetOrientationCore(self: UIElementAutomationPeer) -> AutomationOrientation
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
laid out in a specific direction. This method is called by
System.Windows.Automation.Peers.AutomationPeer.GetOrientation.
Returns: The System.Windows.Automation.Peers.AutomationOrientation.None enumeration
value.
"""
pass
def GetPeerFromPointCore(self, *args): #cannot find CLR method
""" GetPeerFromPointCore(self: AutomationPeer, point: Point) -> AutomationPeer """
pass
def HasKeyboardFocusCore(self, *args): #cannot find CLR method
"""
HasKeyboardFocusCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
currently has keyboard input focus. This method is called by
System.Windows.Automation.Peers.AutomationPeer.HasKeyboardFocus.
Returns: true if the element has keyboard input focus; otherwise, false.
"""
pass
def IsContentElementCore(self, *args): #cannot find CLR method
"""
IsContentElementCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
an element that contains data that is presented to the user. This method is
called by System.Windows.Automation.Peers.AutomationPeer.IsContentElement.
Returns: true.
"""
pass
def IsControlElementCore(self, *args): #cannot find CLR method
"""
IsControlElementCore(self: UIElementAutomationPeer) -> bool
Gets or sets a value that indicates whether the System.Windows.UIElement that
is associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
is understood by the end user as interactive. Optionally, the user might
understand the System.Windows.UIElement as contributing to the logical
structure of the control in the GUI. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsControlElement.
Returns: true.
"""
pass
def IsEnabledCore(self, *args): #cannot find CLR method
"""
IsEnabledCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
can accept keyboard
= reshape_fr(ELag,f.m)
E[k] = post_process(E[k],infl,rot)
stats.assess(k,kObs,'a',E=E[k])
for k in progbar(range(chrono.K+1),desc='Assessing'):
stats.assess(k,None,'u',E=E[k])
return assimilator
@DA_Config
def EnRTS(upd_a,N,cntr,infl=1.0,rot=False,**kwargs):
"""
EnRTS (Rauch-Tung-Striebel) smoother.
Ref: Raanes, <NAME>. (2016):
"On the ensemble Rauch‐Tung‐Striebel smoother..."
Settings for reproducing literature benchmarks may be found in
mods/Lorenz95/raanes2016.py
"""
def assimilator(stats,twin,xx,yy):
f,h,chrono,X0 = twin.f, twin.h, twin.t, twin.X0
E = zeros((chrono.K+1,N,f.m))
Ef = E.copy()
E[0] = X0.sample(N)
# Forward pass
for k,kObs,t,dt in progbar(chrono.forecast_range):
E[k] = f(E[k-1],t-dt,dt)
E[k] = add_noise(E[k], dt, f.noise, kwargs)
Ef[k] = E[k]
if kObs is not None:
stats.assess(k,kObs,'f',E=E[k])
hE = h(E[k],t)
y = yy[kObs]
E[k] = EnKF_analysis(E[k],hE,h.noise,y,upd_a,stats,kObs)
E[k] = post_process(E[k],infl,rot)
stats.assess(k,kObs,'a',E=E[k])
# Backward pass
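# Note (added for clarity): J is the ensemble estimate of the RTS smoother gain;
# it regresses the (k+1) forecast anomalies onto the (already smoothed) anomalies
# at k, and cntr (typically in (0,1]) damps the backward update.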
for k in progbar(range(chrono.K)[::-1]):
A = anom(E[k])[0]
Af = anom(Ef[k+1])[0]
J = tinv(Af) @ A
J *= cntr
E[k] += ( E[k+1] - Ef[k+1] ) @ J
for k in progbar(range(chrono.K+1),desc='Assessing'):
stats.assess(k,E=E[k])
return assimilator
def serial_inds(upd_a, y, cvR, A):
if 'mono' in upd_a:
# Not robust?
inds = arange(len(y))
elif 'sorted' in upd_a:
N = len(A)
dC = cvR.diag
if np.all(dC == dC[0]):
# Sort y by the (prior) variance P of the observed anomalies
dC = np.sum(A*A,0)/(N-1)
inds = np.argsort(dC)
else: # Default: random ordering
inds = np.random.permutation(len(y))
return inds
@DA_Config
def SL_EAKF(N,loc_rad,taper='GC',ordr='rand',infl=1.0,rot=False,**kwargs):
"""
Serial, covariance-localized EAKF.
Ref: Karspeck, <NAME>., and <NAME>. (2007):
"Experimental implementation of an ensemble adjustment filter..."
Used without localization, this should be equivalent
(full ensemble equality) to the EnKF 'Serial'.
"""
def assimilator(stats,twin,xx,yy):
f,h,chrono,X0 = twin.f, twin.h, twin.t, twin.X0
N1 = N-1
R = h.noise
Rm12 = h.noise.C.sym_sqrt_inv
E = X0.sample(N)
stats.assess(0,E=E)
for k,kObs,t,dt in progbar(chrono.forecast_range):
E = f(E,t-dt,dt)
E = add_noise(E, dt, f.noise, kwargs)
if kObs is not None:
stats.assess(k,kObs,'f',E=E)
y = yy[kObs]
inds = serial_inds(ordr, y, R, anom(E)[0])
locf_at = h.loc_f(loc_rad, 'y2x', t, taper)
for i,j in enumerate(inds):
hE = h(E,t)
hx = mean(hE,0)
Y = hE - hx
mu = mean(E ,0)
A = E-mu
# Update j-th component of observed ensemble
Yj = Rm12[j,:] @ Y.T
dyj = Rm12[j,:] @ (y - hx)
#
skk = Yj@Yj # N1 * prior var
su = 1/( 1/skk + 1/N1 ) # N1 * KG
alpha = (N1/(N1+skk))**(0.5) # update contraction factor
#
dy2 = su*dyj/N1 # mean update
Y2 = alpha*Yj # anomaly update
if skk<1e-9: continue
# Update state (regress update from observation space)
# Localized
local, coeffs = locf_at(j)
if len(local) == 0: continue
Regression = (A[:,local]*coeffs).T @ Yj/np.sum(Yj**2)
mu[ local] += Regression*dy2
A[:,local] += np.outer(Y2 - Yj, Regression)
# Without localization:
#Regression = A.T @ Yj/np.sum(Yj**2)
#mu += Regression*dy2
#A += np.outer(Y2 - Yj, Regression)
E = mu + A
E = post_process(E,infl,rot)
stats.assess(k,kObs,E=E)
return assimilator
def infl_N_dual(YR,dyR,xN,g):
N, P = YR.shape
N1 = N-1
V,s,UT = svd0(YR)
du = UT @ dyR
eN, cL = hyperprior_coeffs(s,N,xN,g)
pad_rk = lambda arr: pad0( arr, min(N,P) )
dgn_rk = lambda l: pad_rk((l*s)**2) + N1
# Make dual cost function (in terms of l1)
J = lambda l: np.sum(du**2/dgn_rk(l)) \
+ eN/l**2 \
+ cL*log(l**2)
# Derivatives (not required with minimize_scalar):
Jp = lambda l: -2*l * np.sum(pad_rk(s**2) * du**2/dgn_rk(l)**2) \
+ -2*eN/l**3 \
+ 2*cL/l
Jpp = lambda l: 8*l**2 * np.sum(pad_rk(s**4) * du**2/dgn_rk(l)**3) \
+ 6*eN/l**4 \
+ -2*cL/l**2
# Find inflation factor (optimize)
l1 = Newton_m(Jp,Jpp,1.0)
#l1 = fmin_bfgs(J, x0=[1], gtol=1e-4, disp=0)
#l1 = minimize_scalar(J, bracket=(sqrt(prior_mode), 1e2), tol=1e-4).x
za = N1/l1**2
return za
@DA_Config
def LETKF(N,loc_rad,taper='GC',approx=False,infl=1.0,rot=False,**kwargs):
"""
Same as EnKF (sqrt), but with localization.
Settings for reproducing literature benchmarks may be found in
mods/Lorenz95/sak08.py
Ref: Hunt, <NAME>., <NAME>, and <NAME>. (2007):
"Efficient data assimilation for spatiotemporal chaos..."
"""
def assimilator(stats,twin,xx,yy):
f,h,chrono,X0,R,N1 = twin.f, twin.h, twin.t, twin.X0, twin.h.noise.C, N-1
E = X0.sample(N)
stats.assess(0,E=E)
for k,kObs,t,dt in progbar(chrono.forecast_range):
E = f(E,t-dt,dt)
E = add_noise(E, dt, f.noise, kwargs)
if kObs is not None:
stats.assess(k,kObs,'f',E=E)
mu = mean(E,0)
A = E - mu
y = yy[kObs]
Y,hx = anom(h(E,t))
# Transform obs space
Y = Y @ R.sym_sqrt_inv.T
dy = (y - hx) @ R.sym_sqrt_inv.T
if infl=='-N':
xN = kwargs.get('xN',1.0)
g = kwargs.get('g',0)
za = infl_N_dual(Y,dy,xN,g)
else:
za = N1
locf_at = h.loc_f(loc_rad, 'x2y', t, taper)
for i in range(f.m):
# Localize
local, coeffs = locf_at(i)
if len(local) == 0: continue
Y_i = Y[:,local] * sqrt(coeffs)
dy_i = dy [local] * sqrt(coeffs)
# Do analysis
if approx:
# Approximate alternative, derived by pretending that Y_loc = H @ A_i,
# even though the local cropping of Y happens after application of H.
# Anyways, with an explicit H, one can apply Woodbury
# to go to state space (dim==1), before reverting to HA_i = Y_loc.
B = A[:,i]@A[:,i] / za
H = A[:,i]@Y_i /B / za # H.T == H
HRH = H@H # R^{-1} == Id coz of above
T2 = 1/(1 + B*HRH)
AT = sqrt(T2)*A[:,i]
P = T2 * B
dmu = P*H@dy_i
else:
# Non-Approximate
if len(local) < N:
# SVD version
V,sd,_ = svd0(Y_i)
d = pad0(sd**2,N) + za
Pw = (V * d**(-1.0)) @ V.T
T = (V * d**(-0.5)) @ V.T * sqrt(za)
else:
# EVD version
d,V = eigh(Y_i @ Y_i.T + za*eye(N))
T = V@diag(d**(-0.5))@V.T * sqrt(za)
Pw = V@diag(d**(-1.0))@V.T
AT = T@A[:,i]
dmu = dy_i@Y_i.T@Pw@A[:,i]
E[:,i] = mu[i] + dmu + AT
E = post_process(E,infl,rot)
stats.assess(k,kObs,E=E)
return assimilator
# Notes on optimizers for the 'dual' EnKF-N:
# ----------------------------------------
# Using minimize_scalar:
# - Doesn't take dJdx. Advantage: only need J
# - method='bounded' not necessary and slower than 'brent'.
# - bracket not necessary either...
# Using multivariate minimization: fmin_cg, fmin_bfgs, fmin_ncg
# - these also accept dJdx. But only fmin_bfgs approaches
# the speed of the scalar minimizers.
# Using scalar root-finders:
# - brenth(dJ1, LowB, 1e2, xtol=1e-6) # Same speed as minimization
# - newton(dJ1,1.0, fprime=dJ2, tol=1e-6) # No improvement
# - newton(dJ1,1.0, fprime=dJ2, tol=1e-6, fprime2=dJ3) # No improvement
# - Newton_m(dJ1,dJ2, 1.0) # Significantly faster. Also slightly better CV?
# => Despite the inconvenience of defining analytic derivatives,
# Newton_m seems like the best option.
# - In extreme (or just non-linear h) cases,
# the EnKF-N cost function may have multiple minima.
# Then: should use more robust optimizer!
#
# For 'primal'
# ----------------------------------------
# Similarly, Newton_m seems like the best option,
# although alternatives are provided (commented out).
#
def Newton_m(fun,deriv,x0,is_inverted=False,
conf=1.0,xtol=1e-4,ytol=1e-7,itermax=10**2):
"Simple (and fast) implementation of Newton root-finding"
itr, dx, Jx = 0, np.inf, fun(x0)
norm = lambda x: sqrt(np.sum(x**2))
while ytol<norm(Jx) and xtol<norm(dx) and itr<itermax:
Dx = deriv(x0)
if is_inverted:
dx = Dx @ Jx
elif isinstance(Dx,float):
dx = Jx/Dx
else:
dx = mldiv(Dx,Jx)
dx *= conf
x0 -= dx
Jx = fun(x0)
return x0
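# Example (a sketch, not part of the assimilation code): Newton_m only needs
# the residual function and its derivative. For instance, to find the root of
# dJ1(x) = x**2 - 2 (i.e. sqrt(2)):
#
#   dJ1 = lambda x: x**2 - 2
#   dJ2 = lambda x: 2*x
#   x_root = Newton_m(dJ1, dJ2, 1.0)   # approx 1.4142
#
# In the EnKF-N code above, dJ1/dJ2 are instead the analytic first and second
# derivatives (Jp, Jpp) of the inflation cost function.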
def hyperprior_coeffs(s,N,xN=1,g=0):
"""
EnKF-N inflation prior may be specified by the constants:
- eN: Effect of unknown mean
- cL: Coeff in front of log term
These are trivial constants in the original EnKF-N,
but could be further adjusted (corrected and tuned),
as described below.
Reason 1: mode correction.
As a func of I-KH ("prior's weight"), adjust l1's mode towards 1.
As noted in Boc15, mode correction becomes necessary when R-->infty,
because then there should be no ensemble update (and also no inflation!).
Why leave the prior mode below 1 at all?
Because it sets up "tension" (negative feedback) in the inflation cycle:
the prior pulls downwards, while the likelihood tends to pull upwards.
Reason 2: Boosting the inflation prior's certainty from N to xN*N.
The aim is to take advantage of the fact that the ensemble may not
have quite as much sampling error as a fully stochastic sample,
as illustrated in section 2.1 of Raanes2018adaptive.
The tuning is controlled by:
- xN=1: is fully agnostic, i.e. assumes the ensemble is generated
from a highly chaotic or stochastic model.
- xN>1: increases the certainty of the hyper-prior,
which is appropriate for more linear and deterministic systems.
- xN<1: yields a more (than 'fully') agnostic hyper-prior,
as if N were smaller than it truly is.
- xN<=0 is not meaningful.
This parameter has not yet been explicitly described in the literature,
although it effectively constitutes a bridging of
the Jeffreys (xN=1) and Dirac (xN=Inf)
"""doc
# leanai.data.dataset
> A generic implementation for a dataset based on parsers and file providers.
Leanai Datasets generally work by you providing an input and an output type; the implementation of the
dataset fills these fields using getters and parsers.
A simple example for 2d object detection using a SimpleDataset would look like this.
```python
# Define Types
InputType = namedtuple("InputType", ["image"])
OutputType = namedtuple("OutputType", ["class_ids", "boxes_2d"])
# Instantiate Dataset
dataset = MySimpleDataset(..., InputType, OutputType, ...)
# Get Items (of types defined above)
for inp, outp in dataset:  # inp: InputType, outp: OutputType
    ...
```
A simple example for 2d object detection using a WebDataset would look like this.
```python
# Define Types
InputType = namedtuple("InputType", ["image"])
OutputType = namedtuple("OutputType", ["class_ids", "boxes_2d"])
# Instantiate Dataset
file_provider = WebDatasetFileProvider(...)
parser = MyParser(InputType, OutputType)
dataset = IterableDataset(file_provider, parser)
# Get Items (of types defined above)
for inp, outp in dataset:  # inp: InputType, outp: OutputType
    ...
```
This makes using datasets very easy; for the implementation you only need to implement getters for the
fields which you want to access. `get_image(self, sample)` will implement the support for the `image`
field. In order to make switching between datasets easier and make behaviour predictable, there is a
set of conventions to which a dataset implementation should adhere. The fields and their content should
adhere to the specification in the tables below.
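As an illustration, a minimal getter-based implementation could look like the following sketch
(hypothetical: `load_rgb_image` and the layout of `sample` are made up here, and `SimpleDataset`
is assumed to dispatch each field of the named tuples to a matching `get_<field>` getter):
```python
class MyDetectionDataset(SimpleDataset):
    def get_image(self, sample):
        # h,w,3 RGB image for this sample (see the frame based data table below)
        return load_rgb_image(sample["image_path"])
    def get_class_ids(self, sample):
        # (N,1) uint8 class ids, 0 = background (see the object based data table below)
        return sample["class_ids"]
    def get_boxes_2d(self, sample):
        # (N,4/5) boxes in centerpoint representation (c_x, c_y, w, h, theta)
        return sample["boxes_2d"]
```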
## Object Based Data (per object, i.e. per car, per pedestrian, ...)
N represents the number of objects and the indices of the arrays align.
If an invalid value is required for padding, NaN for float and -1 for uint shall be used.
| Name | Shape | Description |
|---------------|-------|--------------------------------------------------------------------------------|
| confidence | N,1 | Float between 0 and 1. (0 = no confidence, 1 = sure) |
| fg_bg_classes | N,1 | uint8 (0,1) where 1 means foreground, 0 means background |
| class_ids | N,1 | uint8 representing the class (0 = Background, 1 = Class 1, 2 = Class 2, ...) |
| instance_ids | N,1 | uint8/uint16 for InstanceID |
| occlusion | N,1 | Float representing occlusion rate. (0 = perfectly visible, 1 = fully occluded) |
| cosy | N,str | Name of the coordinate system in which the data is |
| boxes_2d | N,4/5 | Centerpoint Representation (c_x, c_y, w, h, theta) |
| boxes_3d | N,7 | Centerpoint Representation (c_x, c_y, c_z, l, w, h, theta) |
| boxes_3d      | N,10  | Centerpoint + Quaternion (c_x, c_y, c_z, l, w, h, q_w, q0, q1, q2)              |
| velocity      | N,2/3 | Velocity of the object in meters per second (v_x, v_y, v_z)                     |
| depth | N,1 | Float euclidean distance of object to cam in meters |
| skeletons_2d | N,K,2 | The 2d position of the K joints |
| skeletons_3d | N,K,3 | The 3d position of the K joints |
## Frame Based Data (per image)
h,w represent the height and width of the image.
The shape of these annotations is independent of the number of objects in a scene.
| Name | Shape | Description |
|------------------|-------|-----------------------------------------------------------------------------|
| projection | 4,3 | Projection Matrix according to the opencv standard |
| image | h,w,3 | The image in RGB format channel last (you can change that in your model) |
| scan | P,3 | Pointcloud containing P points from a lidarscan (x,y,z) |
| transform_x_to_y | 4,4 | The Rt Matrix to go from cosy X to cosy Y |
| semantic_mask | h,w,1 | Each pixel has the class_id of what is visible |
| instance_mask | h,w,1 | Each pixel has the instance_id of what is visible |
| depth_image | h,w,1 | Float encoding of euclidean distance of a pixel to the camera in meters |
## Coordinate System Conventions
Following the conventions of ISO8855 and ROS makes things easier and predictable.
This means following these conventions for the coordinate systems (all right handed).
| Name | X-Axis | Y-Axis | Z-Axis | Description |
|--------------|---------|--------|---------|-----------------------------------------------------------|
| Image Sensor | right | down | forward | only pixel stuff, use for projection |
| Ego (Cam 0) | forward | left | up | share origin with Image Sensor (3d stuff) |
| 3D Sensor | forward | left | up | 3D Data (e.g. LiDAR) follow |
| Vehicle | forward | left | up | Center of the vehicle the sensors are attached to |
| World | east | north | up | Or starting position of vehicle/robot (forward, left, up) |
With these conventions switching from dataset A to dataset B should be as easy as changing one line of
code where you instantiate the dataset.
"""
from typing import Any, Dict, Iterator, List
from torch.utils.data import IterableDataset as _IterableDataset
from torch.utils.data import Dataset as _Dataset
from .parser import IParser, Parser
from .file_provider import FileProviderSequence, FileProviderIterable
from .data_promise import DataPromise
class IIterableDataset(_IterableDataset):
"""
Interface for an iterable dataset
(also implements the torch.utils.data.IterableDataset).
You can use this interface when you expect a dataset in your code.
If sufficient, prefer IIterableDataset over ISequenceDataset, as more datasets
will implement that specification since it is a subset.
The interface requires implementations for:
* `__iter__`
* `__next__`
"""
def __next__(self) -> Any:
raise NotImplementedError("Must be implemented by subclass.")
def __iter__(self) -> Iterator[Any]:
raise NotImplementedError("Must be implemented by subclass.")
class ISequenceDataset(_Dataset):
"""
Interface for a sequence dataset
(also implements the torch.utils.data.Dataset).
You can use this interface when you expect a dataset in your code.
If sufficient, prefer IIterableDataset over ISequenceDataset, as more datasets
will implement that specification since it is a subset.
The interface requires implementations for:
* `__len__`
* `__getitem__`
* `__iter__`
* `__next__`
"""
def __next__(self) -> Any:
raise NotImplementedError("Must be implemented by subclass.")
def __iter__(self) -> Iterator[Any]:
raise NotImplementedError("Must be implemented by subclass.")
def __getitem__(self, index) -> Any:
raise NotImplementedError("Must be implemented by subclass.")
def __len__(self) -> int:
raise NotImplementedError("Must be implemented by subclass.")
class CommonDataset(object):
def __init__(self, file_provider_iterable: FileProviderIterable, parser: IParser, transformers=[], test_mode=False) -> None:
"""
A common base implementation from which all datasets inherit.
"""
super().__init__()
self._file_provider = file_provider_iterable
self._fp_iterator = None
self._parser = parser
self.transformers = []
for transformer in transformers:
self.transformers.append(transformer(test_mode=test_mode))
def _process(self, sample: Dict[str, DataPromise]) -> Any:
sample = self._parser(sample)
return self.preprocess(sample)
def preprocess(self, sample: Any) -> Any:
"""
Preprocesses samples.
The default implementation simply applies the transformers in order.
This function can be used for transforming the data representation as well as for data augmentation.
You can even overwrite this function to implement your own preprocessing from scratch.
:param sample: A sample as provided by the parser (what your dataset returns if no preprocess or transformers are provided).
:return: A sample in the format as the algorithm needs it.
"""
for transformer in self.transformers:
sample = transformer(sample)
return sample
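# Example transformer (a sketch; the normalization and the sample layout are only
# illustrative): a transformer is any class whose constructor accepts `test_mode`
# and whose instances are callable on a sample, as used by preprocess() above.
#
#   class NormalizeImage:
#       def __init__(self, test_mode=False):
#           self.test_mode = test_mode
#       def __call__(self, sample):
#           inp, outp = sample
#           return inp._replace(image=inp.image / 255.0), outp
#
#   dataset = SequenceDataset(file_provider, parser, transformers=[NormalizeImage])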
def __next__(self) -> Any:
if self._fp_iterator is None:
raise RuntimeError("You must first call iter(...) before you can use next(...).")
sample = self._fp_iterator.__next__()
return self._process(sample)
def __iter__(self) -> Iterator[Any]:
self._fp_iterator = self._file_provider.__iter__()
return self
def __len__(self) -> int:
return len(self._file_provider)
class IterableDataset(CommonDataset, IIterableDataset):
def __init__(self, file_provider_iterable: FileProviderIterable, parser: IParser, transformers=[], test_mode=False) -> None:
"""
An implementation of the IIterableDataset using fileprovider and parser.
This should be used when using WebDatasets or streamed datasets.
With this dataset random access is not possible and it can only be read in order.
Thus the file provider is a stream (iterable).
Do not inherit from this class in your dataset implementation; provide a file provider and a
parser, or consider using and inheriting from the SimpleDataset.
:param file_provider_iterable: The iterable file provider providing samples to the parser.
:param parser: The parser converting samples into a usable format.
:param transformers: Transformers that are applied on the dataset to convert the format to what the model requires. (Default: [])
:param test_mode: A parameter that is passed to the constructor of the transformers (Default: False).
"""
super().__init__(file_provider_iterable, parser, transformers=transformers, test_mode=test_mode)
class SequenceDataset(CommonDataset, ISequenceDataset):
def __init__(self, file_provider_sequence: FileProviderSequence, parser: IParser, transformers=[], test_mode=False) -> None:
"""
An implementation of the ISequenceDataset using fileprovider and parser.
This should be used when using regular file based datasets.
Random access is possible and might be used by a dataloader.
Thus to enable random access the file provider is a sequence, allowing access
__author__ = 'hofmann'
__version__ = '0.0.6'
import os
import random
import numpy.random as np_random
import tempfile
from scripts.MetaDataTable.metadatatable import MetadataTable
from scripts.StrainSimulationWrapper.strainsimulationwrapper import StrainSimulationWrapper
from scripts.StrainSelector.strainselector import StrainSelector
from scripts.PopulationDistribution.populationdistribution import PopulationDistribution
from scripts.GenomePreparation.genomepreparation import GenomePreparation
from scripts.Validator.validator import Validator
# ##################################
#
# Community
#
# ##################################
class Community(Validator):
def __init__(
self, identifier, genomes_total, genomes_real, limit_per_otu, file_path_metadata_table,
file_path_genome_locations, file_path_gff_locations, ratio, mode,
log_mu, log_sigma, gauss_mu=None, gauss_sigma=None,
logfile=None, verbose=True, debug=False):
"""
Accumulation of all community related information
@param identifier: Community identifier
@type identifier: str | unicode
@param genomes_total: Total amount of genomes to be drawn from this community
@type genomes_total: int
@param genomes_real: Amount of real genomes to be drawn, the rest will be drawn from simulated ones
@type genomes_real: int
@param limit_per_otu: A maximum for drawn genomes belonging to the same OTU, unless more are required to be drawn
@type limit_per_otu: int
@param file_path_metadata_table: Table of Metadata for each genome of the community
@type file_path_metadata_table: str | unicode
@param file_path_genome_locations: Format: 'id \t file path to fasta file'
@type file_path_genome_locations: str | unicode
@param file_path_gff_locations: Format: 'id \t file path to gff file'
@type file_path_gff_locations: str | unicode
@param ratio: If one community has ratio=1 and another has ratio=2, the latter community will be twice the size
@type ratio: int | long | float
@param mode: Valid: 'replicates', 'timeseries_normal', 'timeseries_lognormal', 'differential'
@type mode: str | unicode
@param log_mu: Mean of drawn log distribution
@type log_mu: int | long | float
@param log_sigma: Standard deviation of log distribution
@type log_sigma: int | long | float
@param gauss_mu: Mean of drawn gauss distribution
@type gauss_mu: int | long | float
@param gauss_sigma: Standard deviation of gauss distribution
@type gauss_sigma: int | long | float
@param logfile: file handler or file path to a log file
@type logfile: file | FileIO | StringIO | basestring
@param verbose: More output and user interaction is enabled.
@type verbose: bool
@param debug: Display debug messages
@type debug: bool
"""
assert genomes_real is None or genomes_real <= genomes_total
assert mode is None or mode in PopulationDistribution.get_valid_modes()
if verbose is None:
verbose = False
super(Community, self).__init__(label="Community", logfile=logfile, verbose=verbose, debug=debug)
if genomes_real is None:
genomes_real = genomes_total
self.genomes_real = genomes_real
self.genomes_total = genomes_total
self.limit_per_otu = limit_per_otu
self.file_path_metadata_table = self.get_full_path(file_path_metadata_table)
self.file_path_genome_locations = self.get_full_path(file_path_genome_locations)
self.file_path_gff_locations = None
if file_path_gff_locations is not None:
self.file_path_gff_locations = self.get_full_path(file_path_gff_locations)
self.ratio = ratio
self.log_mu = log_mu
self.log_sigma = log_sigma
self.gauss_mu = gauss_mu
self.gauss_sigma = gauss_sigma
self.mode = mode
self.simulate_strains = False
if genomes_real and genomes_real < genomes_total:
self.simulate_strains = True
self.verbose = verbose
self.id = identifier
def has_valid_values(self):
if not self.validate_characters(self.id) or self.id == '':
return False
if not self.validate_characters(self.mode) or self.mode == '':
return False
if not self.validate_number(self.genomes_total, self.genomes_real):
return False
if not self.validate_number(self.genomes_real, 1, self.genomes_total):
return False
if not self.validate_number(self.ratio, 0, zero=False):
return False
if not self.validate_number(self.log_mu, 0, zero=False):
return False
if not self.validate_number(self.log_sigma, 0, zero=True):
return False
if not self.validate_number(self.gauss_mu):
return False
if not self.validate_number(self.gauss_sigma):
return False
if not self.validate_number(self.limit_per_otu, 1):
return False
if not self.validate_file(self.file_path_metadata_table):
return False
if not self.validate_file(self.file_path_genome_locations):
return False
if self.file_path_gff_locations and not self.validate_file(self.file_path_gff_locations):
return False
return True
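# Usage sketch (identifiers, counts and file paths are purely illustrative):
#   community = Community(
#       identifier="0", genomes_total=10, genomes_real=6, limit_per_otu=3,
#       file_path_metadata_table="metadata.tsv",
#       file_path_genome_locations="genome_locations.tsv",
#       file_path_gff_locations=None,
#       ratio=1, mode="differential", log_mu=1, log_sigma=2)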
# ##################################
#
# CommunityDesign
#
# ##################################
class CommunityDesign(GenomePreparation):
"""
For the design of an artificial community
"""
# _filename_distribution_comunity = "distribution_{comunity_index}_{sample_index}.txt"
_filename_distribution_comunity_joint = "distribution_{sample_index}.txt"
# TODO: plasmids within genome files
# used_genomes_with_plasmids[genome_id] = random.randint(7, 10)
# distribution = str(int(distribution) * factor)
def __init__(
self, column_name_genome_id="genome_ID", column_name_otu="OTU",
column_name_novelty_category="novelty_category", column_name_ncbi="NCBI_ID", column_name_source="source",
max_processors=1, tmp_dir=None, logfile=None, verbose=True, debug=False, seed=None):
"""
@param column_name_genome_id: Column name of genome ids in the metadata table
@type column_name_genome_id: str | unicode
@param column_name_otu: Column name of otu ids in the metadata table
@type column_name_otu: str | unicode
@param column_name_novelty_category: Column name of novelty category in the metadata table
@type column_name_novelty_category: str | unicode
@param column_name_ncbi: Column name of taxonomic id assignment in the metadata table
@type column_name_ncbi: str | unicode
@param column_name_source: Column name of 'source' in the metadata table
@type column_name_source: str | unicode
@param max_processors: maximum number of processors available to be used
@type max_processors: long | int
@param tmp_dir: working directory or place temporary files can be stored
@type tmp_dir: str | unicode
@param logfile: file handler or file path to a log file
@type logfile: file | FileIO | StringIO | basestring
@param verbose: Not verbose means that only warnings and errors will be passed to stream
@type verbose: bool
@param debug: Display debug messages
@type debug: bool
"""
super(CommunityDesign, self).__init__(label="CommunityDesign", logfile=logfile, verbose=verbose, debug=debug)
if seed is not None:
random.seed(seed)
np_random.seed(abs(hash(seed)) % 4294967295) # numpy accepts only 32 bit integers
# self._seed = seed
# self._filename_distribution = filename_prefix_distribution + "{index}.txt"
self._column_name_genome_id = column_name_genome_id
self._column_name_otu = column_name_otu
self._column_name_novelty_category = column_name_novelty_category
self._column_name_source = column_name_source
self._column_name_ncbi = column_name_ncbi
assert isinstance(max_processors, (long, int))
assert max_processors > 0
self._max_processors = max_processors
if tmp_dir is None:
tmp_dir = tempfile.gettempdir()
self._tmp_dir = tmp_dir
assert self.validate_dir(self._tmp_dir)
@staticmethod
def get_distribution_file_paths(directory, number_of_samples):
"""
Generate directory paths for each sample
@param directory: Output directory
@type directory: str | unicode
@param number_of_samples: Number of samples
@type number_of_samples: int | long
@return: list of directories
@rtype: list[str | unicode]
"""
file_path = os.path.join(directory, CommunityDesign._filename_distribution_comunity_joint)
return [file_path.format(sample_index=sample_index) for sample_index in range(number_of_samples)]
@staticmethod
def _write_distribution_file(stream_out, genome_id_to_abundance):
"""
Write abundance file for each sample
@param stream_out: Output stream
@type stream_out: file | FileIO | StringIO
@param genome_id_to_abundance: Drawn distribution for each genome id
@type genome_id_to_abundance: dict[str|unicode, list[float]]
"""
for genome_id in genome_id_to_abundance:
distributions = [str(abundance) for abundance in genome_id_to_abundance[genome_id]]
stream_out.write("{id}\t{distr}\n".format(id=genome_id, distr='\t'.join(distributions)))
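# Example of the resulting file content (hypothetical ids and abundances),
# one genome per line and one tab-separated abundance column per sample:
#   Genome1.0   0.21    0.05    0.33
#   Genome1.1   0.02    0.40    0.11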
def design_community(
self, file_path_distributions, community, number_of_samples, metadata_table,
directory_out_metadata, directory_in_template=None):
"""
Design artificial community, of a specific design, with different distributions for each sample
@param file_path_distributions: File path where distributions will be written to
@type file_path_distributions: str | unicode
@param community: Input data for the creation of a community
@type community: Community
@param number_of_samples: Amount of samples to be simulated
@type number_of_samples: int
@param metadata_table: Will contain metadata of all (simulated) genomes/plasmids drawn
@type metadata_table: MetadataTable
@param directory_out_metadata: Metadata tables, separated into chosen and not chosen genomes, are written here
@type directory_out_metadata: str | unicode
@param directory_in_template: contains template data for strain simulation
@type directory_in_template: str | unicode
@return: Dictionary with drawn genome ids as key and file paths as value
@rtype: dict[str|unicode, str|unicode]
"""
assert isinstance(community, Community)
assert isinstance(metadata_table, MetadataTable)
number_of_strains = community.genomes_total
# pick how much a strain will be simulated
genome_amounts = []
strain_simulation = None
if community.simulate_strains:
strain_simulation = StrainSimulationWrapper(
executable_sim=None,
directory_template=directory_in_template,
column_name_gid=self._column_name_genome_id,
column_name_ncbi=self._column_name_ncbi,
column_name_source=self._column_name_source,
separator='\t',
filename_prefix="simulated_",
keep_original=True,
max_processors=self._max_processors,
tmp_dir=self._tmp_dir,
logfile=self._logfile, verbose=self._verbose, debug=self._debug,
# seed=self._seed
)
probability = None # 1-options.communities[community_id]["evolve"]
genome_amounts = strain_simulation.get_genome_amounts(
probability=probability,
max_genome_amount=community.genomes_total,
num_real_genomes=community.genomes_real,
silent=not community.verbose
)
number_of_strains = len(genome_amounts)
# draw strains
self._logger.info("Drawing strains.")
metadata_table_community = MetadataTable(logfile=self._logfile, verbose=self._verbose)
metadata_table_community.read(community.file_path_metadata_table, column_names=True)
strain_selector = StrainSelector(
column_name_genome_id=self._column_name_genome_id,
column_name_otu=self._column_name_otu,
column_name_novelty_category=self._column_name_novelty_category,
logfile=self._logfile, verbose=self._verbose, debug=self._debug
)
list_of_drawn_genome_id = strain_selector.get_drawn_genome_id(
metadata_table=metadata_table_community,
number_of_strains=number_of_strains,
number_of_strains_per_otu=community.limit_per_otu
)
# write unused data to separate file
old_base_name = os.path.basename(community.file_path_metadata_table)
file_prefix, extention = os.path.splitext(old_base_name)
new_file_name = "unused_c{index}_{prefix}{ext}".format(
prefix=file_prefix,
index=community.id,
ext=extention)
metadata_new_file_path = os.path.join(directory_out_metadata, new_file_name)
metadata_table_community.write(
metadata_new_file_path,
exclude=True,
value_list=list_of_drawn_genome_id,
key_column_name=self._column_name_genome_id,
column_names=True)
# get path for every genome
genome_id_to_file_path_gff = None
if community.file_path_gff_locations:
genome_id_to_file_path_gff = self._get_genome_id_to_path_map(
community.file_path_gff_locations, list_of_drawn_genome_id)
genome_id_to_path_map = self._get_genome_id_to_path_map(
community.file_path_genome_locations, list_of_drawn_genome_id)
# concatenate
metadata_table_community.reduce_rows_to_subset(list_of_drawn_genome_id, self._column_name_genome_id)
metadata_table.concatenate(metadata_table_community, strict=False)
# validate correct format of files
self._logger.info("Validating raw sequence files!")
assert self.validate_format(
list_of_file_paths=genome_id_to_path_map.values(),
file_format="fasta",
sequence_type="dna",
ambiguous=True
), "Validation of file format failed!"
# simulate diversity around strains
if community.simulate_strains:
genome_id_to_amounts = strain_simulation.get_genome_id_to_amounts(list_of_drawn_genome_id, genome_amounts)
strain_simulation.simulate_strains(
meta_table=metadata_table,
genome_id_to_amounts=genome_id_to_amounts,
genome_id_to_file_path_genome=genome_id_to_path_map,
genome_id_to_file_path_gff=genome_id_to_file_path_gff)
# adopt new list that includes simulated strains
self._logger.info("Validating simulated sequence files!")
for genome_id, file_path in genome_id_to_path_map.iteritems():
if genome_id in list_of_drawn_genome_id:
continue
assert self.validate_sequence_file(
file_path,
file_format="fasta",
sequence_type="dna",
ambiguous=True)
list_of_drawn_genome_id = genome_id_to_path_map.keys()
# get community distributions
population_distribution = PopulationDistribution(
logfile=self._logfile, verbose=self._verbose, debug=self._debug)
list_of_distributions = population_distribution.get_lists_of_distributions(
size_of_population=len(list_of_drawn_genome_id),
number_of_samples=number_of_samples,
modus=community.mode,
log_mu=community.log_mu, log_sigma=community.log_sigma,
gauss_mu=community.gauss_mu, gauss_sigma=community.gauss_sigma,
view_distribution=community.verbose
)
# move and clean up files (removes sequence description)
# genome_id_to_total_length = self.move_genome_files(
# genome_id_to_path_map,
# directory_output=directory_out_genomes,
# sequence_min_length=min_sequence_length,
# set_of_sequence_names=set_of_sequence_names)
# write distribution file
# genome_id_to_distributions = self._get_genome_id_to_distributions(list_of_drawn_genome_id, list_of_distributions)
assert len(list_of_drawn_genome_id) == len(list_of_distributions)
genome_id_to_distributions = dict(zip(list_of_drawn_genome_id, list_of_distributions))
# genome_id_to_file_name = self._get_genome_id_to_file_name(genome_id_to_path_map)
with open(file_path_distributions, 'w') as stream_out:
self._write_distribution_file(stream_out=stream_out, genome_id_to_abundance=genome_id_to_distributions)
return genome_id_to_path_map
<filename>src/data/data_studio.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# ============================================================================ #
# Project : Airbnb #
# Version : 0.1.0 #
# File : data_classes.py #
# Python : 3.8.0 #
# ---------------------------------------------------------------------------- #
# Author : <NAME> #
# Company: DecisionScients #
# Email : <EMAIL> #
# ---------------------------------------------------------------------------- #
# Created : Monday, 6th January 2020 11:38:57 am #
# Last Modified: Monday, 6th January 2020 11:50:59 am #
# Modified By : <NAME> (<EMAIL>>) #
# ---------------------------------------------------------------------------- #
# License: BSD #
# Copyright (c) 2020 DecisionScients #
# ============================================================================ #
"""Data cleaning, transforming, and analysis for machine learning.
This module includes a Data Object abstraction, support for data cleaning
and preparation as well as analysis and inference capabilities commonly
performed during end-to-end analysis and machine learning projects.
This analysis and modeling framework centers upon three capabilities:
1. Data as Objects : Data organized and managed as objects and metadata
2. Data Curation : Cleaning, transforming, normalizing and curating
3. Data Influence : Data-driven learning and change.
Accordingly, the core offering is the Data object model, a composite
data class that integrates basic analysis and metadata. Data processing and
development functionality extends the data objects as they move
through the AI process. An analysis and inference module is about
inference, insight, and storytelling.
"""
#%%
from datetime import datetime
import os
from pathlib import Path
import platform
import psutil
import site
import time
import uuid
PROJECT_DIR = Path(__file__).resolve().parents[1]
site.addsitedir(PROJECT_DIR)
from abc import ABC, abstractmethod
from collections import OrderedDict
import pandas as pd
pd.set_option('display.max_columns', None)
from src.analysis.univariate import Describe
from src.data.file_classes import File
from src.utils.system import get_size
from .constants import DTYPES
# --------------------------------------------------------------------------- #
# DataComponent #
# --------------------------------------------------------------------------- #
"""Abstract class that defines the interface for all Data classes.
Parameters
----------
name : str
The name to assign to the DataComponent object
df : DataFrame
The DataFrame containing the data
"""
class DataComponent(ABC):
def __init__(self, path):
self._id = uuid.uuid4()
self._name = os.path.basename(path)
self._path = path
self._df = pd.DataFrame()
self._summary = pd.DataFrame()
# meta data
self._metadata = {}
self._metadata['name'] = os.path.basename(path)
self._metadata['path'] = path
self._metadata['creator'] = os.getlogin()
self._metadata['created'] = time.ctime(os.path.getctime(__file__))
self._metadata['modifier'] = os.getlogin()
self._metadata['modified'] = time.ctime(os.path.getmtime(__file__))
def metadata(self):
"""Prints object metadata."""
print("\n#","="*30, "Author Information", "="*30,"#")
print(f"Id: {self._id}")
print(f"Creator: {self._metadata['creator']}")
print(f"Created: {self._metadata['created']}")
print(f"Modifier: {self._metadata['modifier']}")
print(f"Modified: {self._metadata['modified']}")
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
return self
@abstractmethod
def summarize(self):
pass
@abstractmethod
def get_data(self, name):
"""Returns the Data object designated by the name."""
pass
@abstractmethod
def add(self, data):
pass
@abstractmethod
def remove(self, name):
pass
# --------------------------------------------------------------------------- #
# DataCollection #
# --------------------------------------------------------------------------- #
class DataCollection(DataComponent):
"""The Composite of the Data Object Model."""
def __init__(self, name):
super(DataCollection, self).__init__(name)
self._data_collection = OrderedDict()
def merge_data(self):
"""Merges all DataSets and DataCollections into a single DataFrame."""
merged = pd.DataFrame()
for _, data_object in self._data_collection.items():
df = data_object.get_data()
merged = pd.concat([merged,df], axis=0)
return merged
def metadata(self):
"""Prints DataCollection metadata."""
super(DataCollection, self).metadata()
print("="*30, "DataType Summary", "="*30)
merged = self.merge_data()
metadata = pd.DataFrame()
metadata[self._name] = merged.dtypes.value_counts()
print(metadata)
return metadata
def summarize(self):
"""Descriptive summaries for DataCollection and DataSet objects.
Returns
-------
dict
    Containing quantitative and qualitative descriptive statistics.
"""
describe = Describe()
df = self.merge_data()
describe.fit(df)
summary = describe.get_analysis()
print("#","=*35 Quantitative Analysis 35*=","#")
print(summary['quant'])
print("#","=*35 Quantitative Analysis 35*=","#")
print(summary['qual'])
return summary
def get_data(self, name=None):
"""Return all data or the named dataset or collection.
Parameters
----------
name : str
The name of the DataSet or DataCollection object.
"""
if name:
return self._data_collection[name]
else:
return self._data_collection
def add(self, data):
"""Adds a DataSet or DataCollection object to the collection.
Parameters
----------
dataset : DataSet or DataCollection object.
"""
name = data.name
self._data_collection[name] = data
return self
def remove(self, name):
"""Removes a DataSet or DataCollection object from the collection."""
del self._data_collection[name]
return self
def replace_string(self, pattern, replace, columns=None, regex=True):
"""Regex capable, string replace method for DataSet objects.
Parameters
----------
pattern : str
A (regex) pattern to find in the DataSet or designated columns.
replace : str
A string sequence to replace the pattern
columns : array-like (Optional)
List of columns to which the replacement should be applied.
regex : Bool
Indicates whether the pattern and replacement are valid regex.
"""
for _, data_object in self._data_collection.items():
if columns:
data_object.replace_string(pattern, replace, columns, regex)
else:
data_object.replace_string(pattern, replace, regex=regex)
self.add(data_object)
def cast_types(self, data_types):
"""Cast objects of the dataframe to designated types."""
for _, data_object in self._data_collection.items():
data_object.cast_types(data_types)
self.add(data_object)
def import_data(self, directory, columns=None):
"""Creates DataSet objects, imports the data and adds the DataSets.
Parameters
----------
directory : str
The directory containing the files to import.
columns : list
List of column names to return.
"""
filenames = os.listdir(directory)
for filename in filenames:
name = filename.split(".")[0]
dataset = DataSet(name=name)
path = os.path.join(directory, filename)
dataset.import_data(filename=path, columns=columns)
self.add(data=dataset)
return self
def export_data(self, directory, file_format='csv'):
"""Exports the data from contained DataSets to the directory in format.
Parameters
----------
directory : str
The directory to which the data will be exported.
file_format : str
The format in which the data will be saved.
"""
for name, dataset in self._data_collection.items():
filename = name + "." + file_format
path = os.path.join(directory, filename)
dataset.export_data(filename=path)
return self
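# Usage sketch (illustrative only; the collection name and directory paths are hypothetical):
#   collection = DataCollection(name="airbnb")
#   collection.import_data(directory="data/raw")        # one DataSet per file
#   collection.summarize()                              # quant + qual summaries
#   collection.export_data(directory="data/processed", file_format="csv")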
# --------------------------------------------------------------------------- #
# DataSet #
# --------------------------------------------------------------------------- #
class DataSet(DataComponent):
"""Base class for all DataSet subclasses.
Parameters
----------
name : str
The name of the dataset.
"""
def __init__(self, name):
super(DataSet, self).__init__(name)
def metadata(self):
"""Prints DataSet metadata."""
super(DataSet, self).metadata()
print("#","="*30, "DataType Summary", "="*30,"#")
metadata = pd.DataFrame()
metadata[self._name] = self._df.dtypes.value_counts()
print(metadata)
print("#","="*30, "DataType Detail", "="*30,"#")
metadata = pd.DataFrame()
metadata[self._name] = self._df.dtypes.T
print(metadata)
return metadata
def summarize(self, verbose=True):
"""Prints DataSet descriptive statistics."""
describe = Describe()
describe.fit(self._df)
summary = describe.get_analysis()
if verbose:
print("\n#=*35 Quantitative Analysis 35*=#")
print(summary['quant'])
print("#=*35 Qualitative Analysis 35*=#")
print(summary['qual'])
return summary
def add(self, data):
pass
def remove(self, name):
pass
def replace_string(self, pattern, replace, columns=None, regex=True):
"""Regex capable, string replace method for DataSet objects.
Parameters
----------
pattern : str
A (regex) pattern to find in the DataSet or designated columns.
replace : str
A string sequence to replace the pattern
columns : array-like (Optional)
List of columns to which the replacement should be applied.
regex : Bool
Indicates whether the pattern and replacement are valid regex.
"""
if columns:
self._df[columns] = self._df[columns].replace({pattern:replace}, regex=regex)
else:
self._df = self._df.replace({pattern:replace}, regex=regex)
def import_data(self, filename, columns=None):
"""Reads the data from filename and appends it to the dataframe member."""
f = File()
df = f.read(filename, columns=columns)
self._df = pd.concat([self._df, df], axis=0, sort=False)
return self
def export_data(self, filename):
"""Writes the data to the location designated by the filename."""
f = File()
f.write(filename, self._df)
return self
def get_data(self, attribute=None):
"""Method to return all data or one, or more attributes.
Parameters
----------
attribute : str or list (Optional)
The attribute or attributes to retrieve
Returns
-------
DataFrame or Series
"""
if attribute is not None:
return self._df[attribute]
return self._df
from .constants import DTYPES
# --------------------------------------------------------------------------- #
# TYPE CASTER #
# --------------------------------------------------------------------------- #
class TypeCaster():
"""Type casting, conversions, normalization, standardization, transformation."""
types = ["BOOL", "CATEGORY", "DATETIME", "FLOAT", "INT", "OBJECT"]
def cast_bool(self, df, labels):
"""Casts the labeled data as type boolean."""
for label in labels:
df[label] = df[label].astype('bool')
return df
def cast_category(self, df, labels):
"""Casts the labeled data as type category."""
for label in labels:
df[label] = df[label].astype('category')
return df
def cast_datetime(self, df, labels):
"""Casts the labeled data as type datetime."""
for label in labels:
df[label] = pd.to_datetime(df[label])
return df
def cast_float(self, df, labels):
"""Casts the labeled data as type float."""
for label in labels:
df[label] = df[label].astype('float')
return df
def cast_int(self, df, labels):
"""Casts the labeled data as type integer."""
for label in labels:
df[label] = df[label].astype('int')
return df
def cast_object(self, df, labels):
"""Casts the labeled data as type object."""
for label in labels:
df[label] = df[label].astype('object')
return df
# --------------------------------------------------------------------------- #
# QUANT STUDIO #
# --------------------------------------------------------------------------- #
class QuantStudio(ABC):
"""Abstract base class and interface for the treatment of quantitative data."""
def __init__(self, name):
self._id = uuid.uuid4()
self._name = name
self._creator = os.getlogin()
self._created = time.ctime(os.path.getctime(__file__))
self._modifier = os.getlogin()
self._modified = time.ctime(os.path.getmtime(__file__))
def fit(self, dataset, y=None):
pass
def transform(self, dataset, y=None):
pass
def reverse(self, dataset, y=None):
pass
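# A concrete treatment would subclass QuantStudio and implement fit/transform/
# reverse. Minimal sketch (hypothetical; not part of this module):
#   class Standardizer(QuantStudio):
#       def fit(self, dataset, y=None):
#           self._mean, self._std = dataset.mean(), dataset.std()
#           return self
#       def transform(self, dataset, y=None):
#           return (dataset - self._mean) / self._std
#       def reverse(self, dataset, y=None):
#           return dataset * self._std + self._mean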
# --------------------------------------------------------------------------- #
# RINSE
<reponame>BensonRen/AEM_DIM_Bench
"""
The class wrapper for the networks
"""
# Built-in
import os
import time
import sys
sys.path.append('../utils/')
# Torch
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
# from torchsummary import summary
from torch.optim import lr_scheduler
from utils.helper_functions import simulator
from utils.evaluation_helper import compare_truth_pred
# from mdn_tony_duan import MDN as mdn
from mdn_manu_joseph import MDN as mdn
# Libs
import numpy as np
from math import inf
import matplotlib.pyplot as plt
import pandas as pd
# Own module
from utils.time_recorder import time_keeper
from utils.helper_functions import put_param_into_folder, write_flags_and_BVE
########################################
# The object to help regulate training #
########################################
class Network(object):
def __init__(self, model_fn, flags, train_loader, test_loader,
ckpt_dir=os.path.join(os.path.abspath(''), 'models'),
inference_mode=False, saved_model=None):
self.model_fn = model_fn # The model maker function
self.flags = flags # The Flags containing the specs
if inference_mode: # If inference mode, use saved model
self.ckpt_dir = os.path.join(ckpt_dir, saved_model)
self.saved_model = saved_model
print("This is inference mode, the ckpt is", self.ckpt_dir)
else: # training mode, create a new ckpt folder
if flags.model_name is None: # use custom name if provided
self.ckpt_dir = os.path.join(ckpt_dir, time.strftime('%Y%m%d_%H%M%S', time.localtime()))
else:
self.ckpt_dir = os.path.join(ckpt_dir, flags.model_name)
self.model = self.create_model() # The model itself
self.optm = None # The optimizer: Initialized at train() due to GPU
self.optm_eval = None # The eval_optimizer: Initialized at eva() due to GPU
self.lr_scheduler = None # The lr scheduler: Initialized at train() due to GPU
self.train_loader = train_loader # The train data loader
self.test_loader = test_loader # The test data loader
self.log = SummaryWriter(self.ckpt_dir) # Create a summary writer for keeping the summary to the tensor board
self.best_validation_loss = float('inf') # Set the BVL to large number
def make_optimizer_eval(self, geometry_eval):
"""
The function to make the optimizer during evaluation time.
The difference from optm is that it does not have regularization and it only optimizes the geometry_eval tensor
:return: the optimizer_eval
"""
if self.flags.optim == 'Adam':
op = torch.optim.Adam([geometry_eval], lr=self.flags.lr)
elif self.flags.optim == 'RMSprop':
op = torch.optim.RMSprop([geometry_eval], lr=self.flags.lr)
elif self.flags.optim == 'SGD':
op = torch.optim.SGD([geometry_eval], lr=self.flags.lr)
else:
raise Exception("Your Optimizer is neither Adam, RMSprop or SGD, please change in param or contact Ben")
return op
def create_model(self):
"""
Function to create the network module from provided model fn and flags
:return: the created nn module
"""
model = self.model_fn(self.flags)
# summary(model, input_size=(128, 8))
# print(model)
return model
def make_loss(self, pi, sigma, mu, labels=None, warmup=None, warmup_threshold=-1):
"""
The special loss for mdn
:param pi, sigma, mu: The mixture weights, scales and means output by the network
:param labels: The ground truth labels
:param warmup: The warmup process for the mean to get the range faster
:return: the total loss
"""
#return mdn.new_mdn_loss(pi, sigma, mu, labels)
return self.model.mdn_loss(pi, sigma, mu, labels)
# return loss
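# For reference: the MDN loss delegated to above is typically the negative
# log-likelihood of the mixture (a sketch of the idea, not the library's exact code):
#   loss = -mean_over_batch( log( sum_k pi_k * N(labels; mu_k, sigma_k) ) )
# evaluated with log-sum-exp for numerical stability.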
def make_optimizer(self):
"""
Make the corresponding optimizer from the flags. Only below optimizers are allowed. Welcome to add more
:return:
"""
if self.flags.optim == 'Adam':
op = torch.optim.Adam(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)
elif self.flags.optim == 'RMSprop':
op = torch.optim.RMSprop(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)
elif self.flags.optim == 'SGD':
op = torch.optim.SGD(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)
else:
raise Exception("Your Optimizer is neither Adam, RMSprop or SGD, please change in param or contact Ben")
return op
def make_lr_scheduler(self, optm):
"""
Make the learning rate scheduler as instructed. More modes can be added to this, current supported ones:
1. ReduceLROnPlateau (decrease lr when validation error stops improving)
:return:
"""
return lr_scheduler.ReduceLROnPlateau(optimizer=optm, mode='min',
factor=self.flags.lr_decay_rate,
patience=10, verbose=True, threshold=1e-5)
def save(self):
"""
Saving the model to the current check point folder with name best_model_forward.pt
:return: None
"""
# torch.save(self.model.state_dict, os.path.join(self.ckpt_dir, 'best_model_state_dict.pt'))
torch.save(self.model, os.path.join(self.ckpt_dir, 'best_model_forward.pt'))
def load(self):
"""
Loading the model from the check point folder with name best_model_forward.pt
:return:
"""
# self.model.load_state_dict(torch.load(os.path.join(self.ckpt_dir, 'best_model_state_dict.pt')))
if torch.cuda.is_available():
self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model_forward.pt'))
else:
self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model_forward.pt'), map_location=torch.device('cpu'))
def train(self):
"""
The major training function. This would start the training using information given in the flags
:return: None
"""
cuda = True if torch.cuda.is_available() else False
if cuda:
self.model.cuda()
# Construct optimizer after the model moved to GPU
self.optm = self.make_optimizer()
self.lr_scheduler = self.make_lr_scheduler(self.optm)
# Time keeping
tk = time_keeper(time_keeping_file=os.path.join(self.ckpt_dir, 'training time.txt'))
for epoch in range(self.flags.train_step):
# Set to Training Mode
train_loss = 0
# boundary_loss = 0 # Unnecessary during training since we provide geometries
self.model.train()
for j, (geometry, spectra) in enumerate(self.train_loader):
if cuda:
geometry = geometry.cuda() # Put data onto GPU
spectra = spectra.cuda() # Put data onto GPU
self.optm.zero_grad() # Zero the gradient first
# This is mdn manu joseph
pi, sigma, mu = self.model(spectra) # Get the output
loss = self.make_loss(pi, sigma, mu, geometry, epoch) # Get the loss tensor
# This is for mdn tony
# loss = torch.mean(self.model.loss(spectra, geometry))
loss.backward() # Calculate the backward gradients
# gradient clipping
torch.nn.utils.clip_grad_value_(self.model.parameters(), 1)
self.optm.step() # Move one step the optimizer
train_loss += loss # Aggregate the loss
# Calculate the avg loss of training
train_avg_loss = train_loss.cpu().data.numpy() / (j + 1)
if epoch % self.flags.eval_step == 0: # For eval steps, do the evaluations and tensor board
# Record the training loss to the tensorboard
self.log.add_scalar('Loss/train', train_avg_loss, epoch)
# Set to Evaluation Mode
self.model.eval()
print("Doing Evaluation on the model now")
test_loss = 0
for j, (geometry, spectra) in enumerate(self.test_loader): # Loop through the eval set
if cuda:
geometry = geometry.cuda()
spectra = spectra.cuda()
# This is mdn <NAME>
pi, sigma, mu = self.model(spectra) # Get the output
loss = self.make_loss(pi, sigma, mu, geometry, epoch) # Get the loss tensor
# This is for mdn tony
# pi, mu_sigma = self.model(spectra)
# loss = torch.mean(self.model.loss(spectra, geometry))
test_loss += loss.detach().cpu().numpy()
# Record the testing loss to the tensorboard
test_avg_loss = test_loss / (j+1)
self.log.add_scalar('Loss/test', test_avg_loss, epoch)
print("This is Epoch %d, training loss %.5f, validation loss %.5f"\
% (epoch, train_avg_loss, test_avg_loss ))
# Model improving, save the model down
if test_avg_loss < self.best_validation_loss:
self.best_validation_loss = test_avg_loss
self.save()
print("Saving the model down...")
write_flags_and_BVE(self.flags, self.best_validation_loss, self.ckpt_dir)
if self.best_validation_loss < self.flags.stop_threshold:
print("Training finished EARLIER at epoch %d, reaching loss of %.5f" %\
(epoch, self.best_validation_loss))
return None
# Learning rate decay upon plateau
self.lr_scheduler.step(train_avg_loss)
self.log.close()
tk.record(1) # Record the total time of the training period
def evaluate(self, save_dir='data/', prefix=''):
self.load() # load the model as constructed
cuda = True if torch.cuda.is_available() else False
if cuda:
self.model.cuda()
self.model.eval()
saved_model_str = self.saved_model.replace('/', '_') + prefix
# Get the file names
Ypred_file = os.path.join(save_dir, 'test_Ypred_{}.csv'.format(saved_model_str))
Xtruth_file = os.path.join(save_dir, 'test_Xtruth_{}.csv'.format(saved_model_str))
Ytruth_file = os.path.join(save_dir, 'test_Ytruth_{}.csv'.format(saved_model_str))
Xpred_file = os.path.join(save_dir, 'test_Xpred_{}.csv'.format(saved_model_str))
# keep time
tk = time_keeper(os.path.join(save_dir, 'evaluation_time.txt'))
# Open those files to append
with open(Xtruth_file, 'a') as fxt,open(Ytruth_file, 'a') as fyt,\
open(Ypred_file, 'a') as fyp, open(Xpred_file, 'a') as fxp:
# Loop through the eval data and evaluate
for ind, (geometry, spectra) in enumerate(self.test_loader):
if cuda:
geometry = geometry.cuda()
spectra = spectra.cuda()
# Initialize the geometry first
print('model in eval:', self.model)
# This is mdn manu joseph
pi, sigma, mu = self.model(spectra) # Get the output
# Xpred = mdn.sample(pi, sigma, mu).detach().cpu().numpy()
Xpred = self.model.sample(pi, sigma, mu).detach().cpu().numpy()
# This is for mdn tony
# Xpred = self.model.sample(spectra).detach().cpu().numpy()
# Saving the files down
np.savetxt(fxt, geometry.cpu().data.numpy())
np.savetxt(fyt, spectra.cpu().data.numpy())
np.savetxt(fxp, Xpred)
#if self.flags.data_set != 'Yang_sim' and 'Peurifoy' not in self.flags.data_set:
# Ypred = simulator(self.flags.data_set, Xpred)
# np.savetxt(fyp, Ypred)
tk.record(1)
return Ypred_file, Ytruth_file
def evaluate_multiple_time(self, time=200, save_dir='../mm_bench_multi_eval/MDN/'):
"""
Make evaluation multiple times for deeper comparison of stochastic algorithms
:param save_dir: The directory to save the result
:return:
"""
save_dir = os.path.join(save_dir, self.flags.data_set)
tk = time_keeper(os.path.join(save_dir, 'evaluation_time.txt'))
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
for i in range(time):
self.evaluate(save_dir=save_dir, prefix='inference' + str(i))
tk.record(i)
def predict(self, Ytruth_file, save_dir='data/', prefix=''):
self.load() # load the model as constructed
cuda = True if torch.cuda.is_available() else False
if cuda:
self.model.cuda()
self.model.eval()
saved_model_str = self.saved_model.replace('/', '_') + prefix
Ytruth = pd.read_csv(Ytruth_file, header=None, delimiter=',') # Read the input
if len(Ytruth.columns) == 1: # The file is not delimited by ',' but by ' '
Ytruth = pd.read_csv(Ytruth_file, header=None, delimiter=' ')
Ytruth_tensor = torch.from_numpy(Ytruth.values).to(torch.float)
print('shape of Ytruth tensor :', Ytruth_tensor.shape)
# Get the file names
Ypred_file = os.path.join(save_dir, 'test_Ypred_{}.csv'.format(saved_model_str))
Ytruth_file = os.path.join(save_dir, 'test_Ytruth_{}.csv'.format(saved_model_str))
Xpred_file = os.path.join(save_dir, 'test_Xpred_{}.csv'.format(saved_model_str))
# keep time
tk = time_keeper(os.path.join(save_dir, 'evaluation_time.txt'))
if cuda:
Ytruth_tensor = Ytruth_tensor.cuda()
print('model in eval:',
ATOM 3051 C CB . PRO B 1 173 ? 63.703 -38.258 -7.994 1.00 20.61 ? 174 PRO B CB 1
ATOM 3052 C CG . PRO B 1 173 ? 64.106 -36.916 -7.588 1.00 20.83 ? 174 PRO B CG 1
ATOM 3053 C CD . PRO B 1 173 ? 65.009 -37.122 -6.428 1.00 19.83 ? 174 PRO B CD 1
ATOM 3054 N N . GLY B 1 174 ? 62.846 -41.367 -6.700 1.00 20.27 ? 175 GLY B N 1
ATOM 3055 C CA . GLY B 1 174 ? 63.026 -42.823 -6.725 1.00 19.47 ? 175 GLY B CA 1
ATOM 3056 C C . GLY B 1 174 ? 63.575 -43.525 -5.506 1.00 20.70 ? 175 GLY B C 1
ATOM 3057 O O . GLY B 1 174 ? 63.638 -44.763 -5.491 1.00 23.78 ? 175 GLY B O 1
ATOM 3058 N N . ASP B 1 175 ? 64.018 -42.789 -4.489 1.00 18.25 ? 176 ASP B N 1
ATOM 3059 C CA . ASP B 1 175 ? 64.503 -43.382 -3.240 1.00 18.58 ? 176 ASP B CA 1
ATOM 3060 C C . ASP B 1 175 ? 63.340 -43.676 -2.295 1.00 19.33 ? 176 ASP B C 1
ATOM 3061 O O . ASP B 1 175 ? 62.598 -42.769 -1.944 1.00 17.01 ? 176 ASP B O 1
ATOM 3062 C CB . ASP B 1 175 ? 65.538 -42.420 -2.652 1.00 19.61 ? 176 ASP B CB 1
ATOM 3063 C CG . ASP B 1 175 ? 66.200 -42.908 -1.380 1.00 19.15 ? 176 ASP B CG 1
ATOM 3064 O OD1 . ASP B 1 175 ? 65.938 -44.033 -0.867 1.00 18.17 ? 176 ASP B OD1 1
ATOM 3065 O OD2 . ASP B 1 175 ? 67.055 -42.142 -0.896 1.00 21.87 ? 176 ASP B OD2 1
ATOM 3066 N N . PRO B 1 176 ? 63.161 -44.943 -1.878 1.00 16.38 ? 177 PRO B N 1
ATOM 3067 C CA . PRO B 1 176 ? 62.035 -45.327 -1.037 1.00 17.90 ? 177 PRO B CA 1
ATOM 3068 C C . PRO B 1 176 ? 62.270 -44.987 0.425 1.00 15.35 ? 177 PRO B C 1
ATOM 3069 O O . PRO B 1 176 ? 61.394 -45.116 1.203 1.00 15.66 ? 177 PRO B O 1
ATOM 3070 C CB . PRO B 1 176 ? 61.929 -46.847 -1.275 1.00 18.45 ? 177 PRO B CB 1
ATOM 3071 C CG . PRO B 1 176 ? 63.338 -47.220 -1.527 1.00 19.78 ? 177 PRO B CG 1
ATOM 3072 C CD . PRO B 1 176 ? 63.894 -46.128 -2.357 1.00 19.17 ? 177 PRO B CD 1
ATOM 3073 N N . ARG B 1 177 ? 63.478 -44.608 0.768 1.00 12.98 ? 178 ARG B N 1
ATOM 3074 C CA . ARG B 1 177 ? 63.759 -44.308 2.173 1.00 12.84 ? 178 ARG B CA 1
ATOM 3075 C C . ARG B 1 177 ? 62.932 -43.162 2.684 1.00 11.15 ? 178 ARG B C 1
ATOM 3076 O O . ARG B 1 177 ? 62.807 -42.156 2.023 1.00 13.22 ? 178 ARG B O 1
ATOM 3077 C CB . ARG B 1 177 ? 65.233 -43.946 2.376 1.00 12.93 ? 178 ARG B CB 1
ATOM 3078 C CG . ARG B 1 177 ? 66.158 -45.137 2.227 1.00 15.07 ? 178 ARG B CG 1
ATOM 3079 C CD . ARG B 1 177 ? 67.590 -44.770 2.491 1.00 15.69 ? 178 ARG B CD 1
ATOM 3080 N NE . ARG B 1 177 ? 68.037 -43.963 1.412 1.00 18.31 ? 178 ARG B NE 1
ATOM 3081 C CZ . ARG B 1 177 ? 69.230 -43.394 1.293 1.00 17.28 ? 178 ARG B CZ 1
ATOM 3082 N NH1 . ARG B 1 177 ? 70.176 -43.605 2.136 1.00 20.60 ? 178 ARG B NH1 1
ATOM 3083 N NH2 . ARG B 1 177 ? 69.482 -42.703 0.220 1.00 19.80 ? 178 ARG B NH2 1
ATOM 3084 N N . ASP B 1 178 ? 62.455 -43.311 3.885 1.00 10.84 ? 179 ASP B N 1
ATOM 3085 C CA . ASP B 1 178 ? 61.761 -42.223 4.574 1.00 12.23 ? 179 ASP B CA 1
ATOM 3086 C C . ASP B 1 178 ? 60.563 -41.728 3.847 1.00 11.68 ? 179 ASP B C 1
ATOM 3087 O O . ASP B 1 178 ? 60.318 -40.531 3.767 1.00 11.63 ? 179 ASP B O 1
ATOM 3088 C CB . ASP B 1 178 ? 62.775 -41.136 4.848 1.00 12.25 ? 179 ASP B CB 1
ATOM 3089 C CG . ASP B 1 178 ? 63.940 -41.677 5.609 1.00 14.42 ? 179 ASP B CG 1
ATOM 3090 O OD1 . ASP B 1 178 ? 63.726 -42.175 6.746 1.00 15.14 ? 179 ASP B OD1 1
ATOM 3091 O OD2 . ASP B 1 178 ? 65.053 -41.561 5.067 1.00 15.51 ? 179 ASP B OD2 1
ATOM 3092 N N . THR B 1 179 ? 59.853 -42.700 3.259 1.00 12.96 ? 180 THR B N 1
ATOM 3093 C CA . THR B 1 179 ? 58.624 -42.449 2.519 1.00 10.76 ? 180 THR B CA 1
ATOM 3094 C C . THR B 1 179 ? 57.403 -43.150 3.020 1.00 10.64 ? 180 THR B C 1
ATOM 3095 O O . THR B 1 179 ? 57.437 -44.037 3.853 1.00 8.77 ? 180 THR B O 1
ATOM 3096 C CB . THR B 1 179 ? 58.814 -42.885 1.025 1.00 12.15 ? 180 THR B CB 1
ATOM 3097 O OG1 . THR B 1 179 ? 58.979 -44.301 0.994 1.00 12.37 ? 180 THR B OG1 1
ATOM 3098 C CG2 . THR B 1 179 ? 60.011 -42.169 0.353 1.00 10.78 ? 180 THR B CG2 1
ATOM 3099 N N . THR B 1 180 ? 56.282 -42.618 2.570 1.00 9.31 ? 181 THR B N 1
ATOM 3100 C CA . THR B 1 180 ? 54.991 -43.278 2.661 1.00 11.52 ? 181 THR B CA 1
ATOM 3101 C C . THR B 1 180 ? 54.094 -42.801 1.535 1.00 10.22 ? 181 THR B C 1
ATOM 3102 O O . THR B 1 180 ? 54.481 -42.017 0.703 1.00 13.57 ? 181 THR B O 1
ATOM 3103 C CB . THR B 1 180 ? 54.318 -43.062 4.033 1.00 9.68 ? 181 THR B CB 1
ATOM 3104 O OG1 . THR B 1 180 ? 53.288 -44.059 4.209 1.00 10.54 ? 181 THR B OG1 1
ATOM 3105 C CG2 . THR B 1 180 ? 53.734 -41.708 4.090 1.00 10.11 ? 181 THR B CG2 1
ATOM 3106 N N . THR B 1 181 ? 52.887 -43.308 1.508 1.00 11.87 ? 182 THR B N 1
ATOM 3107 C CA . THR B 1 181 ? 51.949 -42.849 0.497 1.00 10.54 ? 182 THR B CA 1
ATOM 3108 C C . THR B 1 181 ? 50.846 -41.975 1.094 1.00 11.38 ? 182 THR B C 1
ATOM 3109 O O . THR B 1 181 ? 50.589 -42.059 2.284 1.00 9.38 ? 182 THR B O 1
ATOM 3110 C CB . THR B 1 181 ? 51.284 -44.041 -0.194 1.00 10.25 ? 182 THR B CB 1
ATOM 3111 O OG1 . THR B 1 181 ? 50.402 -44.663 0.726 1.00 9.99 ? 182 THR B OG1 | |
import operator
import functools
from math import ceil
from itertools import chain
from elasticmagic import agg
from elasticmagic.attribute import AttributedField
from elasticmagic.cluster import MAX_RESULT_WINDOW
from elasticmagic.compat import text_type, string_types, with_metaclass
from elasticmagic.expression import (
Params, Term, Terms, MatchAll, Query, Bool, Field, Sort, Nested,
)
from elasticmagic.types import String, Integer, instantiate
from .codec import SimpleCodec
first = operator.itemgetter(0)
is_not_none = functools.partial(operator.is_not, None)
class UnboundFilter(object):
_current_counter = 0
def __init__(self, filter_cls, args, kwargs):
self.filter_cls = filter_cls
self.args = args
self.kwargs = kwargs
self._counter = UnboundFilter._current_counter
UnboundFilter._current_counter += 1
def bind(self, name):
return self.filter_cls(name, *self.args, **self.kwargs)
class QueryFilterMeta(type):
def __init__(cls, name, bases, attrs):
type.__init__(cls, name, bases, attrs)
cls._unbound_filters = []
for attr_name, attr in attrs.items():
if isinstance(attr, UnboundFilter):
cls._unbound_filters.append((attr_name, attr))
delattr(cls, attr_name)
cls._unbound_filters.sort(key=lambda e: e[1]._counter)
def __setattr__(cls, name, value):
if isinstance(value, UnboundFilter):
cls._unbound_filters.append((name, value))
else:
type.__setattr__(cls, name, value)
class QueryFilter(with_metaclass(QueryFilterMeta)):
NAME = 'qf'
CONJ_OR = 'CONJ_OR'
CONJ_AND = 'CONJ_AND'
def __init__(self, name=None, codec=None):
self._name = name or self.NAME
self._codec = codec or SimpleCodec()
self._filters = []
self._params = {}
for base_cls in reversed(self.__class__.__mro__):
if hasattr(base_cls, '_unbound_filters'):
for filter_name, unbound_filter in base_cls._unbound_filters:
self.add_filter(unbound_filter.bind(filter_name))
self.reset()
def get_name(self):
return self._name
def get_types(self):
types = {}
for filt in self._filters:
types.update(filt._types)
return types
def reset(self):
self._params = {}
@property
def filters(self):
return self._filters
def add_filter(self, filter):
self.remove_filter(filter.name)
filter.qf = self
self._filters.append(filter)
setattr(self, filter.name, filter)
def remove_filter(self, filter_name):
if isinstance(getattr(self, filter_name, None), BaseFilter):
delattr(self, filter_name)
self._filters = [f for f in self._filters if f.name != filter_name]
def apply(self, search_query, params):
self._params = self._codec.decode(params, self.get_types())
# First filter query with all filters
for f in self._filters:
search_query = f._apply_filter(search_query, self._params)
# then add aggregations
for f in self._filters:
search_query = f._apply_agg(search_query, self._params)
return search_query
def process_result(self, query_result):
filter_results = {}
for f in self._filters:
filter_results[f.name] = f._process_result(
query_result, self._params
)
return QueryFilterResult(filter_results)
process_results = process_result
def get_filter(self, name):
return getattr(self, name, None)
class QueryFilterResult(object):
def __init__(self, filters):
self._filters = filters
for filter_name, filter_result in self._filters.items():
setattr(self, filter_name, filter_result)
@property
def filters(self):
return self._filters
def get_filter(self, name):
return self._filters.get(name)
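# Hedged illustration (not part of this module): a minimal sketch of declaring and
# applying a QueryFilter, assuming `PostDocument` is a user-defined elasticmagic
# Document with `status` and `price` fields and `sq` is a SearchQuery. The exact
# shape of the raw `params` dict depends on SimpleCodec's conventions.
#
#     class PostQueryFilter(QueryFilter):
#         status = FacetFilter(PostDocument.status)
#         price = RangeFilter(PostDocument.price, type=Integer)
#
#     qf = PostQueryFilter()
#     sq = qf.apply(sq, {'status': ['published'], 'price__lte': ['100']})
#     qf_result = qf.process_result(sq.get_result())
#     qf_result.status.all_values   # facet buckets, each with a `selected` flag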
class BaseFilter(object):
def __new__(cls, *args, **kwargs):
if not args or not isinstance(args[0], string_types):
return UnboundFilter(cls, args, kwargs)
return super(BaseFilter, cls).__new__(cls)
def __init__(self, name, alias=None):
self.name = name
self.alias = alias or self.name
self.qf = None
@property
def _types(self):
return {}
def _get_agg_filters(self, filters, exclude_tags):
active_filters = []
for filt, meta in filters:
tags = meta.get('tags', set()) if meta else set()
if not exclude_tags.intersection(tags):
active_filters.append(filt)
return active_filters
def _apply_filter(self, search_query, params):
raise NotImplementedError()
def _apply_agg(self, search_query, params):
return search_query
def _process_result(self, result, params):
return BaseFilterResult(self.name, self.alias)
class BaseFilterResult(object):
def __init__(self, name, alias):
self.name = name
self.alias = alias
class FieldFilter(BaseFilter):
def __init__(self, name, field, alias=None, type=None):
super(FieldFilter, self).__init__(name, alias=alias)
self.field = field
self.type = instantiate(type or self.field.get_type())
@property
def _types(self):
return {self.alias: self.type}
class BaseFilterValue(object):
def __init__(self, value, _filter=None):
self.value = value
self.filter = _filter
class SimpleFilter(FieldFilter):
def __init__(
self, name, field, alias=None, type=None,
conj_operator=QueryFilter.CONJ_OR,
):
super(SimpleFilter, self).__init__(name, field, alias=alias, type=type)
self._conj_operator = conj_operator
def _get_values_from_params(self, params):
values = params.get('exact', [])
return list(filter(is_not_none, map(first, values)))
def _get_expression(self, params):
values = self._get_values_from_params(params.get(self.alias, {}))
if not values:
return None
if len(values) == 1:
return self.field == values[0]
if self._conj_operator == QueryFilter.CONJ_AND:
return Bool.must(*(self.field == v for v in values))
else:
return self.field.in_(values)
def _apply_filter(self, search_query, params):
expr = self._get_expression(params)
if expr is None:
return search_query
return search_query.filter(expr, meta={'tags': {self.name}})
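# Illustration (not part of this module): given alias 'size' and decoded params
# {'size': {'exact': [[1], [2]]}}, _get_values_from_params() yields [1, 2] and
# _get_expression() returns `self.field.in_([1, 2])` under CONJ_OR, or
# `Bool.must(self.field == 1, self.field == 2)` under CONJ_AND; a single value
# collapses to `self.field == 1`.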
class FacetFilter(SimpleFilter):
def __init__(
self, name, field, alias=None, type=None,
conj_operator=QueryFilter.CONJ_OR, instance_mapper=None,
get_title=None, **kwargs
):
super(FacetFilter, self).__init__(
name, field, alias=alias, type=type, conj_operator=conj_operator
)
self._instance_mapper = instance_mapper
self._get_title = get_title
self._agg_kwargs = kwargs
@property
def _agg_name(self):
return '{}.{}'.format(self.qf._name, self.name)
@property
def _filter_agg_name(self):
return '{}.{}.filter'.format(self.qf._name, self.name)
def _apply_filter(self, search_query, params):
expr = self._get_expression(params)
if expr is None:
return search_query
return search_query.post_filter(expr, meta={'tags': {self.name}})
def _apply_agg(self, search_query, params):
exclude_tags = {self.qf._name}
if self._conj_operator == QueryFilter.CONJ_OR:
exclude_tags.add(self.name)
filters = self._get_agg_filters(
search_query.get_context().iter_post_filters_with_meta(),
exclude_tags
)
terms_agg = agg.Terms(
self.field, instance_mapper=self._instance_mapper,
**self._agg_kwargs
)
if filters:
aggs = {
self._filter_agg_name: agg.Filter(
Bool.must(*filters), aggs={self._agg_name: terms_agg}
)
}
else:
aggs = {self._agg_name: terms_agg}
return search_query.aggregations(**aggs)
def _process_result(self, result, params):
values = self._get_values_from_params(params.get(self.alias, {}))
if result.get_aggregation(self._filter_agg_name):
terms_agg = result \
.get_aggregation(self._filter_agg_name) \
.get_aggregation(self._agg_name)
else:
terms_agg = result.get_aggregation(self._agg_name)
facet_result = FacetFilterResult(self.name, self.alias)
processed_values = set()
for bucket in terms_agg.buckets:
# FIXME: values can be a list of string but bucket.key may not
facet_result.add_value(FacetValueResult(
bucket, bucket.key in values, bool(values),
get_title=self._get_title,
))
processed_values.add(bucket.key)
for v in values:
if v not in processed_values:
fake_agg_data = {'key': v, 'doc_count': None}
fake_bucket = terms_agg.bucket_cls(
fake_agg_data, terms_agg.expr.aggs(None), terms_agg
)
# add bucket to terms aggregation to autopopulate instance
terms_agg.add_bucket(fake_bucket)
facet_result.add_value(FacetValueResult(
fake_bucket, True, True, get_title=self._get_title,
))
return facet_result
class FacetFilterResult(BaseFilterResult):
def __init__(self, name, alias):
super(FacetFilterResult, self).__init__(name, alias)
self.values = []
self.selected_values = []
self.all_values = []
self.values_map = {}
def add_value(self, fv):
self.all_values.append(fv)
self.values_map[fv.value] = fv
if fv.selected:
self.selected_values.append(fv)
else:
self.values.append(fv)
def get_value(self, value):
return self.values_map.get(value)
class FacetValueResult(BaseFilterValue):
def __init__(self, bucket, selected,
filter_has_selected_values, get_title=None):
self.bucket = bucket
self.selected = selected
self._filter_has_selected_values = filter_has_selected_values
self._get_title = get_title
@property
def value(self):
return self.bucket.key
@property
def count(self):
return self.bucket.doc_count
@property
def count_text(self):
if self.count is None:
return ''
if not self.selected and self._filter_has_selected_values:
return '+{}'.format(self.count)
return '{}'.format(self.count)
@property
def instance(self):
bucket = self.bucket
if bucket:
return self.bucket.instance
@property
def filter_name(self):
return self.filter.name
@property
def filter_value(self):
return self.filter.qf._codec.encode_value(self.value)
@property
def title(self):
if self._get_title:
return self._get_title(self)
if self.instance:
return text_type(self.instance)
return text_type(self.value)
def __unicode__(self):
return self.title
class RangeFilter(FieldFilter):
def __init__(
self, name, field, alias=None, type=None,
compute_enabled=True, compute_min_max=True,
):
super(RangeFilter, self).__init__(name, field, alias=alias)
self.type = instantiate(type or self.field.get_type())
self._compute_enabled = compute_enabled
self._compute_min_max = compute_min_max
self._from_value = None
self._to_value = None
@property
def _filter_agg_name(self):
return '{}.{}.filter'.format(self.qf._name, self.name)
@property
def _min_agg_name(self):
return '{}.{}.min'.format(self.qf._name, self.name)
@property
def _max_agg_name(self):
return '{}.{}.max'.format(self.qf._name, self.name)
@property
def _enabled_agg_name(self):
return '{}.{}.enabled'.format(self.qf._name, self.name)
def _get_from_value(self, params):
from_values = params.get('gte')
if from_values:
return from_values[0][0]
from_values = params.get('exact')
if from_values:
return from_values[0][0]
def _get_to_value(self, params):
to_values = params.get('lte')
if to_values:
return to_values[0][0]
to_values = params.get('exact')
if to_values:
return to_values[-1][0]
def _apply_filter(self, search_query, params):
params = params.get(self.alias) or {}
self._from_value = self._get_from_value(params)
self._to_value = self._get_to_value(params)
if self._from_value is None and self._to_value is None:
return search_query
return search_query.post_filter(
self.field.range(gte=self._from_value, lte=self._to_value),
meta={'tags': {self.name}}
)
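# Illustration (not part of this module): once decoded, params for this filter look
# like {'gte': [[100]], 'lte': [[200]]}, so _get_from_value()/_get_to_value() return
# 100 and 200 and the post filter above becomes self.field.range(gte=100, lte=200).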
def _apply_agg(self, search_query, params):
filters = self._get_agg_filters(
search_query.get_context().iter_post_filters_with_meta(),
{self.qf._name, self.name}
)
aggs = {}
if self._compute_enabled:
aggs.update({
self._enabled_agg_name: agg.Filter(self.field != None),
})
if self._compute_min_max:
stat_aggs = {
self._min_agg_name: agg.Min(self.field),
self._max_agg_name: agg.Max(self.field),
}
if filters:
aggs.update({
self._filter_agg_name: agg.Filter(
Bool.must(*filters), aggs=stat_aggs
)
})
else:
aggs.update(stat_aggs)
return search_query.aggregations(**aggs)
def _process_result(self, result, params):
if result.get_aggregation(self._filter_agg_name):
base_agg = result.get_aggregation(self._filter_agg_name)
else:
base_agg = result
enabled = None
if self._compute_enabled:
enabled = bool(
result.get_aggregation(self._enabled_agg_name).doc_count
)
min_value = max_value = None
if self._compute_min_max:
min_value = base_agg.get_aggregation(self._min_agg_name).value
max_value = base_agg.get_aggregation(self._max_agg_name).value
return RangeFilterResult(
self._from_value, self._to_value,
enabled=enabled, min_value=min_value, max_value=max_value
)
class RangeFilterResult(object):
def __init__(
self, from_value, to_value,
enabled=None, min_value=None, max_value=None
):
self.from_value = from_value
self.to_value = to_value
self.enabled = enabled
self.min_value = min_value
self.max_value = max_value
@property
def min(self):
return self.min_value
@property
def max(self):
return self.max_value
class SimpleQueryValue(BaseFilterValue):
def __init__(self, value, expr, _filter=None, **opts):
super(SimpleQueryValue, self).__init__(value, _filter=_filter)
self.expr = expr
self.opts = opts
class SimpleQueryFilter(BaseFilter):
def __init__(self, name, *values, **kwargs):
super(SimpleQueryFilter, self).__init__(
name, alias=kwargs.pop('alias', None)
)
self._values = values
self._values_map = {fv.value: fv for fv in self._values}
self._conj_operator = kwargs.pop('conj_operator', QueryFilter.CONJ_OR)
self.default = kwargs.pop('default', None)
@property
def _types(self):
return {self.alias: None}
def get_value(self, value):
return self._values_map.get(value)
def _get_expression(self, params):
values = params.get(self.alias, {}).get('exact')
if not values:
if self.default:
values = [[self.default]]
if not values:
return None
expressions = []
for v in values:
w = v[0]
filter_value = self.get_value(w)
if filter_value and not isinstance(filter_value.expr, MatchAll):
expressions.append(filter_value.expr)
if not expressions:
return None
if self._conj_operator == QueryFilter.CONJ_AND:
return Bool.must(*expressions)
else:
return Bool.should(*expressions)
def _apply_filter(self, search_query, params):
expr = self._get_expression(params)
if expr is None:
return search_query
return search_query.filter(expr, meta={'tags': {self.name}})
class FacetQueryFilter(SimpleQueryFilter):
def __init__(self, name, *values, **kwargs):
super(FacetQueryFilter, self).__init__(name, *values, **kwargs)
self.agg_kwargs = kwargs
@property
def _filter_agg_name(self):
return '{}.{}.filter'.format(self.qf._name, self.name)
def _make_agg_name(self, value):
return '{}.{}:{}'.format(self.qf._name, self.name, value)
def _apply_filter(self, search_query, params):
expr = self._get_expression(params)
if expr is None:
return search_query
return search_query.post_filter(expr, meta={'tags': {self.name}})
def _apply_agg(self, search_query, params):
exclude_tags = {self.qf._name}
if self._conj_operator == QueryFilter.CONJ_OR:
exclude_tags.add(self.name)
filters = self._get_agg_filters(
search_query.get_context().iter_post_filters_with_meta(),
exclude_tags
)
# Copyright 2011 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RSA key generation code.
Create new keys with the newkeys() function. It will give you a PublicKey and a
PrivateKey object.
Loading and saving keys requires the pyasn1 module. This module is imported as
late as possible, such that other functionality will remain working in absence
of pyasn1.
.. note::
Storing public and private keys via the `pickle` module is possible.
However, it is insecure to load a key from an untrusted source.
The pickle module is not secure against erroneous or maliciously
constructed data. Never unpickle data received from an untrusted
or unauthenticated source.
"""
import logging
import typing
import warnings
import rsa.prime
import rsa.pem
import rsa.common
import rsa.randnum
import rsa.core
log = logging.getLogger(__name__)
DEFAULT_EXPONENT = 65537
class AbstractKey:
"""Abstract superclass for private and public keys."""
__slots__ = ('n', 'e', 'blindfac', 'blindfac_inverse')
def __init__(self, n: int, e: int) -> None:
self.n = n
self.e = e
# These will be computed properly on the first call to blind().
self.blindfac = self.blindfac_inverse = -1
@classmethod
def _load_pkcs1_pem(cls, keyfile: bytes) -> 'AbstractKey':
"""Loads a key in PKCS#1 PEM format, implement in a subclass.
:param keyfile: contents of a PEM-encoded file that contains
the public key.
:type keyfile: bytes
:return: the loaded key
:rtype: AbstractKey
"""
@classmethod
def _load_pkcs1_der(cls, keyfile: bytes) -> 'AbstractKey':
"""Loads a key in PKCS#1 PEM format, implement in a subclass.
:param keyfile: contents of a DER-encoded file that contains
the public key.
:type keyfile: bytes
:return: the loaded key
:rtype: AbstractKey
"""
def _save_pkcs1_pem(self) -> bytes:
"""Saves the key in PKCS#1 PEM format, implement in a subclass.
:returns: the PEM-encoded key.
:rtype: bytes
"""
def _save_pkcs1_der(self) -> bytes:
"""Saves the key in PKCS#1 DER format, implement in a subclass.
:returns: the DER-encoded key.
:rtype: bytes
"""
@classmethod
def load_pkcs1(cls, keyfile: bytes, format: str = 'PEM') -> 'AbstractKey':
"""Loads a key in PKCS#1 DER or PEM format.
:param keyfile: contents of a DER- or PEM-encoded file that contains
the key.
:type keyfile: bytes
:param format: the format of the file to load; 'PEM' or 'DER'
:type format: str
:return: the loaded key
:rtype: AbstractKey
"""
methods = {
'PEM': cls._load_pkcs1_pem,
'DER': cls._load_pkcs1_der,
}
method = cls._assert_format_exists(format, methods)
return method(keyfile)
@staticmethod
def _assert_format_exists(file_format: str, methods: typing.Mapping[str, typing.Callable]) \
-> typing.Callable:
"""Checks whether the given file format exists in 'methods'.
"""
try:
return methods[file_format]
except KeyError:
formats = ', '.join(sorted(methods.keys()))
raise ValueError('Unsupported format: %r, try one of %s' % (file_format,
formats))
def save_pkcs1(self, format: str = 'PEM') -> bytes:
"""Saves the key in PKCS#1 DER or PEM format.
:param format: the format to save; 'PEM' or 'DER'
:type format: str
:returns: the DER- or PEM-encoded key.
:rtype: bytes
"""
methods = {
'PEM': self._save_pkcs1_pem,
'DER': self._save_pkcs1_der,
}
method = self._assert_format_exists(format, methods)
return method()
def blind(self, message: int) -> int:
"""Performs blinding on the message using random number 'r'.
:param message: the message, as integer, to blind.
:type message: int
:param r: the random number to blind with.
:type r: int
:return: the blinded message.
:rtype: int
The blinding is such that message = unblind(decrypt(blind(encrypt(message))).
See https://en.wikipedia.org/wiki/Blinding_%28cryptography%29
"""
self._update_blinding_factor()
return (message * pow(self.blindfac, self.e, self.n)) % self.n
def unblind(self, blinded: int) -> int:
"""Performs blinding on the message using random number 'r'.
:param blinded: the blinded message, as integer, to unblind.
:param r: the random number to unblind with.
:return: the original message.
The blinding is such that message = unblind(decrypt(blind(encrypt(message))).
See https://en.wikipedia.org/wiki/Blinding_%28cryptography%29
"""
return (self.blindfac_inverse * blinded) % self.n
def _initial_blinding_factor(self) -> int:
for _ in range(1000):
blind_r = rsa.randnum.randint(self.n - 1)
if rsa.prime.are_relatively_prime(self.n, blind_r):
return blind_r
raise RuntimeError('unable to find blinding factor')
def _update_blinding_factor(self):
if self.blindfac < 0:
# Compute initial blinding factor, which is rather slow to do.
self.blindfac = self._initial_blinding_factor()
self.blindfac_inverse = rsa.common.inverse(self.blindfac, self.n)
else:
# Reuse previous blinding factor as per section 9 of 'A Timing
# Attack against RSA with the Chinese Remainder Theorem' by Werner
# Schindler.
# See https://tls.mbed.org/public/WSchindler-RSA_Timing_Attack.pdf
self.blindfac = pow(self.blindfac, 2, self.n)
self.blindfac_inverse = pow(self.blindfac_inverse, 2, self.n)
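# Hedged illustration (not part of the upstream module): a tiny numeric check of the
# blinding identity used above, i.e. unblinding after "decryption" recovers the same
# value as decrypting the original ciphertext. Uses a toy key (n=33, e=3, d=7) and a
# fixed blinding factor r=2 purely for demonstration; real keys come from newkeys().
def _blinding_identity_demo():
    n, e, d = 33, 3, 7            # toy RSA key: p=3, q=11, e*d == 1 (mod lcm(p-1, q-1))
    m, r = 5, 2                   # message and blinding factor with gcd(r, n) == 1
    r_inv = pow(r, -1, n)         # modular inverse of r (Python 3.8+)
    c = pow(m, e, n)              # "encrypt" m
    blinded = (c * pow(r, e, n)) % n              # what blind() computes
    decrypted_blinded = pow(blinded, d, n)        # core decryption on the blinded value
    unblinded = (r_inv * decrypted_blinded) % n   # what unblind() computes
    assert unblinded == pow(c, d, n) == m
    return unblinded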
class PublicKey(AbstractKey):
"""Represents a public RSA key.
This key is also known as the 'encryption key'. It contains the 'n' and 'e'
values.
Supports attributes as well as dictionary-like access. Attribute access is
faster, though.
>>> PublicKey(5, 3)
PublicKey(5, 3)
>>> key = PublicKey(5, 3)
>>> key.n
5
>>> key['n']
5
>>> key.e
3
>>> key['e']
3
"""
__slots__ = ('n', 'e')
def __getitem__(self, key: str) -> int:
return getattr(self, key)
def __repr__(self) -> str:
return 'PublicKey(%i, %i)' % (self.n, self.e)
def __getstate__(self) -> typing.Tuple[int, int]:
"""Returns the key as tuple for pickling."""
return self.n, self.e
def __setstate__(self, state: typing.Tuple[int, int]) -> None:
"""Sets the key from tuple."""
self.n, self.e = state
def __eq__(self, other: typing.Any) -> bool:
if other is None:
return False
if not isinstance(other, PublicKey):
return False
return self.n == other.n and self.e == other.e
def __ne__(self, other: typing.Any) -> bool:
return not (self == other)
def __hash__(self) -> int:
return hash((self.n, self.e))
@classmethod
def _load_pkcs1_der(cls, keyfile: bytes) -> 'PublicKey':
"""Loads a key in PKCS#1 DER format.
:param keyfile: contents of a DER-encoded file that contains the public
key.
:return: a PublicKey object
First let's construct a DER encoded key:
>>> import base64
>>> b64der = 'MAwCBQCNGmYtAgMBAAE='
>>> der = base64.standard_b64decode(b64der)
This loads the file:
>>> PublicKey._load_pkcs1_der(der)
PublicKey(2367317549, 65537)
"""
from pyasn1.codec.der import decoder
from rsa.asn1 import AsnPubKey
(priv, _) = decoder.decode(keyfile, asn1Spec=AsnPubKey())
return cls(n=int(priv['modulus']), e=int(priv['publicExponent']))
def _save_pkcs1_der(self) -> bytes:
"""Saves the public key in PKCS#1 DER format.
:returns: the DER-encoded public key.
:rtype: bytes
"""
from pyasn1.codec.der import encoder
from rsa.asn1 import AsnPubKey
# Create the ASN object
asn_key = AsnPubKey()
asn_key.setComponentByName('modulus', self.n)
asn_key.setComponentByName('publicExponent', self.e)
return encoder.encode(asn_key)
@classmethod
def _load_pkcs1_pem(cls, keyfile: bytes) -> 'PublicKey':
"""Loads a PKCS#1 PEM-encoded public key file.
The contents of the file before the "-----BEGIN RSA PUBLIC KEY-----" and
after the "-----END RSA PUBLIC KEY-----" lines is ignored.
:param keyfile: contents of a PEM-encoded file that contains the public
key.
:return: a PublicKey object
"""
der = rsa.pem.load_pem(keyfile, 'RSA PUBLIC KEY')
return cls._load_pkcs1_der(der)
def _save_pkcs1_pem(self) -> bytes:
"""Saves a PKCS#1 PEM-encoded public key file.
:return: contents of a PEM-encoded file that contains the public key.
:rtype: bytes
"""
der = self._save_pkcs1_der()
return rsa.pem.save_pem(der, 'RSA PUBLIC KEY')
@classmethod
def load_pkcs1_openssl_pem(cls, keyfile: bytes) -> 'PublicKey':
"""Loads a PKCS#1.5 PEM-encoded public key file from OpenSSL.
These files can be recognised in that they start with BEGIN PUBLIC KEY
rather than BEGIN RSA PUBLIC KEY.
The contents of the file before the "-----BEGIN PUBLIC KEY-----" and
after the "-----END PUBLIC KEY-----" lines is ignored.
:param keyfile: contents of a PEM-encoded file that contains the public
key, from OpenSSL.
:type keyfile: bytes
:return: a PublicKey object
"""
der = rsa.pem.load_pem(keyfile, 'PUBLIC KEY')
return cls.load_pkcs1_openssl_der(der)
@classmethod
def load_pkcs1_openssl_der(cls, keyfile: bytes) -> 'PublicKey':
"""Loads a PKCS#1 DER-encoded public key file from OpenSSL.
:param keyfile: contents of a DER-encoded file that contains the public
key, from OpenSSL.
:return: a PublicKey object
"""
from rsa.asn1 import OpenSSLPubKey
from pyasn1.codec.der import decoder
from pyasn1.type import univ
(keyinfo, _) = decoder.decode(keyfile, asn1Spec=OpenSSLPubKey())
if keyinfo['header']['oid'] != univ.ObjectIdentifier('1.2.840.113549.1.1.1'):
raise TypeError("This is not a DER-encoded OpenSSL-compatible public key")
return cls._load_pkcs1_der(keyinfo['key'][1:])
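# Hedged illustration (not part of the upstream module): round-tripping a public key
# through PKCS#1 PEM with the methods defined above. Requires pyasn1 (see the module
# docstring); the toy modulus/exponent are the values from the _load_pkcs1_der doctest.
def _pem_roundtrip_demo():
    pub = PublicKey(2367317549, 65537)
    pem = pub.save_pkcs1(format='PEM')      # b'-----BEGIN RSA PUBLIC KEY-----...'
    assert PublicKey.load_pkcs1(pem, format='PEM') == pub
    return pem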
class PrivateKey(AbstractKey):
"""Represents a private RSA key.
This key is also known as the 'decryption key'. It contains the 'n', 'e',
'd', 'p', 'q' and other values.
Supports attributes as well as dictionary-like access. Attribute access is
faster, though.
>>> PrivateKey(3247, 65537, 833, 191, 17)
PrivateKey(3247, 65537, 833, 191, 17)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import namedtuple
import rrc_evaluation_funcs
import importlib
import sys
import math
def evaluation_imports():
"""
evaluation_imports: Dictionary ( key = module name , value = alias ) with python modules used in the evaluation.
"""
return {
'Polygon': 'plg',
'numpy': 'np'
}
def default_evaluation_params():
"""
default_evaluation_params: Default parameters to use for the validation and evaluation.
"""
p = dict([s[1:].split('=') for s in sys.argv[1:]])
if p['g'].split("/")[-1] in ['gt_ctw1500_det.zip', 'gt_ctw1500_det_with_ignore.zip']:
return {
'IOU_CONSTRAINT': 0.5,
'AREA_PRECISION_CONSTRAINT': 0.5,
'GT_SAMPLE_NAME_2_ID': '([0-9]+).txt',
'DET_SAMPLE_NAME_2_ID': '([0-9]+).txt',
'LTRB': False, # LTRB:2points(left,top,right,bottom) or 4 points(x1,y1,x2,y2,x3,y3,x4,y4)
'CRLF': False, # Lines are delimited by Windows CRLF format
'CONFIDENCES': False, # Detections must include confidence value. AP will be calculated
'PER_SAMPLE_RESULTS': True # Generate per sample results and produce data for visualization
}
elif p['g'].split("/")[-1] in ['total-text-gt.zip']:
return {
'IOU_CONSTRAINT': 0.5,
'AREA_PRECISION_CONSTRAINT': 0.5,
'GT_SAMPLE_NAME_2_ID': 'poly_gt_img([0-9]+).txt',
'DET_SAMPLE_NAME_2_ID': 'img([0-9]+).txt',
'LTRB': False, # LTRB:2points(left,top,right,bottom) or 4 points(x1,y1,x2,y2,x3,y3,x4,y4)
'CRLF': False, # Lines are delimited by Windows CRLF format
'CONFIDENCES': False, # Detections must include confidence value. AP will be calculated
'PER_SAMPLE_RESULTS': True # Generate per sample results and produce data for visualization
}
elif p['g'].split("/")[-1] in ['gt-icdar2015.zip']:
return {
'IOU_CONSTRAINT': 0.5,
'AREA_PRECISION_CONSTRAINT': 0.5,
'GT_SAMPLE_NAME_2_ID': 'gt_img_([0-9]+).txt',
'DET_SAMPLE_NAME_2_ID': 'img_([0-9]+).txt',
'LTRB': False, # LTRB:2points(left,top,right,bottom) or 4 points(x1,y1,x2,y2,x3,y3,x4,y4)
'CRLF': False, # Lines are delimited by Windows CRLF format
'CONFIDENCES': False, # Detections must include confidence value. AP will be calculated
'PER_SAMPLE_RESULTS': True # Generate per sample results and produce data for visualization
}
else:
raise NotImplementedError
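# Illustration (not part of the original script): the `p` dict above is built from
# "-key=value" style command line arguments, e.g. invoking the script with
# `-g=gt-icdar2015.zip -s=submission.zip` yields p == {'g': 'gt-icdar2015.zip',
# 's': 'submission.zip'}, and the ground-truth file name selects the parameter set.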
def validate_data(gtFilePath, submFilePath, evaluationParams):
"""
Method validate_data: validates that all files in the results folder are correct (have the correct name contents).
Validates also that there are no missing files in the folder.
If some error detected, the method raises the error
"""
gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])
subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)
# Validate format of GroundTruth
for k in gt:
rrc_evaluation_funcs.validate_lines_in_file(k, gt[k], evaluationParams['CRLF'], evaluationParams['LTRB'], True)
# Validate format of results
for k in subm:
if k not in gt:
raise Exception("The sample %s not present in GT" % k)
rrc_evaluation_funcs.validate_lines_in_file(k, subm[k], evaluationParams['CRLF'], evaluationParams['LTRB'],
False, evaluationParams['CONFIDENCES'])
def evaluate_method(gtFilePath, submFilePath, evaluationParams):
"""
Method evaluate_method: evaluate method and returns the results
Results. Dictionary with the following values:
- method (required) Global method metrics. Ex: { 'Precision':0.8,'Recall':0.9 }
- samples (optional) Per sample metrics. Ex: {'sample1' : { 'Precision':0.8,'Recall':0.9 } , 'sample2' : { 'Precision':0.8,'Recall':0.9 }
"""
for module, alias in evaluation_imports().items():
globals()[alias] = importlib.import_module(module)
def polygon_from_points(points):
"""
Returns a Polygon object to use with the Polygon2 class from a list of 8 points: x1,y1,x2,y2,x3,y3,x4,y4
"""
num_points = len(points)
# resBoxes=np.empty([1,num_points],dtype='int32')
resBoxes = np.empty([1, num_points], dtype='float32')
for inp in range(0, num_points, 2):
# print(inp, points)
# print(resBoxes[0, inp/2])
resBoxes[0, int(inp / 2)] = float(points[inp])
resBoxes[0, int(inp / 2 + num_points / 2)] = float(points[inp + 1])
pointMat = resBoxes[0].reshape([2, int(num_points / 2)]).T
return plg.Polygon(pointMat)
def rectangle_to_polygon(rect):
resBoxes = np.empty([1, 8], dtype='int32')
resBoxes[0, 0] = int(rect.xmin)
resBoxes[0, 4] = int(rect.ymax)
resBoxes[0, 1] = int(rect.xmin)
resBoxes[0, 5] = int(rect.ymin)
resBoxes[0, 2] = int(rect.xmax)
resBoxes[0, 6] = int(rect.ymin)
resBoxes[0, 3] = int(rect.xmax)
resBoxes[0, 7] = int(rect.ymax)
pointMat = resBoxes[0].reshape([2, 4]).T
return plg.Polygon(pointMat)
def rectangle_to_points(rect):
points = [int(rect.xmin), int(rect.ymax), int(rect.xmax), int(rect.ymax), int(rect.xmax), int(rect.ymin),
int(rect.xmin), int(rect.ymin)]
return points
def get_union(pD, pG):
areaA = pD.area()
areaB = pG.area()
return areaA + areaB - get_intersection(pD, pG)
def get_intersection_over_union(pD, pG):
try:
return get_intersection(pD, pG) / get_union(pD, pG)
except:
return 0
def funcCt(x):
if x <= 0.01:
return 1
else:
return 1 - x
def get_text_intersection_over_union_recall(pD, pG):
'''
Ct (cut): Area of ground truth that is not covered by detection bounding box.
'''
try:
Ct = pG.area() - get_intersection(pD, pG)
assert (Ct >= 0 and Ct <= pG.area()), 'Invalid Ct value'
assert (pG.area() > 0), 'Invalid Gt'
return (get_intersection(pD, pG) * funcCt(Ct * 1.0 / pG.area())) / get_union(pD, pG)
except Exception as e:
return 0
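# Worked example (not part of the original script): with gt the unit square
# [0,1]x[0,1] and det its left half [0,0.5]x[0,1]:
#   intersection = 0.5, union = 1.0, Ct = 1.0 - 0.5 = 0.5,
#   funcCt(0.5 / 1.0) = 1 - 0.5 = 0.5,
#   TIoU-recall = (0.5 * 0.5) / 1.0 = 0.25, versus plain IoU = 0.5.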
def funcOt(x):
if x <= 0.01:
return 1
else:
return 1 - x
def get_text_intersection_over_union_precision(pD, pG, gtNum, gtPolys, gtDontCarePolsNum):
'''
Ot: Outlier gt area
'''
Ot = 0
try:
inside_pG = pD & pG
gt_union_inside_pD = None
gt_union_inside_pD_and_pG = None
count_initial = 0
for i in range(len(gtPolys)):
if i != gtNum and gtNum not in gtDontCarePolsNum: # ignore don't care regions
if not get_intersection(pD, gtPolys[i]) == 0:
if count_initial == 0:
# initial
gt_union_inside_pD = gtPolys[i]
gt_union_inside_pD_and_pG = inside_pG & gtPolys[i]
count_initial = 1
continue
gt_union_inside_pD = gt_union_inside_pD | gtPolys[i]
inside_pG_i = inside_pG & gtPolys[i]
gt_union_inside_pD_and_pG = gt_union_inside_pD_and_pG | inside_pG_i
if gt_union_inside_pD is not None:
pD_union_with_other_gt = pD & gt_union_inside_pD
Ot = pD_union_with_other_gt.area() - gt_union_inside_pD_and_pG.area()
if Ot <= 1.0e-10:
Ot = 0
else:
Ot = 0
# allow invalid polygon
assert (Ot >= 0 and Ot <= pD.area())
assert (pD.area() > 0)
return (get_intersection(pD, pG) * funcOt(Ot * 1.0 / pD.area())) / get_union(pD, pG)
except Exception as e:
# print(e)
return 0
def get_intersection(pD, pG):
pInt = pD & pG
if len(pInt) == 0:
return 0
return pInt.area()
def get_intersection_three(pD, pG, pGi):
pInt = pD & pG
pInt_3 = pInt & pGi
if len(pInt_3) == 0:
return 0
return pInt_3.area()
def compute_ap(confList, matchList, numGtCare):
correct = 0
AP = 0
if len(confList) > 0:
confList = np.array(confList)
matchList = np.array(matchList)
sorted_ind = np.argsort(-confList)
confList = confList[sorted_ind]
matchList = matchList[sorted_ind]
for n in range(len(confList)):
match = matchList[n]
if match:
correct += 1
AP += float(correct) / (n + 1)
if numGtCare > 0:
AP /= numGtCare
return AP
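# Worked example (not part of the original script): confList = [0.9, 0.8, 0.7],
# matchList = [1, 0, 1], numGtCare = 2 keeps the confidence-sorted matches [1, 0, 1]
# and accumulates AP = (1/1 + 2/3) / 2 ≈ 0.833.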
perSampleMetrics = {}
matchedSum = 0
matchedSum_iou = 0
matchedSum_tiouGt = 0
matchedSum_tiouDt = 0
matchedSum_cutGt = 0
matchedSum_coverOtherGt = 0
Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')
gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])
subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)
numGlobalCareGt = 0
numGlobalCareDet = 0
arrGlobalConfidences = []
arrGlobalMatches = []
totalNumGtPols = 0
totalNumDetPols = 0
# fper_ = open('per_samle_result.txt', 'w')
for resFile in gt:
gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile])
recall = 0
precision = 0
hmean = 0
detMatched = 0
detMatched_iou = 0
detMatched_tiouGt = 0
detMatched_tiouDt = 0
detMatched_cutGt = 0
detMatched_coverOtherGt = 0
iouMat = np.empty([1, 1])
gtPols = []
detPols = []
gtPolPoints = []
detPolPoints = []
# Array of Ground Truth Polygons' keys marked as don't Care
gtDontCarePolsNum = []
# Array of Detected Polygons' matched with a don't Care GT
detDontCarePolsNum = []
pairs = []
detMatchedNums = []
arrSampleConfidences = []
arrSampleMatch = []
sampleAP = 0
evaluationLog = ""
pointsList, _, transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(gtFile,
evaluationParams[
'CRLF'],
evaluationParams[
'LTRB'],
True, False)
for n in range(len(pointsList)):
points = pointsList[n]
transcription = transcriptionsList[n]
dontCare = transcription == "###"
if evaluationParams['LTRB']:
gtRect = Rectangle(*points)
gtPol = rectangle_to_polygon(gtRect)
else:
gtPol = polygon_from_points(points)
gtPols.append(gtPol)
gtPolPoints.append(points)
if dontCare:
gtDontCarePolsNum.append(len(gtPols) - 1)
evaluationLog += "GT polygons: " + str(len(gtPols)) + (
" (" + str(len(gtDontCarePolsNum)) + " don't care)\n" if len(gtDontCarePolsNum) > 0 else "\n")
if resFile in subm:
detFile = rrc_evaluation_funcs.decode_utf8(subm[resFile])
pointsList, confidencesList, _ = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(detFile,
evaluationParams[
'CRLF'],
evaluationParams[
'LTRB'],
False,
evaluationParams[
'CONFIDENCES'])
for n in range(len(pointsList)):
points = pointsList[n]
if evaluationParams['LTRB']:
detRect = Rectangle(*points)
detPol = rectangle_to_polygon(detRect)
else:
detPol = polygon_from_points(points)
detPols.append(detPol)
detPolPoints.append(points)
if len(gtDontCarePolsNum) > 0:
for dontCarePol in gtDontCarePolsNum:
dontCarePol = gtPols[dontCarePol]
intersected_area = get_intersection(dontCarePol, detPol)
pdDimensions = detPol.area()
precision = 0 if pdDimensions == 0 else intersected_area / pdDimensions
if (precision > evaluationParams['AREA_PRECISION_CONSTRAINT']):
detDontCarePolsNum.append(len(detPols) - 1)
break
evaluationLog += "DET polygons: " + str(len(detPols)) + (
" (" + str(len(detDontCarePolsNum)) + " don't care)\n" if len(detDontCarePolsNum) > 0 else "\n")
if len(gtPols) > 0 and len(detPols) > 0:
# Calculate IoU and precision matrixs
outputShape = [len(gtPols), len(detPols)]
iouMat = np.empty(outputShape)
gtRectMat = np.zeros(len(gtPols), np.int8)
detRectMat = np.zeros(len(detPols), np.int8)
tiouRecallMat = np.empty(outputShape)
tiouPrecisionMat = np.empty(outputShape)
tiouGtRectMat = np.zeros(len(gtPols), np.int8)
tiouDetRectMat = np.zeros(len(detPols), np.int8)
for gtNum in range(len(gtPols)):
for detNum in range(len(detPols)):
pG = gtPols[gtNum]
pD = detPols[detNum]
iouMat[gtNum, detNum] = get_intersection_over_union(pD, pG)
tiouRecallMat[gtNum, detNum] = get_text_intersection_over_union_recall(pD, pG)
tiouPrecisionMat[gtNum, detNum] = get_text_intersection_over_union_precision(pD, pG, gtNum,
gtPols,
gtDontCarePolsNum)
for gtNum in range(len(gtPols)):
for detNum in range(len(detPols)):
# coding=utf-8
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT classification fine-tuning: utilities to work with GLUE tasks """
from __future__ import absolute_import, division, print_function
import csv
import logging
import os
import sys
from io import open
import json
from collections import Counter
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
import numpy as np
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, start_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.start_id = start_id
class tacredInputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, subj_special_start_id, obj_special_start_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.subj_special_start_id = subj_special_start_id
self.obj_special_start_id = obj_special_start_id
class semevalInputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, e1_start_id, e2_start_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.e1_start_id = e1_start_id
self.e2_start_id = e2_start_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8-sig") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
@classmethod
def _read_json(cls, input_file):
with open(input_file, 'r', encoding='utf8') as f:
return json.load(f)
@classmethod
def _read_semeval_txt(cls, input_file):
with open(input_file, 'r', encoding='utf8') as f:
examples = []
example = []
for line in f:
if line.strip() == '':
examples.append(example)
example = []
else:
example.append(line.strip())
return examples
class EntityTypeProcessor(DataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def get_train_examples(self, data_dir, dataset_type=None):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "train.json")), "train")
def get_dev_examples(self, data_dir, dataset_type):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "{}.json".format(dataset_type))), dataset_type)
def get_labels(self):
"""See base class."""
return [0, 1]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
label_list = ['entity', 'location', 'time', 'organization', 'object', 'event', 'place', 'person', 'group']
for (i, line) in enumerate(lines):
guid = i
text_a = line['sent']
text_b = (line['start'], line['end'])
label = [0 for item in range(len(label_list))]
for item in line['labels']:
label[label_list.index(item)] = 1
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
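# Illustrative input row (format inferred from the fields accessed above; not part of
# the original file):
#   {"sent": "Paris is lovely", "start": 0, "end": 5, "labels": ["location", "place"]}
# produces text_b == (0, 5) and a 9-dimensional multi-hot label vector with ones at
# the indices of 'location' and 'place'.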
relations = ['per:siblings', 'per:parents', 'org:member_of', 'per:origin', 'per:alternate_names', 'per:date_of_death',
'per:title', 'org:alternate_names', 'per:countries_of_residence', 'org:stateorprovince_of_headquarters',
'per:city_of_death', 'per:schools_attended', 'per:employee_of', 'org:members', 'org:dissolved',
'per:date_of_birth', 'org:number_of_employees/members', 'org:founded', 'org:founded_by',
'org:political/religious_affiliation', 'org:website', 'org:top_members/employees', 'per:children',
'per:cities_of_residence', 'per:cause_of_death', 'org:shareholders', 'per:age', 'per:religion',
'no_relation',
'org:parents', 'org:subsidiaries', 'per:country_of_birth', 'per:stateorprovince_of_death',
'per:city_of_birth',
'per:stateorprovinces_of_residence', 'org:country_of_headquarters', 'per:other_family',
'per:stateorprovince_of_birth',
'per:country_of_death', 'per:charges', 'org:city_of_headquarters', 'per:spouse']
relations = ['UNCLEAR','CAPITALISTIC','CLIENT','COMPETITOR','PARTNER','TRIAL']
import pandas as pd
import re
class TACREDProcessor(DataProcessor):
def get_train_examples(self, data_dir, dataset_type, negative_sample):
"""See base class."""
return self._create_examples(pd.read_csv(data_dir+dataset_type+"_EN_bert_processed_org.tsv",sep="\t"), dataset_type, negative_sample)
def get_dev_examples(self, data_dir, dataset_type, negative_sample):
"""See base class."""
return self._create_examples(pd.read_csv(data_dir+dataset_type+"_EN_bert_processed_org.tsv",sep="\t"),dataset_type, negative_sample)
def get_labels(self):
"""See base class."""
# return ["0", "1"]
return relations
def _create_examples(self, df, dataset_type, negative_sample):
"""Creates examples for the training and dev sets."""
examples = []
for i, item in df.iterrows():
guid = i
tokens = item["sentence"]
sub = re.sub(r"\[E11\]",'',tokens)
sub = re.sub(r"\[E12\]",'',sub)
ob = re.sub(r"\[E21\]",'',tokens)
ob = re.sub(r"\[E22\]",'',ob)
text_b = ob.index('[E11]'), ob.index('[E12]') -5,sub.index('[E21]'), sub.index('[E22]') -5
tokens = re.sub(r"\[E11\]",'',tokens)
tokens = re.sub(r"\[E12\]",'',tokens)
tokens = re.sub(r"\[E21\]",'',tokens)
tokens = re.sub(r"\[E22\]",'',tokens)
text = tokens
text_a = text.rstrip()
label = item['relation']
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
semeval_relations = ['Cause-Effect(e1,e2)', 'Cause-Effect(e2,e1)',
'Content-Container(e1,e2)', 'Content-Container(e2,e1)',
'Entity-Origin(e1,e2)', 'Entity-Origin(e2,e1)',
'Member-Collection(e1,e2)', 'Member-Collection(e2,e1)',
'Component-Whole(e1,e2)', 'Component-Whole(e2,e1)',
'Entity-Destination(e1,e2)', 'Entity-Destination(e2,e1)',
'Instrument-Agency(e1,e2)', 'Instrument-Agency(e2,e1)',
'Message-Topic(e1,e2)', 'Message-Topic(e2,e1)',
'Product-Producer(e1,e2)', 'Product-Producer(e2,e1)',
'Other'
]
semeval_relations_no_direction = ['Content-Container', 'Cause-Effect', 'Entity-Origin', 'Member-Collection',
'Component-Whole',
'Entity-Destination', 'Instrument-Agency', 'Other', 'Message-Topic',
'Product-Producer']
class SemEvalProcessor(DataProcessor):
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_semeval_txt(os.path.join(data_dir, "train.txt")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_semeval_txt(os.path.join(data_dir, "test.txt")), "test")
def get_labels(self):
"""See base class."""
# return ["0", "1"]
return semeval_relations
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
sentence = line[0].split('\t')[1][1:-1]
label = line[1]
# I have checked @ and ^ do not appear in the corpus.
sentence = sentence.replace('<e1>', '@ ').replace('</e1>', ' @').replace('<e2>', '^ ').replace('</e2>',
' ^')
guid = i
# text_a: raw text including @ and ^, after word piece, just tokens.index['@'] to get the first index
text_a = sentence
# text_b: None
text_b = None
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
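# Illustration (not part of the original file): a raw SemEval line such as
#   '1\t"The <e1>tumor</e1> was caused by a <e2>virus</e2>."'
# becomes text_a == 'The @ tumor @ was caused by a ^ virus ^.' after the
# replacements above, so the entity spans can later be located via the '@' and '^'
# marker positions in the word-piece sequence.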
def convert_examples_to_features_entity_typing(examples, label_list, max_seq_length,
tokenizer, output_mode,
cls_token_at_end=False,
cls_token='[CLS]',
cls_token_segment_id=1,
sep_token='[SEP]',
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
sequence_a_segment_id=0,
sequence_b_segment_id=1,
mask_padding_with_zero=True):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
start, end = example.text_b[0], example.text_b[1]
sentence = example.text_a
tokens_0_start = tokenizer.tokenize(sentence[:start])
tokens_start_end = tokenizer.tokenize(sentence[start:end])
tokens_end_last = tokenizer.tokenize(sentence[end:])
tokens = [cls_token] + tokens_0_start + tokenizer.tokenize('@') + tokens_start_end + tokenizer.tokenize(
'@') + tokens_end_last + [sep_token]
start = 1 + len(tokens_0_start)
end = 1 + len(tokens_0_start) + 1 + len(tokens_start_end)
segment_ids = [sequence_a_segment_id] * len(tokens)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = example.label
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: {}".format(label_id))
start_id = np.zeros(max_seq_length)
start_id[start] = 1
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
start_id=start_id))
return features
def convert_examples_to_features_tacred(examples, label_list, max_seq_length,
tokenizer, output_mode,
cls_token_at_end=False,
cls_token='[CLS]',
cls_token_segment_id=1,
sep_token='[SEP]',
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
sequence_a_segment_id=0,
sequence_b_segment_id=1,
mask_padding_with_zero=True):
""" Loads a data file into a | |
matched terms in this passage
score = sum(t.boost for t in f.matches)
# Favor diversity: multiply score by the number of separate
# terms matched
score *= (len(f.matched_terms) * 100) or 1
return score
# Fragment sorters
def SCORE(fragment):
"Sorts higher scored passages first."
return 1
def FIRST(fragment):
"Sorts passages from earlier in the document first."
return fragment.startchar
def LONGER(fragment):
"Sorts longer passages first."
return 0 - len(fragment)
def SHORTER(fragment):
"Sort shorter passages first."
return len(fragment)
# Formatters
def get_text(original, token, replace):
"""Convenience function for getting the text to use for a match when
formatting.
If ``replace`` is False, returns the part of ``original`` between
``token.startchar`` and ``token.endchar``. If ``replace`` is True, returns
``token.text``.
"""
if replace:
return token.text
else:
return original[token.startchar:token.endchar]
class Formatter(object):
"""Base class for formatters.
For highlighters that return strings, it is usually only necessary to
override :meth:`Formatter.format_token`.
Use the :func:`get_text` function as a convenience to get the token text::
class MyFormatter(Formatter):
def format_token(self, text, token, replace=False):
ttext = get_text(text, token, replace)
return "[%s]" % ttext
"""
between = "..."
def _text(self, text):
return text
def format_token(self, text, token, replace=False):
"""Returns a formatted version of the given "token" object, which
should have at least ``startchar`` and ``endchar`` attributes, and
a ``text`` attribute if ``replace`` is True.
:param text: the original fragment text being highlighted.
:param token: an object having ``startchar`` and ``endchar`` attributes
and optionally a ``text`` attribute (if ``replace`` is True).
:param replace: if True, the original text between the token's
``startchar`` and ``endchar`` indices will be replaced with the
value of the token's ``text`` attribute.
"""
raise NotImplementedError
def format_fragment(self, fragment, replace=False):
"""Returns a formatted version of the given text, using the "token"
objects in the given :class:`Fragment`.
:param fragment: a :class:`Fragment` object representing a list of
matches in the text.
:param replace: if True, the original text corresponding to each
match will be replaced with the value of the token object's
``text`` attribute.
"""
output = []
index = fragment.startchar
text = fragment.text
for t in fragment.matches:
if t.startchar is None:
continue
if t.startchar < index:
continue
if t.startchar > index:
output.append(self._text(text[index:t.startchar]))
output.append(self.format_token(text, t, replace))
index = t.endchar
output.append(self._text(text[index:fragment.endchar]))
out_string = "".join(output)
return out_string
def format(self, fragments, replace=False):
"""Returns a formatted version of the given text, using a list of
:class:`Fragment` objects.
"""
formatted = [self.format_fragment(f, replace=replace)
for f in fragments]
return self.between.join(formatted)
def __call__(self, text, fragments):
# For backwards compatibility
return self.format(fragments)
class NullFormatter(Formatter):
"""Formatter that does not modify the string.
"""
def format_token(self, text, token, replace=False):
return get_text(text, token, replace)
class UppercaseFormatter(Formatter):
"""Returns a string in which the matched terms are in UPPERCASE.
"""
def __init__(self, between="..."):
"""
:param between: the text to add between fragments.
"""
self.between = between
def format_token(self, text, token, replace=False):
ttxt = get_text(text, token, replace)
return ttxt.upper()
class HtmlFormatter(Formatter):
"""Returns a string containing HTML formatting around the matched terms.
This formatter wraps matched terms in an HTML element with two class names.
The first class name (set with the constructor argument ``classname``) is
the same for each match. The second class name (set with the constructor
argument ``termclass`` is different depending on which term matched. This
allows you to give different formatting (for example, different background
colors) to the different terms in the excerpt.
>>> hf = HtmlFormatter(tagname="span", classname="match", termclass="term")
>>> hf(mytext, myfragments)
"The <span class="match term0">template</span> <span class="match term1">geometry</span> is..."
This object maintains a dictionary mapping terms to HTML class names (e.g.
``term0`` and ``term1`` above), so that multiple excerpts will use the same
class for the same term. If you want to re-use the same HtmlFormatter
object with different searches, you should call HtmlFormatter.clear()
between searches to clear the mapping.
"""
template = '<%(tag)s class=%(q)s%(cls)s%(tn)s%(q)s>%(t)s</%(tag)s>'
def __init__(self, tagname="strong", between="...",
classname="match", termclass="term", maxclasses=5,
attrquote='"'):
"""
:param tagname: the tag to wrap around matching terms.
:param between: the text to add between fragments.
:param classname: the class name to add to the elements wrapped around
matching terms.
:param termclass: the class name prefix for the second class which is
different for each matched term.
:param maxclasses: the maximum number of term classes to produce. This
limits the number of classes you have to define in CSS by recycling
term class names. For example, if you set maxclasses to 3 and have
5 terms, the 5 terms will use the CSS classes ``term0``, ``term1``,
``term2``, ``term0``, ``term1``.
"""
self.between = between
self.tagname = tagname
self.classname = classname
self.termclass = termclass
self.attrquote = attrquote
self.maxclasses = maxclasses
self.seen = {}
self.htmlclass = " ".join((self.classname, self.termclass))
def _text(self, text):
return htmlescape(text, quote=False)
def format_token(self, text, token, replace=False):
seen = self.seen
ttext = self._text(get_text(text, token, replace))
if ttext in seen:
termnum = seen[ttext]
else:
termnum = len(seen) % self.maxclasses
seen[ttext] = termnum
return self.template % {"tag": self.tagname, "q": self.attrquote,
"cls": self.htmlclass, "t": ttext,
"tn": termnum}
def clean(self):
"""Clears the dictionary mapping terms to HTML classnames.
"""
self.seen = {}
class GenshiFormatter(Formatter):
"""Returns a Genshi event stream containing HTML formatting around the
matched terms.
"""
def __init__(self, qname="strong", between="..."):
"""
:param qname: the QName for the tag to wrap around matched terms.
:param between: the text to add between fragments.
"""
self.qname = qname
self.between = between
from genshi.core import START, END, TEXT # @UnresolvedImport
from genshi.core import Attrs, Stream # @UnresolvedImport
self.START, self.END, self.TEXT = START, END, TEXT
self.Attrs, self.Stream = Attrs, Stream
def _add_text(self, text, output):
if output and output[-1][0] == self.TEXT:
output[-1] = (self.TEXT, output[-1][1] + text, output[-1][2])
else:
output.append((self.TEXT, text, (None, -1, -1)))
def format_token(self, text, token, replace=False):
qn = self.qname
txt = get_text(text, token, replace)
return self.Stream([(self.START, (qn, self.Attrs()), (None, -1, -1)),
(self.TEXT, txt, (None, -1, -1)),
(self.END, qn, (None, -1, -1))])
def format_fragment(self, fragment, replace=False):
output = []
index = fragment.startchar
text = fragment.text
for t in fragment.matches:
if t.startchar > index:
self._add_text(text[index:t.startchar], output)
output.append((text, t, replace))
index = t.endchar
if index < len(text):
self._add_text(text[index:], output)
return self.Stream(output)
def format(self, fragments, replace=False):
output = []
first = True
for fragment in fragments:
if not first:
self._add_text(self.between, output)
output += self.format_fragment(fragment, replace=replace)
first = False
return self.Stream(output)
# Highlighting
def top_fragments(fragments, count, scorer, order, minscore=1):
scored_fragments = ((scorer(f), f) for f in fragments)
scored_fragments = nlargest(count, scored_fragments)
best_fragments = [sf for score, sf in scored_fragments if score >= minscore]
best_fragments.sort(key=order)
return best_fragments
def highlight(text, terms, analyzer, fragmenter, formatter, top=3,
scorer=None, minscore=1, order=FIRST, mode="query"):
if scorer is None:
scorer = BasicFragmentScorer()
if type(fragmenter) is type:
fragmenter = fragmenter()
if type(formatter) is type:
formatter = formatter()
if type(scorer) is type:
scorer = scorer()
if scorer is None:
scorer = BasicFragmentScorer()
termset = frozenset(terms)
tokens = analyzer(text, chars=True, mode=mode, removestops=False)
tokens = set_matched_filter(tokens, termset)
fragments = fragmenter.fragment_tokens(text, tokens)
fragments = top_fragments(fragments, top, scorer, order, minscore)
return formatter(text, fragments)
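# Hedged illustration (not part of the original module): a direct call to highlight(),
# assuming whoosh.analysis.StandardAnalyzer is importable and that the WholeFragmenter
# defined earlier in this module is available alongside the UppercaseFormatter above.
def _highlight_demo():
    from whoosh.analysis import StandardAnalyzer
    text = u"The template geometry defines the template shape."
    # Matched terms must be given as the analyzed (lowercased) token texts.
    return highlight(text, frozenset([u"template"]), StandardAnalyzer(),
                     WholeFragmenter(), UppercaseFormatter())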
class Highlighter(object):
def __init__(self, fragmenter=None, scorer=None, formatter=None,
always_retokenize=False, order=FIRST):
self.fragmenter = fragmenter or ContextFragmenter()
self.scorer = scorer or BasicFragmentScorer()
self.formatter = formatter or HtmlFormatter(tagname="b")
self.order = order
self.always_retokenize = always_retokenize
def can_load_chars(self, results, fieldname):
# Is it possible to build a mapping between the matched terms/docs and
# their start and end chars for "pinpoint" highlighting (ie not require
# re-tokenizing text)?
if self.always_retokenize:
# No, we've been configured to always retokenize some text
return False
if not results.has_matched_terms():
# No, we don't know what the matched terms are yet
return False
if self.fragmenter.must_retokenize():
# No, the configured fragmenter doesn't support it
return False
# Maybe, if the field was configured to store characters
field = results.searcher.schema[fieldname]
return field.supports("characters")
@staticmethod
def _load_chars(results, fieldname, texts, to_bytes):
# For each docnum, create a mapping of text -> [(startchar, endchar)]
# for the matched terms
results._char_cache[fieldname] = cache = {}
sorted_ids = sorted(docnum for _, docnum in results.top_n)
for docnum in sorted_ids:
cache[docnum] = {}
for text in texts:
btext = to_bytes(text)
m = results.searcher.postings(fieldname, btext)
docset = set(results.termdocs[(fieldname, btext)])
for docnum in sorted_ids:
if docnum in docset:
m.skip_to(docnum)
assert m.id() == docnum
# tests/asp/gringo/modelchecker.035.test.py
input = """
% This used to generate incorrect results (models were missing) and was
% provided by the Potsdam group.
p14|p6|p6|p24:-not p14,p23.
p14|p6|p6|p24:-p22,p23.
p14|p6|p6|p7:-not p14,p23.
p14|p6|p6|p7:-p22,p23.
p14|p6|p9|p24:-not p14,p23.
p14|p6|p9|p24:-p22,p23.
p14|p6|p9|p7:-not p14,p23.
p14|p6|p9|p7:-p22,p23.
p14|p11|p6|p24:-not p14,p23.
p14|p11|p6|p24:-p22,p23.
p14|p11|p6|p7:-not p14,p23.
p14|p11|p6|p7:-p22,p23.
p14|p11|p9|p24:-not p14,p23.
p14|p11|p9|p24:-p22,p23.
p14|p11|p9|p7:-not p14,p23.
p14|p11|p9|p7:-p22,p23.
p2|p6|p6|p24:-not p14,p23.
p2|p6|p6|p24:-p22,p23.
p2|p6|p6|p7:-not p14,p23.
p2|p6|p6|p7:-p22,p23.
p2|p6|p9|p24:-not p14,p23.
p2|p6|p9|p24:-p22,p23.
p2|p6|p9|p7:-not p14,p23.
p2|p6|p9|p7:-p22,p23.
p2|p11|p6|p24:-not p14,p23.
p2|p11|p6|p24:-p22,p23.
p2|p11|p6|p7:-not p14,p23.
p2|p11|p6|p7:-p22,p23.
p2|p11|p9|p24:-not p14,p23.
p2|p11|p9|p24:-p22,p23.
p2|p11|p9|p7:-not p14,p23.
p2|p11|p9|p7:-p22,p23.
p22|not_p23|p15|p5|not_p8:-p16.
p22|not_p23|p15|p5|not_p8:-p5.
p22|not_p23|p15|p9|not_p8:-p16.
p22|not_p23|p15|p9|not_p8:-p5.
p22|not_p23|p16|p5|not_p8:-p16.
p22|not_p23|p16|p5|not_p8:-p5.
p22|not_p23|p16|p9|not_p8:-p16.
p22|not_p23|p16|p9|not_p8:-p5.
p22|p24|p15|p5|not_p8:-p16.
p22|p24|p15|p5|not_p8:-p5.
p22|p24|p15|p9|not_p8:-p16.
p22|p24|p15|p9|not_p8:-p5.
p22|p24|p16|p5|not_p8:-p16.
p22|p24|p16|p5|not_p8:-p5.
p22|p24|p16|p9|not_p8:-p16.
p22|p24|p16|p9|not_p8:-p5.
not_p6|not_p23|p15|p5|not_p8:-p16.
not_p6|not_p23|p15|p5|not_p8:-p5.
not_p6|not_p23|p15|p9|not_p8:-p16.
not_p6|not_p23|p15|p9|not_p8:-p5.
not_p6|not_p23|p16|p5|not_p8:-p16.
not_p6|not_p23|p16|p5|not_p8:-p5.
not_p6|not_p23|p16|p9|not_p8:-p16.
not_p6|not_p23|p16|p9|not_p8:-p5.
not_p6|p24|p15|p5|not_p8:-p16.
not_p6|p24|p15|p5|not_p8:-p5.
not_p6|p24|p15|p9|not_p8:-p16.
not_p6|p24|p15|p9|not_p8:-p5.
not_p6|p24|p16|p5|not_p8:-p16.
not_p6|p24|p16|p5|not_p8:-p5.
not_p6|p24|p16|p9|not_p8:-p16.
not_p6|p24|p16|p9|not_p8:-p5.
not_p20|p11|p5|not_p5:-not p13.
not_p20|p11|p5:-not p13.
not_p20|p11|p5|p21|not_p5:-not p13.
not_p20|p11|p5|p21:-not p13.
not_p20|p11|p4|p5|not_p5:-not p13.
not_p20|p11|p4|p5:-not p13.
not_p20|p11|p4|p21|not_p5:-not p13.
not_p20|p11|p4|p21:-not p13.
not_p20|p19|p5|not_p5:-not p13.
not_p20|p19|p5:-not p13.
not_p20|p19|p5|p21|not_p5:-not p13.
not_p20|p19|p5|p21:-not p13.
not_p20|p19|p4|p5|not_p5:-not p13.
not_p20|p19|p4|p5:-not p13.
not_p20|p19|p4|p21|not_p5:-not p13.
not_p20|p19|p4|p21:-not p13.
p11|p5|not_p5:-not p13,not p15.
p11|p5:-not p13,not p15.
p11|p5|p21|not_p5:-not p13,not p15.
p11|p5|p21:-not p13,not p15.
p11|p4|p5|not_p5:-not p13,not p15.
p11|p4|p5:-not p13,not p15.
p11|p4|p21|not_p5:-not p13,not p15.
p11|p4|p21:-not p13,not p15.
p19|p5|not_p5:-not p13,not p15.
p19|p5:-not p13,not p15.
p19|p5|p21|not_p5:-not p13,not p15.
p19|p5|p21:-not p13,not p15.
p19|p4|p5|not_p5:-not p13,not p15.
p19|p4|p5:-not p13,not p15.
p19|p4|p21|not_p5:-not p13,not p15.
p19|p4|p21:-not p13,not p15.
p15|not_p3|p21:-p1,p23.
p15|not_p3|p21:-p8,p23.
p15|not_p3|p21:-p1,p23,not p16.
p15|not_p3|p21:-p8,p23,not p16.
p15|p16|p21:-p1,p23.
p15|p16|p21:-p8,p23.
p15|p16|p21:-p1,p23,not p16.
p15|p16|p21:-p8,p23,not p16.
p2|not_p3|p21:-p1,p23.
p2|not_p3|p21:-p8,p23.
p2|not_p3|p21:-p1,p23,not p16.
p2|not_p3|p21:-p8,p23,not p16.
p2|p16|p21:-p1,p23.
p2|p16|p21:-p8,p23.
p2|p16|p21:-p1,p23,not p16.
p2|p16|p21:-p8,p23,not p16.
p8|p5|p10|not_p19:-p23,p6.
p8|p5|p10|not_p19:-p11,p6.
p8|p5|p10|not_p4:-p23,p6.
p8|p5|p10|not_p4:-p11,p6.
p8|p5|p4|not_p19:-p23,p6.
p8|p5|p4|not_p19:-p11,p6.
p8|p5|p4|not_p4:-p23,p6.
p8|p5|p4|not_p4:-p11,p6.
p8|p9|p10|not_p19:-p23,p6.
p8|p9|p10|not_p19:-p11,p6.
p8|p9|p10|not_p4:-p23,p6.
p8|p9|p10|not_p4:-p11,p6.
p8|p9|p4|not_p19:-p23,p6.
p8|p9|p4|not_p19:-p11,p6.
p8|p9|p4|not_p4:-p23,p6.
p8|p9|p4|not_p4:-p11,p6.
p18|p5|p10|not_p19:-p23,p6.
p18|p5|p10|not_p19:-p11,p6.
p18|p5|p10|not_p4:-p23,p6.
p18|p5|p10|not_p4:-p11,p6.
p18|p5|p4|not_p19:-p23,p6.
p18|p5|p4|not_p19:-p11,p6.
p18|p5|p4|not_p4:-p23,p6.
p18|p5|p4|not_p4:-p11,p6.
p18|p9|p10|not_p19:-p23,p6.
p18|p9|p10|not_p19:-p11,p6.
p18|p9|p10|not_p4:-p23,p6.
p18|p9|p10|not_p4:-p11,p6.
p18|p9|p4|not_p19:-p23,p6.
p18|p9|p4|not_p19:-p11,p6.
p18|p9|p4|not_p4:-p23,p6.
p18|p9|p4|not_p4:-p11,p6.
p18|p5|not_p1|p9:-p1,p12.
p18|p5|not_p1|p9:-p16,p12.
p18|p5|not_p1|p4:-p1,p12.
p18|p5|not_p1|p4:-p16,p12.
p18|p5|not_p24|p9:-p1,p12.
p18|p5|not_p24|p9:-p16,p12.
p18|p5|not_p24|p4:-p1,p12.
p18|p5|not_p24|p4:-p16,p12.
p18|not_p1|p9:-p1,p12,not p11.
p18|not_p1|p9:-p16,p12,not p11.
p18|not_p1|p4:-p1,p12,not p11.
p18|not_p1|p4:-p16,p12,not p11.
p18|not_p24|p9:-p1,p12,not p11.
p18|not_p24|p9:-p16,p12,not p11.
p18|not_p24|p4:-p1,p12,not p11.
p18|not_p24|p4:-p16,p12,not p11.
not_p2|p5|not_p1|p9:-p1,p12.
not_p2|p5|not_p1|p9:-p16,p12.
not_p2|p5|not_p1|p4:-p1,p12.
not_p2|p5|not_p1|p4:-p16,p12.
not_p2|p5|not_p24|p9:-p1,p12.
not_p2|p5|not_p24|p9:-p16,p12.
not_p2|p5|not_p24|p4:-p1,p12.
not_p2|p5|not_p24|p4:-p16,p12.
not_p2|not_p1|p9:-p1,p12,not p11.
not_p2|not_p1|p9:-p16,p12,not p11.
not_p2|not_p1|p4:-p1,p12,not p11.
not_p2|not_p1|p4:-p16,p12,not p11.
not_p2|not_p24|p9:-p1,p12,not p11.
not_p2|not_p24|p9:-p16,p12,not p11.
not_p2|not_p24|p4:-p1,p12,not p11.
not_p2|not_p24|p4:-p16,p12,not p11.
p12|not_p15|p16|p17:-p6,p1.
p12|not_p15|p16|p17:-p9,p1.
p12|not_p15|p16|p18:-p6,p1.
p12|not_p15|p16|p18:-p9,p1.
p12|not_p15|p7|p17:-p6,p1.
p12|not_p15|p7|p17:-p9,p1.
p12|not_p15|p7|p18:-p6,p1.
p12|not_p15|p7|p18:-p9,p1.
p12|p4|p16|p17:-p6,p1.
p12|p4|p16|p17:-p9,p1.
p12|p4|p16|p18:-p6,p1.
p12|p4|p16|p18:-p9,p1.
p12|p4|p7|p17:-p6,p1.
p12|p4|p7|p17:-p9,p1.
p12|p4|p7|p18:-p6,p1.
p12|p4|p7|p18:-p9,p1.
p6|not_p15|p16|p17:-p6,p1.
p6|not_p15|p16|p17:-p9,p1.
p6|not_p15|p16|p18:-p6,p1.
p6|not_p15|p16|p18:-p9,p1.
p6|not_p15|p7|p17:-p6,p1.
p6|not_p15|p7|p17:-p9,p1.
p6|not_p15|p7|p18:-p6,p1.
p6|not_p15|p7|p18:-p9,p1.
p6|p4|p16|p17:-p6,p1.
p6|p4|p16|p17:-p9,p1.
p6|p4|p16|p18:-p6,p1.
p6|p4|p16|p18:-p9,p1.
p6|p4|p7|p17:-p6,p1.
p6|p4|p7|p17:-p9,p1.
p6|p4|p7|p18:-p6,p1.
p6|p4|p7|p18:-p9,p1.
p1|p12|p2|not_p16:-not p23,not p3.
p1|p12|p2:-p19,not p23,not p3.
p1|p12|not_p16:-not p23,not p3,not p11.
p1|p12:-p19,not p23,not p3,not p11.
p1|p7|p2|not_p16:-not p23,not p3.
p1|p7|p2:-p19,not p23,not p3.
p1|p7|not_p16:-not p23,not p3,not p11.
p1|p7:-p19,not p23,not p3,not p11.
p6|p12|p2|not_p16:-not p23,not p3.
p6|p12|p2:-p19,not p23,not p3.
p6|p12|not_p16:-not p23,not p3,not p11.
p6|p12:-p19,not p23,not p3,not p11.
p6|p7|p2|not_p16:-not p23,not p3.
p6|p7|p2:-p19,not p23,not p3.
p6|p7|not_p16:-not p23,not p3,not p11.
p6|p7:-p19,not p23,not p3,not p11.
p19|p1|p12|p2|not_p16:-not p23.
p19|p1|p12|p2:-p19,not p23.
p19|p1|p12|not_p16:-not p23,not p11.
p19|p1|p12:-p19,not p23,not p11.
p19|p1|p7|p2|not_p16:-not p23.
p19|p1|p7|p2:-p19,not p23.
p19|p1|p7|not_p16:-not p23,not p11.
p19|p1|p7:-p19,not p23,not p11.
p19|p6|p12|p2|not_p16:-not p23.
p19|p6|p12|p2:-p19,not p23.
p19|p6|p12|not_p16:-not p23,not p11.
p19|p6|p12:-p19,not p23,not p11.
p19|p6|p7|p2|not_p16:-not p23.
p19|p6|p7|p2:-p19,not p23.
p19|p6|p7|not_p16:-not p23,not p11.
p19|p6|p7:-p19,not p23,not p11.
not_p19|not_p9|not_p25|p13:-p11,p21.
not_p19|not_p9|not_p25|p13:-p13,p21.
not_p19|not_p9|not_p25|not_p9:-p11,p21.
not_p19|not_p9|not_p25|not_p9:-p13,p21.
not_p19|not_p9|not_p3|p13:-p11,p21.
not_p19|not_p9|not_p3|p13:-p13,p21.
not_p19|not_p9|not_p3|not_p9:-p11,p21.
not_p19|not_p9|not_p3|not_p9:-p13,p21.
not_p19|p22|not_p25|p13:-p11,p21.
not_p19|p22|not_p25|p13:-p13,p21.
not_p19|p22|not_p25|not_p9:-p11,p21.
not_p19|p22|not_p25|not_p9:-p13,p21.
not_p19|p22|not_p3|p13:-p11,p21.
not_p19|p22|not_p3|p13:-p13,p21.
not_p19|p22|not_p3|not_p9:-p11,p21.
not_p19|p22|not_p3|not_p9:-p13,p21.
p16|not_p9|not_p25|p13:-p11,p21.
p16|not_p9|not_p25|p13:-p13,p21.
p16|not_p9|not_p25|not_p9:-p11,p21.
p16|not_p9|not_p25|not_p9:-p13,p21.
p16|not_p9|not_p3|p13:-p11,p21.
p16|not_p9|not_p3|p13:-p13,p21.
p16|not_p9|not_p3|not_p9:-p11,p21.
p16|not_p9|not_p3|not_p9:-p13,p21.
p16|p22|not_p25|p13:-p11,p21.
p16|p22|not_p25|p13:-p13,p21.
p16|p22|not_p25|not_p9:-p11,p21.
p16|p22|not_p25|not_p9:-p13,p21.
p16|p22|not_p3|p13:-p11,p21.
p16|p22|not_p3|p13:-p13,p21.
p16|p22|not_p3|not_p9:-p11,p21.
p16|p22|not_p3|not_p9:-p13,p21.
p4|p23|p21|not_p25|not_p20:-not p24.
p4|p23|p21|not_p25:-p25,not p24.
p4|p23|p21|p13|not_p20:-not p24.
p4|p23|p21|p13:-p25,not p24.
p4|p23|p2|not_p25|not_p20:-not p24.
p4|p23|p2|not_p25:-p25,not p24.
p4|p23|p2|p13|not_p20:-not p24.
p4|p23|p2|p13:-p25,not p24.
p4|p21|p21|not_p25|not_p20:-not p24.
p4|p21|p21|not_p25:-p25,not p24.
p4|p21|p21|p13|not_p20:-not p24.
p4|p21|p21|p13:-p25,not p24.
p4|p21|p2|not_p25|not_p20:-not p24.
p4|p21|p2|not_p25:-p25,not p24.
p4|p21|p2|p13|not_p20:-not p24.
p4|p21|p2|p13:-p25,not p24.
not_p18|p23|p21|not_p25|not_p20:-not p24.
not_p18|p23|p21|not_p25:-p25,not p24.
not_p18|p23|p21|p13|not_p20:-not p24.
not_p18|p23|p21|p13:-p25,not p24.
not_p18|p23|p2|not_p25|not_p20:-not p24.
not_p18|p23|p2|not_p25:-p25,not p24.
not_p18|p23|p2|p13|not_p20:-not p24.
not_p18|p23|p2|p13:-p25,not p24.
not_p18|p21|p21|not_p25|not_p20:-not p24.
not_p18|p21|p21|not_p25:-p25,not p24.
not_p18|p21|p21|p13|not_p20:-not p24.
not_p18|p21|p21|p13:-p25,not p24.
not_p18|p21|p2|not_p25|not_p20:-not p24.
not_p18|p21|p2|not_p25:-p25,not p24.
not_p18|p21|p2|p13|not_p20:-not p24.
not_p18|p21|p2|p13:-p25,not p24.
not_p12|p14|p15|not_p18:-p6,p24.
not_p12|p14|p15|not_p18:-not p5,p24.
not_p12|p14|p15|p11:-p6,p24.
not_p12|p14|p15|p11:-not p5,p24.
not_p12|p14|p2|not_p18:-p6,p24.
not_p12|p14|p2|not_p18:-not p5,p24.
not_p12|p14|p2|p11:-p6,p24.
not_p12|p14|p2|p11:-not p5,p24.
not_p12|p15|p15|not_p18:-p6,p24.
not_p12|p15|p15|not_p18:-not p5,p24.
not_p12|p15|p15|p11:-p6,p24.
not_p12|p15|p15|p11:-not p5,p24.
not_p12|p15|p2|not_p18:-p6,p24.
not_p12|p15|p2|not_p18:-not p5,p24.
not_p12|p15|p2|p11:-p6,p24.
not_p12|p15|p2|p11:-not p5,p24.
p1|p14|p15|not_p18:-p6,p24.
p1|p14|p15|not_p18:-not p5,p24.
p1|p14|p15|p11:-p6,p24.
p1|p14|p15|p11:-not p5,p24.
p1|p14|p2|not_p18:-p6,p24.
p1|p14|p2|not_p18:-not p5,p24.
p1|p14|p2|p11:-p6,p24.
p1|p14|p2|p11:-not p5,p24.
p1|p15|p15|not_p18:-p6,p24.
p1|p15|p15|not_p18:-not p5,p24.
p1|p15|p15|p11:-p6,p24.
p1|p15|p15|p11:-not p5,p24.
p1|p15|p2|not_p18:-p6,p24.
p1|p15|p2|not_p18:-not p5,p24.
p1|p15|p2|p11:-p6,p24.
p1|p15|p2|p11:-not p5,p24.
p11|p24|p8|not_p4:-p24,p2.
p11|p24|p8|not_p4:-p7,p2.
p11|p24|p8|not_p15:-p24,p2.
p11|p24|p8|not_p15:-p7,p2.
p11|p24|not_p1|not_p4:-p24,p2.
p11|p24|not_p1|not_p4:-p7,p2.
p11|p24|not_p1|not_p15:-p24,p2.
p11|p24|not_p1|not_p15:-p7,p2.
p11|not_p20|p8|not_p4:-p24,p2.
p11|not_p20|p8|not_p4:-p7,p2.
p11|not_p20|p8|not_p15:-p24,p2.
p11|not_p20|p8|not_p15:-p7,p2.
p11|not_p20|not_p1|not_p4:-p24,p2.
p11|not_p20|not_p1|not_p4:-p7,p2.
p11|not_p20|not_p1|not_p15:-p24,p2.
p11|not_p20|not_p1|not_p15:-p7,p2.
p24|p8|not_p4:-p24,p2,not p25.
p24|p8|not_p4:-p7,p2,not p25.
p24|p8|not_p15:-p24,p2,not p25.
p24|p8|not_p15:-p7,p2,not p25.
p24|not_p1|not_p4:-p24,p2,not p25.
p24|not_p1|not_p4:-p7,p2,not p25.
p24|not_p1|not_p15:-p24,p2,not p25.
p24|not_p1|not_p15:-p7,p2,not p25.
not_p20|p8|not_p4:-p24,p2,not p25.
not_p20|p8|not_p4:-p7,p2,not p25.
not_p20|p8|not_p15:-p24,p2,not p25.
not_p20|p8|not_p15:-p7,p2,not p25.
not_p20|not_p1|not_p4:-p24,p2,not p25.
not_p20|not_p1|not_p4:-p7,p2,not p25.
not_p20|not_p1|not_p15:-p24,p2,not p25.
not_p20|not_p1|not_p15:-p7,p2,not p25.
p24|p9|p6|p10:-p19,p23.
p24|p9|p6|p10:-p12,p23.
p24|p9|p6|not_p22:-p19,p23.
p24|p9|p6|not_p22:-p12,p23.
p24|p9|p24|p10:-p19,p23.
p24|p9|p24|p10:-p12,p23.
p24|p9|p24|not_p22:-p19,p23.
p24|p9|p24|not_p22:-p12,p23.
p24|not_p8|p6|p10:-p19,p23.
p24|not_p8|p6|p10:-p12,p23.
p24|not_p8|p6|not_p22:-p19,p23.
p24|not_p8|p6|not_p22:-p12,p23.
p24|not_p8|p24|p10:-p19,p23.
p24|not_p8|p24|p10:-p12,p23.
p24|not_p8|p24|not_p22:-p19,p23.
p24|not_p8|p24|not_p22:-p12,p23.
p6|p9|p6|p10:-p19,p23.
p6|p9|p6|p10:-p12,p23.
p6|p9|p6|not_p22:-p19,p23.
p6|p9|p6|not_p22:-p12,p23.
p6|p9|p24|p10:-p19,p23.
p6|p9|p24|p10:-p12,p23.
p6|p9|p24|not_p22:-p19,p23.
p6|p9|p24|not_p22:-p12,p23.
p6|not_p8|p6|p10:-p19,p23.
p6|not_p8|p6|p10:-p12,p23.
p6|not_p8|p6|not_p22:-p19,p23.
p6|not_p8|p6|not_p22:-p12,p23.
p6|not_p8|p24|p10:-p19,p23.
p6|not_p8|p24|p10:-p12,p23.
p6|not_p8|p24|not_p22:-p19,p23.
p6|not_p8|p24|not_p22:-p12,p23.
p24|p10|not_p18|not_p20:-p9,not p21.
p24|p10|not_p18|not_p20:-p19,not p21.
p24|p10|not_p18|not_p21:-p9,not p21.
p24|p10|not_p18|not_p21:-p19,not p21.
p24|p10|not_p7|not_p20:-p9,not p21.
p24|p10|not_p7|not_p20:-p19,not p21.
p24|p10|not_p7|not_p21:-p9,not p21.
p24|p10|not_p7|not_p21:-p19,not p21.
p24|p3|not_p18|not_p20:-p9,not p21.
p24|p3|not_p18|not_p20:-p19,not p21.
p24|p3|not_p18|not_p21:-p9,not p21.
p24|p3|not_p18|not_p21:-p19,not p21.
p24|p3|not_p7|not_p20:-p9,not p21.
p24|p3|not_p7|not_p20:-p19,not p21.
p24|p3|not_p7|not_p21:-p9,not p21.
p24|p3|not_p7|not_p21:-p19,not p21.
p9|p10|not_p18|not_p20:-p9,not p21.
p9|p10|not_p18|not_p20:-p19,not p21.
p9|p10|not_p18|not_p21:-p9,not p21.
p9|p10|not_p18|not_p21:-p19,not p21.
p9|p10|not_p7|not_p20:-p9,not p21.
p9|p10|not_p7|not_p20:-p19,not p21.
p9|p10|not_p7|not_p21:-p9,not p21.
p9|p10|not_p7|not_p21:-p19,not p21.
p9|p3|not_p18|not_p20:-p9,not p21.
p9|p3|not_p18|not_p20:-p19,not p21.
p9|p3|not_p18|not_p21:-p9,not p21.
p9|p3|not_p18|not_p21:-p19,not p21.
p9|p3|not_p7|not_p20:-p9,not p21.
p9|p3|not_p7|not_p20:-p19,not p21.
p9|p3|not_p7|not_p21:-p9,not p21.
p9|p3|not_p7|not_p21:-p19,not p21.
p18|not_p23|p11|p1:-p6,p23.
p18|not_p23|p11|p1:-p5,p23.
p18|not_p23|p11|not_p20:-p6,p23.
p18|not_p23|p11|not_p20:-p5,p23.
p18|not_p23|p21|p1:-p6,p23.
p18|not_p23|p21|p1:-p5,p23.
p18|not_p23|p21|not_p20:-p6,p23.
p18|not_p23|p21|not_p20:-p5,p23.
p18|p11|p1:-p6,p23,not p4.
p18|p11|p1:-p5,p23,not p4.
p18|p11|not_p20:-p6,p23,not p4.
p18|p11|not_p20:-p5,p23,not p4.
p18|p21|p1:-p6,p23,not p4.
p18|p21|p1:-p5,p23,not p4.
p18|p21|not_p20:-p6,p23,not p4.
p18|p21|not_p20:-p5,p23,not p4.
p3|p11|not_p20:-p5,p23,not p4.
p3|p21|p1:-p6,p23,not p4.
p3|p21|p1:-p5,p23,not p4.
p3|p21|not_p20:-p6,p23,not p4.
p3|p21|not_p20:-p5,p23,not p4.
p3|p20|not_p20|p3:-p13,not p22.
p3|p20|not_p20|p3:-p1,not p22.
p3|p20|not_p20|p18:-p13,not p22.
p3|p20|not_p20|p18:-p1,not p22.
p3|p20|p18|p3:-p13,not p22.
p3|p20|p18|p3:-p1,not p22.
p3|p20|p18:-p13,not p22.
p3|p20|p18:-p1,not p22.
p3|not_p20|p3:-p13,not p22,not p14.
p3|not_p20|p3:-p1,not p22,not p14.
p3|not_p20|p18:-p13,not p22,not p14.
p3|not_p20|p18:-p1,not p22,not p14.
p3|p18|p3:-p13,not p22,not p14.
p3|p18|p3:-p1,not p22,not p14.
p3|p18:-p13,not p22,not p14.
p3|p18:-p1,not p22,not p14.
not_p21|p20|not_p20|p3:-p13,not p22.
not_p21|p20|not_p20|p3:-p1,not p22.
not_p21|p20|not_p20|p18:-p13,not p22.
not_p21|p20|not_p20|p18:-p1,not p22.
not_p21|p20|p18|p3:-p13,not p22.
not_p21|p20|p18|p3:-p1,not p22.
not_p21|p20|p18:-p13,not p22.
not_p21|p20|p18:-p1,not p22.
not_p21|not_p20|p3:-p13,not p22,not p14.
not_p21|not_p20|p3:-p1,not p22,not p14.
not_p21|not_p20|p18:-p13,not p22,not p14.
not_p21|not_p20|p18:-p1,not p22,not p14.
not_p21|p18|p3:-p13,not p22,not p14.
not_p21|p18|p3:-p1,not p22,not p14.
not_p21|p18:-p13,not p22,not p14.
not_p21|p18:-p1,not p22,not p14.
p5|p12|not_p20:-p17,not p7.
p5|p12|not_p20:-p14,not p7.
p5|p12|p25|not_p20:-p17,not p7.
p5|p12|p25|not_p20:-p14,not p7.
p5|p11|p12|not_p20:-p17,not p7.
p5|p11|p12|not_p20:-p14,not p7.
p5|p11|p25|not_p20:-p17,not p7.
p5|p11|p25|not_p20:-p14,not p7.
p22|p12|not_p20:-p17,not p7.
p22|p12|not_p20:-p14,not p7.
p22|p12|p25|not_p20:-p17,not p7.
p22|p12|p25|not_p20:-p14,not p7.
p22|p11|p12|not_p20:-p17,not p7.
p22|p11|p12|not_p20:-p14,not p7.
p22|p11|p25|not_p20:-p17,not p7.
p22|p11|p25|not_p20:-p14,not p7.
p21|p5|p12|not_p20:-p17.
p21|p5|p12|not_p20:-p14.
p21|p5|p12|p25|not_p20:-p17.
p21|p5|p12|p25|not_p20:-p14.
p21|p5|p11|p12|not_p20:-p17.
p21|p5|p11|p12|not_p20:-p14.
p21|p5|p11|p25|not_p20:-p17.
p21|p5|p11|p25|not_p20:-p14.
p21|p22|p12|not_p20:-p17.
p21|p22|p12|not_p20:-p14.
p21|p22|p12|p25|not_p20:-p17.
p21|p22|p12|p25|not_p20:-p14.
p21|p22|p11|p12|not_p20:-p17.
p21|p22|p11|p12|not_p20:-p14.
p21|p22|p11|p25|not_p20:-p17.
p21|p22|p11|p25|not_p20:-p14.
not_p23|not_p7|p21|p5:-not p24,not p3.
not_p23|not_p7|p21|p5:-p13,not p3.
not_p23|not_p7|p21|p11:-not p24,not p3.
not_p23|not_p7|p21|p11:-p13,not p3.
not_p23|not_p7|not_p17|p5:-not p24,not p3.
not_p23|not_p7|not_p17|p5:-p13,not p3.
not_p23|not_p7|not_p17|p11:-not p24,not p3.
not_p23|not_p7|not_p17|p11:-p13,not p3.
not_p23|p7|p21|p5:-not p24,not p3.
not_p23|p7|p21|p5:-p13,not p3.
not_p23|p7|p21|p11:-not p24,not p3.
not_p23|p7|p21|p11:-p13,not p3.
not_p23|p7|not_p17|p5:-not p24,not p3.
not_p23|p7|not_p17|p5:-p13,not p3.
p15|p3|p1:-p19,p5,not p3.
p15|p3|p6:-p12,p5,not p3.
p15|p3|p6:-p19,p5,not p3.
p15|p1:-p12,p5,not p3,not p7.
p15|p1:-p19,p5,not p3,not p7.
p15|p6:-p12,p5,not p3,not p7.
p15|p6:-p19,p5,not p3,not p7.
p15|p4|p3|p1:-p12,p5.
p15|p4|p3|p1:-p19,p5.
p15|p4|p3|p6:-p12,p5.
p15|p4|p3|p6:-p19,p5.
p15|p4|p1:-p12,p5,not p7.
p15|p4|p1:-p19,p5,not p7.
p15|p4|p6:-p12,p5,not p7.
p15|p4|p6:-p19,p5,not p7.
p11|p8|p4|p16.
p11|p8|p4|p18.
p11|p8|not_p23|p16.
p11|p8|not_p23|p18.
p11|p1|p4|p16.
p11|p1|p4|p18.
p11|p1|not_p23|p16.
p11|p1|not_p23|p18.
p15|p8|p4|p16.
p15|p8|p4|p18.
p15|p8|not_p23|p16.
p15|p8|not_p23|p18.
p15|p1|p4|p16.
p15|p1|p4|p18.
p15|p1|not_p23|p16.
p15|p1|not_p23|p18.
p17|p6|p8|p25.
p17|p6|p8|not_p8.
p17|p6|not_p19|p25.
p17|p6|not_p19|not_p8.
p17|p12|p8|p25.
p17|p12|p8|not_p8.
p17|p12|not_p19|p25.
p17|p12|not_p19|not_p8.
p14|p6|p8|p25.
p14|p6|p8|not_p8.
p14|p6|not_p19|p25.
p14|p6|not_p19|not_p8.
p14|p12|p8|p25.
p14|p12|p8|not_p8.
p14|p12|not_p19|p25.
p14|p12|not_p19|not_p8.
p15|p14|p24|not_p3.
p15|p14|p24|p3.
p15|p14|p9|not_p3.
p15|p14|p9|p3.
p15|p11|p24|not_p3.
p15|p11|p24|p3.
p15|p11|p9|not_p3.
p15|p11|p9|p3.
not_p25|p14|p24|not_p3.
not_p25|p14|p24|p3.
not_p25|p14|p9|not_p3.
not_p25|p14|p9|p3.
not_p25|p11|p24|not_p3.
not_p25|p11|p24|p3.
not_p25|p11|p9|not_p3.
not_p25|p11|p9|p3.
p15|p2|p23|not_p12.
p15|p2|p23|p15.
p15|p2|p24|not_p12.
p15|p2|p24|p15.
p15|not_p22|p23|not_p12.
p15|not_p22|p23|p15.
p15|not_p22|p24|not_p12.
p15|not_p22|p24|p15.
p2|p23|not_p12:-not p23.
p2|p23|p15:-not p23.
p2|p24|not_p12:-not p23.
p2|p24|p15:-not p23.
not_p22|p23|not_p12:-not p23.
not_p22|p23|p15:-not p23.
not_p22|p24|not_p12:-not p23.
not_p22|p24|p15:-not p23.
not_p5|p15|not_p19|p22.
not_p5|p15|not_p19|not_p12.
not_p5|p15|p22.
not_p5|p15|p22|not_p12.
not_p5|not_p5|not_p19|p22.
not_p5|not_p5|not_p19|not_p12.
not_p5|not_p5|p22.
not_p5|not_p5|p22|not_p12.
not_p21|p15|not_p19|p22.
not_p21|p15|not_p19|not_p12.
not_p21|p15|p22.
not_p21|p15|p22|not_p12.
not_p21|not_p5|not_p19|p22.
not_p21|not_p5|not_p19|not_p12.
not_p21|not_p5|p22.
not_p21|not_p5|p22|not_p12.
not_p6:-not p6.
:-p6,not_p6.
not_p2:-not p2.
:-p2,not_p2.
not_p24:-not p24.
:-p24,not_p24.
not_p16:-not p16.
:-p16,not_p16.
not_p9:-not p9.
:-p9,not_p9.
not_p4:-not p4.
:-p4,not_p4.
not_p1:-not p1.
:-p1,not_p1.
not_p15:-not p15.
:-p15,not_p15.
not_p18:-not p18.
:-p18,not_p18.
not_p7:-not p7.
:-p7,not_p7.
not_p17:-not p17.
:-p17,not_p17.
not_p14:-not p14.
:-p14,not_p14.
not_p20:-not p20.
:-p20,not_p20.
not_p23:-not p23.
:-p23,not_p23.
not_p8:-not p8.
:-p8,not_p8.
not_p3:-not p3.
:-p3,not_p3.
not_p25:-not p25.
:-p25,not_p25.
not_p22:-not p22.
:-p22,not_p22.
not_p19:-not p19.
:-p19,not_p19.
not_p21:-not p21.
:-p21,not_p21.
not_p5:-not p5.
:-p5,not_p5.
not_p12:-not p12.
:-p12,not_p12.
"""
output = """
{p7, p2, not_p23, p15, p16, not_p6, not_p20, not_p5, not_p3, p8, not_p19, not_p4, p18, not_p1, p12, not_p24, not_p9, not_p25, not_p22, not_p21, not_p17, not_p14}
{p6, p7, not_p23, p15, p16, not_p20, not_p5, not_p3, p1, p8, not_p19, not_p4, p18, not_p24, not_p2, not_p9, not_p25, not_p12, not_p22, not_p21, not_p17, not_p14}
{p6, p7, not_p23, p15, p16, not_p20, not_p5, p1, p8, not_p19, not_p4, not_p24, not_p2, p3, not_p9, not_p25, not_p18, not_p12, not_p22, not_p21, not_p17, not_p14}
{not_p23, p15, not_p6, not_p20, not_p5, not_p3, p1, p8, not_p19, not_p4, p18, not_p24, not_p2, not_p16, not_p9, not_p25, not_p12, not_p22, not_p21, not_p7, not_p17, not_p14}
{not_p23, p15, not_p6, not_p20, not_p5, p1, p8, not_p19, not_p4, not_p24, not_p2, not_p16, p3, not_p9, not_p25, not_p18, not_p12, not_p22, not_p21, not_p7, not_p17, not_p14}
{p11, not_p23, p15, not_p6, not_p20, not_p5, not_p3, p8, not_p19, not_p4, not_p1, not_p24, not_p2, not_p16, not_p9, not_p25, not_p18, not_p12, not_p22, not_p21, not_p7, not_p17, not_p14}
{not_p23, p15, not_p6, not_p20, not_p5, p4, not_p3, p8, not_p19, not_p1, not_p24, not_p2, not_p16, not_p9, not_p25, not_p18, not_p12, not_p22, not_p21, not_p7, not_p17, not_p14}
{p6, p11, p2, not_p23, p15, not_p8, not_p20, not_p5, not_p3, not_p19, not_p4, not_p1, p12, not_p24, not_p16, not_p9, not_p25, not_p18, not_p22, not_p21, not_p7, not_p17, not_p14}
{p6, p2, not_p23, p15, not_p8, not_p20, not_p5, p4, not_p3, not_p19, not_p1, p12, not_p24, not_p16, not_p9, not_p25, not_p18, not_p22, not_p21, not_p7, not_p17, not_p14}
{p14, p11, not_p23, p15, not_p8, not_p6, not_p20, not_p5, not_p3,
"""Settings to yield control to the user."""
import os
from warnings import warn
from yaml import safe_load
from serpentTools import messages, __path__
__all__ = ['defaultSettings', 'rc']
ROOT_DIR = __path__[0]
SETTING_HEADER_CHAR = '-'
SETTING_DOC_FMTR = """.. _{tag}:
{header}
``{name}``
{header}
{desc}
::
Default: {default}
Type: {vtype}
{options}
"""
_DEPRECATED = set()
SETTING_OPTIONS_FMTR = "Options: [{}]"
defaultSettings = {
'branching.intVariables': {
'default': [],
'description': 'Name of state data variables to convert to integers '
'for each branch',
'type': list
},
'branching.floatVariables': {
'default': [],
'description': 'Names of state data variables to convert to floats '
'for each branch',
'type': list
},
'depletion.metadataKeys': {
'default': ['ZAI', 'NAMES', 'DAYS', 'BU'],
'options': 'default',
'description': 'Non-material data to store, i.e. zai, isotope names, '
'burnup schedule, etc.',
'type': list
},
'depletion.materialVariables': {
'default': [],
'description': 'Names of variables to store. '
'Empty list -> all variables.',
'type': list
},
'depletion.materials': {
'default': [],
'description': 'Names of materials to store. '
'Empty list -> all materials.',
'type': list
},
'depletion.processTotal': {
'default': True,
'description': 'Option to store the depletion data from the TOT block',
'type': bool
},
'detector.names': {
'default': [],
'description': 'List of detectors to store. Empty list -> store all '
'detectors',
'type': list
},
'verbosity': {
'default': 'warning',
'options': messages.LOG_OPTS,
'type': str,
'description': 'Set the level of errors to be shown.',
'updater': messages.updateLevel
},
'sampler.allExist': {
'default': True,
        'description': 'True if all of the files should exist. If False, '
                       'errors from missing files are suppressed',
'type': bool
},
'sampler.freeAll': {
'default': False,
'description': 'If true, do not retain data from parsers after '
'reading. Limits memory usage after reading',
'type': bool,
},
'sampler.raiseErrors': {
'default': True,
'description': 'If True, stop at the first error. Otherwise, '
'continue reading but make a note about the error',
'type': bool
},
'sampler.skipPrecheck': {
'default': False,
'description': 'If True, no checks are performed prior to preparing '
                       'data. Set this to True only if you know all files '
                       'contain the same data, as errors may arise otherwise',
'type': bool
},
'serpentVersion': {
'default': '2.1.31',
'options': ['2.1.29', '2.1.30', '2.1.31'],
# When adding new version of Serpent, add / update
# MapStrVersions with variables that indicate the start of specific
# data blocks / time parameters like burnup
'description': 'Version of SERPENT',
'type': str
},
'xs.getInfXS': {
'default': True,
'description': 'If true, store the infinite medium cross sections.',
'type': bool
},
'xs.getB1XS': {
'default': True,
'description': 'If true, store the critical leakage cross sections.',
'type': bool
},
'xs.reshapeScatter': {
'default': False,
'description': 'If true, reshape the scattering matrices to square '
'matrices. By default, these matrices are stored '
'as vectors.',
'type': bool
},
'xs.variableGroups': {
'default': [],
        'description': ('Names of variable groups from variables.yaml to be '
                        'expanded into SERPENT variables to be stored'),
'type': list
},
'xs.variableExtras': {
'default': [],
'description': 'Full SERPENT name of variables to be read',
'type': list
},
'microxs.getFlx': {
'default': True,
'description': 'If true, store the group flux ratios.',
'type': bool
},
'microxs.getXS': {
'default': True,
'description': 'If true, store the micro-group cross sections.',
'type': bool
},
'microxs.getFY': {
'default': True,
'description': 'If true, store the fission yields.',
'type': bool
}
}
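# Hedged sketch of how a single entry above could be rendered with
# SETTING_DOC_FMTR (the tag and header width are illustrative, not taken from
# this file):
#   entry = defaultSettings['depletion.processTotal']
#   print(SETTING_DOC_FMTR.format(
#       tag='depletion-processTotal', header=SETTING_HEADER_CHAR * 30,
#       name='depletion.processTotal', desc=entry['description'],
#       default=entry['default'], vtype=entry['type'].__name__, options=''))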
class DefaultSetting(object):
"""Store a single setting."""
def __init__(self, name, default, varType, description, options, updater):
self.name = name
self.description = description
self.default = default
self.type = varType
self.options = options
self.updater = updater
def __repr__(self):
return '<DefaultSetting {}: value: {}>'.format(self.name, self.default)
def validate(self, value):
"""Return True if the value matches the default scheme.
Parameters
----------
value:
value to be tested
Returns
-------
bool
if the value can be used
Raises
------
TypeError
If the value is of an incorrect type
KeyError
If the value does not correspond to one of the acceptable options
"""
if not isinstance(value, self.type):
raise TypeError('Setting {} should be of type {}, not {}'
.format(self.name, self.type, type(value)))
if self.options:
listVals = [value] if not isinstance(value, list) else value
inOptions = any([val in self.options for val in listVals])
if not inOptions:
opts = ', '.join([str(option) for option in self.options])
raise KeyError('Setting {} is {} and not one of the allowed '
'options: {}'
.format(self.name, value, opts))
return True
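# Hedged usage sketch of DefaultSetting.validate (the values are illustrative):
#   s = DefaultSetting('verbosity', 'warning', str, 'Log level',
#                      ['debug', 'info', 'warning', 'error'], None)
#   s.validate('info')    # returns True
#   s.validate(4)         # raises TypeError (not a str)
#   s.validate('chatty')  # raises KeyError (not an allowed option)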
class DefaultSettingsLoader(dict):
"""Base class for loading all the default settings."""
def __init__(self):
self.__locked = False
dict.__init__(self, self._load())
self.__locked = True
@staticmethod
def _load():
"""Load the default setting objects."""
defaults = {}
for name, value in defaultSettings.items():
if 'options' in value:
options = (value['default'] if value['options'] == 'default'
else value['options'])
else:
options = None
settingsOptions = {'name': name,
'default': value['default'],
'varType': value['type'],
'options': options,
'description': value['description'],
'updater': value.get('updater', None)
}
defaults[name] = DefaultSetting(**settingsOptions)
return defaults
def __setitem__(self, key, value):
if self.__locked:
raise KeyError('Default settings cannot be updated once set.')
        dict.__setitem__(self, key, value)
def retrieveDefaults(self):
"""Return a dictionary with the default settings."""
return {key: setting.default for key, setting in self.items()}
def validateSetting(self, name, value):
"""Validate the setting.
Parameters
----------
name: str
Full name of the setting
value: value to be set
Raises
------
KeyError
If the value is not one of the allowable options or if the
setting does not match an existing setting
TypeError
If the value is not of the correct type
"""
if name not in self:
raise KeyError('Setting {} does not exist'.format(name))
self[name].validate(value)
class UserSettingsLoader(dict):
"""Class that stores the active user settings."""
def __init__(self):
self._defaultLoader = DefaultSettingsLoader()
self.__inside = False
self.__originals = {}
dict.__init__(self, self._defaultLoader.retrieveDefaults())
def __enter__(self):
"""Use as a context manager to easily reset settings
Examples
--------
>>> rc["serpentVersion"] = "2.1.30"
>>> rc["serpentVersion"]
"2.1.30"
>>> with rc:
... rc["serpentVersion"] = "2.1.29"
... print(rc["serpentVersion"])
"2.1.29"
>>> rc["serpentVersion"]
"2.1.30"
"""
self.__inside = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
        self.__inside = False
for key, originalValue in self.__originals.items():
self[key] = originalValue
self.__originals = {}
def setValue(self, name, value):
"""Set the value of a specific setting.
Parameters
----------
name: str
Full name of the setting
value: str
value to be set
Raises
------
KeyError
If the value is not one of the allowable options or if the
setting does not match an existing setting
TypeError
If the value is not of the correct type
"""
if name in _DEPRECATED:
warn("Setting {} has been removed.".format(name))
return
if name not in self:
raise KeyError('Setting {} does not exist'.format(name))
self._defaultLoader[name].validate(value)
# if we've made it here, then the value is valid
if self.__inside:
self.__originals[name] = self[name]
if self._defaultLoader[name].updater is not None:
value = self._defaultLoader[name].updater(value)
dict.__setitem__(self, name, value)
messages.debug('Updated setting {} to {}'.format(name, value))
__setitem__ = setValue
def getReaderSettings(self, settingsPreffix):
"""Get all module-wide and reader-specific settings.
Parameters
----------
settingsPreffix: str or list
Name of the specific reader.
            Will look for settings whose names begin with this prefix, e.g.
``depletion.metadataKeys`` or ``xs.variables``
Returns
-------
dict
Single level dictionary with ``settingName: settingValue`` pairs
Raises
------
KeyError
If the reader name is not located in the ``readers`` settings
dictionary
"""
settings = {}
settingsPreffix = (
[settingsPreffix] if isinstance(settingsPreffix, str)
else settingsPreffix)
for setting, value in self.items():
settingPath = setting.split('.')
if settingPath[0] in settingsPreffix:
name = settingPath[1]
else:
continue
settings[name] = value
return settings
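    # Hedged usage sketch: with the defaults above, rc.getReaderSettings('xs')
    # returns a flat dict such as {'getInfXS': True, 'getB1XS': True,
    # 'reshapeScatter': False, 'variableGroups': [], 'variableExtras': []}.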
def expandVariables(self):
"""Extend the keyword groups into lists of serpent variables.
Returns
-------
set
Names of all variables to be scraped
"""
keywords = self['xs.variableGroups']
extras = self['xs.variableExtras']
serpentVersion = self['serpentVersion'].replace(".", "-")
if not (keywords or extras): # return empty set and don't read
return set()
variables = set(extras) if extras else set()
if not keywords:
return variables
varFile = os.path.join(ROOT_DIR, 'variables.yaml')
with open(varFile) as fObj:
groups = safe_load(fObj)
thisVersion = groups.get(serpentVersion, {})
baseGroups = groups['base']
for key in keywords:
versionVars = thisVersion.get(key)
baseVars = baseGroups.get(key)
if versionVars:
variables.update(versionVars)
elif baseVars:
variables.update(baseVars)
return variables
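    # Hedged sketch of the lookup above: with rc['xs.variableGroups'] set to
    # ['eig'] (an illustrative group name) and serpentVersion '2.1.30', the
    # group is looked up under the '2-1-30' key of variables.yaml first and
    # then under 'base'; names in rc['xs.variableExtras'] are added as-is.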
def loadYaml(self, filePath, strict=True):
"""
Update the settings based on the contents of the yaml file
.. versionadded:: 0.2.0
Parameters
----------
filePath: str, or FileType
Path to config file
strict: bool
Fail at the first incorrect setting. If false, failed settings
will not be loaded and alerts will be raised
Raises
------
KeyError or TypeError
            If settings found in the config file are not valid
# expz/insight
"""
These functions implement machine translation model training.
"""
from datetime import datetime
from fastai.basic_data import DataBunch
from fastai.callbacks import LearnerCallback, SaveModelCallback
from fastai.callbacks.tensorboard import LearnerTensorboardWriter
from fastai.train import Learner
from fastprogress.fastprogress import format_time
from functools import partial
import logging
import os
import pandas as pd
import time
import torch
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
from bleu import bleu_score
from dataloader import PervasiveDataLoader
from evaluate import beam_search
from pervasive import (
Pervasive, PervasiveBert, PervasiveEmbedding, PervasiveOriginal, dilate,
PervasiveDownsample
)
from vocab import VocabData
logger = logging.getLogger('fr2en')
src_dir = os.path.dirname(os.path.abspath(__file__))
def check_params(params, param_list):
"""
Checks that a list of parameters is found in the config file
and throws an exception if not.
"""
for param in param_list:
try:
val = params
for key in param.split('.'):
val = val[key]
except (KeyError, TypeError):
raise ValueError(f'Expected parameter "{param}" not supplied.')
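# Hedged usage sketch: check_params({'data': {'dir': '/tmp'}}, ['data.dir'])
# returns silently, while check_params({'data': {}}, ['data.dir']) raises
# ValueError('Expected parameter "data.dir" not supplied.').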
def scaled_mse_loss(y, y_hat):
"""
MSE loss scaled so that it usually lies in 0.1 - 100 range. This cannot
be converted to a lambda, because it needs to be pickleable.
"""
return 10000 * F.mse_loss(y, y_hat)
def build_learner(params, project_dir, pindex=0, comm_file=None, queues=None):
"""
Builds a fastai `Learner` object containing the model and data specified by
`params`. It is configured to run on GPU `device_id`. Assumes it is GPU
`pindex` of `world_size` total GPUs. In case more than one GPU is being
used, a file named `comm_file` is used to communicate between processes.
"""
# For user friendly error messages, check these parameters exist.
check_params(params, [
'cpu',
'data.batch_size',
'data.dir',
'data.epoch_size',
'data.max_length',
'data.max_val_size',
'data.src',
'data.tgt',
'data.vocab',
'decoder.embedding_dim',
'decoder.embedding_dropout',
'decoder.prediction_dropout',
'encoder.embedding_dim',
'encoder.embedding_dropout',
'network.bias',
'network.block_sizes',
'network.division_factor',
'network.dropout',
'network.efficient',
'network.growth_rate',
'network.kernel_size',
])
model_name = params['model_name']
# Try to make the directory for saving models.
model_dir = os.path.join(project_dir, 'model', model_name)
os.makedirs(model_dir, exist_ok=True)
# Configure GPU/CPU device settings.
cpu = params['cpu']
gpu_ids = params['gpu_ids'] if not cpu else []
world_size = len(gpu_ids) if len(gpu_ids) > 0 else 1
distributed = world_size > 1
if gpu_ids:
device_id = gpu_ids[pindex]
device = torch.device(device_id)
torch.cuda.set_device(device_id)
else:
device_id = None
device = torch.device('cpu')
# If distributed, initialize inter-process communication using shared file.
if distributed:
torch.distributed.init_process_group(backend='nccl',
world_size=world_size,
rank=pindex,
init_method=f'file://{comm_file}')
# Load vocabulary.
vocab_path = os.path.join(params['data']['dir'], params['data']['vocab'])
vocab = VocabData(vocab_path)
# Load data.
src_l = params['data']['src']
tgt_l = params['data']['tgt']
loader = PervasiveDataLoader(
os.path.join(params['data']['dir'], f'{src_l}.h5'),
os.path.join(params['data']['dir'], f'{tgt_l}.h5'),
vocab,
vocab,
params['data']['batch_size'] // world_size,
params['data']['max_length'],
epoch_size=params['data']['epoch_size'],
max_val_size=params['data']['max_val_size'],
distributed=distributed,
world_size=world_size,
pindex=pindex)
# Define neural network.
# Max length is 1 more than setting to account for BOS.
if params['network']['type'] == 'pervasive-embeddings':
model = PervasiveEmbedding(
params['network']['block_sizes'],
vocab.bos,
loader.max_length,
loader.max_length,
loader.datasets['train'].arrays[0].shape[2],
params['encoder']['embedding_dim'],
params['encoder']['embedding_dropout'],
params['network']['dropout'],
params['decoder']['prediction_dropout'],
params['network']['division_factor'],
params['network']['growth_rate'],
params['network']['bias'],
params['network']['efficient'])
        # Use the MSE loss rescaled (by 10000) for easier display in training output.
loss_func = scaled_mse_loss
elif params['network']['type'] == 'pervasive-downsample':
model = PervasiveDownsample(
params['network']['block_sizes'],
vocab.bos,
loader.max_length,
loader.max_length,
params['encoder']['embedding_dim'],
params['encoder']['embedding_dropout'],
params['network']['dropout'],
params['decoder']['prediction_dropout'],
params['network']['division_factor'],
params['network']['growth_rate'],
params['network']['bias'],
params['network']['efficient'],
params['network']['kernel_size'])
loss_func = F.cross_entropy
elif params['network']['type'] == 'pervasive-bert':
model = PervasiveBert(
params['network']['block_sizes'],
vocab.bos,
loader.max_length,
loader.max_length,
params['encoder']['embedding_dim'],
params['encoder']['embedding_dropout'],
params['network']['dropout'],
params['decoder']['prediction_dropout'],
params['network']['division_factor'],
params['network']['growth_rate'],
params['network']['bias'],
params['network']['efficient'],
params['network']['kernel_size'])
loss_func = F.cross_entropy
elif params['network']['type'] == 'pervasive-original':
model = PervasiveOriginal(
params['network']['block_sizes'],
len(vocab),
vocab.bos,
loader.max_length,
loader.max_length,
params['encoder']['embedding_dim'],
params['encoder']['embedding_dropout'],
params['network']['dropout'],
params['decoder']['prediction_dropout'],
params['network']['division_factor'],
params['network']['growth_rate'],
params['network']['bias'],
params['network']['efficient'],
params['network']['kernel_size'])
loss_func = F.cross_entropy
elif params['network']['type'] == 'pervasive':
model = Pervasive(
params['network']['block_sizes'],
len(vocab),
vocab.bos,
loader.max_length,
loader.max_length,
params['encoder']['initial_emb_dim'],
params['encoder']['embedding_dim'],
params['encoder']['embedding_dropout'],
params['network']['dropout'],
params['decoder']['prediction_dropout'],
params['network']['division_factor'],
params['network']['growth_rate'],
params['network']['bias'],
params['network']['efficient'],
params['network']['kernel_size'])
loss_func = F.cross_entropy
model.init_weights()
if device_id is not None:
if not torch.cuda.is_available():
raise ValueError(
                f'Requested to train on GPU {device_id}, but no GPU was found.')
model.cuda(device_id)
if distributed:
model = DistributedDataParallel(model, device_ids=[device_id])
data = DataBunch(loader.loaders['train'],
loader.loaders['valid'],
loader.loaders['valid'],
device=device)
# Create Learner with Adam optimizer.
learn = Learner(data, model, loss_func=loss_func, model_dir=model_dir)
AdamP = partial(
torch.optim.Adam,
betas=(params['optim']['beta1'], params['optim']['beta2']))
learn.opt_func = AdamP
learn.wd = params['optim']['wd']
return (
learn, loader.loaders['train'].src_vocab,
loader.loaders['train'].tgt_vocab)
def restore(learn, model_fn, do_dilate=False):
"""
Restores the weights of a model saved to `model_fn` to the model of
the Learner `learn`.
"""
epoch = None
if model_fn is not None:
try:
# Turning off `strict` means it is okay for the saved model not
# to have weights for all the parameters of the current model.
state = torch.load(model_fn, map_location=learn.data.device)
model = learn.model
if isinstance(model, DistributedDataParallel):
model = model.module
model.load_state_dict(state['model'], strict=False)
if do_dilate:
dilate(model.network, fill_with_avg=True)
except FileNotFoundError:
raise Exception(f'The model file {model_fn} was not found!')
fields = model_fn.split('/')[-1].split('_')
if len(fields) > 1:
try:
epoch = int(fields[1].split('.')[0]) + 1
except ValueError:
pass
return epoch
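# Hedged usage sketch (the checkpoint file name is illustrative): restoring
# from a file saved as 'model_12.pth' returns 13, so training resumes at the
# following epoch:
#   epoch = restore(learn, '/path/to/model/model_12.pth')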
def train_worker(pindex,
project_dir,
params,
comm_file=None,
restore_fn=None,
do_dilate=False,
queues=None):
"""
Trains the model as specified by `params` on GPU `gpu_ids[pindex]`.
Uses `comm_file` to communicate between processes.
Saves models and event logs to subdirectories of `project_dir`.
This is run in separate processes from the command line app, with
one process per GPU.
Optionally load a saved model with filename `restore`.
"""
# Variable used for distributed processing.
if not os.getenv('RANK', None):
os.environ['RANK'] = str(pindex)
learn, _, _ = build_learner(params, project_dir, pindex, comm_file, queues)
# Restore saved model if necessary.
epoch = restore(learn, restore_fn, do_dilate)
learn.model.cuda(params['gpu_ids'][pindex])
# Callbacks.
logs_path = learn.path / 'logs'
os.makedirs(f'{logs_path}/{params["model_name"]}', exist_ok=True)
ts = datetime.now().strftime('%Y%m%dT%H%M%S')
csv_fn = f'logs/{params["model_name"]}/log-{params["model_name"]}-{ts}'
# TODO: Enabling Tensorboard metrics causes an error.
# tbwriter = LearnerTensorboardWriter(learn, logs_path, params['model_name'])
# tbwriter.metrics_root = 'metrics/'
learn.callbacks = [
# Save callback causes 'Model not found' error when restoring.
SaveModelCallback(learn, every='epoch', name='model'),
CSVLogger(learn, csv_fn),
# tbwriter,
]
if params['network']['type'] != 'pervasive-embeddings':
learn.metrics.append(BLEUScoreMetric(learn, 5, queues, pindex))
if params['freeze']:
if isinstance(learn.model, DistributedDataParallel):
            model = learn.model.module
        else:
            model = learn.model
learn.split([model.unprojection, model.prediction_dropout])
# Untie target language embedding weights from input layer.
model.prediction.weight = torch.nn.Parameter(
model.prediction.weight.clone())
learn.freeze_to(1)
# Train with a one cycle schedule for each epoch.
check_params(params, [
'optim.epochs',
'optim.lr',
])
if pindex == 0:
g = len(params['gpu_ids']) if params['gpu_ids'] else 0
logger.info(f"Learning rate: {params['optim']['lr']}, "
f"Beta1: {params['optim']['beta1']}, "
f"Beta2: {params['optim']['beta2']}, "
f"Weight decay: {params['optim']['wd']}, "
f"Batch size: {params['data']['batch_size']}, "
f"Epoch size: {params['data']['epoch_size']}, "
f"Epochs: {params['optim']['epochs']}, "
f"GPUs: {g}")
learn.fit_one_cycle(params['optim']['epochs'],
params['optim']['lr'],
tot_epochs=params['optim']['epochs'],
start_epoch=epoch)
class CSVLogger(LearnerCallback):
"""
A `LearnerCallback` that saves history of metrics while training
`learn` into CSV `filename`.
    This is adapted from the fastai library. It is copied here so that file
    writes are done inside `with` blocks. This (1) forces the log to be
    flushed after every write and (2) allows multiple processes to write
to the file in the distributed training setting. Original:
https://github.com/fastai/fastai/blob/master/fastai/callbacks/csv_logger.py
"""
def __init__(self,
learn: Learner,
filename: str = 'history',
append: bool = False):
super().__init__(learn)
self.filename, self.append = filename, append
self.path = self.learn.path / f'{filename}.csv'
self.add_time = True
def read_logged_file(self):
"Read the content of saved file"
return pd.read_csv(self.path)
def on_train_begin(self, **kwargs):
"Prepare file with metric names."
self.path.parent.mkdir(parents=True, exist_ok=True)
names = self.learn.recorder.names[:(None if self.add_time else -1)]
header = ','.join(names) + '\n'
if self.append:
with self.path.open('a') as f:
f.write(header)
else:
with self.path.open('w') as f:
f.write(header)
def on_epoch_begin(self, **kwargs):
"""Saves the start time at the beginning of an epoch."""
if self.add_time:
self.start_epoch = time.time()
def on_epoch_end(self, epoch, smooth_loss, last_metrics, **kwargs):
"Add a line with `epoch` number, `smooth_loss` and `last_metrics`."
last_metrics = last_metrics if last_metrics is not None else []
metrics = zip(self.learn.recorder.names,
[epoch, smooth_loss] + last_metrics)
stats = [
str(stat) if isinstance(stat, int) else
'#na#' if stat is None else f'{stat:.6f}' for name, stat in metrics
]
if self.add_time:
stats.append(format_time(time.time() - self.start_epoch))
str_stats = ','.join(stats)
with self.path.open('a') as f:
f.write(str_stats + '\n')
class BLEUScoreMetric(LearnerCallback):
"""
A BLEU score `Callback` that generates an output sentence using beam
search with beam size `beam_size` and then calculates its BLEU score.
"""
def __init__(self, learn, beam_size=5, queues=None, pindex=None):
"""
`queues` is a list of Queues for passing BLEU scores between processes.
`pindex` is the index of the current process which selects the
queue of the current process from `queues`.
"""
super().__init__(learn)
self.name = 'bleu'
self.beam_size = beam_size
self.tgt_vocab = learn.data.valid_dl.tgt_vocab
if isinstance(learn.model, DistributedDataParallel):
self.Ts = self.learn.model.module.Ts
self.Tt = self.learn.model.module.Tt
else:
self.Ts = self.learn.model.Ts
self.Tt = self.learn.model.Tt
self.eos = self.tgt_vocab.eos
self.pad = self.tgt_vocab.pad
self.queues = queues
self.pindex = pindex
def on_epoch_begin(self, **kwargs):
"""
Resets the BLEU score and sentence count at the beginning of each epoch.
"""
self.bleu, self.count = 0.0, 0
def on_batch_begin(self, last_input, last_target, train, **kwargs):
"""
Calculates output sentence using beam search for every batch of
validation examples.
"""
# exoticDFT/drms: drms/json.py
import json as _json
from urllib.parse import urlencode, quote_plus
from urllib.request import urlopen
from .config import ServerConfig, _server_configs
from .utils import _split_arg
__all__ = ['const', 'HttpJsonRequest', 'HttpJsonClient']
class JsocInfoConstants:
"""
Constants for DRMS queries.
Attributes
----------
all
= ``'**ALL**'``
none
= ``'**NONE**'``
recdir
= ``'*recdir*'``
dirmtime
= ``'*dirmtime*'``
logdir
= ``'*logdir*'``
recnum
= ``'*recnum*'``
sunum
= ``'*sunum*'``
size
= ``'*size*'``
online
= ``'*online*'``
retain
= ``'*retain*'``
archive
= ``'*archive*'``
"""
all = '**ALL**'
none = '**NONE**'
recdir = '*recdir*'
dirmtime = '*dirmtime*'
logdir = '*logdir*'
recnum = '*recnum*'
sunum = '*sunum*'
size = '*size*'
online = '*online*'
retain = '*retain*'
archive = '*archive*'
const = JsocInfoConstants()
class HttpJsonRequest:
"""
Class for handling HTTP/JSON requests.
Use `HttpJsonClient` to create an instance.
"""
def __init__(self, url, encoding):
self._encoding = encoding
self._http = urlopen(url)
self._data_str = None
self._data = None
def __repr__(self):
return f'<HttpJsonRequest: {self.url}>'
@property
def url(self):
return self._http.url
@property
def raw_data(self):
if self._data_str is None:
self._data_str = self._http.read()
return self._data_str
@property
def data(self):
if self._data is None:
self._data = _json.loads(self.raw_data.decode(self._encoding))
return self._data
class HttpJsonClient:
"""
HTTP/JSON communication with the DRMS server CGIs.
Parameters
----------
server : str or drms.config.ServerConfig
Registered server ID or ServerConfig instance.
Defaults to JSOC.
debug : bool
Enable or disable debug mode (default is disabled).
"""
def __init__(self, server='jsoc', debug=False):
if isinstance(server, ServerConfig):
self._server = server
else:
self._server = _server_configs[server.lower()]
self.debug = debug
def __repr__(self):
return f'<HttpJsonClient: {self._server.name}>'
def _json_request(self, url):
if self.debug:
print(url)
return HttpJsonRequest(url, self._server.encoding)
@property
def server(self):
return self._server
@property
def debug(self):
return self._debug
@debug.setter
def debug(self, value):
self._debug = True if value else False
def show_series(self, ds_filter=None):
"""
List available data series.
Parameters
----------
ds_filter : str
Name filter regexp.
Returns
-------
result : dict
"""
query = '?' if ds_filter is not None else ""
if ds_filter is not None:
query += urlencode({'filter': ds_filter})
req = self._json_request(self._server.url_show_series + query)
return req.data
def show_series_wrapper(self, ds_filter=None, info=False):
"""
List available data series.
This is an alternative to show_series, which needs to be used
to get a list of all available series provided by JSOC. There
is currently no support for retrieving primekeys using this
CGI.
Parameters
----------
ds_filter : str
Name filter regexp.
info : bool
If False (default), the result only contains series names.
If set to True, the result includes a description for each
series.
Returns
-------
result : dict
"""
query_args = {'dbhost': self._server.show_series_wrapper_dbhost}
if ds_filter is not None:
query_args['filter'] = ds_filter
if info:
query_args['info'] = '1'
query = f'?{urlencode(query_args)}'
req = self._json_request(self._server.url_show_series_wrapper + query)
return req.data
def series_struct(self, ds):
"""
Get information about the content of a data series.
Parameters
----------
ds : str
Name of the data series.
Returns
-------
result : dict
Dictionary containing information about the data series.
"""
query = f'?{urlencode({"op": "series_struct", "ds": ds})}'
req = self._json_request(self._server.url_jsoc_info + query)
return req.data
def rs_summary(self, ds):
"""
Get summary (i.e. count) of a given record set.
Parameters
----------
ds : str
Record set query (only one series).
Returns
-------
result : dict
            Dictionary containing 'count', 'status' and 'runtime'.
"""
query = f'?{urlencode({"op": "rs_summary", "ds": ds})}'
req = self._json_request(self._server.url_jsoc_info + query)
return req.data
def rs_list(self, ds, key=None, seg=None, link=None, recinfo=False, n=None, uid=None):
"""
Get detailed information about a record set.
Parameters
----------
ds : str
Record set query.
key : str, list or None
List of requested keywords, optional.
seg : str, list or None
List of requested segments, optional.
link : str or None
List of requested Links, optional.
recinfo : bool
Request record info for each record in the record set.
n : int or None
Record set limit. For positive values, the first n records
of the record set are returned, for negative values the
last abs(n) records. If set to None (default), no limit is
applied.
uid : str or None
Session ID used when calling rs_list CGI, optional.
Returns
-------
result : dict
Dictionary containing the requested record set information.
"""
if key is None and seg is None and link is None:
raise ValueError('At least one key, seg or link must be specified')
d = {'op': 'rs_list', 'ds': ds}
if key is not None:
d['key'] = ','.join(_split_arg(key))
if seg is not None:
d['seg'] = ','.join(_split_arg(seg))
if link is not None:
d['link'] = ','.join(_split_arg(link))
if recinfo:
d['R'] = '1'
if n is not None:
            d['n'] = f'{int(n)}'
if uid is not None:
d['userhandle'] = uid
query = f'?{urlencode(d)}'
req = self._json_request(self._server.url_jsoc_info + query)
return req.data
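    # Hedged usage sketch (the series and keyword names are illustrative):
    #   c = HttpJsonClient(server='jsoc')
    #   d = c.rs_list('hmi.v_45s[2016.04.01_TAI]', key=['T_REC', 'CRLT_OBS'])
    #   # d is the decoded JSON reply from the jsoc_info CGI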
def check_address(self, email):
"""
Check if an email address is registered for export data requests.
Parameters
----------
email : str
Email address to be verified.
Returns
-------
result : dict
Dictionary containing 'status' and 'msg'.
Some status codes are:
- 2: Email address is valid and registered
- 4: Email address has neither been validated nor registered
- -2: Not a valid email address
"""
query = '?' + urlencode({'address': quote_plus(email), 'checkonly': '1'})
req = self._json_request(self._server.url_check_address + query)
return req.data
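    # Hedged usage sketch (the address is illustrative):
    #   reply = c.check_address('user@example.com')
    #   if reply.get('status') == 2:
    #       ...  # address is valid and registered for export requests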
def exp_request(self, *args, **kwargs):
"""
Request data export.
Parameters
----------
ds : str
Data export record set query.
notify : str
Registered email address.
method : str
Export method. Supported methods are: 'url_quick', 'url',
'url-tar', 'ftp' and 'ftp-tar'. Default is 'url_quick'.
protocol : str
Export protocol. Supported protocols are: 'as-is', 'fits',
'jpg', 'mpg' and 'mp4'. Default is 'as-is'.
protocol_args : dict or None
Extra protocol arguments for protocols 'jpg', 'mpg' and
'mp4'. Valid arguments are: 'ct', 'scaling', 'min', 'max'
and 'size'.
filenamefmt : str, None
Custom filename format string for exported files. This is
ignored for 'url_quick'/'as-is' data exports.
process : `dict`, None
Dictionary of processing commands. Each entry is also a `dict`
containing all of the applicable options for that processing
command.
n : int or None
Limits the number of records requested. For positive
values, the first n records of the record set are returned,
for negative values the last abs(n) records. If set to None
(default), no limit is applied.
requestor : str, None or bool
Export user ID. Default is None, in which case the user
name is determined from the email address. If set to False,
the requestor argument will be omitted in the export
request.
Returns
-------
result : dict
Dictionary containing the server response to the export
request.
"""
req = self._json_request(self._exp_request_url(*args, **kwargs))
return req.data
def _exp_request_url(
self,
ds,
notify,
method='url_quick',
protocol='as-is',
protocol_args=None,
filenamefmt=None,
n=None,
process=None,
requestor=None,
):
method = method.lower()
method_list = ['url_quick', 'url', 'url-tar', 'ftp', 'ftp-tar']
if method not in method_list:
raise ValueError(
'Method {} is not supported, valid methods are: {}'.format(
method, ', '.join(str(s) for s in method_list)
)
)
protocol = protocol.lower()
img_protocol_list = ['jpg', 'mpg', 'mp4']
protocol_list = ['as-is', 'fits'] + img_protocol_list
if protocol not in protocol_list:
raise ValueError(
'Protocol {} is not supported, valid protocols are: {}'.format(
protocol, ', '.join(str(s) for s in protocol_list)
)
)
# method "url_quick" is meant to be used with "as-is", change method
# to "url" if protocol is not "as-is"
if method == 'url_quick' and protocol != 'as-is':
method = 'url'
if protocol in img_protocol_list:
extra_keys = {'ct': 'grey.sao', 'scaling': 'MINMAX', 'size': 1}
if protocol_args is not None:
for k, v in protocol_args.items():
if k.lower() == 'ct':
extra_keys['ct'] = v
elif k == 'scaling':
extra_keys[k] = v
elif k == 'size':
extra_keys[k] = int(v)
elif k in ['min', 'max']:
extra_keys[k] = float(v)
else:
raise ValueError(f'Unknown protocol argument: {k}')
protocol += ',CT={ct},scaling={scaling},size={size}'.format(**extra_keys)
if 'min' in extra_keys:
protocol += f',min={extra_keys["min"]:g}'
if 'max' in extra_keys:
protocol += f',max={extra_keys["max"]:g}'
else:
if protocol_args is not None:
raise ValueError(f'protocol_args not supported for protocol {protocol}')
d = {
'op': 'exp_request',
'format': 'json',
'ds': ds,
'notify': notify,
'method': method,
'protocol': protocol,
}
if filenamefmt is not None:
d['filenamefmt'] = filenamefmt
n = int(n) if n is not None else 0
d['process=n'] = f'{n}'
if process is not None:
allowed_processes = [
'im_patch',
'resize',
'rebin',
'aia_scale_aialev1',
'aia_scale_orig',
'aia_scale_other',
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from contextlib import ContextDecorator
import datetime
import json
import logging
import re
import time
from enum import IntEnum, unique
import redis
from util import slash_join
from util.expiresdict import ExpiresDict
logger = logging.getLogger(__name__)
ONE_DAY = 60 * 60 * 24
ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION = 5
DEFAULT_LOCK_EXPIRATION = 10000
REDIS_EXPIRING_SUFFIX = "/expiring"
REDIS_EXPIRED_SUFFIX = "/expired"
REDIS_DEFAULT_PUBSUB_KEY = "orchestrator_events"
REDIS_EVENT_KIND_MESSAGE = "message"
REDIS_EVENT_KIND_PMESSAGE = "pmessage"
REDIS_NONEXPIRING_KEY = -1
# This constant defines the Redis configuration flags used to watch [K]eyspace and e[x]pired
# events on keys. For more info, see https://redis.io/topics/notifications#configuration
REDIS_KEYSPACE_EXPIRED_EVENT_CONFIG_VALUE = "Kx"
REDIS_KEYSPACE_EVENT_CONFIG_KEY = "notify-keyspace-events"
REDIS_KEYSPACE_KEY_PATTERN = "__keyspace@%s__:%s"
REDIS_EXPIRED_KEYSPACE_PATTERN = slash_join(REDIS_KEYSPACE_KEY_PATTERN, REDIS_EXPIRING_SUFFIX)
REDIS_EXPIRED_KEYSPACE_REGEX = re.compile(REDIS_EXPIRED_KEYSPACE_PATTERN % (r"(\S+)", r"(\S+)"))
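# Hedged sketch of enabling the keyspace notifications assumed above on a Redis
# server (requires a server that permits CONFIG SET; the constants come from
# this module):
#   client = redis.StrictRedis()
#   client.config_set(REDIS_KEYSPACE_EVENT_CONFIG_KEY,
#                     REDIS_KEYSPACE_EXPIRED_EVENT_CONFIG_VALUE)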
def orchestrator_from_config(manager_config, canceller_only=False):
"""
:param manager_config: the configuration for the orchestrator
:type manager_config: dict
:rtype: :class: Orchestrator
"""
# Sanity check that legacy prefixes are no longer being used.
for key in list(manager_config["ORCHESTRATOR"].keys()):
words = key.split("_")
if len(words) > 1 and words[-1].lower() == "prefix":
raise AssertionError("legacy prefix used, use ORCHESTRATOR_PREFIX instead")
def _dict_key_prefix(d):
"""
:param d: the dict that has keys prefixed with underscore
:type d: {str: any}
:rtype: str
"""
return list(d.keys())[0].split("_", 1)[0].lower()
orchestrator_name = _dict_key_prefix(manager_config["ORCHESTRATOR"])
def format_key(key):
return key.lower().split("_", 1)[1]
orchestrator_kwargs = {
format_key(key): value for (key, value) in manager_config["ORCHESTRATOR"].items()
}
if manager_config.get("ORCHESTRATOR_PREFIX") is not None:
orchestrator_kwargs["orchestrator_prefix"] = manager_config["ORCHESTRATOR_PREFIX"]
orchestrator_kwargs["canceller_only"] = canceller_only
logger.debug(
"attempting to create orchestrator %s with kwargs %s",
orchestrator_name,
orchestrator_kwargs,
)
return orchestrator_by_name(orchestrator_name, **orchestrator_kwargs)
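# Hedged usage sketch (host name and prefix are illustrative):
#   manager_config = {
#       'ORCHESTRATOR_PREFIX': 'buildman/',
#       'ORCHESTRATOR': {'REDIS_HOST': 'redis.example.com', 'REDIS_PORT': 6379},
#   }
#   orchestrator = orchestrator_from_config(manager_config)  # RedisOrchestrator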
def orchestrator_by_name(name, **kwargs):
_ORCHESTRATORS = {
"mem": MemoryOrchestrator,
"redis": RedisOrchestrator,
}
return _ORCHESTRATORS.get(name, MemoryOrchestrator)(**kwargs)
class OrchestratorError(Exception):
pass
# TODO: replace with ConnectionError when this codebase is Python 3.
class OrchestratorConnectionError(OrchestratorError):
pass
@unique
class KeyEvent(IntEnum):
CREATE = 1
SET = 2
DELETE = 3
EXPIRE = 4
class KeyChange(namedtuple("KeyChange", ["event", "key", "value"])):
pass
class Orchestrator(metaclass=ABCMeta):
"""
Orchestrator is the interface that is used to synchronize the build states across build
managers.
This interface assumes that storage is being done by a key-value store
that supports watching for events on keys.
Missing keys should return KeyError; otherwise, errors should raise an
OrchestratorError.
:param key_prefix: the prefix of keys being watched
:type key_prefix: str
"""
@abstractmethod
def on_key_change(self, key, callback, restarter=None):
"""
The callback parameter takes in a KeyChange object as a parameter.
"""
pass
@abstractmethod
def get_prefixed_keys(self, prefix):
"""
:returns: a dict of key value pairs beginning with prefix
:rtype: {str: str}
"""
pass
@abstractmethod
def get_key(self, key):
"""
:returns: the value stored at the provided key
:rtype: str
"""
pass
@abstractmethod
def set_key(self, key, value, overwrite=False, expiration=None):
"""
:param key: the identifier for the value
:type key: str
:param value: the value being stored
:type value: str
:param overwrite: whether or not a KeyError is thrown if the key already exists
:type overwrite: bool
:param expiration: the duration in seconds that a key should be available
:type expiration: int
"""
pass
@abstractmethod
def delete_key(self, key):
"""
Deletes a key that has been set in the orchestrator.
:param key: the identifier for the key
:type key: str
"""
pass
@abstractmethod
def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION):
"""
Takes a lock for synchronizing exclusive operations cluster-wide.
:param key: the identifier for the lock
:type key: str
:param expiration: the duration until the lock expires
:type expiration: :class:`datetime.timedelta` or int (seconds)
:returns: whether or not the lock was acquired
:rtype: bool
"""
pass
@abstractmethod
def shutdown():
"""
This function should shutdown any final resources allocated by the Orchestrator.
"""
pass
def _sleep_orchestrator():
"""
This function blocks by sleeping in order to backoff if a failure
such as a ConnectionError has occurred.
"""
logger.exception(
"Connecting to orchestrator failed; sleeping for %s and then trying again",
ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION,
)
time.sleep(ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
logger.exception(
"Connecting to orchestrator failed; slept for %s and now trying again",
ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION,
)
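# A minimal sketch (not part of the original module) of how a caller might pair
# _sleep_orchestrator() with a retry loop; the key name below is a hypothetical example.
def _example_retry_read(orchestrator):
    while True:
        try:
            return orchestrator.get_key("building/example-build-id")
        except OrchestratorConnectionError:
            # Back off before asking the orchestrator again.
            _sleep_orchestrator()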
class MemoryOrchestrator(Orchestrator):
def __init__(self, **kwargs):
self.state = ExpiresDict()
self.callbacks = {}
def _callbacks_prefixed(self, key):
return (callback for (prefix, callback) in self.callbacks.items() if key.startswith(prefix))
def on_key_change(self, key, callback, restarter=None):
self.callbacks[key] = callback
def get_prefixed_keys(self, prefix):
return {
k: value for (k, value) in list(self.state.items())
if k.startswith(prefix) and not k.endswith(REDIS_EXPIRED_SUFFIX) and not k.endswith(REDIS_EXPIRING_SUFFIX)
}
def get_key(self, key):
return self.state[key]
def set_key(self, key, value, overwrite=False, expiration=None):
preexisting_key = key in self.state
if preexisting_key and not overwrite:
raise KeyError(key)
# Simulate redis' behavior when using xx and the key does not exist.
if not preexisting_key and overwrite:
return
absolute_expiration = None
if expiration is not None:
absolute_expiration = datetime.datetime.now() + datetime.timedelta(seconds=expiration)
self.state.set(key, value, expires=absolute_expiration)
self.state.set(slash_join(key, REDIS_EXPIRING_SUFFIX), value, expires=absolute_expiration)
event = KeyEvent.CREATE if not preexisting_key else KeyEvent.SET
for callback in self._callbacks_prefixed(key):
callback(KeyChange(event, key, value))
def delete_key(self, key):
value = self.state[key]
del self.state[key]
for callback in self._callbacks_prefixed(key):
callback(KeyChange(KeyEvent.DELETE, key, value))
def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION):
try:
self.set_key(key, "", overwrite=False, expiration=expiration)
except KeyError:
return False
return True
def shutdown(self):
self.state = None
self.callbacks = None
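# A minimal usage sketch of the in-memory backend (illustrative only; the key names and
# callback below are assumptions, not part of the original module).
def _example_memory_orchestrator_usage():
    orchestrator = MemoryOrchestrator()
    observed = []
    # Watch every key under the "metric/" prefix.
    orchestrator.on_key_change("metric/", lambda change: observed.append(change))
    orchestrator.set_key("metric/example", "42")  # fires a CREATE KeyChange
    orchestrator.set_key("metric/example", "43", overwrite=True)  # fires a SET KeyChange
    assert orchestrator.get_key("metric/example") == "43"
    # lock() is just set_key(overwrite=False), so a second acquisition fails until expiration.
    assert orchestrator.lock("lock/example") is True
    assert orchestrator.lock("lock/example") is False
    return observed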
class RedisOrchestrator(Orchestrator):
def __init__(
self,
host="127.0.0.1",
port=6379,
        password=None,
db=0,
cert_and_key=None,
ca_cert=None,
ssl=False,
skip_keyspace_event_setup=False,
canceller_only=False,
**kwargs,
):
self.is_canceller_only = canceller_only
(cert, key) = tuple(cert_and_key) if cert_and_key is not None else (None, None)
self._client = redis.StrictRedis(
host=host,
port=port,
password=password,
db=db,
ssl_certfile=cert,
ssl_keyfile=key,
ssl_ca_certs=ca_cert,
ssl=ssl,
socket_connect_timeout=1,
socket_timeout=2,
health_check_interval=2,
)
self._shutting_down = False
self._watched_keys = {}
self._pubsub_key = slash_join(
kwargs.get("orchestrator_prefix", ""), REDIS_DEFAULT_PUBSUB_KEY
).lstrip("/")
if not self.is_canceller_only:
# sleep_time is not really calling time.sleep(). It is the socket's timeout value.
# run_in_thread uses an event loop that uses a non-blocking `parse_response` of the PubSub object.
            # This means the event loop will return immediately even if there are no new messages.
# Setting a value other than the default 0 prevents that thread from exhausting CPU time.
# https://github.com/andymccurdy/redis-py/issues/821
# Configure a subscription to watch events that the orchestrator manually publishes.
logger.debug("creating pubsub with key %s", self._pubsub_key)
self._pubsub = self._client.pubsub()
self._pubsub.subscribe(**{self._pubsub_key: self._published_key_handler})
self._pubsub_thread = self._pubsub.run_in_thread(daemon=True, sleep_time=5)
# Configure a subscription to watch expired keyspace events.
if not skip_keyspace_event_setup:
self._client.config_set(
REDIS_KEYSPACE_EVENT_CONFIG_KEY, REDIS_KEYSPACE_EXPIRED_EVENT_CONFIG_VALUE
)
self._pubsub_expiring = self._client.pubsub()
self._pubsub_expiring.psubscribe(
**{REDIS_EXPIRED_KEYSPACE_PATTERN % (db, "*"): self._expiring_key_handler}
)
self._pubsub_expiring_thread = self._pubsub_expiring.run_in_thread(daemon=True, sleep_time=5)
def _expiring_key_handler(self, message):
try:
message_tup = (
message.get("type"),
message.get("pattern").decode("utf-8"),
message.get("channel").decode("utf-8"),
message.get("data").decode("utf-8"),
)
if self._is_expired_keyspace_event(message_tup):
# Get the value of the original key before the expiration happened.
key = self._key_from_expiration(message_tup)
expired_value = self._client.get(key)
# Mark key as expired. This key is used to track post job cleanup in the callback,
# to allow another manager to pickup the cleanup if this fails.
self._client.set(
slash_join(key, REDIS_EXPIRED_SUFFIX), expired_value
)
self._client.delete(key)
except redis.ConnectionError:
_sleep_orchestrator()
        except redis.RedisError as re:
            logger.exception("Redis exception watching redis expirations: %s - %s", message, re)
        except Exception as e:
            logger.exception("Unknown exception watching redis expirations: %s - %s", message, e)
        else:
            if self._is_expired_keyspace_event(message_tup) and expired_value is not None:
                for watched_key, callback in self._watched_keys.items():
                    if key.startswith(watched_key):
                        callback(KeyChange(KeyEvent.EXPIRE, key, expired_value))
def _published_key_handler(self, message):
try:
redis_event, event_key, event_value = (
message.get("type"),
message.get("channel").decode("utf-8"),
message.get("data").decode("utf-8"),
)
        except redis.ConnectionError:
            _sleep_orchestrator()
        except redis.RedisError as re:
            logger.exception("Redis exception handling published key event: %s - %s", message, re)
        except Exception as e:
            logger.exception("Unknown exception handling published key event: %s - %s", message, e)
        else:
            if redis_event == REDIS_EVENT_KIND_MESSAGE:
                keychange = self._publish_to_keychange(event_value)
                for watched_key, callback in self._watched_keys.items():
                    if keychange.key.startswith(watched_key):
                        callback(keychange)
def on_key_change(self, key, callback, restarter=None):
assert not self.is_canceller_only
logger.debug("watching key: %s", key)
self._watched_keys[key] = callback
@staticmethod
def _is_expired_keyspace_event(event_result):
"""
Sanity check that this isn't an unrelated keyspace event.
There could be a more efficient keyspace event config to avoid this client-side filter.
"""
if event_result is None:
return False
(redis_event, _pattern, matched_key, expired) = event_result
return (
redis_event == REDIS_EVENT_KIND_PMESSAGE
and expired == "expired"
and REDIS_EXPIRED_KEYSPACE_REGEX.match(matched_key) is not None
)
@staticmethod
def _key_from_expiration(event_result):
(_redis_event, _pattern, matched_key, _expired) = event_result
return REDIS_EXPIRED_KEYSPACE_REGEX.match(matched_key).groups()[1]
@staticmethod
def _publish_to_keychange(event_value):
e = json.loads(event_value)
return KeyChange(KeyEvent(e["event"]), e["key"], e["value"])
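    # For reference, the payload parsed above is a JSON object of the form
    # {"event": <KeyEvent int>, "key": <str>, "value": <str>}; for example
    # '{"event": 2, "key": "building/example", "value": "{}"}' becomes
    # KeyChange(KeyEvent.SET, "building/example", "{}") (the key name is illustrative).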
def get_prefixed_keys(self, prefix):
assert not self.is_canceller_only
# TODO: This can probably be done with redis pipelines to make it transactional.
keys = self._client.keys(prefix + "*")
# Yielding to the event loop is required, thus this cannot be written as a dict comprehension.
results = {}
for key in keys:
if key.decode("utf-8").endswith(REDIS_EXPIRING_SUFFIX) or key.decode("utf-8").endswith(REDIS_EXPIRED_SUFFIX):
continue
ttl = self._client.ttl(key)
if ttl == REDIS_NONEXPIRING_KEY:
# Only redis keys without expirations are live build manager keys.
try:
value = self._client.get(key)
if value is None:
raise KeyError(key)
except redis.ConnectionError as rce:
raise OrchestratorConnectionError(rce)
except redis.RedisError as re:
raise OrchestratorError(re)
results.update({key.decode("utf-8"): value.decode("utf-8")})
return results
| |
be somehow achieved by the
# same (weird) mechanism that LexMySQL auto increments when inserting a new word?
# Kind of a lex.next_available_idn() method or something?
unix_epoch = Pythonic.unix_epoch_now()
num = Number(unix_epoch)
word.set_idn_if_you_really_have_to(num)
else:
try:
unix_epoch = float(idn)
except ValueError:
unix_epoch = None
num = Number.NAN
else:
num = idn
if unix_epoch is None:
txt = Text("((indeterminate time))")
else:
txt = Text(Pythonic.time_format_yyyy_mmdd_hhmm_ss(unix_epoch))
word.populate_from_num_txt(
num=num,
txt=txt,
)
word.whn = word.num
# NOTE: Yes, idn == num == whn
# This breaks the rules for whn,
# which is supposed to indicate when the word became choate,
# e.g. was inserted into a LexMySQL record.
# But maybe that rule should only apply to a LexSentence word.
# Anyway, here it will facilitate creating a TimeLex word
# that represents a time difference.
# Because then you can check the difference
# between a TimeLex word (representing a moment in time)
# and any other word (representing anything)
# because only the whn fields will be compared.
# For example,
# t = TimeLex()
# now_word = t.now_word()
# and .-- ALL these fields in a now_word
# v represent time: idn == num == whn
# how_old_is_word_w = t[w]('differ')[now_word]
# ^--- in word w, only the whn field has a time
# in case w.num represents a time too, you can:
# t[w.num]('differ')[now_word]
return True
# TODO: TimeLex()[t1:t2] could be a time interval shorthand!
class LexSentence(Lex):
# rename candidates: Site, Book, Server, Domain, Dictionary, Qorld, Lex, Lexicon
# Station, Repo, Repository, Depot, Log, Tome, Manuscript,
# Diary, Heap, Midden, Scribe, Stow (but it's a verb), Stowage,
# Eventually, this will encapsulate other word repositories
# Or should it simply be a sibling of e.g. Listing (List)?
# This could encapsulate the idea of a container of sbj-vrb-obj words
# a sentence that defines a word.
# Yeesh, should Word be an abstract base class, and derived classes
# have sbj,vrb,obj members, and other derivations that don't?
# class Sentence(Word)?
# Make Lex formally an abstract base class
"""
LexSentence is a collection of Sentences.
A Sentence is a Numbered Word that is defined by a triplet of Words: subject, verb, object
LexSentence is the abstract base class for this kind of Word collector and factory.
Instantiate a derived class of Lex for a database or other collection of word definitions.
The word_class property is the class of words unique to this Lex instance.
If one is not supplied to this constructor,
as in LexSubClass(word_class=WordSubClass),
then such a class will be created for you.
The _lex property is a bit mind-bending.
It is the word in the lex that's an abstraction for the lex.
See, each lex needs a way to refer to itself.
A pale shadow of the way it can refer to another lex, also.
That reference (to an abstraction of itself)
is usually in the sbj or obj of a word in the lex.
If `lex` is an instance of a Lex subclass,
Then lex._lex is a word that represents that lex.
lex._lex is an instance of lex.word_class
Boy for all that mind-bending it sure isn't used for much.
It's used in Word.define as a default for the sbj=None parameter.
It's used in LexMySQL.__init__() as a hint the lex is new and empty.
"""
# TODO: class WordForLexSentence base class, ala WordListed for Listing.
def populate_word_from_idn(self, word, idn):
raise NotImplementedError
def __init__(self, **kwargs):
super(LexSentence, self).__init__(**kwargs)
self._lex = None
self._noun = None
self._verb = None
self._define = None
self._duplicate_definition_callback_functions = []
def duplicate_definition_notify(self, f):
# XXX: Sure is a drastic, totalitarian solution.
# But duplicate defines have in the past wasted a lot of time.
self._duplicate_definition_callback_functions.append(f)
class ConnectError(Exception):
pass
# Hard-code the idns of the fundamental words.
IDN_LEX = Number(0)
IDN_DEFINE = Number(1)
IDN_NOUN = Number(2)
IDN_VERB = Number(3)
IDN_AGENT = Number(4)
IDN_MAX_FIXED = Number(4)
# TODO: Why did this start at 1 before?
# TODO: Why does this start at 0 now?
def install_from_scratch(self):
raise NotImplementedError()
def uninstall_to_scratch(self):
raise NotImplementedError()
def _install_all_seminal_words(self):
"""
Insert the five fundamental sentences into the Lex database. (Unless already there.)
Each sentence uses verbs and nouns defined in some of the other seminal sentences.
The five seminal sentences:
lex = lex.define(agent, 'lex')
lex.define(verb, 'define')
noun = lex.define(noun, 'noun')
verb = lex.define(noun, 'verb')
agent = lex.define(noun, 'agent')
At least that's how they'd be defined if forward references were not a problem.
"""
def seminal_word(_idn, _obj, _txt):
"""Subject is always 'lex'. Verb is always 'define'."""
word = self[_idn]
if not word.exists():
self._install_one_seminal_word(_idn, _obj, _txt)
word = self[_idn]
assert word.exists()
__crazy_idea_define_lex_first__ = True
# TODO: Haha, the order of idns is defined by the constants.
# Rearrange them, e.g. Word.IDN_LEX
if __crazy_idea_define_lex_first__:
# forward,reflexive references
seminal_word(self.IDN_LEX, self.IDN_AGENT, 'lex') # 2,1 0,+1,+4
seminal_word(self.IDN_DEFINE, self.IDN_VERB, 'define') # 1,1 -1, 0,+2
seminal_word(self.IDN_NOUN, self.IDN_NOUN, 'noun') # 0,1 -2,-1, 0
seminal_word(self.IDN_VERB, self.IDN_NOUN, 'verb') # 0,0 -3,-2,-1
seminal_word(self.IDN_AGENT, self.IDN_NOUN, 'agent') # 0,0 -4,-3,-2
# ---
# 3,3
else:
# forward,reflexive references
seminal_word(self.IDN_DEFINE, self.IDN_VERB, 'define') # 2,1 +4, 0,+2
seminal_word(self.IDN_NOUN, self.IDN_NOUN, 'noun') # 1,1 +3,-1, 0
seminal_word(self.IDN_VERB, self.IDN_NOUN, 'verb') # 1,0 +2,-2,-1
seminal_word(self.IDN_AGENT, self.IDN_NOUN, 'agent') # 1,0 +1,-3,-2
seminal_word(self.IDN_LEX, self.IDN_AGENT, 'lex') # 0,1 0,-4,-1
# ---
# 5,3
def _install_one_seminal_word(self, _idn, _obj, _txt):
self.create_word(
override_idn=_idn,
sbj=self.IDN_LEX,
vrb=self.IDN_DEFINE,
obj=_obj,
num=Number(1),
txt=_txt,
)
def insert_word(self, word):
raise NotImplementedError()
def populate_word_from_definition(self, word, define_txt):
raise NotImplementedError()
def populate_word_from_sbj_vrb_obj(self, word, sbj, vrb, obj):
raise NotImplementedError()
def populate_word_from_sbj_vrb_obj_num_txt(self, word, sbj, vrb, obj, num, txt):
raise NotImplementedError()
def noun(self, name=None):
if name is None:
return self._noun
else:
return self.define(self._noun, name)
def verb(self, name=None): # was , sbj=None):
if name is None:
return self._verb
else:
return self.define(self._verb, name) # was: , sbj=sbj)
class DefinitionMustBeUnicode(TypeError):
"""In a word definition, the name (txt) must be Unicode."""
def define(self, obj, txt): # was , sbj=None): --- is this needed any more?
obj_could_be_many_types = obj
# sbj_could_be_none = sbj
# sbj = sbj_could_be_none or self._lex
sbj = self._lex
vrb = self._define
obj = self[obj_could_be_many_types]
if not Text.is_valid(txt):
raise self.DefinitionMustBeUnicode(
"Definition must have unicode name, not " + repr(txt)
)
try:
old_definition = self[txt]
except ValueError:
'''txt must not be defined yet.'''
else:
if old_definition.exists():
# TODO: Use create_word's use_already option instead?
# Oops, cannot!
# define() goes to EARLIEST definition
# create_word(use_already=True) goes to LATEST definition
if len(self._duplicate_definition_callback_functions) > 0:
duplicate_words = self.find_words(vrb=self._define, txt=txt, sbj=sbj)
if len(duplicate_words) != 1:
for function in self._duplicate_definition_callback_functions:
function(
txt,
"Trying to define a {obj} called '{txt}', "
"but there are already {count} definitions for '{txt}': "
"{word}".format(
obj=str(obj),
count=len(duplicate_words),
txt=txt,
word=", ".join("{idn}:{txt}".format(
idn=w.idn.qstring(),
txt=str(w.obj),
) for w in duplicate_words),
)
)
return old_definition
return self.create_word(sbj=sbj, vrb=vrb, obj=obj, txt=txt)
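    # Illustrative usage (the names are hypothetical): lex.noun('color') and lex.verb('like')
    # both funnel into define(), which reduces to
    # create_word(sbj=self._lex, vrb=self._define, obj=..., txt=...);
    # calling define() again with the same txt returns the earliest existing definition.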
def find_words(self, **kwargs):
raise NotImplementedError()
outer = 0 # HACK
inner = 0 # HACK
_global_lock = threading.Lock()
def _lock_next_word(self):
"""
Make the auto-increment simulation thread-safe.
Derived class may override the class variable _global_lock,
by mimicking the above line exactly,
so that it referees among all instances of that class,
(but only that class, not among sibling class instances)
e.g. to keep thread-specific instances of that class from racing one another.
Or the derived class may override the instance method _lock_next_word(),
so that each instance of that class has its own lock.
This might make sense if a single instance could be shared by multiple threads.
Wouldn't work with LexMySQL because apparently one mysql.connector.connect()
object cannot be shared by multiple threads.
By default all instances of all derived classes use the singleton LexSentence._global_lock
(Only applies to instances running on the same host of course.)
"""
return self._global_lock
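    # Sketch of the overrides described above (hypothetical subclasses, not in this module):
    #     class LexThreadLocal(LexSentence):
    #         _global_lock = threading.Lock()   # referees only LexThreadLocal instances
    # or, for a per-instance lock:
    #     def _lock_next_word(self):
    #         return self._my_instance_lock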
def insert_next_word(self, word):
# global max_idn_lock
# noinspection PyUnusedLocal
def droid(step):
"""
Probe droid for debugging the browse storm bugs.
EXAMPLE:
INSERT_A 'shrubbery' 0 1 unlock 54423720 0q82_04
INSERT_B 'shrubbery' 1 1 LOCKED 54423720 0q82_04
INSERT_C 'shrubbery' 1 1 LOCKED 54423720 0q82_05
INSERT_D 'shrubbery' 0 1 | |
size=226368493,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p58454174-p58655598.7z"),
page_ids=range(58454174, 58655599),
darus_id=95142,
sha1="e9318db4ec4c086ea06e41116709a7607c24f388",
size=279661929,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p58655599-p58788270.7z"),
page_ids=range(58655599, 58788271),
darus_id=95144,
sha1="8223eeda60b61b897d3a79c1b2b7434cd0e58b3e",
size=215380822,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p58788271-p58944638.7z"),
page_ids=range(58788271, 58944639),
darus_id=95146,
sha1="947d5b7613aaef8c7d630ecd2d87534385e2f8a2",
size=203765671,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p58944639-p59108291.7z"),
page_ids=range(58944639, 59108292),
darus_id=95147,
sha1="c99a7777d03d2ca14f38f23d96248c1b6d003c63",
size=218267983,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p59108292-p59273992.7z"),
page_ids=range(59108292, 59273993),
darus_id=95149,
sha1="760134e3e9ad649f8906d11f29bd4c3575f4cf1c",
size=219081885,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p59273993-p59405079.7z"),
page_ids=range(59273993, 59405080),
darus_id=95150,
sha1="39e5c58e9ced3b913687c84d2ff2d1c070f54d0e",
size=181023817,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p59405080-p59505406.7z"),
page_ids=range(59405080, 59505407),
darus_id=95151,
sha1="36cd863ded3fe297488e8bcd58f29852f10fcedf",
size=144318028,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p59505407-p59649436.7z"),
page_ids=range(59505407, 59649437),
darus_id=95152,
sha1="31dc29a4e7f7b2ae58c58b357fde6df7060d2e06",
size=195583116,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p59649437-p59781420.7z"),
page_ids=range(59649437, 59781421),
darus_id=95155,
sha1="d38165b342cad5e19f1300a647dcd4df8aeba297",
size=201052495,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p59781421-p59918839.7z"),
page_ids=range(59781421, 59918840),
darus_id=95156,
sha1="4d3b19c0a4ee5e7ec088f833439cae7daa210650",
size=207203743,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p59918840-p60065594.7z"),
page_ids=range(59918840, 60065595),
darus_id=95157,
sha1="a72eda8c2610fdde496a2b4003456964e9b818cb",
size=180762569,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p60065595-p60192698.7z"),
page_ids=range(60065595, 60192699),
darus_id=95159,
sha1="7963d57fd8c9814d8d37e566f8687197e191a064",
size=177548376,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p60192699-p60322125.7z"),
page_ids=range(60192699, 60322126),
darus_id=95160,
sha1="f57c0dd5aa55ff3fcf8b6a04cae9fd4635abc798",
size=169307236,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p60322126-p60459703.7z"),
page_ids=range(60322126, 60459704),
darus_id=95161,
sha1="312ea96636940743ae598343ad878328301a6e50",
size=182549339,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p60459704-p60587338.7z"),
page_ids=range(60459704, 60587339),
darus_id=95163,
sha1="d441dd9d598e68d9693ea40a4e2a5cf2638b06d9",
size=182950580,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p60587339-p60701562.7z"),
page_ids=range(60587339, 60701563),
darus_id=95165,
sha1="e11581c4dde11abf697fc059fb59ac7c2521fa21",
size=151097337,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p60701563-p60854585.7z"),
page_ids=range(60701563, 60854586),
darus_id=95166,
sha1="f0213d5cc4f0de4092f670a0376e5e21bb0becf9",
size=148669732,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p60854586-p61032550.7z"),
page_ids=range(60854586, 61032551),
darus_id=95167,
sha1="1312074e4ff98dbc22f8dddcfb1d5238e7f847b2",
size=144597728,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61032551-p61246796.7z"),
page_ids=range(61032551, 61246797),
darus_id=95169,
sha1="388cf3cc3e2007be0d766c4d10b735af6cc15c64",
size=143770719,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61246797-p61363915.7z"),
page_ids=range(61246797, 61363916),
darus_id=95170,
sha1="9dd554b71dfea471316ad6776e5d1f1d4861cb4a",
size=151073856,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61363916-p61461961.7z"),
page_ids=range(61363916, 61461962),
darus_id=95172,
sha1="504e41c826e69bf03226e1d1ef4d9c17f40ab440",
size=167132048,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61461962-p61563343.7z"),
page_ids=range(61461962, 61563344),
darus_id=95173,
sha1="d73ec69b468dd2d07efa8cb711a54340a7d6a7d5",
size=154731084,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61563344-p61691958.7z"),
page_ids=range(61563344, 61691959),
darus_id=95175,
sha1="1dd1e14ac503655bd9cf90d0646506a4081c2b15",
size=188143424,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61691959-p61827438.7z"),
page_ids=range(61691959, 61827439),
darus_id=95176,
sha1="9b4518d247774cd15988e1ba8bfe1268fa2dc36a",
size=198203826,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61827439-p61940925.7z"),
page_ids=range(61827439, 61940926),
darus_id=95177,
sha1="0c87adcfbeb619f8a921ce1e4224b016dd1f7d89",
size=163817930,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61940926-p61951134.7z"),
page_ids=range(61940926, 61951135),
darus_id=95178,
sha1="b125f731bda8c70772d36bcd4aa18adb6494cd2d",
size=19480600,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61951135-p61999598.7z"),
page_ids=range(61951135, 61999599),
darus_id=95179,
sha1="a7c7a2cd87e395373a7b4143d14a848092095963",
size=92922683,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61999599-p62009330.7z"),
page_ids=range(61999599, 62009331),
darus_id=95180,
sha1="9d91e2b92f96af91c570431bda0b10259e9cbb40",
size=31262209,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62009331-p62015421.7z"),
page_ids=range(62009331, 62015422),
darus_id=95181,
sha1="535011d2ae6ea0989b98311ac62673a82e895fd5",
size=29084469,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62015422-p62021053.7z"),
page_ids=range(62015422, 62021054),
darus_id=95182,
sha1="6132cdb9acb581978943fba6110eca278f702911",
size=32730543,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62021054-p62038584.7z"),
page_ids=range(62021054, 62038585),
darus_id=95183,
sha1="b7e35dfd85c987ba2ef85b426ad1fa0d712c9ade",
size=70184041,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62038585-p62066422.7z"),
page_ids=range(62038585, 62066423),
darus_id=95184,
sha1="8d3cdb0cd0951b1bad26d158dbf09567c6a1a7ec",
size=75139816,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62066423-p62077450.7z"),
page_ids=range(62066423, 62077451),
darus_id=95186,
sha1="0754d524d79f69fe038b48a874916ebf7c605e23",
size=39501278,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62077451-p62087506.7z"),
page_ids=range(62077451, 62087507),
darus_id=95187,
sha1="2f7b86e19727224d7f1302a4d63a79d311fa3a53",
size=78794161,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62087507-p62253005.7z"),
page_ids=range(62087507, 62253006),
darus_id=95188,
sha1="5452fc455f4bcef494f566c4ef3af949218201d6",
size=170527732,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62253006-p62413174.7z"),
page_ids=range(62253006, 62413175),
darus_id=95189,
sha1="6c5cd16294eea99fa30d63966ac30a99b3a07ac4",
size=398693213,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62413175-p62632019.7z"),
page_ids=range(62413175, 62632020),
darus_id=95190,
sha1="3da694c9e7566729176aad189fef7ead519a3a3a",
size=198587383,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62632020-p62799095.7z"),
page_ids=range(62632020, 62799096),
darus_id=95192,
sha1="d07ce85c83a9c4964729c42983d96a9c71f5d01f",
size=290051278,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62799096-p62938309.7z"),
page_ids=range(62799096, 62938310),
darus_id=95193,
sha1="373be03ece4ac14f6afb33eb903c2a68c8267955",
size=414297050,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62938310-p63030244.7z"),
page_ids=range(62938310, 63030245),
darus_id=95195,
sha1="44a370ead5577d6406edf6153d16563c8c22514e",
size=109469023,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p63030245-p63114211.7z"),
page_ids=range(63030245, 63114212),
darus_id=95196,
sha1="4ed1b890d1933f1e73f4a76f24fd547e8ec288d7",
size=109241585,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p63114212-p63278403.7z"),
page_ids=range(63114212, 63278404),
darus_id=95197,
sha1="4aaabe428d803fcbb12a7a8da85e840a9003e0f4",
size=173464367,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p63278404-p63479808.7z"),
page_ids=range(63278404, 63479809),
darus_id=95199,
sha1="fb96d64dbd510d46f06aa8326cb4694ab5994360",
size=183106936,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p63479809-p63664031.7z"),
page_ids=range(63479809, 63664032),
darus_id=95201,
sha1="8ada17938e935e702d69e2dfc066693a9238f50e",
size=185017851,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p63664032-p63828840.7z"),
page_ids=range(63664032, 63828841),
darus_id=95202,
sha1="4ee1e2913af96dd6f1f48634d69b81137353d05e",
size=235524168,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p63828841-p64022670.7z"),
page_ids=range(63828841, 64022671),
darus_id=95204,
sha1="c8a3f89cb1d2f58fd782a2b5396e20abbea9f29a",
size=220767083,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p64022671-p64258411.7z"),
page_ids=range(64022671, 64258412),
darus_id=95205,
sha1="70a3684acc986677d5a075b0cb44746a39b0789a",
size=213757752,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p64258412-p64417768.7z"),
page_ids=range(64258412, 64417769),
darus_id=95206,
sha1="f69e895e00677df3eef070349fc082f68e964560",
size=180566965,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p64417769-p64591960.7z"),
page_ids=range(64417769, 64591961),
darus_id=95208,
sha1="683560a65bd354fa869e8cbbfcb9e7429499ae4a",
size=183497068,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p64591961-p64767773.7z"),
page_ids=range(64591961, 64767774),
darus_id=95209,
sha1="a655142edfc1b57d313d8dc8d50e07a368d23071",
size=186175365,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p64767774-p65063475.7z"),
page_ids=range(64767774, 65063476),
darus_id=95210,
sha1="958dc2515d3a1d09b6281e2ecb5705f55c897ea6",
size=149018402,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p65063476-p65195512.7z"),
page_ids=range(65063476, 65195513),
darus_id=95213,
sha1="116bd638c3956468655db9b39d18125665b2b556",
size=120432423,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p65195513-p65286578.7z"),
page_ids=range(65195513, 65286579),
darus_id=95215,
sha1="61b4c06e413ca3484ddb14d97f15fe1529e18d18",
size=111632710,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p65286579-p65393993.7z"),
page_ids=range(65286579, 65393994),
darus_id=95216,
sha1="5d4975d7fc94caba955e83c31d8c2527148f2cc2",
size=90520466,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p65393994-p65557534.7z"),
page_ids=range(65393994, 65557535),
darus_id=95217,
sha1="150ad2a8d5d820c85573ba400decb1505932d0b4",
size=106732760,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p65557535-p65585258.7z"),
page_ids=range(65557535, 65585259),
darus_id=95218,
sha1="570975bf516649579b14370c0e2cff206fc86d64",
size=58428500,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p65585259-p65757268.7z"),
page_ids=range(65585259, 65757269),
darus_id=95220,
sha1="e593493bba4d091e3c508495d2318eb697b7a480",
size=164841072,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p65757269-p66077482.7z"),
page_ids=range(65757269, 66077483),
darus_id=95221,
sha1="859809485261aaf5c4dc9ddb7ef87ff56e96ad02",
size=232023533,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p66077483-p66255364.7z"),
page_ids=range(66077483, 66255365),
darus_id=95223,
sha1="582ab6db9fac17b6fff6a6878d1510df20a1e578",
size=231409525,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p66255365-p66509805.7z"),
page_ids=range(66255365, 66509806),
darus_id=95225,
sha1="21df233971cf727c2b194c76a5096b2c5be96cf2",
size=202868921,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p66509806-p66781694.7z"),
page_ids=range(66509806, 66781695),
darus_id=95226,
sha1="e2d0f92473a4d42240955cf70466d44fd8ee0063",
size=209380361,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p66781695-p67076296.7z"),
page_ids=range(66781695, 67076297),
darus_id=95229,
sha1="a54894be7196cf87e63c98703d56d7a5867281ae",
size=238123105,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p67076297-p67448269.7z"),
page_ids=range(67076297, 67448270),
darus_id=95231,
sha1="c2e7a160edbd29cb04c66c72eb3993453520b7ba",
size=257323916,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p67448270-p67746260.7z"),
page_ids=range(67448270, 67746261),
darus_id=95232,
sha1="c24a6f7d78f2fe50efa80b7f028dd4e3581299f4",
size=269146491,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p67746261-p68099469.7z"),
page_ids=range(67746261, 68099470),
darus_id=95233,
sha1="9551921fdbd2dec39bf6a583777c5d0f6ad74606",
size=305985214,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p68099470-p68432080.7z"),
page_ids=range(68099470, 68432081),
darus_id=95235,
sha1="b57fa6d077be56d133836a12600d9e98ba136cd3",
size=311330330,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p68432081-p68740980.7z"),
page_ids=range(68432081, 68740981),
darus_id=95237,
sha1="67dd4026d8faee79f9ea564b37478cf1cf826a11",
size=298226513,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p68740981-p68962162.7z"),
page_ids=range(68740981, 68962163),
darus_id=95239,
sha1="4040e843d3b391584092c061708f889d15965e2d",
size=193736022,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p68962163-p69247397.7z"),
page_ids=range(68962163, 69247398),
darus_id=95240,
sha1="18a6940972fb29cceeb2b822b5db855fc2eadff3",
size=248801641,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p69247398-p69576596.7z"),
page_ids=range(69247398, 69576597),
darus_id=95242,
sha1="08daa6f24ca132ae7110fd2393775700d364c3ad",
size=281430806,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p69576597-p69963244.7z"),
page_ids=range(69576597, 69963245),
darus_id=95243,
sha1="e7011d6ccc169c870332e380c2894eabea81d113",
size=319615957,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p69963245-p70352985.7z"),
page_ids=range(69963245, 70352986),
darus_id=95246,
sha1="4b1ceb453302500f80568152ce1d99a9142c072c",
size=318939926,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70352986-p70755365.7z"),
page_ids=range(70352986, 70755366),
darus_id=95248,
sha1="74f4b36eba25b385550eb98fb03fbf688ba4a7c8",
size=325967208,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70755366-p70952447.7z"),
page_ids=range(70755366, 70952448),
darus_id=95250,
sha1="207f60427bfa05b8a25f860d12324b9bda88cbc4",
size=175826321,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70952448-p70957232.7z"),
page_ids=range(70952448, 70957233),
darus_id=95251,
sha1="c6172d455d4db466023237374ffaf4779d10197f",
size=18552156,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70957233-p70961768.7z"),
page_ids=range(70957233, 70961769),
darus_id=95252,
sha1="f90fa53c8984b224812a9e64bf81ddd8ad5544bf",
size=18719248,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70961769-p70966625.7z"),
page_ids=range(70961769, 70966626),
darus_id=95253,
sha1="f9bf9bb1ccfaef1c8319b2ad24499b80e6359b1f",
size=19305079,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70966626-p70969258.7z"),
page_ids=range(70966626, 70969259),
darus_id=95254,
sha1="de1106f39c7d8789578aec723f1464c6d22bdf9d",
size=17840469,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70969259-p70971691.7z"),
page_ids=range(70969259, 70971692),
darus_id=95255,
sha1="81e9ac2b170ccc5315160cdeabf476789a4be166",
size=18218230,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70971692-p70974507.7z"),
page_ids=range(70971692, 70974508),
darus_id=95256,
sha1="92ec06aff6a0a51e1c6e8bed457d49824f82c963",
size=19899859,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70974508-p71048851.7z"),
page_ids=range(70974508, 71048852),
darus_id=95258,
sha1="4d96422cac4cdbddba67161fee35d12ec2913c06",
size=79835321,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p71048852-p71445591.7z"),
page_ids=range(71048852, 71445592),
darus_id=95259,
sha1="e4e6a927db60c5debd16d0c95655078d9ac96449",
size=326597300,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p71445592-p71805048.7z"),
page_ids=range(71445592, 71805049),
darus_id=95260,
sha1="41b97b1ee64a4fd65f6cd25bea59816b32697194",
size=308660576,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p71805049-p72094286.7z"),
page_ids=range(71805049, 72094287),
darus_id=95263,
sha1="8faf363b2bc45e9a85f87dd51d8367c2c98742c1",
size=239032615,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p72094287-p72339340.7z"),
page_ids=range(72094287, 72339341),
darus_id=95265,
sha1="2bcbdee3132e8f3b3adea0e12020eaa1b52d8b6f",
size=224346811,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p72339341-p72722930.7z"),
page_ids=range(72339341, 72722931),
darus_id=95266,
sha1="00e88319a39d8a6c828b687d3856b89b0f38c7e3",
size=327529734,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p72722931-p73077030.7z"),
page_ids=range(72722931, 73077031),
darus_id=95268,
sha1="15848ceafffecdaa0ed2198c4d69094bf10a9456",
size=330914795,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p73077031-p73455249.7z"),
page_ids=range(73077031, 73455250),
darus_id=95270,
sha1="66703902e78a2532f454d666034830874b52bcd1",
size=330944997,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p73455250-p73825768.7z"),
page_ids=range(73455250, 73825769),
darus_id=95272,
sha1="f81d5a3dc73180c6ce992a74ff9cc901a956d0c9",
size=365335216,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p73825769-p74197527.7z"),
page_ids=range(73825769, 74197528),
darus_id=95275,
sha1="9ed6e59e81f14b059a98372aa4633f6afcabd716",
size=384702624,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p74197528-p74596141.7z"),
page_ids=range(74197528, 74596142),
darus_id=95277,
sha1="67bfa973bc4186d6852f8a5bd10942ec92d7a8f0",
size=384449392,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p74596142-p74803927.7z"),
page_ids=range(74596142, 74803928),
darus_id=95279,
sha1="4b74002612c39e8be6cf16cafb8e2508c8e19e8e",
size=236588084,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p74803928-p74933695.7z"),
page_ids=range(74803928, 74933696),
darus_id=95280,
sha1="185e61fdb868949d8b2da3cc8ab100589393ac36",
size=165975375,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p74933696-p75091810.7z"),
page_ids=range(74933696, 75091811),
darus_id=95281,
sha1="9f7c550c203924845c332fc9c7f3ecd8cb246b77",
size=165801647,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p75091811-p75281950.7z"),
page_ids=range(75091811, 75281951),
darus_id=95282,
sha1="6c257d50380fc8be56262aadc2a3ea61d609a80f",
size=182903195,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p75281951-p75472873.7z"),
page_ids=range(75281951, 75472874),
darus_id=95284,
sha1="50a53cbb176bfdefabd7ebed96dae76164084124",
size=182779511,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p75472874-p75649065.7z"),
page_ids=range(75472874, 75649066),
darus_id=95285,
sha1="c161b577d005f92ddcb59eeaf24523c8fe6fc59d",
size=160657907,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p75649066-p75798893.7z"),
page_ids=range(75649066, 75798894),
darus_id=95286,
sha1="c23785b2f900bdb08ca46ff3ed6c31bce162229d",
size=134462578,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / | |
"""Fields used for forming more complex structures with other fields."""
import collections.abc
import typing
from typing import Any
from typing import BinaryIO
from typing import Callable
from typing import Iterable
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union as _Union
from binobj import errors
from binobj.fields.base import Field
from binobj.fields.base import NOT_PRESENT
from binobj.typedefs import StrDict
if typing.TYPE_CHECKING: # pragma: no cover
from binobj.structures import Struct
__all__ = ["Array", "Nested", "Union"]
T = TypeVar("T")
TStruct = TypeVar("TStruct", covariant=True, bound="Struct")
HaltCheckFn = Callable[["Array[T]", BinaryIO, List, Any, StrDict], bool]
FieldOrTStruct = _Union[Field[Any], TStruct]
LoadDecider = Callable[
[BinaryIO, Tuple[FieldOrTStruct, ...], Any, StrDict], FieldOrTStruct
]
DumpDecider = Callable[[Any, Tuple[FieldOrTStruct, ...], Any, StrDict], FieldOrTStruct]
class Array(Field[List[Optional[T]]]):
"""An array of other serializable objects.
:param Field component:
The component this array is comprised of. Must be an instance.
:param count:
Optional. Some way of indicating the number of elements in this array. The value
for this argument can be one of the following:
* An integer. The array always contains this many elements.
* A :class:`~binobj.fields.base.Field` instance that must 1) be an integer;
2) occur before this array in the same struct.
* A string naming a field fitting the above criteria. You'll need this if your
size field's name is a Python keyword.
:param callable halt_check:
A function taking five arguments. See :meth:`should_halt` for the default
implementation. Subclasses can override this function if desired to avoid having
to pass in a custom function every time.
.. versionchanged:: 0.3.0
``count`` can now be a :class:`~.fields.base.Field` or string.
.. versionchanged:: 0.6.1
:meth:`~.fields.base.Field.to_stream` and :meth:`~.fields.base.Field.to_bytes`
throw an :class:`~.errors.ArraySizeError` if ``count`` is set and the iterable
passed in is too long. Due to a bug it used to be ignored when dumping.
.. versionchanged:: 0.7.0
:attr:`.size` is set if ``component.size`` is defined and ``count`` is an
integer constant.
"""
def __init__(
self,
component: Field[T],
*,
count: _Union[int, Field[int], str, None] = None,
halt_check: Optional[HaltCheckFn] = None,
**kwargs: Any
):
super().__init__(**kwargs)
self.component = component
self.halt_check = halt_check or self.should_halt
if count is None or (
isinstance(count, (int, str, Field)) and not isinstance(count, bool)
):
# The isinstance bool check is needed because `bool` is a subclass of `int`.
self.count = count
else:
raise TypeError("`count` must be an integer, string, or a `Field`.")
if isinstance(self.count, int) and component.has_fixed_size:
self._size = self.count * typing.cast(int, component.size)
def get_final_element_count(self, field_values: StrDict) -> Optional[int]:
"""Calculate the number of elements in the array based on other fields' values.
:param dict field_values:
A dict mapping field names to their deserialized values. It doesn't need to
have every value in the struct; if :attr:`count` references a field, it only
requires that field to be present here.
:return:
The expected number of elements in this array, or ``None`` if the array
doesn't have a fixed size.
:rtype: int
.. versionadded:: 0.6.1
.. versionchanged:: 0.8.0
Throws a `ConfigurationError` if this field's :attr:`count` is a `Field` but
doesn't have an assigned name.
"""
if self.count is None:
return None
if isinstance(self.count, int):
return self.count
if isinstance(self.count, Field):
name = self.count.name
if name is None:
# This will only happen if someone creates a field outside of a Struct
# and passes it to this field as the count object.
raise errors.ConfigurationError(
"`count` field for %r has no assigned name." % self,
field=self.count,
)
elif isinstance(self.count, str):
name = self.count
else:
raise TypeError(
"Unexpected type for `count`: %r" % type(self.count).__name__
)
# The number of fields in this array is a field that should already have been
# loaded.
if name not in field_values:
raise errors.FieldReferenceError(
"Array size depends on field %r but it wasn't found." % name,
field=name,
)
return typing.cast(int, field_values[name])
@staticmethod
def should_halt(
seq: "Array[T]",
stream: BinaryIO,
values: List[Optional[T]],
context: Any,
loaded_fields: StrDict,
) -> bool:
"""Determine if the deserializer should stop reading from the input.
This function should return ``True`` to indicate loading for this field should
stop, or ``False`` to continue adding elements.
The default implementation does the following:
- If ``count`` is an integer, it compares ``count`` against the length of
``values``. If ``len(values)`` is equal to or more than ``count`` it'll return
``True`` (halt), ``False`` otherwise.
- If ``count`` is a :class:`~binobj.fields.base.Field`, that field should
already have been loaded and in ``loaded_fields``. The expected array size is
taken from there, and compared as above.
- If ``count`` is a string, it's the name of a field already loaded and in
``loaded_fields``. The expected array size is taken from there, and compared
as above.
- Otherwise, the function assumes the array ends at EOF and only returns
``True`` if there's no more data in the stream.
Subclasses' implementations must handle all four cases.
:param Array seq:
The sequence being checked.
:param BinaryIO stream:
The data stream to read from. Except in rare circumstances, this is the same
stream that was passed to :meth:`~.fields.base.Field.from_stream`. The
stream pointer should be returned to its original position when the function
exits.
:param list values:
A list of the objects that have been deserialized so far. In general this
function *should not* modify the list. A possible exception to this rule is
to remove a sentinel value from the end of the list.
:param context:
The ``context`` object passed to :meth:`~.fields.base.Field.from_stream`.
:param dict loaded_fields:
The fields in the struct that have been loaded so far.
:return: ``True`` if the deserializer should stop reading, ``False``
otherwise.
:rtype: bool
.. versionchanged:: 0.8.0
The default implementation now throws :class:`~.errors.UndefinedSizeError`
if the length of the array couldn't be determined. Previously this would
crash with a :class:`TypeError`.
"""
if seq.count is not None:
count = seq.get_final_element_count(loaded_fields)
if count is None:
# Theoretically this should never happen, as get_final_element_count()
# should only return None if seq.count is None.
raise errors.UndefinedSizeError(field=seq)
return count <= len(values)
# Else: count is None. Our only option is to check to see if we hit EOF.
offset = stream.tell()
try:
return stream.read(1) == b""
finally:
stream.seek(offset)
def _do_dump(
self,
stream: BinaryIO,
data: Iterable[Optional[T]],
context: Any,
all_fields: StrDict,
) -> None:
"""Convert the given data into bytes and write it to ``stream``.
:param BinaryIO stream:
A binary stream to write the serialized data into.
:param iterable data:
An iterable of values to dump.
:param context:
Additional data to pass to this method. Subclasses must ignore anything they
don't recognize.
:param dict all_fields:
A dictionary of the fields about to be dumped. This is guaranteed to not be
``None``.
"""
n_elems = self.get_final_element_count(all_fields)
if not isinstance(data, collections.abc.Sized):
self._dump_unsized(stream, data, n_elems, context, all_fields)
return
if n_elems is not None and len(data) != n_elems:
raise errors.ArraySizeError(
field=self, n_expected=n_elems, n_given=len(data)
)
for value in iter(data):
self.component.to_stream(stream, value, context, all_fields)
def _dump_unsized(
self,
stream: BinaryIO,
data: Iterable[Optional[T]],
n_elems: Optional[int],
context: Any,
all_fields: StrDict,
) -> None:
"""Dump an unsized iterable into the stream."""
n_written = 0
for value in data:
if n_written == n_elems:
# We've already written the requisite number of items to the stream, but
# received at least one more item. Crash.
raise errors.ArraySizeError(
field=self, n_expected=n_elems, n_given=n_written + 1
)
self.component.to_stream(
stream, value, context=context, all_fields=all_fields
)
n_written += 1
if n_elems is not None and n_written < n_elems:
raise errors.ArraySizeError(
field=self, n_expected=n_elems, n_given=n_written
)
def _do_load(
self, stream: BinaryIO, context: Any, loaded_fields: StrDict
) -> List[Optional[T]]:
"""Load a structure list from the given stream.
:param BinaryIO stream:
A bit stream to read data from.
:param context:
Additional data to pass to this method. Subclasses must ignore anything they
don't recognize.
:param dict loaded_fields:
A dictionary of the fields that have already been loaded. This is guaranteed
to not be ``None``.
:return: The deserialized data.
:rtype: list
"""
result = [] # type: List[Optional[T]]
while not self.halt_check(self, stream, result, context, loaded_fields):
component = self.component.from_stream(stream, context, loaded_fields)
if component is NOT_PRESENT:
continue
result.append(component)
return result
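def _array_usage_sketch():
    """A minimal, illustrative sketch of a counted Array.

    Assumes the usual ``binobj.Struct`` / ``binobj.fields`` API; the struct and
    field names here are made up for the example and are not part of this module.
    """
    import binobj
    from binobj import fields

    class Record(binobj.Struct):
        n_items = fields.UInt8()
        # ``count`` names the already-loaded ``n_items`` field, so loading stops
        # after that many elements instead of reading until EOF.
        items = fields.Array(fields.UInt8(), count="n_items")

    loaded = Record.from_bytes(b"\x02\x01\x02")
    assert loaded.items == [1, 2]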
class Nested(Field[TStruct]):
"""Used to nest one struct inside of another.
:param Type[~binobj.structures.Struct] struct_class:
The | |
# CalebF98/tbcc-moonkin-dps-simulator - app/simulation.py
import numpy
import pandas as pd
import sys
import logging
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
def compute_avg_dps(num_fights, intellect, crit_score, hit_score, spellpower, haste_score, is_csd, is_spellstrike, is_spellfire):
msg = f'Stats provided to sim:\n\tIntellect: {intellect}\n\tSpell Crit: {crit_score}\n\tSpell Hit: {hit_score}\n\tSpellpower: {spellpower}\n\tHaste: {haste_score}\n\tChaotic Skyfire Diamond: {is_csd}\n\tSpellstrike Set: {is_spellstrike}\n\tSpellfire Set: {is_spellfire}'
logging.info(msg)
balance_of_power = 4 # +4% Hit
focused_starlight = 4 # +4% crit for SF and Wrath
moonkin_form = 5 # +5% Crit
improved_mf = 10 # +10% Moonfire crit
starlight_wrath = True # reduce cast time by 0.5s
vengeance = True # +100% Crit damange
lunar_guidance = True # Spellpower bonus = 24% of total intellect
moonfury = 1.1 # +10% damage
wrath_of_cenarius = 1.2 # +20% Spellpower for SF | +10% SpellPower for Wrath
fight_length = 90 # in seconds
# Sets bonuses
spellfire = is_spellfire # SP bonus = +7% of total intellect
spellstrike = is_spellstrike # 5% chance to have +92sp for 10s - No ICD
windhawk = False # 8MP/5 KEK
# Meta GEM - Chaotic Skyfire Diamond
csd_equiped = is_csd
# Special Trinkets
eye_of_mag = False # Grants 170 increased spell damage for 10 sec when one of your spells is resisted.
silver_crescent = False # Use: Increases damage and healing done by magical spells and effects by up to 155 for 20 sec. (2 Min Cooldown)
scryer_gem = False # Use: Increases spell damage by up to 150 and healing by up to 280 for 15 sec. (1 Min, 30 Sec Cooldown)
quagmirran = False # Equip: Your harmful spells have a chance to increase your spell haste rating by 320 for 6 secs. (Proc chance: 10%, 45s cooldown)
essence_sapphi = False # Use: Increases damage and healing done by magical spells and effects by up to 130 for 20 sec. (2 Min Cooldown)
# Translating stats to %
# At level 70, 22.1 Spell Critical Strike Rating increases your chance to land a Critical Strike with a Spell by 1%
# At level 70, 12.6 Spell Hit Rating increases your chance to Hit with Spells by 1%. Hit cap is 202 FLAT (not including talents & buffs).
# Druids receive 1% Spell Critical Strike chance for every 79.4 points of intellect.
# Moonfire base damage : 305 to 357 Arcane damage and then an additional 600 Arcane damage over 12 sec.
MF_coeff = 0.15
MF_coeff_dot = 0.52
# Starfire base damage : 605 to 711 Arcane damage -> 658 on average
SF_coeff = 1
SF_average_damage = 658
MF_average_damage = 331
MF_average_dot_damage = 600
partial_coeff = 0.5 # For the moment, let's say that in average, partials get 50% damage reduction
sf_cast_time = 3
sf_cast_time_ng = 2.5
# Improved CotE
curse_of_the_elements = 1.13
# Apply spell haste coefficients here
# 15.77 Spell Haste Rating increases casting speed by 1%
# % Spell Haste at level 70 = (Haste Rating / 15.77)
# New Casting Time = Base Casting Time / (1 + (% Spell Haste / 100))
spell_haste = haste_score / 15.77
sf_cast_time = 3 / (1 + (spell_haste/100))
sf_cast_time_ng = 2.5 / (1 + (spell_haste/100))
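    # Worked example of the haste formulas above (illustrative numbers): 157.7 haste rating
    # -> 10% spell haste, so the 3.0s Starfire drops to 3.0 / 1.10 ~= 2.73s and the
    # Nature's Grace cast to 2.5 / 1.10 ~= 2.27s.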
# print("SF Cast time : " + str(sf_cast_time))
# print("SF NG Cast time : " + str(sf_cast_time_ng))
# Spell power calculation for fight SP + lunar guidance
if lunar_guidance:
spellpower = spellpower + 0.24 * intellect
if spellfire:
        spellpower = spellpower + 0.07 * intellect
# Hit chance
# 12.6 Spell Hit Rating -> 1%
hit_chance = min(99, 83 + (hit_score/12.6) + balance_of_power )
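    # Worked example: at the 202 flat hit cap, 202 / 12.6 ~= 16%, so
    # 83 + 16 + 4 (Balance of Power) = 103, which the min() above clamps to 99.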
logging.debug(f'Hit chance is : {hit_chance}')
# Crit chance
# At level 70, 22.1 Spell Critical Strike Rating -> 1%
# Druids receive 1% Spell Critical Strike chance for every 79.4 points of intellect.
MF_crit_percent = crit_score/22.1 + intellect/79.4 + improved_mf + moonkin_form + focused_starlight
    SF_crit_percent = crit_score/22.1 + intellect/79.4 + moonkin_form + focused_starlight
logging.debug(f'Moonfire crit chance is : {MF_crit_percent}')
logging.debug(f'Starfire crit chance is : {SF_crit_percent}')
logging.debug(f'Spellpower is : {spellpower}')
# Crit coeff
if csd_equiped:
crit_coeff = 2.09
else:
crit_coeff = 2
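    # The 2.09 presumably decomposes as: a crit hits for 1.5x, the CSD meta scales that to
    # 1.5 * 1.03 = 1.545x, and Vengeance doubles the 0.545 bonus -> 1 + 2 * 0.545 = 2.09.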
# Spellstrike bonus:
if spellstrike:
spellstrike_bonus = 92
else:
spellstrike_bonus = 0
# Prepare and launch the simulations
loop_size = num_fights # number of fights simulated
logging.info(f'Calculating average dps of {loop_size} fights, hang tight...')
average_dps = 0
n = 0
while n < loop_size:
n = n +1
logging.debug(f'Simulating fight #{n}...')
# Initialization
total_damage_done = 0
damage = 0
fight_time = 0
spellstrike_uptime = 0
ff_uptime = 0
mf_uptime = 0
is_ff_up = False
is_mf_up = False
is_ng = False
spellstrike_proc = False
ng_proc = False
# Time to kick ass and chew bubble gum
while fight_time <= fight_length:
loop_duration = 1 #GCD - can't be less, it's the rule !
damage = 0
if spellstrike_proc:
fight_spell_power = spellpower + spellstrike_bonus
else:
fight_spell_power = spellpower
# if FF not up, cast FF
if not is_ff_up:
logging.debug('Casting Faerie Fire !')
is_crit = False # can't crit on FF
damage = 0 # and no damage applied
if(numpy.random.randint(1, high = 101, size = 1) <= hit_chance):
is_hit = True
ff_uptime = 40
is_ff_up = True
# Test if spellstrike is proc
spellstrike_proc = (numpy.random.randint(1, high = 101, size = 1) <= 10)
else:
is_hit = False
logging.debug('Faerie Fire -> Resist !')
loop_duration = 1 #GCD
# if Moonfire not up, cast Moonfire
else:
if not is_mf_up:
logging.debug('Casting Moonfire !')
loop_duration = 1 #GCD because we cast a spell
# Is it a hit ?
if(numpy.random.randint(1, high = 101, size = 1) <= hit_chance):
is_hit = True
# Is it a crit ?
is_crit = (numpy.random.randint(1, high = 101, size = 1) <= MF_crit_percent)
# Is it a partial ?
                    if(numpy.random.randint(1, high = 101, size = 1) > hit_chance):
damage = MF_average_damage + MF_coeff * fight_spell_power * partial_coeff
else:
damage = MF_average_damage + MF_coeff * fight_spell_power
# Apply damage
if is_crit:
damage = damage * crit_coeff
# DoT :
damage = damage + MF_average_dot_damage + (MF_coeff_dot * fight_spell_power * min(12, (fight_length - fight_time - 1))/12)
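                    # Note: only the spell-power portion of the DoT is pro-rated by the time left
                    # in the fight (capped at the full 12 s duration); the base DoT damage is added in full.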
# There is a Hit ! update model
is_mf_up = True
mf_uptime = 12
else:
is_hit = False
logging.debug('Moonfire -> Resist ! ')
else:
# Cast Starfire
logging.debug('Casting Starfire !')
# Is it a hit ?
if(numpy.random.randint(1, high = 101, size = 1) <= hit_chance):
is_hit = True
# Is it a crit ?
is_crit = (numpy.random.randint(1, high = 101, size = 1) <= SF_crit_percent)
# Is it a partial ?
if(numpy.random.randint(1, high = 101, size = 1) > hit_chance):
logging.debug('Partial hit !')
damage = (SF_average_damage + (SF_coeff * fight_spell_power * wrath_of_cenarius * partial_coeff )) * moonfury
# logging.info("Damage done : " + str(damage))
else:
damage = (SF_average_damage + (SF_coeff * fight_spell_power * wrath_of_cenarius )) * moonfury
logging.debug(f'Damage done : {damage}')
if is_crit:
damage = damage * crit_coeff
else:
is_hit = False
logging.debug('Starfire -> Resist ! ')
if is_ng:
loop_duration = sf_cast_time_ng
else:
loop_duration = sf_cast_time
is_ng = False # Consume NG once SF is cast
# if there's a hit, we check Spellstrike proc
# Update time and model
fight_time = fight_time + loop_duration
ff_uptime = ff_uptime - loop_duration
mf_uptime = mf_uptime - loop_duration
# Check the timer on buffs / debuffs
spellstrike_uptime = spellstrike_uptime - loop_duration
if spellstrike_uptime <= 0:
spellstrike_proc = False
if mf_uptime <= 0:
is_mf_up = False
if ff_uptime <= 0:
is_ff_up = False
# @TODO if trinket available, activate
# Update nature's grace
if is_crit:
is_ng = True
total_damage_done = total_damage_done + damage * curse_of_the_elements
# If there is a Hit, Check if spellstrike is proc or refreshed :
if is_hit:
if numpy.random.randint(1, high = 11, size = 1) == 10:
spellstrike_proc = True
spellstrike_uptime = 10
logging.debug('Spellstrike proc !!!')
# Print output
logging.debug(f'Loop Duration: {loop_duration}')
logging.debug(f'Loop Damage: {damage}')
    dps = total_damage_done / fight_time # We use fight_time here instead of fight_length, since the last cast can finish after the nominal fight end
import os
import time
import shutil
from tqdm import trange
import torch
from torch import nn
import torch.nn.parallel
from torch.autograd import Variable
from model import DenseNet
from tensorboard_logger import configure, log_value
class Trainer(object):
"""
The Trainer class encapsulates all the logic necessary for
    training the DenseNet model. It uses SGD to update the weights
    of the model given the hyperparameter constraints provided by the
user in the config file.
"""
def __init__(self, config, data_loader):
"""
Construct a new Trainer instance.
Params
------
- config: object containing command line arguments.
- data_loader: data iterator
"""
self.config = config
if config.is_train:
self.train_loader = data_loader[0]
self.valid_loader = data_loader[1]
else:
self.test_loader = data_loader
# network params
self.num_blocks = config.num_blocks
self.num_layers_total = config.num_layers_total
self.growth_rate = config.growth_rate
self.bottleneck = config.bottleneck
self.theta = config.compression
# training params
self.epochs = config.epochs
self.start_epoch = 0
self.best_valid_acc = 0.
self.init_lr = config.init_lr
self.lr = self.init_lr
self.is_decay = True
self.momentum = config.momentum
self.weight_decay = config.weight_decay
self.dropout_rate = config.dropout_rate
if config.lr_decay == '':
self.is_decay = False
else:
self.lr_decay = [float(x) for x in config.lr_decay.split(',')]
# other params
self.ckpt_dir = config.ckpt_dir
self.logs_dir = config.logs_dir
self.num_gpu = config.num_gpu
self.use_tensorboard = config.use_tensorboard
self.resume = config.resume
self.print_freq = config.print_freq
self.dataset = config.dataset
if self.dataset == 'cifar10':
self.num_classes = 10
elif self.dataset == 'cifar100':
self.num_classes = 100
else:
self.num_classes = 1000
# build densenet model
self.model = DenseNet(self.num_blocks, self.num_layers_total,
self.growth_rate, self.num_classes, self.bottleneck,
self.dropout_rate, self.theta)
print('[*] Number of model parameters: {:,}'.format(
sum([p.data.nelement() for p in self.model.parameters()])))
# define loss and optimizer
self.criterion = nn.CrossEntropyLoss()
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.init_lr,
momentum=self.momentum, weight_decay=self.weight_decay)
if self.num_gpu > 0:
self.model.cuda()
self.criterion.cuda()
# finally configure tensorboard logging
if self.use_tensorboard:
tensorboard_dir = self.logs_dir + self.get_model_name()
print('[*] Saving tensorboard logs to {}'.format(tensorboard_dir))
if not os.path.exists(tensorboard_dir):
os.makedirs(tensorboard_dir)
configure(tensorboard_dir)
def train(self):
"""
Train the model on the training set.
A checkpoint of the model is saved after each epoch
and if the validation accuracy is improved upon,
a separate ckpt is created for use on the test set.
"""
# switch to train mode for dropout
self.model.train()
# load the most recent checkpoint
if self.resume:
self.load_checkpoint(best=False)
for epoch in trange(self.start_epoch, self.epochs):
# decay learning rate
if self.is_decay:
self.anneal_learning_rate(epoch)
# train for 1 epoch
self.train_one_epoch(epoch)
# evaluate on validation set
valid_acc = self.validate(epoch)
is_best = valid_acc > self.best_valid_acc
self.best_valid_acc = max(valid_acc, self.best_valid_acc)
self.save_checkpoint({
'epoch': epoch + 1,
'state_dict': self.model.state_dict(),
'best_valid_acc': self.best_valid_acc}, is_best)
def test(self):
"""
Test the model on the held-out test data.
This function should only be called at the very
end once the model has finished training.
"""
# switch to test mode for dropout
self.model.eval()
accs = AverageMeter()
batch_time = AverageMeter()
# load the best checkpoint
self.load_checkpoint(best=True)
tic = time.time()
for i, (image, target) in enumerate(self.test_loader):
if self.num_gpu > 0:
image = image.cuda()
                target = target.cuda(non_blocking=True)
input_var = torch.autograd.Variable(image)
target_var = torch.autograd.Variable(target)
# forward pass
output = self.model(input_var)
# compute loss & accuracy
acc = self.accuracy(output.data, target)
accs.update(acc, image.size()[0])
# measure elapsed time
toc = time.time()
batch_time.update(toc-tic)
# print to screen
if i % self.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Test Acc: {acc.val:.3f} ({acc.avg:.3f})'.format(
i, len(self.test_loader), batch_time=batch_time,
acc=accs))
print('[*] Test Acc: {acc.avg:.3f}'.format(acc=accs))
def train_one_epoch(self, epoch):
"""
Train the model for 1 epoch of the training set.
An epoch corresponds to one full pass through the entire
training set in successive mini-batches.
This is used by train() and should not be called manually.
"""
batch_time = AverageMeter()
losses = AverageMeter()
accs = AverageMeter()
tic = time.time()
for i, (image, target) in enumerate(self.train_loader):
if self.num_gpu > 0:
image = image.cuda()
                target = target.cuda(non_blocking=True)
input_var = torch.autograd.Variable(image)
target_var = torch.autograd.Variable(target)
# forward pass
output = self.model(input_var)
# compute loss & accuracy
loss = self.criterion(output, target_var)
acc = self.accuracy(output.data, target)
losses.update(loss.data[0], image.size()[0])
accs.update(acc, image.size()[0])
# compute gradients and update SGD
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# measure elapsed time
toc = time.time()
batch_time.update(toc-tic)
# print to screen
if i % self.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Train Loss: {loss.val:.4f} ({loss.avg:.4f})\t'
'Train Acc: {acc.val:.3f} ({acc.avg:.3f})'.format(
epoch, i, len(self.train_loader), batch_time=batch_time,
loss=losses, acc=accs))
# log to tensorboard
if self.use_tensorboard:
log_value('train_loss', losses.avg, epoch)
log_value('train_acc', accs.avg, epoch)
def validate(self, epoch):
"""
Evaluate the model on the validation set.
"""
batch_time = AverageMeter()
losses = AverageMeter()
accs = AverageMeter()
tic = time.time()
for i, (image, target) in enumerate(self.valid_loader):
if self.num_gpu > 0:
image = image.cuda()
                target = target.cuda(non_blocking=True)
input_var = torch.autograd.Variable(image)
target_var = torch.autograd.Variable(target)
# forward pass
output = self.model(input_var)
# compute loss & accuracy
loss = self.criterion(output, target_var)
acc = self.accuracy(output.data, target)
losses.update(loss.data[0], image.size()[0])
accs.update(acc, image.size()[0])
# measure elapsed time
toc = time.time()
batch_time.update(toc-tic)
# print to screen
if i % self.print_freq == 0:
print('Valid: [{0}/{1}]\t'
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Valid Loss: {loss.val:.4f} ({loss.avg:.4f})\t'
'Valid Acc: {acc.val:.3f} ({acc.avg:.3f})'.format(
i, len(self.valid_loader), batch_time=batch_time,
loss=losses, acc=accs))
print('[*] Valid Acc: {acc.avg:.3f}'.format(acc=accs))
# log to tensorboard
if self.use_tensorboard:
log_value('val_loss', losses.avg, epoch)
log_value('val_acc', accs.avg, epoch)
return accs.avg
def save_checkpoint(self, state, is_best):
"""
Save a copy of the model so that it can be loaded at a future
date. This function is used when the model is being evaluated
on the test data.
        Furthermore, the model with the highest validation accuracy is also saved
        under a special name.
"""
print("[*] Saving model to {}".format(self.ckpt_dir))
filename = self.get_model_name() + '_ckpt.pth.tar'
ckpt_path = os.path.join(self.ckpt_dir, filename)
torch.save(state, ckpt_path)
if is_best:
filename = self.get_model_name() + '_model_best.pth.tar'
shutil.copyfile(ckpt_path,
os.path.join(self.ckpt_dir, filename))
print("[*] ==== Best Valid Acc Achieved ====")
def load_checkpoint(self, best=False):
"""
Load the best copy of a model. This is useful for 2 cases:
- Resuming training with the most recent model checkpoint.
- Loading the best validation model to evaluate on the test data.
Params
------
- best: if set to True, loads the best model. Use this if you want
to evaluate your model on the test data. Else, set to False in
which case the most recent version of the checkpoint is used.
"""
print("[*] Loading model from {}".format(self.ckpt_dir))
filename = self.get_model_name() + '_ckpt.pth.tar'
if best:
filename = self.get_model_name() + '_model_best.pth.tar'
ckpt_path = os.path.join(self.ckpt_dir, filename)
ckpt = torch.load(ckpt_path)
# load variables from checkpoint
self.start_epoch = ckpt['epoch']
self.best_valid_acc = ckpt['best_valid_acc']
self.model.load_state_dict(ckpt['state_dict'])
print("[*] Loaded {} checkpoint @ epoch {} with best valid acc of {:.3f}".format(
filename, ckpt['epoch'], ckpt['best_valid_acc']))
def anneal_learning_rate(self, epoch):
"""
This function decays the learning rate at 2 instances.
- The initial learning rate is divided by 10 at
t1*epochs.
- It is further divided by 10 at t2*epochs.
t1 and t2 are floats specified by the user. The default
values used by the authors of the paper are 0.5 and 0.75.
"""
sched1 = int(self.lr_decay[0] * self.epochs)
sched2 = int(self.lr_decay[1] * self.epochs)
self.lr = self.init_lr * (0.1 ** (epoch // sched1)) \
* (0.1 ** (epoch // sched2))
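        # Example with the paper defaults (epochs=300, lr_decay='0.5,0.75'):
        # sched1=150 and sched2=225, so lr = init_lr for epochs 0-149,
        # init_lr/10 for epochs 150-224, and init_lr/100 from epoch 225 on.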
# log to tensorboard
if self.use_tensorboard:
log_value('learning_rate', self.lr, epoch)
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.lr
def get_model_name(self):
"""
Returns the name of the model based on the configuration
parameters.
The name will take the form DenseNet-X-Y-Z where:
        - X: total number of layers specified by `config.num_layers_total`.
- Y: can be BC or an empty string specified by `config.bottleneck`.
- Z: name of the dataset specified by `config.dataset`.
For example, given 169 layers with bottleneck on CIFAR-10, this
function will output `DenseNet-BC-169-cifar10`.
"""
if self.bottleneck:
return 'DenseNet-BC-{}-{}'.format(self.num_layers_total,
self.dataset)
return 'DenseNet-{}-{}'.format(self.num_layers_total,
self.dataset)
def accuracy(self, predicted, ground_truth):
"""
Utility function for calculating the accuracy of the model.
Params
------
- predicted: (torch.FloatTensor)
- ground_truth: (torch.LongTensor)
Returns
-------
- acc: (float) % accuracy.
"""
predicted = torch.max(predicted, 1)[1]
total = len(ground_truth)
        correct = float((predicted == ground_truth).sum())
        acc = 100.0 * correct / total
return acc
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
        self.avg = self.sum / self.count
or "window" keyword arguments must be given and not None.'
error(msg,'gwf.taper')
#
wfarr = this.wfarr
wfarr[:,1] = window * this.wfarr[:,1]
wfarr[:,2] = window * this.wfarr[:,2]
# NOTE that objects cannot be redefined within their methods, but their properties can be changed. For this reason, the line below uses setfields() rather than gwf() to apply the taper.
this.setfields( wfarr=wfarr )
# Set this object's window
this.window = this.window * window
# Apply mask
def apply_mask( this, mask=None ):
#
if mask is None: error('the mask input must be given, and it must be index or boolean ')
#
this.setfields( this.wfarr[mask,:] )
    # If desired, reset the waveform object to its original state (e.g. its state just after loading).
    # Note that after this method is called, the current object will occupy a different address in memory.
def reset(this): this.setfields( this.__rawgwfarr__ )
# return a copy of the current object
def copy(this):
#
from copy import deepcopy as copy
return copy(this)
    # RETURN a clone of the current waveform object. NOTE that the copy package may also be used here
def clone(this): return gwf(this.wfarr).meet(this)
# Interpolate the current object
def interpolate(this,dt=None,domain=None):
# Validate inputs
if (dt is None) and (domain is None):
            msg = red('Either "dt" or "domain" must be given. See traceback above.')
error(msg,'gwf.interpolate')
if (dt is not None) and (domain is not None):
msg = red('Either "dt" or "domain" must be given, not both. See traceback above.')
error(msg,'gwf.interpolate')
# Create the new wfarr by interpolating
if domain is None:
wfarr = intrp_wfarr(this.wfarr,delta=dt)
else:
wfarr = intrp_wfarr(this.wfarr,domain=domain)
# Set the current object to its new state
this.setfields(wfarr)
# Pad this waveform object in the time domain with zeros
def pad(this,new_length=None,where=None,apply=False,extend=True):
#
where = 'right' if where is None else where
# Pad this waveform object to the left and right with zeros
ans = this.copy() if not apply else this
if new_length is not None:
# Create the new wfarr
wfarr = pad_wfarr( this.wfarr, new_length,where=where,extend=extend )
# Confer to the current object
ans.setfields(wfarr)
#
if extend==False:
if len(ans.t)!=new_length:
error('!!!')
return ans
# Shift this waveform object in the time domain
def tshift(this,shift=None,apply=False,method=None, verbose=False):
        # Work on a copy of this waveform object unless the shift should be applied in place
ans = this.copy() if not apply else this
if not (shift is None):
# Create the new wfarr
wfarr = tshift_wfarr( ans.wfarr, shift, method=method, verbose=verbose )
# Confer to the current object
ans.setfields(wfarr)
return ans
# Analog of the numpy ndarray conj()
def conj(this):
this.wfarr[:,2] *= -1
this.setfields()
return this
# Align the gwf with a reference gwf using a desired method
def align( this,
that, # The reference gwf object
method=None, # The alignment type e.g. phase
                options=None, # Additional options for subroutines
mask=None, # Boolean mask to apply for alignment (useful e.g. for average-phase alignment)
kind=None,
verbose=False ):
#
if that.__class__.__name__!='gwf':
            msg = 'first input must be gwf -- the gwf object to align the current object to'
error(msg,'gwf.align')
# Set default method
if method is None:
msg = 'No method chosen. We will proceed by aligning the waveform\'s initial phase.'
warning(msg,'gwf.align')
method = ['initial-phase']
# Make sure method is list or tuple
if not isinstance(method,(list,tuple)):
method = [method]
# Make sure all methods are strings
for k in method:
if not isinstance(k,str):
msg = 'non-string method type found: %s'%k
error(msg,'gwf.align')
# Check for handled methods
handled_methods = [ 'initial-phase','average-phase' ]
for k in method:
if not ( k in handled_methods ):
msg = 'non-handled method input: %s. Handled methods include %s'%(red(k),handled_methods)
error(msg,'gwf.align')
#
        if kind is None: kind = 'strain'
# Look for phase-alignement
if 'initial-phase' in method:
this.wfarr = align_wfarr_initial_phase( this.wfarr, that.wfarr, mask=mask, )
this.setfields()
if 'average-phase' in method:
this.wfarr = align_wfarr_average_phase( this.wfarr, that.wfarr, mask=mask, verbose=verbose)
this.setfields()
#
return this
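    # Hypothetical usage sketch (assumes two existing gwf objects, a and b):
    #   a.align( b, method='average-phase', mask=(b.t > b.t[0]+100), verbose=True )
    # aligns a's average phase to b over the masked samples and returns a.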
# Shift the waveform phase
def shift_phase(this,
dphi,
                    fromraw=False, # If True, rotate the waveform relative to its default wfarr (i.e. __rawgwfarr__)
apply = True,
fast = False,
verbose=False):
#
from numpy import ndarray
if isinstance(dphi,(list,tuple,ndarray)):
if len(dphi)==1:
dphi = dphi[0]
else:
error( 'dphi found to be iterable of length greater than one. the method is not implemented to handle this scenario. Please loop over desired values externally.' )
if not isinstance(dphi,(float,int)):
            error('input must be a real-valued float or int','gwf.shift_phase')
if fromraw:
wfarr = this.__rawgwfarr__
else:
wfarr = this.wfarr
#
msg = 'This function could be sped up by manually aligning relevant fields, rather than regenerating all fields which includes taking an FFT.'
if this.verbose: warning(msg,'gwf.shift_phase')
#
ans = this if apply else this.copy()
wfarr = shift_wfarr_phase( wfarr, dphi )
if fast:
ans.setfields(wfarr,setfd=False)
else:
ans.setfields(wfarr)
#
if not apply:
return ans
#
def __rotate_frame_at_all_times__( this, # The current object
like_l_multipoles, # List of available multipoles with same l
euler_alpha_beta_gamma, # List of euler angles
                                       ref_orientation = None, # A reference orientation (useful for BAM)
transform_domain=None, # Domain of transformation ('td','fd')
verbose=False ): # Toggle for letting the people know
#
that = this.copy()
#
allowed_transform_domains = ('td','fd')
if not ( transform_domain.lower() in allowed_transform_domains ):
error('Transform domain must be in %s'%str(allowed_transform_domains))
else:
alert( 'Transforming to the coprecessing frame using %s angles.'%yellow(transform_domain.upper()),verbose=verbose )
#
if not ( ref_orientation is None ) :
            error('The use of "ref_orientation" has been deprecated for this function.')
#
like_l_multipoles_dict = { (y.l,y.m): (y.wfarr if transform_domain=='td' else y.fd_wfarr) for y in like_l_multipoles }
#
rotated_wfarr = rotate_wfarrs_at_all_times( this.l,this.m, like_l_multipoles_dict, euler_alpha_beta_gamma, ref_orientation=ref_orientation )
# IF domain is frequency domain,
# THEN convert waveform array into the time domain
if transform_domain.lower() == 'fd':
from numpy import array
from scipy.fftpack import ifftshift,ifft,fft
that.raw_transformed_fd_wfarr = rotated_wfarr.copy()
f,fd_p,fd_c = rotated_wfarr.T
t = this.t
## DIAGNOSTIC PLOTTING
# if (this.l,this.m)==(2,2):
# alert('diagnostic plotting for '+red(this.kind)+': ')
# from matplotlib.pyplot import plot,show,loglog,xscale,yscale
# from numpy import sqrt
# ff = abs(f)
# loglog(ff,abs(fd_p+1j*fd_c))
# show()
# the FD rotation introduces a non-trivial phase shift
# that results in a complex term in the TD polarizations
# which must be included. As a result, the code below can be incorrect:
# td_re = ifft(ifftshift( fd_p )).real * this.df*this.n
# td_im = ifft(ifftshift( fd_c )).real * this.df*this.n
# And the correct code is
td_re_temp = ifft(ifftshift( fd_p )) * this.df*this.n
td_im_temp = ifft(ifftshift( fd_c )) * this.df*this.n
td_y = td_re_temp + 1j*td_im_temp
            # The real-valued polarizations are then the real and imaginary parts
td_re = td_y.real
td_im = td_y.imag
rotated_wfarr = array( [t,td_re,td_im], dtype=float ).T
# NOTE that there can be an overall time shift at this stage
# Reset related fields using the new data
that.setfields( rotated_wfarr )
#
return that
# frequency domain filter the waveform given a window state for the frequency domain
def fdfilter(this,window):
#
from scipy.fftpack import fft, fftfreq, fftshift, ifft
from numpy import floor,array,log
from matplotlib.pyplot import plot,show
#
if this.__lowpassfiltered__:
            msg = 'waveform already low-pass filtered'
warning(msg,'gwf.lowpass')
else:
#
fd_y = this.fd_y * window
plot( log(this.f), log( abs(this.fd_y) ) )
plot( log(this.f), log( abs(fd_y) ) )
show()
#
y = ifft( fftshift( fd_y ) )
this.wfarr[:,1],this.wfarr[:,2] = y.real,y.imag
#
this.setfields()
#
this.__lowpassfiltered__ = True
#
def __flip_cross_sign_convention__(this):
#
        warning('You should not need to use this function. If you are using this function, please check your workflow for possible sign convention inconsistencies.')
this.wfarr[:,-1] *= -1
this.setfields()
#
def __get_derivative__(this,n=1):
#
from numpy import array
#
that = this.copy()
#
t,A,B = this.wfarr.T
#
DnA = spline_diff(t,A,n)
DnB = spline_diff(t,B,n)
#
wfarr = array([t,DnA,DnB]).T
that.setfields(wfarr)
#
if '\\psi' in that.kind:
that.kind = that.kind.replace('\\psi','D^{%i}\\psi'%n)
elif 'rh' in that.kind:
that.kind = that.kind.replace('rh','rD^{%i}h'%n)
elif 'r\dot' in that.kind:
that.kind = that.kind.replace('r\dot','rD^{%i}\dot'%n)
else:
that.kind = '$D^{%i}$'%n + that.kind
return that
#
def __get_antiderivative__(this,n=1):
return None
# Class for waveforms: Psi4 multipoles, strain multipoles (both spin weight -2), recomposed waveforms containing h+ and hx. NOTE that detector response waveforms will be left to pycbc to handle
class gwylm:
'''
    Class to hold spherical multipoles of gravitational radiation.
#!/usr/bin/env python
import sys
import os
from matplotlib import transforms
from matplotlib.colors import get_named_colors_mapping
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, get
import numpy as np
from matplotlib.patches import Circle
from pandas.core import algorithms
from scipy import cluster
from .utils import read_file,save_seqs
from .character import Character
from .column import Column
from .item import Item
from .utils import get_coor_by_angle, link_edges, rotate, text3d
from .connect import get_connect, get_score_mat, msa
from matplotlib.patches import Circle, PathPatch
from matplotlib.text import TextPath
from matplotlib.transforms import Affine2D
import mpl_toolkits.mplot3d.art3d as art3d
import seaborn as sns
from matplotlib.lines import Line2D
from matplotlib.patches import Arc, RegularPolygon
from numpy import radians as rad
import math
import re
import time
import pandas as pd
import pathlib
import os
from scipy.spatial import distance
from scipy.cluster import hierarchy
from scipy.cluster.hierarchy import dendrogram, linkage
from mpl_toolkits.axes_grid1 import make_axes_locatable
from .utils import grouping,detect_seq_type
from .logobits import compute_bits, compute_prob
from .colors import get_color_scheme
from .version import __version__
basic_dna_color = get_color_scheme('basic_dna_color')
class Logo(Item):
def __init__(self, bits, ax = None, start_pos=(0,0), logo_type='Horizontal', column_width=1,
column_margin_ratio=0.1, char_margin_ratio = 0.1, parent_start = (0,0), origin = (0,0), id='',
help_color='b', color=basic_dna_color, limited_char_width=None, path_dict={}, *args, **kwargs):
super(Logo, self).__init__(*args, **kwargs)
self.bits = bits
self.start_pos = start_pos
self.logo_type = logo_type
self.parent_start = parent_start
self.column_margin_ratio = column_margin_ratio
self.char_margin_ratio = char_margin_ratio
self.column_width = column_width
self.origin = origin
self.id = id
self.color = color
self.help_color = help_color
self.columns = []
self.limited_char_width = limited_char_width
self.path_dict = path_dict
        if ax is None:
self.ax = self.generate_ax(threed=(self.logo_type=='Threed'))
else:
self.ax = ax
        if limited_char_width is None:
self.limited_char_width = self.get_limited_char_width()
self.generate_components()
def generate_components(self):
for index,bit in enumerate(self.bits):
chars = [x[0] for x in bit]
weights = [x[1] for x in bit]
if chars == []:
chars = ['-']
weights = [0]
column = Column(chars,weights,ax=self.ax,width=self.column_width,logo_type=self.logo_type,
origin=self.origin, color=self.color, char_margin_ratio=self.char_margin_ratio,
limited_char_width=self.limited_char_width,path_dict=self.path_dict)
self.columns.append(column)
def draw(self):
self.compute_positions()
for col in self.columns:
col.draw()
def draw_help(self,show_id=True,group_id_size=10, **kwargs):
if self.logo_type == 'Threed':
self.draw_3d_help(show_id=show_id, group_id_size=group_id_size, **kwargs)
if self.logo_type == 'Horizontal':
self.draw_hz_help(show_id=show_id, group_id_size=group_id_size,**kwargs)
if self.logo_type == 'Circle':
self.draw_circle_help(show_id=show_id, group_id_size=group_id_size,**kwargs)
if self.logo_type == 'Radiation':
self.draw_rad_help(show_id=show_id, group_id_size=group_id_size,**kwargs)
def draw_rad_help(self, show_id=True, group_id_size=10, **kwargs):
if show_id:
label_radius = (self.start_pos[0] + self.get_width() )
label_x = label_radius * np.cos(self.deg)
label_y = label_radius * np.sin(self.deg)
self.id_txt = self.ax.text(label_x,label_y, f'{self.id}',rotation=math.degrees(self.deg),fontsize=group_id_size)
def draw_hz_help(self,show_id=True,group_id_size=10, **kwargs):
if show_id:
#self.id_txt = self.ax.text(self.get_width() + 0.5, self.start_pos[1]+0.1,
self.id_txt = self.ax.text(self.get_width() + 0.5, self.start_pos[1]+self.get_height()*0.1,
f"{self.id}", fontsize=group_id_size, clip_on=True)#,bbox={'fc': '0.8', 'pad': 0})
def draw_circle_help(self,show_id=True, group_id_size=10,draw_arrow=False,**kwargs):
self.ax.add_patch(Circle(self.parent_start,self.radius,linewidth=1,fill=False,edgecolor='grey',alpha=0.5))
space_deg = self.degs[0] + (self.degs[-1] - self.degs[0])/2
space_coor = get_coor_by_angle(self.radius ,space_deg)
self.ax.scatter(space_coor[0],space_coor[1],color=self.help_color)
space_coor2 = get_coor_by_angle(self.radius + self.get_height() ,space_deg)
#self.ax.plot([self.parent_start[0],space_coor[0]],[self.parent_start[1],space_coor[1]],zorder=-1)
self.ax.plot([space_coor[0],space_coor2[0]],[space_coor[1],space_coor2[1]],zorder=-1,color='grey')
if draw_arrow == True:
self.ax.plot([self.origin[0],space_coor[0]],[self.origin[1],space_coor[1]],zorder=-1,color='grey')
arc = Arc(self.origin,self.radius,self.radius,angle=270,
theta1=0,theta2=180,capstyle='round',linestyle='-',lw=2,color='black')
self.ax.add_patch(arc)
endX = 0
endY = -self.radius/2
self.ax.add_patch( #Create triangle as arrow head
RegularPolygon(
(endX, endY), # (x,y)
3, # number of vertices
self.radius/9, # radius
rad(30+180), # orientation
color='black'
)
)
def draw_3d_help(self,z_height_3d=2, show_id=True, group_id_size=10,**kwargs):
if show_id:
self.ax.text(0, self.start_pos[2], z_height_3d, f'{self.id}', 'z',fontsize=group_id_size)
def compute_positions(self):
if self.logo_type == 'Circle':
self.column_margin_ratio = 0
self.radius = np.sqrt((self.start_pos[0]-self.parent_start[0])**2 + (self.start_pos[1]-self.parent_start[1])**2)
self.each_deg = 2*np.pi / len(self.bits)
width = 2 * self.radius * np.tan(self.each_deg/2)
width = width * 0.95
self.column_width = width
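            # Geometry note: each column subtends each_deg radians of the circle, so the
            # usable width at this radius is the tangent span 2*r*tan(each_deg/2); the 5%
            # shrink above leaves a small visual gap between neighbouring columns.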
degs = [x*self.each_deg + np.pi/2 for x in range(len(self.bits))]
degs = degs[::-1]
degs = [degs.pop()] + degs
self.degs = degs
start_pos = self.start_pos
for index,col in enumerate(self.columns):
col.set_parent_start(self.start_pos)
if self.logo_type == 'Horizontal':
col.set_start_pos(start_pos)
col.compute_positions()
start_pos = (start_pos[0] + col.get_width() * (1+self.column_margin_ratio), start_pos[1])
elif self.logo_type == 'Circle':
start_pos = get_coor_by_angle(self.radius, self.degs[index], self.parent_start)
col.set_start_pos(start_pos)
col.set_deg(self.degs[index])
col.set_width(self.column_width)
col.compute_positions()
elif self.logo_type == 'Radiation':
col.set_start_pos(start_pos)
col.set_deg(self.deg)
col.set_radiation_space(self.radiation_space)
col.compute_positions()
start_pos = (start_pos[0] + col.get_width() *(1+self.column_margin_ratio), start_pos[1])
elif self.logo_type == 'Threed':
col.set_start_pos(start_pos)
col.compute_positions()
start_pos = (start_pos[0] + col.get_width() *(1+self.column_margin_ratio), start_pos[1], start_pos[2])
else:
pass
def get_height(self):
return max([col.get_height() for col in self.columns]+[0])
def get_width(self):
return sum([col.get_width() *(1+self.column_margin_ratio) for col in self.columns])
class LogoGroup(Item):
def __init__(self, seqs, ax=None, group_order='length', group_strategy='length', group_resolution=0.5,
clustering_method = 'max', min_length = 0, max_length = 100,
start_pos = (0,0), logo_type = 'Horizontal', init_radius=1,
logo_margin_ratio = 0.1, column_margin_ratio = 0.05, char_margin_ratio = 0.05,
align = True, align_metric='sort_consistency', connect_threshold=0.8,
radiation_head_n = 5, threed_interval = 4, color = basic_dna_color, task_name='MetaLogo',
x_label = 'Position', y_label = 'bits',z_label = 'bits', show_grid = True, show_group_id = True,
display_range_left = 0, display_range_right = -1,
hide_left_axis=False, hide_right_axis=False, hide_top_axis=False, hide_bottom_axis=False,
hide_x_ticks=False, hide_y_ticks=False, hide_z_ticks=False,
title_size=20, label_size=10, tick_size=10, group_id_size=10,align_color='blue',align_alpha=0.1,
figure_size_x=-1, figure_size_y=-1,gap_score=-1, padding_align=False, hide_version_tag=False,
sequence_type = 'auto', height_algorithm = 'bits',omit_prob = 0,
seq_file = '', seq_file_type = 'fasta', fa_output_dir = '.', output_dir = '.', uid = '',
withtree = False,group_limit=20,
clustalo_bin = '', fasttree_bin = '', fasttreemp_bin = '', treecluster_bin = '',
auto_size=True,
*args, **kwargs):
super(LogoGroup, self).__init__(*args, **kwargs)
self.seqs = seqs
self.seq_file = seq_file
self.seq_file_type = seq_file_type
        self.min_length = min_length
        self.max_length = max_length
self.target_sequence = None
self.group_order = group_order
self.group_strategy = group_strategy
self.group_resolution = float(group_resolution)
self.start_pos = start_pos
self.logo_margin_ratio = logo_margin_ratio
self.column_margin_ratio = column_margin_ratio
self.char_margin_ratio = char_margin_ratio
self.logo_type = logo_type
self.init_radius = init_radius
        self.radiation_head_n = radiation_head_n
self.threed_interval = threed_interval
self.align = align
self.ceiling_pos = (0,1)
self.align_metric = align_metric
self.connect_threshold = connect_threshold
self.color = color
self.task_name = task_name
self.height_algorithm = height_algorithm
self.omit_prob = omit_prob
self.align_color = align_color
self.align_alpha = align_alpha
self.padding_align = padding_align
self.display_range_left = display_range_left
self.display_range_right = display_range_right
self.gap_score = gap_score
self.hide_left_axis = hide_left_axis
self.hide_right_axis = hide_right_axis
self.hide_bottom_axis = hide_bottom_axis
self.hide_top_axis = hide_top_axis
self.hide_x_ticks = hide_x_ticks
self.hide_y_ticks = hide_y_ticks
self.hide_z_ticks = hide_z_ticks
self.x_label = x_label
self.y_label = y_label
self.z_label = z_label
self.tick_size = tick_size
self.title_size = title_size
self.label_size = label_size
self.group_id_size = group_id_size
self.show_group_id = show_group_id
self.show_grid = show_grid
self.figure_size_x = figure_size_x
self.figure_size_y = figure_size_y
self.hide_version_tag = hide_version_tag
self.clustalo_bin = clustalo_bin
self.fasttree_bin = fasttree_bin
self.fasttreemp_bin = fasttreemp_bin
self.treecluster_bin = treecluster_bin
self.fa_output_dir = fa_output_dir
self.output_dir = output_dir
self.uid = uid
self.clustering_method = clustering_method
self.withtree = withtree
self.group_limit = group_limit
self.auto_size = auto_size
if (self.seqs is None) and (not os.path.exists(self.seq_file)):
print('No sequences provided')
self.error = 'No sequences detected'
return
if (self.seqs is None) and os.path.exists(self.seq_file):
seq_dict,seqnames,seqname_dict = read_file(self.seq_file, self.seq_file_type)
self.seqname_dict = seqname_dict
if len(seqnames) == 0:
print('No sequences detected')
self.error = 'No sequences detected'
return
if (len(seq_dict[seqnames[0]]) < self.min_length) or (len(seq_dict[seqnames[0]]) > self.max_length):
                print('The first sequence does not satisfy the length limit')
                self.error = 'The first sequence does not satisfy the length limit'
return
seqs = [[seqname,seq_dict[seqname]] for seqname in seqnames if (len(seq_dict[seqname])>self.min_length) and (len(seq_dict[seqname])<self.max_length)]
target_sequence = seq_dict[seqnames[0]]
if len(seqs) == 0:
print('No sequences left after length filter')
self.error = 'No sequences left after length filter'
return
self.seqs = seqs
self.target_sequence = target_sequence
elif self.seqs is not None:
self.target_sequence = self.seqs[0][1]
print('target_sequence: ', self.target_sequence)
if self.seqs is not None:
if not os.path.exists(self.seq_file):
if self.seq_file != '':
save_seqs(self.seqs,self.seq_file)
else:
save_seqs(self.seqs,f'{self.fa_output_dir}/server.{self.uid}.fasta')
if self.seq_file_type.lower() in ['fastq','fq']:
save_seqs(self.seqs,f'{self.fa_output_dir}/server.{self.uid}.fasta')
self.seq_file = f'server.{self.uid}.fasta'
if sequence_type == 'auto':
self.sequence_type = detect_seq_type(self.seqs)
else:
self.sequence_type = sequence_type
self.check_dep()
if hasattr(self,'error'):
return
self.logos = []
self.prepare_bits()
if hasattr(self,'error'):
return
if ax is None:
withtree = False
if (len(self.group_ids) > 1) and (self.withtree) and (self.logo_type == 'Horizontal') and ( self.group_strategy == 'auto' or (self.align and self.padding_align)):
withtree = True
self.generate_ax(threed=(self.logo_type=='Threed'),withtree=withtree)
else:
self.ax = ax
self.generate_components()
def check_dep(self):
if not os.path.exists(self.clustalo_bin):
err = 'Clustal omega not found'
print(err)
self.error = err
elif not os.path.exists(self.fasttree_bin):
err = 'FastTree not found'
print(err)
self.error = err
elif not os.path.exists(self.fasttreemp_bin):
err = 'FastTreeMP not found'
print(err)
self.error = err
else:
pass
return
def prepare_bits(self):
self.groups = grouping(self.seqs,seq_file=self.seq_file,sequence_type=self.sequence_type,group_by=self.group_strategy,
group_resolution=self.group_resolution,clustering_method=self.clustering_method,
clustalo_bin=self.clustalo_bin,fasttree_bin=self.fasttree_bin,fasttreemp_bin=self.fasttreemp_bin,treecluster_bin=self.treecluster_bin,
uid=self.uid,fa_output_dir=self.fa_output_dir,figure_output_dir=self.output_dir)
self.raw_group_count = len(self.groups)
self.target_group = None
for grpid in self.groups:
for name,seq in self.groups[grpid]:
if seq.replace('-','') == self.target_sequence.replace('-',''):
self.target_group = grpid
break
if self.target_group is not None:
break
print('target_group: ',self.target_group)
if self.group_strategy.lower() == 'identifier':
for group_id in self.groups:
seqs = self.groups[group_id]
if len(set([len(x[1]) for x in seqs])) > 1:
                    print('Sequence lengths are not the same within one group')
                    self.error = 'In identifier-grouping mode, sequence lengths must be the same within each group'
return
if len(self.groups) > self.group_limit :
new_groups = {}
sorted_groups = sorted(self.groups.items(),key=lambda d:len(d[1]),reverse=True)
for gid,group in sorted_groups[:max(0,self.group_limit-1)]:
new_groups[gid] = group
if self.target_group is not None:
new_groups[self.target_group] = self.groups[self.target_group]
else:
if self.group_limit > 0 :
add_grp_id,add_grp = sorted_groups[self.group_limit-1]
new_groups[add_grp_id] = add_grp
self.groups = new_groups
self.probs = compute_prob(self.groups,threshold=self.omit_prob)
if self.height_algorithm == 'probabilities':
self.seq_bits = self.probs.copy()
seq_bits = {}
for key in self.probs:
seq_bits[key] = []
for pos in self.probs[key]:
item = []
'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861856214':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861856215':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861856216':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861856217':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861856218':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861856219':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861867194':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u8944\u6a0a\u5e02')},
'861864518':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861867192':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5341\u5830\u5e02')},
'861866070':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'86186288':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u8d44\u9633\u5e02')},
'86186281':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'86186280':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'86186283':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'86186282':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861865144':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861867198':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5341\u5830\u5e02')},
'861865145':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'86186062':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861848758':{'en': 'Li<NAME>', 'zh': u('\u4e91\u5357\u7701\u4e3d\u6c5f\u5e02')},
'861848759':{'en': 'Nu<NAME>', 'zh': u('\u4e91\u5357\u7701\u6012\u6c5f\u5088\u50f3\u65cf\u81ea\u6cbb\u5dde')},
'861848756':{'en': 'Li<NAME>', 'zh': u('\u4e91\u5357\u7701\u4e3d\u6c5f\u5e02')},
'861848757':{'en': 'Li<NAME>', 'zh': u('\u4e91\u5357\u7701\u4e3d\u6c5f\u5e02')},
'861848754':{'en': 'Dali, Yunnan', 'zh': u('\u4e91\u5357\u7701\u5927\u7406\u767d\u65cf\u81ea\u6cbb\u5dde')},
'861848755':{'en': 'Dali, Yunnan', 'zh': u('\u4e91\u5357\u7701\u5927\u7406\u767d\u65cf\u81ea\u6cbb\u5dde')},
'861848752':{'en': 'Dali, Yunnan', 'zh': u('\u4e91\u5357\u7701\u5927\u7406\u767d\u65cf\u81ea\u6cbb\u5dde')},
'861848753':{'en': 'Dali, Yunnan', 'zh': u('\u4e91\u5357\u7701\u5927\u7406\u767d\u65cf\u81ea\u6cbb\u5dde')},
'861848750':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u5927\u7406\u767d\u65cf\u81ea\u6cbb\u5dde')},
'861848751':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u5927\u7406\u767d\u65cf\u81ea\u6cbb\u5dde')},
'861840932':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5b9a\u897f\u5e02')},
'861840933':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5e73\u51c9\u5e02')},
'861840930':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u4e34\u590f\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861840931':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861840936':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5f20\u6396\u5e02')},
'861840937':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u9152\u6cc9\u5e02')},
'861840934':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5e86\u9633\u5e02')},
'861840935':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u6b66\u5a01\u5e02')},
'861840938':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5929\u6c34\u5e02')},
'861840939':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u9647\u5357\u5e02')},
'861863919':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u7126\u4f5c\u5e02\u6d4e\u6e90\u5e02')},
'861855935':{'en': 'N<NAME>', 'zh': u('\u798f\u5efa\u7701\u5b81\u5fb7\u5e02')},
'861860884':{'en': 'Honghe, Yunnan', 'zh': u('\u4e91\u5357\u7701\u7ea2\u6cb3\u54c8\u5c3c\u65cf\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861860885':{'en': 'Dali, Yunnan', 'zh': u('\u4e91\u5357\u7701\u5927\u7406\u767d\u65cf\u81ea\u6cbb\u5dde')},
'861860886':{'en': 'Nujiang, Yunnan', 'zh': u('\u4e91\u5357\u7701\u6012\u6c5f\u5088\u50f3\u65cf\u81ea\u6cbb\u5dde')},
'861860887':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u8fea\u5e86\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861860880':{'en': 'Kunming, Yunnan', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861860881':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u897f\u53cc\u7248\u7eb3\u50a3\u65cf\u81ea\u6cbb\u5dde')},
'861860882':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u5fb7\u5b8f\u50a3\u65cf\u666f\u9887\u65cf\u81ea\u6cbb\u5dde')},
'861860883':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u4e34\u6ca7\u5e02')},
'861865020':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u8386\u7530\u5e02')},
'861860888':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u4e3d\u6c5f\u5e02')},
'861860889':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u66f2\u9756\u5e02')},
'861865023':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u8386\u7530\u5e02')},
'86185718':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'86185716':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'86185717':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'86185715':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861860600':{'en': '<NAME>ian', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')},
'861867278':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861867279':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861855798':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861855799':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')},
'861855828':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')},
'861855829':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')},
'861855826':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')},
'861855827':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')},
'861855824':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')},
'861855825':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')},
'861855822':{'en': 'Huzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e56\u5dde\u5e02')},
'861855823':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')},
'861855820':{'en': 'Huzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e56\u5dde\u5e02')},
'861855821':{'en': 'Huzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e56\u5dde\u5e02')},
'861840756':{'en': 'Zhuhai, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u73e0\u6d77\u5e02')},
'861840757':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861840754':{'en': 'Shantou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5934\u5e02')},
'861840755':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861840752':{'en': 'Huizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u60e0\u5dde\u5e02')},
'861840753':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861840750':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')},
'861840751':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861840758':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8087\u5e86\u5e02')},
'861840759':{'en': 'Zhanjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861853433':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u957f\u6cbb\u5e02')},
'861853432':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')},
'861853431':{'en': 'Yuncheng, Shanxi', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')},
'861853430':{'en': 'Yuncheng, Shanxi', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')},
'861853437':{'en': 'Jinzhong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')},
'861853436':{'en': 'Linfen, Shanxi', 'zh': u('\u5c71\u897f\u7701\u4e34\u6c7e\u5e02')},
'861853435':{'en': 'Linfen, Shanxi', 'zh': u('\u5c71\u897f\u7701\u4e34\u6c7e\u5e02')},
'861853434':{'en': 'Jinzhong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')},
'861853438':{'en': 'Jinzhong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')},
'861839392':{'en': 'Dingxi, Gansu', 'zh': u('\u7518\u8083\u7701\u5b9a\u897f\u5e02')},
'861839393':{'en': 'Pingliang, Gansu', 'zh': u('\u7518\u8083\u7701\u5e73\u51c9\u5e02')},
'861839390':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u4e34\u590f\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861839391':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861839396':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u9647\u5357\u5e02')},
'861839397':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u9152\u6cc9\u5e02')},
'861839394':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u6b66\u5a01\u5e02')},
'861839395':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u6b66\u5a01\u5e02')},
'861839398':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5929\u6c34\u5e02')},
'861839399':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5e86\u9633\u5e02')},
'861850520':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861850521':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861850522':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861850523':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861850524':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861850525':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861850526':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861850527':{'en': 'Su<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861850528':{'en': 'Xu<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861850529':{'en': 'Xu<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861866669':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861867606':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'861847810':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861838561':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861858719':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861858718':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u4e3d\u6c5f\u5e02')},
'861858717':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u666e\u6d31\u5e02')},
'861858716':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')},
'861858715':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861858714':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u4e3d\u6c5f\u5e02')},
'861858713':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6587\u5c71\u58ee\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861858712':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u4fdd\u5c71\u5e02')},
'861858711':{'en': 'Honghe, Yunnan', 'zh': u('\u4e91\u5357\u7701\u7ea2\u6cb3\u54c8\u5c3c\u65cf\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861858710':{'en': 'Deqen, Yunnan', 'zh': u('\u4e91\u5357\u7701\u8fea\u5e86\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861867604':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861857536':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861857537':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861857534':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861857535':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861857532':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861857533':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861857530':{'en': 'Meizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861857531':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861857538':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861857539':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861856919':{'en': 'Zhangjiajie, Hunan', 'zh': u('\u6e56\u5357\u7701\u5f20\u5bb6\u754c\u5e02')},
'861856918':{'en': 'Changde, Hunan', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861867603':{'en': 'Zhongshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')},
'861856913':{'en': 'Changde, Hunan', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861856912':{'en': 'Changde, Hunan', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861856911':{'en': 'Changde, Hunan', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861856910':{'en': 'Changde, Hunan', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861856917':{'en': 'Changde, Hunan', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861856916':{'en': 'Changde, Hunan', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861856915':{'en': 'Changde, Hunan', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861856914':{'en': 'Changde, Hunan', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861846928':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u662d\u901a\u5e02')},
'861862644':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861862645':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861862646':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861862647':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861862640':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861862641':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861855953':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861862643':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861862648':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861862649':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861860326':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861864993':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')},
'861864990':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u6f33\u5dde\u5e02')},
'861864991':{'en': 'Zhangzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6f33\u5dde\u5e02')},
'861839152':{'en': 'Ankang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b89\u5eb7\u5e02')},
'861864997':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861864994':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861863369':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'861864998':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')},
'861864999':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')},
'861865478':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'861865479':{'en': 'Jining, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'861867600':{'en': 'Zhongshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')},
'86186498':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861839153':{'en': 'Ankang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b89\u5eb7\u5e02')},
'86186490':{'en': 'Tianjin', 'zh': u('\u5929\u6d25\u5e02')},
'86186491':{'en': 'Tianjin', 'zh': u('\u5929\u6d25\u5e02')},
'86186492':{'en': 'Tianjin', 'zh': u('\u5929\u6d25\u5e02')},
'86186496':{'en': 'Xiamen, Fujian', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')},
'86186497':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861866443':{'en': 'Zhongshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')},
'861855956':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861863363':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')},
'861865476':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861865477':{'en': 'Jining, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'861839155':{'en': 'Ankang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b89\u5eb7\u5e02')},
'86185521':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'86185520':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'86185523':{'en': 'Nantong, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861860278':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'86185528':{'en': 'Xuzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861855954':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861839947':{'en': 'Hami, Xinjiang', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861839946':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861839945':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861839944':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861843638':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861839942':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861839941':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861839940':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861843634':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5f00\u5c01\u5e02')},
'861843635':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9a7b\u9a6c\u5e97\u5e02')},
'861843636':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861843637':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861843630':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')},
'861843631':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')},
'861843632':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5f00\u5c01\u5e02')},
'861839948':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u5410\u9c81\u756a\u5730\u533a')},
'861865645':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861864325':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'861865647':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861865646':{'en': 'LiuAn, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861865641':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861865640':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861865643':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861864324':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'861839157':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u5b89\u5eb7\u5e02')},
'861865649':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861864327':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'861864326':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'861860496':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u9526\u5dde\u5e02')},
'861864321':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'8618440':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861864320':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'861841984':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5929\u6c34\u5e02')},
'861853073':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')},
'861841986':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u6b66\u5a01\u5e02')},
'861841987':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5b9a\u897f\u5e02')},
'861841980':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u4e34\u590f\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861841981':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861841982':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5e86\u9633\u5e02')},
'861857025':{'en': 'Shaoyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u90b5\u9633\u5e02')},
'861839158':{'en': 'Weinan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861839494':{'en': 'Longnan, Gansu', 'zh': u('\u7518\u8083\u7701\u9647\u5357\u5e02')},
'861839497':{'en': 'Longnan, Gansu', 'zh': u('\u7518\u8083\u7701\u9647\u5357\u5e02')},
'861839496':{'en': 'Longnan, Gansu', 'zh': u('\u7518\u8083\u7701\u9647\u5357\u5e02')},
'861841988':{'en': 'Gannan, Gansu', 'zh': u('\u7518\u8083\u7701\u7518\u5357\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861841989':{'en': 'Jiuquan, Gansu', 'zh': u('\u7518\u8083\u7701\u9152\u6cc9\u5e02')},
'861839493':{'en': 'Qingyang, Gansu', 'zh': u('\u7518\u8083\u7701\u5e86\u9633\u5e02')},
'861839492':{'en': 'Dingxi, Gansu', 'zh': u('\u7518\u8083\u7701\u5b9a\u897f\u5e02')},
'861853070':{'en': 'Xinxiang, Henan', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')},
'86184389':{'en': 'Nanyang, Henan', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'86184386':{'en': 'Luoyang, Henan', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861853077':{'en': 'Shangqiu, Henan', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'86184383':{'en': 'Shangqiu, Henan', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'86184380':{'en': 'Zhoukou, Henan', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861853076':{'en': 'Shangqiu, Henan', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861857022':{'en': 'Xiangtan, Hunan', 'zh': u('\u6e56\u5357\u7701\u6e58\u6f6d\u5e02')},
'861839159':{'en': 'Tongchuan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u94dc\u5ddd\u5e02')},
'861853074':{'en': 'Xinxiang, Henan', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')},
'861854302':{'en': 'Tonghua, Jilin', 'zh': u('\u5409\u6797\u7701\u901a\u5316\u5e02')},
'861860494':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')},
'861846968':{'en': 'Xishuangbanna, Yunnan', 'zh': u('\u4e91\u5357\u7701\u897f\u53cc\u7248\u7eb3\u50a3\u65cf\u81ea\u6cbb\u5dde')},
'861866445':{'en': 'Shantou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5934\u5e02')},
'861848419':{'en': 'Anshun, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861848418':{'en': 'Anshun, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861848411':{'en': 'Guiyang, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861848410':{'en': 'Guiyang, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861848413':{'en': 'Guiyang, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861848412':{'en': 'Guiyang, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861848415':{'en': 'Anshun, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861848414':{'en': 'Guiyang, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861848417':{'en': 'Anshun, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861848416':{'en': 'Anshun, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861838548':{'en': 'Qiannan, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861838549':{'en': 'Qiannan, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861838540':{'en': 'Anshun, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861838541':{'en': 'Anshun, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861838542':{'en': 'Anshun, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861838543':{'en': 'Anshun, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861838544':{'en': 'Anshun, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861838545':{'en': 'Anshun, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861838546':{'en': 'Qiannan, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861838547':{'en': 'Qiannan, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861840583':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')},
'861840582':{'en': 'Huzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e56\u5dde\u5e02')},
'861840581':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861840580':{'en': 'Zhoushan, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u821f\u5c71\u5e02')},
'861840587':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861840586':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'861840585':{'en': 'Shaoxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u7ecd\u5174\u5e02')},
'861840584':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861840589':{'en': 'Jinhua, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u91d1\u534e\u5e02')},
'861840588':{'en': 'Lishui, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u4e3d\u6c34\u5e02')},
'861855589':{'en': 'LiuAn, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861855588':{'en': 'LiuAn, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861866938':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861866939':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861855581':{'en': 'MaAnshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9a6c\u978d\u5c71\u5e02')},
'861855580':{'en': 'MaAnshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9a6c\u978d\u5c71\u5e02')},
'861855583':{'en': 'MaAnshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9a6c\u978d\u5c71\u5e02')},
'861839232':{'en': 'Weinan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861839235':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
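Each entry above maps a Chinese mobile-number prefix (7 to 9 leading digits) to the English ('en') and Chinese ('zh') names of the area the prefix is allocated to; the u(...) wrapper appears to be a Unicode-literal helper defined elsewhere in the file. As a minimal sketch of how such a table could be queried, the Python below performs a longest-prefix lookup. The names PREFIX_TO_REGION and describe_number, the two inlined sample entries, and the longest-match strategy itself are illustrative assumptions, not part of this data file.

from typing import Optional

# Illustrative sample of the mapping shape used above; PREFIX_TO_REGION is an
# assumed name, and only two entries are inlined here for the demo.
PREFIX_TO_REGION = {
    '861840580': {'en': 'Zhoushan, Zhejiang', 'zh': '\u6d59\u6c5f\u7701\u821f\u5c71\u5e02'},
    '8618440': {'en': 'Changji, Xinjiang', 'zh': '\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde'},
}

def describe_number(number: str, lang: str = 'en') -> Optional[str]:
    """Return the area name for a mobile number via longest-prefix match."""
    digits = number.lstrip('+')
    # Try the longest candidate prefix first and shrink one digit at a time,
    # since keys in the table vary in length (7 to 9 digits above).
    for end in range(len(digits), 0, -1):
        entry = PREFIX_TO_REGION.get(digits[:end])
        if entry is not None:
            return entry.get(lang)
    return None

print(describe_number('+8618405801234'))       # -> Zhoushan, Zhejiang
print(describe_number('8618440123456', 'zh'))  # -> 新疆昌吉回族自治州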