code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import re
from dataclasses import dataclass
from enum import IntEnum
from typing import Tuple, Sequence, Literal
from lxml import etree
from .base import FYGClient, ReadType, VS, ClickType, LimitReachedError
from .character import EquipConfig, Role
from .items import Equipment, grade_from_class
_exp = re.compile(r"获得了 (\d+) ([^<]+)")
_base = re.compile(r"基准值:(\d+),随机范围([0-9.]+)-([0-9.]+)倍")
_lv_pattern = re.compile(r"Lv\.(\d+)(?:\((\d+)%\))?")
_spaces = re.compile(r"\s+")
_icon_class_map = {
"icon icon-bolt text-danger fyg_f14": "AD",
"icon icon-bolt text-primary fyg_f14": "AP",
"icon icon-bolt text-warning fyg_f14": "TD",
"icon icon-minus text-danger fyg_f14": "HP_lose",
"icon icon-minus text-info fyg_f14": "ES_lose",
"icon icon-plus text-danger fyg_f14": "HP_health",
"icon icon-plus text-info fyg_f14": "ES_health",
}
CreepType = Literal["铁皮木人", "嗜血的迅捷蛛", "魔灯之灵", "憨憨的食铁兽"]
class PKRank(IntEnum):
C = 1
CC = 2
CCC = 3
B = 4
BB = 5
BBB = 6
A = 7
AA = 8
AAA = 9
S = 10
SS = 11
SSS = 12
@dataclass(slots=True)
class PKInfo:
"""争夺战场里的段位、体力等等状态"""
rank: PKRank # 当前所在段位
progress: int # 段位进度
power: int # 今日体力
strengthen: int # 野怪附加强度
@property
def base_trophy(self):
"""该状态下的搜刮奖励基准值,与段位和进度有关"""
x = self.rank ** 2 * 200
p = self.progress
return x * p * 0.0025 + p * 25 + x
@dataclass(eq=False, slots=True)
class Trophy:
"""一次搜刮资源的结果"""
value: int # 数值
type: str # 资源类型
base: int # 基准值
range: Tuple[float, float] # 范围
@dataclass(eq=False, slots=True)
class Player:
"""战斗中的玩家"""
name: str # 名字
role: Role # 职业(卡片)
leval: int # 等级
equipment: EquipConfig # 装备
@dataclass(slots=True)
class Creep:
"""战斗中的野怪"""
type: CreepType # 名字
leval: int # 等级
strengthen: float # 强度
# 元组的两个元素是名字和数量,比如 星芒 15! = ("星芒", 15),没有数量的为 0
_StateList = Sequence[tuple[str, int]]
@dataclass(eq=False, slots=True)
class Action:
is_attack: bool # 是攻击方?
state: _StateList # 技能和状态
HP: int = 0 # 血量
ES: int = 0 # 护盾
AD: int = None # 物理攻击(未计算护甲)
AP: int = None # 魔法攻击(未计算护甲)
TD: int = None # 绝对攻击
HP_lose: int = None # 生命值损失
ES_lose: int = None # 护盾损失
HP_health: int = None # 生命值回复
ES_health: int = None # 护盾回复
# 表示一次交手,左边是自己右边是对方。
Round = tuple[Action, Action]
@dataclass(eq=False, slots=True)
class Battle:
"""表示一场战斗"""
player: Player # 自己
enemy: Player | Creep # 敌人
is_win: bool # 己方胜利
actions: Sequence[Round] # 过程
def _parse_fighter(equips, info):
h3 = info.getchildren()[0]
lv, role = _spaces.split(h3.getchildren()[0].tail)
if role.startswith("Lv"):
lv, role = role, lv
match = _lv_pattern.match(lv)
if role == "野怪":
level, strengthen = match.groups()
return Creep(h3.text, int(level), int(strengthen) / 100)
# TODO: 如果装备不齐?最近不能注册新号,没法测。
e = []
for button in equips.iterchildren():
grade = grade_from_class(button)
name = button.get("title")
level = int(button.getchildren()[0].tail)
e.append(Equipment(grade, name, level, None, None))
ec = EquipConfig(*e)
return Player(h3.text, role, int(match.group(1)), ec)
def _parse_values(action, icon_col, col2):
for icon in icon_col.iterfind("p/i"):
key = _icon_class_map[icon.get("class")]
setattr(action, key, int(icon.text))
es, hp = col2.xpath("span/text()")
action.ES, action.HP = int(es), int(hp)
def _parse_state(values):
for x in values:
x = x.rstrip("!").split(" ")
c = x[1] if len(x) > 1 else 0
yield x[0], int(c)
class PKApi:
def __init__(self, api: FYGClient):
self.api = api
def get_info(self):
"""获取段位、体力等基本信息"""
html = self.api.fyg_read(ReadType.PK)
html = etree.HTML(html)
spans = html.findall(".//span")
return PKInfo(
PKRank[spans[0].text],
int(spans[1].text[:-1]),
int(spans[-2].text),
int(spans[-1].text),
)
def battle(self, target: VS):
"""
战斗,攻击野怪或抢夺其他玩家的进度。
:param target: 打谁
:return: 战斗结果
"""
html = etree.HTML(self.api.fyg_v_intel(target))
rows = html.findall("body/div/div")
fs = rows[0].findall("div/div[1]/div[1]")
player = _parse_fighter(*fs[0].getchildren())
enemy = _parse_fighter(*reversed(fs[1].getchildren()))
actions = []
for i in range(1, len(rows) - 2, 3):
p1 = rows[i].find("div[1]/p")
attack = "bg-special" in p1.get("class")
s1 = p1.xpath("i/b/text()")
s2 = rows[i].xpath("div[2]/p/i/b/text()")
act1 = Action(attack, tuple(_parse_state(s1)))
act2 = Action(not attack, tuple(_parse_state(s2)))
h = rows[i + 1].getchildren()
la, ls, rs, ra = h
_parse_values(act1, la, ls)
_parse_values(act2, ra, rs)
actions.append((act1, act2))
win = "smile" in rows[-1].find("div[2]/div/i").get("class")
return Battle(player, enemy, win, actions)
def pillage(self):
"""搜刮资源"""
html = self.api.fyg_click(ClickType.Pillage)
match1 = _exp.search(html)
if match1 is None:
raise LimitReachedError("没有体力了")
match2 = _base.search(html)
min_, max_ = match2.groups()[1:]
return Trophy(
int(match1.group(1)),
match1.group(2),
int(match2.group(1)),
(float(min_), float(max_)),
)
def rejuvenate(self):
"""恢复体力到 100,固定消耗 20 星沙"""
text = self.api.fyg_click(ClickType.Rejuvenate)
if text != "体力已刷新。":
raise LimitReachedError("星沙不够") | guguzhen/api/pk.py | import re
from dataclasses import dataclass
from enum import IntEnum
from typing import Tuple, Sequence, Literal
from lxml import etree
from .base import FYGClient, ReadType, VS, ClickType, LimitReachedError
from .character import EquipConfig, Role
from .items import Equipment, grade_from_class
_exp = re.compile(r"获得了 (\d+) ([^<]+)")
_base = re.compile(r"基准值:(\d+),随机范围([0-9.]+)-([0-9.]+)倍")
_lv_pattern = re.compile(r"Lv\.(\d+)(?:\((\d+)%\))?")
_spaces = re.compile(r"\s+")
_icon_class_map = {
"icon icon-bolt text-danger fyg_f14": "AD",
"icon icon-bolt text-primary fyg_f14": "AP",
"icon icon-bolt text-warning fyg_f14": "TD",
"icon icon-minus text-danger fyg_f14": "HP_lose",
"icon icon-minus text-info fyg_f14": "ES_lose",
"icon icon-plus text-danger fyg_f14": "HP_health",
"icon icon-plus text-info fyg_f14": "ES_health",
}
CreepType = Literal["铁皮木人", "嗜血的迅捷蛛", "魔灯之灵", "憨憨的食铁兽"]
class PKRank(IntEnum):
C = 1
CC = 2
CCC = 3
B = 4
BB = 5
BBB = 6
A = 7
AA = 8
AAA = 9
S = 10
SS = 11
SSS = 12
@dataclass(slots=True)
class PKInfo:
"""争夺战场里的段位、体力等等状态"""
rank: PKRank # 当前所在段位
progress: int # 段位进度
power: int # 今日体力
strengthen: int # 野怪附加强度
@property
def base_trophy(self):
"""该状态下的搜刮奖励基准值,与段位和进度有关"""
x = self.rank ** 2 * 200
p = self.progress
return x * p * 0.0025 + p * 25 + x
@dataclass(eq=False, slots=True)
class Trophy:
"""一次搜刮资源的结果"""
value: int # 数值
type: str # 资源类型
base: int # 基准值
range: Tuple[float, float] # 范围
@dataclass(eq=False, slots=True)
class Player:
"""战斗中的玩家"""
name: str # 名字
role: Role # 职业(卡片)
leval: int # 等级
equipment: EquipConfig # 装备
@dataclass(slots=True)
class Creep:
"""战斗中的野怪"""
type: CreepType # 名字
leval: int # 等级
strengthen: float # 强度
# 元组的两个元素是名字和数量,比如 星芒 15! = ("星芒", 15),没有数量的为 0
_StateList = Sequence[tuple[str, int]]
@dataclass(eq=False, slots=True)
class Action:
is_attack: bool # 是攻击方?
state: _StateList # 技能和状态
HP: int = 0 # 血量
ES: int = 0 # 护盾
AD: int = None # 物理攻击(未计算护甲)
AP: int = None # 魔法攻击(未计算护甲)
TD: int = None # 绝对攻击
HP_lose: int = None # 生命值损失
ES_lose: int = None # 护盾损失
HP_health: int = None # 生命值回复
ES_health: int = None # 护盾回复
# 表示一次交手,左边是自己右边是对方。
Round = tuple[Action, Action]
@dataclass(eq=False, slots=True)
class Battle:
"""表示一场战斗"""
player: Player # 自己
enemy: Player | Creep # 敌人
is_win: bool # 己方胜利
actions: Sequence[Round] # 过程
def _parse_fighter(equips, info):
h3 = info.getchildren()[0]
lv, role = _spaces.split(h3.getchildren()[0].tail)
if role.startswith("Lv"):
lv, role = role, lv
match = _lv_pattern.match(lv)
if role == "野怪":
level, strengthen = match.groups()
return Creep(h3.text, int(level), int(strengthen) / 100)
# TODO: 如果装备不齐?最近不能注册新号,没法测。
e = []
for button in equips.iterchildren():
grade = grade_from_class(button)
name = button.get("title")
level = int(button.getchildren()[0].tail)
e.append(Equipment(grade, name, level, None, None))
ec = EquipConfig(*e)
return Player(h3.text, role, int(match.group(1)), ec)
def _parse_values(action, icon_col, col2):
for icon in icon_col.iterfind("p/i"):
key = _icon_class_map[icon.get("class")]
setattr(action, key, int(icon.text))
es, hp = col2.xpath("span/text()")
action.ES, action.HP = int(es), int(hp)
def _parse_state(values):
for x in values:
x = x.rstrip("!").split(" ")
c = x[1] if len(x) > 1 else 0
yield x[0], int(c)
class PKApi:
def __init__(self, api: FYGClient):
self.api = api
def get_info(self):
"""获取段位、体力等基本信息"""
html = self.api.fyg_read(ReadType.PK)
html = etree.HTML(html)
spans = html.findall(".//span")
return PKInfo(
PKRank[spans[0].text],
int(spans[1].text[:-1]),
int(spans[-2].text),
int(spans[-1].text),
)
def battle(self, target: VS):
"""
战斗,攻击野怪或抢夺其他玩家的进度。
:param target: 打谁
:return: 战斗结果
"""
html = etree.HTML(self.api.fyg_v_intel(target))
rows = html.findall("body/div/div")
fs = rows[0].findall("div/div[1]/div[1]")
player = _parse_fighter(*fs[0].getchildren())
enemy = _parse_fighter(*reversed(fs[1].getchildren()))
actions = []
for i in range(1, len(rows) - 2, 3):
p1 = rows[i].find("div[1]/p")
attack = "bg-special" in p1.get("class")
s1 = p1.xpath("i/b/text()")
s2 = rows[i].xpath("div[2]/p/i/b/text()")
act1 = Action(attack, tuple(_parse_state(s1)))
act2 = Action(not attack, tuple(_parse_state(s2)))
h = rows[i + 1].getchildren()
la, ls, rs, ra = h
_parse_values(act1, la, ls)
_parse_values(act2, ra, rs)
actions.append((act1, act2))
win = "smile" in rows[-1].find("div[2]/div/i").get("class")
return Battle(player, enemy, win, actions)
def pillage(self):
"""搜刮资源"""
html = self.api.fyg_click(ClickType.Pillage)
match1 = _exp.search(html)
if match1 is None:
raise LimitReachedError("没有体力了")
match2 = _base.search(html)
min_, max_ = match2.groups()[1:]
return Trophy(
int(match1.group(1)),
match1.group(2),
int(match2.group(1)),
(float(min_), float(max_)),
)
def rejuvenate(self):
"""恢复体力到 100,固定消耗 20 星沙"""
text = self.api.fyg_click(ClickType.Rejuvenate)
if text != "体力已刷新。":
raise LimitReachedError("星沙不够") | 0.411111 | 0.124479 |
# author: <NAME>
from pyquil import Program
from pyquil.quil import DefGate
from pyquil.gates import *
from pyquil.api import WavefunctionSimulator
import numpy as np
import sys
class Obstacle:
def __init__(self, qubits, keys):
rows = 2 ** len(qubits)
arr = np.zeros((rows, rows), int)
for row in range(rows):
diagonal_element = 1
if(row in keys):
diagonal_element = -1
arr[row][row] = diagonal_element
self.obstacle_definition = DefGate("OBSTACLE", arr)
self.qubits = qubits
def init(self):
return Program(self.obstacle_definition)
def iterate(self):
OBSTACLE = self.obstacle_definition.get_constructor()
qbits = [qubit for qubit in reversed(self.qubits)]
return Program(OBSTACLE(*qbits))
class GroversDiffusionOperator:
def __init__(self, qubits):
rows = 2 ** len(qubits)
arr = np.zeros((rows, rows), int)
arr = 2 / rows * \
np.ones((rows, rows), int) - np.identity(rows)
self.diffusion_operator_definition = DefGate("DIFFUSION_OPERATOR", arr)
self.qubits = qubits
def init(self):
return Program(self.diffusion_operator_definition)
def iterate(self):
DIFFUSION_OPERATOR = self.diffusion_operator_definition.get_constructor()
qbits = [qubit for qubit in reversed(self.qubits)]
return Program(DIFFUSION_OPERATOR(*qbits))
def equalSuperPosition(qubits):
prog = Program()
for i in qubits:
prog += Program(H(qubits[i]))
return prog
def diffusion_iterations(num_of_qubits, length_of_key):
'''
iterations = pi / 4 * √(N/M) , M < N / 2
'''
return ((np.pi / 4) * np.sqrt(2 ** num_of_qubits / length_of_key)).astype(int)
def obstacle(num_of_qubits):
prog = Program()
num_of_qubits_minus_one = num_of_qubits - 1
gate = Z(num_of_qubits_minus_one)
for i in range(num_of_qubits_minus_one):
gate = gate.controlled(i)
prog += Program(gate)
return prog
def flipQubits(num_of_qubits, key):
prog = Program()
counter = 0
key_in_binary = '{:0{:d}b}'.format(key, num_of_qubits)
for bit in reversed(key_in_binary):
if bit == "0":
prog += Program(X(counter))
counter += 1
return prog
def groversAlgorithmSingleKeySimulation(num_of_qubits, key):
prog = Program()
qubits = range(num_of_qubits)
prog += equalSuperPosition(qubits)
grovers_diffusion_operator = GroversDiffusionOperator(qubits)
prog += grovers_diffusion_operator.init()
iterations = diffusion_iterations(num_of_qubits, 1)
for _ in range(iterations):
prog += obstacle(num_of_qubits)
prog += grovers_diffusion_operator.iterate()
prog += flipQubits(num_of_qubits, key)
return prog
def groversAlgorithm(num_of_qubits, keys):
qubits = range(num_of_qubits)
prog = Program()
prog += equalSuperPosition(qubits)
obstacle = Obstacle(qubits, keys)
prog += obstacle.init()
grovers_diffusion_operator = GroversDiffusionOperator(qubits)
prog += grovers_diffusion_operator.init()
iterations = diffusion_iterations(num_of_qubits, len(keys))
for _ in range(iterations):
prog += obstacle.iterate()
prog += grovers_diffusion_operator.iterate()
return prog
def getNumOfQubitsAndSearchKey(argv):
num_of_qubits = 3
keys = [0]
if (len(argv) >= 2):
arr = [int(x) for x in argv[1] if x != ',']
try:
'''M < N / 2'''
if(argv[0] in argv and argv[1] in argv and 2 ** int(argv[0]) / 2 > len(keys)):
num_of_qubits = int(argv[0])
keys = arr
except ValueError:
pass
return num_of_qubits, keys
def main(argv):
num_of_qubits, keys = getNumOfQubitsAndSearchKey(argv)
prog = groversAlgorithm(num_of_qubits, keys)
# prog = groversAlgorithmSingleKeySimulation(num_of_qubits, keys[0])
wfn = WavefunctionSimulator().wavefunction(prog)
print(wfn)
if __name__ == "__main__":
main(sys.argv[1:]) | grovers_algorithm.py |
# author: <NAME>
from pyquil import Program
from pyquil.quil import DefGate
from pyquil.gates import *
from pyquil.api import WavefunctionSimulator
import numpy as np
import sys
class Obstacle:
def __init__(self, qubits, keys):
rows = 2 ** len(qubits)
arr = np.zeros((rows, rows), int)
for row in range(rows):
diagonal_element = 1
if(row in keys):
diagonal_element = -1
arr[row][row] = diagonal_element
self.obstacle_definition = DefGate("OBSTACLE", arr)
self.qubits = qubits
def init(self):
return Program(self.obstacle_definition)
def iterate(self):
OBSTACLE = self.obstacle_definition.get_constructor()
qbits = [qubit for qubit in reversed(self.qubits)]
return Program(OBSTACLE(*qbits))
class GroversDiffusionOperator:
def __init__(self, qubits):
rows = 2 ** len(qubits)
arr = np.zeros((rows, rows), int)
arr = 2 / rows * \
np.ones((rows, rows), int) - np.identity(rows)
self.diffusion_operator_definition = DefGate("DIFFUSION_OPERATOR", arr)
self.qubits = qubits
def init(self):
return Program(self.diffusion_operator_definition)
def iterate(self):
DIFFUSION_OPERATOR = self.diffusion_operator_definition.get_constructor()
qbits = [qubit for qubit in reversed(self.qubits)]
return Program(DIFFUSION_OPERATOR(*qbits))
def equalSuperPosition(qubits):
prog = Program()
for i in qubits:
prog += Program(H(qubits[i]))
return prog
def diffusion_iterations(num_of_qubits, length_of_key):
'''
iterations = pi / 4 * √(N/M) , M < N / 2
'''
return ((np.pi / 4) * np.sqrt(2 ** num_of_qubits / length_of_key)).astype(int)
def obstacle(num_of_qubits):
prog = Program()
num_of_qubits_minus_one = num_of_qubits - 1
gate = Z(num_of_qubits_minus_one)
for i in range(num_of_qubits_minus_one):
gate = gate.controlled(i)
prog += Program(gate)
return prog
def flipQubits(num_of_qubits, key):
prog = Program()
counter = 0
key_in_binary = '{:0{:d}b}'.format(key, num_of_qubits)
for bit in reversed(key_in_binary):
if bit == "0":
prog += Program(X(counter))
counter += 1
return prog
def groversAlgorithmSingleKeySimulation(num_of_qubits, key):
prog = Program()
qubits = range(num_of_qubits)
prog += equalSuperPosition(qubits)
grovers_diffusion_operator = GroversDiffusionOperator(qubits)
prog += grovers_diffusion_operator.init()
iterations = diffusion_iterations(num_of_qubits, 1)
for _ in range(iterations):
prog += obstacle(num_of_qubits)
prog += grovers_diffusion_operator.iterate()
prog += flipQubits(num_of_qubits, key)
return prog
def groversAlgorithm(num_of_qubits, keys):
qubits = range(num_of_qubits)
prog = Program()
prog += equalSuperPosition(qubits)
obstacle = Obstacle(qubits, keys)
prog += obstacle.init()
grovers_diffusion_operator = GroversDiffusionOperator(qubits)
prog += grovers_diffusion_operator.init()
iterations = diffusion_iterations(num_of_qubits, len(keys))
for _ in range(iterations):
prog += obstacle.iterate()
prog += grovers_diffusion_operator.iterate()
return prog
def getNumOfQubitsAndSearchKey(argv):
num_of_qubits = 3
keys = [0]
if (len(argv) >= 2):
arr = [int(x) for x in argv[1] if x != ',']
try:
'''M < N / 2'''
if(argv[0] in argv and argv[1] in argv and 2 ** int(argv[0]) / 2 > len(keys)):
num_of_qubits = int(argv[0])
keys = arr
except ValueError:
pass
return num_of_qubits, keys
def main(argv):
num_of_qubits, keys = getNumOfQubitsAndSearchKey(argv)
prog = groversAlgorithm(num_of_qubits, keys)
# prog = groversAlgorithmSingleKeySimulation(num_of_qubits, keys[0])
wfn = WavefunctionSimulator().wavefunction(prog)
print(wfn)
if __name__ == "__main__":
main(sys.argv[1:]) | 0.496826 | 0.53965 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from joblib import Parallel, delayed
from ._base import BaseModule, torchensemble_model_doc
from .utils import io
from .utils import set_module
from .utils import operator as op
__all__ = ["_BaseAdversarialTraining",
"AdversarialTrainingClassifier",
"AdversarialTrainingRegressor"]
__fit_doc = """
Parameters
----------
train_loader : torch.utils.data.DataLoader
A :mod:`DataLoader` container that contains the training data.
epochs : int, default=100
The number of training epochs.
epsilon : float, defaul=0.01
The step used to generate adversarial samples in the fast gradient
sign method (FGSM), which should be in the range [0, 1].
log_interval : int, default=100
The number of batches to wait before printting the training status.
test_loader : torch.utils.data.DataLoader, default=None
A :mod:`DataLoader` container that contains the evaluating data.
- If ``None``, no validation is conducted after each training
epoch.
- If not ``None``, the ensemble will be evaluated on this
dataloader after each training epoch.
save_model : bool, default=True
Whether to save the model.
- If test_loader is ``None``, the ensemble containing
``n_estimators`` base estimators will be saved.
- If test_loader is not ``None``, the ensemble with the best
validation performance will be saved.
save_dir : string, default=None
Specify where to save the model.
- If ``None``, the model will be saved in the current directory.
- If not ``None``, the model will be saved in the specified
directory: ``save_dir``.
"""
def _adversarial_training_model_doc(header, item="fit"):
"""
Decorator on obtaining documentation for different adversarial training
models.
"""
def get_doc(item):
"""Return selected item"""
__doc = {"fit": __fit_doc}
return __doc[item]
def adddoc(cls):
doc = [header + "\n\n"]
doc.extend(get_doc(item))
cls.__doc__ = "".join(doc)
return cls
return adddoc
def _parallel_fit_per_epoch(train_loader,
epsilon,
estimator,
optimizer,
criterion,
idx,
epoch,
log_interval,
device,
is_classification):
"""
Private function used to fit base estimators in parallel.
WARNING: Parallelization when fitting large base estimators may cause
out-of-memory error.
"""
for batch_idx, (data, target) in enumerate(train_loader):
batch_size = data.size()[0]
data, target = data.to(device), target.to(device)
data.requires_grad = True
# Get adversarial samples
_output = estimator(data)
_loss = criterion(_output, target)
_loss.backward()
data_grad = data.grad.data
adv_data = _get_fgsm_samples(data, epsilon, data_grad)
# Compute the training loss
optimizer.zero_grad()
org_output = estimator(data)
adv_output = estimator(adv_data)
loss = criterion(org_output, target) + criterion(adv_output, target)
loss.backward()
optimizer.step()
# Print training status
if batch_idx % log_interval == 0:
# Classification
if is_classification:
_, predicted = torch.max(org_output.data, 1)
correct = (predicted == target).sum().item()
msg = ("Estimator: {:03d} | Epoch: {:03d} | Batch: {:03d}"
" | Loss: {:.5f} | Correct: {:d}/{:d}")
print(
msg.format(
idx, epoch, batch_idx, loss, correct, batch_size
)
)
# Regression
else:
msg = ("Estimator: {:03d} | Epoch: {:03d} | Batch: {:03d}"
" | Loss: {:.5f}")
print(msg.format(idx, epoch, batch_idx, loss))
return estimator, optimizer
def _get_fgsm_samples(sample, epsilon, sample_grad):
"""
Private functions used to generate adversarial samples with fast gradient
sign method (FGSM)."""
# Check the input range of `sample`
min_value, max_value = torch.min(sample), torch.max(sample)
if not 0 <= min_value < max_value <= 1:
msg = ("The input range of samples passed to adversarial training"
" should be in the range [0, 1], but got [{:.3f}, {:.3f}]"
" instead.")
raise ValueError(msg.format(min_value, max_value))
sign_sample_grad = sample_grad.sign()
perturbed_sample = sample + epsilon * sign_sample_grad
perturbed_sample = torch.clamp(perturbed_sample, 0, 1)
return perturbed_sample
class _BaseAdversarialTraining(BaseModule):
def _validate_parameters(self, epochs, epsilon, log_interval):
"""Validate hyper-parameters on training the ensemble."""
if not epochs > 0:
msg = ("The number of training epochs = {} should be strictly"
" positive.")
self.logger.error(msg.format(epochs))
raise ValueError(msg.format(epochs))
if not 0 < epsilon <= 1:
msg = ("The step used to generate adversarial samples in FGSM"
" should be in the range (0, 1], but got {} instead.")
self.logger.error(msg.format(epsilon))
raise ValueError(msg.format(epsilon))
if not log_interval > 0:
msg = ("The number of batches to wait before printting the"
" training status should be strictly positive, but got {}"
" instead.")
self.logger.error(msg.format(log_interval))
raise ValueError(msg.format(log_interval))
@torchensemble_model_doc("""Implementation on the AdversarialTrainingClassifier.""", # noqa: E501
"model")
class AdversarialTrainingClassifier(_BaseAdversarialTraining):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.is_classification = True
@torchensemble_model_doc(
"""Implementation on the data forwarding in AdversarialTrainingClassifier.""", # noqa: E501
"classifier_forward")
def forward(self, x):
# Take the average over class distributions from all base estimators.
outputs = [F.softmax(estimator(x), dim=1)
for estimator in self.estimators_]
proba = op.average(outputs)
return proba
@torchensemble_model_doc(
"""Set the attributes on optimizer for AdversarialTrainingClassifier.""", # noqa: E501
"set_optimizer")
def set_optimizer(self, optimizer_name, **kwargs):
self.optimizer_name = optimizer_name
self.optimizer_args = kwargs
@torchensemble_model_doc(
"""Set the attributes on scheduler for AdversarialTrainingClassifier.""", # noqa: E501
"set_scheduler")
def set_scheduler(self, scheduler_name, **kwargs):
self.scheduler_name = scheduler_name
self.scheduler_args = kwargs
self.use_scheduler_ = True
@_adversarial_training_model_doc(
"""Implementation on the training stage of AdversarialTrainingClassifier.""", # noqa: E501
"fit"
)
def fit(self,
train_loader,
epochs=100,
epsilon=0.5,
log_interval=100,
test_loader=None,
save_model=True,
save_dir=None):
self._validate_parameters(epochs, epsilon, log_interval)
self.n_outputs = self._decide_n_outputs(train_loader, True)
# Instantiate a pool of base estimators, optimizers, and schedulers.
estimators = []
for _ in range(self.n_estimators):
estimators.append(self._make_estimator())
optimizers = []
for i in range(self.n_estimators):
optimizers.append(set_module.set_optimizer(estimators[i],
self.optimizer_name,
**self.optimizer_args))
if self.use_scheduler_:
schedulers = []
for i in range(self.n_estimators):
schedulers.append(set_module.set_scheduler(optimizers[i],
self.scheduler_name,
**self.scheduler_args)) # noqa: E501
# Utils
criterion = nn.CrossEntropyLoss()
best_acc = 0.
# Internal helper function on pesudo forward
def _forward(estimators, data):
outputs = [F.softmax(estimator(data), dim=1)
for estimator in estimators]
proba = op.average(outputs)
return proba
# Maintain a pool of workers
with Parallel(n_jobs=self.n_jobs) as parallel:
# Training loop
for epoch in range(epochs):
self.train()
rets = parallel(delayed(_parallel_fit_per_epoch)(
train_loader,
epsilon,
estimator,
optimizer,
criterion,
idx,
epoch,
log_interval,
self.device,
False
)
for idx, (estimator, optimizer) in enumerate(
zip(estimators, optimizers))
)
estimators, optimizers = [], []
for estimator, optimizer in rets:
estimators.append(estimator)
optimizers.append(optimizer)
# Validation
if test_loader:
self.eval()
with torch.no_grad():
correct = 0
total = 0
for _, (data, target) in enumerate(test_loader):
data = data.to(self.device)
target = target.to(self.device)
output = _forward(estimators, data)
_, predicted = torch.max(output.data, 1)
correct += (predicted == target).sum().item()
total += target.size(0)
acc = 100 * correct / total
if acc > best_acc:
best_acc = acc
self.estimators_ = nn.ModuleList() # reset
self.estimators_.extend(estimators)
if save_model:
io.save(self, save_dir, self.logger)
msg = ("Epoch: {:03d} | Validation Acc: {:.3f}"
" % | Historical Best: {:.3f} %")
self.logger.info(msg.format(epoch, acc, best_acc))
# Update the scheduler
if self.use_scheduler_:
for i in range(self.n_estimators):
schedulers[i].step()
self.estimators_ = nn.ModuleList()
self.estimators_.extend(estimators)
if save_model and not test_loader:
io.save(self, save_dir, self.logger)
@torchensemble_model_doc(
"""Implementation on the evaluating stage of AdversarialTrainingClassifier.""", # noqa: E501
"classifier_predict")
def predict(self, test_loader):
self.eval()
correct = 0
total = 0
for _, (data, target) in enumerate(test_loader):
data, target = data.to(self.device), target.to(self.device)
output = self.forward(data)
_, predicted = torch.max(output.data, 1)
correct += (predicted == target).sum().item()
total += target.size(0)
acc = 100 * correct / total
return acc
@torchensemble_model_doc("""Implementation on the AdversarialTrainingRegressor.""", # noqa: E501
"model")
class AdversarialTrainingRegressor(_BaseAdversarialTraining):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.is_classification = False
@torchensemble_model_doc(
"""Implementation on the data forwarding in AdversarialTrainingRegressor.""", # noqa: E501
"regressor_forward")
def forward(self, x):
# Take the average over predictions from all base estimators.
outputs = [estimator(x) for estimator in self.estimators_]
pred = op.average(outputs)
return pred
@torchensemble_model_doc(
"""Set the attributes on optimizer for AdversarialTrainingRegressor.""", # noqa: E501
"set_optimizer")
def set_optimizer(self, optimizer_name, **kwargs):
self.optimizer_name = optimizer_name
self.optimizer_args = kwargs
@torchensemble_model_doc(
"""Set the attributes on scheduler for AdversarialTrainingRegressor.""", # noqa: E501
"set_scheduler")
def set_scheduler(self, scheduler_name, **kwargs):
self.scheduler_name = scheduler_name
self.scheduler_args = kwargs
self.use_scheduler_ = True
@_adversarial_training_model_doc(
"""Implementation on the training stage of AdversarialTrainingRegressor.""", # noqa: E501
"fit"
)
def fit(self,
train_loader,
epochs=100,
epsilon=0.5,
log_interval=100,
test_loader=None,
save_model=True,
save_dir=None):
self._validate_parameters(epochs, epsilon, log_interval)
self.n_outputs = self._decide_n_outputs(train_loader, True)
# Instantiate a pool of base estimators, optimizers, and schedulers.
estimators = []
for _ in range(self.n_estimators):
estimators.append(self._make_estimator())
optimizers = []
for i in range(self.n_estimators):
optimizers.append(set_module.set_optimizer(estimators[i],
self.optimizer_name,
**self.optimizer_args))
if self.use_scheduler_:
schedulers = []
for i in range(self.n_estimators):
schedulers.append(set_module.set_scheduler(optimizers[i],
self.scheduler_name,
**self.scheduler_args)) # noqa: E501
# Utils
criterion = nn.MSELoss()
best_mse = float("inf")
# Internal helper function on pesudo forward
def _forward(estimators, data):
outputs = [estimator(data) for estimator in estimators]
pred = op.average(outputs)
return pred
# Maintain a pool of workers
with Parallel(n_jobs=self.n_jobs) as parallel:
# Training loop
for epoch in range(epochs):
self.train()
rets = parallel(delayed(_parallel_fit_per_epoch)(
train_loader,
epsilon,
estimator,
optimizer,
criterion,
idx,
epoch,
log_interval,
self.device,
True
)
for idx, (estimator, optimizer) in enumerate(
zip(estimators, optimizers))
)
estimators, optimizers = [], []
for estimator, optimizer in rets:
estimators.append(estimator)
optimizers.append(optimizer)
# Validation
if test_loader:
self.eval()
with torch.no_grad():
mse = 0
for _, (data, target) in enumerate(test_loader):
data = data.to(self.device)
target = target.to(self.device)
output = _forward(estimators, data)
mse += criterion(output, target)
mse /= len(test_loader)
if mse < best_mse:
best_mse = mse
self.estimators_ = nn.ModuleList()
self.estimators_.extend(estimators)
if save_model:
io.save(self, save_dir, self.logger)
msg = ("Epoch: {:03d} | Validation MSE:"
" {:.5f} | Historical Best: {:.5f}")
self.logger.info(msg.format(epoch, mse, best_mse))
# Update the scheduler
if self.use_scheduler_:
for i in range(self.n_estimators):
schedulers[i].step()
self.estimators_ = nn.ModuleList()
self.estimators_.extend(estimators)
if save_model and not test_loader:
io.save(self, save_dir, self.logger)
@torchensemble_model_doc(
"""Implementation on the evaluating stage of AdversarialTrainingRegressor.""", # noqa: E501
"regressor_predict")
def predict(self, test_loader):
self.eval()
mse = 0
criterion = nn.MSELoss()
for batch_idx, (data, target) in enumerate(test_loader):
data, target = data.to(self.device), target.to(self.device)
output = self.forward(data)
mse += criterion(output, target)
return mse / len(test_loader) | torchensemble/adversarial_training.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from joblib import Parallel, delayed
from ._base import BaseModule, torchensemble_model_doc
from .utils import io
from .utils import set_module
from .utils import operator as op
__all__ = ["_BaseAdversarialTraining",
"AdversarialTrainingClassifier",
"AdversarialTrainingRegressor"]
__fit_doc = """
Parameters
----------
train_loader : torch.utils.data.DataLoader
A :mod:`DataLoader` container that contains the training data.
epochs : int, default=100
The number of training epochs.
epsilon : float, defaul=0.01
The step used to generate adversarial samples in the fast gradient
sign method (FGSM), which should be in the range [0, 1].
log_interval : int, default=100
The number of batches to wait before printting the training status.
test_loader : torch.utils.data.DataLoader, default=None
A :mod:`DataLoader` container that contains the evaluating data.
- If ``None``, no validation is conducted after each training
epoch.
- If not ``None``, the ensemble will be evaluated on this
dataloader after each training epoch.
save_model : bool, default=True
Whether to save the model.
- If test_loader is ``None``, the ensemble containing
``n_estimators`` base estimators will be saved.
- If test_loader is not ``None``, the ensemble with the best
validation performance will be saved.
save_dir : string, default=None
Specify where to save the model.
- If ``None``, the model will be saved in the current directory.
- If not ``None``, the model will be saved in the specified
directory: ``save_dir``.
"""
def _adversarial_training_model_doc(header, item="fit"):
"""
Decorator on obtaining documentation for different adversarial training
models.
"""
def get_doc(item):
"""Return selected item"""
__doc = {"fit": __fit_doc}
return __doc[item]
def adddoc(cls):
doc = [header + "\n\n"]
doc.extend(get_doc(item))
cls.__doc__ = "".join(doc)
return cls
return adddoc
def _parallel_fit_per_epoch(train_loader,
                            epsilon,
                            estimator,
                            optimizer,
                            criterion,
                            idx,
                            epoch,
                            log_interval,
                            device,
                            is_classification):
    """
    Private function used to fit base estimators in parallel.

    Runs one epoch of FGSM adversarial training for a single base
    estimator and returns the updated ``(estimator, optimizer)`` pair so
    the caller can collect them back from the worker processes.

    WARNING: Parallelization when fitting large base estimators may cause
    out-of-memory error.
    """
    for batch_idx, (data, target) in enumerate(train_loader):

        batch_size = data.size()[0]
        data, target = data.to(device), target.to(device)
        # Track gradients w.r.t. the *input* so FGSM can use them below.
        data.requires_grad = True

        # Get adversarial samples: one forward/backward pass populates
        # data.grad, then the input is perturbed along its gradient sign.
        _output = estimator(data)
        _loss = criterion(_output, target)
        _loss.backward()
        data_grad = data.grad.data
        adv_data = _get_fgsm_samples(data, epsilon, data_grad)

        # Compute the training loss on both clean and adversarial inputs.
        # zero_grad() first: the backward pass above also accumulated
        # parameter gradients that must not leak into this update.
        optimizer.zero_grad()
        org_output = estimator(data)
        adv_output = estimator(adv_data)
        loss = criterion(org_output, target) + criterion(adv_output, target)

        loss.backward()
        optimizer.step()

        # Print training status
        if batch_idx % log_interval == 0:

            # Classification: additionally report batch accuracy, taken
            # from the clean (non-adversarial) outputs.
            if is_classification:
                _, predicted = torch.max(org_output.data, 1)
                correct = (predicted == target).sum().item()

                msg = ("Estimator: {:03d} | Epoch: {:03d} | Batch: {:03d}"
                       " | Loss: {:.5f} | Correct: {:d}/{:d}")
                print(
                    msg.format(
                        idx, epoch, batch_idx, loss, correct, batch_size
                    )
                )
            # Regression: loss only.
            else:
                msg = ("Estimator: {:03d} | Epoch: {:03d} | Batch: {:03d}"
                       " | Loss: {:.5f}")
                print(msg.format(idx, epoch, batch_idx, loss))

    return estimator, optimizer
def _get_fgsm_samples(sample, epsilon, sample_grad):
    """
    Generate adversarial samples with the fast gradient sign method (FGSM):
    step ``epsilon`` along the sign of the input gradient, then clamp the
    result back into the valid input range [0, 1].
    """
    # FGSM assumes inputs live in [0, 1]; refuse anything else up front.
    lo = torch.min(sample)
    hi = torch.max(sample)
    if not 0 <= lo < hi <= 1:
        msg = ("The input range of samples passed to adversarial training"
               " should be in the range [0, 1], but got [{:.3f}, {:.3f}]"
               " instead.")
        raise ValueError(msg.format(lo, hi))

    perturbed = sample + epsilon * sample_grad.sign()
    return torch.clamp(perturbed, 0, 1)
class _BaseAdversarialTraining(BaseModule):
    """Shared hyper-parameter validation for adversarial training ensembles."""

    def _validate_parameters(self, epochs, epsilon, log_interval):
        """Validate hyper-parameters on training the ensemble.

        Raises
        ------
        ValueError
            If ``epochs`` or ``log_interval`` is not strictly positive, or
            ``epsilon`` is outside (0, 1]. Each failure is also logged.
        """
        if not epochs > 0:
            msg = ("The number of training epochs = {} should be strictly"
                   " positive.")
            self.logger.error(msg.format(epochs))
            raise ValueError(msg.format(epochs))

        if not 0 < epsilon <= 1:
            msg = ("The step used to generate adversarial samples in FGSM"
                   " should be in the range (0, 1], but got {} instead.")
            self.logger.error(msg.format(epsilon))
            raise ValueError(msg.format(epsilon))

        if not log_interval > 0:
            # Typo fix: original message read "printting".
            msg = ("The number of batches to wait before printing the"
                   " training status should be strictly positive, but got {}"
                   " instead.")
            self.logger.error(msg.format(log_interval))
            raise ValueError(msg.format(log_interval))
@torchensemble_model_doc("""Implementation on the AdversarialTrainingClassifier.""", # noqa: E501
"model")
class AdversarialTrainingClassifier(_BaseAdversarialTraining):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.is_classification = True
@torchensemble_model_doc(
"""Implementation on the data forwarding in AdversarialTrainingClassifier.""", # noqa: E501
"classifier_forward")
def forward(self, x):
# Take the average over class distributions from all base estimators.
outputs = [F.softmax(estimator(x), dim=1)
for estimator in self.estimators_]
proba = op.average(outputs)
return proba
@torchensemble_model_doc(
"""Set the attributes on optimizer for AdversarialTrainingClassifier.""", # noqa: E501
"set_optimizer")
def set_optimizer(self, optimizer_name, **kwargs):
self.optimizer_name = optimizer_name
self.optimizer_args = kwargs
@torchensemble_model_doc(
"""Set the attributes on scheduler for AdversarialTrainingClassifier.""", # noqa: E501
"set_scheduler")
def set_scheduler(self, scheduler_name, **kwargs):
self.scheduler_name = scheduler_name
self.scheduler_args = kwargs
self.use_scheduler_ = True
@_adversarial_training_model_doc(
"""Implementation on the training stage of AdversarialTrainingClassifier.""", # noqa: E501
"fit"
)
def fit(self,
train_loader,
epochs=100,
epsilon=0.5,
log_interval=100,
test_loader=None,
save_model=True,
save_dir=None):
self._validate_parameters(epochs, epsilon, log_interval)
self.n_outputs = self._decide_n_outputs(train_loader, True)
# Instantiate a pool of base estimators, optimizers, and schedulers.
estimators = []
for _ in range(self.n_estimators):
estimators.append(self._make_estimator())
optimizers = []
for i in range(self.n_estimators):
optimizers.append(set_module.set_optimizer(estimators[i],
self.optimizer_name,
**self.optimizer_args))
if self.use_scheduler_:
schedulers = []
for i in range(self.n_estimators):
schedulers.append(set_module.set_scheduler(optimizers[i],
self.scheduler_name,
**self.scheduler_args)) # noqa: E501
# Utils
criterion = nn.CrossEntropyLoss()
best_acc = 0.
# Internal helper function on pesudo forward
def _forward(estimators, data):
outputs = [F.softmax(estimator(data), dim=1)
for estimator in estimators]
proba = op.average(outputs)
return proba
# Maintain a pool of workers
with Parallel(n_jobs=self.n_jobs) as parallel:
# Training loop
for epoch in range(epochs):
self.train()
rets = parallel(delayed(_parallel_fit_per_epoch)(
train_loader,
epsilon,
estimator,
optimizer,
criterion,
idx,
epoch,
log_interval,
self.device,
False
)
for idx, (estimator, optimizer) in enumerate(
zip(estimators, optimizers))
)
estimators, optimizers = [], []
for estimator, optimizer in rets:
estimators.append(estimator)
optimizers.append(optimizer)
# Validation
if test_loader:
self.eval()
with torch.no_grad():
correct = 0
total = 0
for _, (data, target) in enumerate(test_loader):
data = data.to(self.device)
target = target.to(self.device)
output = _forward(estimators, data)
_, predicted = torch.max(output.data, 1)
correct += (predicted == target).sum().item()
total += target.size(0)
acc = 100 * correct / total
if acc > best_acc:
best_acc = acc
self.estimators_ = nn.ModuleList() # reset
self.estimators_.extend(estimators)
if save_model:
io.save(self, save_dir, self.logger)
msg = ("Epoch: {:03d} | Validation Acc: {:.3f}"
" % | Historical Best: {:.3f} %")
self.logger.info(msg.format(epoch, acc, best_acc))
# Update the scheduler
if self.use_scheduler_:
for i in range(self.n_estimators):
schedulers[i].step()
self.estimators_ = nn.ModuleList()
self.estimators_.extend(estimators)
if save_model and not test_loader:
io.save(self, save_dir, self.logger)
@torchensemble_model_doc(
"""Implementation on the evaluating stage of AdversarialTrainingClassifier.""", # noqa: E501
"classifier_predict")
def predict(self, test_loader):
self.eval()
correct = 0
total = 0
for _, (data, target) in enumerate(test_loader):
data, target = data.to(self.device), target.to(self.device)
output = self.forward(data)
_, predicted = torch.max(output.data, 1)
correct += (predicted == target).sum().item()
total += target.size(0)
acc = 100 * correct / total
return acc
@torchensemble_model_doc("""Implementation on the AdversarialTrainingRegressor.""", # noqa: E501
"model")
class AdversarialTrainingRegressor(_BaseAdversarialTraining):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.is_classification = False
@torchensemble_model_doc(
"""Implementation on the data forwarding in AdversarialTrainingRegressor.""", # noqa: E501
"regressor_forward")
def forward(self, x):
# Take the average over predictions from all base estimators.
outputs = [estimator(x) for estimator in self.estimators_]
pred = op.average(outputs)
return pred
@torchensemble_model_doc(
"""Set the attributes on optimizer for AdversarialTrainingRegressor.""", # noqa: E501
"set_optimizer")
def set_optimizer(self, optimizer_name, **kwargs):
self.optimizer_name = optimizer_name
self.optimizer_args = kwargs
@torchensemble_model_doc(
"""Set the attributes on scheduler for AdversarialTrainingRegressor.""", # noqa: E501
"set_scheduler")
def set_scheduler(self, scheduler_name, **kwargs):
self.scheduler_name = scheduler_name
self.scheduler_args = kwargs
self.use_scheduler_ = True
@_adversarial_training_model_doc(
"""Implementation on the training stage of AdversarialTrainingRegressor.""", # noqa: E501
"fit"
)
def fit(self,
train_loader,
epochs=100,
epsilon=0.5,
log_interval=100,
test_loader=None,
save_model=True,
save_dir=None):
self._validate_parameters(epochs, epsilon, log_interval)
self.n_outputs = self._decide_n_outputs(train_loader, True)
# Instantiate a pool of base estimators, optimizers, and schedulers.
estimators = []
for _ in range(self.n_estimators):
estimators.append(self._make_estimator())
optimizers = []
for i in range(self.n_estimators):
optimizers.append(set_module.set_optimizer(estimators[i],
self.optimizer_name,
**self.optimizer_args))
if self.use_scheduler_:
schedulers = []
for i in range(self.n_estimators):
schedulers.append(set_module.set_scheduler(optimizers[i],
self.scheduler_name,
**self.scheduler_args)) # noqa: E501
# Utils
criterion = nn.MSELoss()
best_mse = float("inf")
# Internal helper function on pesudo forward
def _forward(estimators, data):
outputs = [estimator(data) for estimator in estimators]
pred = op.average(outputs)
return pred
# Maintain a pool of workers
with Parallel(n_jobs=self.n_jobs) as parallel:
# Training loop
for epoch in range(epochs):
self.train()
rets = parallel(delayed(_parallel_fit_per_epoch)(
train_loader,
epsilon,
estimator,
optimizer,
criterion,
idx,
epoch,
log_interval,
self.device,
True
)
for idx, (estimator, optimizer) in enumerate(
zip(estimators, optimizers))
)
estimators, optimizers = [], []
for estimator, optimizer in rets:
estimators.append(estimator)
optimizers.append(optimizer)
# Validation
if test_loader:
self.eval()
with torch.no_grad():
mse = 0
for _, (data, target) in enumerate(test_loader):
data = data.to(self.device)
target = target.to(self.device)
output = _forward(estimators, data)
mse += criterion(output, target)
mse /= len(test_loader)
if mse < best_mse:
best_mse = mse
self.estimators_ = nn.ModuleList()
self.estimators_.extend(estimators)
if save_model:
io.save(self, save_dir, self.logger)
msg = ("Epoch: {:03d} | Validation MSE:"
" {:.5f} | Historical Best: {:.5f}")
self.logger.info(msg.format(epoch, mse, best_mse))
# Update the scheduler
if self.use_scheduler_:
for i in range(self.n_estimators):
schedulers[i].step()
self.estimators_ = nn.ModuleList()
self.estimators_.extend(estimators)
if save_model and not test_loader:
io.save(self, save_dir, self.logger)
@torchensemble_model_doc(
"""Implementation on the evaluating stage of AdversarialTrainingRegressor.""", # noqa: E501
"regressor_predict")
def predict(self, test_loader):
self.eval()
mse = 0
criterion = nn.MSELoss()
for batch_idx, (data, target) in enumerate(test_loader):
data, target = data.to(self.device), target.to(self.device)
output = self.forward(data)
mse += criterion(output, target)
return mse / len(test_loader) | 0.936059 | 0.59249 |
from tools import cli, tasks, pchelper, service_instance
from pyVmomi import vmodl, vim
def get_hdd_prefix_label(language):
    """Return the localized 'Hard disk' label prefix, or None if the
    language is not supported."""
    prefixes = {
        'English': 'Hard disk ',
        'Chinese': u'硬盘 ',
    }
    return prefixes.get(language)
def detach_disk_from_vm(vm, disk_number, language):
    """
    Detach first class disk from vm.

    :param vm: the VirtualMachine managed object
    :param disk_number: HDD number whose label suffix to match
    :param language: vSphere UI language used to build the device label
    :return: the ReconfigVM_Task removing the disk
    :raises RuntimeError: if the language or the labeled disk is unknown
    """
    hdd_prefix_label = get_hdd_prefix_label(language)
    if not hdd_prefix_label:
        raise RuntimeError('HDD prefix label could not be found')

    hdd_label = hdd_prefix_label + str(disk_number)
    virtual_hdd_device = None
    for dev in vm.config.hardware.device:
        if isinstance(dev, vim.vm.device.VirtualDisk) \
                and dev.deviceInfo.label == hdd_label:
            virtual_hdd_device = dev
            break  # device labels are unique; stop at the first match

    if not virtual_hdd_device:
        # BUG FIX: report the label we searched for; the original
        # formatted `virtual_hdd_device`, which is always None here.
        raise RuntimeError('Virtual {} could not '
                           'be found.'.format(hdd_label))

    virtual_hdd_spec = vim.vm.device.VirtualDeviceSpec()
    virtual_hdd_spec.operation = \
        vim.vm.device.VirtualDeviceSpec.Operation.remove
    virtual_hdd_spec.device = virtual_hdd_device

    spec = vim.vm.ConfigSpec()
    spec.deviceChange = [virtual_hdd_spec]
    task = vm.ReconfigVM_Task(spec=spec)
    return task
def main():
    """
    Simple command-line program for detaching a disk from a virtual machine.

    Returns 0 on success, -1 if the vSphere API raised a method fault.
    Raises RuntimeError if no VM matched the given UUID/name.
    """
    parser = cli.Parser()
    parser.add_optional_arguments(cli.Argument.VM_NAME, cli.Argument.UUID, cli.Argument.LANGUAGE)
    parser.add_custom_argument('--disk-number', required=True, help='HDD number to detach.')
    args = parser.get_args()
    si = service_instance.connect(args)

    try:
        content = si.RetrieveContent()

        # Retrieve VM: prefer UUID lookup, fall back to name lookup.
        vm = None
        if args.uuid:
            search_index = content.searchIndex
            vm = search_index.FindByUuid(None, args.uuid, True)
        elif args.vm_name:
            vm = pchelper.get_obj(content, [vim.VirtualMachine], args.vm_name)

        # Detaching Disk from VM and waiting for the reconfigure task.
        if vm:
            task = detach_disk_from_vm(vm, args.disk_number, args.language)
            tasks.wait_for_tasks(si, [task])
        else:
            raise RuntimeError("VM not found.")

    except vmodl.MethodFault as error:
        print("Caught vmodl fault : " + error.msg)
        return -1
    return 0
# Start program
if __name__ == "__main__":
    main()
from pyVmomi import vmodl, vim
def get_hdd_prefix_label(language):
language_prefix_label_mapper = {
'English': 'Hard disk ',
'Chinese': u'硬盘 '
}
return language_prefix_label_mapper.get(language)
def detach_disk_from_vm(vm, disk_number, language):
"""
Detach first class disk from vm
"""
hdd_prefix_label = get_hdd_prefix_label(language)
if not hdd_prefix_label:
raise RuntimeError('HDD prefix label could not be found')
hdd_label = hdd_prefix_label + str(disk_number)
virtual_hdd_device = None
for dev in vm.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualDisk) \
and dev.deviceInfo.label == hdd_label:
virtual_hdd_device = dev
if not virtual_hdd_device:
raise RuntimeError('Virtual {} could not '
'be found.'.format(virtual_hdd_device))
virtual_hdd_spec = vim.vm.device.VirtualDeviceSpec()
virtual_hdd_spec.operation = \
vim.vm.device.VirtualDeviceSpec.Operation.remove
virtual_hdd_spec.device = virtual_hdd_device
spec = vim.vm.ConfigSpec()
spec.deviceChange = [virtual_hdd_spec]
task = vm.ReconfigVM_Task(spec=spec)
return task
def main():
"""
Simple command-line program for detaching a disk from a virtual machine.
"""
parser = cli.Parser()
parser.add_optional_arguments(cli.Argument.VM_NAME, cli.Argument.UUID, cli.Argument.LANGUAGE)
parser.add_custom_argument('--disk-number', required=True, help='HDD number to detach.')
args = parser.get_args()
si = service_instance.connect(args)
try:
content = si.RetrieveContent()
# Retrieve VM
vm = None
if args.uuid:
search_index = content.searchIndex
vm = search_index.FindByUuid(None, args.uuid, True)
elif args.vm_name:
vm = pchelper.get_obj(content, [vim.VirtualMachine], args.vm_name)
# Detaching Disk from VM
if vm:
task = detach_disk_from_vm(vm, args.disk_number, args.language)
tasks.wait_for_tasks(si, [task])
else:
raise RuntimeError("VM not found.")
except vmodl.MethodFault as error:
print("Caught vmodl fault : " + error.msg)
return -1
return 0
# Start program
if __name__ == "__main__":
main() | 0.536313 | 0.13612 |
import numpy as np
import pandas as pd
import tensorflow as tf
class MF:
    """Matrix-factorization recommender built on a Keras dot-product model.

    Reads a tab-separated ratings file (header row; integer columns that are
    used as user id, item id, rating) and learns one embedding per user and
    per item whose inner product approximates the rating.
    """

    def __init__(self, data_path):
        # Columns: [:, 0] user id, [:, 1] item id, [:, 2] rating.
        self.data = np.genfromtxt(data_path,
                                  delimiter='\t',
                                  skip_header=True,
                                  dtype=np.int32)
        self.model = None
        self.result = None

    def train(self,
              dim=2,
              alpha=1e-3,
              beta=1e-4,
              epoch=300,
              batch=1000,
              verbose=2):
        """Build the model, fit it, and write predictions to disk.

        dim: embedding size; alpha: learning rate; beta: L2 strength.
        """
        self.build(dim, alpha, beta)
        self._train(epoch,
                    batch,
                    verbose)
        self.predict()

    def predict(self):
        """Predict ratings for every observed pair and save them as CSV."""
        predicted = self.model.predict(
            [self.data[:, 0], self.data[:, 1]]
        ).reshape(-1)
        pd.DataFrame({'actual': self.data[:, 2],
                      'predicted': predicted})\
            .to_csv('result/predict_keras.csv', index=False)

    def recommend(self, num_rec_items):
        """
        do recommend num_rec_items items excluding the observed items.
        """
        user_mtx, item_mtx = self.model.get_weights()
        # Negate the scores so ascending argsort ranks best items first.
        predicted = np.inner(user_mtx, item_mtx) * -1
        # Zero out observed pairs so they fall behind unobserved ones
        # (assumes raw inner products are positive -- TODO confirm).
        predicted[self.data[:, 0], self.data[:, 1]] *= 0
        self.result = pd.DataFrame(predicted.argsort()[:, :num_rec_items],
                                   columns=['top%s' % i
                                            for i in range(1, num_rec_items + 1)],
                                   index=np.arange(len(user_mtx)))
        self.result.to_csv('result/recommend_keras.csv')

    def build(self, dim, alpha, beta):
        """Assemble the two-tower embedding model with a dot-product head."""
        users, items, _ = self.data.max(axis=0)
        user_input = tf.keras.layers.Input((1, ), name='user')
        user_vec = self.embedding(user_input, users, dim, beta, 'user_vec')
        item_input = tf.keras.layers.Input((1, ), name='item')
        item_vec = self.embedding(item_input, items, dim, beta, 'item_vec')
        outputs = tf.keras.layers.Dot(axes=1)([user_vec, item_vec])
        model = tf.keras.models.Model([user_input, item_input], outputs)
        adam = tf.keras.optimizers.Adam(alpha)
        model.compile(adam, 'mse')
        model.summary()
        self.model = model

    def _train(self, epoch, batch, verbose=2):
        # shuffle=False keeps row order aligned with self.data.
        self.model.fit([self.data[:, 0], self.data[:, 1]],
                       self.data[:, 2],
                       epochs=epoch,
                       verbose=verbose,
                       batch_size=batch,
                       shuffle=False)

    def embedding(self,
                  last_layer,
                  input_dim,
                  latent_dim,
                  beta,
                  name):
        """Embed integer ids 0..input_dim (inclusive) into latent_dim floats."""
        input_length = 1
        regularizer = tf.keras.regularizers.l2(beta)
        initializer = tf.keras\
            .initializers\
            .RandomNormal()
        # +1 because ids are used directly as indices (max id inclusive).
        embedding = tf.keras.layers.Embedding(
            input_dim + 1,
            latent_dim,
            input_length=input_length,
            embeddings_initializer=initializer,
            embeddings_regularizer=regularizer)(last_layer)
        return tf.keras.layers.Flatten(name=name)(embedding)
import pandas as pd
import tensorflow as tf
class MF:
def __init__(self, data_path):
self.data = np.genfromtxt(data_path,
delimiter='\t',
skip_header=True,
dtype=np.int32)
self.model = None
self.result = None
def train(self,
dim=2,
alpha=1e-3,
beta=1e-4,
epoch=300,
batch=1000,
verbose=2):
self.build(dim, alpha, beta)
self._train(epoch,
batch,
verbose)
self.predict()
def predict(self):
predicted = self.model.predict(
[self.data[:, 0], self.data[:, 1]]
).reshape(-1)
pd.DataFrame({'actual': self.data[:, 2],
'predicted': predicted})\
.to_csv('result/predict_keras.csv', index=False)
def recommend(self, num_rec_items):
"""
do recommend num_rec_items items excluding the observed items.
"""
user_mtx, item_mtx = self.model.get_weights()
predicted = np.inner(user_mtx, item_mtx) * -1
predicted[self.data[:, 0], self.data[:, 1]] *= 0
self.result = pd.DataFrame(predicted.argsort()[:, :num_rec_items],
columns=['top%s' % i
for i in range(1, num_rec_items + 1)],
index=np.arange(len(user_mtx)))
self.result.to_csv('result/recommend_keras.csv')
def build(self, dim, alpha, beta):
users, items, _ = self.data.max(axis=0)
user_input = tf.keras.layers.Input((1, ), name='user')
user_vec = self.embedding(user_input, users, dim, beta, 'user_vec')
item_input = tf.keras.layers.Input((1, ), name='item')
item_vec = self.embedding(item_input, items, dim, beta, 'item_vec')
outputs = tf.keras.layers.Dot(axes=1)([user_vec, item_vec])
model = tf.keras.models.Model([user_input, item_input], outputs)
adam = tf.keras.optimizers.Adam(alpha)
model.compile(adam, 'mse')
model.summary()
self.model = model
def _train(self, epoch, batch, verbose=2):
self.model.fit([self.data[:, 0], self.data[:, 1]],
self.data[:, 2],
epochs=epoch,
verbose=verbose,
batch_size=batch,
shuffle=False)
def embedding(self,
last_layer,
input_dim,
latent_dim,
beta,
name):
input_length = 1
regularizer = tf.keras.regularizers.l2(beta)
initializer = tf.keras\
.initializers\
.RandomNormal()
embedding = tf.keras.layers.Embedding(
input_dim+1,
latent_dim,
input_length=input_length,
embeddings_initializer=initializer,
embeddings_regularizer=regularizer)(last_layer)
return tf.keras.layers.Flatten(name=name)(embedding) | 0.811153 | 0.30205 |
import os.path
def validate_settings(settings):
    """Validate the settings module and return it as a plain dict.

    Raises:
        ValueError: if a required top-level setting or a required
            per-application key is missing.
        IOError: if a referenced certificate/key file does not exist
            (via check_file_exists).
    """
    # Typo fix below: original messages read "setttings".
    if not getattr(settings, 'BIND_PORT_FOR_ENTRY', None):
        raise ValueError('BIND_PORT_FOR_ENTRY not found in settings')
    if not getattr(settings, 'BIND_PORT_FOR_PULL', None):
        raise ValueError('BIND_PORT_FOR_PULL not found in settings')
    # hasattr (not truthiness): 0 threads is still an explicit setting.
    if not hasattr(settings, 'THREAD_NUMS_PER_APPLICATION'):
        raise ValueError('THREAD_NUMS_PER_APPLICATION not found in settings')
    if not getattr(settings, 'APPLICATIONS', None):
        raise ValueError('APPLICATIONS not found in settings')

    required_keys = ('application_id', 'name', 'sandbox',
                     'cert_file', 'key_file')
    for app in settings.APPLICATIONS:
        for key in required_keys:
            if key not in app:
                raise ValueError('%s not found in application list' % key)
        path_to_abspath(app)
        check_file_exists(app)
    return to_dict(settings)
def path_to_abspath(app):
    """Resolve relative cert/key paths against the bundled apns_certs dir.

    Absolute paths are left untouched; relative ones are rewritten in place.
    """
    base_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../')
    cert_dir = os.path.join(base_dir, 'apns_certs')
    for key in ('cert_file', 'key_file'):
        path = app[key]
        if not os.path.isabs(path):
            app[key] = os.path.join(cert_dir, path)
def check_file_exists(app):
    """Ensure both the certificate and key files exist on disk."""
    checks = (('Certification file not found: %s', app['cert_file']),
              ('Key file not found: %s', app['key_file']))
    for template, path in checks:
        if not os.path.isfile(path):
            raise IOError(template % path)
def to_dict(module):
    """Extract the supported settings from ``module`` into a plain dict."""
    return {
        'BIND_PORT_FOR_ENTRY': module.BIND_PORT_FOR_ENTRY,
        'BIND_PORT_FOR_PULL': module.BIND_PORT_FOR_PULL,
        'THREAD_NUMS_PER_APPLICATION': module.THREAD_NUMS_PER_APPLICATION,
        'APPLICATIONS': module.APPLICATIONS,
    }
def validate_settings(settings):
if not getattr(settings, 'BIND_PORT_FOR_ENTRY', None):
raise ValueError('BIND_PORT_FOR_ENTRY not found in setttings')
if not getattr(settings, 'BIND_PORT_FOR_PULL', None):
raise ValueError('BIND_PORT_FOR_PULL not found in setttings')
if not hasattr(settings, 'THREAD_NUMS_PER_APPLICATION'):
raise ValueError('THREAD_NUMS_PER_APPLICATION not found in settings')
if not getattr(settings, 'APPLICATIONS', None):
raise ValueError('APPLICATIONS not found in settings')
for app in settings.APPLICATIONS:
if not 'application_id' in app:
raise ValueError('application_id not found in application list')
if not 'name' in app:
raise ValueError('name not found in application list')
if not 'sandbox' in app:
raise ValueError('sandbox not found in application list')
if not 'cert_file' in app:
raise ValueError('cert_file not found in application list')
if not 'key_file' in app:
raise ValueError('key_file not found in application list')
path_to_abspath(app)
check_file_exists(app)
return to_dict(settings)
def path_to_abspath(app):
BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../')
CERT_DIR = os.path.join(BASE_DIR, 'apns_certs')
cert_file = app['cert_file']
if not os.path.isabs(cert_file):
app['cert_file'] = os.path.join(CERT_DIR, cert_file)
key_file = app['key_file']
if not os.path.isabs(key_file):
app['key_file'] = os.path.join(CERT_DIR, key_file)
def check_file_exists(app):
if not os.path.isfile(app['cert_file']):
raise IOError('Certification file not found: %s' % app['cert_file'])
if not os.path.isfile(app['key_file']):
raise IOError('Key file not found: %s' % app['key_file'])
def to_dict(module):
return {
'BIND_PORT_FOR_ENTRY': module.BIND_PORT_FOR_ENTRY,
'BIND_PORT_FOR_PULL': module.BIND_PORT_FOR_PULL,
'THREAD_NUMS_PER_APPLICATION': module.THREAD_NUMS_PER_APPLICATION,
'APPLICATIONS': module.APPLICATIONS
} | 0.227469 | 0.042622 |
import json
import pytest
CONFIG = {
'api_endpoint': 'https://my.nsone.net',
# The api authentication key.
'api_key': 'testkey',
'metrics': {'qps': [{"test.com": None}], 'usage': [{"test.com": None}], 'pulsar': None, 'ddi': None},
}
CONFIG_NOMETRICS = {
'api_endpoint': 'https://test.com',
# The api authentication key.
'api_key': 'testkey',
'metrics': None,
}
CONFIG_NOKEY = {
'api_endpoint': 'https://test.com',
# The api authentication key.
'api_key': None,
'metrics': None,
}
CONFIG_2 = """{
"api_endpoint": "https://my.nsone.net",
"api_key": "testkey",
"metrics": {
"qps": [
{
"test.com": [
{
"www": "A"
},
{
"mail": "A"
}
]
}
],
"usage": [
{
"test.com": null
}
],
"pulsar": null,
"ddi": null,
"account":[
{"billing": null},
{"ttl": ["dloc.com", "dloc1.com", "dloc2.com"]}
]
}
}"""
CONFIG_DDI = """
{
"api_endpoint": "https://localhost",
"api_key": "testkey",
"min_collection_interval": 15,
"metrics": {
"ddi": [
2
]
}
}
"""
CONFIG_1 = """
{
"api_endpoint": "https://my.nsone.net",
"api_key": "testkey",
"min_collection_interval": 15,
"query_params": {
"usage_networks": "*",
"pulsar_period": "1m",
"pulsar_geo": "*",
"pulsar_asn": "*",
"pulsar_agg": "avg"
},
"metrics": {
"pulsar": null,
"pulsar_by_app": [
{
"1xy4sn3": "1xtvhvx"
}
],
"pulsar_by_record": [
{
"www.dloc1.com": "A"
},
{
"www.dloc2.com": "A"
}
],
"qps": [
{
"dloc.com": [
{
"www": "A"
},
{
"email": "A"
}
]
},
{
"dloc1.com": [
{
"www": "A"
},
{
"email": "A"
}
]
},
{
"dloc2.com": [
{
"www": "A"
},
{
"email": "A"
}
]
}
],
"usage": [
{
"dloc.com": [
{
"www": "A"
},
{
"email": "A"
}
]
},
{
"dloc1.com": [
{
"www": "A"
},
{
"email": "A"
}
]
},
{
"dloc2.com": [
{
"www": "A"
},
{
"email": "A"
}
]
}
],
"account": [
{
"billing": null
},
{
"ttl": [
"dloc.com",
"dloc1.com",
"dloc2.com"
]
}
]
}
}
"""
@pytest.fixture
def instance():
    """Dict config with qps/usage metrics for test.com."""
    return CONFIG


@pytest.fixture
def instance_nokey():
    """Config whose api_key is None (error path)."""
    return CONFIG_NOKEY


@pytest.fixture
def instance_nometrics():
    """Config with an api_key but no metrics configured."""
    return CONFIG_NOMETRICS


@pytest.fixture
def instance_empty():
    """Completely empty instance config."""
    return {}


@pytest.fixture
def instance_1():
    """Full JSON config covering pulsar/qps/usage/account metrics."""
    return json.loads(CONFIG_1)


@pytest.fixture
def instance_ddi():
    """JSON config exercising only the ddi metrics."""
    return json.loads(CONFIG_DDI)
import pytest
CONFIG = {
'api_endpoint': 'https://my.nsone.net',
# The api authentication key.
'api_key': 'testkey',
'metrics': {'qps': [{"test.com": None}], 'usage': [{"test.com": None}], 'pulsar': None, 'ddi': None},
}
CONFIG_NOMETRICS = {
'api_endpoint': 'https://test.com',
# The api authentication key.
'api_key': 'testkey',
'metrics': None,
}
CONFIG_NOKEY = {
'api_endpoint': 'https://test.com',
# The api authentication key.
'api_key': None,
'metrics': None,
}
CONFIG_2 = """{
"api_endpoint": "https://my.nsone.net",
"api_key": "testkey",
"metrics": {
"qps": [
{
"test.com": [
{
"www": "A"
},
{
"mail": "A"
}
]
}
],
"usage": [
{
"test.com": null
}
],
"pulsar": null,
"ddi": null,
"account":[
{"billing": null},
{"ttl": ["dloc.com", "dloc1.com", "dloc2.com"]}
]
}
}"""
CONFIG_DDI = """
{
"api_endpoint": "https://localhost",
"api_key": "testkey",
"min_collection_interval": 15,
"metrics": {
"ddi": [
2
]
}
}
"""
CONFIG_1 = """
{
"api_endpoint": "https://my.nsone.net",
"api_key": "testkey",
"min_collection_interval": 15,
"query_params": {
"usage_networks": "*",
"pulsar_period": "1m",
"pulsar_geo": "*",
"pulsar_asn": "*",
"pulsar_agg": "avg"
},
"metrics": {
"pulsar": null,
"pulsar_by_app": [
{
"1xy4sn3": "1xtvhvx"
}
],
"pulsar_by_record": [
{
"www.dloc1.com": "A"
},
{
"www.dloc2.com": "A"
}
],
"qps": [
{
"dloc.com": [
{
"www": "A"
},
{
"email": "A"
}
]
},
{
"dloc1.com": [
{
"www": "A"
},
{
"email": "A"
}
]
},
{
"dloc2.com": [
{
"www": "A"
},
{
"email": "A"
}
]
}
],
"usage": [
{
"dloc.com": [
{
"www": "A"
},
{
"email": "A"
}
]
},
{
"dloc1.com": [
{
"www": "A"
},
{
"email": "A"
}
]
},
{
"dloc2.com": [
{
"www": "A"
},
{
"email": "A"
}
]
}
],
"account": [
{
"billing": null
},
{
"ttl": [
"dloc.com",
"dloc1.com",
"dloc2.com"
]
}
]
}
}
"""
@pytest.fixture
def instance():
return CONFIG
@pytest.fixture
def instance_nokey():
return CONFIG_NOKEY
@pytest.fixture
def instance_nometrics():
return CONFIG_NOMETRICS
@pytest.fixture
def instance_empty():
return {}
@pytest.fixture
def instance_1():
return json.loads(CONFIG_1)
@pytest.fixture
def instance_ddi():
return json.loads(CONFIG_DDI) | 0.31542 | 0.436922 |
import json
import os
from bq_file_load_benchmark.generic_benchmark_tools import schema_creator
class TestSchemaCreator(object):
    """Tests functionality of load_benchmark_tools.schema_creator.SchemaCreator.

    Attributes:
        test_schemas_dir(str): Directory where test json schemas should be
            written to.
        test_file_parameters(dict): Dictionary containing each test file
            parameter and its possible values.
    """

    def setup(self):
        """Sets up resources for tests."""
        abs_path = os.path.abspath(os.path.dirname(__file__))
        self.test_schemas_dir = os.path.join(
            abs_path,
            'test_schemas'
        )
        self.test_file_parameters = {
            'fileType': ['csv', 'json'],
            'fileCompressionTypes': {
                'csv': ['none'],
                'json': ['none']
            },
            'numColumns': [4],
            'numFiles': [1, 100, 1000, 10000],
            'targetDataSizes': [.01],
            'stagingDataSizes': ['10MB'],
            'columnTypes': [
                '100_STRING',
                '50_STRING_50_NUMERIC',
            ],
        }
        self.expected_schema_path_1 = os.path.join(
            abs_path,
            'test_schemas/100_STRING_4.json'
        )
        self.expected_schema_path_2 = os.path.join(
            abs_path,
            'test_schemas/50_STRING_50_NUMERIC_4.json'
        )

    # Renamed from test_create_schmeas (typo in the original name).
    def test_create_schemas(self):
        """Tests SchemaCreator.create_schemas().

        Tests SchemaCreator's ability to create json schemas for benchmark
        tables based off of parameters in a file_params dict. Fails by
        assertion if the schema files are missing or their contents differ
        from the expected field definitions.
        """
        test_schema_creator = schema_creator.SchemaCreator(
            schemas_dir=self.test_schemas_dir,
            file_params=self.test_file_parameters
        )
        test_schema_creator.create_schemas()
        assert os.path.isfile(self.expected_schema_path_1)
        assert os.path.isfile(self.expected_schema_path_2)

        def field(field_type, name):
            """Build one REQUIRED field entry of the expected schema."""
            return {"type": field_type, "name": name, "mode": "REQUIRED"}

        expected_schema_1 = {
            "fields": [field("STRING", "string%d" % i)
                       for i in range(1, 5)]
        }
        expected_schema_2 = {
            "fields": [
                field("STRING", "string1"),
                field("STRING", "string2"),
                field("NUMERIC", "numeric1"),
                field("NUMERIC", "numeric2"),
            ]
        }

        with open(self.expected_schema_path_1) as json_file_1:
            assert json.load(json_file_1) == expected_schema_1
        with open(self.expected_schema_path_2) as json_file_2:
            assert json.load(json_file_2) == expected_schema_2

    def teardown(self):
        """Tears down resources created in setup()."""
        os.remove(self.expected_schema_path_1)
        os.remove(self.expected_schema_path_2)
import json
import os
from bq_file_load_benchmark.generic_benchmark_tools import schema_creator
class TestSchemaCreator(object):
    """Tests functionality of load_benchmark_tools.schema_creator.SchemaCreator.

    Attributes:
        test_schemas_dir(str): Directory where test json schemas should be
            written to.
        test_file_parameters(dict): Dictionary containing each test file
            parameter and its possible values.
    """

    def setup(self):
        """Sets up resources for tests."""
        abs_path = os.path.abspath(os.path.dirname(__file__))
        self.test_schemas_dir = os.path.join(abs_path, 'test_schemas')
        self.test_file_parameters = {
            'fileType': ['csv', 'json'],
            'fileCompressionTypes': {
                'csv': ['none'],
                'json': ['none']
            },
            'numColumns': [4],
            'numFiles': [1, 100, 1000, 10000],
            'targetDataSizes': [.01],
            'stagingDataSizes': ['10MB'],
            'columnTypes': [
                '100_STRING',
                '50_STRING_50_NUMERIC',
            ],
        }
        # One expected schema file per columnTypes entry; the "_4" suffix
        # comes from numColumns above.
        self.expected_schema_path_1 = os.path.join(
            abs_path, 'test_schemas/100_STRING_4.json')
        self.expected_schema_path_2 = os.path.join(
            abs_path, 'test_schemas/50_STRING_50_NUMERIC_4.json')

    # NOTE(review): method name has a typo ("schmeas"); kept as-is so
    # existing `pytest -k` selections keep matching.
    def test_create_schmeas(self):
        """Tests SchemaCreator.create_schemas().

        Tests SchemaCreator's ability to create json schemas for benchmark
        tables based off of parameters in a file_params dict.
        """
        test_schema_creator = schema_creator.SchemaCreator(
            schemas_dir=self.test_schemas_dir,
            file_params=self.test_file_parameters
        )
        test_schema_creator.create_schemas()
        assert os.path.isfile(self.expected_schema_path_1)
        assert os.path.isfile(self.expected_schema_path_2)

        def _field(field_type, name):
            # All benchmark schema fields are REQUIRED.
            return {"type": field_type, "name": name, "mode": "REQUIRED"}

        expected_schema_1 = {
            "fields": [_field("STRING", "string%d" % i) for i in range(1, 5)]
        }
        expected_schema_2 = {
            "fields": [
                _field("STRING", "string1"),
                _field("STRING", "string2"),
                _field("NUMERIC", "numeric1"),
                _field("NUMERIC", "numeric2"),
            ]
        }
        # json.load replaces the previous read() + json.loads() round trip.
        with open(self.expected_schema_path_1) as json_file_1:
            assert expected_schema_1 == json.load(json_file_1)
        with open(self.expected_schema_path_2) as json_file_2:
            assert expected_schema_2 == json.load(json_file_2)

    def teardown(self):
        """Tears down resources created in setup()."""
        os.remove(self.expected_schema_path_1)
        os.remove(self.expected_schema_path_2)
import requests
import json
from requests.adapters import HTTPAdapter
# Hackerrank API endpoint
RUN_API_ENDPOINT = 'http://api.hackerrank.com/checker/submission.json'
# supported languages and their codes passed to API
LANG_CODE = {
    'fsharp': 33, 'javascript': 20, 'whitespace': 41, 'python': 5,
    'lolcode': 38, 'mysql': 10, 'fortran': 54, 'tcl': 40, 'oracle': 11,
    'pascal': 25, 'haskell': 12, 'cobol': 36, 'octave': 46, 'csharp': 9,
    'go': 21, 'php': 7, 'ruby': 8, 'java8': 43, 'bash': 14,
    'visualbasic': 37, 'groovy': 31, 'c': 1, 'erlang': 16, 'java': 3,
    'd': 22, 'scala': 15, 'tsql': 42, 'ocaml': 23, 'perl': 6, 'lua': 18,
    'xquery': 48, 'r': 24, 'swift': 51, 'sbcl': 26, 'smalltalk': 39,
    'racket': 49, 'cpp': 2, 'db2': 44, 'objectivec': 32, 'clojure': 13,
    'python3': 30, 'rust': 50,
}


class HackerRankAPI():
    """Minimal client for the HackerRank code-checker submission API."""

    def __init__(self, api_key):
        # Parameters sent along with every submission.
        self.params_dict = {'api_key': api_key, 'format': 'json'}

    def run(self, code):
        """Submit *code* (dict with 'source', 'lang' and optional
        'testcases') and return the checker output wrapped in a Result."""
        self.manage_params(code)
        response = self.__request(RUN_API_ENDPOINT, self.params_dict)
        return Result(response.json()['result'])

    def manage_params(self, code):
        """Fold the submission dict into the request parameters."""
        self.params_dict['source'] = code['source']
        self.params_dict['lang'] = self.getLangCode(code['lang'])
        # Default to a single empty testcase when none are supplied.
        self.params_dict['testcases'] = json.dumps(code.get('testcases', [""]))

    def __request(self, url, params):
        """POST *params* to *url* with retries; returns the response, or
        None when the request raised (the error is printed)."""
        try:
            session = requests.Session()
            session.mount('http://', HTTPAdapter(max_retries=20))
            return session.post(url, data=params)
        except Exception as e:
            print(e)

    def getLangCode(self, lang):
        """Map a language name to its numeric API code; -1 when unknown."""
        if lang in LANG_CODE:
            return LANG_CODE[lang]
        print(
            "%s language not recognized.Use function supportedlanguages() to see the list of proper names of allowed languages." % lang)
        return -1

    def supportedlanguages(self):
        """Names of all languages the API accepts."""
        return LANG_CODE.keys()
# to convert json to a class object of Result
class Result():
    """Attribute-style view over one checker-API result dict."""

    # (attribute name, API payload key) pairs, copied over verbatim.
    _FIELDS = (
        ('error', 'stderr'),
        ('output', 'stdout'),
        ('memory', 'memory'),
        ('time', 'time'),
        ('message', 'compilemessage'),
    )

    def __init__(self, result):
        for attr, key in self._FIELDS:
            setattr(self, attr, result[key])
import json
from requests.adapters import HTTPAdapter
# Hackerrank API endpoint
RUN_API_ENDPOINT = 'http://api.hackerrank.com/checker/submission.json'
# supported languages and their codes passed to API
LANG_CODE = {
    'fsharp': 33, 'javascript': 20, 'whitespace': 41, 'python': 5,
    'lolcode': 38, 'mysql': 10, 'fortran': 54, 'tcl': 40, 'oracle': 11,
    'pascal': 25, 'haskell': 12, 'cobol': 36, 'octave': 46, 'csharp': 9,
    'go': 21, 'php': 7, 'ruby': 8, 'java8': 43, 'bash': 14,
    'visualbasic': 37, 'groovy': 31, 'c': 1, 'erlang': 16, 'java': 3,
    'd': 22, 'scala': 15, 'tsql': 42, 'ocaml': 23, 'perl': 6, 'lua': 18,
    'xquery': 48, 'r': 24, 'swift': 51, 'sbcl': 26, 'smalltalk': 39,
    'racket': 49, 'cpp': 2, 'db2': 44, 'objectivec': 32, 'clojure': 13,
    'python3': 30, 'rust': 50,
}


class HackerRankAPI():
    """Minimal client for the HackerRank code-checker submission API."""

    def __init__(self, api_key):
        # Parameters sent along with every submission.
        self.params_dict = {'api_key': api_key, 'format': 'json'}

    def run(self, code):
        """Submit *code* (dict with 'source', 'lang' and optional
        'testcases') and return the checker output wrapped in a Result."""
        self.manage_params(code)
        response = self.__request(RUN_API_ENDPOINT, self.params_dict)
        return Result(response.json()['result'])

    def manage_params(self, code):
        """Fold the submission dict into the request parameters."""
        self.params_dict['source'] = code['source']
        self.params_dict['lang'] = self.getLangCode(code['lang'])
        # Default to a single empty testcase when none are supplied.
        self.params_dict['testcases'] = json.dumps(code.get('testcases', [""]))

    def __request(self, url, params):
        """POST *params* to *url* with retries; returns the response, or
        None when the request raised (the error is printed)."""
        try:
            session = requests.Session()
            session.mount('http://', HTTPAdapter(max_retries=20))
            return session.post(url, data=params)
        except Exception as e:
            print(e)

    def getLangCode(self, lang):
        """Map a language name to its numeric API code; -1 when unknown."""
        if lang in LANG_CODE:
            return LANG_CODE[lang]
        print(
            "%s language not recognized.Use function supportedlanguages() to see the list of proper names of allowed languages." % lang)
        return -1

    def supportedlanguages(self):
        """Names of all languages the API accepts."""
        return LANG_CODE.keys()
# to convert json to a class object of Result
class Result():
    """Attribute-style view over one checker-API result dict."""

    # (attribute name, API payload key) pairs, copied over verbatim.
    _FIELDS = (
        ('error', 'stderr'),
        ('output', 'stdout'),
        ('memory', 'memory'),
        ('time', 'time'),
        ('message', 'compilemessage'),
    )

    def __init__(self, result):
        for attr, key in self._FIELDS:
            setattr(self, attr, result[key])
import gym
from .reversible_action_wrapper import ReversibleActionWrapper
import numpy as np
class AnssiActionShaping(ReversibleActionWrapper):
    """Wraps an env's dict action space with a small discrete action set.

    Index -> action: 0 attack, 1 forward, 2 forward+jump,
    3/4 camera -/+ camera_angle on the first axis,
    5/6 camera +/- camera_angle on the second axis.
    """

    def __init__(
        self,
        env: gym.Env,
        camera_angle: int = 10,
        always_attack: bool = False,
        camera_margin: int = 5,
    ):
        """
        Arguments:
            env: The env to wrap.
            camera_angle: Discretized actions will tilt the camera by this number of
                degrees.
            always_attack: If True, then always send attack=1 to the wrapped environment.
            camera_margin: Used by self.wrap_action. If the continuous camera angle change
                in a dataset action is at least `camera_margin`, then the dataset action
                is discretized as a camera-change action.
        """
        super().__init__(env)
        self.camera_angle = camera_angle
        self.camera_margin = camera_margin
        self.always_attack = always_attack
        # (key, value) overrides applied on top of a noop action, per index.
        self._actions = [
            [('attack', 1)],
            [('forward', 1)],
            [('forward', 1), ('jump', 1)],
            [('camera', [-self.camera_angle, 0])],
            [('camera', [self.camera_angle, 0])],
            [('camera', [0, self.camera_angle])],
            [('camera', [0, -self.camera_angle])],
        ]
        # Pre-build one concrete env action dict per discrete index.
        self.actions = []
        for overrides in self._actions:
            env_action = self.env.action_space.noop()
            for key, value in overrides:
                env_action[key] = value
            if self.always_attack:
                env_action['attack'] = 1
            self.actions.append(env_action)
        self.action_space = gym.spaces.Discrete(len(self.actions))

    def action(self, action):
        """Translate a discrete index into its pre-built env action dict."""
        return self.actions[action]

    def reverse_action(self, action: dict) -> np.ndarray:
        """Discretize a batch of dataset (dict) actions into action indices.

        Priority: camera first axis, camera second axis, forward(+jump),
        attack; anything else maps to -1 (no discrete equivalent).
        """
        camera = action["camera"].squeeze()
        attack = action["attack"].squeeze()
        forward = action["forward"].squeeze()
        jump = action["jump"].squeeze()
        result = np.zeros((len(camera),), dtype=int)
        margin = self.camera_margin
        for i in range(len(camera)):
            # Moving camera is most important (first axis checked first).
            if camera[i][0] < -margin:
                result[i] = 3
            elif camera[i][0] > margin:
                result[i] = 4
            elif camera[i][1] > margin:
                result[i] = 5
            elif camera[i][1] < -margin:
                result[i] = 6
            elif forward[i] == 1:
                result[i] = 2 if jump[i] == 1 else 1
            elif attack[i] == 1:
                result[i] = 0
            else:
                # No reasonable mapping (would be no-op)
                result[i] = -1
        return result
from .reversible_action_wrapper import ReversibleActionWrapper
import numpy as np
class AnssiActionShaping(ReversibleActionWrapper):
    """Wraps an env's dict action space with a small discrete action set.

    Index -> action: 0 attack, 1 forward, 2 forward+jump,
    3/4 camera -/+ camera_angle on the first axis,
    5/6 camera +/- camera_angle on the second axis.
    """

    def __init__(
        self,
        env: gym.Env,
        camera_angle: int = 10,
        always_attack: bool = False,
        camera_margin: int = 5,
    ):
        """
        Arguments:
            env: The env to wrap.
            camera_angle: Discretized actions will tilt the camera by this number of
                degrees.
            always_attack: If True, then always send attack=1 to the wrapped environment.
            camera_margin: Used by self.wrap_action. If the continuous camera angle change
                in a dataset action is at least `camera_margin`, then the dataset action
                is discretized as a camera-change action.
        """
        super().__init__(env)
        self.camera_angle = camera_angle
        self.camera_margin = camera_margin
        self.always_attack = always_attack
        # (key, value) overrides applied on top of a noop action, per index.
        self._actions = [
            [('attack', 1)],
            [('forward', 1)],
            [('forward', 1), ('jump', 1)],
            [('camera', [-self.camera_angle, 0])],
            [('camera', [self.camera_angle, 0])],
            [('camera', [0, self.camera_angle])],
            [('camera', [0, -self.camera_angle])],
        ]
        # Pre-build one concrete env action dict per discrete index.
        self.actions = []
        for overrides in self._actions:
            env_action = self.env.action_space.noop()
            for key, value in overrides:
                env_action[key] = value
            if self.always_attack:
                env_action['attack'] = 1
            self.actions.append(env_action)
        self.action_space = gym.spaces.Discrete(len(self.actions))

    def action(self, action):
        """Translate a discrete index into its pre-built env action dict."""
        return self.actions[action]

    def reverse_action(self, action: dict) -> np.ndarray:
        """Discretize a batch of dataset (dict) actions into action indices.

        Priority: camera first axis, camera second axis, forward(+jump),
        attack; anything else maps to -1 (no discrete equivalent).
        """
        camera = action["camera"].squeeze()
        attack = action["attack"].squeeze()
        forward = action["forward"].squeeze()
        jump = action["jump"].squeeze()
        result = np.zeros((len(camera),), dtype=int)
        margin = self.camera_margin
        for i in range(len(camera)):
            # Moving camera is most important (first axis checked first).
            if camera[i][0] < -margin:
                result[i] = 3
            elif camera[i][0] > margin:
                result[i] = 4
            elif camera[i][1] > margin:
                result[i] = 5
            elif camera[i][1] < -margin:
                result[i] = 6
            elif forward[i] == 1:
                result[i] = 2 if jump[i] == 1 else 1
            elif attack[i] == 1:
                result[i] = 0
            else:
                # No reasonable mapping (would be no-op)
                result[i] = -1
        return result
import numpy as np
import networkx as nx
import graph_partitioning.partitioners.utils as putils
class ScotchData:
    """Graph container in the CSR-style array layout consumed by SCOTCH.

    Plain Python lists are built first; _exportArrays() mirrors each into a
    numpy array stored under the same name with a leading underscore.

    Fields:
        verttab: adjacency start index per vertex (length vertnbr + 1)
        edgetab: flattened adjacency lists (length edgenbr)
        edlotab: per-edge weights, parallel to edgetab
        velotab: per-vertex weights
        vlbltab: original vertex labels
        parttab: partition id per vertex (-1 = unassigned)
        vertnbr / edgenbr / baseval: vertex count, edge count, index base
    """

    def __init__(self):
        self._initialize()

    def _initialize(self):
        """Reset every array, numpy export and counter to its empty state."""
        self.verttab = []
        self.edgetab = []
        self.edlotab = []
        self.velotab = []
        self.vlbltab = []
        self.vertexweights = []
        self.parttab = []
        self._verttab = None
        self._edgetab = None
        self._edlotab = None
        self._velotab = None
        self._vlbltab = None
        self._vertexweights = None
        self._parttab = None
        self.vertnbr = 0
        self.edgenbr = 0
        self.baseval = 0

    def debugPrint(self):
        """Dump every array and counter to stdout for debugging."""
        print('vertnbr', self.vertnbr)
        print('edgenbr', self.edgenbr)
        print('baseval', self.baseval)
        print('len verttab', len(self.verttab))
        print('verttab', self.verttab)
        print('len velotab', len(self.velotab))
        print('velotab', self.velotab)
        print('len vlbltab', len(self.vlbltab))
        print('vlbltab', self.vlbltab)
        print('len edgetab', len(self.edgetab))
        print('edgetab', self.edgetab)
        print('len edlotab', len(self.edlotab))
        print('edlotab', self.edlotab)
        print('len parttab', len(self.parttab))
        print('parttab', self.parttab)

    def isValid(self):
        """Check array-length invariants and that every edge endpoint is a
        known vertex label. Requires _exportArrays() to have been run."""
        # TODO complete this
        if self.vertnbr + 1 != len(self._verttab):
            return False
        if self.vertnbr != len(self._velotab):
            return False
        if self.edgenbr != len(self._edgetab):
            return False
        if self.edgenbr != len(self._edlotab):
            return False
        # deep check
        for edgeID in self._edgetab:
            if edgeID not in self.vlbltab:
                print('EdgeID not in vlbltab', edgeID)
                return False
        return True

    def clearData(self):
        """Public alias for a full reset."""
        self._initialize()

    def fromNetworkxGraph(self, nxGraph, baseval=1, parttab=None, vlbltab=None):
        """Populate the SCOTCH arrays from an undirected networkx graph.

        Args:
            nxGraph: networkx.Graph to convert.
            baseval: index base used by the SCOTCH arrays.
            parttab: optional fixed partition per vertex (len == vertnbr).
            vlbltab: optional vertex labels (len == vertnbr).

        Returns:
            False when nxGraph is not a networkx.Graph; otherwise None.
        """
        if isinstance(nxGraph, nx.Graph) == False:
            # BUG FIX: previously referenced the undefined name `metisGraph`,
            # so this error path itself raised a NameError.
            print('Error, cannot load networkx graph from datatype', type(nxGraph).__name__)
            return False
        # Each undirected edge is stored twice (once per direction).
        self.vertnbr = nxGraph.number_of_nodes()
        self.edgenbr = nxGraph.size() * 2
        self.baseval = baseval
        self.verttab = putils.genArray(nxGraph.number_of_nodes() + 1)
        self.edgetab = putils.genArray(nxGraph.size() * 2)
        self.edlotab = putils.genArray(nxGraph.size() * 2)
        self.velotab = putils.genArray(nxGraph.number_of_nodes())
        if vlbltab is None:
            self.vlbltab = putils.genArray(nxGraph.number_of_nodes())
        else:
            # Only accept caller-supplied labels of the right length.
            if len(vlbltab) == self.vertnbr:
                self.vlbltab = vlbltab
            else:
                self.vlbltab = putils.genArray(nxGraph.number_of_nodes())
        if parttab is None:
            self.parttab = putils.genArray(nxGraph.number_of_nodes(), -1)
        else:
            if len(parttab) == self.vertnbr:
                self.parttab = parttab
            else:
                self.parttab = putils.genArray(nxGraph.number_of_nodes(), -1)
        vtabID = 0
        nodes = sorted(nxGraph.nodes())
        vertCount = 0
        for vertexID in range(self.baseval, len(nodes) + self.baseval):
            vertex = nodes[vertexID - self.baseval]
            adjustedID = vertexID - self.baseval
            # store the label for this vertex as vertCount != adjustedID
            self.vlbltab[vertCount] = nodes[vertexID - self.baseval]
            vertCount += 1
            self.verttab[adjustedID] = vtabID
            vWeight = 1
            try:
                # NOTE(review): .node[...] is the networkx 1.x API (2.x uses
                # .nodes[...]) — confirm the pinned networkx version.
                vWeight = int(nxGraph.node[vertex]['weight'])
            except KeyError as ke:
                pass
            self.velotab[adjustedID] = vWeight
            indexedEdges = {}
            edgeIndeces = sorted(nxGraph.neighbors(vertex))
            edgeCount = 0
            for edgeID in edgeIndeces:
                edgeWeight = 1
                try:
                    edgeWeight = int(nxGraph.edge[adjustedID][edgeID]['weight'])
                except Exception as e:
                    edgeWeight = 1
                self.edgetab[vtabID + edgeCount] = edgeID - self.baseval
                self.edlotab[vtabID + edgeCount] = edgeWeight
                edgeCount += 1
            vtabID += len(edgeIndeces)
        # Sentinel entry so verttab[i]..verttab[i+1] brackets vertex i's edges.
        self.verttab[nxGraph.number_of_nodes()] = vtabID
        # update vertex IDs
        updateEdgeIDSUsingLabels = False
        if updateEdgeIDSUsingLabels:
            lblmap = {}
            for newVertID in range(0, len(self.vlbltab)):
                oldVertID = self.vlbltab[newVertID]
                lblmap[oldVertID] = newVertID
            for i in range(0, len(self.edgetab)):
                newVal = lblmap[self.edgetab[i]]
                self.edgetab[i] = newVal
        self._exportArrays()

    def setFixedVertices(self, parttab):
        """Install a fixed partition assignment; returns True on success."""
        if len(parttab) == self.vertnbr:
            self.parttab = parttab
            self._parttab = putils.exportArrayToNumpyArray(parttab)
            return True
        return False

    def _exportArrays(self):
        """Mirror the Python lists into numpy arrays for the SCOTCH bindings."""
        self._verttab = putils.exportArrayToNumpyArray(self.verttab)
        self._edgetab = putils.exportArrayToNumpyArray(self.edgetab)
        self._edlotab = putils.exportArrayToNumpyArray(self.edlotab)
        self._velotab = putils.exportArrayToNumpyArray(self.velotab)
        self._parttab = putils.exportArrayToNumpyArray(self.parttab)
        self._vertexweights = putils.exportArrayToNumpyArray(self.vertexweights)
        if len(self.vlbltab) == self.vertnbr:
            self._vlbltab = putils.exportArrayToNumpyArray(self.vlbltab)
import networkx as nx
import graph_partitioning.partitioners.utils as putils
class ScotchData:
    """Graph container in the CSR-style array layout consumed by SCOTCH.

    Plain Python lists are built first; _exportArrays() mirrors each into a
    numpy array stored under the same name with a leading underscore.

    Fields:
        verttab: adjacency start index per vertex (length vertnbr + 1)
        edgetab: flattened adjacency lists (length edgenbr)
        edlotab: per-edge weights, parallel to edgetab
        velotab: per-vertex weights
        vlbltab: original vertex labels
        parttab: partition id per vertex (-1 = unassigned)
        vertnbr / edgenbr / baseval: vertex count, edge count, index base
    """

    def __init__(self):
        self._initialize()

    def _initialize(self):
        """Reset every array, numpy export and counter to its empty state."""
        self.verttab = []
        self.edgetab = []
        self.edlotab = []
        self.velotab = []
        self.vlbltab = []
        self.vertexweights = []
        self.parttab = []
        self._verttab = None
        self._edgetab = None
        self._edlotab = None
        self._velotab = None
        self._vlbltab = None
        self._vertexweights = None
        self._parttab = None
        self.vertnbr = 0
        self.edgenbr = 0
        self.baseval = 0

    def debugPrint(self):
        """Dump every array and counter to stdout for debugging."""
        print('vertnbr', self.vertnbr)
        print('edgenbr', self.edgenbr)
        print('baseval', self.baseval)
        print('len verttab', len(self.verttab))
        print('verttab', self.verttab)
        print('len velotab', len(self.velotab))
        print('velotab', self.velotab)
        print('len vlbltab', len(self.vlbltab))
        print('vlbltab', self.vlbltab)
        print('len edgetab', len(self.edgetab))
        print('edgetab', self.edgetab)
        print('len edlotab', len(self.edlotab))
        print('edlotab', self.edlotab)
        print('len parttab', len(self.parttab))
        print('parttab', self.parttab)

    def isValid(self):
        """Check array-length invariants and that every edge endpoint is a
        known vertex label. Requires _exportArrays() to have been run."""
        # TODO complete this
        if self.vertnbr + 1 != len(self._verttab):
            return False
        if self.vertnbr != len(self._velotab):
            return False
        if self.edgenbr != len(self._edgetab):
            return False
        if self.edgenbr != len(self._edlotab):
            return False
        # deep check
        for edgeID in self._edgetab:
            if edgeID not in self.vlbltab:
                print('EdgeID not in vlbltab', edgeID)
                return False
        return True

    def clearData(self):
        """Public alias for a full reset."""
        self._initialize()

    def fromNetworkxGraph(self, nxGraph, baseval=1, parttab=None, vlbltab=None):
        """Populate the SCOTCH arrays from an undirected networkx graph.

        Args:
            nxGraph: networkx.Graph to convert.
            baseval: index base used by the SCOTCH arrays.
            parttab: optional fixed partition per vertex (len == vertnbr).
            vlbltab: optional vertex labels (len == vertnbr).

        Returns:
            False when nxGraph is not a networkx.Graph; otherwise None.
        """
        if isinstance(nxGraph, nx.Graph) == False:
            # BUG FIX: previously referenced the undefined name `metisGraph`,
            # so this error path itself raised a NameError.
            print('Error, cannot load networkx graph from datatype', type(nxGraph).__name__)
            return False
        # Each undirected edge is stored twice (once per direction).
        self.vertnbr = nxGraph.number_of_nodes()
        self.edgenbr = nxGraph.size() * 2
        self.baseval = baseval
        self.verttab = putils.genArray(nxGraph.number_of_nodes() + 1)
        self.edgetab = putils.genArray(nxGraph.size() * 2)
        self.edlotab = putils.genArray(nxGraph.size() * 2)
        self.velotab = putils.genArray(nxGraph.number_of_nodes())
        if vlbltab is None:
            self.vlbltab = putils.genArray(nxGraph.number_of_nodes())
        else:
            # Only accept caller-supplied labels of the right length.
            if len(vlbltab) == self.vertnbr:
                self.vlbltab = vlbltab
            else:
                self.vlbltab = putils.genArray(nxGraph.number_of_nodes())
        if parttab is None:
            self.parttab = putils.genArray(nxGraph.number_of_nodes(), -1)
        else:
            if len(parttab) == self.vertnbr:
                self.parttab = parttab
            else:
                self.parttab = putils.genArray(nxGraph.number_of_nodes(), -1)
        vtabID = 0
        nodes = sorted(nxGraph.nodes())
        vertCount = 0
        for vertexID in range(self.baseval, len(nodes) + self.baseval):
            vertex = nodes[vertexID - self.baseval]
            adjustedID = vertexID - self.baseval
            # store the label for this vertex as vertCount != adjustedID
            self.vlbltab[vertCount] = nodes[vertexID - self.baseval]
            vertCount += 1
            self.verttab[adjustedID] = vtabID
            vWeight = 1
            try:
                # NOTE(review): .node[...] is the networkx 1.x API (2.x uses
                # .nodes[...]) — confirm the pinned networkx version.
                vWeight = int(nxGraph.node[vertex]['weight'])
            except KeyError as ke:
                pass
            self.velotab[adjustedID] = vWeight
            indexedEdges = {}
            edgeIndeces = sorted(nxGraph.neighbors(vertex))
            edgeCount = 0
            for edgeID in edgeIndeces:
                edgeWeight = 1
                try:
                    edgeWeight = int(nxGraph.edge[adjustedID][edgeID]['weight'])
                except Exception as e:
                    edgeWeight = 1
                self.edgetab[vtabID + edgeCount] = edgeID - self.baseval
                self.edlotab[vtabID + edgeCount] = edgeWeight
                edgeCount += 1
            vtabID += len(edgeIndeces)
        # Sentinel entry so verttab[i]..verttab[i+1] brackets vertex i's edges.
        self.verttab[nxGraph.number_of_nodes()] = vtabID
        # update vertex IDs
        updateEdgeIDSUsingLabels = False
        if updateEdgeIDSUsingLabels:
            lblmap = {}
            for newVertID in range(0, len(self.vlbltab)):
                oldVertID = self.vlbltab[newVertID]
                lblmap[oldVertID] = newVertID
            for i in range(0, len(self.edgetab)):
                newVal = lblmap[self.edgetab[i]]
                self.edgetab[i] = newVal
        self._exportArrays()

    def setFixedVertices(self, parttab):
        """Install a fixed partition assignment; returns True on success."""
        if len(parttab) == self.vertnbr:
            self.parttab = parttab
            self._parttab = putils.exportArrayToNumpyArray(parttab)
            return True
        return False

    def _exportArrays(self):
        """Mirror the Python lists into numpy arrays for the SCOTCH bindings."""
        self._verttab = putils.exportArrayToNumpyArray(self.verttab)
        self._edgetab = putils.exportArrayToNumpyArray(self.edgetab)
        self._edlotab = putils.exportArrayToNumpyArray(self.edlotab)
        self._velotab = putils.exportArrayToNumpyArray(self.velotab)
        self._parttab = putils.exportArrayToNumpyArray(self.parttab)
        self._vertexweights = putils.exportArrayToNumpyArray(self.vertexweights)
        if len(self.vlbltab) == self.vertnbr:
            self._vlbltab = putils.exportArrayToNumpyArray(self.vlbltab)
import pygame
from pygame.locals import *
from random import randint as ri
import copy
pygame.init()
# GAME_RES also unpacks into WIDTH and HEIGHT (480x480 window).
GAME_RES = WIDTH, HEIGHT = 480, 480
FPS = 60
GAME_TITLE = 'Game of Life - MarconiGames'
# Hardware-accelerated, double-buffered display surface.
window = pygame.display.set_mode(GAME_RES, HWACCEL | HWSURFACE | DOUBLEBUF)
pygame.display.set_caption(GAME_TITLE)
clock = pygame.time.Clock()
# Game Values
background_color = (0, 0, 0)  # fill color for the board (black)
cell_color = (255, 255, 255)  # color used for live cells (white)
cell_dim = 10  # square cell size in pixels; board is WIDTH//10 x HEIGHT//10
class Cell:
    """One board cell with a random initial alive/dead state."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.state = ri(0, 1)  # 1 = alive, 0 = dead


class Grid:
    """Game-of-Life board stored as grid[row][col] of Cell objects."""

    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.grid = [
            [Cell(x, y) for x in range(0, width)] for y in range(0, height)
        ]

    def drop_new_cells(self):
        """Re-seed the whole board with fresh random cells."""
        self.grid = [
            [Cell(x, y) for x in range(0, self.width)] for y in range(0, self.height)
        ]

    def _live_neighbors(self, x, y):
        """Count live neighbors of grid[x][y], clipping at every edge."""
        rows = len(self.grid)
        cols = len(self.grid[0])
        count = 0
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                if dx == 0 and dy == 0:
                    continue
                nx, ny = x + dx, y + dy
                if 0 <= nx < rows and 0 <= ny < cols:
                    count += self.grid[nx][ny].state
        return count

    def apply_rules(self):
        """Advance one generation using Conway's rules.

        BUG FIX: the old try/except bounds handling let Python's negative
        indexing silently wrap around on the low edges (toroidal) while the
        high edges raised IndexError and were clipped. All four edges are
        now clipped consistently.
        """
        futuregrid = copy.deepcopy(self.grid)
        for x in range(0, len(self.grid)):
            for y in range(0, len(self.grid[0])):
                s = self._live_neighbors(x, y)
                if self.grid[x][y].state == 1 and (s < 2 or s > 3):
                    futuregrid[x][y].state = 0
                elif self.grid[x][y].state == 0 and s == 3:
                    futuregrid[x][y].state = 1
        self.grid = futuregrid

    def draw(self, surface):
        """Draw every live cell as a filled cell_dim square on *surface*."""
        for row in self.grid:
            for cell in row:
                if cell.state == 1:
                    pygame.draw.rect(surface, cell_color, (
                        cell.x * cell_dim,
                        cell.y * cell_dim,
                        cell_dim,
                        cell_dim
                    )
                    )
grid = Grid(WIDTH // cell_dim, HEIGHT // cell_dim)
# End of Game Values

# Game loop
game_ended = False
while not game_ended:
    # Event handling
    for event in pygame.event.get():
        if event.type == QUIT:
            game_ended = True
            break
        if event.type == KEYDOWN:
            if event.key == K_ESCAPE:
                game_ended = True
                break
            # Space bar re-seeds the board with fresh random cells.
            if event.key == K_SPACE:
                grid.drop_new_cells()
    # Game logic
    grid.apply_rules()
    # Display update
    pygame.Surface.fill(window, background_color)
    grid.draw(window)
    pygame.display.update()
    clock.tick(FPS)  # cap the loop at FPS frames per second
pygame.quit()
exit(0) | 10_GameOfLife/main.py | import pygame
from pygame.locals import *
from random import randint as ri
import copy
pygame.init()
# GAME_RES also unpacks into WIDTH and HEIGHT (480x480 window).
GAME_RES = WIDTH, HEIGHT = 480, 480
FPS = 60
GAME_TITLE = 'Game of Life - MarconiGames'
# Hardware-accelerated, double-buffered display surface.
window = pygame.display.set_mode(GAME_RES, HWACCEL | HWSURFACE | DOUBLEBUF)
pygame.display.set_caption(GAME_TITLE)
clock = pygame.time.Clock()
# Game Values
background_color = (0, 0, 0)  # fill color for the board (black)
cell_color = (255, 255, 255)  # color used for live cells (white)
cell_dim = 10  # square cell size in pixels; board is WIDTH//10 x HEIGHT//10
class Cell:
    """One board cell with a random initial alive/dead state."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.state = ri(0, 1)  # 1 = alive, 0 = dead


class Grid:
    """Game-of-Life board stored as grid[row][col] of Cell objects."""

    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.grid = [
            [Cell(x, y) for x in range(0, width)] for y in range(0, height)
        ]

    def drop_new_cells(self):
        """Re-seed the whole board with fresh random cells."""
        self.grid = [
            [Cell(x, y) for x in range(0, self.width)] for y in range(0, self.height)
        ]

    def _live_neighbors(self, x, y):
        """Count live neighbors of grid[x][y], clipping at every edge."""
        rows = len(self.grid)
        cols = len(self.grid[0])
        count = 0
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                if dx == 0 and dy == 0:
                    continue
                nx, ny = x + dx, y + dy
                if 0 <= nx < rows and 0 <= ny < cols:
                    count += self.grid[nx][ny].state
        return count

    def apply_rules(self):
        """Advance one generation using Conway's rules.

        BUG FIX: the old try/except bounds handling let Python's negative
        indexing silently wrap around on the low edges (toroidal) while the
        high edges raised IndexError and were clipped. All four edges are
        now clipped consistently.
        """
        futuregrid = copy.deepcopy(self.grid)
        for x in range(0, len(self.grid)):
            for y in range(0, len(self.grid[0])):
                s = self._live_neighbors(x, y)
                if self.grid[x][y].state == 1 and (s < 2 or s > 3):
                    futuregrid[x][y].state = 0
                elif self.grid[x][y].state == 0 and s == 3:
                    futuregrid[x][y].state = 1
        self.grid = futuregrid

    def draw(self, surface):
        """Draw every live cell as a filled cell_dim square on *surface*."""
        for row in self.grid:
            for cell in row:
                if cell.state == 1:
                    pygame.draw.rect(surface, cell_color, (
                        cell.x * cell_dim,
                        cell.y * cell_dim,
                        cell_dim,
                        cell_dim
                    )
                    )
grid = Grid(WIDTH // cell_dim, HEIGHT // cell_dim)
# End of Game Values

# Game loop
game_ended = False
while not game_ended:
    # Event handling
    for event in pygame.event.get():
        if event.type == QUIT:
            game_ended = True
            break
        if event.type == KEYDOWN:
            if event.key == K_ESCAPE:
                game_ended = True
                break
            # Space bar re-seeds the board with fresh random cells.
            if event.key == K_SPACE:
                grid.drop_new_cells()
    # Game logic
    grid.apply_rules()
    # Display update
    pygame.Surface.fill(window, background_color)
    grid.draw(window)
    pygame.display.update()
    clock.tick(FPS)  # cap the loop at FPS frames per second
pygame.quit()
exit(0) | 0.292899 | 0.296788 |
import streamlit as st
import numpy as np
import cv2
import imageio
from perceiver import perceiver, io_processors
import OpticalFlow
import Classification
# Extract flows from imageio reader
def GetFlows(reader, sampleRate=5, start=0, length=1):
    """Estimate optical flow between successive sampled frames and write the
    visualized flow video to outputs/flowing.mp4.

    Args:
        reader: imageio reader for the input video.
        sampleRate: keep every sampleRate-th frame.
        start: index of the first frame to use.
        length: number of flow frames to produce.

    Returns:
        None; the output video is the side effect.

    NOTE(review): relies on the module-level FPS global being set by the
    Streamlit page before this runs — confirm the call order is guaranteed.
    """
    # First frame of each consecutive flow pair.
    firstFrame = reader.get_data(start)
    # Original dimensions, used to resize the flow images for output.
    width, height = reader.get_meta_data()["source_size"]
    # Progress feedback in the Streamlit UI.
    my_bar = st.progress(0)
    w = imageio.get_writer('outputs/flowing.mp4',
                           format='FFMPEG', mode='I', fps=int(FPS / sampleRate))
    for idx in range(length):
        secondFrame = reader.get_data((idx + start + 1) * sampleRate)
        # Extract flow using notebook example
        flow = OpticalFlow.ExtractFlow(firstFrame, secondFrame)[0]
        firstFrame = secondFrame
        # Convert flow to a BGR image and write it at the source resolution.
        flowImage = OpticalFlow.visualize_flow(flow)
        w.append_data(cv2.resize(flowImage, (width, height)))
        # BUG FIX: idx / (length - 1) raised ZeroDivisionError for the
        # default length == 1; report the completed fraction instead.
        my_bar.progress((idx + 1) / length)
    w.close()
    return
# Basic Streamlit page setup.
st.set_page_config(page_title='Perceiver IO', page_icon='docs/deepmind-logo.png',
                   layout="wide", initial_sidebar_state="auto", menu_items=None)
st.sidebar.header('Perceiver IO')
# Demo selector; index 0 ("Optical Flow") is the default.
program = st.sidebar.selectbox('Choose a function', [
    'Optical Flow',
    'Image Classification',
    'Language Modeling',
    'Video Autoencoding'], 0)
st.header(program)
left, right = st.columns(2)
if program == 'Optical Flow':
    inputFile = st.file_uploader("Upload a video")
    if inputFile is not None:
        inputVideo = inputFile.getvalue()
        reader = imageio.get_reader(inputVideo, '.mp4')
        FPS = reader.get_meta_data()["fps"]
        vidLength = reader.get_meta_data()["duration"]
        # Sampling/trim controls; seconds are converted to sampled-frame
        # counts via FPS / sampleRate.
        sampleRate = st.slider('Sample rate (frames)', 1, 10, 3, 1)
        startTime = int(st.slider('Start time (s)', 0.,
                                  vidLength, 0., 0.1) * FPS / sampleRate)
        # NOTE(review): startTime is already in sampled frames here, yet it
        # is subtracted from vidLength (seconds) — looks like a unit
        # mismatch; confirm the intended slider bound.
        length = int(st.slider('Length (s)', 0.1, vidLength -
                               startTime, 1., 0.1) * FPS / sampleRate)
        left.video(inputVideo)
        # GetFlows writes outputs/flowing.mp4 as a side effect and returns
        # None, so `flows` is unused.
        flows = GetFlows(reader, sampleRate, startTime, length)
        right.video('outputs/flowing.mp4')
elif program == 'Image Classification':
    inputFile = st.file_uploader("Upload an image to classify")
    if inputFile is not None:
        inputImage = inputFile.getvalue()
        inputArray = imageio.imread(inputImage)
        left.image(inputImage)
        results = Classification.ClassifyImage(inputArray)
        right.write(results)
elif program == 'Language Modeling':
    st.header('In progress...')
elif program == 'Video Autoencoding':
st.header('In progress...') | app.py | import streamlit as st
import numpy as np
import cv2
import imageio
from perceiver import perceiver, io_processors
import OpticalFlow
import Classification
# Extract flows from imageio reader
def GetFlows(reader, sampleRate=5, start=0, length=1):
    """Estimate optical flow between successive sampled frames and write the
    visualized flow video to outputs/flowing.mp4.

    Args:
        reader: imageio reader for the input video.
        sampleRate: keep every sampleRate-th frame.
        start: index of the first frame to use.
        length: number of flow frames to produce.

    Returns:
        None; the output video is the side effect.

    NOTE(review): relies on the module-level FPS global being set by the
    Streamlit page before this runs — confirm the call order is guaranteed.
    """
    # First frame of each consecutive flow pair.
    firstFrame = reader.get_data(start)
    # Original dimensions, used to resize the flow images for output.
    width, height = reader.get_meta_data()["source_size"]
    # Progress feedback in the Streamlit UI.
    my_bar = st.progress(0)
    w = imageio.get_writer('outputs/flowing.mp4',
                           format='FFMPEG', mode='I', fps=int(FPS / sampleRate))
    for idx in range(length):
        secondFrame = reader.get_data((idx + start + 1) * sampleRate)
        # Extract flow using notebook example
        flow = OpticalFlow.ExtractFlow(firstFrame, secondFrame)[0]
        firstFrame = secondFrame
        # Convert flow to a BGR image and write it at the source resolution.
        flowImage = OpticalFlow.visualize_flow(flow)
        w.append_data(cv2.resize(flowImage, (width, height)))
        # BUG FIX: idx / (length - 1) raised ZeroDivisionError for the
        # default length == 1; report the completed fraction instead.
        my_bar.progress((idx + 1) / length)
    w.close()
    return
# Basic Streamlit page setup.
st.set_page_config(page_title='Perceiver IO', page_icon='docs/deepmind-logo.png',
                   layout="wide", initial_sidebar_state="auto", menu_items=None)
st.sidebar.header('Perceiver IO')
# Demo selector; index 0 ("Optical Flow") is the default.
program = st.sidebar.selectbox('Choose a function', [
    'Optical Flow',
    'Image Classification',
    'Language Modeling',
    'Video Autoencoding'], 0)
st.header(program)
left, right = st.columns(2)
if program == 'Optical Flow':
    inputFile = st.file_uploader("Upload a video")
    if inputFile is not None:
        inputVideo = inputFile.getvalue()
        reader = imageio.get_reader(inputVideo, '.mp4')
        FPS = reader.get_meta_data()["fps"]
        vidLength = reader.get_meta_data()["duration"]
        # Sampling/trim controls; seconds are converted to sampled-frame
        # counts via FPS / sampleRate.
        sampleRate = st.slider('Sample rate (frames)', 1, 10, 3, 1)
        startTime = int(st.slider('Start time (s)', 0.,
                                  vidLength, 0., 0.1) * FPS / sampleRate)
        # NOTE(review): startTime is already in sampled frames here, yet it
        # is subtracted from vidLength (seconds) — looks like a unit
        # mismatch; confirm the intended slider bound.
        length = int(st.slider('Length (s)', 0.1, vidLength -
                               startTime, 1., 0.1) * FPS / sampleRate)
        left.video(inputVideo)
        # GetFlows writes outputs/flowing.mp4 as a side effect and returns
        # None, so `flows` is unused.
        flows = GetFlows(reader, sampleRate, startTime, length)
        right.video('outputs/flowing.mp4')
elif program == 'Image Classification':
    inputFile = st.file_uploader("Upload an image to classify")
    if inputFile is not None:
        inputImage = inputFile.getvalue()
        inputArray = imageio.imread(inputImage)
        left.image(inputImage)
        results = Classification.ClassifyImage(inputArray)
        right.write(results)
elif program == 'Language Modeling':
    st.header('In progress...')
elif program == 'Video Autoencoding':
st.header('In progress...') | 0.499023 | 0.321993 |
from selenium import webdriver
import datetime
import time
import telegram_send
import os
import pymysql
# One-time telegram-send channel configuration (token redacted in this copy).
os.system('printf "<token_key>\<PASSWORD>/amazPT" | telegram-send --configure-channel')
def get_driver():
    """Start web driver"""
    # Headless Chrome configured to run inside a container (no sandbox/GPU).
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--disable-gpu')
    # NOTE(review): the chrome_options= keyword is deprecated in Selenium 4
    # (use options=) — confirm the pinned selenium version.
    driver = webdriver.Chrome(chrome_options=chrome_options)
    driver.implicitly_wait(10)  # seconds to poll when locating elements
    return driver
def fetch_table():
    """Return every row of the `product` table as a list of tuples.

    Each row is expected to unpack as (productName, productUrl,
    productThresh) — see the polling loop below.
    """
    conn = pymysql.connect(host='mysqldb', port=3306, user='root',
                           passwd='password', db='proddb')
    try:
        cur = conn.cursor()
        try:
            cur.execute("SELECT * FROM product")
            rows = list(cur)
        finally:
            # BUG FIX: cursor and connection leaked when execute() raised.
            cur.close()
    finally:
        conn.close()
    print(f"rows={rows}")
    return rows
# chromeWebDriverPath = './chromedriver'
# priceXPath = '//*[@id=\"priceblock_ourprice\"]'
priceXPath = '//*[@id="tp-tool-tip-price"]/span[2]/span[3]/*'
# priceXPath2 = '//*[@id="tp_price_block_total_price_ww"]/span[1]'
# driver = webdriver.Chrome('/Users/ashish.ranjan/Documents/chromedriver')
driver = get_driver() # use this if using docker
# productFile = open('products.txt', 'r')
# Lines = productFile.readlines()
import re
import locale
def clean_price_string(s):
    """Strip currency symbols and separators, returning the price as float.

    Keeps digits plus the locale's decimal point (assumes the thousands
    separator differs from it).
    """
    decimal_point_char = locale.localeconv()['decimal_point']
    pattern = r'[^0-9' + decimal_point_char + r']+'
    return float(re.sub(pattern, '', str(s)))
# outputFile = open('prices.txt', 'a')
telegram_send.send(messages=["deployed model..."])
# Poll forever: re-read the product table and scrape each product's price.
while True:
    for productName, productUrl, productThresh in fetch_table():
        print(f"got {productName}, {productUrl}, {productThresh}")
        try:
            driver.get(productUrl)
            print('got producturl')
            element = driver.find_element_by_xpath(priceXPath)
            # element2 = driver.find_element_by_xpath(priceXPath2)
            print(f"element = {element}")
            print(f"element = {element.text}")
            print(f"cleaned element = {clean_price_string(element.text)}")
            # print(f"element2 = {element2}")
            # print(f"element2 = {element2.text}")
            # print(f"cleaned element2 = {clean_price_string(element2.text)}")
            price = float(clean_price_string(element.text))  # element.text.split(' ')[1]
            # Telegram alert when the price drops to the threshold or below.
            if price <= productThresh:
                telegram_send.send(messages=[str(productName) + ': ' + str(price)])
            print(productName, price)
        except Exception as e:
            # NOTE(review): swallows every scraping error and moves on to
            # the next product; only the exception text is printed.
            print(e)
            pass
        # outputFile.write(str(datetime.datetime.now()) + ': ' + productName+'@'+str(price)+'\n')
    time.sleep(30)  # seconds between polling rounds
# productFile.close()
# outputFile.close()
driver.close() | webscrapper_telegram/webscrapper.py | from selenium import webdriver
import datetime
import time
import telegram_send
import os
import pymysql
# SECURITY NOTE(review): configures telegram-send by piping an inline
# credential placeholder through a shell; the real token must come from an
# env var / secrets store, and subprocess.run([...]) is safer than os.system.
os.system('printf "<token_key>\<PASSWORD>/amazPT" | telegram-send --configure-channel')
def get_driver():
    """Create a headless Chrome WebDriver suitable for running in Docker.

    Applies the flags needed for containerized Chrome (no sandbox, headless,
    no GPU) and a 10-second implicit wait for element lookups.
    """
    opts = webdriver.ChromeOptions()
    for flag in ('--no-sandbox', '--headless', '--disable-gpu'):
        opts.add_argument(flag)
    drv = webdriver.Chrome(chrome_options=opts)
    drv.implicitly_wait(10)
    return drv
def fetch_table():
    """Return all rows of the ``product`` table as a list of tuples.

    A fresh connection is opened per call so the long-running polling loop
    always sees current data and never holds a stale connection.

    :returns: list of row tuples (expected shape: name, url, threshold).
    """
    # SECURITY NOTE(review): hard-coded root credentials -- acceptable only
    # for a throwaway local container; move to env vars before real use.
    conn = pymysql.connect(host='mysqldb', port=3306, user='root', passwd='password', db='proddb')
    try:
        cur = conn.cursor()
        try:
            cur.execute("SELECT * FROM product")
            rows = list(cur)
        finally:
            cur.close()
    finally:
        # FIX: previously the cursor/connection leaked when execute() raised.
        conn.close()
    print(f"rows={rows}")
    return rows
# --- module-level scraper configuration ---
# Historical XPaths kept for reference:
# chromeWebDriverPath = './chromedriver'
# priceXPath = '//*[@id=\"priceblock_ourprice\"]'
# XPath of the price element currently scraped from the product page.
priceXPath = '//*[@id="tp-tool-tip-price"]/span[2]/span[3]/*'
# priceXPath2 = '//*[@id="tp_price_block_total_price_ww"]/span[1]'
# driver = webdriver.Chrome('/Users/ashish.ranjan/Documents/chromedriver')
driver = get_driver() # use this if using docker
# productFile = open('products.txt', 'r')
# Lines = productFile.readlines()
# NOTE(review): mid-file imports; these belong at the top of the file (PEP 8).
import re
import locale
def clean_price_string(s):
    """Strip currency symbols/grouping from a price string; return a float.

    Keeps digits plus the locale's decimal separator, then normalises that
    separator to '.' so ``float()`` parses it under any locale (the original
    raised ValueError when the locale decimal point was ',').

    :param s: price text such as ``"$ 1,234.56"``; non-strings are str()'d.
    :returns: the numeric value as a float.
    :raises ValueError: if no digits remain after cleaning.
    """
    decimal_point_char = locale.localeconv()['decimal_point']
    # re.escape guards against a separator that is a regex metacharacter.
    clean = re.sub(r'[^0-9' + re.escape(decimal_point_char) + r']+', '', str(s))
    if decimal_point_char != '.':
        clean = clean.replace(decimal_point_char, '.')
    return float(clean)
# outputFile = open('prices.txt', 'a')
# Announce start-up on the configured Telegram channel.
telegram_send.send(messages=["deployed model..."])
# Poll every product row forever; alert when price is at/below threshold.
while True:
    for productName, productUrl, productThresh in fetch_table():
        print(f"got {productName}, {productUrl}, {productThresh}")
        try:
            driver.get(productUrl)
            print('got producturl')
            # NOTE(review): find_element_by_xpath is removed in Selenium 4;
            # use driver.find_element(By.XPATH, ...) when upgrading.
            element = driver.find_element_by_xpath(priceXPath)
            # element2 = driver.find_element_by_xpath(priceXPath2)
            print(f"element = {element}")
            print(f"element = {element.text}")
            print(f"cleaned element = {clean_price_string(element.text)}")
            # print(f"element2 = {element2}")
            # print(f"element2 = {element2.text}")
            # print(f"cleaned element2 = {clean_price_string(element2.text)}")
            price = float(clean_price_string(element.text)) #element.text.split(' ')[1]
            # Alert when the scraped price reaches the per-product threshold.
            if price <= productThresh:
                telegram_send.send(messages=[str(productName) + ': ' + str(price)])
                print(productName, price)
        # Broad catch keeps the poller alive through scrape/parse failures.
        except Exception as e:
            print(e)
            pass
        # outputFile.write(str(datetime.datetime.now()) + ': ' + productName+'@'+str(price)+'\n')
    # Wait between polling rounds.
    time.sleep(30)
# productFile.close()
# outputFile.close()
driver.close() | 0.117876 | 0.064388 |
import json
import logging
from oslo_concurrency import processutils as putils
from oslo_utils import encodeutils
from oslo_utils import excutils
from taskflow.patterns import linear_flow as lf
from xmonitor.async import utils
from xmonitor.i18n import _LE
LOG = logging.getLogger(__name__)
class _Introspect(utils.OptionalTask):
    """Taskflow task that pulls embedded metadata out of an image file.

    Runs ``qemu-img info`` on the staged file and stores the reported
    virtual size and disk format back onto the image record via the repo.
    """

    def __init__(self, task_id, task_type, image_repo):
        self.task_id = task_id        # ID of the owning task
        self.task_type = task_type    # task type string, used in flow name
        self.image_repo = image_repo  # repository used to load/save images
        super(_Introspect, self).__init__(
            name='%s-Introspect-%s' % (task_type, task_id))

    def execute(self, image_id, file_path):
        """Does the actual introspection

        :param image_id: Glance image ID
        :param file_path: Path to the file being introspected
        """
        try:
            stdout, stderr = putils.trycmd('qemu-img', 'info',
                                           '--output=json', file_path,
                                           log_errors=putils.LOG_ALL_ERRORS)
        except OSError as exc:
            # NOTE(flaper87): errno == 2 means the executable file
            # was not found. For now, log an error and move forward
            # until we have a better way to enable/disable optional
            # tasks.
            if exc.errno != 2:
                with excutils.save_and_reraise_exception():
                    exc_message = encodeutils.exception_to_unicode(exc)
                    msg = (_LE('Failed to execute introspection '
                               '%(task_id)s: %(exc)s') %
                           {'task_id': self.task_id, 'exc': exc_message})
                    LOG.error(msg)
            # qemu-img missing: introspection is optional, so just skip.
            return
        if stderr:
            raise RuntimeError(stderr)
        metadata = json.loads(stdout)
        new_image = self.image_repo.get(image_id)
        # 'virtual-size' may be absent for some formats; default to 0.
        new_image.virtual_size = metadata.get('virtual-size', 0)
        new_image.disk_format = metadata.get('format')
        self.image_repo.save(new_image)
        LOG.debug("%(task_id)s: Introspection successful: %(file)s",
                  {'task_id': self.task_id, 'file': file_path})
        return new_image
def get_flow(**kwargs):
    """Build the linear taskflow that introspects an image for metadata.

    :param task_id: Task ID
    :param task_type: Type of the task.
    :param image_repo: Image repository used.
    :returns: a ``linear_flow.Flow`` containing a single introspection task.
    """
    task_id = kwargs.get('task_id')
    task_type = kwargs.get('task_type')
    image_repo = kwargs.get('image_repo')
    LOG.debug("Flow: %(task_type)s with ID %(id)s on %(repo)s",
              {'task_type': task_type, 'id': task_id, 'repo': image_repo})
    flow = lf.Flow(task_type)
    flow.add(_Introspect(task_id, task_type, image_repo))
    return flow
import json
import logging
from oslo_concurrency import processutils as putils
from oslo_utils import encodeutils
from oslo_utils import excutils
from taskflow.patterns import linear_flow as lf
from xmonitor.async import utils
from xmonitor.i18n import _LE
LOG = logging.getLogger(__name__)
class _Introspect(utils.OptionalTask):
    """Taskflow task that pulls embedded metadata out of an image file.

    Runs ``qemu-img info`` on the staged file and stores the reported
    virtual size and disk format back onto the image record via the repo.
    """

    def __init__(self, task_id, task_type, image_repo):
        self.task_id = task_id        # ID of the owning task
        self.task_type = task_type    # task type string, used in flow name
        self.image_repo = image_repo  # repository used to load/save images
        super(_Introspect, self).__init__(
            name='%s-Introspect-%s' % (task_type, task_id))

    def execute(self, image_id, file_path):
        """Does the actual introspection

        :param image_id: Glance image ID
        :param file_path: Path to the file being introspected
        """
        try:
            stdout, stderr = putils.trycmd('qemu-img', 'info',
                                           '--output=json', file_path,
                                           log_errors=putils.LOG_ALL_ERRORS)
        except OSError as exc:
            # NOTE(flaper87): errno == 2 means the executable file
            # was not found. For now, log an error and move forward
            # until we have a better way to enable/disable optional
            # tasks.
            if exc.errno != 2:
                with excutils.save_and_reraise_exception():
                    exc_message = encodeutils.exception_to_unicode(exc)
                    msg = (_LE('Failed to execute introspection '
                               '%(task_id)s: %(exc)s') %
                           {'task_id': self.task_id, 'exc': exc_message})
                    LOG.error(msg)
            # qemu-img missing: introspection is optional, so just skip.
            return
        if stderr:
            raise RuntimeError(stderr)
        metadata = json.loads(stdout)
        new_image = self.image_repo.get(image_id)
        # 'virtual-size' may be absent for some formats; default to 0.
        new_image.virtual_size = metadata.get('virtual-size', 0)
        new_image.disk_format = metadata.get('format')
        self.image_repo.save(new_image)
        LOG.debug("%(task_id)s: Introspection successful: %(file)s",
                  {'task_id': self.task_id, 'file': file_path})
        return new_image
def get_flow(**kwargs):
    """Build the linear taskflow that introspects an image for metadata.

    :param task_id: Task ID
    :param task_type: Type of the task.
    :param image_repo: Image repository used.
    :returns: a ``linear_flow.Flow`` containing a single introspection task.
    """
    task_id = kwargs.get('task_id')
    task_type = kwargs.get('task_type')
    image_repo = kwargs.get('image_repo')
    LOG.debug("Flow: %(task_type)s with ID %(id)s on %(repo)s",
              {'task_type': task_type, 'id': task_id, 'repo': image_repo})
    flow = lf.Flow(task_type)
    flow.add(_Introspect(task_id, task_type, image_repo))
    return flow
import numpy as _np
from .abstract_sampler import AbstractSampler
from .._C_netket import sampler as c_sampler
from .._C_netket.utils import random_engine
from ..stats import mean as _mean
from netket import random as _random
from numba import jit, int64, float64
from .._jitclass import jitclass
class PyMetropolisHastings(AbstractSampler):
    """
    ``MetropolisHastings`` is a generic Metropolis-Hastings sampler using
    a local transition kernel to perform moves in the Markov Chain.
    The transition kernel is used to generate
    a proposed state :math:`s^\prime`, starting from the current state :math:`s`.
    The move is accepted with probability

    .. math::
        A(s\rightarrow s^\prime) = \mathrm{min}\left (1,\frac{P(s^\prime)}{P(s)} F(e^{L(s,s^\prime)})\right),

    where the probability being sampled is :math:`F(\Psi(s))` (by default :math:`F(x)=|x|^2`)
    and :math:`L(s,s^\prime)` is a correcting factor computed by the transition kernel.
    """

    def __init__(
        self, machine, transition_kernel, n_chains=16, sweep_size=None, batch_size=None
    ):
        """
        Constructs a new ``MetropolisHastings`` sampler given a machine and
        a transition kernel.

        Args:
            machine: A machine :math:`\Psi(s)` used for the sampling.
                The probability distribution being sampled
                from is :math:`F(\Psi(s))`, where the function
                $$F(X)$$, is arbitrary, by default :math:`F(X)=|X|^2`.
            transition_kernel: A function to generate a transition.
                This should take as an input the current state (in batches)
                and return a modified state (also in batches).
                This function must also return an array containing the
                `log_prob_corrections` :math:`L(s,s^\prime)`.
            n_chains: The number of Markov Chain to be run in parallel on a single process.
            sweep_size: The number of exchanges that compose a single sweep.
                If None, sweep_size is equal to the number of degrees of freedom (n_visible).
            batch_size: The batch size to be used when calling log_val on the given Machine.
                If None, batch_size is equal to the number Markov chains (n_chains).
        """
        # Order matters: the n_chains setter allocates state buffers whose
        # width comes from machine.hilbert, so machine is assigned first.
        self.machine = machine
        self.n_chains = n_chains
        self.sweep_size = sweep_size
        self._kernel = transition_kernel
        self.machine_pow = 2.0
        super().__init__(machine, n_chains)

    @property
    def n_chains(self):
        """Number of Markov chains run in parallel."""
        return self._n_chains

    @n_chains.setter
    def n_chains(self, n_chains):
        if n_chains < 0:
            raise ValueError("Expected a positive integer for n_chains ")
        self._n_chains = n_chains
        # Pre-allocate per-chain state and log-value scratch buffers.
        self._state = _np.zeros((n_chains, self._n_visible))
        self._state1 = _np.copy(self._state)
        self._log_values = _np.zeros(n_chains, dtype=_np.complex128)
        self._log_values_1 = _np.zeros(n_chains, dtype=_np.complex128)
        self._log_prob_corr = _np.zeros(n_chains)

    @property
    def machine_pow(self):
        """Exponent applied to |Psi| in the sampled distribution (default 2)."""
        return self._machine_pow

    @machine_pow.setter
    def machine_pow(self, m_power):
        self._machine_pow = m_power

    @property
    def sweep_size(self):
        """Number of kernel applications that make up a single sweep."""
        return self._sweep_size

    @sweep_size.setter
    def sweep_size(self, sweep_size):
        # Identity check (`is not None`) instead of the original `!= None`.
        self._sweep_size = sweep_size if sweep_size is not None else self._n_visible
        if self._sweep_size < 0:
            raise ValueError("Expected a positive integer for sweep_size ")

    @property
    def machine(self):
        """The machine (wavefunction) being sampled."""
        return self._machine

    @machine.setter
    def machine(self, machine):
        self._machine = machine
        self._n_visible = machine.hilbert.size
        self._hilbert = machine.hilbert

    def reset(self, init_random=False):
        """Reset chain states (optionally randomized) and statistics."""
        if init_random:
            for state in self._state:
                self._hilbert.random_vals(state, random_engine())
        self.machine.log_val(self._state, out=self._log_values)
        self._accepted_samples = 0
        self._total_samples = 0

    @staticmethod
    @jit(nopython=True)
    def acceptance_kernel(
        state, state1, log_values, log_values_1, log_prob_corr, machine_pow
    ):
        """Metropolis accept/reject per chain; returns the accepted count."""
        accepted = 0
        for i in range(state.shape[0]):
            prob = _np.exp(
                machine_pow *
                (log_values_1[i] - log_values[i] + log_prob_corr[i]).real
            )
            if prob > _random.uniform(0, 1):
                log_values[i] = log_values_1[i]
                state[i] = state1[i]
                accepted += 1
        return accepted

    def __next__(self):
        """Perform one sweep of Metropolis-Hastings moves; return the states."""
        _log_val = self.machine.log_val
        _acc_kernel = self.acceptance_kernel
        _state = self._state
        _state1 = self._state1
        _log_values = self._log_values
        _log_values_1 = self._log_values_1
        _log_prob_corr = self._log_prob_corr
        _machine_pow = self._machine_pow
        _t_kernel = self._kernel.apply
        accepted = 0
        for sweep in range(self.sweep_size):
            # Propose a new state using the transition kernel
            _t_kernel(_state, _state1, _log_prob_corr)
            _log_val(_state1, out=_log_values_1)
            # Acceptance Kernel
            accepted += _acc_kernel(
                _state,
                _state1,
                _log_values,
                _log_values_1,
                _log_prob_corr,
                _machine_pow,
            )
        # BUG FIX: the accepted count was previously accumulated into a
        # local copy of self._accepted_samples and never written back, so
        # the `acceptance` property always read 0.
        self._accepted_samples += accepted
        self._total_samples += self.sweep_size * self.n_chains
        return self._state

    @property
    def acceptance(self):
        """The measured acceptance probability."""
        return _mean(self._accepted_samples) / _mean(self._total_samples)
from .abstract_sampler import AbstractSampler
from .._C_netket import sampler as c_sampler
from .._C_netket.utils import random_engine
from ..stats import mean as _mean
from netket import random as _random
from numba import jit, int64, float64
from .._jitclass import jitclass
class PyMetropolisHastings(AbstractSampler):
    """
    ``MetropolisHastings`` is a generic Metropolis-Hastings sampler using
    a local transition kernel to perform moves in the Markov Chain.
    The transition kernel is used to generate
    a proposed state :math:`s^\prime`, starting from the current state :math:`s`.
    The move is accepted with probability

    .. math::
        A(s\rightarrow s^\prime) = \mathrm{min}\left (1,\frac{P(s^\prime)}{P(s)} F(e^{L(s,s^\prime)})\right),

    where the probability being sampled is :math:`F(\Psi(s))` (by default :math:`F(x)=|x|^2`)
    and :math:`L(s,s^\prime)` is a correcting factor computed by the transition kernel.
    """

    def __init__(
        self, machine, transition_kernel, n_chains=16, sweep_size=None, batch_size=None
    ):
        """
        Constructs a new ``MetropolisHastings`` sampler given a machine and
        a transition kernel.

        Args:
            machine: A machine :math:`\Psi(s)` used for the sampling.
                The probability distribution being sampled
                from is :math:`F(\Psi(s))`, where the function
                $$F(X)$$, is arbitrary, by default :math:`F(X)=|X|^2`.
            transition_kernel: A function to generate a transition.
                This should take as an input the current state (in batches)
                and return a modified state (also in batches).
                This function must also return an array containing the
                `log_prob_corrections` :math:`L(s,s^\prime)`.
            n_chains: The number of Markov Chain to be run in parallel on a single process.
            sweep_size: The number of exchanges that compose a single sweep.
                If None, sweep_size is equal to the number of degrees of freedom (n_visible).
            batch_size: The batch size to be used when calling log_val on the given Machine.
                If None, batch_size is equal to the number Markov chains (n_chains).
        """
        # Order matters: the n_chains setter allocates buffers sized by
        # machine.hilbert, so machine must be assigned first.
        self.machine = machine
        self.n_chains = n_chains
        self.sweep_size = sweep_size
        self._kernel = transition_kernel
        self.machine_pow = 2.0
        super().__init__(machine, n_chains)

    @property
    def n_chains(self):
        """Number of Markov chains run in parallel."""
        return self._n_chains

    @n_chains.setter
    def n_chains(self, n_chains):
        if n_chains < 0:
            raise ValueError("Expected a positive integer for n_chains ")
        self._n_chains = n_chains
        # Pre-allocate per-chain state and log-value scratch buffers.
        self._state = _np.zeros((n_chains, self._n_visible))
        self._state1 = _np.copy(self._state)
        self._log_values = _np.zeros(n_chains, dtype=_np.complex128)
        self._log_values_1 = _np.zeros(n_chains, dtype=_np.complex128)
        self._log_prob_corr = _np.zeros(n_chains)

    @property
    def machine_pow(self):
        """Exponent applied to |Psi| in the sampled distribution (default 2)."""
        return self._machine_pow

    @machine_pow.setter
    def machine_pow(self, m_power):
        self._machine_pow = m_power

    @property
    def sweep_size(self):
        """Number of kernel applications that make up a single sweep."""
        return self._sweep_size

    @sweep_size.setter
    def sweep_size(self, sweep_size):
        # NOTE(review): `!= None` should be `is not None` (PEP 8 identity check).
        self._sweep_size = sweep_size if sweep_size != None else self._n_visible
        if self._sweep_size < 0:
            raise ValueError("Expected a positive integer for sweep_size ")

    @property
    def machine(self):
        """The machine (wavefunction) being sampled."""
        return self._machine

    @machine.setter
    def machine(self, machine):
        self._machine = machine
        self._n_visible = machine.hilbert.size
        self._hilbert = machine.hilbert

    def reset(self, init_random=False):
        """Reset chain states (optionally randomized) and statistics."""
        if init_random:
            for state in self._state:
                self._hilbert.random_vals(state, random_engine())
        self.machine.log_val(self._state, out=self._log_values)
        self._accepted_samples = 0
        self._total_samples = 0

    @staticmethod
    @jit(nopython=True)
    def acceptance_kernel(
        state, state1, log_values, log_values_1, log_prob_corr, machine_pow
    ):
        """Metropolis accept/reject per chain; returns the accepted count."""
        accepted = 0
        for i in range(state.shape[0]):
            prob = _np.exp(
                machine_pow *
                (log_values_1[i] - log_values[i] + log_prob_corr[i]).real
            )
            if prob > _random.uniform(0, 1):
                log_values[i] = log_values_1[i]
                state[i] = state1[i]
                accepted += 1
        return accepted

    def __next__(self):
        """Perform one sweep of Metropolis-Hastings moves; return the states."""
        _log_val = self.machine.log_val
        _acc_kernel = self.acceptance_kernel
        _state = self._state
        _state1 = self._state1
        _log_values = self._log_values
        _log_values_1 = self._log_values_1
        _log_prob_corr = self._log_prob_corr
        _machine_pow = self._machine_pow
        # NOTE(review): this binds a local int copy; the `+=` below never
        # writes back to self._accepted_samples, so the `acceptance`
        # property always reads 0 -- confirm and fix.
        _accepted_samples = self._accepted_samples
        _t_kernel = self._kernel.apply
        for sweep in range(self.sweep_size):
            # Propose a new state using the transition kernel
            _t_kernel(_state, _state1, _log_prob_corr)
            _log_val(_state1, out=_log_values_1)
            # Acceptance Kernel
            acc = _acc_kernel(
                _state,
                _state1,
                _log_values,
                _log_values_1,
                _log_prob_corr,
                _machine_pow,
            )
            _accepted_samples += acc
        self._total_samples += self.sweep_size * self.n_chains
        return self._state
@property
def acceptance(self):
"""The measured acceptance probability."""
return _mean(self._accepted_samples) / _mean(self._total_samples) | 0.908904 | 0.634996 |
import concurrent.futures
import gc
import logging
import os
from itertools import repeat
from pathlib import Path
import cv2
import numpy as np
from openslide import OpenSlide
from openslide.deepzoom import DeepZoomGenerator
from tifffile import TiffWriter
SUBFILETYPE_NONE = 0
SUBFILETYPE_REDUCEDIMAGE = 1
logger = logging.getLogger(__name__)
def filter_tile(
    tiles, dim_index, index, tile_size, output_array
):
    """Copy one DeepZoom tile into its slot of the full-size output array.

    Any failure is logged and swallowed so a single bad tile cannot abort
    the whole thread pool.
    """
    try:
        col, row = index
        tile = tiles.get_tile(dim_index, index)
        w, h = tile.size
        # Edge tiles may be smaller; pad them up to the nominal tile size.
        if tile.size != (tile_size, tile_size):
            tile = tile.crop((0, 0, tile_size, tile_size))
        x0 = col * tile_size
        y0 = row * tile_size
        arr = np.array(tile)  # H x W x C
        output_array[y0:y0 + h, x0:x0 + w, :] = arr[:h, :w]
    except Exception as exc:
        logger.exception(exc)
def svs2tif(input_file, output_folder, tile_size, overlap,
            num_workers=os.cpu_count(), output_filename="image.tif"):
    """Convert an OpenSlide-readable slide (e.g. SVS) to a tiled pyramidal TIFF.

    The largest level is read tile-by-tile (in parallel) into a disk-backed
    memmap, lower pyramid levels are produced by repeated 2x downscaling,
    and all levels are written into one BigTIFF with JPEG-compressed tiles.

    :param input_file: path to the source slide.
    :param output_folder: directory receiving the TIFF and temporary memmaps.
    :param tile_size: tile edge length used for reading and for TIFF tiles.
    :param overlap: DeepZoom tile overlap in pixels.
    :param num_workers: number of reader threads.
    :param output_filename: name of the TIFF created inside output_folder.
    """
    output_folder = str(output_folder)
    logger.info("Parameters")
    logger.info(" input file: %s", input_file)
    logger.info(" output folder: %s", output_folder)
    logger.info(" tile size: %d", tile_size)
    logger.info(" overlap: %d", overlap)
    logger.info(" num_workers: %d", num_workers)
    logger.info(" output filename: %s", output_filename)
    with OpenSlide(input_file) as slide:
        properties = slide.properties
        slide_dimensions = slide.dimensions
        # limit_bounds=False keeps level 0 aligned with the raw slide extent.
        tiles = DeepZoomGenerator(
            slide, tile_size=tile_size, overlap=overlap, limit_bounds=False
        )
        output_file = Path(output_folder) / output_filename
        np_memmap = []
        width, height = slide_dimensions
        img_w, img_h = width, height
        # One disk-backed RGB buffer per pyramid level, halving each time,
        # until a level would fit within a single tile.
        for level in range(tiles.level_count):
            memmap_filename = Path(output_folder, "level{}.mmap".format(level))
            memmap_shape = (img_h, img_w, 3)
            np_memmap_arr = np.memmap(
                memmap_filename, dtype=np.uint8, mode="w+", shape=memmap_shape
            )
            np_memmap.append(np_memmap_arr)
            logger.info(" Created %s %s", memmap_filename, repr(memmap_shape))
            img_w = round(img_w / 2)
            img_h = round(img_h / 2)
            if max(img_w, img_h) < tile_size:
                break
        try:
            # Multithread processing for each tile in the largest
            # image (index 0)
            logger.info("Processing tiles...")
            dim_index = tiles.level_count - 1
            tile_pos_x, tile_pos_y = tiles.level_tiles[dim_index]
            index_iter = np.ndindex(tile_pos_x, tile_pos_y)
            with concurrent.futures.ThreadPoolExecutor(
                    max_workers=num_workers) as executor:
                executor.map(
                    filter_tile,
                    repeat(tiles),
                    repeat(dim_index),
                    index_iter,
                    repeat(tile_size),
                    repeat(np_memmap[0]),
                )
            logger.info("Storing low resolution images...")
            # Each level is an area-averaged half-scale of the previous one.
            for index in range(1, len(np_memmap)):
                src_arr = np_memmap[index - 1]
                target_arr = np_memmap[index]
                target_arr[:] = cv2.resize(
                    src_arr, (0, 0), fx=0.5, fy=0.5,
                    interpolation=cv2.INTER_AREA
                )
                # th, tw = target_arr.shape[:2]
                # target_arr[:] = src_arr[
                #     : th * 2 : 2, : tw * 2 : 2, :
                # ]  # Fast resizing. No anti-aliasing.
                logger.info(" Level %d: %s", index, repr(target_arr.shape))
            # Calculate resolution
            if (
                properties.get("tiff.ResolutionUnit")
                and properties.get("tiff.XResolution")
                and properties.get("tiff.YResolution")
            ):
                resolution_unit = properties.get("tiff.ResolutionUnit")
                x_resolution = float(properties.get("tiff.XResolution"))
                y_resolution = float(properties.get("tiff.YResolution"))
            else:
                # Fall back to microns-per-pixel metadata when explicit
                # TIFF resolution tags are missing.
                resolution_unit = properties.get("tiff.ResolutionUnit", "inch")
                if properties.get("tiff.ResolutionUnit",
                                  "inch").lower() == "inch":
                    numerator = 25400  # Microns in Inch
                else:
                    numerator = 10000  # Microns in CM
                x_resolution = int(numerator
                                   // float(properties.get('openslide.mpp-x',
                                                           1)))
                y_resolution = int(numerator
                                   // float(properties.get('openslide.mpp-y',
                                                           1)))
            # Write TIFF file
            with TiffWriter(output_file, bigtiff=True) as tif:
                # Save from the largest image (openslide requires that)
                for level in range(len(np_memmap)):
                    src_arr = np_memmap[level]
                    height, width = src_arr.shape[:2]
                    logger.info("Saving Level %d image (%d x %d)...",
                                level, width, height)
                    # Only the base level is full resolution; the rest are
                    # flagged as reduced-resolution pages.
                    if level:
                        subfiletype = SUBFILETYPE_REDUCEDIMAGE
                    else:
                        subfiletype = SUBFILETYPE_NONE
                    tif.save(
                        src_arr,
                        software="Glencoe/Faas pyramid",
                        metadata={"axes": "YXC"},
                        tile=(tile_size, tile_size),
                        photometric="RGB",
                        planarconfig="CONTIG",
                        resolution=(
                            x_resolution // 2 ** level,
                            y_resolution // 2 ** level,
                            resolution_unit,
                        ),
                        compress=("jpeg", 95),  # requires imagecodecs
                        subfiletype=subfiletype,
                    )
            logger.info("Done.")
        finally:
            # Remove memory-mapped file
            logger.info("Removing memmapped files...")
            src_arr = None
            target_arr = None
            np_memmap_arr = None
            del np_memmap
            gc.collect()
            mmap_file_iter = Path(output_folder).glob("*.mmap")
            for fp in mmap_file_iter:
                fp.unlink()
import concurrent.futures
import gc
import logging
import os
from itertools import repeat
from pathlib import Path
import cv2
import numpy as np
from openslide import OpenSlide
from openslide.deepzoom import DeepZoomGenerator
from tifffile import TiffWriter
SUBFILETYPE_NONE = 0
SUBFILETYPE_REDUCEDIMAGE = 1
logger = logging.getLogger(__name__)
def filter_tile(
    tiles, dim_index, index, tile_size, output_array
):
    """Read one DeepZoom tile and write it into the full-resolution array.

    :param tiles: DeepZoomGenerator to read from.
    :param dim_index: DeepZoom level index (the largest level here).
    :param index: (x, y) tile coordinates within that level.
    :param tile_size: nominal tile edge length in pixels.
    :param output_array: full-size H x W x 3 uint8 array the tile is copied into.
    """
    try:
        x, y = index
        tile = tiles.get_tile(dim_index, index)
        tile_width, tile_height = tile.size
        # Make image the same size for inference
        if tile.size != (tile_size, tile_size):
            tile = tile.crop((0, 0, tile_size, tile_size))
        ax = x * tile_size
        ay = y * tile_size
        tile_arr = np.array(tile)  # H x W x C
        output_array[ay: ay + tile_height, ax: ax + tile_width, :] = tile_arr[
            :tile_height, :tile_width]
    except Exception as e:
        # Log and swallow so a single bad tile does not kill the thread pool.
        logger.exception(e)
def svs2tif(input_file, output_folder, tile_size, overlap,
            num_workers=os.cpu_count(), output_filename="image.tif"):
    """Convert an OpenSlide-readable slide (e.g. SVS) to a tiled pyramidal TIFF.

    The largest level is read tile-by-tile (in parallel) into a disk-backed
    memmap, lower pyramid levels are produced by repeated 2x downscaling,
    and all levels are written into one BigTIFF with JPEG-compressed tiles.

    :param input_file: path to the source slide.
    :param output_folder: directory receiving the TIFF and temporary memmaps.
    :param tile_size: tile edge length used for reading and for TIFF tiles.
    :param overlap: DeepZoom tile overlap in pixels.
    :param num_workers: number of reader threads.
    :param output_filename: name of the TIFF created inside output_folder.
    """
    output_folder = str(output_folder)
    logger.info("Parameters")
    logger.info(" input file: %s", input_file)
    logger.info(" output folder: %s", output_folder)
    logger.info(" tile size: %d", tile_size)
    logger.info(" overlap: %d", overlap)
    logger.info(" num_workers: %d", num_workers)
    logger.info(" output filename: %s", output_filename)
    with OpenSlide(input_file) as slide:
        properties = slide.properties
        slide_dimensions = slide.dimensions
        # limit_bounds=False keeps level 0 aligned with the raw slide extent.
        tiles = DeepZoomGenerator(
            slide, tile_size=tile_size, overlap=overlap, limit_bounds=False
        )
        output_file = Path(output_folder) / output_filename
        np_memmap = []
        width, height = slide_dimensions
        img_w, img_h = width, height
        # One disk-backed RGB buffer per pyramid level, halving each time,
        # until a level would fit within a single tile.
        for level in range(tiles.level_count):
            memmap_filename = Path(output_folder, "level{}.mmap".format(level))
            memmap_shape = (img_h, img_w, 3)
            np_memmap_arr = np.memmap(
                memmap_filename, dtype=np.uint8, mode="w+", shape=memmap_shape
            )
            np_memmap.append(np_memmap_arr)
            logger.info(" Created %s %s", memmap_filename, repr(memmap_shape))
            img_w = round(img_w / 2)
            img_h = round(img_h / 2)
            if max(img_w, img_h) < tile_size:
                break
        try:
            # Multithread processing for each tile in the largest
            # image (index 0)
            logger.info("Processing tiles...")
            dim_index = tiles.level_count - 1
            tile_pos_x, tile_pos_y = tiles.level_tiles[dim_index]
            index_iter = np.ndindex(tile_pos_x, tile_pos_y)
            with concurrent.futures.ThreadPoolExecutor(
                    max_workers=num_workers) as executor:
                executor.map(
                    filter_tile,
                    repeat(tiles),
                    repeat(dim_index),
                    index_iter,
                    repeat(tile_size),
                    repeat(np_memmap[0]),
                )
            logger.info("Storing low resolution images...")
            # Each level is an area-averaged half-scale of the previous one.
            for index in range(1, len(np_memmap)):
                src_arr = np_memmap[index - 1]
                target_arr = np_memmap[index]
                target_arr[:] = cv2.resize(
                    src_arr, (0, 0), fx=0.5, fy=0.5,
                    interpolation=cv2.INTER_AREA
                )
                # th, tw = target_arr.shape[:2]
                # target_arr[:] = src_arr[
                #     : th * 2 : 2, : tw * 2 : 2, :
                # ]  # Fast resizing. No anti-aliasing.
                logger.info(" Level %d: %s", index, repr(target_arr.shape))
            # Calculate resolution
            if (
                properties.get("tiff.ResolutionUnit")
                and properties.get("tiff.XResolution")
                and properties.get("tiff.YResolution")
            ):
                resolution_unit = properties.get("tiff.ResolutionUnit")
                x_resolution = float(properties.get("tiff.XResolution"))
                y_resolution = float(properties.get("tiff.YResolution"))
            else:
                # Fall back to microns-per-pixel metadata when explicit
                # TIFF resolution tags are missing.
                resolution_unit = properties.get("tiff.ResolutionUnit", "inch")
                if properties.get("tiff.ResolutionUnit",
                                  "inch").lower() == "inch":
                    numerator = 25400  # Microns in Inch
                else:
                    numerator = 10000  # Microns in CM
                x_resolution = int(numerator
                                   // float(properties.get('openslide.mpp-x',
                                                           1)))
                y_resolution = int(numerator
                                   // float(properties.get('openslide.mpp-y',
                                                           1)))
            # Write TIFF file
            with TiffWriter(output_file, bigtiff=True) as tif:
                # Save from the largest image (openslide requires that)
                for level in range(len(np_memmap)):
                    src_arr = np_memmap[level]
                    height, width = src_arr.shape[:2]
                    logger.info("Saving Level %d image (%d x %d)...",
                                level, width, height)
                    # Only the base level is full resolution; the rest are
                    # flagged as reduced-resolution pages.
                    if level:
                        subfiletype = SUBFILETYPE_REDUCEDIMAGE
                    else:
                        subfiletype = SUBFILETYPE_NONE
                    tif.save(
                        src_arr,
                        software="Glencoe/Faas pyramid",
                        metadata={"axes": "YXC"},
                        tile=(tile_size, tile_size),
                        photometric="RGB",
                        planarconfig="CONTIG",
                        resolution=(
                            x_resolution // 2 ** level,
                            y_resolution // 2 ** level,
                            resolution_unit,
                        ),
                        compress=("jpeg", 95),  # requires imagecodecs
                        subfiletype=subfiletype,
                    )
            logger.info("Done.")
        finally:
            # Remove memory-mapped file
            logger.info("Removing memmapped files...")
            src_arr = None
            target_arr = None
            np_memmap_arr = None
            del np_memmap
            gc.collect()
            mmap_file_iter = Path(output_folder).glob("*.mmap")
            for fp in mmap_file_iter:
                fp.unlink()
from __future__ import print_function
import pyaudio
import wave
from six.moves import queue
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
CHUNK = 512
# MicrophoneStream - original code in https://goo.gl/7Xy3TT
class MicrophoneStream(object):
    """Opens a recording stream as a generator yielding the audio chunks."""

    def __init__(self, rate, chunk):
        # Sample rate (Hz) and frames-per-buffer requested from PyAudio.
        self._rate = rate
        self._chunk = chunk
        # Create a thread-safe buffer of audio data
        self._buff = queue.Queue()
        self.closed = True

    def __enter__(self):
        """Open the PyAudio input stream; audio is buffered via callback."""
        self._audio_interface = pyaudio.PyAudio()
        self._audio_stream = self._audio_interface.open(
            format=pyaudio.paInt16,
            channels=1, rate=self._rate,
            input=True, frames_per_buffer=self._chunk,
            # Run the audio stream asynchronously to fill the buffer object.
            # This is necessary so that the input device's buffer doesn't
            # overflow while the calling thread makes network requests, etc.
            stream_callback=self._fill_buffer,
        )
        self.closed = False
        return self

    def __exit__(self, type, value, traceback):
        """Stop/close the stream and unblock any consumer of generator()."""
        self._audio_stream.stop_stream()
        self._audio_stream.close()
        self.closed = True
        # Signal the generator to terminate so that the client's
        # streaming_recognize method will not block the process termination.
        self._buff.put(None)
        self._audio_interface.terminate()

    def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
        """Continuously collect data from the audio stream, into the buffer."""
        self._buff.put(in_data)
        return None, pyaudio.paContinue

    def generator(self):
        """Yield concatenated buffered audio chunks until the stream closes."""
        while not self.closed:
            # Use a blocking get() to ensure there's at least one chunk of
            # data, and stop iteration if the chunk is None, indicating the
            # end of the audio stream.
            chunk = self._buff.get()
            if chunk is None:
                return
            data = [chunk]
            # Now consume whatever other data's still buffered.
            while True:
                try:
                    chunk = self._buff.get(block=False)
                    if chunk is None:
                        return
                    data.append(chunk)
                except queue.Empty:
                    break
            yield b''.join(data)
# [END audio_stream]
def play_file(fname):
    """Play a WAV file on the default output device, blocking until done.

    :param fname: path to a .wav file readable by the ``wave`` module.
    """
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    chunk = 1024
    try:
        # open stream based on the wave object which has been input.
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        try:
            # read data (based on the chunk size)
            data = wf.readframes(chunk)
            # play stream (looping from beginning of file to the end)
            while len(data) > 0:
                # writing to the stream is what *actually* plays the sound.
                stream.write(data)
                data = wf.readframes(chunk)
        finally:
            stream.stop_stream()
            stream.close()
    finally:
        # FIX: the original leaked the wave file handle (wf was never
        # closed) and skipped all cleanup when playback raised.
        wf.close()
        p.terminate()
import pyaudio
import wave
from six.moves import queue
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
CHUNK = 512
# MicrophoneStream - original code in https://goo.gl/7Xy3TT
class MicrophoneStream(object):
    """Opens a recording stream as a generator yielding the audio chunks."""

    def __init__(self, rate, chunk):
        # Sample rate (Hz) and frames-per-buffer requested from PyAudio.
        self._rate = rate
        self._chunk = chunk
        # Create a thread-safe buffer of audio data
        self._buff = queue.Queue()
        self.closed = True

    def __enter__(self):
        """Open the PyAudio input stream; audio is buffered via callback."""
        self._audio_interface = pyaudio.PyAudio()
        self._audio_stream = self._audio_interface.open(
            format=pyaudio.paInt16,
            channels=1, rate=self._rate,
            input=True, frames_per_buffer=self._chunk,
            # Run the audio stream asynchronously to fill the buffer object.
            # This is necessary so that the input device's buffer doesn't
            # overflow while the calling thread makes network requests, etc.
            stream_callback=self._fill_buffer,
        )
        self.closed = False
        return self

    def __exit__(self, type, value, traceback):
        """Stop/close the stream and unblock any consumer of generator()."""
        self._audio_stream.stop_stream()
        self._audio_stream.close()
        self.closed = True
        # Signal the generator to terminate so that the client's
        # streaming_recognize method will not block the process termination.
        self._buff.put(None)
        self._audio_interface.terminate()

    def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
        """Continuously collect data from the audio stream, into the buffer."""
        self._buff.put(in_data)
        return None, pyaudio.paContinue

    def generator(self):
        """Yield concatenated buffered audio chunks until the stream closes."""
        while not self.closed:
            # Use a blocking get() to ensure there's at least one chunk of
            # data, and stop iteration if the chunk is None, indicating the
            # end of the audio stream.
            chunk = self._buff.get()
            if chunk is None:
                return
            data = [chunk]
            # Now consume whatever other data's still buffered.
            while True:
                try:
                    chunk = self._buff.get(block=False)
                    if chunk is None:
                        return
                    data.append(chunk)
                except queue.Empty:
                    break
            yield b''.join(data)
# [END audio_stream]
def play_file(fname):
    """Play a WAV file on the default output device, blocking until done.

    :param fname: path to a .wav file readable by the ``wave`` module.
    """
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    chunk = 1024
    try:
        # open stream based on the wave object which has been input.
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        try:
            # read data (based on the chunk size)
            data = wf.readframes(chunk)
            # play stream (looping from beginning of file to the end)
            while len(data) > 0:
                # writing to the stream is what *actually* plays the sound.
                stream.write(data)
                data = wf.readframes(chunk)
        finally:
            stream.stop_stream()
            stream.close()
    finally:
        # FIX: the original leaked the wave file handle (wf was never
        # closed) and skipped all cleanup when playback raised.
        wf.close()
        p.terminate()
import datetime
import json
_singleton = {}
def singleton(key, value=None):
    """Get or set a process-wide singleton value.

    :param key: identifier of the singleton.
    :param value: when given, registers it as the singleton for *key*;
        a key may be set only once.
    :return: the value registered for *key*.
    """
    global _singleton
    if value is not None:
        if key in _singleton:
            raise Exception('Singleton %s was already set.' % key)
        _singleton[key] = value
    try:
        return _singleton[key]
    except KeyError:
        raise Exception('Singleton %s has not been set.' % key)
def cache(timedelta):
    """Returns a decorator that memoizes the wrapped function (based on
    JSON serialization of positional and keyword args).

    :param timedelta: A datetime.timedelta instance for cache lifetime.
    :returns: A callable that can be used as a decorator for a function.
    :raises Exception: if *timedelta* is not a datetime.timedelta.
    """
    # Renamed from `cache`/`cache_expiry` so the store no longer shadows
    # this decorator factory's own name.
    store = {}
    expiry = {}
    # isinstance (not a `type(...) is` comparison) so subclasses also work.
    if not isinstance(timedelta, datetime.timedelta):
        raise Exception('timedelta argument must have type datetime.timedelta')

    def _decorator(fn):
        def _cache(*args, **kwargs):
            # NOTE: only JSON-serializable arguments can be cache keys.
            key = json.dumps(args) + json.dumps(kwargs)
            expires = expiry.get(key)
            if expires is not None and datetime.datetime.now() < expires:
                return store[key]
            # Expired or missing: recompute and refresh the expiry time
            # (the original's `store[key] = None` dance was dead code).
            result = fn(*args, **kwargs)
            store[key] = result
            expiry[key] = datetime.datetime.now() + timedelta
            return result
        return _cache
    return _decorator
def appendnewline(fn):
    """Wrap *fn* so non-container results come back as bytes plus a newline.

    List and dict results are passed through untouched; any other result is
    interpreted as bytes and has an ascii newline byte appended.

    :param fn: A callable to be wrapped.
    :return: A callable that invokes fn and appends a newline byte.
    """
    def _wrapped(*args, **kwargs):
        result = fn(*args, **kwargs)
        # Containers (directory-style results) are returned as-is.
        if type(result) in (list, dict):
            return result
        return bytes(result) + b'\n'
    return _wrapped
def to_2dp(fn):
"""Converts pence (e.g. 1000) to 2dp (e.g. 10.00)."""
return lambda *a, **k: '%.02f' % (float(fn(*a, **k)) / 100.0) | monzo_fs/decorators.py | import datetime
import json
_singleton = {}
def singleton(key, value=None):
"""Gets or sets a singleton instance.
:param key: the key used to identify this singleton.
:param value: (optional) if set this is the value for the singleton.
:return: singleton value for key.
"""
global _singleton
if value is not None:
if key in _singleton:
raise Exception('Singleton %s was already set.' % key)
_singleton[key] = value
if key not in _singleton:
raise Exception('Singleton %s has not been set.' % key)
return _singleton[key]
def cache(timedelta):
"""Returns a decorator that memoizes the wrapped function (based on
JSON serialization of positional and keyword args).
:param timedelta: A datetime.timedelta instance for cache lifetime.
:returns: A callable that can be used as a decorator for a function.
"""
cache = {}
cache_expiry = {}
if type(timedelta) is not datetime.timedelta:
raise Exception('timedelta argument must have type datetime.timedelta')
def _decorator(fn):
def _cache(*args, **kwargs):
key = json.dumps(args) + json.dumps(kwargs)
expires = cache_expiry.get(key, None)
if expires:
if datetime.datetime.now() < expires:
return cache[key]
else:
cache[key] = None
cache_expiry[key] = None
cache[key] = fn(*args, **kwargs)
cache_expiry[key] = datetime.datetime.now() + timedelta
return cache[key]
return _cache
return _decorator
def appendnewline(fn):
"""Returns a function that calls the given function and appends an ascii
newline byte to the result (if the result is not a list or dict). The
result of the function is also interpreted as bytes.
:param fn: A callable to be wrapped.
:return: A callable that invokes fn and appends a newline byte.
"""
def _decorator(*a, **k):
r = fn(*a, **k)
if type(r) not in (list, dict):
r = bytes(r) + b'\n'
return r
return _decorator
def to_2dp(fn):
"""Converts pence (e.g. 1000) to 2dp (e.g. 10.00)."""
return lambda *a, **k: '%.02f' % (float(fn(*a, **k)) / 100.0) | 0.788461 | 0.260295 |
import requests
import threading
import uuid
import webbrowser
from .server import StoppableHTTPServer, AuthHandler
from shortcircuit.model.logger import Logger
class ESI:
    '''
    EVE Swagger Interface (ESI) client using the OAuth2 implicit flow.

    A more robust design would run a proxy auth server with an
    authorization-code flow and token storage; instead this follows the
    implicit flow and asks the user to re-login every time the token
    expires.
    '''
    ENDPOINT_ESI_VERIFY = 'https://esi.evetech.net/verify'
    ENDPOINT_ESI_LOCATION_FORMAT = 'https://esi.evetech.net/latest/characters/{}/location/'
    ENDPOINT_ESI_UNIVERSE_NAMES = 'https://esi.evetech.net/latest/universe/names/'
    ENDPOINT_ESI_UI_WAYPOINT = 'https://esi.evetech.net/latest/ui/autopilot/waypoint/'
    ENDPOINT_EVE_AUTH_FORMAT = 'https://login.eveonline.com/oauth/authorize?response_type=token&redirect_uri={}&client_id={}&scope={}&state={}'
    CLIENT_CALLBACK = 'http://127.0.0.1:7444/callback/'
    CLIENT_ID = 'd802bba44b7c4f6cbfa2944b0e5ea83f'
    CLIENT_SCOPES = [
        'esi-location.read_location.v1',
        'esi-ui.write_waypoint.v1',
    ]

    def __init__(self, login_callback, logout_callback):
        """
        :param login_callback: called with the character name (or None) after login.
        :param logout_callback: called when the session expires or is closed.
        """
        self.login_callback = login_callback
        self.logout_callback = logout_callback
        self.httpd = None
        self.state = None
        self.token = None
        self.char_id = None
        self.char_name = None
        self.sso_timer = None

    def start_server(self):
        """Start the local OAuth callback server (if needed) and open the
        EVE login page in the default browser.

        :return: the result of ``webbrowser.open``.
        """
        if not self.httpd:
            # Server not running - start it on the registered callback port
            Logger.debug('Starting server')
            self.httpd = StoppableHTTPServer(
                server_address=('127.0.0.1', 7444),
                request_handler_class=AuthHandler,
                timeout_callback=self.timeout_server
            )
            server_thread = threading.Thread(target=self.httpd.serve, args=(self.handle_login, ))
            # Thread.setDaemon() is deprecated since Python 3.10; assign the attribute.
            server_thread.daemon = True
            server_thread.start()
            # Random state guards the OAuth redirect against CSRF.
            self.state = str(uuid.uuid4())
        else:
            # Server already running - reset timeout counter
            self.httpd.tries = 0
        scopes = ' '.join(ESI.CLIENT_SCOPES)
        endpoint_auth = ESI.ENDPOINT_EVE_AUTH_FORMAT.format(ESI.CLIENT_CALLBACK, ESI.CLIENT_ID, scopes, self.state)
        return webbrowser.open(endpoint_auth)

    def timeout_server(self):
        # Called by the HTTP server when it gives up waiting for a login.
        self.httpd = None

    def stop_server(self):
        """Stop the local OAuth callback server, if it is running."""
        Logger.debug('Stopping server')
        if self.httpd:
            self.httpd.stop()
            self.httpd = None

    def handle_login(self, message):
        """Handle the OAuth redirect parameters posted back by the browser.

        Verifies the anti-CSRF state, stores the access token, schedules an
        automatic logout when the token expires, and resolves the character
        id/name via the ESI verify endpoint.  Always notifies the login
        callback (with None on failure) and stops the callback server.
        """
        if not message:
            return
        if 'state' in message:
            if message['state'][0] != self.state:
                Logger.warning('OAUTH state mismatch')
                return
        if 'access_token' in message:
            self.token = message['access_token'][0]
            # Log out automatically when the token expires.
            self.sso_timer = threading.Timer(int(message['expires_in'][0]), self._logout)
            # Timer.setDaemon() is deprecated since Python 3.10; assign the attribute.
            self.sso_timer.daemon = True
            self.sso_timer.start()
            r = requests.get(ESI.ENDPOINT_ESI_VERIFY, headers=self._get_headers())
            if r.status_code == requests.codes.ok:
                data = r.json()
                self.char_id = data['CharacterID']
                self.char_name = data['CharacterName']
            else:
                # Verification failed: discard the session entirely.
                self.token = None
                self.sso_timer = None
                self.char_id = None
                self.char_name = None
        self.login_callback(self.char_name)
        self.stop_server()

    def _get_headers(self):
        # Standard headers for authenticated ESI requests.
        return {
            'User-Agent': 'Short Circuit (minimally maintained by @Second_Fry), <EMAIL>',
            'Authorization': 'Bearer {}'.format(self.token),
        }

    def get_char_location(self):
        """Return the name of the solar system the character is in.

        :return: system name, or None when not logged in or on any
            request failure.
        """
        if not self.token:
            return None
        current_location_name = None
        current_location_id = None
        r = requests.get(ESI.ENDPOINT_ESI_LOCATION_FORMAT.format(self.char_id), headers=self._get_headers())
        if r.status_code == requests.codes.ok:
            current_location_id = r.json()['solar_system_id']
            # Resolve the numeric system id to a human-readable name.
            r = requests.post(ESI.ENDPOINT_ESI_UNIVERSE_NAMES, json=[str(current_location_id)])
            if r.status_code == requests.codes.ok:
                current_location_name = r.json()[0]['name']
        return current_location_name

    def set_char_destination(self, sys_id):
        """Set the in-game autopilot destination to *sys_id*.

        :return: True when ESI acknowledged the waypoint (HTTP 204).
        """
        if not self.token:
            return False
        success = False
        r = requests.post('{}?add_to_beginning=false&clear_other_waypoints=true&destination_id={}'.format(ESI.ENDPOINT_ESI_UI_WAYPOINT, sys_id), headers=self._get_headers())
        if r.status_code == 204:
            success = True
        return success

    def logout(self):
        """Explicitly log out, cancelling the scheduled expiry timer."""
        if self.sso_timer:
            self.sso_timer.cancel()
        self._logout()

    def _logout(self):
        # Clear session state and notify the owner.
        self.token = None
        self.char_id = None
        self.char_name = None
        self.logout_callback()
def login_cb(char_name):
    # Demo login callback: announce the character that just logged in.
    print('Welcome, {}'.format(char_name))
def logout_cb():
    # Demo logout callback: fired when the SSO token expires.
    print('Session expired')
def main():
    """Interactive smoke test: start the OAuth flow and drop into a REPL."""
    import code
    # Unused placeholder locals (implicit, client_id, client_secret) removed.
    esi = ESI(login_cb, logout_cb)
    print(esi.start_server())
    # Expose globals and locals (including `esi`) to the interactive console.
    gvars = globals().copy()
    gvars.update(locals())
    shell = code.InteractiveConsole(gvars)
    shell.interact()
if __name__ == '__main__':
    main()
import requests
import threading
import uuid
import webbrowser
from .server import StoppableHTTPServer, AuthHandler
from shortcircuit.model.logger import Logger
class ESI:
'''
ESI
We are bad boys here.
What should have been done is proxy auth server with code request, storage and all that stuff.
Instead we just follow implicit flow and ask to relogin every time.
From Russia with love.
'''
ENDPOINT_ESI_VERIFY = 'https://esi.evetech.net/verify'
ENDPOINT_ESI_LOCATION_FORMAT = 'https://esi.evetech.net/latest/characters/{}/location/'
ENDPOINT_ESI_UNIVERSE_NAMES = 'https://esi.evetech.net/latest/universe/names/'
ENDPOINT_ESI_UI_WAYPOINT = 'https://esi.evetech.net/latest/ui/autopilot/waypoint/'
ENDPOINT_EVE_AUTH_FORMAT = 'https://login.eveonline.com/oauth/authorize?response_type=token&redirect_uri={}&client_id={}&scope={}&state={}'
CLIENT_CALLBACK = 'http://127.0.0.1:7444/callback/'
CLIENT_ID = 'd802bba44b7c4f6cbfa2944b0e5ea83f'
CLIENT_SCOPES = [
'esi-location.read_location.v1',
'esi-ui.write_waypoint.v1',
]
def __init__(self, login_callback, logout_callback):
self.login_callback = login_callback
self.logout_callback = logout_callback
self.httpd = None
self.state = None
self.token = None
self.char_id = None
self.char_name = None
self.sso_timer = None
def start_server(self):
if not self.httpd:
# Server not running - restart it
Logger.debug('Starting server')
self.httpd = StoppableHTTPServer(
server_address=('127.0.0.1', 7444),
request_handler_class=AuthHandler,
timeout_callback=self.timeout_server
)
server_thread = threading.Thread(target=self.httpd.serve, args=(self.handle_login, ))
server_thread.setDaemon(True)
server_thread.start()
self.state = str(uuid.uuid4())
else:
# Server already running - reset timeout counter
self.httpd.tries = 0
scopes = ' '.join(ESI.CLIENT_SCOPES)
endpoint_auth = ESI.ENDPOINT_EVE_AUTH_FORMAT.format(ESI.CLIENT_CALLBACK, ESI.CLIENT_ID, scopes, self.state)
return webbrowser.open(endpoint_auth)
def timeout_server(self):
self.httpd = None
def stop_server(self):
Logger.debug('Stopping server')
if self.httpd:
self.httpd.stop()
self.httpd = None
def handle_login(self, message):
if not message:
return
if 'state' in message:
if message['state'][0] != self.state:
Logger.warning('OAUTH state mismatch')
return
if 'access_token' in message:
self.token = message['access_token'][0]
self.sso_timer = threading.Timer(int(message['expires_in'][0]), self._logout)
self.sso_timer.setDaemon(True)
self.sso_timer.start()
r = requests.get(ESI.ENDPOINT_ESI_VERIFY, headers=self._get_headers())
if r.status_code == requests.codes.ok:
data = r.json()
self.char_id = data['CharacterID']
self.char_name = data['CharacterName']
else:
self.token = None
self.sso_timer = None
self.char_id = None
self.char_name = None
self.login_callback(self.char_name)
self.stop_server()
def _get_headers(self):
return {
'User-Agent': 'Short Circuit (minimally maintained by @Second_Fry), <EMAIL>',
'Authorization': 'Bearer {}'.format(self.token),
}
def get_char_location(self):
if not self.token:
return None
current_location_name = None
current_location_id = None
r = requests.get(ESI.ENDPOINT_ESI_LOCATION_FORMAT.format(self.char_id), headers=self._get_headers())
if r.status_code == requests.codes.ok:
current_location_id = r.json()['solar_system_id']
r = requests.post(ESI.ENDPOINT_ESI_UNIVERSE_NAMES, json=[str(current_location_id)])
if r.status_code == requests.codes.ok:
current_location_name = r.json()[0]['name']
return current_location_name
def set_char_destination(self, sys_id):
if not self.token:
return False
success = False
r = requests.post('{}?add_to_beginning=false&clear_other_waypoints=true&destination_id={}'.format(ESI.ENDPOINT_ESI_UI_WAYPOINT, sys_id), headers=self._get_headers())
if r.status_code == 204:
success = True
return success
def logout(self):
if self.sso_timer:
self.sso_timer.cancel()
self._logout()
def _logout(self):
self.token = None
self.char_id = None
self.char_name = None
self.logout_callback()
def login_cb(char_name):
print('Welcome, {}'.format(char_name))
def logout_cb():
print('Session expired')
def main():
import code
implicit = True
client_id = ''
client_secret = ''
esi = ESI(login_cb, logout_cb)
print(esi.start_server())
gvars = globals().copy()
gvars.update(locals())
shell = code.InteractiveConsole(gvars)
shell.interact()
if __name__ == '__main__':
main() | 0.349866 | 0.067393 |
import json
import jsonschema
import math
from pulp import *
def mathoptformat_to_pulp(node, name):
    """Convert a StochOptFormat node's subproblem into a PuLP problem.

    :param node: node dict with 'subproblem', 'state_variables' and
        'realizations' entries.
    :param name: name given to the PuLP problem.
    :return: dict with keys 'prob' (LpProblem), 'vars' (name -> LpVariable),
        'state_variables' and 'realizations' (passed through from *node*).
    :raises Exception: on unsupported MOI function/set heads.
    """
    sp = node['subproblem']
    # Create the problem with the correct optimization sense.
    sense = LpMaximize if sp['objective']['sense'] == 'max' else LpMinimize
    prob = LpProblem(name, sense)
    # Initialize the variables (renamed from `vars`, which shadowed the builtin).
    lp_vars = {}
    for x in sp['variables']:
        lp_vars[x['name']] = LpVariable(x['name'])
    # Add the objective function
    obj = sp['objective']['function']
    if obj['head'] == 'SingleVariable':
        prob += lp_vars[obj['variable']]
    elif obj['head'] == 'ScalarAffineFunction':
        prob += lpSum(
            term['coefficient'] * lp_vars[term['variable']] for term in obj['terms']
        ) + obj['constant']
    else:
        raise Exception('Unsupported objective: ' + str(obj))
    # Add the constraints
    for c in sp['constraints']:
        f, s = c['function'], c['set']
        if f['head'] == 'SingleVariable':
            # Variable-in-set constraints become variable bounds.
            x = f['variable']
            if s['head'] == 'GreaterThan':
                lp_vars[x].lowBound = s['lower']
            elif s['head'] == 'LessThan':
                lp_vars[x].upBound = s['upper']
            elif s['head'] == 'EqualTo':
                lp_vars[x].lowBound = s['value']
                lp_vars[x].upBound = s['value']
            elif s['head'] == 'Interval':
                lp_vars[x].lowBound = s['lower']
                lp_vars[x].upBound = s['upper']
            else:
                raise Exception('Unsupported set: ' + str(s))
        elif f['head'] == 'ScalarAffineFunction':
            lhs = lpSum(
                term['coefficient'] * lp_vars[term['variable']] for term in f['terms']
            ) + f['constant']
            if s['head'] == 'GreaterThan':
                prob += lhs >= s['lower']
            elif s['head'] == 'LessThan':
                prob += lhs <= s['upper']
            elif s['head'] == 'EqualTo':
                prob += lhs == s['value']
            elif s['head'] == 'Interval':
                prob += lhs <= s['upper']
                prob += lhs >= s['lower']
            else:
                raise Exception('Unsupported set: ' + str(s))
        else:
            raise Exception('Unsupported function: ' + str(f))
    return {
        'prob': prob,
        'vars': lp_vars,
        'state_variables': node['state_variables'],
        'realizations': node['realizations'],
    }
def solve_second_stage(node, state, noise):
    """Solve a second-stage subproblem for a given state and noise realization.

    Fixes the incoming state variables and the noise variables to the given
    values by pinning their bounds, solves, and returns the objective plus
    the duals of the state-fixing bounds.

    :param node: converted node dict (see ``mathoptformat_to_pulp``).
    :param state: mapping of state-variable name -> incoming value.
    :param noise: mapping of noise-variable name -> realized value.
    """
    for (name, s) in node['state_variables'].items():
        # Pin the incoming state variable to the first-stage decision.
        v = node['vars'][s['in']]
        v.lowBound = state[name]
        v.upBound = state[name]
    for (name, w) in noise.items():
        # Pin the noise variable to this realization.
        p = node['vars'][name]
        p.lowBound = w
        p.upBound = w
    node['prob'].solve()
    return {
        'objective': value(node['prob'].objective),
        # .dj is the reduced cost of the fixed incoming variable, i.e. the
        # dual information needed to build a Benders cut.
        'pi': {
            name: node['vars'][s['in']].dj
            for (name, s) in node['state_variables'].items()
        }
    }
def solve_first_stage(node):
    """Solve the first-stage problem and return the outgoing state values.

    :param node: converted node dict (see ``mathoptformat_to_pulp``).
    :return: mapping of state-variable name -> optimal outgoing value.
    """
    node['prob'].solve()
    return {
        name: node['vars'][s['out']].varValue
        for (name, s) in node['state_variables'].items()
    }
def add_cut(first_stage, x, ret):
    """Add a Benders (average) cut to the first-stage problem.

    :param first_stage: converted first-stage node dict (with 'theta').
    :param x: first-stage state values around which the cut is taken.
    :param ret: list of (probability, second-stage result dict) pairs as
        produced by ``solve_second_stage``.
    """
    # Probability-weighted second-stage objective, linearized around x
    # using the duals pi.
    cut_term = lpSum(
        p * r['objective'] +
        p * lpSum(
            r['pi'][name] * (first_stage['vars'][s['out']] - x[name])
            for (name, s) in first_stage['state_variables'].items()
        )
        for (p, r) in ret
    )
    # sense == -1 is LpMaximize: theta over-estimates, so bound it above;
    # otherwise (minimization) theta under-estimates and is bounded below.
    if first_stage['prob'].sense == -1:
        first_stage['prob'] += first_stage['theta'] <= cut_term
    else:
        first_stage['prob'] += first_stage['theta'] >= cut_term
def load_two_stage_problem(filename):
    """Load a two-node StochOptFormat file into first/second-stage problems.

    :param filename: path to a .sof.json file (version 0.1, two nodes).
    :return: (first_stage, second_stage) converted node dicts; the first
        stage gains a 'theta' variable approximating the second-stage cost.
    """
    with open(filename, 'r') as io:
        data = json.load(io)
    # Only version-0.1 two-stage instances are supported.
    assert(data['version']['major'] == 0)
    assert(data['version']['minor'] == 1)
    assert(len(data['nodes']) == 2)
    assert(len(data['edges']) == 2)
    nodes = {
        name: mathoptformat_to_pulp(node, name)
        for (name, node) in data['nodes'].items()
    }
    first_stage, second_stage = None, None
    # The node reached directly from the root is the first stage.
    for edge in data['edges']:
        if edge['from'] == data['root']['name']:
            first_stage = nodes[edge['to']]
        else:
            second_stage = nodes[edge['to']]
    for (name, init) in data['root']['state_variables'].items():
        # Fix the incoming first-stage state to the root's initial values.
        x = first_stage['vars'][first_stage['state_variables'][name]['in']]
        x.lowBound = init['initial_value']
        x.upBound = init['initial_value']
    # theta approximates the expected second-stage objective; the big bounds
    # keep the first solve finite before any cuts exist.
    first_stage['theta'] = LpVariable("theta", -10**6, 10**6)
    first_stage['prob'].objective += first_stage['theta']
    return first_stage, second_stage
def benders(first_stage, second_stage, iteration_limit = 20):
    """Run Benders decomposition on a two-stage problem.

    :param first_stage: converted first-stage node dict (with 'theta').
    :param second_stage: converted second-stage node dict.
    :param iteration_limit: maximum number of cut iterations.
    :return: list of (deterministic bound, statistical bound) per iteration.
    """
    bounds = []
    # Loop variable renamed from `iter` (shadowed the builtin) and is unused.
    for _ in range(iteration_limit):
        x = solve_first_stage(first_stage)
        # Evaluate every noise realization at the current first-stage point.
        ret = [(
            noise['probability'],
            solve_second_stage(second_stage, x, noise['support'])
        ) for noise in second_stage['realizations']]
        add_cut(first_stage, x, ret)
        det_bound = value(first_stage['prob'].objective)
        # Statistical bound: replace theta by the actual expected
        # second-stage objective at this point.
        stat_bound = det_bound - first_stage['theta'].varValue + sum(
            p * value(r['objective']) for (p, r) in ret
        )
        bounds.append((det_bound, stat_bound))
        # Converged once the two bounds agree.
        if abs(det_bound - stat_bound) < 1e-6:
            break
    return bounds
def validate(filename):
    """Validate *filename* against the StochOptFormat JSON schema.

    :param filename: path to the instance file to check.
    :raises jsonschema.exceptions.ValidationError: if it does not conform.
    """
    with open(filename, 'r') as io:
        instance = json.load(io)
    with open('../sof.schema.json', 'r') as io:
        schema = json.load(io)
    jsonschema.validate(instance = instance, schema = schema)
# Example driver: validate, solve and sanity-check the news-vendor instance.
validate('news_vendor.sof.json')
first_stage, second_stage = load_two_stage_problem('news_vendor.sof.json')
ret = benders(first_stage, second_stage)
# Check solution!
x = solve_first_stage(first_stage)
assert(x['x'] == 10)
print(ret)
import json
import jsonschema
import math
from pulp import *
def mathoptformat_to_pulp(node, name):
sp = node['subproblem']
# Create the problem
sense = LpMaximize if sp['objective']['sense'] == 'max' else LpMinimize
prob = LpProblem(name, sense)
# Initialize the variables
vars = {}
for x in sp['variables']:
vars[x['name']] = LpVariable(x['name'])
# Add the objective function
obj = sp['objective']['function']
if obj['head'] == 'SingleVariable':
prob += vars[obj['variable']]
elif obj['head'] == 'ScalarAffineFunction':
prob += lpSum(
term['coefficient'] * vars[term['variable']] for term in obj['terms']
) + obj['constant']
else:
raise(Exception('Unsupported objective: ' + str(obj)))
# Add the constraints
for c in sp['constraints']:
f, s = c['function'], c['set']
if f['head'] == 'SingleVariable':
x = f['variable']
if s['head'] == 'GreaterThan':
vars[x].lowBound = s['lower']
elif s['head'] == 'LessThan':
vars[x].upBound = s['upper']
elif s['head'] == 'EqualTo':
vars[x].lowBound = s['value']
vars[x].upBound = s['value']
elif s['head'] == 'Interval':
vars[x].lowBound = s['lower']
vars[x].upBound = s['upper']
else:
raise(Exception('Unsupported set: ' + str(s)))
elif f['head'] == 'ScalarAffineFunction':
lhs = lpSum(
term['coefficient'] * vars[term['variable']] for term in f['terms']
) + f['constant']
if s['head'] == 'GreaterThan':
prob += lhs >= s['lower']
elif s['head'] == 'LessThan':
prob += lhs <= s['upper']
elif s['head'] == 'EqualTo':
prob += lhs == s['value']
elif s['head'] == 'Interval':
prob += lhs <= s['upper']
prob += lhs >= s['lower']
else:
raise(Exception('Unsupported set: ' + str(s)))
else:
raise(Exception('Unsupported function: ' + str(f)))
return {
'prob': prob,
'vars': vars,
'state_variables': node['state_variables'],
'realizations': node['realizations'],
}
def solve_second_stage(node, state, noise):
for (name, s) in node['state_variables'].items():
v = node['vars'][s['in']]
v.lowBound = state[name]
v.upBound = state[name]
for (name, w) in noise.items():
p = node['vars'][name]
p.lowBound = w
p.upBound = w
node['prob'].solve()
return {
'objective': value(node['prob'].objective),
'pi': {
name: node['vars'][s['in']].dj
for (name, s) in node['state_variables'].items()
}
}
def solve_first_stage(node):
node['prob'].solve()
return {
name: node['vars'][s['out']].varValue
for (name, s) in node['state_variables'].items()
}
def add_cut(first_stage, x, ret):
cut_term = lpSum(
p * r['objective'] +
p * lpSum(
r['pi'][name] * (first_stage['vars'][s['out']] - x[name])
for (name, s) in first_stage['state_variables'].items()
)
for (p, r) in ret
)
if first_stage['prob'].sense == -1:
first_stage['prob'] += first_stage['theta'] <= cut_term
else:
first_stage['prob'] += first_stage['theta'] >= cut_term
def load_two_stage_problem(filename):
with open(filename, 'r') as io:
data = json.load(io)
assert(data['version']['major'] == 0)
assert(data['version']['minor'] == 1)
assert(len(data['nodes']) == 2)
assert(len(data['edges']) == 2)
nodes = {
name: mathoptformat_to_pulp(node, name)
for (name, node) in data['nodes'].items()
}
first_stage, second_stage = None, None
for edge in data['edges']:
if edge['from'] == data['root']['name']:
first_stage = nodes[edge['to']]
else:
second_stage = nodes[edge['to']]
for (name, init) in data['root']['state_variables'].items():
x = first_stage['vars'][first_stage['state_variables'][name]['in']]
x.lowBound = init['initial_value']
x.upBound = init['initial_value']
first_stage['theta'] = LpVariable("theta", -10**6, 10**6)
first_stage['prob'].objective += first_stage['theta']
return first_stage, second_stage
def benders(first_stage, second_stage, iteration_limit = 20):
bounds = []
for iter in range(iteration_limit):
x = solve_first_stage(first_stage)
ret = [(
noise['probability'],
solve_second_stage(second_stage, x, noise['support'])
) for noise in second_stage['realizations']]
add_cut(first_stage, x, ret)
det_bound = value(first_stage['prob'].objective)
stat_bound = det_bound - first_stage['theta'].varValue + sum(
p * value(r['objective']) for (p, r) in ret
)
bounds.append((det_bound, stat_bound))
if abs(det_bound - stat_bound) < 1e-6:
break
return bounds
def validate(filename):
with open(filename, 'r') as io:
instance = json.load(io)
with open('../sof.schema.json', 'r') as io:
schema = json.load(io)
jsonschema.validate(instance = instance, schema = schema)
validate('news_vendor.sof.json')
first_stage, second_stage = load_two_stage_problem('news_vendor.sof.json')
ret = benders(first_stage, second_stage)
# Check solution!
x = solve_first_stage(first_stage)
assert(x['x'] == 10)
print(ret) | 0.366476 | 0.236098 |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from tqdm import tqdm
class EvolutionaryParameterOptimizer:
    """
    Evolutionary Algorithm Optimizer for arbitrary parameter_discretization which allows it to solve integer optimizations
    """
    def __init__(self, parameter_ranges, parameter_discretization, fitness_function, population_size=20,
                 replacement_proportion=0.5, generations=20, minimize=False, show_evolution=False,
                 random_variation_probability=0.1, crossover_probability=0.7):
        """Configure the optimizer and immediately run the optimization.

        :param parameter_ranges: sequence of (low, high) bounds per parameter.
        :param parameter_discretization: grid step per parameter (1 -> integers).
        :param fitness_function: evaluated as fitness_function(*individual).
        :param population_size: number of individuals per generation.
        :param replacement_proportion: fraction of the population that survives.
        :param generations: number of generations to run.
        :param minimize: if True, lower fitness is better.
        :param show_evolution: if True, plot the population every generation.
        :param random_variation_probability: chance a child is fully random.
        :param crossover_probability: chance a child is a two-parent average.
        """
        self.minimize = minimize
        self.show_evolution = show_evolution
        if self.show_evolution:
            plt.ion()
            self.cb = None  # colorbar handle, created lazily in show()
        self.population_size = population_size
        assert 0 < self.population_size
        self.replacement_proportion = replacement_proportion
        assert 0.0 < self.replacement_proportion <= 1.0
        self.parameter_ranges = np.array(parameter_ranges)
        self.parameter_discretization = np.array(parameter_discretization)
        assert len(self.parameter_ranges) == len(self.parameter_discretization)
        self.fitness_function = fitness_function
        self.best_individual = None
        self.best_fitness = float("inf") if self.minimize else -float("inf")
        self.fitnesses = np.zeros(self.population_size)
        self.parameter_interval = self.parameter_ranges[:, 1] - self.parameter_ranges[:, 0]
        # Random initial population, snapped onto the discretization grid.
        self.population = np.random.random((self.population_size, len(self.parameter_ranges))) * \
            self.parameter_interval + self.parameter_ranges[:, 0]
        self.population -= (self.population % self.parameter_discretization)
        self.generations = generations
        assert 0 < self.generations
        self.random_variation_probability = random_variation_probability
        assert 0.0 <= self.random_variation_probability <= 1.0
        # Stored as the cumulative threshold used when drawing a child type.
        self.crossover_probability = self.random_variation_probability + crossover_probability
        assert 0.0 <= self.crossover_probability <= 1.0
        self.fitness_cache = dict()
        self.best_fitnesses = []
        self.optimize()

    def evaluate_fitness(self):
        """Evaluate (or look up) every individual's fitness, tracking the best."""
        fitnesses = []
        for individual in self.population:
            if tuple(individual) in self.fitness_cache:
                fitness = self.fitness_cache[tuple(individual)]
            else:
                fitness = self.fitness_function(*individual)
                self.fitness_cache[tuple(individual)] = fitness
            fitnesses.append(fitness)
            if (self.minimize and fitness < self.best_fitness) or (not self.minimize and fitness > self.best_fitness):
                self.best_fitness = fitness
                self.best_individual = individual
        self.fitnesses = np.array(fitnesses)
        self.best_fitnesses.append(self.best_fitness)

    def replacement(self):
        """Keep only the fittest replacement_proportion of the population."""
        population_fitness_variation = self.get_population_fitness_variation(noise_level=0.01)
        survivors_inx = sorted(range(self.population_size),
                               key=lambda x: population_fitness_variation[x],
                               reverse=True)[0:int(self.population_size * self.replacement_proportion)]
        self.population = self.population[survivors_inx]
        self.fitnesses = self.fitnesses[survivors_inx]

    def get_population_fitness_variation(self, noise_level=0.0):
        """Return non-negative selection scores (higher is better), plus noise.

        BUG FIX: work on a copy of self.fitnesses.  Previously, when
        maximizing, this returned self.fitnesses itself and the in-place
        += / -= below permanently injected noise into the recorded fitness
        values.
        """
        population_fitness_variation = -1 * self.fitnesses if self.minimize else self.fitnesses.copy()
        population_fitness_variation += np.random.normal(0, noise_level, population_fitness_variation.shape)
        population_fitness_variation -= population_fitness_variation.min()
        return population_fitness_variation

    def population_variation(self):
        """Refill the population with random, crossover and mutated children."""
        population_fitness_variation = self.get_population_fitness_variation(noise_level=0.01)
        new_population = list(self.population)
        total_fitness_variation = sum(population_fitness_variation)
        # Fitness-proportional selection weights for parent sampling.
        _selection_weights = (population_fitness_variation / total_fitness_variation)
        while len(new_population) < self.population_size:
            rnd = np.random.random()
            if rnd < self.random_variation_probability:  # go random (random_variation_probability of time)
                child = np.random.random((len(self.parameter_ranges))) * self.parameter_interval + self.parameter_ranges[:, 0]
                child -= (child % self.parameter_discretization)
            elif rnd < self.crossover_probability:  # sexual reproduction (crossover_probability of time)
                father_index, mother_index = np.random.choice(range(len(self.population)), 2,
                                                              replace=False, p=_selection_weights)
                father, mother = self.population[father_index], self.population[mother_index]
                child = (father + mother) / 2
                child -= (child % self.parameter_discretization)
            else:  # asexual reproduction (rest of time)
                parent_index = np.random.choice(range(len(self.population)), 1,
                                                replace=False, p=_selection_weights)[0]
                parent = self.population[parent_index]
                child = []
                for i in range(len(parent)):
                    # Mutation step scaled by the population spread in dimension i.
                    s = int(np.std(self.population[:, i]) + 1)
                    d = s * int(np.random.normal(0, 10)) * self.parameter_discretization[i]
                    child_param = parent[i] + d
                    # Clamp the mutated parameter back into its allowed range.
                    child_param = min(self.parameter_ranges[i][1], child_param)
                    child_param = max(self.parameter_ranges[i][0], child_param)
                    child.append(child_param)
            new_population.append(np.array(child))
        self.population = np.array(new_population)

    def optimize(self):
        """Run the evolutionary loop for the configured number of generations."""
        for _ in tqdm(range(self.generations)):
            self.population_variation()
            self.evaluate_fitness()
            if self.show_evolution:
                self.show()
            self.replacement()

    def show(self):
        """Plot the current population (2D scatter) or the best-fitness curve."""
        if len(self.best_individual) == 2:
            plt.cla()
            xs = self.population
            x, y = xs[:, 0], xs[:, 1]
            z = self.fitnesses
            sc = plt.scatter(x, y, c=z, marker='o', cmap=cm.jet, label="all_fitnesses")
            if self.best_individual is not None:
                plt.scatter(self.best_individual[0], self.best_individual[1], c='r', marker='^', label="best_fitness")
            if self.cb is None:
                self.cb = plt.colorbar(sc)
            plt.xlim(*self.parameter_ranges[0])
            plt.ylim(*self.parameter_ranges[1])
            plt.pause(0.00001)
        else:
            plt.cla()
            plt.plot(self.best_fitnesses)
            plt.pause(0.00001)
import numpy as np
from matplotlib import cm
from tqdm import tqdm
class EvolutionaryParameterOptimizer:
"""
Evolutionary Algorithm Optimizer for arbitrary parameter_discretization which allows it to solve integer optimizations
"""
def __init__(self, parameter_ranges, parameter_discretization, fitness_function, population_size=20,
replacement_proportion=0.5, generations=20, minimize=False, show_evolution=False,
random_variation_probability=0.1, crossover_probability=0.7):
self.minimize = minimize
self.show_evolution = show_evolution
if self.show_evolution:
plt.ion()
self.cb = None
self.population_size = population_size
assert 0 < self.population_size
self.replacement_proportion = replacement_proportion
assert 0.0 < self.replacement_proportion <= 1.0
self.parameter_ranges = np.array(parameter_ranges)
self.parameter_discretization = np.array(parameter_discretization)
assert len(self.parameter_ranges) == len(self.parameter_discretization)
self.fitness_function = fitness_function
self.best_individual = None
self.best_fitness = float("inf") if self.minimize else -float("inf")
self.fitnesses = np.zeros(self.population_size)
self.parameter_interval = self.parameter_ranges[:, 1] - self.parameter_ranges[:, 0]
self.population = np.random.random((self.population_size, len(self.parameter_ranges))) *\
self.parameter_interval + self.parameter_ranges[:, 0]
self.population -= (self.population % self.parameter_discretization)
self.generations = generations
assert 0 < self.generations
self.random_variation_probability = random_variation_probability
assert 0.0 <= self.random_variation_probability <= 1.0
self.crossover_probability = self.random_variation_probability + crossover_probability
assert 0.0 <= self.crossover_probability <= 1.0
self.fitness_cache = dict()
self.best_fitnesses = []
self.optimize()
def evaluate_fitness(self):
fitnesses = []
for individual in self.population:
if tuple(individual) in self.fitness_cache:
fitness = self.fitness_cache[tuple(individual)]
else:
fitness = self.fitness_function(*individual)
self.fitness_cache[tuple(individual)] = fitness
fitnesses.append(fitness)
if (self.minimize and fitness < self.best_fitness) or (not self.minimize and fitness > self.best_fitness):
self.best_fitness = fitness
self.best_individual = individual
self.fitnesses = np.array(fitnesses)
self.best_fitnesses.append(self.best_fitness)
def replacement(self):
population_fitness_variation = self.get_population_fitness_variation(noise_level=0.01)
survivors_inx = sorted(range(self.population_size),
key=lambda x: population_fitness_variation[x],
reverse=True)[0:int(self.population_size * self.replacement_proportion)]
self.population = self.population[survivors_inx]
self.fitnesses = self.fitnesses[survivors_inx]
def get_population_fitness_variation(self, noise_level=0.0):
population_fitness_variation = -1 * self.fitnesses if self.minimize else self.fitnesses
population_fitness_variation += np.random.normal(0, noise_level, population_fitness_variation.shape)
population_fitness_variation -= population_fitness_variation.min()
return population_fitness_variation
def population_variation(self):
population_fitness_variation = self.get_population_fitness_variation(noise_level=0.01)
new_population = list(self.population)
total_fitness_variation = sum(population_fitness_variation)
_selection_weights = (population_fitness_variation / total_fitness_variation)
while len(new_population) < self.population_size:
rnd = np.random.random()
if rnd < self.random_variation_probability: # go random (random_variation_probability of time)
child = np.random.random((len(self.parameter_ranges))) * self.parameter_interval + self.parameter_ranges[:, 0]
child -= (child % self.parameter_discretization)
elif rnd < self.crossover_probability: # sexual reproduction (crossover_probability of time)
father_index, mother_index = np.random.choice(range(len(self.population)), 2,
replace=False, p=_selection_weights)
father, mother = self.population[father_index], self.population[mother_index]
child = (father + mother) / 2
child -= (child % self.parameter_discretization)
else: # asexual reproduction (rest of time)
parent_index = np.random.choice(range(len(self.population)), 1,
replace=False, p=_selection_weights)[0]
parent = self.population[parent_index]
child = []
for i in range(len(parent)):
s = int(np.std(self.population[:, i]) + 1)
d = s * int(np.random.normal(0, 10)) * self.parameter_discretization[i]
child_param = parent[i] + d
child_param = min(self.parameter_ranges[i][1], child_param)
child_param = max(self.parameter_ranges[i][0], child_param)
child.append(child_param)
new_population.append(np.array(child))
self.population = np.array(new_population)
def optimize(self):
for _ in tqdm(range(self.generations)):
self.population_variation()
self.evaluate_fitness()
if self.show_evolution:
self.show()
self.replacement()
def show(self):
if len(self.best_individual) == 2:
plt.cla()
xs = self.population
x, y = xs[:, 0], xs[:, 1]
z = self.fitnesses
sc = plt.scatter(x, y, c=z, marker='o', cmap=cm.jet, label="all_fitnesses")
if self.best_individual is not None:
plt.scatter(self.best_individual[0], self.best_individual[1], c='r', marker='^', label="best_fitness")
if self.cb is None:
self.cb = plt.colorbar(sc)
plt.xlim(*self.parameter_ranges[0])
plt.ylim(*self.parameter_ranges[1])
plt.pause(0.00001)
else:
plt.cla()
plt.plot(self.best_fitnesses)
plt.pause(0.00001) | 0.63307 | 0.657593 |
__author__ = 'lily'
import numpy as np
from sklearn.decomposition import ProjectedGradientNMF
import recsys
import evaluate
import similarity
from sklearn import decomposition
from numpy.linalg import inv
from sklearn.metrics.pairwise import pairwise_distances
#feature helper and user_feature are derived from lambda functions
class content(recsys.recsys):
    """Content-based recommender built on item/user feature similarity.

    feature_helper and user_feat are derived from lambda functions supplied
    by the caller; similarity is computed via sklearn's pairwise_distances.
    """
    def __init__(self, X, similarity_helper = None, feature_helper = None, score_helper = None, \
            item_feat = None, user_feat = None, cluster=None):
        super(content, self).__init__(X)
        self.feature_helper = feature_helper
        self.score_helper = score_helper
        self.item_feat = item_feat
        self.user_feat = user_feat
        self.similarity_helper = similarity_helper

    def get_helper2(self, name, function):
        # Delegate helper registration to the base class.
        super(content, self).get_helper2(name, function)

    def get_parameters(self):
        # No tunable parameters for the plain content model.
        pass

    def predict_for_user(self, user_ratings, user_feat, k, feature_transform_all =None):
        """Recommend the top-k items for a single user.

        :param user_ratings: indices of items the user has already rated.
        :param user_feat: feature vector(s) for this user.
        :param k: number of items to return.
        :param feature_transform_all: optional precomputed item features.
        """
        # shape returns the rows and columns of the ratings matrix
        Nitems, Nusers = self.X.shape
        # BUG FIX: compare against None with `is`, not `==`.  When
        # feature_transform_all is a numpy array, `== None` compares
        # elementwise and raises in boolean context.
        if feature_transform_all is None:
            if self.feature_helper is None:
                item_transform = self.item_feat
                user_transform = user_feat
            else:
                item_transform, user_transform = self.feature_helper(X=user_ratings, item_feat = self.item_feat, user_feat = user_feat)
        else:
            item_transform = feature_transform_all
            lol, user_transform = self.feature_helper(X=user_ratings, item_feat = self.item_feat[:, 1], user_feat = user_feat)
        # Similarity between each item and this user's features.
        S = pairwise_distances(item_transform, user_transform, self.similarity_helper)
        predicted_values = S
        # Zero out items the user already rated so they are not re-recommended.
        predicted_values[np.asarray(user_ratings)] = 0
        result = np.argsort(predicted_values)
        return result[0:k]

    def fit(self, train_indices = None, test_indices = None):
        """Fit on training indices and store the predicted similarity matrix."""
        super(content, self).transform_training(train_indices, test_indices)  # setting up training data
        # Optionally transform the raw item/user features first.
        if self.feature_helper is None:
            item_transform = self.item_feat
            user_transform = self.user_feat
        else:
            item_transform, user_transform = self.feature_helper(X=self.X_train, item_feat = self.item_feat, user_feat = self.user_feat)
        # Item-vs-user similarity matrix used as the score prediction.
        S = pairwise_distances(item_transform, user_transform, self.similarity_helper)
        # Known positives get the maximal score.
        S[self.X_train == 1] = 1
        self.X_predict = S

    def score(self, truth_index):
        # Delegate scoring to the base class.
        return super(content, self).score(truth_index)
def user_to_item(X_train, item_feat, user_feat, start, end):
    """Derive item features by averaging the features of the users linked to each item.

    X_train   : 0/1 item-x-user incidence matrix (store x mall in the original domain).
    user_feat : user feature matrix; columns [start, end] (inclusive) are used.
    Returns (item_transform, user_transform).
    """
    START = start
    END = end + 1  # +1 because `end` is an inclusive column index
    user_transform = user_feat[:, START:END]
    item_transform = np.zeros((X_train.shape[0], END - START))
    # possibly faster if you use a join and a group in pandas
    for i in np.arange(X_train.shape[0]):  # go through all items (stores)
        mall_indexes = (X_train[i, :] == 1)  # users (malls) linked to item i
        store_features = user_feat[mall_indexes, :][:, START:END]
        # FIX: the original tested np.sum(np.nonzero(...)) == 0, which is also
        # true when the ONLY match is user index 0 (sum of indices == 0),
        # wrongly treating that item as having no users. Count matches instead.
        if np.count_nonzero(mall_indexes) == 0:
            # No linked users: fall back to a small constant vector.
            test = .001 * np.ones(store_features.shape[1])
        else:
            test = np.average(store_features, axis=0)
        item_transform[i, :] = test
    return (item_transform, user_transform)
#helper that extracts columns from a the mall matrix
def user_to_item_helper(start, end):
    """Build a feature-helper closure over a fixed [start, end] column window
    of the user feature matrix."""
    def helper(X, item_feat, user_feat):
        return user_to_item(X, item_feat, user_feat, start, end)
    return helper
#This is for testing purposes
# X = np.array([[1, 1, 1, 1] , [1, 1, 0, 0], [1, 0, 1, 0]])
# user_feat = np.array([[1, 1, 1, 2, 3], [0, 0, 4, 5, 6], [1, 0, 7, 8, 9], [0,1 , 10, 11, 12]])
# item_feat = None
# fun = user_to_item_helper(2, 4)
# cosine = similarity.cosine()
# test = content(X, similarity_helper=cosine, user_feat=user_feat, item_feat=item_feat, feature_helper=fun)
# test.fit()
import numpy as np
from sklearn.decomposition import ProjectedGradientNMF
import recsys
import evaluate
import similarity
from sklearn import decomposition
from numpy.linalg import inv
from sklearn.metrics.pairwise import pairwise_distances
#feature helper and user_feature are derived from lambda functions
#feature helper and user_feature are derived from lambda functions
class content(recsys.recsys):
    """Content-based recommender.

    Scores item/user pairs by the pairwise similarity of their (optionally
    transformed) feature vectors. ``feature_helper`` and
    ``similarity_helper`` are injected callables (see module comment).
    """
    def __init__(self, X, similarity_helper=None, feature_helper=None, score_helper=None,
                 item_feat=None, user_feat=None, cluster=None):
        """Store the rating matrix X plus the helper callables and raw features."""
        super(content, self).__init__(X)
        self.feature_helper = feature_helper
        self.score_helper = score_helper
        self.item_feat = item_feat
        self.user_feat = user_feat
        self.similarity_helper = similarity_helper

    def get_helper2(self, name, function):
        """Delegate helper registration to the recsys base class."""
        super(content, self).get_helper2(name, function)

    def get_parameters(self):
        """No tunable parameters for the content model."""
        pass

    def predict_for_user(self, user_ratings, user_feat, k, feature_transform_all=None):
        """Return the indices of the top-k items predicted for one user.

        feature_transform_all, when given, is a precomputed item feature
        matrix that bypasses the item side of feature_helper.
        """
        Nitems, Nusers = self.X.shape
        # FIX: use `is None` instead of `== None`; comparing a numpy array to
        # None with == is elementwise and makes the `if` ambiguous/raising.
        if feature_transform_all is None:
            if self.feature_helper is None:
                item_transform = self.item_feat
                user_transform = user_feat
            else:
                item_transform, user_transform = self.feature_helper(X=user_ratings, item_feat=self.item_feat, user_feat=user_feat)
        else:
            item_transform = feature_transform_all
            lol, user_transform = self.feature_helper(X=user_ratings, item_feat=self.item_feat[:, 1], user_feat=user_feat)
        # Similarity between every item and this user's feature vector.
        S = pairwise_distances(item_transform, user_transform, self.similarity_helper)
        predicted_values = S
        # Zero out items the user already rated so they are not recommended.
        predicted_values[np.asarray(user_ratings)] = 0
        result = np.argsort(predicted_values)
        return result[0:k]

    def fit(self, train_indices=None, test_indices=None):
        """Build the item-user similarity matrix from the training split."""
        super(content, self).transform_training(train_indices, test_indices)  # setting up training data
        # FIX: `is None` instead of `== None` (same rationale as above).
        if self.feature_helper is None:
            item_transform = self.item_feat
            user_transform = self.user_feat
        else:
            item_transform, user_transform = self.feature_helper(X=self.X_train, item_feat=self.item_feat, user_feat=self.user_feat)
        S = pairwise_distances(item_transform, user_transform, self.similarity_helper)
        # Known training interactions are pinned to the maximum score.
        S[self.X_train == 1] = 1
        self.X_predict = S

    def score(self, truth_index):
        """Delegate scoring to the recsys base class."""
        return super(content, self).score(truth_index)
def user_to_item(X_train, item_feat, user_feat, start, end):
    """Derive item features by averaging the features of the users linked to each item.

    X_train   : 0/1 item-x-user incidence matrix (store x mall in the original domain).
    user_feat : user feature matrix; columns [start, end] (inclusive) are used.
    Returns (item_transform, user_transform).
    """
    START = start
    END = end + 1  # +1 because `end` is an inclusive column index
    user_transform = user_feat[:, START:END]
    item_transform = np.zeros((X_train.shape[0], END - START))
    # possibly faster if you use a join and a group in pandas
    for i in np.arange(X_train.shape[0]):  # go through all items (stores)
        mall_indexes = (X_train[i, :] == 1)  # users (malls) linked to item i
        store_features = user_feat[mall_indexes, :][:, START:END]
        # FIX: the original tested np.sum(np.nonzero(...)) == 0, which is also
        # true when the ONLY match is user index 0 (sum of indices == 0),
        # wrongly treating that item as having no users. Count matches instead.
        if np.count_nonzero(mall_indexes) == 0:
            # No linked users: fall back to a small constant vector.
            test = .001 * np.ones(store_features.shape[1])
        else:
            test = np.average(store_features, axis=0)
        item_transform[i, :] = test
    return (item_transform, user_transform)
#helper that extracts columns from a the mall matrix
def user_to_item_helper(start, end):
    """Build a feature-helper closure over a fixed [start, end] column window
    of the user feature matrix."""
    def helper(X, item_feat, user_feat):
        return user_to_item(X, item_feat, user_feat, start, end)
    return helper
#This is for testing purposes
# X = np.array([[1, 1, 1, 1] , [1, 1, 0, 0], [1, 0, 1, 0]])
# user_feat = np.array([[1, 1, 1, 2, 3], [0, 0, 4, 5, 6], [1, 0, 7, 8, 9], [0,1 , 10, 11, 12]])
# item_feat = None
# fun = user_to_item_helper(2, 4)
# cosine = similarity.cosine()
# test = content(X, similarity_helper=cosine, user_feat=user_feat, item_feat=item_feat, feature_helper=fun)
# test.fit()
from __future__ import print_function
import cx_Oracle
import datetime
import calendar
import sys
import logging
import CondCore.Utilities.conddb_serialization_metadata as sm
import CondCore.Utilities.credentials as auth
import CondCore.Utilities.conddb_time as conddb_time
import os
# Environment variable naming the directory that holds the DB credentials.
authPathEnvVar = 'COND_AUTH_PATH'
# Known database services: (TNS alias, {access type ('r'/'w') -> credential key}).
prod_db_service = ('cms_orcon_prod',{'w':'cms_orcon_prod/cms_cond_general_w','r':'cms_orcon_prod/cms_cond_general_r'})
adg_db_service = ('cms_orcon_adg',{'r':'cms_orcon_adg/cms_cond_general_r'})
dev_db_service = ('cms_orcoff_prep',{'w':'cms_orcoff_prep/cms_cond_general_w','r':'cms_orcoff_prep/cms_cond_general_r'})
# Oracle schema that owns the conditions tables.
schema_name = 'CMS_CONDITIONS'
# Console logging format and default verbosity.
fmt_str = "[%(asctime)s] %(levelname)s: %(message)s"
logLevel = logging.INFO
logFormatter = logging.Formatter(fmt_str)
def print_table( headers, table ):
    """Render *headers* and the rows of *table* as left-aligned, space-padded
    columns, separated from the data by a dashed rule."""
    # Column widths: start from the header widths, then widen to fit each cell.
    widths = [len(h) for h in headers]
    for row in table:
        for col, cell in enumerate(row):
            if col < len(widths):
                widths[col] = max(widths[col], len(str(cell)))
    def emit(row):
        # One left-justified field per column, each followed by a single space.
        pieces = []
        for col, w in enumerate(widths):
            pieces.append(('{:<%s}' % w).format(row[col]) + ' ')
        print(''.join(pieces))
    emit(headers)
    # Dashed separator, one run of '-' per column.
    rule = ''.join(('{:-<%s}' % w).format('') + ' ' for w in widths)
    print(rule)
    for row in table:
        emit(row)
class version_db(object):
    """Accessor for the serialization-version bookkeeping tables
    (CMSSW_BOOST_MAP, BOOST_RUN_MAP) in the conditions database."""
    def __init__(self, db ):
        # db is an already-open cx_Oracle connection.
        self.db = db
        # cmssw_version -> boost_version cache, filled by fetch_cmssw_boost_map().
        self.cmssw_boost_map = {}
        # (run, run_start_time, boost_version, insertion_time) tuples.
        self.boost_run_map = []
    def fetch_cmssw_boost_map( self ):
        """Load the full CMSSW -> boost version map into the cache and return it."""
        cursor = self.db.cursor()
        cursor.execute('SELECT BOOST_VERSION, CMSSW_VERSION FROM CMSSW_BOOST_MAP');
        rows = cursor.fetchall()
        self.cmssw_boost_map = {}
        for r in rows:
            # Key by CMSSW version (column 1); value is the boost version (column 0).
            self.cmssw_boost_map[r[1]]=r[0]
        return self.cmssw_boost_map
    def fetch_boost_run_map( self ):
        """Load the run -> boost-version map, ordered by run then insertion time."""
        cursor = self.db.cursor()
        cursor.execute('SELECT RUN_NUMBER, RUN_START_TIME, BOOST_VERSION, INSERTION_TIME FROM BOOST_RUN_MAP ORDER BY RUN_NUMBER, INSERTION_TIME')
        rows = cursor.fetchall()
        self.boost_run_map = []
        for r in rows:
            self.boost_run_map.append( (r[0],r[1],r[2],str(r[3])) )
        return self.boost_run_map
    def insert_boost_run_range( self, run, boost_version, min_ts ):
        """Insert a boost-version validity range starting at *run*.

        The start time comes from RUN_INFO when the run (or a later one) is
        known there; otherwise *min_ts* is parsed as the start timestamp.
        """
        cursor = self.db.cursor()
        cursor.execute('SELECT MIN(RUN_NUMBER) FROM RUN_INFO WHERE RUN_NUMBER >= :RUN',(run,))
        res = cursor.fetchone()
        if res is not None and res[0] is not None:
            min_run = res[0]
            cursor.execute('SELECT START_TIME FROM RUN_INFO WHERE RUN_NUMBER=:RUN',(min_run,))
            min_run_time = cursor.fetchone()[0]
            # Pack UTC epoch seconds into the high 32 bits (conddb Time encoding).
            min_run_ts = calendar.timegm( min_run_time.utctimetuple() ) << 32
        else:
            min_run = run
            min_run_ts = conddb_time.string_to_timestamp(min_ts)
        now = datetime.datetime.utcnow()
        cursor.execute('INSERT INTO BOOST_RUN_MAP ( RUN_NUMBER, RUN_START_TIME, BOOST_VERSION, INSERTION_TIME ) VALUES (:RUN, :RUN_START_T, :BOOST, :TIME)',(run,min_run_ts,boost_version,now) )
    def insert_cmssw_boost( self, cmssw_version,boost_version ):
        """Persist the boost version shipped with *cmssw_version*."""
        cursor = self.db.cursor()
        cursor.execute('INSERT INTO CMSSW_BOOST_MAP ( CMSSW_VERSION, BOOST_VERSION ) VALUES ( :CMSSW_VERSION, :BOOST_VERSION )',(cmssw_version,boost_version))
    def lookup_boost_in_cmssw( self, cmssw_version ):
        """Return the boost version used by *cmssw_version*.

        Resolves release cycles to a concrete release by scanning the release
        area on disk, consults the cached map first, and persists any newly
        determined mapping back to the database.
        """
        cmssw_v = sm.check_cmssw_version( cmssw_version )
        the_arch = None
        releaseRoot = None
        if sm.is_release_cycle( cmssw_v ):
            cmssw_v = sm.strip_cmssw_version( cmssw_v )
            archs = sm.get_production_arch( cmssw_v )
            for arch in archs:
                path = sm.get_release_root( cmssw_v, arch )
                if os.path.exists(os.path.join(path,cmssw_v)):
                    releaseRoot = path
                    the_arch = arch
                    break
            if releaseRoot is None:
                # Cycle not installed as-is: pick the last installed release
                # whose name starts with the cycle prefix.
                for arch in archs:
                    the_arch = arch
                    releaseRoot = sm.get_release_root( cmssw_v, arch )
                    for r in sorted (os.listdir( releaseRoot )):
                        if r.startswith(cmssw_v):
                            cmssw_v = r
            logging.debug('Boost version will be verified in release %s' %cmssw_v)
        if cmssw_v in self.cmssw_boost_map.keys():
            return self.cmssw_boost_map[cmssw_v]
        if releaseRoot is None:
            archs = sm.get_production_arch( cmssw_v )
            for arch in archs:
                path = sm.get_release_root( cmssw_v, arch )
                if os.path.exists(os.path.join(path,cmssw_v)):
                    releaseRoot = path
                    the_arch = arch
                    break
        logging.debug('Release path: %s' %releaseRoot)
        boost_version = sm.get_cmssw_boost( the_arch, '%s/%s' %(releaseRoot,cmssw_v) )
        if not boost_version is None:
            # Cache and persist the newly resolved mapping.
            self.cmssw_boost_map[cmssw_v] = boost_version
            self.insert_cmssw_boost( cmssw_v,boost_version )
        return boost_version
    def populate_for_gts( self ):
        """Resolve and store the boost version for every release referenced by a GT."""
        cursor = self.db.cursor()
        cursor.execute('SELECT DISTINCT(RELEASE) FROM GLOBAL_TAG')
        rows = cursor.fetchall()
        for r in rows:
            self.lookup_boost_in_cmssw( r[0] )
class conddb_tool(object):
    """CLI driver: maintains and inspects the minimum boost (serialization)
    version metadata of conditions tags."""
    def __init__( self ):
        self.db = None           # cx_Oracle connection, opened by connect()
        self.version_db = None   # version_db helper, created per command
        self.args = None         # argparse namespace, set by main()
        self.logger = logging.getLogger()
        self.logger.setLevel(logLevel)
        consoleHandler = logging.StreamHandler(sys.stdout)
        consoleHandler.setFormatter(logFormatter)
        self.logger.addHandler(consoleHandler)
        self.iovs = None         # cached (since, insertion_time, streamer_info) tuples
        self.versionIovs = None  # (since, boost_version) change points
    def connect( self ):
        """Open the Oracle connection selected by --db with the credentials
        for the access type required by the chosen subcommand."""
        if self.args.db is None:
            self.args.db = 'pro'
        if self.args.db == 'dev' or self.args.db == 'oradev' :
            db_service = dev_db_service
        elif self.args.db == 'orapro':
            db_service = adg_db_service
        # FIX: the original condition (db != 'onlineorapro' or db != 'pro') was
        # always true, so any unknown alias silently fell through to production
        # and the error branch below was unreachable.
        elif self.args.db == 'onlineorapro' or self.args.db == 'pro':
            db_service = prod_db_service
        else:
            # FIX: 'args' was an unresolved name here; use self.args.
            raise Exception("Database '%s' is not known." %self.args.db )
        if self.args.accessType not in db_service[1].keys():
            raise Exception('The specified database connection %s does not support the requested action.' %db_service[0])
        service = db_service[1][self.args.accessType]
        creds = auth.get_credentials( authPathEnvVar, service, self.args.auth )
        if creds is None:
            raise Exception("Could not find credentials for service %s" %service)
        (username, account, pwd) = creds
        connStr = '%s/%s@%s' %(username,pwd,db_service[0])
        self.db = cx_Oracle.connect(connStr)
        logging.info('Connected to %s as user %s' %(db_service[0],username))
        self.db.current_schema = schema_name
    def process_tag_boost_version( self, t, timetype, tagBoostVersion, minIov, timeCut, validate ):
        """Fold the streamer info of tag *t*'s IOVs into its minimum boost version.

        Returns (tagBoostVersion, minIov), or (None, None) when it cannot be
        determined. Side effects: fills self.iovs (once per tag) and
        self.versionIovs with the (since, version) change points.
        """
        if self.iovs is None:
            self.iovs = []
            cursor = self.db.cursor()
            stmt = 'SELECT IOV.SINCE SINCE, IOV.INSERTION_TIME INSERTION_TIME, P.STREAMER_INFO STREAMER_INFO FROM TAG, IOV, PAYLOAD P WHERE TAG.NAME = IOV.TAG_NAME AND P.HASH = IOV.PAYLOAD_HASH AND TAG.NAME = :TAG_NAME'
            params = (t,)
            if timeCut and tagBoostVersion is not None and not validate:
                # Incremental update: only fetch iovs inserted after the last run.
                whereClauseOnSince = ' AND IOV.INSERTION_TIME>:TIME_CUT'
                stmt = stmt + whereClauseOnSince
                params = params + (timeCut,)
            stmt = stmt + ' ORDER BY SINCE'
            logging.debug('Executing: "%s"' %stmt)
            cursor.execute(stmt,params)
            for r in cursor:
                streamer_info = str(r[2].read())
                self.iovs.append((r[0],r[1],streamer_info))
        niovs = 0
        self.versionIovs = []
        lastBoost = None
        update = False
        if tagBoostVersion is not None:
            update = True
        for iov in self.iovs:
            # In validation mode, only consider iovs inserted before the snapshot.
            if validate and timeCut is not None and timeCut < iov[1]:
                continue
            niovs += 1
            iovBoostVersion, tagBoostVersion = sm.update_tag_boost_version( tagBoostVersion, minIov, iov[2], iov[0], timetype, self.version_db.boost_run_map )
            if minIov is None or iov[0]<minIov:
                minIov = iov[0]
            logging.debug('iov: %s - inserted on %s - streamer: %s' %(iov[0],iov[1],iov[2]))
            logging.debug('current tag boost version: %s minIov: %s' %(tagBoostVersion,minIov))
            # Record a change point whenever the per-iov version changes.
            if lastBoost is None or lastBoost!=iovBoostVersion:
                self.versionIovs.append((iov[0],iovBoostVersion))
                lastBoost = iovBoostVersion
        if tagBoostVersion is None:
            if niovs == 0:
                logging.warning( 'No iovs found. boost version cannot be determined.')
                return None, None
            else:
                logging.error('Could not determine the tag boost version.' )
                return None, None
        else:
            if niovs == 0:
                logging.info('Tag boost version has not changed.')
            else:
                msg = 'Found tag boost version %s ( min iov: %s ) combining payloads from %s iovs' %(tagBoostVersion,minIov,niovs)
                if timeCut is not None:
                    if update:
                        msg += ' (iov insertion time>%s)' %str(timeCut)
                    else:
                        msg += ' (iov insertion time<%s)' %str(timeCut)
                logging.info( msg )
        return tagBoostVersion, minIov
    def validate_boost_version( self, t, timetype, tagBoostVersion ):
        """Check *tagBoostVersion* against every GT referencing tag *t*.

        Returns the list of (gt_name, gt_boost_version) pairs the tag is
        invalid for (empty when everything is compatible).
        """
        cursor = self.db.cursor()
        cursor.execute('SELECT GT.NAME, GT.RELEASE, GT.SNAPSHOT_TIME FROM GLOBAL_TAG GT, GLOBAL_TAG_MAP GTM WHERE GT.NAME = GTM.GLOBAL_TAG_NAME AND GTM.TAG_NAME = :TAG_NAME',(t,))
        rows = cursor.fetchall()
        invalid_gts = []
        ngt = 0
        gts = []
        for r in rows:
            gts.append((r[0],r[1],r[2]))
        if len(gts)>0:
            logging.info('validating %s gts.' %len(gts))
        # Cache tag-version-at-snapshot results keyed by snapshot time.
        boost_snapshot_map = {}
        for gt in gts:
            ngt += 1
            logging.debug('Validating for GT %s (release %s)' %(gt[0],gt[1]))
            gtCMSSWVersion = sm.check_cmssw_version( gt[1] )
            gtBoostVersion = self.version_db.lookup_boost_in_cmssw( gtCMSSWVersion )
            if sm.cmp_boost_version( gtBoostVersion, tagBoostVersion )<0:
                logging.warning( 'The boost version computed from all the iovs in the tag (%s) is incompatible with the gt [%s] %s (consuming ver: %s, snapshot: %s)' %(tagBoostVersion,ngt,gt[0],gtBoostVersion,str(gt[2])))
                if str(gt[2]) not in boost_snapshot_map.keys():
                    tagSnapshotBoostVersion = None
                    minIov = None
                    # FIX: the original call omitted the mandatory 'validate'
                    # argument and raised a TypeError at runtime.
                    tagSnapshotBoostVersion, minIov = self.process_tag_boost_version(t, timetype, tagSnapshotBoostVersion, minIov, gt[2], True)
                    if tagSnapshotBoostVersion is not None:
                        boost_snapshot_map[str(gt[2])] = tagSnapshotBoostVersion
                    else:
                        continue
                else:
                    tagSnapshotBoostVersion = boost_snapshot_map[str(gt[2])]
                if sm.cmp_boost_version( gtBoostVersion, tagSnapshotBoostVersion )<0:
                    logging.error('The snapshot from tag used by gt %s (consuming ver: %s) has an incompatible combined boost version %s' %(gt[0],gtBoostVersion,tagSnapshotBoostVersion))
                    invalid_gts.append( ( gt[0], gtBoostVersion ) )
        if len(invalid_gts)==0:
            if ngt>0:
                logging.info('boost version for the tag validated in %s referencing Gts' %(ngt))
            else:
                logging.info('No GT referencing this tag found.')
        else:
            logging.error( 'boost version for the tag is invalid.')
        return invalid_gts
    def update_tag_boost_version_in_db( self, t, tagBoostVersion, minIov, update ):
        """Write (insert or update) the tag's minimum serialization version row."""
        cursor = self.db.cursor()
        now = datetime.datetime.utcnow()
        if update:
            cursor.execute('UPDATE TAG_METADATA SET MIN_SERIALIZATION_V=:BOOST_V, MIN_SINCE=:MIN_IOV, MODIFICATION_TIME=:NOW WHERE TAG_NAME = :NAME',( tagBoostVersion,minIov,now,t))
        else:
            cursor.execute('INSERT INTO TAG_METADATA ( TAG_NAME, MIN_SERIALIZATION_V, MIN_SINCE, MODIFICATION_TIME ) VALUES ( :NAME, :BOOST_V, :MIN_IOV, :NOW )',(t, tagBoostVersion,minIov,now))
        logging.info('Minimum boost version for the tag updated.')
    def update_tags( self ):
        """Recompute and persist the minimum boost version for one tag
        (--name) or for a batch of pending/outdated tags."""
        cursor = self.db.cursor()
        self.version_db = version_db( self.db )
        self.version_db.fetch_cmssw_boost_map()
        self.version_db.fetch_boost_run_map()
        # tag name -> None (new) or (version, min_since, last_modification).
        tags = {}
        wpars = ()
        if self.args.name is not None:
            stmt0 = 'SELECT NAME FROM TAG WHERE NAME = :TAG_NAME'
            wpars = (self.args.name,)
            cursor.execute(stmt0,wpars);
            rows = cursor.fetchall()
            found = False
            for r in rows:
                found = True
                break
            if not found:
                raise Exception('Tag %s does not exists in the database.' %self.args.name )
            tags[self.args.name] = None
            stmt1 = 'SELECT MIN_SERIALIZATION_V, MIN_SINCE, CAST(MODIFICATION_TIME AS TIMESTAMP(0)) FROM TAG_METADATA WHERE TAG_NAME = :NAME'
            cursor.execute(stmt1,wpars);
            rows = cursor.fetchall()
            for r in rows:
                tags[self.args.name] = (r[0],r[1],r[2])
        else:
            # First pick tags with no metadata yet, then tags whose metadata
            # is older than their latest iov insertion; cap at nmax unless --all.
            stmt0 = 'SELECT NAME FROM TAG WHERE NAME NOT IN ( SELECT TAG_NAME FROM TAG_METADATA) ORDER BY NAME'
            nmax = 100
            if self.args.max is not None:
                nmax = self.args.max
            if self.args.all:
                nmax = -1
            if nmax >=0:
                stmt0 = 'SELECT NAME FROM (SELECT NAME FROM TAG WHERE NAME NOT IN ( SELECT TAG_NAME FROM TAG_METADATA ) ORDER BY NAME) WHERE ROWNUM<= :MAXR'
                wpars = (nmax,)
            cursor.execute(stmt0,wpars);
            rows = cursor.fetchall()
            for r in rows:
                tags[r[0]] = None
            stmt1 = 'SELECT T.NAME NAME, TM.MIN_SERIALIZATION_V MIN_SERIALIZATION_V, TM.MIN_SINCE MIN_SINCE, CAST(TM.MODIFICATION_TIME AS TIMESTAMP(0)) MODIFICATION_TIME FROM TAG T, TAG_METADATA TM WHERE T.NAME=TM.TAG_NAME AND CAST(TM.MODIFICATION_TIME AS TIMESTAMP(0)) < (SELECT MAX(INSERTION_TIME) FROM IOV WHERE IOV.TAG_NAME=TM.TAG_NAME) ORDER BY NAME'
            nmax = nmax-len(tags)
            if nmax >=0:
                stmt1 = 'SELECT NAME, MIN_SERIALIZATION_V, MIN_SINCE, MODIFICATION_TIME FROM (SELECT T.NAME NAME, TM.MIN_SERIALIZATION_V MIN_SERIALIZATION_V, TM.MIN_SINCE MIN_SINCE, CAST(TM.MODIFICATION_TIME AS TIMESTAMP(0)) MODIFICATION_TIME FROM TAG T, TAG_METADATA TM WHERE T.NAME=TM.TAG_NAME AND CAST(TM.MODIFICATION_TIME AS TIMESTAMP(0)) < (SELECT MAX(INSERTION_TIME) FROM IOV WHERE IOV.TAG_NAME=TM.TAG_NAME) ORDER BY NAME) WHERE ROWNUM<= :MAXR'
                wpars = (nmax,)
            cursor.execute(stmt1,wpars);
            rows = cursor.fetchall()
            i = 0
            for r in rows:
                i += 1
                if nmax >=0 and i>nmax:
                    break
                tags[r[0]] = (r[1],r[2],r[3])
        logging.info( 'Processing boost version for %s tags' %len(tags))
        count = 0
        for t in sorted(tags.keys()):
            count += 1
            try:
                update = False
                cursor.execute('SELECT TIME_TYPE FROM TAG WHERE NAME= :TAG_NAME',(t,))
                timetype = cursor.fetchone()[0]
                self.iovs = None
                logging.info('************************************************************************')
                logging.info('Tag [%s] %s - timetype: %s' %(count,t,timetype))
                tagBoostVersion = None
                minIov = None
                timeCut = None
                if tags[t] is not None:
                    # Existing metadata: update incrementally from the last run.
                    update = True
                    tagBoostVersion = tags[t][0]
                    minIov = tags[t][1]
                    timeCut = tags[t][2]
                tagBoostVersion, minIov = self.process_tag_boost_version( t, timetype, tagBoostVersion, minIov, timeCut, self.args.validate )
                if tagBoostVersion is None:
                    continue
                logging.debug('boost versions in the %s iovs: %s' %(len(self.iovs),str(self.versionIovs)))
                if self.args.validate:
                    invalid_gts = self.validate_boost_version( t, timetype, tagBoostVersion )
                    if len(invalid_gts)>0:
                        with open('invalid_tags_in_gts.txt','a') as error_file:
                            for gt in invalid_gts:
                                error_file.write('Tag %s (boost %s) is invalid for GT %s ( boost %s) \n' %(t,tagBoostVersion,gt[0],gt[1]))
                if len(self.iovs):
                    if self.iovs[0][0]<minIov:
                        # FIX: the original assigned the whole (since, time, info)
                        # tuple to minIov instead of the since value.
                        minIov = self.iovs[0][0]
                self.update_tag_boost_version_in_db( t, tagBoostVersion, minIov, update )
                self.db.commit()
            except Exception as e:
                logging.error(str(e))
    def insert_boost_run( self ):
        """Insert a new boost-version range into the run map (subcommand 'insert')."""
        cursor = self.db.cursor()
        self.version_db = version_db( self.db )
        if self.args.min_ts is None:
            raise Exception("Run %s has not been found in the database - please provide an explicit TimeType value with the min_ts parameter ." %self.args.since )
        self.version_db.insert_boost_run_range( self.args.since, self.args.label, self.args.min_ts )
        self.db.commit()
        logging.info('boost version %s inserted with since %s' %(self.args.label,self.args.since))
    def list_boost_run( self ):
        """Print the boost run map as a table (subcommand 'list')."""
        cursor = self.db.cursor()
        self.version_db = version_db( self.db )
        self.version_db.fetch_boost_run_map()
        headers = ['Run','Run start time','Boost Version','Insertion time']
        print_table( headers, self.version_db.boost_run_map )
    def show_tag_boost_version( self ):
        """Display the stored minimum boost version of a tag; with --rebuild
        or --full, also recompute it from the available iovs."""
        cursor = self.db.cursor()
        tag = self.args.tag_name
        cursor.execute('SELECT TIME_TYPE FROM TAG WHERE NAME= :TAG_NAME',(tag,))
        rows = cursor.fetchall()
        timeType = None
        t_modificationTime = None
        for r in rows:
            timeType = r[0]
        if timeType is None:
            raise Exception("Tag %s does not exist in the database." %tag)
        cursor.execute('SELECT MAX(INSERTION_TIME) FROM IOV WHERE TAG_NAME= :TAG_NAME',(tag,))
        rows = cursor.fetchall()
        for r in rows:
            t_modificationTime = r[0]
        if t_modificationTime is None:
            raise Exception("Tag %s does not have any iov stored." %tag)
        logging.info('Tag %s - timetype: %s' %(tag,timeType))
        cursor.execute('SELECT MIN_SERIALIZATION_V, MIN_SINCE, MODIFICATION_TIME FROM TAG_METADATA WHERE TAG_NAME= :TAG_NAME',(tag,))
        rows = cursor.fetchall()
        tagBoostVersion = None
        minIov = None
        v_modificationTime = None
        for r in rows:
            tagBoostVersion = r[0]
            minIov = r[1]
            v_modificationTime = r[2]
        if v_modificationTime is not None:
            if t_modificationTime > v_modificationTime:
                logging.warning('The minimum boost version stored is out of date.')
            else:
                logging.info('The minimum boost version stored is up to date.')
        mt = '-'
        if v_modificationTime is not None:
            mt = str(v_modificationTime)
        r_tagBoostVersion = None
        if self.args.rebuild or self.args.full:
            self.version_db = version_db( self.db )
            self.version_db.fetch_boost_run_map()
            timeCut = None
            logging.info('Calculating minimum boost version for the available iovs...')
            # FIX: supply the missing 'validate' argument (this is not a
            # validation run); the original call raised a TypeError.
            r_tagBoostVersion, r_minIov = self.process_tag_boost_version( tag, timeType, tagBoostVersion, minIov, timeCut, False )
        print('# Currently stored: %s (min iov:%s)' %(tagBoostVersion,minIov))
        print('# Last update: %s' %mt)
        print('# Last update on the iovs: %s' %str(t_modificationTime))
        if self.args.rebuild or self.args.full:
            print('# Based on the %s available IOVs: %s (min iov:%s)' %(len(self.iovs),r_tagBoostVersion,r_minIov))
        if self.args.full:
            headers = ['Run','Boost Version']
            print_table( headers, self.versionIovs )
import optparse
import argparse
def main():
    """Entry point: build the argparse CLI and dispatch to the chosen subcommand.

    With -v the command runs un-trapped (full traceback, return value
    propagated); otherwise exceptions are logged and exit code 1 is returned.
    """
    tool = conddb_tool()
    parser = argparse.ArgumentParser(description='CMS conddb command-line tool for serialiation metadata. For general help (manual page), use the help subcommand.')
    parser.add_argument('--db', type=str, help='The target database: pro ( for prod ) or dev ( for prep ). default=pro')
    parser.add_argument("--auth","-a", type=str, help="The path of the authentication file")
    parser.add_argument('--verbose', '-v', action='count', help='The verbosity level')
    parser_subparsers = parser.add_subparsers(title='Available subcommands')
    parser_update_tags = parser_subparsers.add_parser('update_tags', description='Update the existing tag headers with the boost version')
    parser_update_tags.add_argument('--name', '-n', type=str, help='Name of the specific tag to process (default=None - in this case all of the tags will be processed.')
    parser_update_tags.add_argument('--max', '-m', type=int, help='the maximum number of tags processed',default=100)
    parser_update_tags.add_argument('--all',action='store_true', help='process all of the tags with boost_version = None')
    parser_update_tags.add_argument('--validate',action='store_true', help='validate the tag/boost version under processing')
    parser_update_tags.set_defaults(func=tool.update_tags,accessType='w')
    parser_insert_boost_version = parser_subparsers.add_parser('insert', description='Insert a new boost version range in the run map')
    parser_insert_boost_version.add_argument('--label', '-l',type=str, help='The boost version label',required=True)
    parser_insert_boost_version.add_argument('--since', '-s',type=int, help='The since validity (run number)',required=True)
    parser_insert_boost_version.add_argument('--min_ts', '-t',type=str, help='The since validity (Time timetype)', required=False)
    parser_insert_boost_version.set_defaults(func=tool.insert_boost_run,accessType='w')
    parser_list_boost_versions = parser_subparsers.add_parser('list', description='list the boost versions in the run map')
    parser_list_boost_versions.set_defaults(func=tool.list_boost_run,accessType='r')
    parser_show_version = parser_subparsers.add_parser('show_tag', description='Display the minimum boost version for the specified tag (the value stored, by default)')
    parser_show_version.add_argument('tag_name',help='The name of the tag')
    parser_show_version.add_argument('--rebuild','-r',action='store_true',default=False,help='Re-calculate the minimum boost versio ')
    parser_show_version.add_argument('--full',action='store_true',default=False,help='Recalulate the minimum boost version, listing the versions in the iov sequence')
    parser_show_version.set_defaults(func=tool.show_tag_boost_version,accessType='r')
    args = parser.parse_args()
    tool.args = args
    # FIX: argparse's action='count' leaves args.verbose as None when the flag
    # is absent, and `None >= 1` raises TypeError on Python 3; guard explicitly.
    if args.verbose is not None and args.verbose >= 1:
        tool.logger.setLevel(logging.DEBUG)
        tool.connect()
        return args.func()
    else:
        try:
            tool.connect()
            sys.exit( args.func())
        except Exception as e:
            logging.error(e)
            sys.exit(1)
# Standard script entry point.
# FIX: removed stray non-Python artifact text that was fused onto the main()
# call line and broke the file's syntax.
if __name__ == '__main__':
    main()
from __future__ import print_function
import cx_Oracle
import datetime
import calendar
import sys
import logging
import CondCore.Utilities.conddb_serialization_metadata as sm
import CondCore.Utilities.credentials as auth
import CondCore.Utilities.conddb_time as conddb_time
import os
authPathEnvVar = 'COND_AUTH_PATH'
prod_db_service = ('cms_orcon_prod',{'w':'cms_orcon_prod/cms_cond_general_w','r':'cms_orcon_prod/cms_cond_general_r'})
adg_db_service = ('cms_orcon_adg',{'r':'cms_orcon_adg/cms_cond_general_r'})
dev_db_service = ('cms_orcoff_prep',{'w':'cms_orcoff_prep/cms_cond_general_w','r':'cms_orcoff_prep/cms_cond_general_r'})
schema_name = 'CMS_CONDITIONS'
fmt_str = "[%(asctime)s] %(levelname)s: %(message)s"
logLevel = logging.INFO
logFormatter = logging.Formatter(fmt_str)
def print_table( headers, table ):
    """Render *headers* and the rows of *table* as left-aligned, space-padded
    columns, separated from the data by a dashed rule."""
    # Column widths: start from the header widths, then widen to fit each cell.
    widths = [len(h) for h in headers]
    for row in table:
        for col, cell in enumerate(row):
            if col < len(widths):
                widths[col] = max(widths[col], len(str(cell)))
    def emit(row):
        # One left-justified field per column, each followed by a single space.
        pieces = []
        for col, w in enumerate(widths):
            pieces.append(('{:<%s}' % w).format(row[col]) + ' ')
        print(''.join(pieces))
    emit(headers)
    # Dashed separator, one run of '-' per column.
    rule = ''.join(('{:-<%s}' % w).format('') + ' ' for w in widths)
    print(rule)
    for row in table:
        emit(row)
class version_db(object):
    """Accessor for the serialization-version bookkeeping tables
    (CMSSW_BOOST_MAP, BOOST_RUN_MAP) in the conditions database."""
    def __init__(self, db ):
        # db is an already-open cx_Oracle connection.
        self.db = db
        # cmssw_version -> boost_version cache, filled by fetch_cmssw_boost_map().
        self.cmssw_boost_map = {}
        # (run, run_start_time, boost_version, insertion_time) tuples.
        self.boost_run_map = []
    def fetch_cmssw_boost_map( self ):
        """Load the full CMSSW -> boost version map into the cache and return it."""
        cursor = self.db.cursor()
        cursor.execute('SELECT BOOST_VERSION, CMSSW_VERSION FROM CMSSW_BOOST_MAP');
        rows = cursor.fetchall()
        self.cmssw_boost_map = {}
        for r in rows:
            # Key by CMSSW version (column 1); value is the boost version (column 0).
            self.cmssw_boost_map[r[1]]=r[0]
        return self.cmssw_boost_map
    def fetch_boost_run_map( self ):
        """Load the run -> boost-version map, ordered by run then insertion time."""
        cursor = self.db.cursor()
        cursor.execute('SELECT RUN_NUMBER, RUN_START_TIME, BOOST_VERSION, INSERTION_TIME FROM BOOST_RUN_MAP ORDER BY RUN_NUMBER, INSERTION_TIME')
        rows = cursor.fetchall()
        self.boost_run_map = []
        for r in rows:
            self.boost_run_map.append( (r[0],r[1],r[2],str(r[3])) )
        return self.boost_run_map
    def insert_boost_run_range( self, run, boost_version, min_ts ):
        """Insert a boost-version validity range starting at *run*.

        The start time comes from RUN_INFO when the run (or a later one) is
        known there; otherwise *min_ts* is parsed as the start timestamp.
        """
        cursor = self.db.cursor()
        cursor.execute('SELECT MIN(RUN_NUMBER) FROM RUN_INFO WHERE RUN_NUMBER >= :RUN',(run,))
        res = cursor.fetchone()
        if res is not None and res[0] is not None:
            min_run = res[0]
            cursor.execute('SELECT START_TIME FROM RUN_INFO WHERE RUN_NUMBER=:RUN',(min_run,))
            min_run_time = cursor.fetchone()[0]
            # Pack UTC epoch seconds into the high 32 bits (conddb Time encoding).
            min_run_ts = calendar.timegm( min_run_time.utctimetuple() ) << 32
        else:
            min_run = run
            min_run_ts = conddb_time.string_to_timestamp(min_ts)
        now = datetime.datetime.utcnow()
        cursor.execute('INSERT INTO BOOST_RUN_MAP ( RUN_NUMBER, RUN_START_TIME, BOOST_VERSION, INSERTION_TIME ) VALUES (:RUN, :RUN_START_T, :BOOST, :TIME)',(run,min_run_ts,boost_version,now) )
    def insert_cmssw_boost( self, cmssw_version,boost_version ):
        """Persist the boost version shipped with *cmssw_version*."""
        cursor = self.db.cursor()
        cursor.execute('INSERT INTO CMSSW_BOOST_MAP ( CMSSW_VERSION, BOOST_VERSION ) VALUES ( :CMSSW_VERSION, :BOOST_VERSION )',(cmssw_version,boost_version))
    def lookup_boost_in_cmssw( self, cmssw_version ):
        """Return the boost version used by *cmssw_version*.

        Resolves release cycles to a concrete release by scanning the release
        area on disk, consults the cached map first, and persists any newly
        determined mapping back to the database.
        """
        cmssw_v = sm.check_cmssw_version( cmssw_version )
        the_arch = None
        releaseRoot = None
        if sm.is_release_cycle( cmssw_v ):
            cmssw_v = sm.strip_cmssw_version( cmssw_v )
            archs = sm.get_production_arch( cmssw_v )
            for arch in archs:
                path = sm.get_release_root( cmssw_v, arch )
                if os.path.exists(os.path.join(path,cmssw_v)):
                    releaseRoot = path
                    the_arch = arch
                    break
            if releaseRoot is None:
                # Cycle not installed as-is: pick the last installed release
                # whose name starts with the cycle prefix.
                for arch in archs:
                    the_arch = arch
                    releaseRoot = sm.get_release_root( cmssw_v, arch )
                    for r in sorted (os.listdir( releaseRoot )):
                        if r.startswith(cmssw_v):
                            cmssw_v = r
            logging.debug('Boost version will be verified in release %s' %cmssw_v)
        if cmssw_v in self.cmssw_boost_map.keys():
            return self.cmssw_boost_map[cmssw_v]
        if releaseRoot is None:
            archs = sm.get_production_arch( cmssw_v )
            for arch in archs:
                path = sm.get_release_root( cmssw_v, arch )
                if os.path.exists(os.path.join(path,cmssw_v)):
                    releaseRoot = path
                    the_arch = arch
                    break
        logging.debug('Release path: %s' %releaseRoot)
        boost_version = sm.get_cmssw_boost( the_arch, '%s/%s' %(releaseRoot,cmssw_v) )
        if not boost_version is None:
            # Cache and persist the newly resolved mapping.
            self.cmssw_boost_map[cmssw_v] = boost_version
            self.insert_cmssw_boost( cmssw_v,boost_version )
        return boost_version
    def populate_for_gts( self ):
        """Resolve and store the boost version for every release referenced by a GT."""
        cursor = self.db.cursor()
        cursor.execute('SELECT DISTINCT(RELEASE) FROM GLOBAL_TAG')
        rows = cursor.fetchall()
        for r in rows:
            self.lookup_boost_in_cmssw( r[0] )
class conddb_tool(object):
    """Command-line driver managing the boost (serialization) version metadata
    of condition tags in the CMS conditions database.

    One method per subcommand (``update_tags``, ``insert_boost_run``,
    ``list_boost_run``, ``show_tag_boost_version``); ``connect`` must be
    called first with ``self.args`` already populated.
    """
    def __init__( self ):
        self.db = None            # cx_Oracle connection, created by connect()
        self.version_db = None    # helper mapping releases/runs to boost versions
        self.args = None          # parsed argparse namespace, set by main()
        self.logger = logging.getLogger()
        self.logger.setLevel(logLevel)
        consoleHandler = logging.StreamHandler(sys.stdout)
        consoleHandler.setFormatter(logFormatter)
        self.logger.addHandler(consoleHandler)
        # per-tag caches filled by process_tag_boost_version
        self.iovs = None
        self.versionIovs = None
    def connect( self ):
        """Open the Oracle connection for the alias in ``self.args.db``,
        checking that the alias supports the requested access type.

        Raises:
            Exception: unknown database alias, unsupported access type, or
                missing credentials.
        """
        if self.args.db is None:
            self.args.db = 'pro'
        if self.args.db == 'dev' or self.args.db == 'oradev' :
            db_service = dev_db_service
        elif self.args.db == 'orapro':
            db_service = adg_db_service
        # Fix: the original test used '!=' joined by 'or', which is always
        # true, silently routing unknown aliases to production and making the
        # "not known" error below unreachable.
        elif self.args.db == 'onlineorapro' or self.args.db == 'pro':
            db_service = prod_db_service
        else:
            # Fix: 'args' was referenced without 'self.' (NameError).
            raise Exception("Database '%s' is not known." %self.args.db )
        if self.args.accessType not in db_service[1].keys():
            raise Exception('The specified database connection %s does not support the requested action.' %db_service[0])
        service = db_service[1][self.args.accessType]
        creds = auth.get_credentials( authPathEnvVar, service, self.args.auth )
        if creds is None:
            raise Exception("Could not find credentials for service %s" %service)
        (username, account, pwd) = creds
        connStr = '%s/%s@%s' %(username,pwd,db_service[0])
        self.db = cx_Oracle.connect(connStr)
        logging.info('Connected to %s as user %s' %(db_service[0],username))
        self.db.current_schema = schema_name
    def process_tag_boost_version( self, t, timetype, tagBoostVersion, minIov, timeCut, validate ):
        """Combine the streamer infos of the iovs of tag ``t`` into a single
        'minimum' boost version.

        :param t: tag name.
        :param timetype: the tag time type.
        :param tagBoostVersion: previously computed version to refresh
            incrementally, or None to compute from scratch.
        :param minIov: smallest since seen so far (may be None).
        :param timeCut: iov insertion-time threshold. With ``validate`` False
            (and an existing version) it restricts the query to newer iovs;
            with ``validate`` True it skips cached iovs inserted after it.
        :param validate: see ``timeCut``.
        :return: ``(tagBoostVersion, minIov)``; ``(None, None)`` on failure.
        """
        if self.iovs is None:
            # cache the iov list so that a later validation pass can reuse it
            self.iovs = []
            cursor = self.db.cursor()
            stmt = 'SELECT IOV.SINCE SINCE, IOV.INSERTION_TIME INSERTION_TIME, P.STREAMER_INFO STREAMER_INFO FROM TAG, IOV, PAYLOAD P WHERE TAG.NAME = IOV.TAG_NAME AND P.HASH = IOV.PAYLOAD_HASH AND TAG.NAME = :TAG_NAME'
            params = (t,)
            if timeCut and tagBoostVersion is not None and not validate:
                whereClauseOnSince = ' AND IOV.INSERTION_TIME>:TIME_CUT'
                stmt = stmt + whereClauseOnSince
                params = params + (timeCut,)
            stmt = stmt + ' ORDER BY SINCE'
            logging.debug('Executing: "%s"' %stmt)
            cursor.execute(stmt,params)
            for r in cursor:
                streamer_info = str(r[2].read())
                self.iovs.append((r[0],r[1],streamer_info))
        niovs = 0
        self.versionIovs = []
        lastBoost = None
        update = False
        if tagBoostVersion is not None:
            update = True
        for iov in self.iovs:
            # in validation mode, ignore iovs inserted after the snapshot time
            if validate and timeCut is not None and timeCut < iov[1]:
                continue
            niovs += 1
            iovBoostVersion, tagBoostVersion = sm.update_tag_boost_version( tagBoostVersion, minIov, iov[2], iov[0], timetype, self.version_db.boost_run_map )
            if minIov is None or iov[0]<minIov:
                minIov = iov[0]
            logging.debug('iov: %s - inserted on %s - streamer: %s' %(iov[0],iov[1],iov[2]))
            logging.debug('current tag boost version: %s minIov: %s' %(tagBoostVersion,minIov))
            if lastBoost is None or lastBoost!=iovBoostVersion:
                self.versionIovs.append((iov[0],iovBoostVersion))
                lastBoost = iovBoostVersion
        if tagBoostVersion is None:
            if niovs == 0:
                logging.warning( 'No iovs found. boost version cannot be determined.')
                return None, None
            else:
                logging.error('Could not determine the tag boost version.' )
                return None, None
        else:
            if niovs == 0:
                logging.info('Tag boost version has not changed.')
            else:
                msg = 'Found tag boost version %s ( min iov: %s ) combining payloads from %s iovs' %(tagBoostVersion,minIov,niovs)
                if timeCut is not None:
                    if update:
                        msg += ' (iov insertion time>%s)' %str(timeCut)
                    else:
                        msg += ' (iov insertion time<%s)' %str(timeCut)
                logging.info( msg )
        return tagBoostVersion, minIov
    def validate_boost_version( self, t, timetype, tagBoostVersion ):
        """Check ``tagBoostVersion`` against every global tag referencing tag
        ``t``.

        :return: list of ``(gt_name, gt_boost_version)`` pairs found invalid.
        """
        cursor = self.db.cursor()
        cursor.execute('SELECT GT.NAME, GT.RELEASE, GT.SNAPSHOT_TIME FROM GLOBAL_TAG GT, GLOBAL_TAG_MAP GTM WHERE GT.NAME = GTM.GLOBAL_TAG_NAME AND GTM.TAG_NAME = :TAG_NAME',(t,))
        rows = cursor.fetchall()
        invalid_gts = []
        ngt = 0
        gts = []
        for r in rows:
            gts.append((r[0],r[1],r[2]))
        if len(gts)>0:
            logging.info('validating %s gts.' %len(gts))
        # cache per-snapshot-time combined versions: many gts share a snapshot
        boost_snapshot_map = {}
        for gt in gts:
            ngt += 1
            logging.debug('Validating for GT %s (release %s)' %(gt[0],gt[1]))
            gtCMSSWVersion = sm.check_cmssw_version( gt[1] )
            gtBoostVersion = self.version_db.lookup_boost_in_cmssw( gtCMSSWVersion )
            if sm.cmp_boost_version( gtBoostVersion, tagBoostVersion )<0:
                logging.warning( 'The boost version computed from all the iovs in the tag (%s) is incompatible with the gt [%s] %s (consuming ver: %s, snapshot: %s)' %(tagBoostVersion,ngt,gt[0],gtBoostVersion,str(gt[2])))
                if str(gt[2]) not in boost_snapshot_map.keys():
                    tagSnapshotBoostVersion = None
                    minIov = None
                    # Fix: the original call dropped the mandatory 'validate'
                    # argument (TypeError); True filters the cached iovs by
                    # the gt snapshot time.
                    tagSnapshotBoostVersion, minIov = self.process_tag_boost_version(t, timetype, tagSnapshotBoostVersion, minIov, gt[2], True)
                    if tagSnapshotBoostVersion is not None:
                        boost_snapshot_map[str(gt[2])] = tagSnapshotBoostVersion
                    else:
                        continue
                else:
                    tagSnapshotBoostVersion = boost_snapshot_map[str(gt[2])]
                if sm.cmp_boost_version( gtBoostVersion, tagSnapshotBoostVersion )<0:
                    logging.error('The snapshot from tag used by gt %s (consuming ver: %s) has an incompatible combined boost version %s' %(gt[0],gtBoostVersion,tagSnapshotBoostVersion))
                    invalid_gts.append( ( gt[0], gtBoostVersion ) )
        if len(invalid_gts)==0:
            if ngt>0:
                logging.info('boost version for the tag validated in %s referencing Gts' %(ngt))
            else:
                logging.info('No GT referencing this tag found.')
        else:
            logging.error( 'boost version for the tag is invalid.')
        return invalid_gts
    def update_tag_boost_version_in_db( self, t, tagBoostVersion, minIov, update ):
        """Persist the computed minimum boost version for tag ``t`` in
        TAG_METADATA (UPDATE when ``update`` is True, INSERT otherwise)."""
        cursor = self.db.cursor()
        now = datetime.datetime.utcnow()
        if update:
            cursor.execute('UPDATE TAG_METADATA SET MIN_SERIALIZATION_V=:BOOST_V, MIN_SINCE=:MIN_IOV, MODIFICATION_TIME=:NOW WHERE TAG_NAME = :NAME',( tagBoostVersion,minIov,now,t))
        else:
            cursor.execute('INSERT INTO TAG_METADATA ( TAG_NAME, MIN_SERIALIZATION_V, MIN_SINCE, MODIFICATION_TIME ) VALUES ( :NAME, :BOOST_V, :MIN_IOV, :NOW )',(t, tagBoostVersion,minIov,now))
        logging.info('Minimum boost version for the tag updated.')
    def update_tags( self ):
        """Compute (or incrementally refresh) the minimum boost version of the
        selected tags, persist it in TAG_METADATA, and optionally validate it
        against the referencing global tags."""
        cursor = self.db.cursor()
        self.version_db = version_db( self.db )
        self.version_db.fetch_cmssw_boost_map()
        self.version_db.fetch_boost_run_map()
        tags = {}
        wpars = ()
        if self.args.name is not None:
            # single-tag mode: verify the tag exists, then load its metadata
            stmt0 = 'SELECT NAME FROM TAG WHERE NAME = :TAG_NAME'
            wpars = (self.args.name,)
            cursor.execute(stmt0,wpars)
            if not cursor.fetchall():
                raise Exception('Tag %s does not exists in the database.' %self.args.name )
            tags[self.args.name] = None
            stmt1 = 'SELECT MIN_SERIALIZATION_V, MIN_SINCE, CAST(MODIFICATION_TIME AS TIMESTAMP(0)) FROM TAG_METADATA WHERE TAG_NAME = :NAME'
            cursor.execute(stmt1,wpars)
            rows = cursor.fetchall()
            for r in rows:
                tags[self.args.name] = (r[0],r[1],r[2])
        else:
            # batch mode: first the tags with no metadata at all...
            stmt0 = 'SELECT NAME FROM TAG WHERE NAME NOT IN ( SELECT TAG_NAME FROM TAG_METADATA) ORDER BY NAME'
            nmax = 100
            if self.args.max is not None:
                nmax = self.args.max
            if self.args.all:
                nmax = -1
            if nmax >=0:
                stmt0 = 'SELECT NAME FROM (SELECT NAME FROM TAG WHERE NAME NOT IN ( SELECT TAG_NAME FROM TAG_METADATA ) ORDER BY NAME) WHERE ROWNUM<= :MAXR'
                wpars = (nmax,)
            cursor.execute(stmt0,wpars)
            rows = cursor.fetchall()
            for r in rows:
                tags[r[0]] = None
            # ...then the tags whose metadata is older than their newest iov
            stmt1 = 'SELECT T.NAME NAME, TM.MIN_SERIALIZATION_V MIN_SERIALIZATION_V, TM.MIN_SINCE MIN_SINCE, CAST(TM.MODIFICATION_TIME AS TIMESTAMP(0)) MODIFICATION_TIME FROM TAG T, TAG_METADATA TM WHERE T.NAME=TM.TAG_NAME AND CAST(TM.MODIFICATION_TIME AS TIMESTAMP(0)) < (SELECT MAX(INSERTION_TIME) FROM IOV WHERE IOV.TAG_NAME=TM.TAG_NAME) ORDER BY NAME'
            nmax = nmax-len(tags)
            if nmax >=0:
                stmt1 = 'SELECT NAME, MIN_SERIALIZATION_V, MIN_SINCE, MODIFICATION_TIME FROM (SELECT T.NAME NAME, TM.MIN_SERIALIZATION_V MIN_SERIALIZATION_V, TM.MIN_SINCE MIN_SINCE, CAST(TM.MODIFICATION_TIME AS TIMESTAMP(0)) MODIFICATION_TIME FROM TAG T, TAG_METADATA TM WHERE T.NAME=TM.TAG_NAME AND CAST(TM.MODIFICATION_TIME AS TIMESTAMP(0)) < (SELECT MAX(INSERTION_TIME) FROM IOV WHERE IOV.TAG_NAME=TM.TAG_NAME) ORDER BY NAME) WHERE ROWNUM<= :MAXR'
                wpars = (nmax,)
            cursor.execute(stmt1,wpars)
            rows = cursor.fetchall()
            i = 0
            for r in rows:
                i += 1
                if nmax >=0 and i>nmax:
                    break
                tags[r[0]] = (r[1],r[2],r[3])
        logging.info( 'Processing boost version for %s tags' %len(tags))
        count = 0
        for t in sorted(tags.keys()):
            count += 1
            try:
                update = False
                cursor.execute('SELECT TIME_TYPE FROM TAG WHERE NAME= :TAG_NAME',(t,))
                timetype = cursor.fetchone()[0]
                self.iovs = None   # force a fresh iov query for this tag
                logging.info('************************************************************************')
                logging.info('Tag [%s] %s - timetype: %s' %(count,t,timetype))
                tagBoostVersion = None
                minIov = None
                timeCut = None
                if tags[t] is not None:
                    # existing metadata: refresh incrementally from newer iovs
                    update = True
                    tagBoostVersion = tags[t][0]
                    minIov = tags[t][1]
                    timeCut = tags[t][2]
                tagBoostVersion, minIov = self.process_tag_boost_version( t, timetype, tagBoostVersion, minIov, timeCut, self.args.validate )
                if tagBoostVersion is None:
                    continue
                logging.debug('boost versions in the %s iovs: %s' %(len(self.iovs),str(self.versionIovs)))
                if self.args.validate:
                    invalid_gts = self.validate_boost_version( t, timetype, tagBoostVersion )
                    if len(invalid_gts)>0:
                        with open('invalid_tags_in_gts.txt','a') as error_file:
                            for gt in invalid_gts:
                                error_file.write('Tag %s (boost %s) is invalid for GT %s ( boost %s) \n' %(t,tagBoostVersion,gt[0],gt[1]))
                if len(self.iovs):
                    # Fix: the original stored the whole (since, time, info)
                    # tuple in minIov instead of its since value, corrupting
                    # MIN_SINCE; also guard against a None minIov comparison.
                    if minIov is None or self.iovs[0][0]<minIov:
                        minIov = self.iovs[0][0]
                self.update_tag_boost_version_in_db( t, tagBoostVersion, minIov, update )
                self.db.commit()
            except Exception as e:
                logging.error(str(e))
    def insert_boost_run( self ):
        """Insert a new boost-version validity range in the run map."""
        self.version_db = version_db( self.db )
        if self.args.min_ts is None:
            raise Exception("Run %s has not been found in the database - please provide an explicit TimeType value with the min_ts parameter ." %self.args.since )
        self.version_db.insert_boost_run_range( self.args.since, self.args.label, self.args.min_ts )
        self.db.commit()
        logging.info('boost version %s inserted with since %s' %(self.args.label,self.args.since))
    def list_boost_run( self ):
        """Print the boost-version run map as a table."""
        self.version_db = version_db( self.db )
        self.version_db.fetch_boost_run_map()
        headers = ['Run','Run start time','Boost Version','Insertion time']
        print_table( headers, self.version_db.boost_run_map )
    def show_tag_boost_version( self ):
        """Display the stored minimum boost version for a tag and, with
        --rebuild / --full, recompute it from the available iovs."""
        cursor = self.db.cursor()
        tag = self.args.tag_name
        cursor.execute('SELECT TIME_TYPE FROM TAG WHERE NAME= :TAG_NAME',(tag,))
        rows = cursor.fetchall()
        timeType = None
        t_modificationTime = None
        for r in rows:
            timeType = r[0]
        if timeType is None:
            raise Exception("Tag %s does not exist in the database." %tag)
        cursor.execute('SELECT MAX(INSERTION_TIME) FROM IOV WHERE TAG_NAME= :TAG_NAME',(tag,))
        rows = cursor.fetchall()
        for r in rows:
            t_modificationTime = r[0]
        if t_modificationTime is None:
            raise Exception("Tag %s does not have any iov stored." %tag)
        logging.info('Tag %s - timetype: %s' %(tag,timeType))
        cursor.execute('SELECT MIN_SERIALIZATION_V, MIN_SINCE, MODIFICATION_TIME FROM TAG_METADATA WHERE TAG_NAME= :TAG_NAME',(tag,))
        rows = cursor.fetchall()
        tagBoostVersion = None
        minIov = None
        v_modificationTime = None
        for r in rows:
            tagBoostVersion = r[0]
            minIov = r[1]
            v_modificationTime = r[2]
        if v_modificationTime is not None:
            if t_modificationTime > v_modificationTime:
                logging.warning('The minimum boost version stored is out of date.')
            else:
                logging.info('The minimum boost version stored is up to date.')
        mt = '-'
        if v_modificationTime is not None:
            mt = str(v_modificationTime)
        r_tagBoostVersion = None
        if self.args.rebuild or self.args.full:
            self.version_db = version_db( self.db )
            self.version_db.fetch_boost_run_map()
            timeCut = None
            logging.info('Calculating minimum boost version for the available iovs...')
            # Fix: the original call was missing the mandatory 'validate'
            # argument (TypeError); no snapshot filtering applies here.
            r_tagBoostVersion, r_minIov = self.process_tag_boost_version( tag, timeType, tagBoostVersion, minIov, timeCut, False )
        print('# Currently stored: %s (min iov:%s)' %(tagBoostVersion,minIov))
        print('# Last update: %s' %mt)
        print('# Last update on the iovs: %s' %str(t_modificationTime))
        if self.args.rebuild or self.args.full:
            print('# Based on the %s available IOVs: %s (min iov:%s)' %(len(self.iovs),r_tagBoostVersion,r_minIov))
            if self.args.full:
                headers = ['Run','Boost Version']
                print_table( headers, self.versionIovs )
import optparse
import argparse
def main():
    """Entry point: build the argument parser, dispatch to the selected
    subcommand, and translate failures into a non-zero exit status."""
    tool = conddb_tool()
    parser = argparse.ArgumentParser(description='CMS conddb command-line tool for serialiation metadata. For general help (manual page), use the help subcommand.')
    parser.add_argument('--db', type=str, help='The target database: pro ( for prod ) or dev ( for prep ). default=pro')
    parser.add_argument("--auth","-a", type=str, help="The path of the authentication file")
    parser.add_argument('--verbose', '-v', action='count', help='The verbosity level')
    parser_subparsers = parser.add_subparsers(title='Available subcommands')
    parser_update_tags = parser_subparsers.add_parser('update_tags', description='Update the existing tag headers with the boost version')
    parser_update_tags.add_argument('--name', '-n', type=str, help='Name of the specific tag to process (default=None - in this case all of the tags will be processed.')
    parser_update_tags.add_argument('--max', '-m', type=int, help='the maximum number of tags processed',default=100)
    parser_update_tags.add_argument('--all',action='store_true', help='process all of the tags with boost_version = None')
    parser_update_tags.add_argument('--validate',action='store_true', help='validate the tag/boost version under processing')
    parser_update_tags.set_defaults(func=tool.update_tags,accessType='w')
    parser_insert_boost_version = parser_subparsers.add_parser('insert', description='Insert a new boost version range in the run map')
    parser_insert_boost_version.add_argument('--label', '-l',type=str, help='The boost version label',required=True)
    parser_insert_boost_version.add_argument('--since', '-s',type=int, help='The since validity (run number)',required=True)
    parser_insert_boost_version.add_argument('--min_ts', '-t',type=str, help='The since validity (Time timetype)', required=False)
    parser_insert_boost_version.set_defaults(func=tool.insert_boost_run,accessType='w')
    parser_list_boost_versions = parser_subparsers.add_parser('list', description='list the boost versions in the run map')
    parser_list_boost_versions.set_defaults(func=tool.list_boost_run,accessType='r')
    parser_show_version = parser_subparsers.add_parser('show_tag', description='Display the minimum boost version for the specified tag (the value stored, by default)')
    parser_show_version.add_argument('tag_name',help='The name of the tag')
    parser_show_version.add_argument('--rebuild','-r',action='store_true',default=False,help='Re-calculate the minimum boost versio ')
    parser_show_version.add_argument('--full',action='store_true',default=False,help='Recalulate the minimum boost version, listing the versions in the iov sequence')
    parser_show_version.set_defaults(func=tool.show_tag_boost_version,accessType='r')
    args = parser.parse_args()
    tool.args = args
    # Robustness: on python 3 a missing subcommand leaves 'func' unset and
    # would crash with AttributeError further down.
    if not hasattr(args, 'func'):
        parser.error('no subcommand specified.')
    # Fix: 'count' actions default to None when the flag is absent, and
    # 'None >= 1' raises TypeError on python 3.
    if args.verbose is not None and args.verbose >= 1:
        # verbose/debug mode: let exceptions propagate with full traceback
        tool.logger.setLevel(logging.DEBUG)
        tool.connect()
        return args.func()
    else:
        try:
            tool.connect()
            sys.exit( args.func())
        except Exception as e:
            logging.error(e)
            sys.exit(1)
# Fix: the original 'main()' line carried trailing extraction residue
# ("| 0.250363 | 0.084947 |") that is not valid Python.
if __name__ == '__main__':
    main()
import director.visualization as vis
from director import filterUtils
import director.vtkAll as vtk
import director.vtkNumpy as vnp
from director.shallowCopy import shallowCopy
from director import ioUtils
import numpy as np
def createTexturedPlane():
    """Build a unit plane polydata with texture coordinates attached."""
    plane = vtk.vtkPlaneSource()
    texMapper = vtk.vtkTextureMapToPlane()
    texMapper.SetInput(plane.GetOutput())
    texMapper.Update()
    return shallowCopy(texMapper.GetOutput())
def getSkyboxSides():
    """Return the six skybox face names, in canonical order."""
    return 'top bottom front back left right'.split()
def createSkyboxPlane(side):
    """Create the textured plane for one skybox face, positioned on the
    unit cube around the origin."""
    # Transform recipe per face: (method name, args) applied in order with
    # PostMultiply semantics, matching the cube-face orientations.
    recipes = {
        'top':    [('Translate', (0, 0, 0.5)), ('RotateZ', (180,))],
        'bottom': [('RotateX', (180,)), ('RotateY', (180,)), ('RotateZ', (-270,)),
                   ('Translate', (0, 0, -0.5))],
        'front':  [('RotateY', (90,)), ('RotateX', (90,)), ('RotateZ', (180,)),
                   ('Translate', (0.5, 0.0, 0.0))],
        'back':   [('RotateY', (90,)), ('RotateX', (90,)), ('RotateZ', (0,)),
                   ('Translate', (-0.5, 0.0, 0.0))],
        'left':   [('RotateY', (90,)), ('RotateX', (90,)), ('RotateZ', (-90,)),
                   ('Translate', (0.0, 0.5, 0.0))],
        'right':  [('RotateY', (90,)), ('RotateX', (90,)), ('RotateZ', (90,)),
                   ('Translate', (0.0, -0.5, 0.0))],
    }
    pd = createTexturedPlane()
    t = vtk.vtkTransform()
    t.PostMultiply()
    for methodName, args in recipes.get(side, []):
        getattr(t, methodName)(*args)
    return filterUtils.transformPolyData(pd, t)
def createSkyboxPlanes():
    """Map each skybox side name to its positioned plane polydata."""
    return {side: createSkyboxPlane(side) for side in getSkyboxSides()}
def createTexture(imageFilename):
    """Load an image file into a clamped, non-repeating vtkTexture."""
    tex = vtk.vtkTexture()
    tex.SetInput(ioUtils.readImage(imageFilename))
    tex.EdgeClampOn()
    tex.RepeatOff()
    return tex
def createSkybox(imageMap, view):
    """Create one textured, unlit plane per entry in imageMap (side -> image
    path), add its actor to the view's background renderer, and return the
    PolyDataItems keyed by side."""
    planes = createSkyboxPlanes()
    items = {}
    for side, imageFilename in imageMap.items():
        item = vis.PolyDataItem('skybox %s' % side, planes[side], view=None)
        item.actor.SetTexture(createTexture(imageFilename))
        item.actor.GetProperty().LightingOff()
        view.backgroundRenderer().AddActor(item.actor)
        items[side] = item
    return items
def getSkyboxImages(baseDir):
    """Return the mars skybox image paths under baseDir, keyed by side."""
    fileNames = dict(
        top='topmars1.jpg',
        bottom='botmars1.jpg',
        front='frontmars1.jpg',
        back='backmars1.jpg',
        left='leftmars1.jpg',
        right='rightmars1.jpg')
    return {side: baseDir + '/' + name for side, name in fileNames.items()}
def createTextureGround(imageFilename, view):
    """Add a large, unlit ground plane with a 60x-tiled repeating texture
    just below z=0."""
    pd = createTexturedPlane()
    texture = createTexture(imageFilename)
    texture.RepeatOn()
    # tile the texture across the plane (in-place edit of the tcoord array)
    tcoords = vnp.getNumpyFromVtk(pd, 'Texture Coordinates')
    tcoords *= 60
    xform = vtk.vtkTransform()
    xform.PostMultiply()
    xform.Scale(200, 200, 200)
    xform.Translate(0, 0, -0.005)
    groundPd = filterUtils.transformPolyData(pd, xform)
    item = vis.showPolyData(groundPd, 'ground', view=view, alpha=1.0, parent='skybox')
    item.actor.SetTexture(texture)
    item.actor.GetProperty().LightingOff()
def connectSkyboxCamera(view, debug=False):
    """Keep the background (skybox) camera synced to the view camera's
    orientation on every render."""
    skyboxRenderer = view.backgroundRenderer()
    def syncCamera(obj, event):
        skyCam = skyboxRenderer.GetActiveCamera()
        viewCam = view.camera()
        direction = np.array(viewCam.GetFocalPoint()) - np.array(viewCam.GetPosition())
        direction /= np.linalg.norm(direction)
        if debug:
            skyCam.SetPosition(viewCam.GetPosition())
            skyCam.SetFocalPoint(viewCam.GetFocalPoint())
        else:
            # pin the skybox camera at the origin; only orientation follows
            skyCam.SetPosition(0, 0, 0)
            skyCam.SetFocalPoint(direction)
        skyCam.SetViewUp(viewCam.GetViewUp())
        skyCam.SetViewAngle(viewCam.GetViewAngle())
    view.renderWindow().AddObserver('StartEvent', syncCamera)
from director import filterUtils
import director.vtkAll as vtk
import director.vtkNumpy as vnp
from director.shallowCopy import shallowCopy
from director import ioUtils
import numpy as np
def createTexturedPlane():
    """Build a unit plane polydata with texture coordinates attached."""
    plane = vtk.vtkPlaneSource()
    texMapper = vtk.vtkTextureMapToPlane()
    texMapper.SetInput(plane.GetOutput())
    texMapper.Update()
    return shallowCopy(texMapper.GetOutput())
def getSkyboxSides():
    """Return the six skybox face names, in canonical order."""
    return 'top bottom front back left right'.split()
def createSkyboxPlane(side):
    """Create the textured plane for one skybox face, positioned on the
    unit cube around the origin."""
    # Transform recipe per face: (method name, args) applied in order with
    # PostMultiply semantics, matching the cube-face orientations.
    recipes = {
        'top':    [('Translate', (0, 0, 0.5)), ('RotateZ', (180,))],
        'bottom': [('RotateX', (180,)), ('RotateY', (180,)), ('RotateZ', (-270,)),
                   ('Translate', (0, 0, -0.5))],
        'front':  [('RotateY', (90,)), ('RotateX', (90,)), ('RotateZ', (180,)),
                   ('Translate', (0.5, 0.0, 0.0))],
        'back':   [('RotateY', (90,)), ('RotateX', (90,)), ('RotateZ', (0,)),
                   ('Translate', (-0.5, 0.0, 0.0))],
        'left':   [('RotateY', (90,)), ('RotateX', (90,)), ('RotateZ', (-90,)),
                   ('Translate', (0.0, 0.5, 0.0))],
        'right':  [('RotateY', (90,)), ('RotateX', (90,)), ('RotateZ', (90,)),
                   ('Translate', (0.0, -0.5, 0.0))],
    }
    pd = createTexturedPlane()
    t = vtk.vtkTransform()
    t.PostMultiply()
    for methodName, args in recipes.get(side, []):
        getattr(t, methodName)(*args)
    return filterUtils.transformPolyData(pd, t)
def createSkyboxPlanes():
    """Map each skybox side name to its positioned plane polydata."""
    return {side: createSkyboxPlane(side) for side in getSkyboxSides()}
def createTexture(imageFilename):
    """Load an image file into a clamped, non-repeating vtkTexture."""
    tex = vtk.vtkTexture()
    tex.SetInput(ioUtils.readImage(imageFilename))
    tex.EdgeClampOn()
    tex.RepeatOff()
    return tex
def createSkybox(imageMap, view):
    """Create one textured, unlit plane per entry in imageMap (side -> image
    path), add its actor to the view's background renderer, and return the
    PolyDataItems keyed by side."""
    planes = createSkyboxPlanes()
    items = {}
    for side, imageFilename in imageMap.items():
        item = vis.PolyDataItem('skybox %s' % side, planes[side], view=None)
        item.actor.SetTexture(createTexture(imageFilename))
        item.actor.GetProperty().LightingOff()
        view.backgroundRenderer().AddActor(item.actor)
        items[side] = item
    return items
def getSkyboxImages(baseDir):
    """Return the mars skybox image paths under baseDir, keyed by side."""
    fileNames = dict(
        top='topmars1.jpg',
        bottom='botmars1.jpg',
        front='frontmars1.jpg',
        back='backmars1.jpg',
        left='leftmars1.jpg',
        right='rightmars1.jpg')
    return {side: baseDir + '/' + name for side, name in fileNames.items()}
def createTextureGround(imageFilename, view):
    """Add a large, unlit ground plane with a 60x-tiled repeating texture
    just below z=0."""
    pd = createTexturedPlane()
    texture = createTexture(imageFilename)
    texture.RepeatOn()
    # tile the texture across the plane (in-place edit of the tcoord array)
    tcoords = vnp.getNumpyFromVtk(pd, 'Texture Coordinates')
    tcoords *= 60
    xform = vtk.vtkTransform()
    xform.PostMultiply()
    xform.Scale(200, 200, 200)
    xform.Translate(0, 0, -0.005)
    groundPd = filterUtils.transformPolyData(pd, xform)
    item = vis.showPolyData(groundPd, 'ground', view=view, alpha=1.0, parent='skybox')
    item.actor.SetTexture(texture)
    item.actor.GetProperty().LightingOff()
def connectSkyboxCamera(view, debug=False):
    """Keep the background (skybox) camera synced to the view camera's
    orientation on every render."""
    skyboxRenderer = view.backgroundRenderer()
    def syncCamera(obj, event):
        skyCam = skyboxRenderer.GetActiveCamera()
        viewCam = view.camera()
        direction = np.array(viewCam.GetFocalPoint()) - np.array(viewCam.GetPosition())
        direction /= np.linalg.norm(direction)
        if debug:
            skyCam.SetPosition(viewCam.GetPosition())
            skyCam.SetFocalPoint(viewCam.GetFocalPoint())
        else:
            # pin the skybox camera at the origin; only orientation follows
            skyCam.SetPosition(0, 0, 0)
            skyCam.SetFocalPoint(direction)
        skyCam.SetViewUp(viewCam.GetViewUp())
        skyCam.SetViewAngle(viewCam.GetViewAngle())
    view.renderWindow().AddObserver('StartEvent', syncCamera)
from flask_restx import Namespace, Resource, fields
from flask import request
# Flask-RESTX namespace grouping all Binance-related endpoints.
api = Namespace("Binance", description="Binance related APIs")
from main.services.jwt_service import JWTService
from main.services.user_service import UserService
from main.services.user_helper import UserHelper
from main.services.wallet_service import WalletService
from main.services.order_service import OrderService
from main.services.binance_service import BinanceService
from flask_jwt_extended import (
get_jwt_identity,
jwt_required,
)
@api.route("/binance/getLatestPrice")
class BinanceTicker(Resource):
    """Resource exposing the latest ticker prices fetched from Binance."""
    def __init__(self, arg):
        super(BinanceTicker, self).__init__(arg)
        self.binance_service = BinanceService()
    # @jwt_required
    def get(self):
        """ Get Latest Price from Binance """
        tickers, code = self.binance_service.getAllTickers()
        if code != 0:
            return {"status": "error", "data": ""}, 400
        return {"status": "success", "data": tickers}, 201
# Request schema for the placeOrder endpoint; used for swagger documentation
# (the field presence checks are performed manually in the handler).
binance_place_order = api.model(
    "OrderModel",
    {
        "symbol": fields.String(description="Symbol", required=True),
        "side": fields.String(description="Buy or Sell", required=True),
        "price": fields.String(description="Unit Price", required=True),
        "orderType": fields.String(description="Market or Limit", required=True),
        "pairQuantity": fields.String(description="pairQuantity", required=True),
    },
)
@api.route("/binance/placeOrder")
class BinancePlaceOrder(Resource):
    """Resource placing a market order on Binance for the JWT-authenticated
    user. Request fields are validated manually against the OrderModel."""
    def __init__(self, arg):
        super(BinancePlaceOrder, self).__init__(arg)
        self.jwt_service = JWTService()
        self.user_service = UserService()
        self.wallet_service = WalletService()
        self.binance_service = BinanceService()
        self.order_service = OrderService()
        self.user_helper = UserHelper()
    @api.expect(binance_place_order)
    @jwt_required()
    def post(self):
        """ Binance Place Order API - - WIP DO NOT USE"""
        email = get_jwt_identity()
        user = self.user_helper.constructFullUserDetails(email)
        if "symbol" not in request.json or request.json["symbol"] == "":
            return api.abort(
                400, "Symbol should not be empty.", status="error", status_code=400
            )
        symbol = request.json["symbol"]
        if "side" not in request.json or request.json["side"] == "":
            return api.abort(
                400, "Side should be not be empty.", status="error", status_code=400
            )
        side = request.json["side"]
        # Fix: the original tested the literal string "side" against the list,
        # which is always true and rejected every request with 400.
        if side not in ["BUY", "SELL"]:
            return api.abort(
                400, "Side should be BUY Or SELL.", status="error", status_code=400
            )
        if "orderType" not in request.json or request.json["orderType"] == "":
            return api.abort(
                400, "Order Type should be not be empty.", status="error", status_code=400
            )
        order_type = request.json["orderType"]
        # Fix: same literal-vs-variable bug as above ('"order_type" not in').
        if order_type not in ["MARKET"]:
            return api.abort(
                400, "Only MARKET type order is supported.", status="error", status_code=400
            )
        if "pairQuantity" not in request.json or request.json["pairQuantity"] == "":
            return api.abort(
                400, "Quantity should not be empty.", status="error", status_code=400
            )
        pair_quantity = request.json["pairQuantity"]
        if "price" not in request.json or request.json["price"] == "":
            return api.abort(
                400, "Price should not be empty.", status="error", status_code=400
            )
        price = request.json["price"]
        order_object = {
            "user": user,
            "symbol": symbol,
            "price": price,
            "pairQuantity": pair_quantity
        }
        response = self.order_service.place_order(order_object)
        if response.status == -1:
            return {"status": "failure", "message": response.message}, 400
        else:
            return {"status": "success", "message": response.message}, 200
from flask import request
# Flask-RESTX namespace grouping all Binance-related endpoints.
api = Namespace("Binance", description="Binance related APIs")
from main.services.jwt_service import JWTService
from main.services.user_service import UserService
from main.services.user_helper import UserHelper
from main.services.wallet_service import WalletService
from main.services.order_service import OrderService
from main.services.binance_service import BinanceService
from flask_jwt_extended import (
get_jwt_identity,
jwt_required,
)
@api.route("/binance/getLatestPrice")
class BinanceTicker(Resource):
    """Resource exposing the latest ticker prices fetched from Binance."""
    def __init__(self, arg):
        super(BinanceTicker, self).__init__(arg)
        self.binance_service = BinanceService()
    # @jwt_required
    def get(self):
        """ Get Latest Price from Binance """
        tickers, code = self.binance_service.getAllTickers()
        if code != 0:
            return {"status": "error", "data": ""}, 400
        return {"status": "success", "data": tickers}, 201
# Request schema for the placeOrder endpoint; used for swagger documentation
# (the field presence checks are performed manually in the handler).
binance_place_order = api.model(
    "OrderModel",
    {
        "symbol": fields.String(description="Symbol", required=True),
        "side": fields.String(description="Buy or Sell", required=True),
        "price": fields.String(description="Unit Price", required=True),
        "orderType": fields.String(description="Market or Limit", required=True),
        "pairQuantity": fields.String(description="pairQuantity", required=True),
    },
)
@api.route("/binance/placeOrder")
class BinancePlaceOrder(Resource):
    """Resource placing a market order on Binance for the JWT-authenticated
    user. Request fields are validated manually against the OrderModel."""
    def __init__(self, arg):
        super(BinancePlaceOrder, self).__init__(arg)
        self.jwt_service = JWTService()
        self.user_service = UserService()
        self.wallet_service = WalletService()
        self.binance_service = BinanceService()
        self.order_service = OrderService()
        self.user_helper = UserHelper()
    @api.expect(binance_place_order)
    @jwt_required()
    def post(self):
        """ Binance Place Order API - - WIP DO NOT USE"""
        email = get_jwt_identity()
        user = self.user_helper.constructFullUserDetails(email)
        if "symbol" not in request.json or request.json["symbol"] == "":
            return api.abort(
                400, "Symbol should not be empty.", status="error", status_code=400
            )
        symbol = request.json["symbol"]
        if "side" not in request.json or request.json["side"] == "":
            return api.abort(
                400, "Side should be not be empty.", status="error", status_code=400
            )
        side = request.json["side"]
        # Fix: the original tested the literal string "side" against the list,
        # which is always true and rejected every request with 400.
        if side not in ["BUY", "SELL"]:
            return api.abort(
                400, "Side should be BUY Or SELL.", status="error", status_code=400
            )
        if "orderType" not in request.json or request.json["orderType"] == "":
            return api.abort(
                400, "Order Type should be not be empty.", status="error", status_code=400
            )
        order_type = request.json["orderType"]
        # Fix: same literal-vs-variable bug as above ('"order_type" not in').
        if order_type not in ["MARKET"]:
            return api.abort(
                400, "Only MARKET type order is supported.", status="error", status_code=400
            )
        if "pairQuantity" not in request.json or request.json["pairQuantity"] == "":
            return api.abort(
                400, "Quantity should not be empty.", status="error", status_code=400
            )
        pair_quantity = request.json["pairQuantity"]
        if "price" not in request.json or request.json["price"] == "":
            return api.abort(
                400, "Price should not be empty.", status="error", status_code=400
            )
        price = request.json["price"]
        order_object = {
            "user": user,
            "symbol": symbol,
            "price": price,
            "pairQuantity": pair_quantity
        }
        response = self.order_service.place_order(order_object)
        if response.status == -1:
            return {"status": "failure", "message": response.message}, 400
        else:
            return {"status": "success", "message": response.message}, 200
from py42._compat import str
from py42._compat import UserDict
from py42._compat import UserList
from py42.clients.settings import check_lock
from py42.clients.settings import SettingProperty
from py42.clients.settings import show_change
from py42.clients.settings._converters import bool_to_str
from py42.clients.settings._converters import days_to_minutes
from py42.clients.settings._converters import minutes_to_days
from py42.clients.settings._converters import str_to_bool
from py42.exceptions import Py42Error
# Pre-constructed exceptions for destination-management failures; built once
# at module scope so call sites can raise them directly.
invalid_destination_error = Py42Error(
    u"Invalid destination guid or destination not offered to device's Org."
)
destination_not_added_error = Py42Error(
    u"Destination is not added to device, unable to lock."
)
class DeviceSettingsDefaults(UserDict, object):
    """Class used for managing an Organization's Device Default settings. Also acts as a
    base class for `DeviceSettings` to manage individual device settings."""
    def __init__(self, device_dict, org_settings):
        # `data` is the dict exposed through the UserDict interface; `changes`
        # is shared with the parent org settings so SettingProperty writes are
        # recorded centrally.
        self.data = device_dict
        self._org_settings = org_settings
        self.changes = org_settings.changes
        self._destinations = org_settings.data[u"settings"][u"destinations"]
        self.data[u"settings"] = {
            u"serviceBackupConfig": self.data[u"serviceBackupConfig"]
        }
        bs = self.data[u"serviceBackupConfig"][u"backupConfig"][u"backupSets"]
        self.backup_sets = self._extract_backup_sets(bs)
    def _extract_backup_sets(self, backup_sets):
        # Normalize the server's polymorphic "backupSets" payload into a plain
        # list of BackupSet helpers. The shape depends on the number of sets
        # configured and on whether the set count is locked.
        if isinstance(backup_sets, dict):  # number of sets are locked
            backup_sets = backup_sets["backupSet"]
            if isinstance(backup_sets, dict):  # there's only one set configured
                return [BackupSet(self, backup_sets)]
            elif isinstance(backup_sets, list):
                return [BackupSet(self, bs) for bs in backup_sets]
            else:
                # unexpected payload shape — surface it rather than guessing
                raise Py42Error("Unable to extract backup sets: {}".format(backup_sets))
        else:
            return [BackupSet(self, bs) for bs in backup_sets]
    @property
    def available_destinations(self):
        """Returns a dict of destinations available to be used by devices. Dict keys are
        destination guids and values are destination names.
        """
        return {d[u"guid"]: d[u"destinationName"] for d in self._destinations}
    warning_email_enabled = SettingProperty(
        name=u"warning_email_enabled",
        location=[u"settings", u"serviceBackupConfig", u"warningEmailEnabled"],
        get_converter=str_to_bool,
        set_converter=bool_to_str,
    )
    """Determines if backup "warning" threshold email alerts are configured for this device."""
    critical_email_enabled = SettingProperty(
        name=u"critical_email_enabled",
        location=[u"settings", u"serviceBackupConfig", u"severeEmailEnabled"],
        get_converter=str_to_bool,
        set_converter=bool_to_str,
    )
    """Determines if backup "critical" threshold email alerts are configured for this device."""
    warning_alert_days = SettingProperty(
        name=u"warning_alert_days",
        location=[u"settings", u"serviceBackupConfig", u"minutesUntilWarning"],
        get_converter=minutes_to_days,
        set_converter=days_to_minutes,
    )
    """The number of days a device can go without any backup activity before
    "warning" alert threshold is passed.
    """
    critical_alert_days = SettingProperty(
        name=u"critical_alert_days",
        location=[u"settings", u"serviceBackupConfig", u"minutesUntilSevere"],
        get_converter=minutes_to_days,
        set_converter=days_to_minutes,
    )
    """The number of days a device can go without any backup activity before "critical"
    alert threshold is passed.
    """
    backup_status_email_enabled = SettingProperty(
        name=u"backup_status_email_enabled",
        location=[u"settings", u"serviceBackupConfig", u"backupStatusEmailEnabled"],
        get_converter=str_to_bool,
        set_converter=bool_to_str,
    )
    """Determines if the regularly scheduled backup status email is enabled."""
    backup_status_email_frequency_days = SettingProperty(
        name=u"backup_status_email_frequency_days",
        location=[
            u"settings",
            u"serviceBackupConfig",
            u"backupStatusEmailFreqInMinutes",
        ],
        get_converter=minutes_to_days,
        set_converter=days_to_minutes,
    )
    """Determines the frequency of the regularly scheduled backup status email."""
    def __repr__(self):
        return u"<DeviceSettingsDefaults: org_id: {}>".format(self._org_settings.org_id)
class DeviceSettings(DeviceSettingsDefaults):
    """Class used to manage an individual device's settings."""
    def __init__(self, device_dict):
        # NOTE: the base __init__ is deliberately not called — unlike the
        # org-defaults base, changes are tracked locally and the destination
        # list comes from the device payload itself.
        self.changes = {}
        self.data = device_dict
        self._destinations = device_dict[u"availableDestinations"]
        bs = self.data[u"settings"][u"serviceBackupConfig"][u"backupConfig"][
            u"backupSets"
        ]
        self.backup_sets = self._extract_backup_sets(bs)
        """List of :class:`BackupSet` objects used to manage this device's backup set configurations."""
    @property
    def computer_id(self):
        """Identifier of this device. Read-only."""
        return self.data[u"computerId"]
    @property
    def device_id(self):
        """Identifier of this device (alias of `.computer_id`). Read only."""
        return self.computer_id
    @property
    def guid(self):
        """Globally unique identifier of this device. Read-only."""
        return self.data[u"guid"]
    @property
    def org_id(self):
        """Identifier of the organization this device belongs to. Read-only."""
        return self.data[u"orgId"]
    @property
    def user_id(self):
        """Identifier of the user this device belongs to. Read-only."""
        return self.data[u"userId"]
    @property
    def version(self):
        """Latest reported Code42 client version number for this device. Read-only."""
        return self.data[u"version"]
    name = SettingProperty(name=u"name", location=[u"name"])
    """Name for this device."""
    external_reference = SettingProperty(
        name=u"external_reference", location=[u"computerExtRef"]
    )
    """External reference field for this device."""
    notes = SettingProperty(name=u"notes", location=[u"notes"])
    """Notes field for this device."""
    def __repr__(self):
        return u"<DeviceSettings: guid: {}, name: {}>".format(
            self.data[u"guid"], self.data[u"name"]
        )
class BackupSet(UserDict, object):
"""Helper class for managing device backup sets and Org device default backup sets."""
def __init__(self, settings_manager, backup_set_dict):
self._manager = settings_manager
self._changes = settings_manager.changes
self.data = backup_set_dict
includes, excludes = self._extract_file_selection_lists()
regex_excludes = self._extract_regex_exclusions()
self._included_files = TrackedFileSelectionList(
self, u"included_files", includes, self._changes
)
self._excluded_files = TrackedFileSelectionList(
self, u"excluded_files", excludes, self._changes
)
self._filename_exclusions = TrackedFileSelectionList(
self, u"filename_exclusions", regex_excludes, self._changes
)
self._orig_destinations = self.destinations
def _extract_file_selection_lists(self):
"""Converts the file selection portion of the settings dict ("pathset") into two
lists of just paths, `included` and `excluded`.
The "pathset" object is a different shape depending on how many paths it
contains and whether its locked or not:
No paths: `[{"@cleared": "true", "@os": "Linux"}]`
No paths locked: `{'@locked': 'true', 'paths': {'@cleared': 'true', '@os': 'Linux'}}`
One path: `[{"path": {"@include": "C:/"}, "@os": "Linux"}]`
One path locked: `{'@locked': 'true', 'paths': {'@os': 'Linux', 'path': {'@include': 'C:/'}}}`
One+ paths: `[{"path": [{"@include": "C:/Users/"},{"@exclude": "C:/Users/Admin/"},],"@os": "Linux"}]`
One+ paths locked: `{'@locked': 'true', 'paths': {'@os': 'Linux', 'path': [{'@include': 'C:/Users/'}, {'@exclude': 'C:/Users/Admin/'}]}}`
"""
pathset = self.data[u"backupPaths"][u"pathset"]
if isinstance(pathset, dict): # pathset is locked
path_list = pathset[u"paths"].get(u"path")
else:
path_list = pathset[0].get(u"path")
# no paths selected
if path_list is None:
return [], []
# one path selected
if isinstance(path_list, dict):
path_list = [path_list]
includes = [p[u"@include"] for p in path_list if u"@include" in p]
excludes = [p[u"@exclude"] for p in path_list if u"@exclude" in p]
return includes, excludes
def _extract_regex_exclusions(self):
"""Converts the filename exclusion portion ("excludeUser") of the settings dict
into a simple list of regex patterns.
The "excludeUser" object is a different shape based on the number of exclusion
patterns present and whether the setting is locked or not:
No exclusions: `[{"windows": [], "linux": [], "macintosh": []}]`
No exclusions locked: `{'@locked': 'true', 'patternList': {'windows': [], 'macintosh': [], 'linux': []}}`
One exclusion: `[{"windows": [], "pattern": {"@regex": ".*"}, "linux": [], "macintosh": []}]`
One exclusion locked: `{'@locked': 'true', 'patternList': {'pattern': {'@regex': '.*'}, 'windows': [], 'macintosh': [], 'linux': []}}`
One+ exclusions: `[{"windows": [], "pattern": [{"@regex": ".*1"}, {"@regex": ".*2"}],"linux": [],"macintosh": []}]
One+ exclusion locked: `{'@locked': 'true', 'patternList': {'pattern': [{'@regex': '.*1'}, {'@regex': '.*2'}], 'windows': [], 'macintosh': [], 'linux': []}}`
"""
exclude_user = self.data[u"backupPaths"][u"excludeUser"]
if isinstance(exclude_user, dict): # exclusions are locked
pattern_list = exclude_user[u"patternList"].get(u"pattern")
else:
pattern_list = exclude_user[0].get(u"pattern")
if not pattern_list:
return []
if isinstance(pattern_list, dict):
pattern_list = [pattern_list]
return [p[u"@regex"] for p in pattern_list]
def _build_file_selection(self):
"""Converts the user-friendly lists of included and excluded file paths back
into a "pathset" object the api expects. Called whenever one of the file selection
property lists (`.included_files`, `.excluded_files`) is modified.
"""
paths = {u"@os": u"Linux", u"path": []}
if not self._included_files: # ignore excluded values if nothing is included
paths[u"@cleared"] = u"true"
else:
path_list = []
for path in self._included_files:
path_list.append({u"@include": path, u"@und": u"false"})
for path in self._excluded_files:
path_list.append({u"@exclude": path, u"@und": u"false"})
paths[u"path"] = path_list
paths[u"@cleared"] = u"false"
self.data[u"backupPaths"][u"pathset"] = {u"paths": paths}
def _build_regex_exclusions(self):
"""Converts the user-friendly list of filename exclusions back into the
"excludeUser" object the api expects. Called whenever the `.filename_exclusions`
property list is modified.
"""
patterns = []
for regex in self._filename_exclusions:
patterns.append({u"@regex": regex})
user_exclude_dict = {
u"patternList": {
u"pattern": patterns,
u"windows": {u"pattern": []},
u"macintosh": {u"pattern": []},
u"linux": {u"pattern": []},
}
}
self.data[u"backupPaths"][u"excludeUser"] = user_exclude_dict
@property
def locked(self):
"""Indicates whether the backup set as a whole is locked. If True, individual
settings for this backup set (except for Destination settings), cannot be modified.
"""
return u"@locked" in self.data and str_to_bool(self.data[u"@locked"])
@property
def included_files(self):
"""Returns the list of files/folders included in the backup selection. Items can
be added/removed from this list via normal list methods, or assigning a new list
of files to this attribute to replace the existing one.
"""
return self._included_files
@included_files.setter
def included_files(self, value):
if isinstance(value, (list, tuple)):
self._included_files.clear()
self._included_files.extend(value)
else:
raise AttributeError(u"included files must be a list/tuple.")
@property
def excluded_files(self):
"""Returns the list of files/folders excluded from the backup selection. Items can
be added/removed from this list via normal list methods, or assigning a new list
of files to this attribute to replace the existing one.
"""
return self._excluded_files
@excluded_files.setter
def excluded_files(self, value):
if isinstance(value, (list, tuple)):
self._excluded_files.clear()
self._excluded_files.extend(value)
else:
raise AttributeError(u"excluded files must be a list/tuple.")
@property
def filename_exclusions(self):
"""Returns the list of regex patterns used to exclude file paths from the backup
selection. Items can be added/removed from this list via normal list methods,
or assigning a new list of patterns to this attribute to replace the existing
one.
"""
return self._filename_exclusions
@filename_exclusions.setter
def filename_exclusions(self, value):
if isinstance(value, (list, tuple)):
self._filename_exclusions.clear()
self._filename_exclusions.extend(value)
else:
raise AttributeError(u"filename exclusions must be a list/tuple.")
@property
def destinations(self):
"""Returns a dict of the destinations used for backup for the backup set. Dict
keys are the destination guids, values are the destination names.
"""
destination_dict = {}
if u"@cleared" in self.data[u"destinations"]:
return destination_dict
for d in self.data[u"destinations"]:
guid = d[u"@id"]
dest_name = self._manager.available_destinations[guid]
if u"@locked" in d:
dest_name = dest_name + u" <LOCKED>"
destination_dict[guid] = dest_name
return destination_dict
def add_destination(self, destination_guid):
"""Adds a destination to be used by this backup set. Raises a :class:`Py42Error` if
the supplied destination guid is not available to the parent device/org.
Args:
destination_guid (str, int): The globally unique identifier of the
destination to be added.
"""
destination_guid = str(destination_guid)
if destination_guid in self._manager.available_destinations:
if destination_guid not in self.destinations:
if not self.destinations: # no destinations
self.data[u"destinations"] = [{u"@id": destination_guid}]
else:
self.data[u"destinations"].append({u"@id": destination_guid})
self._changes[u"destinations"] = show_change(
self._orig_destinations, self.destinations
)
else:
raise invalid_destination_error
def remove_destination(self, destination_guid):
"""Removes a destination from use by this backup set.
Args:
destination_guid (str, int): The globally unique identifier of the
destination to be removed.
"""
destination_guid = str(destination_guid)
self._raise_if_invalid_destination(destination_guid)
if destination_guid in self.destinations:
for d in self.data[u"destinations"]:
if d[u"@id"] == destination_guid:
self.data[u"destinations"].remove(d)
if not self.data[u"destinations"]: # all destinations removed
self.data[u"destinations"] = {u"@cleared": u"true"}
self._changes[u"destinations"] = show_change(
self._orig_destinations, self.destinations
)
def lock_destination(self, destination_guid):
"""Locks an in-use destination, disallowing the device owner from removing this
destination from their backup. Raises a :class:`Py42Error` if the supplied destination
guid is not in use on this backup set, or not available to the parent device/org.
"""
destination_guid = str(destination_guid)
if destination_guid in self._manager.available_destinations:
if destination_guid not in self.destinations:
raise destination_not_added_error
else:
for d in self.data[u"destinations"]:
if d[u"@id"] == destination_guid:
d[u"@locked"] = u"true"
self._changes[u"destinations"] = show_change(
self._orig_destinations, self.destinations
)
else:
raise invalid_destination_error
def unlock_destination(self, destination_guid):
"""Unlocks an in-use destination, allowing the device owner to remove this
destination from their backup. Raises a :class:`Py42Error` if the supplied destination
guid is not in use on this backup set, or not available to the parent device/org.
"""
destination_guid = str(destination_guid)
self._raise_if_invalid_destination(destination_guid)
if destination_guid not in self.destinations:
raise destination_not_added_error
else:
for d in self.data[u"destinations"]:
if d[u"@id"] == destination_guid:
del d[u"@locked"]
self._changes[u"destinations"] = show_change(
self._orig_destinations, self.destinations
)
def _raise_if_invalid_destination(self, destination_guid):
if destination_guid not in self._manager.available_destinations:
raise invalid_destination_error
def __repr__(self):
if isinstance(self.data[u"name"], dict): # name setting locked
name = self.data[u"name"][u"#text"]
else:
name = self.data[u"name"]
return u"<BackupSet: id: {}, name: '{}'>".format(self.data[u"@id"], name)
def __str__(self):
return str(dict(self))
class TrackedFileSelectionList(UserList, object):
"""Helper class to track modifications to file selection lists."""
def __init__(self, backup_set, name, _list, changes_dict):
self.backup_set = backup_set
self.name = name
self.orig = list(_list)
self.data = _list
self._changes = changes_dict
def register_change(self):
self.backup_set._build_file_selection()
self.backup_set._build_regex_exclusions()
if set(self.orig) != set(self.data):
self._changes[self.name] = show_change(self.orig, self.data)
elif self.name in self._changes:
del self._changes[self.name]
@check_lock("backup_set")
def append(self, item):
self.data.append(item)
self.register_change()
@check_lock("backup_set")
def clear(self):
self.data.clear()
self.register_change()
@check_lock("backup_set")
def extend(self, other):
self.data.extend(other)
self.register_change()
@check_lock("backup_set")
def insert(self, i, item):
self.data.insert(i, item)
self.register_change()
@check_lock("backup_set")
def pop(self, index=-1):
value = self.data.pop(index)
self.register_change()
return value
@check_lock("backup_set")
def remove(self, value):
self.data.remove(value)
self.register_change() | src/py42/clients/settings/device_settings.py | from py42._compat import str
from py42._compat import UserDict
from py42._compat import UserList
from py42.clients.settings import check_lock
from py42.clients.settings import SettingProperty
from py42.clients.settings import show_change
from py42.clients.settings._converters import bool_to_str
from py42.clients.settings._converters import days_to_minutes
from py42.clients.settings._converters import minutes_to_days
from py42.clients.settings._converters import str_to_bool
from py42.exceptions import Py42Error
invalid_destination_error = Py42Error(
u"Invalid destination guid or destination not offered to device's Org."
)
destination_not_added_error = Py42Error(
u"Destination is not added to device, unable to lock."
)
class DeviceSettingsDefaults(UserDict, object):
"""Class used for managing an Organization's Device Default settings. Also acts as a
base class for `DeviceSettings` to manage individual device settings."""
def __init__(self, device_dict, org_settings):
self.data = device_dict
self._org_settings = org_settings
self.changes = org_settings.changes
self._destinations = org_settings.data[u"settings"][u"destinations"]
self.data[u"settings"] = {
u"serviceBackupConfig": self.data[u"serviceBackupConfig"]
}
bs = self.data[u"serviceBackupConfig"][u"backupConfig"][u"backupSets"]
self.backup_sets = self._extract_backup_sets(bs)
def _extract_backup_sets(self, backup_sets):
if isinstance(backup_sets, dict): # number of sets are locked
backup_sets = backup_sets["backupSet"]
if isinstance(backup_sets, dict): # there's only one set configured
return [BackupSet(self, backup_sets)]
elif isinstance(backup_sets, list):
return [BackupSet(self, bs) for bs in backup_sets]
else:
raise Py42Error("Unable to extract backup sets: {}".format(backup_sets))
else:
return [BackupSet(self, bs) for bs in backup_sets]
@property
def available_destinations(self):
"""Returns a dict of destinations available to be used by devices. Dict keys are
destination guids and values are destination names.
"""
return {d[u"guid"]: d[u"destinationName"] for d in self._destinations}
warning_email_enabled = SettingProperty(
name=u"warning_email_enabled",
location=[u"settings", u"serviceBackupConfig", u"warningEmailEnabled"],
get_converter=str_to_bool,
set_converter=bool_to_str,
)
"""Determines if backup "warning" threshold email alerts are configured for this device."""
critical_email_enabled = SettingProperty(
name=u"critical_email_enabled",
location=[u"settings", u"serviceBackupConfig", u"severeEmailEnabled"],
get_converter=str_to_bool,
set_converter=bool_to_str,
)
"""Determines if backup "critical" threshold email alerts are configured for this device."""
warning_alert_days = SettingProperty(
name=u"warning_alert_days",
location=[u"settings", u"serviceBackupConfig", u"minutesUntilWarning"],
get_converter=minutes_to_days,
set_converter=days_to_minutes,
)
"""The number of days a device can go without any backup activity before
"warning" alert threshold is passed.
"""
critical_alert_days = SettingProperty(
name=u"critical_alert_days",
location=[u"settings", u"serviceBackupConfig", u"minutesUntilSevere"],
get_converter=minutes_to_days,
set_converter=days_to_minutes,
)
"""The number of days a device can go without any backup activity before "warning"
alert threshold is passed.
"""
backup_status_email_enabled = SettingProperty(
name=u"backup_status_email_enabled",
location=[u"settings", u"serviceBackupConfig", u"backupStatusEmailEnabled"],
get_converter=str_to_bool,
set_converter=bool_to_str,
)
"""Determines if the regularly scheduled backup status email is enabled."""
backup_status_email_frequency_days = SettingProperty(
name=u"backup_status_email_frequency_days",
location=[
u"settings",
u"serviceBackupConfig",
u"backupStatusEmailFreqInMinutes",
],
get_converter=minutes_to_days,
set_converter=days_to_minutes,
)
"""Determines the frequency of the regularly scheduled backup status email."""
def __repr__(self):
return u"<DeviceSettingsDefaults: org_id: {}>".format(self._org_settings.org_id)
class DeviceSettings(DeviceSettingsDefaults):
"""Class used to manage an individual device's settings."""
def __init__(self, device_dict):
self.changes = {}
self.data = device_dict
self._destinations = device_dict[u"availableDestinations"]
bs = self.data[u"settings"][u"serviceBackupConfig"][u"backupConfig"][
u"backupSets"
]
self.backup_sets = self._extract_backup_sets(bs)
"""List of :class:`BackupSet` objects used to manage this device's backup set configurations."""
@property
def computer_id(self):
"""Identifier of this device. Read-only."""
return self.data[u"computerId"]
@property
def device_id(self):
"""Identifier of this device (alias of `.computer_id`). Read only."""
return self.computer_id
@property
def guid(self):
"""Globally unique identifier of this device. Read-only."""
return self.data[u"guid"]
@property
def org_id(self):
"""Identifier of the organization this device belongs to. Read-only."""
return self.data[u"orgId"]
@property
def user_id(self):
"""Identifier of the user this device belongs to. Read-only."""
return self.data[u"userId"]
@property
def version(self):
"""Latest reported Code42 client version number for this device. Read-only."""
return self.data[u"version"]
name = SettingProperty(name=u"name", location=[u"name"])
"""Name for this device."""
external_reference = SettingProperty(
name=u"external_reference", location=[u"computerExtRef"]
)
"""External reference field for this device."""
notes = SettingProperty(name=u"notes", location=[u"notes"])
"""Notes field for this device."""
def __repr__(self):
return u"<DeviceSettings: guid: {}, name: {}>".format(
self.data[u"guid"], self.data[u"name"]
)
class BackupSet(UserDict, object):
"""Helper class for managing device backup sets and Org device default backup sets."""
def __init__(self, settings_manager, backup_set_dict):
self._manager = settings_manager
self._changes = settings_manager.changes
self.data = backup_set_dict
includes, excludes = self._extract_file_selection_lists()
regex_excludes = self._extract_regex_exclusions()
self._included_files = TrackedFileSelectionList(
self, u"included_files", includes, self._changes
)
self._excluded_files = TrackedFileSelectionList(
self, u"excluded_files", excludes, self._changes
)
self._filename_exclusions = TrackedFileSelectionList(
self, u"filename_exclusions", regex_excludes, self._changes
)
self._orig_destinations = self.destinations
def _extract_file_selection_lists(self):
"""Converts the file selection portion of the settings dict ("pathset") into two
lists of just paths, `included` and `excluded`.
The "pathset" object is a different shape depending on how many paths it
contains and whether its locked or not:
No paths: `[{"@cleared": "true", "@os": "Linux"}]`
No paths locked: `{'@locked': 'true', 'paths': {'@cleared': 'true', '@os': 'Linux'}}`
One path: `[{"path": {"@include": "C:/"}, "@os": "Linux"}]`
One path locked: `{'@locked': 'true', 'paths': {'@os': 'Linux', 'path': {'@include': 'C:/'}}}`
One+ paths: `[{"path": [{"@include": "C:/Users/"},{"@exclude": "C:/Users/Admin/"},],"@os": "Linux"}]`
One+ paths locked: `{'@locked': 'true', 'paths': {'@os': 'Linux', 'path': [{'@include': 'C:/Users/'}, {'@exclude': 'C:/Users/Admin/'}]}}`
"""
pathset = self.data[u"backupPaths"][u"pathset"]
if isinstance(pathset, dict): # pathset is locked
path_list = pathset[u"paths"].get(u"path")
else:
path_list = pathset[0].get(u"path")
# no paths selected
if path_list is None:
return [], []
# one path selected
if isinstance(path_list, dict):
path_list = [path_list]
includes = [p[u"@include"] for p in path_list if u"@include" in p]
excludes = [p[u"@exclude"] for p in path_list if u"@exclude" in p]
return includes, excludes
def _extract_regex_exclusions(self):
"""Converts the filename exclusion portion ("excludeUser") of the settings dict
into a simple list of regex patterns.
The "excludeUser" object is a different shape based on the number of exclusion
patterns present and whether the setting is locked or not:
No exclusions: `[{"windows": [], "linux": [], "macintosh": []}]`
No exclusions locked: `{'@locked': 'true', 'patternList': {'windows': [], 'macintosh': [], 'linux': []}}`
One exclusion: `[{"windows": [], "pattern": {"@regex": ".*"}, "linux": [], "macintosh": []}]`
One exclusion locked: `{'@locked': 'true', 'patternList': {'pattern': {'@regex': '.*'}, 'windows': [], 'macintosh': [], 'linux': []}}`
One+ exclusions: `[{"windows": [], "pattern": [{"@regex": ".*1"}, {"@regex": ".*2"}],"linux": [],"macintosh": []}]
One+ exclusion locked: `{'@locked': 'true', 'patternList': {'pattern': [{'@regex': '.*1'}, {'@regex': '.*2'}], 'windows': [], 'macintosh': [], 'linux': []}}`
"""
exclude_user = self.data[u"backupPaths"][u"excludeUser"]
if isinstance(exclude_user, dict): # exclusions are locked
pattern_list = exclude_user[u"patternList"].get(u"pattern")
else:
pattern_list = exclude_user[0].get(u"pattern")
if not pattern_list:
return []
if isinstance(pattern_list, dict):
pattern_list = [pattern_list]
return [p[u"@regex"] for p in pattern_list]
def _build_file_selection(self):
"""Converts the user-friendly lists of included and excluded file paths back
into a "pathset" object the api expects. Called whenever one of the file selection
property lists (`.included_files`, `.excluded_files`) is modified.
"""
paths = {u"@os": u"Linux", u"path": []}
if not self._included_files: # ignore excluded values if nothing is included
paths[u"@cleared"] = u"true"
else:
path_list = []
for path in self._included_files:
path_list.append({u"@include": path, u"@und": u"false"})
for path in self._excluded_files:
path_list.append({u"@exclude": path, u"@und": u"false"})
paths[u"path"] = path_list
paths[u"@cleared"] = u"false"
self.data[u"backupPaths"][u"pathset"] = {u"paths": paths}
def _build_regex_exclusions(self):
"""Converts the user-friendly list of filename exclusions back into the
"excludeUser" object the api expects. Called whenever the `.filename_exclusions`
property list is modified.
"""
patterns = []
for regex in self._filename_exclusions:
patterns.append({u"@regex": regex})
user_exclude_dict = {
u"patternList": {
u"pattern": patterns,
u"windows": {u"pattern": []},
u"macintosh": {u"pattern": []},
u"linux": {u"pattern": []},
}
}
self.data[u"backupPaths"][u"excludeUser"] = user_exclude_dict
@property
def locked(self):
"""Indicates whether the backup set as a whole is locked. If True, individual
settings for this backup set (except for Destination settings), cannot be modified.
"""
return u"@locked" in self.data and str_to_bool(self.data[u"@locked"])
@property
def included_files(self):
"""Returns the list of files/folders included in the backup selection. Items can
be added/removed from this list via normal list methods, or assigning a new list
of files to this attribute to replace the existing one.
"""
return self._included_files
@included_files.setter
def included_files(self, value):
if isinstance(value, (list, tuple)):
self._included_files.clear()
self._included_files.extend(value)
else:
raise AttributeError(u"included files must be a list/tuple.")
@property
def excluded_files(self):
"""Returns the list of files/folders excluded from the backup selection. Items can
be added/removed from this list via normal list methods, or assigning a new list
of files to this attribute to replace the existing one.
"""
return self._excluded_files
@excluded_files.setter
def excluded_files(self, value):
if isinstance(value, (list, tuple)):
self._excluded_files.clear()
self._excluded_files.extend(value)
else:
raise AttributeError(u"excluded files must be a list/tuple.")
@property
def filename_exclusions(self):
"""Returns the list of regex patterns used to exclude file paths from the backup
selection. Items can be added/removed from this list via normal list methods,
or assigning a new list of patterns to this attribute to replace the existing
one.
"""
return self._filename_exclusions
@filename_exclusions.setter
def filename_exclusions(self, value):
if isinstance(value, (list, tuple)):
self._filename_exclusions.clear()
self._filename_exclusions.extend(value)
else:
raise AttributeError(u"filename exclusions must be a list/tuple.")
@property
def destinations(self):
"""Returns a dict of the destinations used for backup for the backup set. Dict
keys are the destination guids, values are the destination names.
"""
destination_dict = {}
if u"@cleared" in self.data[u"destinations"]:
return destination_dict
for d in self.data[u"destinations"]:
guid = d[u"@id"]
dest_name = self._manager.available_destinations[guid]
if u"@locked" in d:
dest_name = dest_name + u" <LOCKED>"
destination_dict[guid] = dest_name
return destination_dict
def add_destination(self, destination_guid):
"""Adds a destination to be used by this backup set. Raises a :class:`Py42Error` if
the supplied destination guid is not available to the parent device/org.
Args:
destination_guid (str, int): The globally unique identifier of the
destination to be added.
"""
destination_guid = str(destination_guid)
if destination_guid in self._manager.available_destinations:
if destination_guid not in self.destinations:
if not self.destinations: # no destinations
self.data[u"destinations"] = [{u"@id": destination_guid}]
else:
self.data[u"destinations"].append({u"@id": destination_guid})
self._changes[u"destinations"] = show_change(
self._orig_destinations, self.destinations
)
else:
raise invalid_destination_error
def remove_destination(self, destination_guid):
"""Removes a destination from use by this backup set.
Args:
destination_guid (str, int): The globally unique identifier of the
destination to be removed.
"""
destination_guid = str(destination_guid)
self._raise_if_invalid_destination(destination_guid)
if destination_guid in self.destinations:
for d in self.data[u"destinations"]:
if d[u"@id"] == destination_guid:
self.data[u"destinations"].remove(d)
if not self.data[u"destinations"]: # all destinations removed
self.data[u"destinations"] = {u"@cleared": u"true"}
self._changes[u"destinations"] = show_change(
self._orig_destinations, self.destinations
)
def lock_destination(self, destination_guid):
"""Locks an in-use destination, disallowing the device owner from removing this
destination from their backup. Raises a :class:`Py42Error` if the supplied destination
guid is not in use on this backup set, or not available to the parent device/org.
"""
destination_guid = str(destination_guid)
if destination_guid in self._manager.available_destinations:
if destination_guid not in self.destinations:
raise destination_not_added_error
else:
for d in self.data[u"destinations"]:
if d[u"@id"] == destination_guid:
d[u"@locked"] = u"true"
self._changes[u"destinations"] = show_change(
self._orig_destinations, self.destinations
)
else:
raise invalid_destination_error
def unlock_destination(self, destination_guid):
"""Unlocks an in-use destination, allowing the device owner to remove this
destination from their backup. Raises a :class:`Py42Error` if the supplied destination
guid is not in use on this backup set, or not available to the parent device/org.
"""
destination_guid = str(destination_guid)
self._raise_if_invalid_destination(destination_guid)
if destination_guid not in self.destinations:
raise destination_not_added_error
else:
for d in self.data[u"destinations"]:
if d[u"@id"] == destination_guid:
del d[u"@locked"]
self._changes[u"destinations"] = show_change(
self._orig_destinations, self.destinations
)
def _raise_if_invalid_destination(self, destination_guid):
if destination_guid not in self._manager.available_destinations:
raise invalid_destination_error
def __repr__(self):
if isinstance(self.data[u"name"], dict): # name setting locked
name = self.data[u"name"][u"#text"]
else:
name = self.data[u"name"]
return u"<BackupSet: id: {}, name: '{}'>".format(self.data[u"@id"], name)
def __str__(self):
return str(dict(self))
class TrackedFileSelectionList(UserList, object):
"""Helper class to track modifications to file selection lists."""
def __init__(self, backup_set, name, _list, changes_dict):
self.backup_set = backup_set
self.name = name
self.orig = list(_list)
self.data = _list
self._changes = changes_dict
def register_change(self):
self.backup_set._build_file_selection()
self.backup_set._build_regex_exclusions()
if set(self.orig) != set(self.data):
self._changes[self.name] = show_change(self.orig, self.data)
elif self.name in self._changes:
del self._changes[self.name]
@check_lock("backup_set")
def append(self, item):
self.data.append(item)
self.register_change()
@check_lock("backup_set")
def clear(self):
self.data.clear()
self.register_change()
@check_lock("backup_set")
def extend(self, other):
self.data.extend(other)
self.register_change()
@check_lock("backup_set")
def insert(self, i, item):
self.data.insert(i, item)
self.register_change()
@check_lock("backup_set")
def pop(self, index=-1):
value = self.data.pop(index)
self.register_change()
return value
@check_lock("backup_set")
def remove(self, value):
self.data.remove(value)
self.register_change() | 0.875521 | 0.169681 |
from __future__ import absolute_import, division, print_function
import random
import numpy as np
from glue.core import Data
from glue.logger import logger
from glue.core.layer_artist import LayerArtistBase
from glue.core.exceptions import IncompatibleAttribute
from glue.external.echo import CallbackProperty, keep_in_sync
from astropy import units as u
from astropy.coordinates import SkyCoord
from .state import WWTLayerState
__all__ = ['WWTLayer']
class WWTLayer(LayerArtistBase):
zorder = CallbackProperty()
visible = CallbackProperty()
def __init__(self, wwt_widget, viewer_state, layer_state=None, layer=None):
super(WWTLayer, self).__init__(layer)
self.layer = layer or layer_state.layer
self._wwt_widget = wwt_widget
self._viewer_state = viewer_state
# Set up a state object for the layer artist
self.state = layer_state or WWTLayerState(viewer_state=viewer_state,
layer=self.layer)
if self.state not in self._viewer_state.layers:
self._viewer_state.layers.append(self.state)
self.layer_id = "{0:08x}".format(random.getrandbits(32))
self.markers = self._wwt_widget.markers
self.markers.allocate(self.layer_id)
self.zorder = self.state.zorder
self.visible = self.state.visible
self._sync_zorder = keep_in_sync(self, 'zorder', self.state, 'zorder')
self._sync_visible = keep_in_sync(self, 'visible', self.state, 'visible')
self.state.add_global_callback(self.update)
self.update(force=True)
def clear(self):
self.markers.set(self.layer_id, visible=False)
def update(self, force=False, **kwargs):
logger.debug("updating WWT for %s" % self.layer.label)
coords = {}
if force or 'ra_att' in kwargs or 'dec_att' in kwargs:
try:
ra = self.layer[self.state.ra_att]
except IncompatibleAttribute:
self.disable_invalid_attributes(self.state.ra_att)
return
try:
dec = self.layer[self.state.dec_att]
except IncompatibleAttribute:
self.disable_invalid_attributes(self.state.dec_att)
return
if len(ra) > 0:
try:
coord = SkyCoord(ra, dec, unit=(u.deg, u.deg))
except ValueError as exc:
# self.disable(str(exc))
return
coord_icrs = coord.icrs
ra = coord_icrs.ra.degree
dec = coord_icrs.dec.degree
coords = {'coords': (ra, dec)}
self.enable()
self.markers.set(self.layer_id, color=self.state.color,
alpha=self.state.alpha, visible=self.visible,
zorder=self.zorder, size=self.state.size, **coords)
def center(self, *args):
self.markers.center(self.layer_id)
def redraw(self):
pass | glue_wwt/viewer/layer_artist.py | from __future__ import absolute_import, division, print_function
import random
import numpy as np
from glue.core import Data
from glue.logger import logger
from glue.core.layer_artist import LayerArtistBase
from glue.core.exceptions import IncompatibleAttribute
from glue.external.echo import CallbackProperty, keep_in_sync
from astropy import units as u
from astropy.coordinates import SkyCoord
from .state import WWTLayerState
__all__ = ['WWTLayer']
class WWTLayer(LayerArtistBase):
    """Layer artist mirroring a glue data/subset layer as WWT markers."""

    zorder = CallbackProperty()
    visible = CallbackProperty()

    def __init__(self, wwt_widget, viewer_state, layer_state=None, layer=None):
        super(WWTLayer, self).__init__(layer)

        self.layer = layer or layer_state.layer
        self._wwt_widget = wwt_widget
        self._viewer_state = viewer_state

        # State object describing this layer; register it with the viewer
        # state when it is not already tracked there.
        self.state = layer_state or WWTLayerState(viewer_state=viewer_state,
                                                  layer=self.layer)
        if self.state not in self._viewer_state.layers:
            self._viewer_state.layers.append(self.state)

        # Random hex id ties this artist to its marker set in the widget.
        self.layer_id = "{0:08x}".format(random.getrandbits(32))
        self.markers = self._wwt_widget.markers
        self.markers.allocate(self.layer_id)

        # Keep zorder/visible mirrored between this artist and its state.
        self.zorder = self.state.zorder
        self.visible = self.state.visible
        self._sync_zorder = keep_in_sync(self, 'zorder', self.state, 'zorder')
        self._sync_visible = keep_in_sync(self, 'visible', self.state, 'visible')

        self.state.add_global_callback(self.update)
        self.update(force=True)

    def clear(self):
        """Hide this layer's markers (they stay allocated)."""
        self.markers.set(self.layer_id, visible=False)

    def update(self, force=False, **kwargs):
        """Push style (and, when needed, positions) to the WWT widget."""
        logger.debug("updating WWT for %s" % self.layer.label)

        extra = {}
        if force or 'ra_att' in kwargs or 'dec_att' in kwargs:
            try:
                ra = self.layer[self.state.ra_att]
            except IncompatibleAttribute:
                self.disable_invalid_attributes(self.state.ra_att)
                return
            try:
                dec = self.layer[self.state.dec_att]
            except IncompatibleAttribute:
                self.disable_invalid_attributes(self.state.dec_att)
                return
            if len(ra) > 0:
                try:
                    sky = SkyCoord(ra, dec, unit=(u.deg, u.deg))
                except ValueError:
                    # Unparsable coordinates: keep previous markers untouched.
                    return
                icrs = sky.icrs
                extra = {'coords': (icrs.ra.degree, icrs.dec.degree)}

        self.enable()
        self.markers.set(self.layer_id, color=self.state.color,
                         alpha=self.state.alpha, visible=self.visible,
                         zorder=self.zorder, size=self.state.size, **extra)

    def center(self, *args):
        """Center the WWT view on this layer's markers."""
        self.markers.center(self.layer_id)

    def redraw(self):
        """No-op: the WWT widget handles its own redrawing."""
        pass
pass | 0.680135 | 0.115536 |
import numpy as np
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
from SRNN_layers.spike_neuron import mem_update_adp
b_j0 = 1.6
class spike_cov2D(nn.Module):
    """Spiking 2-D convolutional layer with adaptive-threshold LIF neurons.

    A Conv2d feeds per-pixel adaptive LIF units whose membrane/adaptation
    time constants (``tau_m`` / ``tau_adp``) are learned per output neuron.
    """

    def __init__(self,
                 input_size, output_dim, kernel_size=5, strides=1,
                 pooling_type=None, pool_size=2, pool_strides=2,
                 tauM=20, tauAdp_inital=100, tau_initializer='normal',
                 tauM_inital_std=5, tauAdp_inital_std=5,
                 is_adaptive=1, device='cpu'):
        super(spike_cov2D, self).__init__()
        # input_size = [channels, width, height]
        self.input_size = input_size
        self.input_dim = input_size[0]
        self.output_dim = output_dim
        self.is_adaptive = is_adaptive
        self.device = device
        # Optional pooling stage. Default to None first so ``forward`` can
        # always test ``self.pooling`` (unknown pooling_type falls back to None).
        self.pooling = None
        if pooling_type == 'max':
            self.pooling = nn.MaxPool2d(kernel_size=pool_size, stride=pool_strides, padding=1)
        elif pooling_type == 'avg':
            self.pooling = nn.AvgPool2d(kernel_size=pool_size, stride=pool_strides, padding=1)
        # BUG FIX: original referenced the undefined names ``input_dim`` /
        # ``output_dim`` (the parameters are ``input_size[0]``/``output_dim``
        # stored on self), which raised NameError at construction.
        self.conv = nn.Conv2d(self.input_dim, self.output_dim,
                              kernel_size=kernel_size, stride=strides)
        self.output_size = self.compute_output_size()  # (C, H, W) per sample
        # Per-neuron membrane / adaptation time constants.
        self.tau_m = nn.Parameter(torch.Tensor(self.output_size))
        self.tau_adp = nn.Parameter(torch.Tensor(self.output_size))
        if tau_initializer == 'normal':
            nn.init.normal_(self.tau_m, tauM, tauM_inital_std)
            nn.init.normal_(self.tau_adp, tauAdp_inital, tauAdp_inital_std)

    def parameters(self):
        """Trainable tensors of this layer.

        BUG FIX: the original returned ``self.dense.weight``/``bias`` but no
        ``dense`` attribute exists on this module (AttributeError); the
        convolution's parameters are returned instead.
        """
        return [self.conv.weight, self.conv.bias, self.tau_m, self.tau_adp]

    def set_neuron_state(self, batch_size):
        """Reset membrane potential, spike output and adaptive threshold.

        BUG FIX: ``output_size`` is a shape tuple; the original passed it as a
        single dimension to ``torch.rand``/``zeros``/``ones``, which fails.
        """
        shape = (batch_size,) + tuple(self.output_size)
        self.mem = torch.rand(shape).to(self.device)
        self.spike = torch.zeros(shape).to(self.device)
        self.b = (torch.ones(shape) * b_j0).to(self.device)

    def forward(self, input_spike):
        """One simulation step: convolve incoming spikes, update LIF state.

        Returns ``(membrane potential, output spikes)``.
        """
        d_input = self.conv(input_spike.float())
        if self.pooling is not None:
            # BUG FIX: original called ``self.pool`` which does not exist.
            d_input = self.pooling(d_input)
        self.mem, self.spike, theta, self.b = mem_update_adp(
            d_input, self.mem, self.spike, self.tau_adp, self.b, self.tau_m,
            device=self.device, isAdapt=self.is_adaptive)
        return self.mem, self.spike

    def compute_output_size(self):
        """Shape (C, H, W) produced by conv (+ optional pooling) for one sample."""
        probe = torch.randn([1, self.input_size[0], self.input_size[1], self.input_size[2]])
        out = self.conv(probe)
        if self.pooling is not None:
            out = self.pooling(out)
        return out.shape[1:]
class spike_cov1D(nn.Module):
    """Spiking 1-D convolutional layer with adaptive-threshold LIF neurons.

    A (possibly dilated) Conv1d with "same"-style padding feeds adaptive LIF
    units whose time constants are learned per output neuron.
    """

    def __init__(self,
                 input_size, output_dim, kernel_size=5, strides=1,
                 pooling_type=None, pool_size=2, pool_strides=2, dilation=1,
                 tauM=20, tauAdp_inital=100, tau_initializer='normal',
                 tauM_inital_std=5, tauAdp_inital_std=5,
                 is_adaptive=1, device='cpu'):
        super(spike_cov1D, self).__init__()
        # input_size = [channels, length]
        self.input_size = input_size
        self.input_dim = input_size[0]
        self.output_dim = output_dim
        self.is_adaptive = is_adaptive
        self.dilation = dilation
        self.device = device
        # Optional pooling stage; default None so ``forward`` can always test
        # ``self.pooling`` (unknown pooling_type falls back to no pooling).
        self.pooling = None
        if pooling_type == 'max':
            self.pooling = nn.MaxPool1d(kernel_size=pool_size, stride=pool_strides, padding=1)
        elif pooling_type == 'avg':
            self.pooling = nn.AvgPool1d(kernel_size=pool_size, stride=pool_strides, padding=1)
        # "same"-style padding for the dilated convolution.
        self.conv = nn.Conv1d(self.input_dim, self.output_dim,
                              kernel_size=kernel_size, stride=strides,
                              padding=(int(np.ceil(((kernel_size - 1) * self.dilation) / 2)),),
                              dilation=(self.dilation,))
        self.output_size = self.compute_output_size()  # (channels, length)
        # Per-neuron membrane / adaptation time constants.
        self.tau_m = nn.Parameter(torch.Tensor(self.output_size))
        self.tau_adp = nn.Parameter(torch.Tensor(self.output_size))
        if tau_initializer == 'normal':
            nn.init.normal_(self.tau_m, tauM, tauM_inital_std)
            nn.init.normal_(self.tau_adp, tauAdp_inital, tauAdp_inital_std)

    def parameters(self):
        """Trainable tensors of this layer.

        BUG FIX: the original returned ``self.dense.weight``/``bias`` but no
        ``dense`` attribute exists on this module (AttributeError); the
        convolution's parameters are returned instead.
        """
        return [self.conv.weight, self.conv.bias, self.tau_m, self.tau_adp]

    def set_neuron_state(self, batch_size):
        """Reset membrane potential, spike output and adaptive threshold."""
        channels, length = self.output_size[0], self.output_size[1]
        # (zeros * b_j0 in the original is still zeros — dropped the multiply)
        self.mem = torch.zeros(batch_size, channels, length).to(self.device)
        self.spike = torch.zeros(batch_size, channels, length).to(self.device)
        self.b = (torch.ones(batch_size, channels, length) * b_j0).to(self.device)

    def forward(self, input_spike):
        """One simulation step: convolve incoming spikes, update LIF state.

        Returns ``(membrane potential, output spikes)``.
        """
        d_input = self.conv(input_spike.float())
        if self.pooling is not None:
            d_input = self.pooling(d_input)
        self.mem, self.spike, theta, self.b = mem_update_adp(
            d_input, self.mem, self.spike, self.tau_adp, self.b, self.tau_m,
            device=self.device, isAdapt=self.is_adaptive)
        return self.mem, self.spike

    def compute_output_size(self):
        """Shape (channels, length) produced by conv (+ pooling) for one sample."""
        probe = torch.randn([1, self.input_size[0], self.input_size[1]])
        out = self.conv(probe)
        if self.pooling is not None:
            out = self.pooling(out)
        return out.shape[1:]
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
from SRNN_layers.spike_neuron import mem_update_adp
b_j0 = 1.6
class spike_cov2D(nn.Module):
    """Spiking 2-D convolutional layer with adaptive-threshold LIF neurons.

    A Conv2d feeds per-pixel adaptive LIF units whose membrane/adaptation
    time constants (``tau_m`` / ``tau_adp``) are learned per output neuron.
    """

    def __init__(self,
                 input_size, output_dim, kernel_size=5, strides=1,
                 pooling_type=None, pool_size=2, pool_strides=2,
                 tauM=20, tauAdp_inital=100, tau_initializer='normal',
                 tauM_inital_std=5, tauAdp_inital_std=5,
                 is_adaptive=1, device='cpu'):
        super(spike_cov2D, self).__init__()
        # input_size = [channels, width, height]
        self.input_size = input_size
        self.input_dim = input_size[0]
        self.output_dim = output_dim
        self.is_adaptive = is_adaptive
        self.device = device
        # Optional pooling stage. Default to None first so ``forward`` can
        # always test ``self.pooling`` (unknown pooling_type falls back to None).
        self.pooling = None
        if pooling_type == 'max':
            self.pooling = nn.MaxPool2d(kernel_size=pool_size, stride=pool_strides, padding=1)
        elif pooling_type == 'avg':
            self.pooling = nn.AvgPool2d(kernel_size=pool_size, stride=pool_strides, padding=1)
        # BUG FIX: original referenced the undefined names ``input_dim`` /
        # ``output_dim``, which raised NameError at construction.
        self.conv = nn.Conv2d(self.input_dim, self.output_dim,
                              kernel_size=kernel_size, stride=strides)
        self.output_size = self.compute_output_size()  # (C, H, W) per sample
        # Per-neuron membrane / adaptation time constants.
        self.tau_m = nn.Parameter(torch.Tensor(self.output_size))
        self.tau_adp = nn.Parameter(torch.Tensor(self.output_size))
        if tau_initializer == 'normal':
            nn.init.normal_(self.tau_m, tauM, tauM_inital_std)
            nn.init.normal_(self.tau_adp, tauAdp_inital, tauAdp_inital_std)

    def parameters(self):
        """Trainable tensors of this layer.

        BUG FIX: the original returned ``self.dense.weight``/``bias`` but no
        ``dense`` attribute exists on this module (AttributeError).
        """
        return [self.conv.weight, self.conv.bias, self.tau_m, self.tau_adp]

    def set_neuron_state(self, batch_size):
        """Reset membrane potential, spike output and adaptive threshold.

        BUG FIX: ``output_size`` is a shape tuple; the original passed it as a
        single dimension to ``torch.rand``/``zeros``/``ones``, which fails.
        """
        shape = (batch_size,) + tuple(self.output_size)
        self.mem = torch.rand(shape).to(self.device)
        self.spike = torch.zeros(shape).to(self.device)
        self.b = (torch.ones(shape) * b_j0).to(self.device)

    def forward(self, input_spike):
        """One simulation step: convolve incoming spikes, update LIF state.

        Returns ``(membrane potential, output spikes)``.
        """
        d_input = self.conv(input_spike.float())
        if self.pooling is not None:
            # BUG FIX: original called ``self.pool`` which does not exist.
            d_input = self.pooling(d_input)
        self.mem, self.spike, theta, self.b = mem_update_adp(
            d_input, self.mem, self.spike, self.tau_adp, self.b, self.tau_m,
            device=self.device, isAdapt=self.is_adaptive)
        return self.mem, self.spike

    def compute_output_size(self):
        """Shape (C, H, W) produced by conv (+ optional pooling) for one sample."""
        probe = torch.randn([1, self.input_size[0], self.input_size[1], self.input_size[2]])
        out = self.conv(probe)
        if self.pooling is not None:
            out = self.pooling(out)
        return out.shape[1:]
class spike_cov1D(nn.Module):
    """Spiking 1-D convolutional layer with adaptive-threshold LIF neurons.

    A (possibly dilated) Conv1d with "same"-style padding feeds adaptive LIF
    units whose time constants are learned per output neuron.
    """

    def __init__(self,
                 input_size, output_dim, kernel_size=5, strides=1,
                 pooling_type=None, pool_size=2, pool_strides=2, dilation=1,
                 tauM=20, tauAdp_inital=100, tau_initializer='normal',
                 tauM_inital_std=5, tauAdp_inital_std=5,
                 is_adaptive=1, device='cpu'):
        super(spike_cov1D, self).__init__()
        # input_size = [channels, length]
        self.input_size = input_size
        self.input_dim = input_size[0]
        self.output_dim = output_dim
        self.is_adaptive = is_adaptive
        self.dilation = dilation
        self.device = device
        # Optional pooling stage; default None so ``forward`` can always test
        # ``self.pooling`` (unknown pooling_type falls back to no pooling).
        self.pooling = None
        if pooling_type == 'max':
            self.pooling = nn.MaxPool1d(kernel_size=pool_size, stride=pool_strides, padding=1)
        elif pooling_type == 'avg':
            self.pooling = nn.AvgPool1d(kernel_size=pool_size, stride=pool_strides, padding=1)
        # "same"-style padding for the dilated convolution.
        self.conv = nn.Conv1d(self.input_dim, self.output_dim,
                              kernel_size=kernel_size, stride=strides,
                              padding=(int(np.ceil(((kernel_size - 1) * self.dilation) / 2)),),
                              dilation=(self.dilation,))
        self.output_size = self.compute_output_size()  # (channels, length)
        # Per-neuron membrane / adaptation time constants.
        self.tau_m = nn.Parameter(torch.Tensor(self.output_size))
        self.tau_adp = nn.Parameter(torch.Tensor(self.output_size))
        if tau_initializer == 'normal':
            nn.init.normal_(self.tau_m, tauM, tauM_inital_std)
            nn.init.normal_(self.tau_adp, tauAdp_inital, tauAdp_inital_std)

    def parameters(self):
        """Trainable tensors of this layer.

        BUG FIX: the original returned ``self.dense.weight``/``bias`` but no
        ``dense`` attribute exists on this module (AttributeError).
        """
        return [self.conv.weight, self.conv.bias, self.tau_m, self.tau_adp]

    def set_neuron_state(self, batch_size):
        """Reset membrane potential, spike output and adaptive threshold."""
        channels, length = self.output_size[0], self.output_size[1]
        # (zeros * b_j0 in the original is still zeros — dropped the multiply)
        self.mem = torch.zeros(batch_size, channels, length).to(self.device)
        self.spike = torch.zeros(batch_size, channels, length).to(self.device)
        self.b = (torch.ones(batch_size, channels, length) * b_j0).to(self.device)

    def forward(self, input_spike):
        """One simulation step: convolve incoming spikes, update LIF state.

        Returns ``(membrane potential, output spikes)``.
        """
        d_input = self.conv(input_spike.float())
        if self.pooling is not None:
            d_input = self.pooling(d_input)
        self.mem, self.spike, theta, self.b = mem_update_adp(
            d_input, self.mem, self.spike, self.tau_adp, self.b, self.tau_m,
            device=self.device, isAdapt=self.is_adaptive)
        return self.mem, self.spike

    def compute_output_size(self):
        """Shape (channels, length) produced by conv (+ pooling) for one sample."""
        probe = torch.randn([1, self.input_size[0], self.input_size[1]])
        out = self.conv(probe)
        if self.pooling is not None:
            out = self.pooling(out)
        return out.shape[1:]
import logging
import os.path
from driftconfig.relib import TableStore, create_backend
from driftconfig.backends import FileBackend, S3Backend, RedisBackend
from driftconfig.config import get_drift_table_store
# tenant name:
"<org name>-<tier name>-<product name>"
'''
directivegames-superkaiju-LIVENORTH.dg-api.com
directivegames-superkaiju-DEVNORTH.dg-api.com
superkaiju.dg-api.com
directivegames-borkbork
.dg-api.com
'''
# NOTE(review): throwaway Python 2 smoke-test script (print statements) that
# exercises the drift-config table store and its backends end to end; it will
# not run under Python 3 as-is.
logging.basicConfig(level='INFO')
# Local path where the drift config is cached on disk.
config_path = os.path.join(os.path.expanduser("~"), '.drift', 'config')
print "config_path is", config_path
# Set up backends. One on local hard drive, one on S3 and one in Redis
# NOTE(review): the S3Backend(...) instance below is discarded immediately —
# only the create_backend(...) result on the next line is kept.
s3_store = S3Backend('relib-test', 'directive-games', 'eu-west-1')
s3_store = create_backend('s3://relib-test/directive-games')
redis_store = RedisBackend()
# Create an empty config
ts = get_drift_table_store()
# Disabled round-trip experiment: pull the config from S3 and fan it out to
# the other backends, then exit.
# NOTE(review): 'local_store' is referenced inside this block but is only
# defined much further down, so enabling it as-is would raise NameError.
if 0:
s3_store.load(ts)
print "whee got all the config", ts
redis_store.save(ts)
print "now i have dumped all the s3 config into redis"
local_store.save(ts)
print "its also on mny local disk hes"
config_path = os.path.join(os.path.expanduser("~"), '.drift', 'config2')
FileBackend(config_path).save(ts)
import sys
sys.exit(1)
# Load from S3
#s3_store.load(ts)
#s3_store.save(ts)
# Chuck in some data
ts.get_table('domain').add({'domain_name': 'dgnorth', 'display_name': 'Directive Games North', 'origin': 's3://relib-test/directive-games-v2?region=eu-west-1'})
ts.get_table('organizations').add({'organization_name': 'directivegames', 'display_name': 'Directive Games', })
ts.get_table('tiers').add({'tier_name': 'LIVENORTH', 'organization_name': 'directivegames', 'is_live': True})
ts.get_table('tiers').add({'tier_name': 'DEVNORTH', 'organization_name': 'directivegames', 'is_live': False})
ts.get_table('tiers').add({'tier_name': 'DEVEAST', 'organization_name': 'directivegames', 'is_live': False})
ts.get_table('products').add({'product_name': 'superkaiju', 'organization_name': 'directivegames'})
# Register the deployables (services) known on the LIVENORTH tier, then mark
# two of them active.
ts.get_table('deployable_names').add({'tier_name': 'LIVENORTH', 'deployable_name': 'drift-base', 'display_name': 'Drift Core Services', })
ts.get_table('deployable_names').add({'tier_name': 'LIVENORTH', 'deployable_name': 'vignettes', 'display_name': 'Vignettes', })
ts.get_table('deployable_names').add({'tier_name': 'LIVENORTH', 'deployable_name': 'game-server', 'display_name': 'Game Server Management', })
ts.get_table('deployable_names').add({'tier_name': 'LIVENORTH', 'deployable_name': 'themachines-backend', 'display_name': 'The Machines Services', })
ts.get_table('deployable_names').add({'tier_name': 'LIVENORTH', 'deployable_name': 'themachines-admin', 'display_name': 'The Machines Admin Web', })
ts.get_table('deployable_names').add({'tier_name': 'LIVENORTH', 'deployable_name': 'kaleo-web', 'display_name': 'Kaleo Web', })
ts.get_table('deployable_names').add({'tier_name': 'LIVENORTH', 'deployable_name': 'kards-backend', 'display_name': 'Kards Services', })
ts.get_table('deployable_names').add({'tier_name': 'LIVENORTH', 'deployable_name': 'kaleometrics', 'display_name': 'Kaleo Metrics', })
ts.get_table('deployables').add({'tier_name': 'LIVENORTH', 'deployable_name': 'drift-base', 'is_active': True, })
ts.get_table('deployables').add({'tier_name': 'LIVENORTH', 'deployable_name': 'themachines-backend', 'is_active': True, })
# Configure LIVENORTH
from datetime import datetime
# Reserve a batch of tenant names and attach each tenant to the deployables
# on the LIVENORTH and DEVNORTH tiers.
for tenant_name in [
'superkaiju', 'superkaiju-test', 'loadout', 'loadout-test', 'default-livenorth',
'themachines', 'themachines-test', 'themacines-test2', 'nonnib-livenorth',
]:
ts.get_table('tenant-names').add({
'tenant_name': tenant_name,
'organization_name': 'directivegames', 'product_name': 'superkaiju',
'reserved_at': datetime.utcnow().isoformat(),
'reserved_by': 'prezidentbongo',
})
ts.get_table('tenants').add({'tier_name': 'LIVENORTH', 'deployable_name': 'drift-base', 'tenant_name': tenant_name,})
ts.get_table('tenants').add({'tier_name': 'LIVENORTH', 'deployable_name': 'themachines-backend', 'tenant_name': tenant_name,})
ts.get_table('tenants').add({'tier_name': 'DEVNORTH', 'deployable_name': 'drift-base', 'tenant_name': tenant_name,})
# Store locally and cache in Redis
# NOTE(review): get_table('domain') is indexed directly with a key here —
# presumably this table behaves like a single-row mapping; confirm in relib.
domain_name = ts.get_table('domain')['domain_name']
print "DOMAIN NAME IS", domain_name
local_store = create_backend('file://./~/.drift/config/' + domain_name)
print "LOCAL STORE BACKEND IS", local_store, local_store.get_url()
local_store.save_table_store(ts)
s3_store.save_table_store(ts)
#redis_store.save(ts)
# Read the config back from disk to sanity-check the round trip.
ts = local_store.load_table_store()
print "whee got ts", ts
'''
TODO: unit test failed testing:
- default values were overriding actual input, not vice versa. its fixed though.
- remove() function not tested.
- backend url functionality not tested.
-
''' | scripts/testconfig.py | import logging
import os.path
from driftconfig.relib import TableStore, create_backend
from driftconfig.backends import FileBackend, S3Backend, RedisBackend
from driftconfig.config import get_drift_table_store
# tenant name:
"<org name>-<tier name>-<product name>"
'''
directivegames-superkaiju-LIVENORTH.dg-api.com
directivegames-superkaiju-DEVNORTH.dg-api.com
superkaiju.dg-api.com
directivegames-borkbork
.dg-api.com
'''
# NOTE(review): throwaway Python 2 smoke-test script (print statements) that
# exercises the drift-config table store and its backends end to end; it will
# not run under Python 3 as-is.
logging.basicConfig(level='INFO')
# Local path where the drift config is cached on disk.
config_path = os.path.join(os.path.expanduser("~"), '.drift', 'config')
print "config_path is", config_path
# Set up backends. One on local hard drive, one on S3 and one in Redis
# NOTE(review): the S3Backend(...) instance below is discarded immediately —
# only the create_backend(...) result on the next line is kept.
s3_store = S3Backend('relib-test', 'directive-games', 'eu-west-1')
s3_store = create_backend('s3://relib-test/directive-games')
redis_store = RedisBackend()
# Create an empty config
ts = get_drift_table_store()
# Disabled round-trip experiment: pull the config from S3 and fan it out to
# the other backends, then exit.
# NOTE(review): 'local_store' is referenced inside this block but is only
# defined much further down, so enabling it as-is would raise NameError.
if 0:
s3_store.load(ts)
print "whee got all the config", ts
redis_store.save(ts)
print "now i have dumped all the s3 config into redis"
local_store.save(ts)
print "its also on mny local disk hes"
config_path = os.path.join(os.path.expanduser("~"), '.drift', 'config2')
FileBackend(config_path).save(ts)
import sys
sys.exit(1)
# Load from S3
#s3_store.load(ts)
#s3_store.save(ts)
# Chuck in some data
ts.get_table('domain').add({'domain_name': 'dgnorth', 'display_name': 'Directive Games North', 'origin': 's3://relib-test/directive-games-v2?region=eu-west-1'})
ts.get_table('organizations').add({'organization_name': 'directivegames', 'display_name': 'Directive Games', })
ts.get_table('tiers').add({'tier_name': 'LIVENORTH', 'organization_name': 'directivegames', 'is_live': True})
ts.get_table('tiers').add({'tier_name': 'DEVNORTH', 'organization_name': 'directivegames', 'is_live': False})
ts.get_table('tiers').add({'tier_name': 'DEVEAST', 'organization_name': 'directivegames', 'is_live': False})
ts.get_table('products').add({'product_name': 'superkaiju', 'organization_name': 'directivegames'})
# Register the deployables (services) known on the LIVENORTH tier, then mark
# two of them active.
ts.get_table('deployable_names').add({'tier_name': 'LIVENORTH', 'deployable_name': 'drift-base', 'display_name': 'Drift Core Services', })
ts.get_table('deployable_names').add({'tier_name': 'LIVENORTH', 'deployable_name': 'vignettes', 'display_name': 'Vignettes', })
ts.get_table('deployable_names').add({'tier_name': 'LIVENORTH', 'deployable_name': 'game-server', 'display_name': 'Game Server Management', })
ts.get_table('deployable_names').add({'tier_name': 'LIVENORTH', 'deployable_name': 'themachines-backend', 'display_name': 'The Machines Services', })
ts.get_table('deployable_names').add({'tier_name': 'LIVENORTH', 'deployable_name': 'themachines-admin', 'display_name': 'The Machines Admin Web', })
ts.get_table('deployable_names').add({'tier_name': 'LIVENORTH', 'deployable_name': 'kaleo-web', 'display_name': 'Kaleo Web', })
ts.get_table('deployable_names').add({'tier_name': 'LIVENORTH', 'deployable_name': 'kards-backend', 'display_name': 'Kards Services', })
ts.get_table('deployable_names').add({'tier_name': 'LIVENORTH', 'deployable_name': 'kaleometrics', 'display_name': 'Kaleo Metrics', })
ts.get_table('deployables').add({'tier_name': 'LIVENORTH', 'deployable_name': 'drift-base', 'is_active': True, })
ts.get_table('deployables').add({'tier_name': 'LIVENORTH', 'deployable_name': 'themachines-backend', 'is_active': True, })
# Configure LIVENORTH
from datetime import datetime
# Reserve a batch of tenant names and attach each tenant to the deployables
# on the LIVENORTH and DEVNORTH tiers.
for tenant_name in [
'superkaiju', 'superkaiju-test', 'loadout', 'loadout-test', 'default-livenorth',
'themachines', 'themachines-test', 'themacines-test2', 'nonnib-livenorth',
]:
ts.get_table('tenant-names').add({
'tenant_name': tenant_name,
'organization_name': 'directivegames', 'product_name': 'superkaiju',
'reserved_at': datetime.utcnow().isoformat(),
'reserved_by': 'prezidentbongo',
})
ts.get_table('tenants').add({'tier_name': 'LIVENORTH', 'deployable_name': 'drift-base', 'tenant_name': tenant_name,})
ts.get_table('tenants').add({'tier_name': 'LIVENORTH', 'deployable_name': 'themachines-backend', 'tenant_name': tenant_name,})
ts.get_table('tenants').add({'tier_name': 'DEVNORTH', 'deployable_name': 'drift-base', 'tenant_name': tenant_name,})
# Store locally and cache in Redis
# NOTE(review): get_table('domain') is indexed directly with a key here —
# presumably this table behaves like a single-row mapping; confirm in relib.
domain_name = ts.get_table('domain')['domain_name']
print "DOMAIN NAME IS", domain_name
local_store = create_backend('file://./~/.drift/config/' + domain_name)
print "LOCAL STORE BACKEND IS", local_store, local_store.get_url()
local_store.save_table_store(ts)
s3_store.save_table_store(ts)
#redis_store.save(ts)
# Read the config back from disk to sanity-check the round trip.
ts = local_store.load_table_store()
print "whee got ts", ts
'''
TODO: unit test failed testing:
- default values were overriding actual input, not vice versa. its fixed though.
- remove() function not tested.
- backend url functionality not tested.
-
''' | 0.203826 | 0.058885 |
def encode(json, schema):
    """Pack the two road-risk JSON records into a ``schema.Main`` message.

    Record 0 carries precipitation intensity and one alert; record 1 has
    neither (matching the benchmark fixture's shape).
    """
    payload = schema.Main()

    def add_risk(record, with_precipitation, with_alert):
        # Append one risk entry and copy the shared weather fields.
        risk = payload.risks.add()
        risk.dt = record['dt']
        risk.coord.extend(record['coord'])
        weather = record['weather']
        risk.weather.temp = weather['temp']
        risk.weather.wind_speed = weather['wind_speed']
        risk.weather.wind_deg = weather['wind_deg']
        if with_precipitation:
            risk.weather.precipitation_intensity = weather['precipitation_intensity']
        risk.weather.dew_point = weather['dew_point']
        if with_alert:
            source = record['alerts'][0]
            alert = risk.alerts.add()
            alert.sender_name = source['sender_name']
            alert.event = source['event']
            alert.event_level = source['event_level']

    add_risk(json[0], True, True)
    add_risk(json[1], False, False)
    return payload
def decode(payload):
    """Rebuild the two-record JSON list from a decoded ``Main`` message."""
    first, second = payload.risks[0], payload.risks[1]

    def weather_dict(risk, include_precipitation):
        # Shared weather fields; precipitation only exists on the first record.
        weather = {'temp': risk.weather.temp,
                   'wind_speed': risk.weather.wind_speed,
                   'wind_deg': risk.weather.wind_deg}
        if include_precipitation:
            weather['precipitation_intensity'] = risk.weather.precipitation_intensity
        weather['dew_point'] = risk.weather.dew_point
        return weather

    return [
        {'dt': first.dt,
         'coord': list(first.coord),
         'weather': weather_dict(first, True),
         'alerts': [{'sender_name': first.alerts[0].sender_name,
                     'event': first.alerts[0].event,
                     'event_level': first.alerts[0].event_level}]},
        {'dt': second.dt,
         'coord': list(second.coord),
         'weather': weather_dict(second, False),
         'alerts': list(second.alerts)},
    ]
payload = schema.Main()
risk1 = payload.risks.add()
risk1.dt = json[0]['dt']
risk1.coord.extend(json[0]['coord'])
risk1.weather.temp = json[0]['weather']['temp']
risk1.weather.wind_speed = json[0]['weather']['wind_speed']
risk1.weather.wind_deg = json[0]['weather']['wind_deg']
risk1.weather.precipitation_intensity = \
json[0]['weather']['precipitation_intensity']
risk1.weather.dew_point = json[0]['weather']['dew_point']
alert1 = risk1.alerts.add()
alert1.sender_name = json[0]['alerts'][0]['sender_name']
alert1.event = json[0]['alerts'][0]['event']
alert1.event_level = json[0]['alerts'][0]['event_level']
risk2 = payload.risks.add()
risk2.dt = json[1]['dt']
risk2.coord.extend(json[1]['coord'])
risk2.weather.temp = json[1]['weather']['temp']
risk2.weather.wind_speed = json[1]['weather']['wind_speed']
risk2.weather.wind_deg = json[1]['weather']['wind_deg']
risk2.weather.dew_point = json[1]['weather']['dew_point']
return payload
def decode(payload):
    """Rebuild the two-record JSON list from a decoded ``Main`` message."""
    first, second = payload.risks[0], payload.risks[1]

    def weather_dict(risk, include_precipitation):
        # Shared weather fields; precipitation only exists on the first record.
        weather = {'temp': risk.weather.temp,
                   'wind_speed': risk.weather.wind_speed,
                   'wind_deg': risk.weather.wind_deg}
        if include_precipitation:
            weather['precipitation_intensity'] = risk.weather.precipitation_intensity
        weather['dew_point'] = risk.weather.dew_point
        return weather

    return [
        {'dt': first.dt,
         'coord': list(first.coord),
         'weather': weather_dict(first, True),
         'alerts': [{'sender_name': first.alerts[0].sender_name,
                     'event': first.alerts[0].event,
                     'event_level': first.alerts[0].event_level}]},
        {'dt': second.dt,
         'coord': list(second.coord),
         'weather': weather_dict(second, False),
         'alerts': list(second.alerts)},
    ]
import os
import shutil
from pathlib2 import Path
from global_var import globalVars
from License_Plate_Localization.core.config import cfg
import xml.dom.minidom as mnd
def DealXMLFile(filePath):
    """Convert one labelMe/VOC-style XML annotation into a label line.

    The returned string is ``"<image path> x1,y1,x2,y2,0 ..."`` with one
    comma-separated box (class id 0 = license plate) per ``<object>``.
    """
    domTree = mnd.parse(filePath)
    rootNode = domTree.documentElement
    folder = rootNode.getElementsByTagName("folder")[0]
    filename = rootNode.getElementsByTagName("filename")[0]
    # Absolute image path under the project's dataset directory.
    content = (globalVars.projectPath / Path('License_Plate_Localization', 'data', 'dataset',
                                             folder.childNodes[0].data,
                                             filename.childNodes[0].data)).__str__() + " "
    # One "x1,y1,x2,y2,0 " fragment per annotated object. (The original also
    # read <size>/<name> into unused locals and shadowed the builtin `object`;
    # both cleaned up here.)
    for obj in rootNode.getElementsByTagName("object"):
        bndbox = obj.getElementsByTagName('bndbox')[0]
        xmin, ymin, xmax, ymax = (
            int(float(bndbox.getElementsByTagName(tag)[0].childNodes[0].data))
            for tag in ("xmin", "ymin", "xmax", "ymax"))
        content += f"{xmin},{ymin},{xmax},{ymax},0 "
    return content
class CCPDNameParams():
    """Parse the metadata packed into a CCPD dataset file name.

    CCPD names look like
    ``area-h_v-x1&y1_x2&y2-<4 plate vertices>-<plate codes>-brightness-blur.jpg``;
    the constructor extracts the tilt angles, bounding box, first plate code
    (province index), brightness and blur values.
    """

    def __init__(self, filename):
        # Positions of every delimiter kind inside the raw file name.
        self.index_of_low = [pos for pos, ch in enumerate(filename) if ch == "_"]
        self.index_of_middle = [pos for pos, ch in enumerate(filename) if ch == "-"]
        self.index_of_and = [pos for pos, ch in enumerate(filename) if ch == "&"]
        self.index_of_point = [pos for pos, ch in enumerate(filename) if ch == "."]

        low, mid, amp = self.index_of_low, self.index_of_middle, self.index_of_and
        # Tilt degrees: "horizontal_vertical" between the 1st and 2nd '-'.
        self.horizon = int(filename[mid[0] + 1: low[0]])
        self.vertical = int(filename[low[0] + 1: mid[1]])
        # Bounding box: "xmin&ymin_xmax&ymax" between the 2nd and 3rd '-'.
        self.xmin = int(filename[mid[1] + 1: amp[0]])
        self.ymin = int(filename[amp[0] + 1: low[1]])
        self.xmax = int(filename[low[1] + 1: amp[1]])
        self.ymax = int(filename[amp[1] + 1: mid[2]])
        # First plate code (province), brightness and blur fields.
        self.province = int(filename[mid[3] + 1: low[5]])
        self.light = int(filename[mid[4] + 1: mid[-1]])
        self.blur = int(filename[mid[-1] + 1: self.index_of_point[0]])

    def CCPDNameToLabelProcess(self):
        """Return this bounding box as a label fragment 'x1,y1,x2,y2,0 '."""
        return "{},{},{},{},0 ".format(self.xmin, self.ymin, self.xmax, self.ymax)
def SelectFileFromCCPD():
# Walk a local copy of the CCPD dataset and copy "interesting" images into
# per-category folders (Normal / SpecialCar / Weather). Paths are hard-coded
# to one developer's machine.
ccpdPath = Path("/Users/lanceren/Downloads/GD_Dataset/Raw_Data/2019/CCPD2019")
targetFolder = Path("/Users/lanceren/Desktop/CCPD_Picts/")
targetFolder_Normal = targetFolder / Path('Normal')
targetFolder_SpecialCar = targetFolder / Path('SpecialCar')
targetFolder_Weather = targetFolder / Path('Weather')
# Recreate the output tree from scratch on every run.
if os.path.exists(targetFolder.__str__()): shutil.rmtree(targetFolder.__str__())
os.mkdir(targetFolder.__str__())
os.mkdir(targetFolder_Normal.__str__())
os.mkdir(targetFolder_SpecialCar.__str__())
os.mkdir(targetFolder_Weather.__str__())
if os.path.exists(ccpdPath.__str__()):
totalCount = 0
standCount = 0
specialCarCount = 0
newPowerCarCount = 0
weatherCount = 0
for rt, dirs, files in os.walk(ccpdPath.__str__()):
# Drop hidden files and prune hidden directories in place.
files = [f for f in files if not f[0] == '.']
dirs[:] = [d for d in dirs if not d[0] == '.']
# ccpd_np holds plate-free images — nothing to select there.
if rt == (ccpdPath / Path('ccpd_np')).__str__():
continue
if rt == (ccpdPath / Path('ccpd_base')).__str__():
for filename in files:
totalCount += 1
fullFileName = Path(rt, filename)
ccpdName = CCPDNameParams(filename)
# Near-frontal, well-lit, sharp shots go to "Normal".
if ccpdName.horizon <= 10 and ccpdName.vertical <= 10 and ccpdName.light >= 100 and ccpdName.blur >= 100:
standCount += 1
shutil.copy(fullFileName.__str__(), targetFolder_Normal)
# Province code >= 31 marks a non-standard (special) plate.
if ccpdName.province >= 31:
specialCarCount += 1
shutil.copy(fullFileName.__str__(), targetFolder_SpecialCar)
# NOTE(review): BUG — '.count' is a bound method, so this comparison is
# always False and newPowerCarCount never increments; it was presumably
# meant to be len(ccpdName.index_of_low) == 12 (8-char new-energy plates).
if ccpdName.index_of_low.count == 12:
newPowerCarCount += 1
shutil.copy(fullFileName.__str__(), targetFolder_SpecialCar)
# NOTE(review): this 'continue' skips the ccpd_weather folder entirely,
# and the blur check below reuses 'ccpdName' from the ccpd_base branch —
# the weather selection looks broken; the original indentation/nesting
# was likely lost and should be confirmed against the upstream repo.
if rt == (ccpdPath / Path('ccpd_weather')).__str__():
continue
if ccpdName.horizon <= 2 and ccpdName.vertical <= 2 and ccpdName.blur <= 15:
weatherCount += 1
shutil.copy(fullFileName.__str__(), targetFolder_Weather)
print("new power : ", newPowerCarCount)
print("specialCar : ", specialCarCount)
print("weather : ", weatherCount)
print("standCount : ", standCount)
print("totalCount : ", totalCount)
def CreateDotNames():
    """Write the single class name ("Plate") into the YOLO ``.names`` file.

    BUG FIX: the original removed the target with ``shutil.rmtree`` and then
    ``os.mkdir``-ed a *directory* at the file path, so the subsequent
    ``open(..., "w+")`` always failed (IsADirectoryError). We now remove any
    stale file/dir and simply (re)write the file.
    """
    dotNamePath = cfg.YOLO.CLASSES
    path_str = dotNamePath.__str__()
    # Drop whatever currently sits at the target path (stale dir or file).
    if os.path.isdir(path_str):
        shutil.rmtree(path_str)
    elif os.path.exists(path_str):
        os.remove(path_str)
    with open(path_str, "w+", encoding='utf-8') as f:
        f.write("Plate")
def CreateLabelTxt():
# Build the train/test annotation txt files consumed by the YOLO trainer.
# Each output line is "<image path> x1,y1,x2,y2,0 ..." — the boxes come
# either from labelMe XML files or straight from CCPD file names.
def generateLabelTxtInMode(labelTxtPath_key, labelTxtPath_value, annotationDirPathDict):
def generateLabelTxtBySource(mode, annotationDirPath_key, annotationDirPath_value, data):
def generateContent(source, fullFilePath):
def case1():
# labelMe source: parse the XML annotation file.
return DealXMLFile(fullFilePath.__str__())
def case2():
# ccpd source: the box is encoded in the image file name itself.
ccpdName = CCPDNameParams(fullFilePath.name)
return fullFilePath.__str__() + " " + ccpdName.CCPDNameToLabelProcess()
def default():
# NOTE(review): prints and returns None, so an unknown source would
# raise TypeError on the '+' concatenation below — confirm intended.
print("mode error!")
switch = {'labelMe': case1,
'ccpd': case2}
choice = source  # pick the handler for this annotation source
content = switch.get(choice, default)() + '\n'  # run the matching handler, or the default
return content
labelNum = 0
modeNum = 1 if mode == "train" else 10  # train keeps every sample, test every 10th
for rt, dirs, files in os.walk(annotationDirPath_value.__str__()):
# Drop hidden files and prune hidden directories in place.
files = [f for f in files if not f[0] == '.']
dirs[:] = [d for d in dirs if not d[0] == '.']
for fileName in files:
fullFileName = Path(rt, fileName)
content = generateContent(annotationDirPath_key, fullFileName)
if labelNum % modeNum == 0: data.append(content)
labelNum += 1
labelData = []
for key, value in annotationDirPathDict.items():
generateLabelTxtBySource(labelTxtPath_key, key, value, labelData)
# Rewrite the output label file from scratch.
if os.path.exists(labelTxtPath_value.__str__()): os.remove(labelTxtPath_value.__str__())
with open(labelTxtPath_value.__str__(), "w+", encoding='utf-8') as f:
f.writelines(labelData)
f.close()
# Annotation sources: labelMe XML directory and raw CCPD sample images.
annotationDirPath = {
"labelMe": globalVars.projectPath / Path('License_Plate_Localization', 'data', 'annotations', 'xml'),
"ccpd": globalVars.projectPath / Path('License_Plate_Localization', 'data', 'dataset', 'JPEGImages',
'ccpd_sample')}
# Output files (train/test annotation lists) come from the training config.
labelTxtPath = {
"train": cfg.TRAIN.ANNOT_PATH,
"test": cfg.TEST.ANNOT_PATH}
for key, value in labelTxtPath.items():
generateLabelTxtInMode(key, value, annotationDirPath)
if __name__ == "__main__":
# Script entry point — only label-file generation is enabled; the CCPD
# selection and .names helpers can be re-enabled by uncommenting.
# SelectFileFromCCPD()
# CreateDotNames()
CreateLabelTxt()
import shutil
from pathlib2 import Path
from global_var import globalVars
from License_Plate_Localization.core.config import cfg
import xml.dom.minidom as mnd
def DealXMLFile(filePath):
    """Convert one labelMe/VOC-style XML annotation into a label line.

    The returned string is ``"<image path> x1,y1,x2,y2,0 ..."`` with one
    comma-separated box (class id 0 = license plate) per ``<object>``.
    """
    domTree = mnd.parse(filePath)
    rootNode = domTree.documentElement
    folder = rootNode.getElementsByTagName("folder")[0]
    filename = rootNode.getElementsByTagName("filename")[0]
    # Absolute image path under the project's dataset directory.
    content = (globalVars.projectPath / Path('License_Plate_Localization', 'data', 'dataset',
                                             folder.childNodes[0].data,
                                             filename.childNodes[0].data)).__str__() + " "
    # One "x1,y1,x2,y2,0 " fragment per annotated object. (The original also
    # read <size>/<name> into unused locals and shadowed the builtin `object`;
    # both cleaned up here.)
    for obj in rootNode.getElementsByTagName("object"):
        bndbox = obj.getElementsByTagName('bndbox')[0]
        xmin, ymin, xmax, ymax = (
            int(float(bndbox.getElementsByTagName(tag)[0].childNodes[0].data))
            for tag in ("xmin", "ymin", "xmax", "ymax"))
        content += f"{xmin},{ymin},{xmax},{ymax},0 "
    return content
class CCPDNameParams():
    """Parse the metadata packed into a CCPD dataset file name.

    CCPD names look like
    ``area-h_v-x1&y1_x2&y2-<4 plate vertices>-<plate codes>-brightness-blur.jpg``;
    the constructor extracts the tilt angles, bounding box, first plate code
    (province index), brightness and blur values.
    """

    def __init__(self, filename):
        # Positions of every delimiter kind inside the raw file name.
        self.index_of_low = [pos for pos, ch in enumerate(filename) if ch == "_"]
        self.index_of_middle = [pos for pos, ch in enumerate(filename) if ch == "-"]
        self.index_of_and = [pos for pos, ch in enumerate(filename) if ch == "&"]
        self.index_of_point = [pos for pos, ch in enumerate(filename) if ch == "."]

        low, mid, amp = self.index_of_low, self.index_of_middle, self.index_of_and
        # Tilt degrees: "horizontal_vertical" between the 1st and 2nd '-'.
        self.horizon = int(filename[mid[0] + 1: low[0]])
        self.vertical = int(filename[low[0] + 1: mid[1]])
        # Bounding box: "xmin&ymin_xmax&ymax" between the 2nd and 3rd '-'.
        self.xmin = int(filename[mid[1] + 1: amp[0]])
        self.ymin = int(filename[amp[0] + 1: low[1]])
        self.xmax = int(filename[low[1] + 1: amp[1]])
        self.ymax = int(filename[amp[1] + 1: mid[2]])
        # First plate code (province), brightness and blur fields.
        self.province = int(filename[mid[3] + 1: low[5]])
        self.light = int(filename[mid[4] + 1: mid[-1]])
        self.blur = int(filename[mid[-1] + 1: self.index_of_point[0]])

    def CCPDNameToLabelProcess(self):
        """Return this bounding box as a label fragment 'x1,y1,x2,y2,0 '."""
        return "{},{},{},{},0 ".format(self.xmin, self.ymin, self.xmax, self.ymax)
def SelectFileFromCCPD():
    """Copy a curated sample of CCPD2019 images into category folders.

    Scans the raw CCPD download and copies images into Normal /
    SpecialCar / Weather subfolders of a freshly rebuilt output tree,
    printing per-category counters at the end.  Source and target paths
    are hard-coded to the original author's machine.
    """
    ccpdPath = Path("/Users/lanceren/Downloads/GD_Dataset/Raw_Data/2019/CCPD2019")
    targetFolder = Path("/Users/lanceren/Desktop/CCPD_Picts/")
    targetFolder_Normal = targetFolder / Path('Normal')
    targetFolder_SpecialCar = targetFolder / Path('SpecialCar')
    targetFolder_Weather = targetFolder / Path('Weather')
    # Rebuild the output tree from scratch.
    if os.path.exists(targetFolder.__str__()):
        shutil.rmtree(targetFolder.__str__())
    os.mkdir(targetFolder.__str__())
    os.mkdir(targetFolder_Normal.__str__())
    os.mkdir(targetFolder_SpecialCar.__str__())
    os.mkdir(targetFolder_Weather.__str__())
    if os.path.exists(ccpdPath.__str__()):
        totalCount = 0
        standCount = 0
        specialCarCount = 0
        newPowerCarCount = 0
        weatherCount = 0
        for rt, dirs, files in os.walk(ccpdPath.__str__()):
            # Skip hidden files and directories (.DS_Store and friends).
            files = [f for f in files if not f[0] == '.']
            dirs[:] = [d for d in dirs if not d[0] == '.']
            if rt == (ccpdPath / Path('ccpd_np')).__str__():
                continue  # "no plate" subset: never useful here
            if rt == (ccpdPath / Path('ccpd_base')).__str__():
                for filename in files:
                    totalCount += 1
                    fullFileName = Path(rt, filename)
                    ccpdName = CCPDNameParams(filename)
                    # "Normal": small tilt, bright and sharp.
                    if ccpdName.horizon <= 10 and ccpdName.vertical <= 10 and ccpdName.light >= 100 and ccpdName.blur >= 100:
                        standCount += 1
                        shutil.copy(fullFileName.__str__(), targetFolder_Normal)
                    if ccpdName.province >= 31:
                        specialCarCount += 1
                        shutil.copy(fullFileName.__str__(), targetFolder_SpecialCar)
                    # BUG FIX: the original compared the bound method
                    # `index_of_low.count` against 12, which is always False,
                    # so new-energy (8-character) plates were never detected.
                    # Their file names carry 12 underscores; compare the
                    # list length instead.
                    if len(ccpdName.index_of_low) == 12:
                        newPowerCarCount += 1
                        shutil.copy(fullFileName.__str__(), targetFolder_SpecialCar)
                    if rt == (ccpdPath / Path('ccpd_weather')).__str__():
                        continue
                    # NOTE(review): this "weather" selection only ever sees
                    # files from ccpd_base (the guard above is always False
                    # inside this branch); it was probably meant to live in a
                    # separate ccpd_weather branch -- confirm against the
                    # dataset layout before relying on weatherCount.
                    if ccpdName.horizon <= 2 and ccpdName.vertical <= 2 and ccpdName.blur <= 15:
                        weatherCount += 1
                        shutil.copy(fullFileName.__str__(), targetFolder_Weather)
        print("new power : ", newPowerCarCount)
        print("specialCar : ", specialCarCount)
        print("weather : ", weatherCount)
        print("standCount : ", standCount)
        print("totalCount : ", totalCount)
def CreateDotNames():
    """Write the YOLO class-name (.names) file with the single class "Plate".

    BUG FIX: the original removed the target with shutil.rmtree() and then
    re-created it with os.mkdir(), turning the *.names FILE path into a
    directory; the subsequent open(..., "w+") on a directory then failed.
    The path is a file: remove a stale file if present and write directly.
    """
    # create gd_detect.names
    dotNamePath = cfg.YOLO.CLASSES
    if os.path.exists(dotNamePath.__str__()):
        os.remove(dotNamePath.__str__())
    with open(dotNamePath.__str__(), "w+", encoding='utf-8') as f:
        # write() instead of writelines(): one class name, one string.
        f.write("Plate")
def CreateLabelTxt():
    """Generate the train/test annotation txt files consumed by training.

    Walks both annotation sources ("labelMe" XML files and raw "ccpd"
    images), converts each file into a one-line label record and writes
    the collected lines to cfg.TRAIN.ANNOT_PATH / cfg.TEST.ANNOT_PATH.
    """
    def generateLabelTxtInMode(labelTxtPath_key, labelTxtPath_value, annotationDirPathDict):
        # Build one output txt ("train" or "test") from every annotation source.
        def generateLabelTxtBySource(mode, annotationDirPath_key, annotationDirPath_value, data):
            # Append label lines from a single source directory into `data`.
            def generateContent(source, fullFilePath):
                # Dispatch on the source kind to build one label line.
                def case1():
                    # labelMe: parse the Pascal-VOC style XML annotation.
                    return DealXMLFile(fullFilePath.__str__())
                def case2():
                    # ccpd: the bounding box is encoded in the file name itself.
                    ccpdName = CCPDNameParams(fullFilePath.name)
                    return fullFilePath.__str__() + " " + ccpdName.CCPDNameToLabelProcess()
                def default():
                    # NOTE(review): returns None, so the `+ '\n'` below would
                    # raise TypeError for an unknown source key -- confirm only
                    # 'labelMe'/'ccpd' can reach this point.
                    print("mode error!")
                switch = {'labelMe': case1,
                          'ccpd': case2}
                choice = source  # pick the handler key
                content = switch.get(choice, default)() + '\n'  # run the matching handler, or `default` if the key is unknown
                return content
            labelNum = 0
            modeNum = 1 if mode == "train" else 10  # train keeps every file, test keeps one in ten
            for rt, dirs, files in os.walk(annotationDirPath_value.__str__()):
                # Skip hidden files and directories.
                files = [f for f in files if not f[0] == '.']
                dirs[:] = [d for d in dirs if not d[0] == '.']
                for fileName in files:
                    fullFileName = Path(rt, fileName)
                    content = generateContent(annotationDirPath_key, fullFileName)
                    if labelNum % modeNum == 0: data.append(content)
                    labelNum += 1
        labelData = []
        for key, value in annotationDirPathDict.items():
            generateLabelTxtBySource(labelTxtPath_key, key, value, labelData)
        # Replace any previous output file.
        if os.path.exists(labelTxtPath_value.__str__()): os.remove(labelTxtPath_value.__str__())
        with open(labelTxtPath_value.__str__(), "w+", encoding='utf-8') as f:
            f.writelines(labelData)
            f.close()  # redundant inside `with`, harmless
    # Annotation sources: hand-labelled XML and a sampled CCPD folder.
    annotationDirPath = {
        "labelMe": globalVars.projectPath / Path('License_Plate_Localization', 'data', 'annotations', 'xml'),
        "ccpd": globalVars.projectPath / Path('License_Plate_Localization', 'data', 'dataset', 'JPEGImages',
                                              'ccpd_sample')}
    # Output destinations keyed by split name.
    labelTxtPath = {
        "train": cfg.TRAIN.ANNOT_PATH,
        "test": cfg.TEST.ANNOT_PATH}
    for key, value in labelTxtPath.items():
        generateLabelTxtInMode(key, value, annotationDirPath)
if __name__ == "__main__":
    # The two commented-out steps are one-off preprocessing (sampling CCPD
    # and writing the class-name file); only label-txt generation runs by
    # default.
    # SelectFileFromCCPD()
    # CreateDotNames()
    CreateLabelTxt()
import FWCore.ParameterSet.Config as cms
# parameters for HIPAlignmentAlgorithm
HIPAlignmentAlgorithm = cms.PSet(
algoName = cms.string('HIPAlignmentAlgorithm'),
debug = cms.bool(False),
verbosity = cms.bool(False),
checkDbAlignmentValidity=cms.bool(False),
isCollision = cms.bool(True),
UsePreSelection = cms.bool(False),
multiIOV=cms.bool(False),
IOVrange=cms.vuint32(1,99999999),
minRelParameterError = cms.double(0),
maxRelParameterError = cms.double(-1), # -1 for no cut
minimumNumberOfHits = cms.int32(1),
maxAllowedHitPull = cms.double(-1), # -1 for no cut
applyCutsPerComponent = cms.bool(False), # Overrides settings above for the specified detectors
cutsPerComponent = cms.VPSet(
cms.PSet(
Selector = cms.PSet(
alignParams = cms.vstring(
"AllAlignables,000000" # Obligatory second string
) # can use "selected" for the already-specified alignables
),
# Parameter cuts
minRelParError = cms.double(0),
maxRelParError = cms.double(-1), # -1 for no cut
# Hit cuts
minNHits = cms.int32(0),
maxHitPull = cms.double(-1), # -1 for no cut
applyPixelProbCut = cms.bool(False),
usePixelProbXYOrProbQ = cms.bool(False), # Uses or instead of and when applying the min-max cuts
minPixelProbXY = cms.double(0),
maxPixelProbXY = cms.double(1),
minPixelProbQ = cms.double(0),
maxPixelProbQ = cms.double(1),
)
),
# APE settings
applyAPE = cms.bool(False),
apeParam = cms.VPSet(
cms.PSet(
Selector = cms.PSet(
alignParams = cms.vstring(
"AllAlignables,000000"
) # can use "selected" for the already-specified alignables
),
function = cms.string('linear'), ## linear, step or exponential
apeRPar = cms.vdouble(0, 0, 0), # cm
apeSPar = cms.vdouble(0, 0, 0), # mrad
)
),
# Re-weighting
DataGroup=cms.int32(-2),
UseReweighting = cms.bool(False),
Weight = cms.double(1),
UniformEta = cms.bool(False),
UniformEtaFormula = cms.string("1"),
ReweightPerAlignable = cms.bool(False),
# Impact angle cut
CLAngleCut = cms.double(1.571), # upper bound on collision track impact angle, default -no cut
CSAngleCut = cms.double(0), # lower bound on cosmics track impact angle, default -no cut
# Chisquare scan
setScanDet = cms.vdouble(0,0,0), # detector ID (1=all det), start,step
# File paths and names
outpath = cms.string('./'),
collectorActive = cms.bool(False),
collectorNJobs = cms.int32(0),
collectorPath = cms.string(''),
uvarFile = cms.string('IOUserVariables.root'),
alignedFile = cms.string('IOAlignedPositions.root'),
misalignedFile = cms.string('IOMisalignedPositions.root'),
trueFile = cms.string('IOTruePositions.root'),
parameterFile = cms.string('IOAlignmentParameters.root'),
iterationFile = cms.string('IOIteration.root'),
outfile2 = cms.string('HIPAlignmentAlignables.root'),
monitorConfig = cms.PSet(
outfile = cms.string('HIPAlignmentEvents.root'),
fillTrackMonitoring = cms.bool(False),
maxEventsPerJob = cms.int32(100),
fillTrackHitMonitoring = cms.bool(False),
maxHits = cms.int32(10000), # Not per track, just total
),
surveyResiduals = cms.untracked.vstring(), ## no survey constraint
surveyFile = cms.string('HIPSurveyResiduals.root'),
) | Alignment/HIPAlignmentAlgorithm/python/HIPAlignmentAlgorithm_cfi.py | import FWCore.ParameterSet.Config as cms
# parameters for HIPAlignmentAlgorithm
# Default configuration PSet for the HIP (Hits and Impact Points)
# tracker-alignment algorithm.
HIPAlignmentAlgorithm = cms.PSet(
    algoName = cms.string('HIPAlignmentAlgorithm'),
    debug = cms.bool(False),
    verbosity = cms.bool(False),
    checkDbAlignmentValidity=cms.bool(False),
    isCollision = cms.bool(True),
    UsePreSelection = cms.bool(False),
    # IOV handling: single interval covering all runs by default.
    multiIOV=cms.bool(False),
    IOVrange=cms.vuint32(1,99999999),
    # Global parameter/hit quality cuts.
    minRelParameterError = cms.double(0),
    maxRelParameterError = cms.double(-1), # -1 for no cut
    minimumNumberOfHits = cms.int32(1),
    maxAllowedHitPull = cms.double(-1), # -1 for no cut
    applyCutsPerComponent = cms.bool(False), # Overrides settings above for the specified detectors
    cutsPerComponent = cms.VPSet(
        cms.PSet(
            Selector = cms.PSet(
                alignParams = cms.vstring(
                    "AllAlignables,000000" # Obligatory second string
                ) # can use "selected" for the already-specified alignables
            ),
            # Parameter cuts
            minRelParError = cms.double(0),
            maxRelParError = cms.double(-1), # -1 for no cut
            # Hit cuts
            minNHits = cms.int32(0),
            maxHitPull = cms.double(-1), # -1 for no cut
            applyPixelProbCut = cms.bool(False),
            usePixelProbXYOrProbQ = cms.bool(False), # Uses or instead of and when applying the min-max cuts
            minPixelProbXY = cms.double(0),
            maxPixelProbXY = cms.double(1),
            minPixelProbQ = cms.double(0),
            maxPixelProbQ = cms.double(1),
        )
    ),
    # APE settings
    applyAPE = cms.bool(False),
    apeParam = cms.VPSet(
        cms.PSet(
            Selector = cms.PSet(
                alignParams = cms.vstring(
                    "AllAlignables,000000"
                ) # can use "selected" for the already-specified alignables
            ),
            function = cms.string('linear'), ## linear, step or exponential
            apeRPar = cms.vdouble(0, 0, 0), # cm
            apeSPar = cms.vdouble(0, 0, 0), # mrad
        )
    ),
    # Re-weighting
    DataGroup=cms.int32(-2),
    UseReweighting = cms.bool(False),
    Weight = cms.double(1),
    UniformEta = cms.bool(False),
    UniformEtaFormula = cms.string("1"),
    ReweightPerAlignable = cms.bool(False),
    # Impact angle cut
    CLAngleCut = cms.double(1.571), # upper bound on collision track impact angle, default -no cut
    CSAngleCut = cms.double(0), # lower bound on cosmics track impact angle, default -no cut
    # Chisquare scan
    setScanDet = cms.vdouble(0,0,0), # detector ID (1=all det), start,step
    # File paths and names
    outpath = cms.string('./'),
    collectorActive = cms.bool(False),
    collectorNJobs = cms.int32(0),
    collectorPath = cms.string(''),
    uvarFile = cms.string('IOUserVariables.root'),
    alignedFile = cms.string('IOAlignedPositions.root'),
    misalignedFile = cms.string('IOMisalignedPositions.root'),
    trueFile = cms.string('IOTruePositions.root'),
    parameterFile = cms.string('IOAlignmentParameters.root'),
    iterationFile = cms.string('IOIteration.root'),
    outfile2 = cms.string('HIPAlignmentAlignables.root'),
    monitorConfig = cms.PSet(
        outfile = cms.string('HIPAlignmentEvents.root'),
        fillTrackMonitoring = cms.bool(False),
        maxEventsPerJob = cms.int32(100),
        fillTrackHitMonitoring = cms.bool(False),
        maxHits = cms.int32(10000), # Not per track, just total
    ),
    surveyResiduals = cms.untracked.vstring(), ## no survey constraint
    surveyFile = cms.string('HIPSurveyResiduals.root'),
)
from django.conf.urls import url
from . import views
urlpatterns = [
#Para reporte en formato Excel de los pacientes que solicitaron visto bueno
url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/vistoBuenoExcel/$', views.vistoBuenoExcel, name='vistoBuenoExcel-new'),
#Para reporte en formato Excel de las actividades de enfermeria
url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/censoEnfermeriaExcel/$', views.reporteCensoExcel, name='censoActividadExcel-new'),
#Para reporte en formato Excel de las morbilidades de pacientes de nuevo y antiguo ingreso
url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/morbilidadExcel/$', views.reporteMorbilidadExcel, name='morbilidadExcel-new'),
#Para reporte en formato Excel de los pacientes atendidos por medico
url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/pacientesPorMedicoExcel/$', views.reportePacienteMedicoExcel, name='pacientesMedicoExcel-new'),
#Para reporte en formato Excel de los pacientes atendidos por especialidad
url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/pacientesPorEspecialidadExcel/$', views.reporteConsultaEspecialidadExcel, name='pacientesEspecialidadExcel-new'),
#Para reporte en formato Excel de las referencias internas y externas
url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/pacientesReferenciasExcel/$', views.reporteReferenciasExcel, name='referenciasExcel-new'),
#Para reporte en formato Excel de primeras y consultas subsecuentes
url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/pacientesTipoConsultaExcel/$', views.reporteTipoConsultaExcel, name='tipoConsultaExcel-new'),
#Para reporte en formato Excel de consultas generales segun procedencia
url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/consultasProcedenciaExcel/$', views.reporteConsultaProcedenciaExcel, name='consultaProcedenciaExcel-new'),
#Para reporte en formato Excel de consultas generales segun genero
url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/consultasGeneroExcel/$', views.reporteConsultaGeneroExcel, name='consultaGeneroExcel-new'),
#Para reporte en formato Excel de consultas generales segun tipo de paciente
url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/consultasTipoPacienteExcel/$', views.reporteConsultaTipoPacienteExcel, name='consultaTipoPacienteExcel-new'),
url(r'^export/csv/$', views.export, name='export_excel'),
] | reportesapp/urls.py | from django.conf.urls import url
from . import views
# Excel/CSV report endpoints.  `inicio` and `fin` capture a date range as
# dash-separated digit groups (presumably YYYY-MM-DD -- confirm with views).
urlpatterns = [
    # Excel report of patients who requested medical clearance ("visto bueno")
    url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/vistoBuenoExcel/$', views.vistoBuenoExcel, name='vistoBuenoExcel-new'),
    # Excel report of nursing activities
    url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/censoEnfermeriaExcel/$', views.reporteCensoExcel, name='censoActividadExcel-new'),
    # Excel report of morbidity for newly and previously admitted patients
    url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/morbilidadExcel/$', views.reporteMorbilidadExcel, name='morbilidadExcel-new'),
    # Excel report of patients seen per physician
    url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/pacientesPorMedicoExcel/$', views.reportePacienteMedicoExcel, name='pacientesMedicoExcel-new'),
    # Excel report of patients seen per specialty
    url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/pacientesPorEspecialidadExcel/$', views.reporteConsultaEspecialidadExcel, name='pacientesEspecialidadExcel-new'),
    # Excel report of internal and external referrals
    url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/pacientesReferenciasExcel/$', views.reporteReferenciasExcel, name='referenciasExcel-new'),
    # Excel report of first-time vs follow-up consultations
    url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/pacientesTipoConsultaExcel/$', views.reporteTipoConsultaExcel, name='tipoConsultaExcel-new'),
    # Excel report of general consultations grouped by patient origin
    url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/consultasProcedenciaExcel/$', views.reporteConsultaProcedenciaExcel, name='consultaProcedenciaExcel-new'),
    # Excel report of general consultations grouped by gender
    url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/consultasGeneroExcel/$', views.reporteConsultaGeneroExcel, name='consultaGeneroExcel-new'),
    # Excel report of general consultations grouped by patient type
    url(r'^(?P<inicio>[0-9]+\-[0-9]+\-[0-9]+)/(?P<fin>[0-9]+\-[0-9]+\-[0-9]+)/consultasTipoPacienteExcel/$', views.reporteConsultaTipoPacienteExcel, name='consultaTipoPacienteExcel-new'),
    url(r'^export/csv/$', views.export, name='export_excel'),
]
from connector import channel
from google3.cloud.graphite.mmv2.services.google.compute import subnetwork_pb2
from google3.cloud.graphite.mmv2.services.google.compute import subnetwork_pb2_grpc
from typing import List
class Subnetwork(object):
    """Declarative client for the Compute (beta) Subnetwork resource.

    Generated-style wrapper around the gRPC service: attributes mirror the
    ComputeBetaSubnetwork proto fields; apply() upserts the resource and
    refreshes local state, delete() removes it, list() enumerates, and
    to_proto() serializes the local state.
    """

    def __init__(
        self,
        creation_timestamp: str = None,
        description: str = None,
        gateway_address: str = None,
        ip_cidr_range: str = None,
        name: str = None,
        network: str = None,
        fingerprint: str = None,
        purpose: str = None,
        role: str = None,
        secondary_ip_ranges: list = None,
        private_ip_google_access: bool = None,
        region: str = None,
        log_config: dict = None,
        project: str = None,
        self_link: str = None,
        enable_flow_logs: bool = None,
        service_account_file: str = "",
    ):
        # Output-only fields (creation_timestamp, gateway_address,
        # fingerprint, self_link) are accepted for signature symmetry but
        # not stored here; apply() fills them from the server response.
        channel.initialize()
        self.description = description
        self.ip_cidr_range = ip_cidr_range
        self.name = name
        self.network = network
        self.purpose = purpose
        self.role = role
        self.secondary_ip_ranges = secondary_ip_ranges
        self.private_ip_google_access = private_ip_google_access
        self.region = region
        self.log_config = log_config
        self.project = project
        self.enable_flow_logs = enable_flow_logs
        self.service_account_file = service_account_file

    def apply(self):
        """Create/update the subnetwork, then refresh every local field
        from the server's response."""
        stub = subnetwork_pb2_grpc.ComputeBetaSubnetworkServiceStub(channel.Channel())
        request = subnetwork_pb2.ApplyComputeBetaSubnetworkRequest()
        # Only set fields whose converted value is truthy; log_config is a
        # message field, so it is cleared explicitly when absent.
        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)
        if Primitive.to_proto(self.ip_cidr_range):
            request.resource.ip_cidr_range = Primitive.to_proto(self.ip_cidr_range)
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.network):
            request.resource.network = Primitive.to_proto(self.network)
        if SubnetworkPurposeEnum.to_proto(self.purpose):
            request.resource.purpose = SubnetworkPurposeEnum.to_proto(self.purpose)
        if SubnetworkRoleEnum.to_proto(self.role):
            request.resource.role = SubnetworkRoleEnum.to_proto(self.role)
        if SubnetworkSecondaryIPRangesArray.to_proto(self.secondary_ip_ranges):
            request.resource.secondary_ip_ranges.extend(
                SubnetworkSecondaryIPRangesArray.to_proto(self.secondary_ip_ranges)
            )
        if Primitive.to_proto(self.private_ip_google_access):
            request.resource.private_ip_google_access = Primitive.to_proto(
                self.private_ip_google_access
            )
        if Primitive.to_proto(self.region):
            request.resource.region = Primitive.to_proto(self.region)
        if SubnetworkLogConfig.to_proto(self.log_config):
            request.resource.log_config.CopyFrom(
                SubnetworkLogConfig.to_proto(self.log_config)
            )
        else:
            request.resource.ClearField("log_config")
        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.enable_flow_logs):
            request.resource.enable_flow_logs = Primitive.to_proto(
                self.enable_flow_logs
            )
        request.service_account_file = self.service_account_file
        response = stub.ApplyComputeBetaSubnetwork(request)
        # Mirror the applied state back onto this object, including
        # output-only fields.
        self.creation_timestamp = Primitive.from_proto(response.creation_timestamp)
        self.description = Primitive.from_proto(response.description)
        self.gateway_address = Primitive.from_proto(response.gateway_address)
        self.ip_cidr_range = Primitive.from_proto(response.ip_cidr_range)
        self.name = Primitive.from_proto(response.name)
        self.network = Primitive.from_proto(response.network)
        self.fingerprint = Primitive.from_proto(response.fingerprint)
        self.purpose = SubnetworkPurposeEnum.from_proto(response.purpose)
        self.role = SubnetworkRoleEnum.from_proto(response.role)
        self.secondary_ip_ranges = SubnetworkSecondaryIPRangesArray.from_proto(
            response.secondary_ip_ranges
        )
        self.private_ip_google_access = Primitive.from_proto(
            response.private_ip_google_access
        )
        self.region = Primitive.from_proto(response.region)
        self.log_config = SubnetworkLogConfig.from_proto(response.log_config)
        self.project = Primitive.from_proto(response.project)
        self.self_link = Primitive.from_proto(response.self_link)
        self.enable_flow_logs = Primitive.from_proto(response.enable_flow_logs)

    def delete(self):
        """Delete the subnetwork identified by this object's fields."""
        stub = subnetwork_pb2_grpc.ComputeBetaSubnetworkServiceStub(channel.Channel())
        request = subnetwork_pb2.DeleteComputeBetaSubnetworkRequest()
        request.service_account_file = self.service_account_file
        # Same conditional marshalling as apply().
        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)
        if Primitive.to_proto(self.ip_cidr_range):
            request.resource.ip_cidr_range = Primitive.to_proto(self.ip_cidr_range)
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.network):
            request.resource.network = Primitive.to_proto(self.network)
        if SubnetworkPurposeEnum.to_proto(self.purpose):
            request.resource.purpose = SubnetworkPurposeEnum.to_proto(self.purpose)
        if SubnetworkRoleEnum.to_proto(self.role):
            request.resource.role = SubnetworkRoleEnum.to_proto(self.role)
        if SubnetworkSecondaryIPRangesArray.to_proto(self.secondary_ip_ranges):
            request.resource.secondary_ip_ranges.extend(
                SubnetworkSecondaryIPRangesArray.to_proto(self.secondary_ip_ranges)
            )
        if Primitive.to_proto(self.private_ip_google_access):
            request.resource.private_ip_google_access = Primitive.to_proto(
                self.private_ip_google_access
            )
        if Primitive.to_proto(self.region):
            request.resource.region = Primitive.to_proto(self.region)
        if SubnetworkLogConfig.to_proto(self.log_config):
            request.resource.log_config.CopyFrom(
                SubnetworkLogConfig.to_proto(self.log_config)
            )
        else:
            request.resource.ClearField("log_config")
        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.enable_flow_logs):
            request.resource.enable_flow_logs = Primitive.to_proto(
                self.enable_flow_logs
            )
        response = stub.DeleteComputeBetaSubnetwork(request)

    @classmethod
    def list(self, project, region, service_account_file=""):
        """Return the service's item list for *project*/*region*.

        NOTE(review): `request.Project` / `request.Region` use capitalized
        attribute names -- presumably matching the generated proto fields;
        verify against subnetwork_pb2 before changing.
        """
        stub = subnetwork_pb2_grpc.ComputeBetaSubnetworkServiceStub(channel.Channel())
        request = subnetwork_pb2.ListComputeBetaSubnetworkRequest()
        request.service_account_file = service_account_file
        request.Project = project
        request.Region = region
        return stub.ListComputeBetaSubnetwork(request).items

    def to_proto(self):
        """Serialize the local state into a ComputeBetaSubnetwork message."""
        resource = subnetwork_pb2.ComputeBetaSubnetwork()
        if Primitive.to_proto(self.description):
            resource.description = Primitive.to_proto(self.description)
        if Primitive.to_proto(self.ip_cidr_range):
            resource.ip_cidr_range = Primitive.to_proto(self.ip_cidr_range)
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.network):
            resource.network = Primitive.to_proto(self.network)
        if SubnetworkPurposeEnum.to_proto(self.purpose):
            resource.purpose = SubnetworkPurposeEnum.to_proto(self.purpose)
        if SubnetworkRoleEnum.to_proto(self.role):
            resource.role = SubnetworkRoleEnum.to_proto(self.role)
        if SubnetworkSecondaryIPRangesArray.to_proto(self.secondary_ip_ranges):
            resource.secondary_ip_ranges.extend(
                SubnetworkSecondaryIPRangesArray.to_proto(self.secondary_ip_ranges)
            )
        if Primitive.to_proto(self.private_ip_google_access):
            resource.private_ip_google_access = Primitive.to_proto(
                self.private_ip_google_access
            )
        if Primitive.to_proto(self.region):
            resource.region = Primitive.to_proto(self.region)
        if SubnetworkLogConfig.to_proto(self.log_config):
            resource.log_config.CopyFrom(SubnetworkLogConfig.to_proto(self.log_config))
        else:
            resource.ClearField("log_config")
        if Primitive.to_proto(self.project):
            resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.enable_flow_logs):
            resource.enable_flow_logs = Primitive.to_proto(self.enable_flow_logs)
        return resource
class SubnetworkSecondaryIPRanges(object):
    """Plain-Python mirror of the SecondaryIPRanges proto message."""

    def __init__(self, range_name: str = None, ip_cidr_range: str = None):
        self.range_name = range_name
        self.ip_cidr_range = ip_cidr_range

    @classmethod
    def to_proto(cls, resource):
        """Convert *resource* to its proto message; None passes through."""
        if not resource:
            return None
        proto = subnetwork_pb2.ComputeBetaSubnetworkSecondaryIPRanges()
        name = Primitive.to_proto(resource.range_name)
        if name:
            proto.range_name = name
        cidr = Primitive.to_proto(resource.ip_cidr_range)
        if cidr:
            proto.ip_cidr_range = cidr
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Build an instance from a proto message; None passes through."""
        if not resource:
            return None
        return SubnetworkSecondaryIPRanges(
            range_name=Primitive.from_proto(resource.range_name),
            ip_cidr_range=Primitive.from_proto(resource.ip_cidr_range),
        )
class SubnetworkSecondaryIPRangesArray(object):
    """List-level converter for SubnetworkSecondaryIPRanges items."""

    @classmethod
    def to_proto(cls, resources):
        # Falsy input (None or empty list) passes through unchanged.
        if not resources:
            return resources
        return [SubnetworkSecondaryIPRanges.to_proto(item) for item in resources]

    @classmethod
    def from_proto(cls, resources):
        return [SubnetworkSecondaryIPRanges.from_proto(item) for item in resources]
class SubnetworkLogConfig(object):
    """Plain-Python mirror of the Subnetwork LogConfig proto message."""

    def __init__(
        self,
        aggregation_interval: str = None,
        flow_sampling: float = None,
        metadata: str = None,
    ):
        self.aggregation_interval = aggregation_interval
        self.flow_sampling = flow_sampling
        self.metadata = metadata

    @classmethod
    def to_proto(cls, resource):
        """Convert *resource* to its proto message; None passes through."""
        if not resource:
            return None
        proto = subnetwork_pb2.ComputeBetaSubnetworkLogConfig()
        interval = SubnetworkLogConfigAggregationIntervalEnum.to_proto(
            resource.aggregation_interval
        )
        if interval:
            proto.aggregation_interval = interval
        sampling = Primitive.to_proto(resource.flow_sampling)
        if sampling:
            proto.flow_sampling = sampling
        metadata = SubnetworkLogConfigMetadataEnum.to_proto(resource.metadata)
        if metadata:
            proto.metadata = metadata
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Build an instance from a proto message; None passes through."""
        if not resource:
            return None
        return SubnetworkLogConfig(
            aggregation_interval=SubnetworkLogConfigAggregationIntervalEnum.from_proto(
                resource.aggregation_interval
            ),
            flow_sampling=Primitive.from_proto(resource.flow_sampling),
            metadata=SubnetworkLogConfigMetadataEnum.from_proto(resource.metadata),
        )
class SubnetworkLogConfigArray(object):
    """List-level converter for SubnetworkLogConfig items."""

    @classmethod
    def to_proto(cls, resources):
        # Falsy input (None or empty list) passes through unchanged.
        if not resources:
            return resources
        return [SubnetworkLogConfig.to_proto(item) for item in resources]

    @classmethod
    def from_proto(cls, resources):
        return [SubnetworkLogConfig.from_proto(item) for item in resources]
class SubnetworkPurposeEnum(object):
    """Maps the short string form of the purpose enum to/from proto values."""

    @classmethod
    def to_proto(cls, resource):
        # Falsy (None/"") passes through unchanged.
        if not resource:
            return resource
        return subnetwork_pb2.ComputeBetaSubnetworkPurposeEnum.Value(
            "ComputeBetaSubnetworkPurposeEnum%s" % resource
        )

    @classmethod
    def from_proto(cls, resource):
        if not resource:
            return resource
        full_name = subnetwork_pb2.ComputeBetaSubnetworkPurposeEnum.Name(resource)
        return full_name[len("ComputeBetaSubnetworkPurposeEnum") :]
class SubnetworkRoleEnum(object):
    """Maps the short string form of the role enum to/from proto values."""

    @classmethod
    def to_proto(cls, resource):
        # Falsy (None/"") passes through unchanged.
        if not resource:
            return resource
        return subnetwork_pb2.ComputeBetaSubnetworkRoleEnum.Value(
            "ComputeBetaSubnetworkRoleEnum%s" % resource
        )

    @classmethod
    def from_proto(cls, resource):
        if not resource:
            return resource
        full_name = subnetwork_pb2.ComputeBetaSubnetworkRoleEnum.Name(resource)
        return full_name[len("ComputeBetaSubnetworkRoleEnum") :]
class SubnetworkLogConfigAggregationIntervalEnum(object):
    """Maps the short string form of the aggregation-interval enum to/from proto values."""

    @classmethod
    def to_proto(cls, resource):
        # Falsy (None/"") passes through unchanged.
        if not resource:
            return resource
        return (
            subnetwork_pb2.ComputeBetaSubnetworkLogConfigAggregationIntervalEnum.Value(
                "ComputeBetaSubnetworkLogConfigAggregationIntervalEnum%s" % resource
            )
        )

    @classmethod
    def from_proto(cls, resource):
        if not resource:
            return resource
        full_name = subnetwork_pb2.ComputeBetaSubnetworkLogConfigAggregationIntervalEnum.Name(
            resource
        )
        return full_name[len("ComputeBetaSubnetworkLogConfigAggregationIntervalEnum") :]
class SubnetworkLogConfigMetadataEnum(object):
    """Maps the short string form of the log-metadata enum to/from proto values."""

    @classmethod
    def to_proto(cls, resource):
        # Falsy (None/"") passes through unchanged.
        if not resource:
            return resource
        return subnetwork_pb2.ComputeBetaSubnetworkLogConfigMetadataEnum.Value(
            "ComputeBetaSubnetworkLogConfigMetadataEnum%s" % resource
        )

    @classmethod
    def from_proto(cls, resource):
        if not resource:
            return resource
        full_name = subnetwork_pb2.ComputeBetaSubnetworkLogConfigMetadataEnum.Name(resource)
        return full_name[len("ComputeBetaSubnetworkLogConfigMetadataEnum") :]
class Primitive(object):
    """Identity conversions for scalar proto fields.

    Proto3 cannot distinguish "unset" from the empty string, so to_proto
    normalizes any falsy value to "" while from_proto passes values through.
    """

    @classmethod
    def to_proto(cls, s):
        # Falsy values (None, "", 0, False) are serialized as "".
        return s if s else ""

    @classmethod
    def from_proto(cls, s):
        return s
from google3.cloud.graphite.mmv2.services.google.compute import subnetwork_pb2
from google3.cloud.graphite.mmv2.services.google.compute import subnetwork_pb2_grpc
from typing import List
class Subnetwork(object):
def __init__(
self,
creation_timestamp: str = None,
description: str = None,
gateway_address: str = None,
ip_cidr_range: str = None,
name: str = None,
network: str = None,
fingerprint: str = None,
purpose: str = None,
role: str = None,
secondary_ip_ranges: list = None,
private_ip_google_access: bool = None,
region: str = None,
log_config: dict = None,
project: str = None,
self_link: str = None,
enable_flow_logs: bool = None,
service_account_file: str = "",
):
channel.initialize()
self.description = description
self.ip_cidr_range = ip_cidr_range
self.name = name
self.network = network
self.purpose = purpose
self.role = role
self.secondary_ip_ranges = secondary_ip_ranges
self.private_ip_google_access = private_ip_google_access
self.region = region
self.log_config = log_config
self.project = project
self.enable_flow_logs = enable_flow_logs
self.service_account_file = service_account_file
def apply(self):
stub = subnetwork_pb2_grpc.ComputeBetaSubnetworkServiceStub(channel.Channel())
request = subnetwork_pb2.ApplyComputeBetaSubnetworkRequest()
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.ip_cidr_range):
request.resource.ip_cidr_range = Primitive.to_proto(self.ip_cidr_range)
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.network):
request.resource.network = Primitive.to_proto(self.network)
if SubnetworkPurposeEnum.to_proto(self.purpose):
request.resource.purpose = SubnetworkPurposeEnum.to_proto(self.purpose)
if SubnetworkRoleEnum.to_proto(self.role):
request.resource.role = SubnetworkRoleEnum.to_proto(self.role)
if SubnetworkSecondaryIPRangesArray.to_proto(self.secondary_ip_ranges):
request.resource.secondary_ip_ranges.extend(
SubnetworkSecondaryIPRangesArray.to_proto(self.secondary_ip_ranges)
)
if Primitive.to_proto(self.private_ip_google_access):
request.resource.private_ip_google_access = Primitive.to_proto(
self.private_ip_google_access
)
if Primitive.to_proto(self.region):
request.resource.region = Primitive.to_proto(self.region)
if SubnetworkLogConfig.to_proto(self.log_config):
request.resource.log_config.CopyFrom(
SubnetworkLogConfig.to_proto(self.log_config)
)
else:
request.resource.ClearField("log_config")
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.enable_flow_logs):
request.resource.enable_flow_logs = Primitive.to_proto(
self.enable_flow_logs
)
request.service_account_file = self.service_account_file
response = stub.ApplyComputeBetaSubnetwork(request)
self.creation_timestamp = Primitive.from_proto(response.creation_timestamp)
self.description = Primitive.from_proto(response.description)
self.gateway_address = Primitive.from_proto(response.gateway_address)
self.ip_cidr_range = Primitive.from_proto(response.ip_cidr_range)
self.name = Primitive.from_proto(response.name)
self.network = Primitive.from_proto(response.network)
self.fingerprint = Primitive.from_proto(response.fingerprint)
self.purpose = SubnetworkPurposeEnum.from_proto(response.purpose)
self.role = SubnetworkRoleEnum.from_proto(response.role)
self.secondary_ip_ranges = SubnetworkSecondaryIPRangesArray.from_proto(
response.secondary_ip_ranges
)
self.private_ip_google_access = Primitive.from_proto(
response.private_ip_google_access
)
self.region = Primitive.from_proto(response.region)
self.log_config = SubnetworkLogConfig.from_proto(response.log_config)
self.project = Primitive.from_proto(response.project)
self.self_link = Primitive.from_proto(response.self_link)
self.enable_flow_logs = Primitive.from_proto(response.enable_flow_logs)
    def delete(self):
        """Delete this subnetwork through the DCL gRPC service.

        Mirrors apply(): every field that is currently set (truthy after
        conversion) is copied into the Delete request's resource so the
        server can identify the object; falsy/unset fields are skipped.
        """
        stub = subnetwork_pb2_grpc.ComputeBetaSubnetworkServiceStub(channel.Channel())
        request = subnetwork_pb2.DeleteComputeBetaSubnetworkRequest()
        request.service_account_file = self.service_account_file
        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)
        if Primitive.to_proto(self.ip_cidr_range):
            request.resource.ip_cidr_range = Primitive.to_proto(self.ip_cidr_range)
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.network):
            request.resource.network = Primitive.to_proto(self.network)
        if SubnetworkPurposeEnum.to_proto(self.purpose):
            request.resource.purpose = SubnetworkPurposeEnum.to_proto(self.purpose)
        if SubnetworkRoleEnum.to_proto(self.role):
            request.resource.role = SubnetworkRoleEnum.to_proto(self.role)
        if SubnetworkSecondaryIPRangesArray.to_proto(self.secondary_ip_ranges):
            request.resource.secondary_ip_ranges.extend(
                SubnetworkSecondaryIPRangesArray.to_proto(self.secondary_ip_ranges)
            )
        if Primitive.to_proto(self.private_ip_google_access):
            request.resource.private_ip_google_access = Primitive.to_proto(
                self.private_ip_google_access
            )
        if Primitive.to_proto(self.region):
            request.resource.region = Primitive.to_proto(self.region)
        # log_config is a message field: it must be copied with CopyFrom when
        # set and explicitly cleared when unset (plain assignment is invalid).
        if SubnetworkLogConfig.to_proto(self.log_config):
            request.resource.log_config.CopyFrom(
                SubnetworkLogConfig.to_proto(self.log_config)
            )
        else:
            request.resource.ClearField("log_config")
        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.enable_flow_logs):
            request.resource.enable_flow_logs = Primitive.to_proto(
                self.enable_flow_logs
            )
        response = stub.DeleteComputeBetaSubnetwork(request)
    @classmethod
    def list(self, project, region, service_account_file=""):
        """Return all subnetworks in *project*/*region* via the List RPC.

        NOTE: this generated code uses `self` as the classmethod receiver,
        and the request proto's fields are capitalized (`Project`/`Region`).
        """
        stub = subnetwork_pb2_grpc.ComputeBetaSubnetworkServiceStub(channel.Channel())
        request = subnetwork_pb2.ListComputeBetaSubnetworkRequest()
        request.service_account_file = service_account_file
        request.Project = project
        request.Region = region
        return stub.ListComputeBetaSubnetwork(request).items
    def to_proto(self):
        """Serialize this object into a ComputeBetaSubnetwork proto.

        Only fields whose converted value is truthy are written; the
        message field log_config is either copied or explicitly cleared.
        """
        resource = subnetwork_pb2.ComputeBetaSubnetwork()
        if Primitive.to_proto(self.description):
            resource.description = Primitive.to_proto(self.description)
        if Primitive.to_proto(self.ip_cidr_range):
            resource.ip_cidr_range = Primitive.to_proto(self.ip_cidr_range)
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.network):
            resource.network = Primitive.to_proto(self.network)
        if SubnetworkPurposeEnum.to_proto(self.purpose):
            resource.purpose = SubnetworkPurposeEnum.to_proto(self.purpose)
        if SubnetworkRoleEnum.to_proto(self.role):
            resource.role = SubnetworkRoleEnum.to_proto(self.role)
        if SubnetworkSecondaryIPRangesArray.to_proto(self.secondary_ip_ranges):
            resource.secondary_ip_ranges.extend(
                SubnetworkSecondaryIPRangesArray.to_proto(self.secondary_ip_ranges)
            )
        if Primitive.to_proto(self.private_ip_google_access):
            resource.private_ip_google_access = Primitive.to_proto(
                self.private_ip_google_access
            )
        if Primitive.to_proto(self.region):
            resource.region = Primitive.to_proto(self.region)
        if SubnetworkLogConfig.to_proto(self.log_config):
            resource.log_config.CopyFrom(SubnetworkLogConfig.to_proto(self.log_config))
        else:
            resource.ClearField("log_config")
        if Primitive.to_proto(self.project):
            resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.enable_flow_logs):
            resource.enable_flow_logs = Primitive.to_proto(self.enable_flow_logs)
        return resource
class SubnetworkSecondaryIPRanges(object):
    """Wrapper for one secondary IP range (range_name + ip_cidr_range)."""

    def __init__(self, range_name: str = None, ip_cidr_range: str = None):
        self.range_name = range_name
        self.ip_cidr_range = ip_cidr_range

    @classmethod
    def to_proto(cls, resource):
        """Convert a wrapper object into its proto message (None for falsy input)."""
        if not resource:
            return None
        proto = subnetwork_pb2.ComputeBetaSubnetworkSecondaryIPRanges()
        range_name = Primitive.to_proto(resource.range_name)
        if range_name:
            proto.range_name = range_name
        cidr = Primitive.to_proto(resource.ip_cidr_range)
        if cidr:
            proto.ip_cidr_range = cidr
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Convert a proto message back into a wrapper (None for falsy input)."""
        if not resource:
            return None
        return SubnetworkSecondaryIPRanges(
            range_name=Primitive.from_proto(resource.range_name),
            ip_cidr_range=Primitive.from_proto(resource.ip_cidr_range),
        )
class SubnetworkSecondaryIPRangesArray(object):
    """Bulk converter for repeated secondary-IP-range fields."""

    @classmethod
    def to_proto(cls, resources):
        """Convert each element; falsy input is returned unchanged."""
        if not resources:
            return resources
        return [SubnetworkSecondaryIPRanges.to_proto(item) for item in resources]

    @classmethod
    def from_proto(cls, resources):
        """Convert every proto element back to a wrapper object."""
        return [SubnetworkSecondaryIPRanges.from_proto(item) for item in resources]
class SubnetworkLogConfig(object):
    """Flow-log settings: aggregation interval, sampling rate, metadata mode."""

    def __init__(
        self,
        aggregation_interval: str = None,
        flow_sampling: float = None,
        metadata: str = None,
    ):
        self.aggregation_interval = aggregation_interval
        self.flow_sampling = flow_sampling
        self.metadata = metadata

    @classmethod
    def to_proto(cls, resource):
        """Build the proto message, writing only fields that convert truthy."""
        if not resource:
            return None
        proto = subnetwork_pb2.ComputeBetaSubnetworkLogConfig()
        interval = SubnetworkLogConfigAggregationIntervalEnum.to_proto(
            resource.aggregation_interval
        )
        if interval:
            proto.aggregation_interval = interval
        sampling = Primitive.to_proto(resource.flow_sampling)
        if sampling:
            proto.flow_sampling = sampling
        metadata = SubnetworkLogConfigMetadataEnum.to_proto(resource.metadata)
        if metadata:
            proto.metadata = metadata
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Rebuild a wrapper object from the proto message (None for falsy)."""
        if not resource:
            return None
        return SubnetworkLogConfig(
            aggregation_interval=SubnetworkLogConfigAggregationIntervalEnum.from_proto(
                resource.aggregation_interval
            ),
            flow_sampling=Primitive.from_proto(resource.flow_sampling),
            metadata=SubnetworkLogConfigMetadataEnum.from_proto(resource.metadata),
        )
class SubnetworkLogConfigArray(object):
    """Bulk converter for repeated log-config fields."""

    @classmethod
    def to_proto(cls, resources):
        """Convert each element; falsy input is returned unchanged."""
        if not resources:
            return resources
        return [SubnetworkLogConfig.to_proto(item) for item in resources]

    @classmethod
    def from_proto(cls, resources):
        """Convert every proto element back to a wrapper object."""
        return [SubnetworkLogConfig.from_proto(item) for item in resources]
class SubnetworkPurposeEnum(object):
    """Maps short purpose names to/from prefixed proto enum values."""

    @classmethod
    def to_proto(cls, resource):
        """Short name -> proto enum number; falsy input passes through."""
        if not resource:
            return resource
        return subnetwork_pb2.ComputeBetaSubnetworkPurposeEnum.Value(
            "ComputeBetaSubnetworkPurposeEnum{}".format(resource)
        )

    @classmethod
    def from_proto(cls, resource):
        """Proto enum number -> short name; falsy input passes through."""
        if not resource:
            return resource
        full_name = subnetwork_pb2.ComputeBetaSubnetworkPurposeEnum.Name(resource)
        return full_name[len("ComputeBetaSubnetworkPurposeEnum") :]
class SubnetworkRoleEnum(object):
    """Maps short role names to/from prefixed proto enum values."""

    @classmethod
    def to_proto(cls, resource):
        """Short name -> proto enum number; falsy input passes through."""
        if not resource:
            return resource
        return subnetwork_pb2.ComputeBetaSubnetworkRoleEnum.Value(
            "ComputeBetaSubnetworkRoleEnum{}".format(resource)
        )

    @classmethod
    def from_proto(cls, resource):
        """Proto enum number -> short name; falsy input passes through."""
        if not resource:
            return resource
        full_name = subnetwork_pb2.ComputeBetaSubnetworkRoleEnum.Name(resource)
        return full_name[len("ComputeBetaSubnetworkRoleEnum") :]
class SubnetworkLogConfigAggregationIntervalEnum(object):
    """Maps short aggregation-interval names to/from proto enum values."""

    @classmethod
    def to_proto(cls, resource):
        """Short name -> proto enum number; falsy input passes through."""
        if not resource:
            return resource
        full_name = "ComputeBetaSubnetworkLogConfigAggregationIntervalEnum{}".format(
            resource
        )
        return subnetwork_pb2.ComputeBetaSubnetworkLogConfigAggregationIntervalEnum.Value(
            full_name
        )

    @classmethod
    def from_proto(cls, resource):
        """Proto enum number -> short name; falsy input passes through."""
        if not resource:
            return resource
        name = subnetwork_pb2.ComputeBetaSubnetworkLogConfigAggregationIntervalEnum.Name(
            resource
        )
        return name[len("ComputeBetaSubnetworkLogConfigAggregationIntervalEnum") :]
class SubnetworkLogConfigMetadataEnum(object):
    """Maps short metadata-mode names to/from proto enum values."""

    @classmethod
    def to_proto(cls, resource):
        """Short name -> proto enum number; falsy input passes through."""
        if not resource:
            return resource
        return subnetwork_pb2.ComputeBetaSubnetworkLogConfigMetadataEnum.Value(
            "ComputeBetaSubnetworkLogConfigMetadataEnum{}".format(resource)
        )

    @classmethod
    def from_proto(cls, resource):
        """Proto enum number -> short name; falsy input passes through."""
        if not resource:
            return resource
        full_name = subnetwork_pb2.ComputeBetaSubnetworkLogConfigMetadataEnum.Name(
            resource
        )
        return full_name[len("ComputeBetaSubnetworkLogConfigMetadataEnum") :]
class Primitive(object):
    """Identity converter for primitive fields.

    Falsy values are mapped to "" on the way in (the proto string default),
    and values coming out of a proto are returned unchanged.
    """

    @classmethod
    def to_proto(cls, s):
        """Return s, or "" when s is falsy."""
        return s if s else ""

    @classmethod
    def from_proto(cls, s):
        """Protos already hold primitives; return unchanged."""
        return s
# ALGORITHMS CLASSES BASIC LANGUAGE FEATURES OBJECT-ORIENTED PROGRAMMING
# FUNDAMENTALS RULES
import allure
import unittest
from utils.log_func import print_log
from kyu_4.the_greatest_warrior.warrior import Warrior
@allure.epic('4 kyu')
@allure.parent_suite('Competent')
@allure.suite('OOP')
@allure.sub_suite("Unit Tests")
@allure.feature('Classes')
@allure.story('The Greatest Warrior')
@allure.tag('ALGORITHMS', 'CLASSES', 'BASIC LANGUAGE FEATURES',
            'OBJECT-ORIENTED PROGRAMMING', 'FUNDAMENTALS', 'RULES')
@allure.link(
    url='https://www.codewars.com/kata/5941c545f5c394fef900000c/train/python',
    name='Source/Kata')
class WarriorTestCase(unittest.TestCase):
    """
    Testing Warrior class
    """
    def test_warrior_tom(self):
        """
        Testing Warrior class >>> tom
        """
        allure.dynamic.title("Testing Warrior class >>> tom")
        allure.dynamic.severity(allure.severity_level.NORMAL)
        allure.dynamic.description_html(
            '<h3>Codewars badge:</h3>'
            '<img src="https://www.codewars.com/users/myFirstCode'
            '/badges/large">'
            '<h3>Test Description:</h3>'
            "<p>Basic test: level, experience, rank</p>")
        with allure.step("Instantiate a new warrior >>> tom"):
            tom = Warrior()
        with allure.step("Assert level"):
            print_log(level=tom.level)
            self.assertEqual(tom.level, 1)
        with allure.step("Assert experience"):
            print_log(experience=tom.experience)
            self.assertEqual(tom.experience, 100)
        with allure.step("Assert rank"):
            print_log(rank=tom.rank)
            self.assertEqual(tom.rank, "Pushover")

    def test_warrior_bruce_lee(self):
        """
        Testing Warrior class >>> bruce_lee
        """
        allure.dynamic.title("Testing Warrior class >>> bruce_lee")
        allure.dynamic.severity(allure.severity_level.NORMAL)
        allure.dynamic.description_html(
            '<h3>Codewars badge:</h3>'
            '<img src="https://www.codewars.com/users/myFirstCode'
            '/badges/large">'
            '<h3>Test Description:</h3>'
            "<p>Advanced Warrior class assertions</p>")
        with allure.step("Instantiate a new warrior >>> bruce_lee"):
            bruce_lee = Warrior()
        with allure.step("Assert level"):
            print_log(level=bruce_lee.level)
            self.assertEqual(bruce_lee.level, 1)
        with allure.step("Assert experience"):
            print_log(experience=bruce_lee.experience)
            self.assertEqual(bruce_lee.experience, 100)
        with allure.step("Assert rank"):
            print_log(rank=bruce_lee.rank)
            self.assertEqual(bruce_lee.rank, "Pushover")
        with allure.step("Assert achievements"):
            self.assertListEqual(bruce_lee.achievements, [])
        with allure.step("Assert training"):
            self.assertEqual(bruce_lee.training(
                ["Defeated Chuck Norris", 9000, 1]), "Defeated Chuck Norris")
        with allure.step("Assert experience"):
            self.assertEqual(bruce_lee.experience, 9100)
        with allure.step("Assert level"):
            # Consistency fix: use the (actual, expected) argument order used
            # by every other assertion in this suite.
            self.assertEqual(bruce_lee.level, 91)
        with allure.step("Assert rank"):
            self.assertEqual(bruce_lee.rank, "Master")
        with allure.step("Assert battle"):
            self.assertEqual(bruce_lee.battle(90), "A good fight")
        with allure.step("Assert experience"):
            self.assertEqual(bruce_lee.experience, 9105)
        with allure.step("Assert achievements"):
            self.assertEqual(bruce_lee.achievements, ["Defeated Chuck Norris"])
# ALGORITHMS CLASSES BASIC LANGUAGE FEATURES OBJECT-ORIENTED PROGRAMMING
# FUNDAMENTALS RULES
import allure
import unittest
from utils.log_func import print_log
from kyu_4.the_greatest_warrior.warrior import Warrior
@allure.epic('4 kyu')
@allure.parent_suite('Competent')
@allure.suite('OOP')
@allure.sub_suite("Unit Tests")
@allure.feature('Classes')
@allure.story('The Greatest Warrior')
@allure.tag('ALGORITHMS', 'CLASSES', 'BASIC LANGUAGE FEATURES',
            'OBJECT-ORIENTED PROGRAMMING', 'FUNDAMENTALS', 'RULES')
@allure.link(
    url='https://www.codewars.com/kata/5941c545f5c394fef900000c/train/python',
    name='Source/Kata')
class WarriorTestCase(unittest.TestCase):
    """
    Testing Warrior class
    """
    def test_warrior_tom(self):
        """
        Testing Warrior class >>> tom
        """
        allure.dynamic.title("Testing Warrior class >>> tom")
        allure.dynamic.severity(allure.severity_level.NORMAL)
        allure.dynamic.description_html(
            '<h3>Codewars badge:</h3>'
            '<img src="https://www.codewars.com/users/myFirstCode'
            '/badges/large">'
            '<h3>Test Description:</h3>'
            "<p>Basic test: level, experience, rank</p>")
        with allure.step("Instantiate a new warrior >>> tom"):
            tom = Warrior()
        with allure.step("Assert level"):
            print_log(level=tom.level)
            self.assertEqual(tom.level, 1)
        with allure.step("Assert experience"):
            print_log(experience=tom.experience)
            self.assertEqual(tom.experience, 100)
        with allure.step("Assert rank"):
            print_log(rank=tom.rank)
            self.assertEqual(tom.rank, "Pushover")

    def test_warrior_bruce_lee(self):
        """
        Testing Warrior class >>> bruce_lee
        """
        allure.dynamic.title("Testing Warrior class >>> bruce_lee")
        allure.dynamic.severity(allure.severity_level.NORMAL)
        allure.dynamic.description_html(
            '<h3>Codewars badge:</h3>'
            '<img src="https://www.codewars.com/users/myFirstCode'
            '/badges/large">'
            '<h3>Test Description:</h3>'
            "<p>Advanced Warrior class assertions</p>")
        with allure.step("Instantiate a new warrior >>> bruce_lee"):
            bruce_lee = Warrior()
        with allure.step("Assert level"):
            print_log(level=bruce_lee.level)
            self.assertEqual(bruce_lee.level, 1)
        with allure.step("Assert experience"):
            print_log(experience=bruce_lee.experience)
            self.assertEqual(bruce_lee.experience, 100)
        with allure.step("Assert rank"):
            print_log(rank=bruce_lee.rank)
            self.assertEqual(bruce_lee.rank, "Pushover")
        with allure.step("Assert achievements"):
            self.assertListEqual(bruce_lee.achievements, [])
        with allure.step("Assert training"):
            self.assertEqual(bruce_lee.training(
                ["Defeated Chuck Norris", 9000, 1]), "Defeated Chuck Norris")
        with allure.step("Assert experience"):
            self.assertEqual(bruce_lee.experience, 9100)
        with allure.step("Assert level"):
            # Consistency fix: use the (actual, expected) argument order used
            # by every other assertion in this suite.
            self.assertEqual(bruce_lee.level, 91)
        with allure.step("Assert rank"):
            self.assertEqual(bruce_lee.rank, "Master")
        with allure.step("Assert battle"):
            self.assertEqual(bruce_lee.battle(90), "A good fight")
        with allure.step("Assert experience"):
            self.assertEqual(bruce_lee.experience, 9105)
        with allure.step("Assert achievements"):
            self.assertEqual(bruce_lee.achievements, ["Defeated Chuck Norris"])
import optparse
import tokenize
import warnings
# Polyfill stdin loading/reading lines
# https://gitlab.com/pycqa/flake8-polyfill/blob/1.0.1/src/flake8_polyfill/stdin.py#L52-57
try:
from flake8.engine import pep8
stdin_get_value = pep8.stdin_get_value
readlines = pep8.readlines
except ImportError:
from flake8 import utils
import pycodestyle
stdin_get_value = utils.stdin_get_value
readlines = pycodestyle.readlines
from flake8_quotes.__about__ import __version__
from flake8_quotes.docstring_detection import get_docstring_tokens
class QuoteChecker(object):
    """Flake8 plugin that enforces a consistent quote style.

    Emits Q000 (wrong inline quote), Q001 (wrong multiline quote),
    Q002 (wrong docstring quote) and Q003 (avoidable escaped quotes).
    """
    name = __name__
    version = __version__
    INLINE_QUOTES = {
        # When user wants only single quotes
        "'": {
            'good_single': "'",
            'bad_single': '"',
            'single_error_message': 'Double quotes found but single quotes preferred',
        },
        # When user wants only double quotes
        '"': {
            'good_single': '"',
            'bad_single': "'",
            'single_error_message': 'Single quotes found but double quotes preferred',
        },
    }
    # Provide aliases for Windows CLI support
    # https://github.com/zheller/flake8-quotes/issues/49
    INLINE_QUOTES['single'] = INLINE_QUOTES["'"]
    INLINE_QUOTES['double'] = INLINE_QUOTES['"']
    MULTILINE_QUOTES = {
        "'": {
            'good_multiline': "'''",
            'good_multiline_ending': '\'"""',
            'bad_multiline': '"""',
            'multiline_error_message': 'Double quote multiline found but single quotes preferred',
        },
        '"': {
            'good_multiline': '"""',
            'good_multiline_ending': '"\'\'\'',
            'bad_multiline': "'''",
            'multiline_error_message': 'Single quote multiline found but double quotes preferred',
        },
    }
    # Provide Windows CLI and multi-quote aliases
    MULTILINE_QUOTES['single'] = MULTILINE_QUOTES["'"]
    MULTILINE_QUOTES['double'] = MULTILINE_QUOTES['"']
    MULTILINE_QUOTES["'''"] = MULTILINE_QUOTES["'"]
    MULTILINE_QUOTES['"""'] = MULTILINE_QUOTES['"']
    DOCSTRING_QUOTES = {
        "'": {
            'good_docstring': "'''",
            'bad_docstring': '"""',
            'docstring_error_message': 'Double quote docstring found but single quotes preferred',
        },
        '"': {
            'good_docstring': '"""',
            'bad_docstring': "'''",
            'docstring_error_message': 'Single quote docstring found but double quotes preferred',
        },
    }
    # Provide Windows CLI and docstring-quote aliases
    DOCSTRING_QUOTES['single'] = DOCSTRING_QUOTES["'"]
    DOCSTRING_QUOTES['double'] = DOCSTRING_QUOTES['"']
    DOCSTRING_QUOTES["'''"] = DOCSTRING_QUOTES["'"]
    DOCSTRING_QUOTES['"""'] = DOCSTRING_QUOTES['"']
    def __init__(self, tree, lines=None, filename='(none)'):
        """Store the file location/lines; `tree` is unused but required by flake8."""
        self.filename = filename
        self.lines = lines
    @staticmethod
    def _register_opt(parser, *args, **kwargs):
        """
        Handler to register an option for both Flake8 3.x and 2.x.
        This is based on:
        https://github.com/PyCQA/flake8/blob/3.0.0b2/docs/source/plugin-development/cross-compatibility.rst#option-handling-on-flake8-2-and-3
        It only supports `parse_from_config` from the original function and it
        uses the `Option` object returned to get the string.
        """
        try:
            # Flake8 3.x registration
            parser.add_option(*args, **kwargs)
        except (optparse.OptionError, TypeError):
            # Flake8 2.x registration
            parse_from_config = kwargs.pop('parse_from_config', False)
            option = parser.add_option(*args, **kwargs)
            if parse_from_config:
                parser.config_options.append(option.get_opt_string().lstrip('-'))
    @classmethod
    def add_options(cls, parser):
        """Register the plugin's CLI/config options with flake8's parser."""
        cls._register_opt(parser, '--quotes', action='store',
                          parse_from_config=True, type='choice',
                          choices=sorted(cls.INLINE_QUOTES.keys()),
                          help='Deprecated alias for `--inline-quotes`')
        cls._register_opt(parser, '--inline-quotes', default="'",
                          action='store', parse_from_config=True, type='choice',
                          choices=sorted(cls.INLINE_QUOTES.keys()),
                          help="Quote to expect in all files (default: ')")
        cls._register_opt(parser, '--multiline-quotes', default=None, action='store',
                          parse_from_config=True, type='choice',
                          choices=sorted(cls.MULTILINE_QUOTES.keys()),
                          help='Quote to expect in all files (default: """)')
        cls._register_opt(parser, '--docstring-quotes', default=None, action='store',
                          parse_from_config=True, type='choice',
                          choices=sorted(cls.DOCSTRING_QUOTES.keys()),
                          help='Quote to expect in all files (default: """)')
        cls._register_opt(parser, '--avoid-escape', default=None, action='store_true',
                          parse_from_config=True,
                          help='Avoiding escaping same quotes in inline strings (enabled by default)')
        cls._register_opt(parser, '--no-avoid-escape', dest='avoid_escape', default=None, action='store_false',
                          parse_from_config=False,
                          help='Disable avoiding escaping same quotes in inline strings')
    @classmethod
    def parse_options(cls, options):
        """Resolve parsed options into the class-level `cls.config` dict."""
        # Define our default config
        # cls.config = {good_single: ', good_multiline: ''', bad_single: ", bad_multiline: """}
        cls.config = {}
        cls.config.update(cls.INLINE_QUOTES["'"])
        cls.config.update(cls.MULTILINE_QUOTES['"""'])
        cls.config.update(cls.DOCSTRING_QUOTES['"""'])
        # If `options.quotes` was specified, then use it
        if hasattr(options, 'quotes') and options.quotes is not None:
            # https://docs.python.org/2/library/warnings.html#warnings.warn
            warnings.warn('flake8-quotes has deprecated `quotes` in favor of `inline-quotes`. '
                          'Please update your configuration')
            cls.config.update(cls.INLINE_QUOTES[options.quotes])
        # Otherwise, use the supported `inline_quotes`
        else:
            # cls.config = {good_single: ', good_multiline: """, bad_single: ", bad_multiline: '''}
            # -> {good_single: ", good_multiline: """, bad_single: ', bad_multiline: '''}
            cls.config.update(cls.INLINE_QUOTES[options.inline_quotes])
        # If multiline quotes was specified, overload our config with those options
        if hasattr(options, 'multiline_quotes') and options.multiline_quotes is not None:
            # cls.config = {good_single: ', good_multiline: """, bad_single: ", bad_multiline: '''}
            # -> {good_single: ', good_multiline: ''', bad_single: ", bad_multiline: """}
            cls.config.update(cls.MULTILINE_QUOTES[options.multiline_quotes])
        # If docstring quotes was specified, overload our config with those options
        if hasattr(options, 'docstring_quotes') and options.docstring_quotes is not None:
            cls.config.update(cls.DOCSTRING_QUOTES[options.docstring_quotes])
        # If avoid escaped specified, add to config
        if hasattr(options, 'avoid_escape') and options.avoid_escape is not None:
            cls.config.update({'avoid_escape': options.avoid_escape})
        else:
            cls.config.update({'avoid_escape': True})
    def get_file_contents(self):
        """Return the file's lines, reading stdin / cached lines / disk as needed."""
        if self.filename in ('stdin', '-', None):
            return stdin_get_value().splitlines(True)
        else:
            if self.lines:
                return self.lines
            else:
                return readlines(self.filename)
    def run(self):
        """Yield flake8 error tuples, skipping lines with a trailing `noqa`."""
        file_contents = self.get_file_contents()
        noqa_line_numbers = self.get_noqa_lines(file_contents)
        errors = self.get_quotes_errors(file_contents)
        for error in errors:
            if error.get('line') not in noqa_line_numbers:
                yield (error.get('line'), error.get('col'), error.get('message'), type(self))
    def get_noqa_lines(self, file_contents):
        """Return the 1-based line numbers whose comment ends in `noqa`."""
        # The default-arg lambda acts as a readline-style callable over the lines.
        tokens = [Token(t) for t in tokenize.generate_tokens(lambda L=iter(file_contents): next(L))]
        return [token.start_row
                for token in tokens
                if token.type == tokenize.COMMENT and token.string.endswith('noqa')]
    def get_quotes_errors(self, file_contents):
        """Tokenize the file and yield a dict per quote-style violation."""
        tokens = [Token(t) for t in tokenize.generate_tokens(lambda L=iter(file_contents): next(L))]
        docstring_tokens = get_docstring_tokens(tokens)
        for token in tokens:
            if token.type != tokenize.STRING:
                # ignore non strings
                continue
            # Remove any prefixes in strings like `u` from `u"foo"`
            # DEV: `last_quote_char` is 1 character, even for multiline strings
            #      `"foo"`   -> `"foo"`
            #      `b"foo"`  -> `"foo"`
            #      `br"foo"` -> `"foo"`
            #      `b"""foo"""` -> `"""foo"""`
            last_quote_char = token.string[-1]
            first_quote_index = token.string.index(last_quote_char)
            prefix = token.string[:first_quote_index].lower()
            unprefixed_string = token.string[first_quote_index:]
            # Determine if our string is multiline-based
            #   "foo"[0] * 3 = " * 3 = """
            #   "foo"[0:3] = "fo
            #   """foo"""[0:3] = """
            is_docstring = token in docstring_tokens
            is_multiline_string = unprefixed_string[0] * 3 == unprefixed_string[0:3]
            start_row, start_col = token.start
            # If our string is a docstring
            # DEV: Docstring quotes must come before multiline quotes as it can as a multiline quote
            if is_docstring:
                if self.config['good_docstring'] in unprefixed_string:
                    continue
                yield {
                    'message': 'Q002 ' + self.config['docstring_error_message'],
                    'line': start_row,
                    'col': start_col,
                }
            # Otherwise if our string is multiline
            elif is_multiline_string:
                # If our string is or containing a known good string, then ignore it
                #   (""")foo""" -> good (continue)
                #   '''foo(""")''' -> good (continue)
                #   (''')foo''' -> possibly bad
                if self.config['good_multiline'] in unprefixed_string:
                    continue
                # If our string ends with a known good ending, then ignore it
                #   '''foo("''') -> good (continue)
                #   Opposite, """foo"""", would break our parser (cannot handle """" ending)
                if unprefixed_string.endswith(self.config['good_multiline_ending']):
                    continue
                # Output our error
                yield {
                    'message': 'Q001 ' + self.config['multiline_error_message'],
                    'line': start_row,
                    'col': start_col,
                }
            # Otherwise (string is inline quote)
            else:
                #   'This is a string'       -> Good
                #   'This is a "string"'     -> Good
                #   'This is a \"string\"'   -> Good
                #   'This is a \'string\''   -> Bad (Q003)  Escaped inner quotes
                #   '"This" is a \'string\'' -> Good         Changing outer quotes would not avoid escaping
                #   "This is a string"       -> Bad (Q000)
                #   "This is a 'string'"     -> Good         Avoids escaped inner quotes
                #   "This is a \"string\""   -> Bad (Q000)
                #   "\"This\" is a 'string'" -> Good
                string_contents = unprefixed_string[1:-1]
                # If string preferred type, check for escapes
                if last_quote_char == self.config['good_single']:
                    if not self.config['avoid_escape'] or 'r' in prefix:
                        continue
                    if (self.config['good_single'] in string_contents and
                            not self.config['bad_single'] in string_contents):
                        yield {
                            'message': 'Q003 Change outer quotes to avoid escaping inner quotes',
                            'line': start_row,
                            'col': start_col,
                        }
                    continue
                # If not preferred type, only allow use to avoid escapes.
                if not self.config['good_single'] in string_contents:
                    yield {
                        'message': 'Q000 ' + self.config['single_error_message'],
                        'line': start_row,
                        'col': start_col,
                    }
class Token:
    """Python 2 and 3 compatible view over a raw `tokenize` tuple."""

    def __init__(self, token):
        # Keep the raw tuple; properties index into it lazily.
        self.token = token

    @property
    def type(self):
        """Token type code (e.g. tokenize.STRING)."""
        return self.token[0]

    @property
    def string(self):
        """Raw source text of the token."""
        return self.token[1]

    @property
    def start(self):
        """(row, col) tuple where the token begins."""
        return self.token[2]

    @property
    def start_row(self):
        """1-based starting row."""
        return self.start[0]

    @property
    def start_col(self):
        """0-based starting column."""
        return self.start[1]
import tokenize
import warnings
# Polyfill stdin loading/reading lines
# https://gitlab.com/pycqa/flake8-polyfill/blob/1.0.1/src/flake8_polyfill/stdin.py#L52-57
try:
from flake8.engine import pep8
stdin_get_value = pep8.stdin_get_value
readlines = pep8.readlines
except ImportError:
from flake8 import utils
import pycodestyle
stdin_get_value = utils.stdin_get_value
readlines = pycodestyle.readlines
from flake8_quotes.__about__ import __version__
from flake8_quotes.docstring_detection import get_docstring_tokens
class QuoteChecker(object):
name = __name__
version = __version__
INLINE_QUOTES = {
# When user wants only single quotes
"'": {
'good_single': "'",
'bad_single': '"',
'single_error_message': 'Double quotes found but single quotes preferred',
},
# When user wants only double quotes
'"': {
'good_single': '"',
'bad_single': "'",
'single_error_message': 'Single quotes found but double quotes preferred',
},
}
# Provide aliases for Windows CLI support
# https://github.com/zheller/flake8-quotes/issues/49
INLINE_QUOTES['single'] = INLINE_QUOTES["'"]
INLINE_QUOTES['double'] = INLINE_QUOTES['"']
MULTILINE_QUOTES = {
"'": {
'good_multiline': "'''",
'good_multiline_ending': '\'"""',
'bad_multiline': '"""',
'multiline_error_message': 'Double quote multiline found but single quotes preferred',
},
'"': {
'good_multiline': '"""',
'good_multiline_ending': '"\'\'\'',
'bad_multiline': "'''",
'multiline_error_message': 'Single quote multiline found but double quotes preferred',
},
}
# Provide Windows CLI and multi-quote aliases
MULTILINE_QUOTES['single'] = MULTILINE_QUOTES["'"]
MULTILINE_QUOTES['double'] = MULTILINE_QUOTES['"']
MULTILINE_QUOTES["'''"] = MULTILINE_QUOTES["'"]
MULTILINE_QUOTES['"""'] = MULTILINE_QUOTES['"']
DOCSTRING_QUOTES = {
"'": {
'good_docstring': "'''",
'bad_docstring': '"""',
'docstring_error_message': 'Double quote docstring found but single quotes preferred',
},
'"': {
'good_docstring': '"""',
'bad_docstring': "'''",
'docstring_error_message': 'Single quote docstring found but double quotes preferred',
},
}
# Provide Windows CLI and docstring-quote aliases
DOCSTRING_QUOTES['single'] = DOCSTRING_QUOTES["'"]
DOCSTRING_QUOTES['double'] = DOCSTRING_QUOTES['"']
DOCSTRING_QUOTES["'''"] = DOCSTRING_QUOTES["'"]
DOCSTRING_QUOTES['"""'] = DOCSTRING_QUOTES['"']
def __init__(self, tree, lines=None, filename='(none)'):
self.filename = filename
self.lines = lines
@staticmethod
def _register_opt(parser, *args, **kwargs):
"""
Handler to register an option for both Flake8 3.x and 2.x.
This is based on:
https://github.com/PyCQA/flake8/blob/3.0.0b2/docs/source/plugin-development/cross-compatibility.rst#option-handling-on-flake8-2-and-3
It only supports `parse_from_config` from the original function and it
uses the `Option` object returned to get the string.
"""
try:
# Flake8 3.x registration
parser.add_option(*args, **kwargs)
except (optparse.OptionError, TypeError):
# Flake8 2.x registration
parse_from_config = kwargs.pop('parse_from_config', False)
option = parser.add_option(*args, **kwargs)
if parse_from_config:
parser.config_options.append(option.get_opt_string().lstrip('-'))
@classmethod
def add_options(cls, parser):
cls._register_opt(parser, '--quotes', action='store',
parse_from_config=True, type='choice',
choices=sorted(cls.INLINE_QUOTES.keys()),
help='Deprecated alias for `--inline-quotes`')
cls._register_opt(parser, '--inline-quotes', default="'",
action='store', parse_from_config=True, type='choice',
choices=sorted(cls.INLINE_QUOTES.keys()),
help="Quote to expect in all files (default: ')")
cls._register_opt(parser, '--multiline-quotes', default=None, action='store',
parse_from_config=True, type='choice',
choices=sorted(cls.MULTILINE_QUOTES.keys()),
help='Quote to expect in all files (default: """)')
cls._register_opt(parser, '--docstring-quotes', default=None, action='store',
parse_from_config=True, type='choice',
choices=sorted(cls.DOCSTRING_QUOTES.keys()),
help='Quote to expect in all files (default: """)')
cls._register_opt(parser, '--avoid-escape', default=None, action='store_true',
parse_from_config=True,
help='Avoiding escaping same quotes in inline strings (enabled by default)')
cls._register_opt(parser, '--no-avoid-escape', dest='avoid_escape', default=None, action='store_false',
parse_from_config=False,
help='Disable avoiding escaping same quotes in inline strings')
@classmethod
def parse_options(cls, options):
# Define our default config
# cls.config = {good_single: ', good_multiline: ''', bad_single: ", bad_multiline: """}
cls.config = {}
cls.config.update(cls.INLINE_QUOTES["'"])
cls.config.update(cls.MULTILINE_QUOTES['"""'])
cls.config.update(cls.DOCSTRING_QUOTES['"""'])
# If `options.quotes` was specified, then use it
if hasattr(options, 'quotes') and options.quotes is not None:
# https://docs.python.org/2/library/warnings.html#warnings.warn
warnings.warn('flake8-quotes has deprecated `quotes` in favor of `inline-quotes`. '
'Please update your configuration')
cls.config.update(cls.INLINE_QUOTES[options.quotes])
# Otherwise, use the supported `inline_quotes`
else:
# cls.config = {good_single: ', good_multiline: """, bad_single: ", bad_multiline: '''}
# -> {good_single: ", good_multiline: """, bad_single: ', bad_multiline: '''}
cls.config.update(cls.INLINE_QUOTES[options.inline_quotes])
# If multiline quotes was specified, overload our config with those options
if hasattr(options, 'multiline_quotes') and options.multiline_quotes is not None:
# cls.config = {good_single: ', good_multiline: """, bad_single: ", bad_multiline: '''}
# -> {good_single: ', good_multiline: ''', bad_single: ", bad_multiline: """}
cls.config.update(cls.MULTILINE_QUOTES[options.multiline_quotes])
# If docstring quotes was specified, overload our config with those options
if hasattr(options, 'docstring_quotes') and options.docstring_quotes is not None:
cls.config.update(cls.DOCSTRING_QUOTES[options.docstring_quotes])
# If avoid escaped specified, add to config
if hasattr(options, 'avoid_escape') and options.avoid_escape is not None:
cls.config.update({'avoid_escape': options.avoid_escape})
else:
cls.config.update({'avoid_escape': True})
def get_file_contents(self):
if self.filename in ('stdin', '-', None):
return stdin_get_value().splitlines(True)
else:
if self.lines:
return self.lines
else:
return readlines(self.filename)
def run(self):
file_contents = self.get_file_contents()
noqa_line_numbers = self.get_noqa_lines(file_contents)
errors = self.get_quotes_errors(file_contents)
for error in errors:
if error.get('line') not in noqa_line_numbers:
yield (error.get('line'), error.get('col'), error.get('message'), type(self))
def get_noqa_lines(self, file_contents):
tokens = [Token(t) for t in tokenize.generate_tokens(lambda L=iter(file_contents): next(L))]
return [token.start_row
for token in tokens
if token.type == tokenize.COMMENT and token.string.endswith('noqa')]
def get_quotes_errors(self, file_contents):
tokens = [Token(t) for t in tokenize.generate_tokens(lambda L=iter(file_contents): next(L))]
docstring_tokens = get_docstring_tokens(tokens)
for token in tokens:
if token.type != tokenize.STRING:
# ignore non strings
continue
# Remove any prefixes in strings like `u` from `u"foo"`
# DEV: `last_quote_char` is 1 character, even for multiline strings
# `"foo"` -> `"foo"`
# `b"foo"` -> `"foo"`
# `br"foo"` -> `"foo"`
# `b"""foo"""` -> `"""foo"""`
last_quote_char = token.string[-1]
first_quote_index = token.string.index(last_quote_char)
prefix = token.string[:first_quote_index].lower()
unprefixed_string = token.string[first_quote_index:]
# Determine if our string is multiline-based
# "foo"[0] * 3 = " * 3 = """
# "foo"[0:3] = "fo
# """foo"""[0:3] = """
is_docstring = token in docstring_tokens
is_multiline_string = unprefixed_string[0] * 3 == unprefixed_string[0:3]
start_row, start_col = token.start
# If our string is a docstring
# DEV: Docstring quotes must come before multiline quotes as it can as a multiline quote
if is_docstring:
if self.config['good_docstring'] in unprefixed_string:
continue
yield {
'message': 'Q002 ' + self.config['docstring_error_message'],
'line': start_row,
'col': start_col,
}
# Otherwise if our string is multiline
elif is_multiline_string:
# If our string is or containing a known good string, then ignore it
# (""")foo""" -> good (continue)
# '''foo(""")''' -> good (continue)
# (''')foo''' -> possibly bad
if self.config['good_multiline'] in unprefixed_string:
continue
# If our string ends with a known good ending, then ignore it
# '''foo("''') -> good (continue)
# Opposite, """foo"""", would break our parser (cannot handle """" ending)
if unprefixed_string.endswith(self.config['good_multiline_ending']):
continue
# Output our error
yield {
'message': 'Q001 ' + self.config['multiline_error_message'],
'line': start_row,
'col': start_col,
}
# Otherwise (string is inline quote)
else:
# 'This is a string' -> Good
# 'This is a "string"' -> Good
# 'This is a \"string\"' -> Good
# 'This is a \'string\'' -> Bad (Q003) Escaped inner quotes
# '"This" is a \'string\'' -> Good Changing outer quotes would not avoid escaping
# "This is a string" -> Bad (Q000)
# "This is a 'string'" -> Good Avoids escaped inner quotes
# "This is a \"string\"" -> Bad (Q000)
# "\"This\" is a 'string'" -> Good
string_contents = unprefixed_string[1:-1]
# If string preferred type, check for escapes
if last_quote_char == self.config['good_single']:
if not self.config['avoid_escape'] or 'r' in prefix:
continue
if (self.config['good_single'] in string_contents and
not self.config['bad_single'] in string_contents):
yield {
'message': 'Q003 Change outer quotes to avoid escaping inner quotes',
'line': start_row,
'col': start_col,
}
continue
# If not preferred type, only allow use to avoid escapes.
if not self.config['good_single'] in string_contents:
yield {
'message': 'Q000 ' + self.config['single_error_message'],
'line': start_row,
'col': start_col,
}
class Token:
"""Python 2 and 3 compatible token"""
def __init__(self, token):
self.token = token
@property
def type(self):
return self.token[0]
@property
def string(self):
return self.token[1]
@property
def start(self):
return self.token[2]
@property
def start_row(self):
return self.token[2][0]
@property
def start_col(self):
return self.token[2][1] | 0.606848 | 0.15059 |
from __future__ import division, print_function
import os, os.path
import copy
import pickle
import numpy
from ..util import plot, conversion, config
from .Potential import PotentialError, flatten
from ..util.conversion import physical_conversion,\
potential_physical_input, physical_compatible
class linearPotential(object):
"""Class representing 1D potentials"""
def __init__(self,amp=1.,ro=None,vo=None):
self._amp= amp
self.dim= 1
self.isRZ= False
self.hasC= False
self.hasC_dxdv= False
self.hasC_dens= False
# Parse ro and vo
if ro is None:
self._ro= config.__config__.getfloat('normalization','ro')
self._roSet= False
else:
ro= conversion.parse_length_kpc(ro)
self._ro= ro
self._roSet= True
if vo is None:
self._vo= config.__config__.getfloat('normalization','vo')
self._voSet= False
else:
vo= conversion.parse_velocity_kms(vo)
self._vo= vo
self._voSet= True
return None
def __mul__(self,b):
"""
NAME:
__mul__
PURPOSE:
Multiply a linearPotential's amplitude by a number
INPUT:
b - number
OUTPUT:
New instance with amplitude = (old amplitude) x b
HISTORY:
2019-01-27 - Written - Bovy (UofT)
"""
if not isinstance(b,(int,float)):
raise TypeError("Can only multiply a planarPotential instance with a number")
out= copy.deepcopy(self)
out._amp*= b
return out
# Similar functions
__rmul__= __mul__
def __div__(self,b): return self.__mul__(1./b)
__truediv__= __div__
def __add__(self,b):
"""
NAME:
__add__
PURPOSE:
Add linearPotential instances together to create a multi-component potential (e.g., pot= pot1+pot2+pot3)
INPUT:
b - linearPotential instance or a list thereof
OUTPUT:
List of linearPotential instances that represents the combined potential
HISTORY:
2019-01-27 - Written - Bovy (UofT)
"""
from ..potential import flatten as flatten_pot
if not isinstance(flatten_pot([b])[0],linearPotential):
raise TypeError("""Can only combine galpy linearPotential"""
""" objects with """
"""other such objects or lists thereof""")
assert physical_compatible(self,b), \
"""Physical unit conversion parameters (ro,vo) are not """\
"""compatible between potentials to be combined"""
if isinstance(b,list):
return [self]+b
else:
return [self,b]
# Define separately to keep order
def __radd__(self,b):
from ..potential import flatten as flatten_pot
if not isinstance(flatten_pot([b])[0],linearPotential):
raise TypeError("""Can only combine galpy linearPotential"""
""" objects with """
"""other such objects or lists thereof""")
assert physical_compatible(self,b), \
"""Physical unit conversion parameters (ro,vo) are not """\
"""compatible between potentials to be combined"""
# If we get here, b has to be a list
return b+[self]
def turn_physical_off(self):
"""
NAME:
turn_physical_off
PURPOSE:
turn off automatic returning of outputs in physical units
INPUT:
(none)
OUTPUT:
(none)
HISTORY:
2016-01-30 - Written - Bovy (UofT)
"""
self._roSet= False
self._voSet= False
return None
def turn_physical_on(self,ro=None,vo=None):
"""
NAME:
turn_physical_on
PURPOSE:
turn on automatic returning of outputs in physical units
INPUT:
ro= reference distance (kpc; can be Quantity)
vo= reference velocity (km/s; can be Quantity)
OUTPUT:
(none)
HISTORY:
2016-01-30 - Written - Bovy (UofT)
2020-04-22 - Don't turn on a parameter when it is False - Bovy (UofT)
"""
if not ro is False: self._roSet= True
if not vo is False: self._voSet= True
if not ro is None and ro:
self._ro= conversion.parse_length_kpc(ro)
if not vo is None and vo:
self._vo= conversion.parse_velocity_kms(vo)
return None
@potential_physical_input
@physical_conversion('energy',pop=True)
def __call__(self,x,t=0.):
"""
NAME:
__call__
PURPOSE:
evaluate the potential
INPUT:
x - position (can be Quantity)
t= time (optional; can be Quantity)
OUTPUT:
Phi(x,t)
HISTORY:
2010-07-12 - Written - Bovy (NYU)
"""
return self._call_nodecorator(x,t=t)
def _call_nodecorator(self,x,t=0.):
# Separate, so it can be used during orbit integration
try:
return self._amp*self._evaluate(x,t=t)
except AttributeError: #pragma: no cover
raise PotentialError("'_evaluate' function not implemented for this potential")
@potential_physical_input
@physical_conversion('force',pop=True)
def force(self,x,t=0.):
"""
NAME:
force
PURPOSE:
evaluate the force
INPUT:
x - position (can be Quantity)
t= time (optional; can be Quantity)
OUTPUT:
F(x,t)
HISTORY:
2010-07-12 - Written - Bovy (NYU)
"""
return self._force_nodecorator(x,t=t)
def _force_nodecorator(self,x,t=0.):
# Separate, so it can be used during orbit integration
try:
return self._amp*self._force(x,t=t)
except AttributeError: #pragma: no cover
raise PotentialError("'_force' function not implemented for this potential")
def plot(self,t=0.,min=-15.,max=15,ns=21,savefilename=None):
"""
NAME:
plot
PURPOSE:
plot the potential
INPUT:
t - time to evaluate the potential at
min - minimum x
max - maximum x
ns - grid in x
savefilename - save to or restore from this savefile (pickle)
OUTPUT:
plot to output device
HISTORY:
2010-07-13 - Written - Bovy (NYU)
"""
if not savefilename == None and os.path.exists(savefilename):
print("Restoring savefile "+savefilename+" ...")
savefile= open(savefilename,'rb')
potx= pickle.load(savefile)
xs= pickle.load(savefile)
savefile.close()
else:
xs= numpy.linspace(min,max,ns)
potx= numpy.zeros(ns)
for ii in range(ns):
potx[ii]= self._evaluate(xs[ii],t=t)
if not savefilename == None:
print("Writing savefile "+savefilename+" ...")
savefile= open(savefilename,'wb')
pickle.dump(potx,savefile)
pickle.dump(xs,savefile)
savefile.close()
return plot.plot(xs,potx,
xlabel=r"$x/x_0$",ylabel=r"$\Phi(x)$",
xrange=[min,max])
@potential_physical_input
@physical_conversion('energy',pop=True)
def evaluatelinearPotentials(Pot,x,t=0.):
"""
NAME:
evaluatelinearPotentials
PURPOSE:
evaluate the sum of a list of potentials
INPUT:
Pot - (list of) linearPotential instance(s)
x - evaluate potentials at this position (can be Quantity)
t - time to evaluate at (can be Quantity)
OUTPUT:
pot(x,t)
HISTORY:
2010-07-13 - Written - Bovy (NYU)
"""
return _evaluatelinearPotentials(Pot,x,t=t)
def _evaluatelinearPotentials(Pot,x,t=0.):
"""Raw, undecorated function for internal use"""
if isinstance(Pot,list):
sum= 0.
for pot in Pot:
sum+= pot._call_nodecorator(x,t=t)
return sum
elif isinstance(Pot,linearPotential):
return Pot._call_nodecorator(x,t=t)
else: #pragma: no cover
raise PotentialError("Input to 'evaluatelinearPotentials' is neither a linearPotential-instance or a list of such instances")
@potential_physical_input
@physical_conversion('force',pop=True)
def evaluatelinearForces(Pot,x,t=0.):
"""
NAME:
evaluatelinearForces
PURPOSE:
evaluate the forces due to a list of potentials
INPUT:
Pot - (list of) linearPotential instance(s)
x - evaluate forces at this position (can be Quantity)
t - time to evaluate at (can be Quantity)
OUTPUT:
force(x,t)
HISTORY:
2010-07-13 - Written - Bovy (NYU)
"""
return _evaluatelinearForces(Pot,x,t=t)
def _evaluatelinearForces(Pot,x,t=0.):
"""Raw, undecorated function for internal use"""
if isinstance(Pot,list):
sum= 0.
for pot in Pot:
sum+= pot._force_nodecorator(x,t=t)
return sum
elif isinstance(Pot,linearPotential):
return Pot._force_nodecorator(x,t=t)
else: #pragma: no cover
raise PotentialError("Input to 'evaluateForces' is neither a linearPotential-instance or a list of such instances")
def plotlinearPotentials(Pot,t=0.,min=-15.,max=15,ns=21,savefilename=None):
"""
NAME:
plotlinearPotentials
PURPOSE:
plot a combination of potentials
INPUT:
t - time to evaluate potential at
min - minimum x
max - maximum x
ns - grid in x
savefilename - save to or restore from this savefile (pickle)
OUTPUT:
plot to output device
HISTORY:
2010-07-13 - Written - Bovy (NYU)
"""
Pot= flatten(Pot)
if not savefilename == None and os.path.exists(savefilename):
print("Restoring savefile "+savefilename+" ...")
savefile= open(savefilename,'rb')
potx= pickle.load(savefile)
xs= pickle.load(savefile)
savefile.close()
else:
xs= numpy.linspace(min,max,ns)
potx= numpy.zeros(ns)
for ii in range(ns):
potx[ii]= evaluatelinearPotentials(Pot,xs[ii],t=t)
if not savefilename == None:
print("Writing savefile "+savefilename+" ...")
savefile= open(savefilename,'wb')
pickle.dump(potx,savefile)
pickle.dump(xs,savefile)
savefile.close()
return plot.plot(xs,potx,
xlabel=r"$x/x_0$",ylabel=r"$\Phi(x)$",
xrange=[min,max]) | galpy/potential/linearPotential.py | from __future__ import division, print_function
import os, os.path
import copy
import pickle
import numpy
from ..util import plot, conversion, config
from .Potential import PotentialError, flatten
from ..util.conversion import physical_conversion,\
potential_physical_input, physical_compatible
class linearPotential(object):
"""Class representing 1D potentials"""
def __init__(self,amp=1.,ro=None,vo=None):
self._amp= amp
self.dim= 1
self.isRZ= False
self.hasC= False
self.hasC_dxdv= False
self.hasC_dens= False
# Parse ro and vo
if ro is None:
self._ro= config.__config__.getfloat('normalization','ro')
self._roSet= False
else:
ro= conversion.parse_length_kpc(ro)
self._ro= ro
self._roSet= True
if vo is None:
self._vo= config.__config__.getfloat('normalization','vo')
self._voSet= False
else:
vo= conversion.parse_velocity_kms(vo)
self._vo= vo
self._voSet= True
return None
def __mul__(self,b):
"""
NAME:
__mul__
PURPOSE:
Multiply a linearPotential's amplitude by a number
INPUT:
b - number
OUTPUT:
New instance with amplitude = (old amplitude) x b
HISTORY:
2019-01-27 - Written - Bovy (UofT)
"""
if not isinstance(b,(int,float)):
raise TypeError("Can only multiply a planarPotential instance with a number")
out= copy.deepcopy(self)
out._amp*= b
return out
# Similar functions
__rmul__= __mul__
def __div__(self,b): return self.__mul__(1./b)
__truediv__= __div__
def __add__(self,b):
"""
NAME:
__add__
PURPOSE:
Add linearPotential instances together to create a multi-component potential (e.g., pot= pot1+pot2+pot3)
INPUT:
b - linearPotential instance or a list thereof
OUTPUT:
List of linearPotential instances that represents the combined potential
HISTORY:
2019-01-27 - Written - Bovy (UofT)
"""
from ..potential import flatten as flatten_pot
if not isinstance(flatten_pot([b])[0],linearPotential):
raise TypeError("""Can only combine galpy linearPotential"""
""" objects with """
"""other such objects or lists thereof""")
assert physical_compatible(self,b), \
"""Physical unit conversion parameters (ro,vo) are not """\
"""compatible between potentials to be combined"""
if isinstance(b,list):
return [self]+b
else:
return [self,b]
# Define separately to keep order
def __radd__(self,b):
from ..potential import flatten as flatten_pot
if not isinstance(flatten_pot([b])[0],linearPotential):
raise TypeError("""Can only combine galpy linearPotential"""
""" objects with """
"""other such objects or lists thereof""")
assert physical_compatible(self,b), \
"""Physical unit conversion parameters (ro,vo) are not """\
"""compatible between potentials to be combined"""
# If we get here, b has to be a list
return b+[self]
def turn_physical_off(self):
"""
NAME:
turn_physical_off
PURPOSE:
turn off automatic returning of outputs in physical units
INPUT:
(none)
OUTPUT:
(none)
HISTORY:
2016-01-30 - Written - Bovy (UofT)
"""
self._roSet= False
self._voSet= False
return None
def turn_physical_on(self,ro=None,vo=None):
"""
NAME:
turn_physical_on
PURPOSE:
turn on automatic returning of outputs in physical units
INPUT:
ro= reference distance (kpc; can be Quantity)
vo= reference velocity (km/s; can be Quantity)
OUTPUT:
(none)
HISTORY:
2016-01-30 - Written - Bovy (UofT)
2020-04-22 - Don't turn on a parameter when it is False - Bovy (UofT)
"""
if not ro is False: self._roSet= True
if not vo is False: self._voSet= True
if not ro is None and ro:
self._ro= conversion.parse_length_kpc(ro)
if not vo is None and vo:
self._vo= conversion.parse_velocity_kms(vo)
return None
@potential_physical_input
@physical_conversion('energy',pop=True)
def __call__(self,x,t=0.):
"""
NAME:
__call__
PURPOSE:
evaluate the potential
INPUT:
x - position (can be Quantity)
t= time (optional; can be Quantity)
OUTPUT:
Phi(x,t)
HISTORY:
2010-07-12 - Written - Bovy (NYU)
"""
return self._call_nodecorator(x,t=t)
def _call_nodecorator(self,x,t=0.):
# Separate, so it can be used during orbit integration
try:
return self._amp*self._evaluate(x,t=t)
except AttributeError: #pragma: no cover
raise PotentialError("'_evaluate' function not implemented for this potential")
@potential_physical_input
@physical_conversion('force',pop=True)
def force(self,x,t=0.):
"""
NAME:
force
PURPOSE:
evaluate the force
INPUT:
x - position (can be Quantity)
t= time (optional; can be Quantity)
OUTPUT:
F(x,t)
HISTORY:
2010-07-12 - Written - Bovy (NYU)
"""
return self._force_nodecorator(x,t=t)
def _force_nodecorator(self,x,t=0.):
# Separate, so it can be used during orbit integration
try:
return self._amp*self._force(x,t=t)
except AttributeError: #pragma: no cover
raise PotentialError("'_force' function not implemented for this potential")
def plot(self,t=0.,min=-15.,max=15,ns=21,savefilename=None):
"""
NAME:
plot
PURPOSE:
plot the potential
INPUT:
t - time to evaluate the potential at
min - minimum x
max - maximum x
ns - grid in x
savefilename - save to or restore from this savefile (pickle)
OUTPUT:
plot to output device
HISTORY:
2010-07-13 - Written - Bovy (NYU)
"""
if not savefilename == None and os.path.exists(savefilename):
print("Restoring savefile "+savefilename+" ...")
savefile= open(savefilename,'rb')
potx= pickle.load(savefile)
xs= pickle.load(savefile)
savefile.close()
else:
xs= numpy.linspace(min,max,ns)
potx= numpy.zeros(ns)
for ii in range(ns):
potx[ii]= self._evaluate(xs[ii],t=t)
if not savefilename == None:
print("Writing savefile "+savefilename+" ...")
savefile= open(savefilename,'wb')
pickle.dump(potx,savefile)
pickle.dump(xs,savefile)
savefile.close()
return plot.plot(xs,potx,
xlabel=r"$x/x_0$",ylabel=r"$\Phi(x)$",
xrange=[min,max])
@potential_physical_input
@physical_conversion('energy',pop=True)
def evaluatelinearPotentials(Pot,x,t=0.):
"""
NAME:
evaluatelinearPotentials
PURPOSE:
evaluate the sum of a list of potentials
INPUT:
Pot - (list of) linearPotential instance(s)
x - evaluate potentials at this position (can be Quantity)
t - time to evaluate at (can be Quantity)
OUTPUT:
pot(x,t)
HISTORY:
2010-07-13 - Written - Bovy (NYU)
"""
return _evaluatelinearPotentials(Pot,x,t=t)
def _evaluatelinearPotentials(Pot,x,t=0.):
"""Raw, undecorated function for internal use"""
if isinstance(Pot,list):
sum= 0.
for pot in Pot:
sum+= pot._call_nodecorator(x,t=t)
return sum
elif isinstance(Pot,linearPotential):
return Pot._call_nodecorator(x,t=t)
else: #pragma: no cover
raise PotentialError("Input to 'evaluatelinearPotentials' is neither a linearPotential-instance or a list of such instances")
@potential_physical_input
@physical_conversion('force',pop=True)
def evaluatelinearForces(Pot,x,t=0.):
"""
NAME:
evaluatelinearForces
PURPOSE:
evaluate the forces due to a list of potentials
INPUT:
Pot - (list of) linearPotential instance(s)
x - evaluate forces at this position (can be Quantity)
t - time to evaluate at (can be Quantity)
OUTPUT:
force(x,t)
HISTORY:
2010-07-13 - Written - Bovy (NYU)
"""
return _evaluatelinearForces(Pot,x,t=t)
def _evaluatelinearForces(Pot,x,t=0.):
"""Raw, undecorated function for internal use"""
if isinstance(Pot,list):
sum= 0.
for pot in Pot:
sum+= pot._force_nodecorator(x,t=t)
return sum
elif isinstance(Pot,linearPotential):
return Pot._force_nodecorator(x,t=t)
else: #pragma: no cover
raise PotentialError("Input to 'evaluateForces' is neither a linearPotential-instance or a list of such instances")
def plotlinearPotentials(Pot,t=0.,min=-15.,max=15,ns=21,savefilename=None):
"""
NAME:
plotlinearPotentials
PURPOSE:
plot a combination of potentials
INPUT:
t - time to evaluate potential at
min - minimum x
max - maximum x
ns - grid in x
savefilename - save to or restore from this savefile (pickle)
OUTPUT:
plot to output device
HISTORY:
2010-07-13 - Written - Bovy (NYU)
"""
Pot= flatten(Pot)
if not savefilename == None and os.path.exists(savefilename):
print("Restoring savefile "+savefilename+" ...")
savefile= open(savefilename,'rb')
potx= pickle.load(savefile)
xs= pickle.load(savefile)
savefile.close()
else:
xs= numpy.linspace(min,max,ns)
potx= numpy.zeros(ns)
for ii in range(ns):
potx[ii]= evaluatelinearPotentials(Pot,xs[ii],t=t)
if not savefilename == None:
print("Writing savefile "+savefilename+" ...")
savefile= open(savefilename,'wb')
pickle.dump(potx,savefile)
pickle.dump(xs,savefile)
savefile.close()
return plot.plot(xs,potx,
xlabel=r"$x/x_0$",ylabel=r"$\Phi(x)$",
xrange=[min,max]) | 0.675978 | 0.265617 |
import re
from html import unescape
from typing import Dict, List, Union
class CategoryParser():
def __init__(self, html: str):
super().__init__()
self._html = html
self._lines = html.split("\n")
@property
def text(self):
return self._html
def name(self) -> str:
if len(self._lines) == 0:
return None
m = re.search(r"<B>(.*?)</B><br>", self._lines[0])
if m is None:
return None
return m.group(1).strip()
def boards(self) -> List[Dict[str, str]]:
"""
Returns
-------
{'name', 'server', 'board'}
"""
board_reg = re.compile(r"<A HREF=https://(.*?).5ch.net/(.*?)/>(.*?)</A>")
boards = []
for line in self._lines[1:]:
m = board_reg.search(line)
if m is None:
continue
server = m.group(1).strip()
board = m.group(2).strip()
name = m.group(3).strip()
boards.append({"name": name, "server": server, "board": board})
return boards
class BbsmenuParser:
def __init__(self, html):
super().__init__()
self._html = html
@property
def text(self):
return self._html
def categories(self) -> List[Dict[str, Union[str, Dict[str, str]]]]:
"""
Returns
-------
[{'name': str, 'boards': {'server', 'board', 'name'}}]
"""
cats = self._html.split("<br><br>")
categories = []
for cat in cats:
cat_parser = CategoryParser(cat)
name = cat_parser.name()
if name is not None and name != "他のサイト":
categories.append({"name": name, "boards": cat_parser.boards()})
return categories
class BoardParser:
def __init__(self, subject: str):
super().__init__()
self._subject = subject
@property
def text(self):
return self._subject
def threads(self) -> List[Dict[str, Union[str, int]]]:
"""
Returns
-------
[{'key', 'title', 'count', 'speed'}]
"""
txt = unescape(self._subject)
threads = []
for (i, line) in enumerate(txt.split("\n"), 1):
m = re.search(r"^(\d{10})\.dat<>(.*)\((\d{1,})\)$", line)
if m is not None:
key = m.group(1).strip()
title = m.group(2).strip()
count = int(m.group(3).strip())
threads.append(
{"key": key, "title": title, "count": count}
)
return threads
class ThreadParserH:
def __init__(self, html: str):
super().__init__()
self._html = html
@property
def text(self):
return self._html
def title(self) -> str:
return unescape(re.search("<title>(.*?)\n</title>", self._html).group(1)).strip()
def is_pastlog(self) -> bool:
return re.search('<div class="stoplight stopred stopdone', self._html) is None
def responses(self) -> List[Dict[str, Union[str, int]]]:
"""
Returns
-------
[{number, mail, name, date, id, message}]
"""
re_res = re.compile(
r'<div class="post" id="(?P<num>\d+)".*?"name"><b>(<a href="mailto:(?P<mail>.*?)">)?(?P<name>.*?)(</a>)?'
r'</b></span>.*?"date">(?P<date>.*?)<.*?"uid">(?P<id>.*?)<.*?(<span.*?>)+? (?P<msg>.*?) (</span>)+?</div>'
r'</div><br>'
)
# re_link = re.compile(r'<a href="http.*?>(.*?)</a>|<a class="image".*?>(.*?)</a>')
# re_anchor = re.compile(r'<a href.*?class="reply_link">(.*?)</a>')
br = re.compile(r' ?<br> ')
tag = re.compile(r'<.*?>')
responses = []
for res in re_res.finditer(self._html):
number = int(res.group("num"))
mail = res.group("mail")
name = tag.sub("", res.group("name"))
date = res.group("date")
id = res.group("id")
msg = res.group("msg")
msg = br.sub("\n", msg)
msg = tag.sub("", msg)
msg = unescape(msg)
responses.append({
"number": number, "name": name, "mail": mail, "date": date, "id": id, "message": msg
})
return responses
class ThreadParserD:
def __init__(self, dat: str):
super().__init__()
self._dat = dat
self._lines = dat.split("\n")
@property
def text(self):
return self._dat
def title(self) -> str:
return unescape(re.search(r".*<>(.*?)$", self._lines[0]).group(1))
def is_pastlog(self) -> bool:
if len(self._lines) == 2:
r = self.responses()
if r[1]["name"] == "5ちゃんねる ★"\
and r[1]["message"].startswith("このスレッドは過去ログです"):
return True
return False
def responses(self) -> List[Dict[str, Union[str, int]]]:
"""
Returns
-------
[{number, mail, name, date, id, message}]
"""
re_res = re.compile(r"(.*?)<>(.*?)<>(.*? .*?) (.*?)<> (.*?) <>.*")
re_b = re.compile(r"</?b>")
# re_img = re.compile(r'<a class="image".*?>(.*?)</a>')
# re_link = re.compile(r'(http.*?)(?: |$)')
br = re.compile(r'< ?<br> >')
tag = re.compile(r'<.*?>')
responses = []
for i, l in enumerate(self._lines, 1):
m = re_res.search(l)
if m is None:
continue
name = re_b.sub("", m.group(1))
mail = m.group(2)
date = m.group(3)
id = m.group(4)
msg = m.group(5)
msg = br.sub("\n", msg)
msg = tag.sub("", msg)
msg = unescape(msg)
responses.append({
"number": i, "name": name, "mail": mail, "date": date, "id": id, "message": msg
})
return responses | gochan/parser.py | import re
from html import unescape
from typing import Dict, List, Union
class CategoryParser():
def __init__(self, html: str):
super().__init__()
self._html = html
self._lines = html.split("\n")
@property
def text(self):
return self._html
def name(self) -> str:
if len(self._lines) == 0:
return None
m = re.search(r"<B>(.*?)</B><br>", self._lines[0])
if m is None:
return None
return m.group(1).strip()
def boards(self) -> List[Dict[str, str]]:
"""
Returns
-------
{'name', 'server', 'board'}
"""
board_reg = re.compile(r"<A HREF=https://(.*?).5ch.net/(.*?)/>(.*?)</A>")
boards = []
for line in self._lines[1:]:
m = board_reg.search(line)
if m is None:
continue
server = m.group(1).strip()
board = m.group(2).strip()
name = m.group(3).strip()
boards.append({"name": name, "server": server, "board": board})
return boards
class BbsmenuParser:
def __init__(self, html):
super().__init__()
self._html = html
@property
def text(self):
return self._html
def categories(self) -> List[Dict[str, Union[str, Dict[str, str]]]]:
"""
Returns
-------
[{'name': str, 'boards': {'server', 'board', 'name'}}]
"""
cats = self._html.split("<br><br>")
categories = []
for cat in cats:
cat_parser = CategoryParser(cat)
name = cat_parser.name()
if name is not None and name != "他のサイト":
categories.append({"name": name, "boards": cat_parser.boards()})
return categories
class BoardParser:
def __init__(self, subject: str):
super().__init__()
self._subject = subject
@property
def text(self):
return self._subject
def threads(self) -> List[Dict[str, Union[str, int]]]:
"""
Returns
-------
[{'key', 'title', 'count', 'speed'}]
"""
txt = unescape(self._subject)
threads = []
for (i, line) in enumerate(txt.split("\n"), 1):
m = re.search(r"^(\d{10})\.dat<>(.*)\((\d{1,})\)$", line)
if m is not None:
key = m.group(1).strip()
title = m.group(2).strip()
count = int(m.group(3).strip())
threads.append(
{"key": key, "title": title, "count": count}
)
return threads
class ThreadParserH:
def __init__(self, html: str):
super().__init__()
self._html = html
@property
def text(self):
return self._html
def title(self) -> str:
return unescape(re.search("<title>(.*?)\n</title>", self._html).group(1)).strip()
def is_pastlog(self) -> bool:
return re.search('<div class="stoplight stopred stopdone', self._html) is None
def responses(self) -> List[Dict[str, Union[str, int]]]:
"""
Returns
-------
[{number, mail, name, date, id, message}]
"""
re_res = re.compile(
r'<div class="post" id="(?P<num>\d+)".*?"name"><b>(<a href="mailto:(?P<mail>.*?)">)?(?P<name>.*?)(</a>)?'
r'</b></span>.*?"date">(?P<date>.*?)<.*?"uid">(?P<id>.*?)<.*?(<span.*?>)+? (?P<msg>.*?) (</span>)+?</div>'
r'</div><br>'
)
# re_link = re.compile(r'<a href="http.*?>(.*?)</a>|<a class="image".*?>(.*?)</a>')
# re_anchor = re.compile(r'<a href.*?class="reply_link">(.*?)</a>')
br = re.compile(r' ?<br> ')
tag = re.compile(r'<.*?>')
responses = []
for res in re_res.finditer(self._html):
number = int(res.group("num"))
mail = res.group("mail")
name = tag.sub("", res.group("name"))
date = res.group("date")
id = res.group("id")
msg = res.group("msg")
msg = br.sub("\n", msg)
msg = tag.sub("", msg)
msg = unescape(msg)
responses.append({
"number": number, "name": name, "mail": mail, "date": date, "id": id, "message": msg
})
return responses
class ThreadParserD:
def __init__(self, dat: str):
super().__init__()
self._dat = dat
self._lines = dat.split("\n")
@property
def text(self):
return self._dat
def title(self) -> str:
return unescape(re.search(r".*<>(.*?)$", self._lines[0]).group(1))
def is_pastlog(self) -> bool:
if len(self._lines) == 2:
r = self.responses()
if r[1]["name"] == "5ちゃんねる ★"\
and r[1]["message"].startswith("このスレッドは過去ログです"):
return True
return False
def responses(self) -> List[Dict[str, Union[str, int]]]:
"""
Returns
-------
[{number, mail, name, date, id, message}]
"""
re_res = re.compile(r"(.*?)<>(.*?)<>(.*? .*?) (.*?)<> (.*?) <>.*")
re_b = re.compile(r"</?b>")
# re_img = re.compile(r'<a class="image".*?>(.*?)</a>')
# re_link = re.compile(r'(http.*?)(?: |$)')
br = re.compile(r'< ?<br> >')
tag = re.compile(r'<.*?>')
responses = []
for i, l in enumerate(self._lines, 1):
m = re_res.search(l)
if m is None:
continue
name = re_b.sub("", m.group(1))
mail = m.group(2)
date = m.group(3)
id = m.group(4)
msg = m.group(5)
msg = br.sub("\n", msg)
msg = tag.sub("", msg)
msg = unescape(msg)
responses.append({
"number": i, "name": name, "mail": mail, "date": date, "id": id, "message": msg
})
return responses | 0.724091 | 0.269136 |
# Create a parent logger for all PUDL loggers to inherit from
import logging
import pkg_resources
import pudl.analysis.allocate_net_gen
import pudl.analysis.mcoe
import pudl.analysis.service_territory
import pudl.analysis.spatial
import pudl.analysis.state_demand
import pudl.analysis.timeseries_cleaning
import pudl.cli
import pudl.constants
import pudl.convert.censusdp1tract_to_sqlite
import pudl.convert.epacems_to_parquet
import pudl.convert.ferc1_to_sqlite
import pudl.convert.metadata_to_rst
import pudl.etl
import pudl.extract.eia860
import pudl.extract.eia860m
import pudl.extract.eia861
import pudl.extract.eia923
import pudl.extract.epacems
import pudl.extract.excel
import pudl.extract.ferc1
import pudl.extract.ferc714
import pudl.glue.eia_epacems
import pudl.glue.ferc1_eia
import pudl.helpers
import pudl.load.parquet
import pudl.load.sqlite
import pudl.metadata.classes
import pudl.metadata.constants
import pudl.metadata.fields
import pudl.metadata.helpers
import pudl.metadata.labels
import pudl.metadata.resources
# Output modules by data source:
import pudl.output.censusdp1tract
import pudl.output.eia860
import pudl.output.eia923
import pudl.output.epacems
import pudl.output.ferc1
import pudl.output.ferc714
import pudl.output.pudltabl
# Transformation functions, organized by data source:
import pudl.transform.eia
import pudl.transform.eia860
import pudl.transform.eia861
import pudl.transform.eia923
import pudl.transform.epacems
import pudl.transform.ferc1
import pudl.transform.ferc714
# Data validation tools and test cases:
import pudl.validate
# Deployed data & workspace management
import pudl.workspace.datastore
import pudl.workspace.setup # noqa: F401 WTF is this showing up as unused?
__author__ = "Catalyst Cooperative"
__contact__ = "<EMAIL>"
__maintainer__ = "Catalyst Cooperative"
__license__ = "MIT License"
__maintainer_email__ = "<EMAIL>"
__version__ = pkg_resources.get_distribution("catalystcoop.pudl").version
__docformat__ = "restructuredtext en"
__description__ = "Tools for liberating public US electric utility data."
__long_description__ = """
This Public Utility Data Liberation (PUDL) project is a collection of tools
that allow programmatic access to and manipulation of many public data sets
related to electric utilities in the United States. These data sets are
often collected by state and federal agencies, but are publicized in ways
that are not well standardized, or intended for interoperability. PUDL
seeks to allow more transparent and useful access to this important public
data, with the goal of enabling climate advocates, academic researchers, and
data journalists to better understand the electricity system and its impacts
on climate.
"""
__projecturl__ = "https://catalyst.coop/pudl/"
__downloadurl__ = "https://github.com/catalyst-cooperative/pudl/"
logging.getLogger(__name__).addHandler(logging.NullHandler()) | src/pudl/__init__.py |
# Create a parent logger for all PUDL loggers to inherit from
import logging
import pkg_resources
import pudl.analysis.allocate_net_gen
import pudl.analysis.mcoe
import pudl.analysis.service_territory
import pudl.analysis.spatial
import pudl.analysis.state_demand
import pudl.analysis.timeseries_cleaning
import pudl.cli
import pudl.constants
import pudl.convert.censusdp1tract_to_sqlite
import pudl.convert.epacems_to_parquet
import pudl.convert.ferc1_to_sqlite
import pudl.convert.metadata_to_rst
import pudl.etl
import pudl.extract.eia860
import pudl.extract.eia860m
import pudl.extract.eia861
import pudl.extract.eia923
import pudl.extract.epacems
import pudl.extract.excel
import pudl.extract.ferc1
import pudl.extract.ferc714
import pudl.glue.eia_epacems
import pudl.glue.ferc1_eia
import pudl.helpers
import pudl.load.parquet
import pudl.load.sqlite
import pudl.metadata.classes
import pudl.metadata.constants
import pudl.metadata.fields
import pudl.metadata.helpers
import pudl.metadata.labels
import pudl.metadata.resources
# Output modules by data source:
import pudl.output.censusdp1tract
import pudl.output.eia860
import pudl.output.eia923
import pudl.output.epacems
import pudl.output.ferc1
import pudl.output.ferc714
import pudl.output.pudltabl
# Transformation functions, organized by data source:
import pudl.transform.eia
import pudl.transform.eia860
import pudl.transform.eia861
import pudl.transform.eia923
import pudl.transform.epacems
import pudl.transform.ferc1
import pudl.transform.ferc714
# Data validation tools and test cases:
import pudl.validate
# Deployed data & workspace management
import pudl.workspace.datastore
import pudl.workspace.setup # noqa: F401 WTF is this showing up as unused?
# Package metadata exposed as module-level dunders.
__author__ = "Catalyst Cooperative"
__contact__ = "<EMAIL>"
__maintainer__ = "Catalyst Cooperative"
__license__ = "MIT License"
__maintainer_email__ = "<EMAIL>"
# Version is resolved from the installed distribution rather than hard-coded.
__version__ = pkg_resources.get_distribution("catalystcoop.pudl").version
__docformat__ = "restructuredtext en"
__description__ = "Tools for liberating public US electric utility data."
__long_description__ = """
This Public Utility Data Liberation (PUDL) project is a collection of tools
that allow programmatic access to and manipulation of many public data sets
related to electric utilities in the United States. These data sets are
often collected by state and federal agencies, but are publicized in ways
that are not well standardized, or intended for interoperability. PUDL
seeks to allow more transparent and useful access to this important public
data, with the goal of enabling climate advocates, academic researchers, and
data journalists to better understand the electricity system and its impacts
on climate.
"""
__projecturl__ = "https://catalyst.coop/pudl/"
__downloadurl__ = "https://github.com/catalyst-cooperative/pudl/"
logging.getLogger(__name__).addHandler(logging.NullHandler())
import re
from livecli.plugin import Plugin
from livecli.plugin.api import http, validate
from livecli.stream import HLSStream
from livecli.exceptions import PluginError
__livecli_docs__ = {
"domains": [
"ceskatelevize.cz",
],
"geo_blocked": [
"CZ",
],
"notes": "",
"live": True,
"vod": True,
"last_update": "2017-02-02",
}
_url_re = re.compile(
r'http(s)?://([^.]*.)?ceskatelevize.cz'
)
_player_re = re.compile(
r'ivysilani/embed/iFramePlayer[^"]+'
)
_hash_re = re.compile(
r'hash:"([0-9a-z]+)"'
)
_playlist_info_re = re.compile(
r'{"type":"([a-z]+)","id":"([0-9]+)"'
)
_playlist_url_schema = validate.Schema({
"url": validate.any(
validate.url(),
"error_region"
)
})
_playlist_schema = validate.Schema({
"playlist": [{
"streamUrls": {
"main": validate.url(),
}
}]
})
def _find_playlist_info(response):
    """Extract playlist info (type, id) from an HTTP response.

    :param response: Response object.
    :returns: Dict with ``type`` and ``id`` keys; empty dict when not found.
    """
    match = _playlist_info_re.search(response.text)
    if not match:
        return {}
    return {'type': match.group(1), 'id': match.group(2)}
def _find_player_url(response):
    """Find the embedded player url in an HTTP response.

    :param response: Response object.
    :returns: Absolute player url (str); the bare site root when no
        player/hash could be located.
    """
    url = ''
    matches = _player_re.search(response.text)
    if matches:
        # The embed url is HTML-escaped in the page source; unescape the
        # ampersands. (The previous ``.replace('&', '&')`` was a no-op —
        # the escaped entity got mangled at some point.)
        tmp_url = matches.group(0).replace('&amp;', '&')
        if 'hash' not in tmp_url:
            # there's no hash in the URL, try to find it elsewhere on the page
            matches = _hash_re.search(response.text)
            if matches:
                url = tmp_url + '&hash=' + matches.group(1)
        else:
            url = tmp_url
    return 'http://ceskatelevize.cz/' + url
class Ceskatelevize(Plugin):
    """Livecli plugin for live/VOD streams on ceskatelevize.cz (geo-blocked to CZ)."""
    @classmethod
    def can_handle_url(cls, url):
        # The match object doubles as the truthy "can handle" flag.
        return _url_re.match(url)
    def _get_streams(self):
        # fetch requested url and find playlist info
        response = http.get(self.url)
        info = _find_playlist_info(response)
        if not info:
            # playlist info not found, let's try to find player url
            player_url = _find_player_url(response)
            if not player_url:
                raise PluginError('Cannot find playlist info or player url!')
            # get player url and try to find playlist info in it
            response = http.get(player_url)
            info = _find_playlist_info(response)
            if not info:
                raise PluginError('Cannot find playlist info in the player url!')
        # Form-encoded body the site's AJAX playlist endpoint expects.
        data = {
            'playlist[0][type]': info['type'],
            'playlist[0][id]': info['id'],
            'requestUrl': '/ivysilani/embed/iFramePlayerCT24.php',
            'requestSource': 'iVysilani',
            'type': 'html'
        }
        # NOTE(review): presumably spoofs a local client address for the
        # server-side geo check — confirm before changing.
        headers = {
            'x-addr': '127.0.0.1',
        }
        # fetch playlist url
        response = http.post(
            'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist',
            data=data,
            headers=headers
        )
        json_data = http.json(response, schema=_playlist_url_schema)
        if json_data['url'] == "error_region":
            self.logger.error("This stream is not available in your territory")
            return
        # fetch playlist
        response = http.post(json_data['url'])
        json_data = http.json(response, schema=_playlist_schema)
        playlist = json_data['playlist'][0]['streamUrls']['main']
        return HLSStream.parse_variant_playlist(self.session, playlist)
__plugin__ = Ceskatelevize
import re
from livecli.plugin import Plugin
from livecli.plugin.api import http, validate
from livecli.stream import HLSStream
from livecli.exceptions import PluginError
__livecli_docs__ = {
"domains": [
"ceskatelevize.cz",
],
"geo_blocked": [
"CZ",
],
"notes": "",
"live": True,
"vod": True,
"last_update": "2017-02-02",
}
_url_re = re.compile(
r'http(s)?://([^.]*.)?ceskatelevize.cz'
)
_player_re = re.compile(
r'ivysilani/embed/iFramePlayer[^"]+'
)
_hash_re = re.compile(
r'hash:"([0-9a-z]+)"'
)
_playlist_info_re = re.compile(
r'{"type":"([a-z]+)","id":"([0-9]+)"'
)
_playlist_url_schema = validate.Schema({
"url": validate.any(
validate.url(),
"error_region"
)
})
_playlist_schema = validate.Schema({
"playlist": [{
"streamUrls": {
"main": validate.url(),
}
}]
})
def _find_playlist_info(response):
    """Extract playlist info (type, id) from an HTTP response.

    :param response: Response object.
    :returns: Dict with ``type`` and ``id`` keys; empty dict when not found.
    """
    match = _playlist_info_re.search(response.text)
    if not match:
        return {}
    return {'type': match.group(1), 'id': match.group(2)}
def _find_player_url(response):
    """Find the embedded player url in an HTTP response.

    :param response: Response object.
    :returns: Absolute player url (str); the bare site root when no
        player/hash could be located.
    """
    url = ''
    matches = _player_re.search(response.text)
    if matches:
        # The embed url is HTML-escaped in the page source; unescape the
        # ampersands. (The previous ``.replace('&', '&')`` was a no-op —
        # the escaped entity got mangled at some point.)
        tmp_url = matches.group(0).replace('&amp;', '&')
        if 'hash' not in tmp_url:
            # there's no hash in the URL, try to find it elsewhere on the page
            matches = _hash_re.search(response.text)
            if matches:
                url = tmp_url + '&hash=' + matches.group(1)
        else:
            url = tmp_url
    return 'http://ceskatelevize.cz/' + url
class Ceskatelevize(Plugin):
    """Livecli plugin for live/VOD streams on ceskatelevize.cz (geo-blocked to CZ)."""
    @classmethod
    def can_handle_url(cls, url):
        # The match object doubles as the truthy "can handle" flag.
        return _url_re.match(url)
    def _get_streams(self):
        # fetch requested url and find playlist info
        response = http.get(self.url)
        info = _find_playlist_info(response)
        if not info:
            # playlist info not found, let's try to find player url
            player_url = _find_player_url(response)
            if not player_url:
                raise PluginError('Cannot find playlist info or player url!')
            # get player url and try to find playlist info in it
            response = http.get(player_url)
            info = _find_playlist_info(response)
            if not info:
                raise PluginError('Cannot find playlist info in the player url!')
        # Form-encoded body the site's AJAX playlist endpoint expects.
        data = {
            'playlist[0][type]': info['type'],
            'playlist[0][id]': info['id'],
            'requestUrl': '/ivysilani/embed/iFramePlayerCT24.php',
            'requestSource': 'iVysilani',
            'type': 'html'
        }
        # NOTE(review): presumably spoofs a local client address for the
        # server-side geo check — confirm before changing.
        headers = {
            'x-addr': '127.0.0.1',
        }
        # fetch playlist url
        response = http.post(
            'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist',
            data=data,
            headers=headers
        )
        json_data = http.json(response, schema=_playlist_url_schema)
        if json_data['url'] == "error_region":
            self.logger.error("This stream is not available in your territory")
            return
        # fetch playlist
        response = http.post(json_data['url'])
        json_data = http.json(response, schema=_playlist_schema)
        playlist = json_data['playlist'][0]['streamUrls']['main']
        return HLSStream.parse_variant_playlist(self.session, playlist)
__plugin__ = Ceskatelevize
# Copyright (c) 2018 The ungoogled-chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run Pylint over any module"""
import argparse
import os
import shutil
from pathlib import Path
from pylint import epylint as lint
def run_pylint(modulepath, pylint_options):
    """Runs Pylint. Returns a boolean indicating success"""
    # Keep Pylint's stats out of $HOME: prefer the per-user tmpfs and fall
    # back to /run/shm when it is unavailable.
    stats_dir = Path('/run/user/{}/pylint_stats'.format(os.getuid()))
    if not stats_dir.parent.is_dir():  #pylint: disable=no-member
        stats_dir = Path('/run/shm/pylint_stats')
    os.environ['PYLINTHOME'] = str(stats_dir)
    exit_code = lint.lint(
        filename=str(modulepath),
        options=pylint_options,
    )
    # Clean up the throwaway stats directory.
    if stats_dir.is_dir():
        shutil.rmtree(str(stats_dir))
    if exit_code == 0:
        return True
    print('WARNING: {}() returned non-zero result: {}'.format(
        '.'.join((lint.lint.__module__, lint.lint.__name__)), exit_code))
    return False
def main():
    """CLI entrypoint"""
    parser = argparse.ArgumentParser(description='Run Pylint over an arbitrary module')
    parser.add_argument(
        '--hide-fixme', action='store_true',
        help='Hide "fixme" Pylint warnings.')
    parser.add_argument(
        '--show-locally-disabled', action='store_true',
        help='Show "locally-disabled" Pylint warnings.')
    parser.add_argument(
        'modulepath', type=Path,
        help='Path to the module to check')
    args = parser.parse_args()
    if not args.modulepath.exists():
        print('ERROR: Module path "{}" does not exist'.format(args.modulepath))
        exit(1)
    # Checks disabled by default; extended by the command-line flags below.
    disables = ['wrong-import-position']
    if args.hide_fixme:
        disables.append('fixme')
    if not args.show_locally_disabled:
        disables.append('locally-disabled')
    pylint_options = [
        '--disable={}'.format(','.join(disables)),
        '--jobs=4',
    ]
    exit(0 if run_pylint(args.modulepath, pylint_options) else 1)
if __name__ == '__main__':
    main()
# Copyright (c) 2018 The ungoogled-chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run Pylint over any module"""
import argparse
import os
import shutil
from pathlib import Path
from pylint import epylint as lint
def run_pylint(modulepath, pylint_options):
    """Runs Pylint. Returns a boolean indicating success"""
    # Keep Pylint's stats out of $HOME: prefer the per-user tmpfs and fall
    # back to /run/shm when it is unavailable.
    stats_dir = Path('/run/user/{}/pylint_stats'.format(os.getuid()))
    if not stats_dir.parent.is_dir():  #pylint: disable=no-member
        stats_dir = Path('/run/shm/pylint_stats')
    os.environ['PYLINTHOME'] = str(stats_dir)
    exit_code = lint.lint(
        filename=str(modulepath),
        options=pylint_options,
    )
    # Clean up the throwaway stats directory.
    if stats_dir.is_dir():
        shutil.rmtree(str(stats_dir))
    if exit_code == 0:
        return True
    print('WARNING: {}() returned non-zero result: {}'.format(
        '.'.join((lint.lint.__module__, lint.lint.__name__)), exit_code))
    return False
def main():
    """CLI entrypoint"""
    parser = argparse.ArgumentParser(description='Run Pylint over an arbitrary module')
    parser.add_argument(
        '--hide-fixme', action='store_true',
        help='Hide "fixme" Pylint warnings.')
    parser.add_argument(
        '--show-locally-disabled', action='store_true',
        help='Show "locally-disabled" Pylint warnings.')
    parser.add_argument(
        'modulepath', type=Path,
        help='Path to the module to check')
    args = parser.parse_args()
    if not args.modulepath.exists():
        print('ERROR: Module path "{}" does not exist'.format(args.modulepath))
        exit(1)
    # Checks disabled by default; extended by the command-line flags below.
    disables = ['wrong-import-position']
    if args.hide_fixme:
        disables.append('fixme')
    if not args.show_locally_disabled:
        disables.append('locally-disabled')
    pylint_options = [
        '--disable={}'.format(','.join(disables)),
        '--jobs=4',
    ]
    exit(0 if run_pylint(args.modulepath, pylint_options) else 1)
if __name__ == '__main__':
    main()
from smartmin.views import SmartCreateView, SmartCRUDL, SmartListView, SmartUpdateView
from django import forms
from django.core.validators import validate_image_file_extension
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from dash.categories.fields import CategoryChoiceField
from dash.orgs.views import OrgObjPermsMixin, OrgPermsMixin
from .models import Category, Story, StoryImage
class StoryForm(forms.ModelForm):
    """Model form for creating/updating a Story, with org-scoped categories."""
    category = CategoryChoiceField(Category.objects.none())
    def __init__(self, *args, **kwargs):
        # The org is injected by the views' get_form_kwargs(); pop it before
        # handing the remaining kwargs to ModelForm.
        self.org = kwargs["org"]
        del kwargs["org"]
        super(StoryForm, self).__init__(*args, **kwargs)
        # We show all categories even inactive one in the dropdown
        qs = Category.objects.filter(org=self.org).order_by("name")
        self.fields["category"].queryset = qs
    class Meta:
        model = Story
        fields = (
            "is_active",
            "title",
            "featured",
            "summary",
            "content",
            "attachment",
            "written_by",
            "audio_link",
            "video_id",
            "tags",
            "category",
        )
class StoryCRUDL(SmartCRUDL):
model = Story
actions = ("create", "update", "list", "images")
    class Update(OrgObjPermsMixin, SmartUpdateView):
        """Edit an existing story; requires object-level org permissions."""
        form_class = StoryForm
        fields = (
            "is_active",
            "title",
            "featured",
            "summary",
            "content",
            "attachment",
            "written_by",
            "audio_link",
            "video_id",
            "tags",
            "category",
        )
        def pre_save(self, obj):
            # Normalize derived fields before the object is saved.
            obj = super(StoryCRUDL.Update, self).pre_save(obj)
            obj.audio_link = Story.format_audio_link(obj.audio_link)
            obj.tags = Story.space_tags(obj.tags)
            return obj
        def get_form_kwargs(self):
            # StoryForm requires the requesting org (see StoryForm.__init__).
            kwargs = super(StoryCRUDL.Update, self).get_form_kwargs()
            kwargs["org"] = self.request.org
            return kwargs
    class List(OrgPermsMixin, SmartListView):
        """Story listing scoped to the requesting user's org."""
        fields = ("title", "images", "featured", "has_report", "created_on")
        search_fields = ("title__icontains",)
        link_fields = ("title", "images")
        default_order = ("-created_on",)
        ordering = ("-created_on",)
        def get_has_report(self, obj):
            # "Report" here means an uploaded attachment on the story.
            if obj.attachment:
                return _("Yes")
            return _("No")
        def get_featured(self, obj):
            if obj.featured:
                return _("Yes")
            return _("No")
        def lookup_field_link(self, context, field, obj):
            # The "images" column links to the image-management page.
            if field == "images":
                return reverse("stories.story_images", args=[obj.pk])
            else:
                return super(StoryCRUDL.List, self).lookup_field_link(context, field, obj)
        def get_images(self, obj):
            # Shown as a count in the "images" column.
            return obj.images.count()
        def get_queryset(self, **kwargs):
            # Restrict the listing to the org derived from the request.
            queryset = super(StoryCRUDL.List, self).get_queryset(**kwargs)
            queryset = queryset.filter(org=self.derive_org())
            return queryset
    class Images(OrgObjPermsMixin, SmartUpdateView):
        """Manage up to three images attached to a story."""
        success_url = "@stories.story_list"
        title = _("Story Images")
        def get_form(self):
            # Replace the model form's fields with one ImageField per slot.
            form = super(StoryCRUDL.Images, self).get_form()
            form.fields.clear()
            idx = 1
            # add existing images
            for image in self.object.images.all().order_by("pk"):
                image_field_name = "image_%d" % idx
                image_field = forms.ImageField(
                    required=False,
                    initial=image.image,
                    label=_("Image %d") % idx,
                    help_text=_("Image to display on story page and in previews. (optional)"),
                    validators=[validate_image_file_extension],
                )
                # NOTE(review): mixes ``self.form.fields`` with the local
                # ``form`` cleared above — presumably they refer to the same
                # object; confirm against SmartUpdateView before refactoring.
                self.form.fields[image_field_name] = image_field
                idx += 1
            # pad with empty upload slots up to three images total
            while idx <= 3:
                self.form.fields["image_%d" % idx] = forms.ImageField(
                    required=False,
                    label=_("Image %d") % idx,
                    help_text=_("Image to display on story page and in previews (optional)"),
                    validators=[validate_image_file_extension],
                )
                idx += 1
            return form
        def post_save(self, obj):
            obj = super(StoryCRUDL.Images, self).post_save(obj)
            # remove our existing images
            self.object.images.all().delete()
            # overwrite our new ones
            # TODO: this could probably be done more elegantly
            for idx in range(1, 4):
                image = self.form.cleaned_data.get("image_%d" % idx, None)
                if image:
                    StoryImage.objects.create(
                        story=self.object, image=image, created_by=self.request.user, modified_by=self.request.user
                    )
            return obj
    class Create(OrgPermsMixin, SmartCreateView):
        """Create a new story, then redirect to its image-management page."""
        form_class = StoryForm
        success_url = "id@stories.story_images"
        fields = (
            "title",
            "featured",
            "summary",
            "content",
            "attachment",
            "written_by",
            "audio_link",
            "video_id",
            "tags",
            "category",
        )
        def pre_save(self, obj):
            # Attach the requesting org and normalize derived fields.
            obj = super(StoryCRUDL.Create, self).pre_save(obj)
            obj.org = self.request.org
            obj.audio_link = Story.format_audio_link(obj.audio_link)
            obj.tags = Story.space_tags(obj.tags)
            return obj
def get_form_kwargs(self):
kwargs = super(StoryCRUDL.Create, self).get_form_kwargs()
kwargs["org"] = self.request.org
            return kwargs
from smartmin.views import SmartCreateView, SmartCRUDL, SmartListView, SmartUpdateView
from django import forms
from django.core.validators import validate_image_file_extension
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from dash.categories.fields import CategoryChoiceField
from dash.orgs.views import OrgObjPermsMixin, OrgPermsMixin
from .models import Category, Story, StoryImage
class StoryForm(forms.ModelForm):
    """Model form for creating/updating a Story, with org-scoped categories."""
    category = CategoryChoiceField(Category.objects.none())
    def __init__(self, *args, **kwargs):
        # The org is injected by the views' get_form_kwargs(); pop it before
        # handing the remaining kwargs to ModelForm.
        self.org = kwargs["org"]
        del kwargs["org"]
        super(StoryForm, self).__init__(*args, **kwargs)
        # We show all categories even inactive one in the dropdown
        qs = Category.objects.filter(org=self.org).order_by("name")
        self.fields["category"].queryset = qs
    class Meta:
        model = Story
        fields = (
            "is_active",
            "title",
            "featured",
            "summary",
            "content",
            "attachment",
            "written_by",
            "audio_link",
            "video_id",
            "tags",
            "category",
        )
class StoryCRUDL(SmartCRUDL):
model = Story
actions = ("create", "update", "list", "images")
    class Update(OrgObjPermsMixin, SmartUpdateView):
        """Edit an existing story; requires object-level org permissions."""
        form_class = StoryForm
        fields = (
            "is_active",
            "title",
            "featured",
            "summary",
            "content",
            "attachment",
            "written_by",
            "audio_link",
            "video_id",
            "tags",
            "category",
        )
        def pre_save(self, obj):
            # Normalize derived fields before the object is saved.
            obj = super(StoryCRUDL.Update, self).pre_save(obj)
            obj.audio_link = Story.format_audio_link(obj.audio_link)
            obj.tags = Story.space_tags(obj.tags)
            return obj
        def get_form_kwargs(self):
            # StoryForm requires the requesting org (see StoryForm.__init__).
            kwargs = super(StoryCRUDL.Update, self).get_form_kwargs()
            kwargs["org"] = self.request.org
            return kwargs
    class List(OrgPermsMixin, SmartListView):
        """Story listing scoped to the requesting user's org."""
        fields = ("title", "images", "featured", "has_report", "created_on")
        search_fields = ("title__icontains",)
        link_fields = ("title", "images")
        default_order = ("-created_on",)
        ordering = ("-created_on",)
        def get_has_report(self, obj):
            # "Report" here means an uploaded attachment on the story.
            if obj.attachment:
                return _("Yes")
            return _("No")
        def get_featured(self, obj):
            if obj.featured:
                return _("Yes")
            return _("No")
        def lookup_field_link(self, context, field, obj):
            # The "images" column links to the image-management page.
            if field == "images":
                return reverse("stories.story_images", args=[obj.pk])
            else:
                return super(StoryCRUDL.List, self).lookup_field_link(context, field, obj)
        def get_images(self, obj):
            # Shown as a count in the "images" column.
            return obj.images.count()
        def get_queryset(self, **kwargs):
            # Restrict the listing to the org derived from the request.
            queryset = super(StoryCRUDL.List, self).get_queryset(**kwargs)
            queryset = queryset.filter(org=self.derive_org())
            return queryset
    class Images(OrgObjPermsMixin, SmartUpdateView):
        """Manage up to three images attached to a story."""
        success_url = "@stories.story_list"
        title = _("Story Images")
        def get_form(self):
            # Replace the model form's fields with one ImageField per slot.
            form = super(StoryCRUDL.Images, self).get_form()
            form.fields.clear()
            idx = 1
            # add existing images
            for image in self.object.images.all().order_by("pk"):
                image_field_name = "image_%d" % idx
                image_field = forms.ImageField(
                    required=False,
                    initial=image.image,
                    label=_("Image %d") % idx,
                    help_text=_("Image to display on story page and in previews. (optional)"),
                    validators=[validate_image_file_extension],
                )
                # NOTE(review): mixes ``self.form.fields`` with the local
                # ``form`` cleared above — presumably they refer to the same
                # object; confirm against SmartUpdateView before refactoring.
                self.form.fields[image_field_name] = image_field
                idx += 1
            # pad with empty upload slots up to three images total
            while idx <= 3:
                self.form.fields["image_%d" % idx] = forms.ImageField(
                    required=False,
                    label=_("Image %d") % idx,
                    help_text=_("Image to display on story page and in previews (optional)"),
                    validators=[validate_image_file_extension],
                )
                idx += 1
            return form
        def post_save(self, obj):
            obj = super(StoryCRUDL.Images, self).post_save(obj)
            # remove our existing images
            self.object.images.all().delete()
            # overwrite our new ones
            # TODO: this could probably be done more elegantly
            for idx in range(1, 4):
                image = self.form.cleaned_data.get("image_%d" % idx, None)
                if image:
                    StoryImage.objects.create(
                        story=self.object, image=image, created_by=self.request.user, modified_by=self.request.user
                    )
            return obj
    class Create(OrgPermsMixin, SmartCreateView):
        """Create a new story, then redirect to its image-management page."""
        form_class = StoryForm
        success_url = "id@stories.story_images"
        fields = (
            "title",
            "featured",
            "summary",
            "content",
            "attachment",
            "written_by",
            "audio_link",
            "video_id",
            "tags",
            "category",
        )
        def pre_save(self, obj):
            # Attach the requesting org and normalize derived fields.
            obj = super(StoryCRUDL.Create, self).pre_save(obj)
            obj.org = self.request.org
            obj.audio_link = Story.format_audio_link(obj.audio_link)
            obj.tags = Story.space_tags(obj.tags)
            return obj
def get_form_kwargs(self):
kwargs = super(StoryCRUDL.Create, self).get_form_kwargs()
kwargs["org"] = self.request.org
            return kwargs
from __future__ import absolute_import
import sys, os
import importlib
from . import backend
_enabled_apis = set()
def _gen_missing_api(api, mod_name):
def _missing_api(*args, **kwargs):
raise ImportError('API "%s" is not supported by backend "%s".'
' You can switch to other backends by setting'
' the DGLBACKEND environment.' % (api, mod_name))
return _missing_api
def load_backend(mod_name):
    """Import backend *mod_name* and re-export its API on this module.

    Every public name declared in ``backend`` is either forwarded from the
    loaded module or replaced with a stub that raises ImportError on use.
    """
    mod = importlib.import_module('.%s' % mod_name, __name__)
    thismod = sys.modules[__name__]
    for api in backend.__dict__.keys():
        if api.startswith('__'):
            # ignore python builtin attributes
            continue
        if api == 'data_type_dict':
            # load data type
            if api not in mod.__dict__:
                raise ImportError('API "data_type_dict" is required but missing for'
                                  ' backend "%s".' % (mod_name))
            data_type_dict = mod.__dict__[api]()
            # Export each backend dtype (e.g. float32) as a module attribute.
            for name, dtype in data_type_dict.items():
                setattr(thismod, name, dtype)
            # override data type dict function
            setattr(thismod, 'data_type_dict', data_type_dict)
            setattr(thismod,
                    'reverse_data_type_dict',
                    {v: k for k, v in data_type_dict.items()})
            # log backend name
            setattr(thismod, 'backend_name', mod_name)
        else:
            # load functions
            if api in mod.__dict__:
                _enabled_apis.add(api)
                setattr(thismod, api, mod.__dict__[api])
            else:
                setattr(thismod, api, _gen_missing_api(api, mod_name))
# Select and load the backend at import time from $DGLBACKEND (default: pytorch).
load_backend(os.environ.get('DGLBACKEND', 'pytorch').lower())
def is_enabled(api):
"""Return true if the api is enabled by the current backend.
Parameters
----------
api : str
The api name.
Returns
-------
bool
True if the API is enabled by the current backend.
"""
    return api in _enabled_apis
from __future__ import absolute_import
import sys, os
import importlib
from . import backend
_enabled_apis = set()
def _gen_missing_api(api, mod_name):
def _missing_api(*args, **kwargs):
raise ImportError('API "%s" is not supported by backend "%s".'
' You can switch to other backends by setting'
' the DGLBACKEND environment.' % (api, mod_name))
return _missing_api
def load_backend(mod_name):
    """Import backend *mod_name* and re-export its API on this module.

    Every public name declared in ``backend`` is either forwarded from the
    loaded module or replaced with a stub that raises ImportError on use.
    """
    mod = importlib.import_module('.%s' % mod_name, __name__)
    thismod = sys.modules[__name__]
    for api in backend.__dict__.keys():
        if api.startswith('__'):
            # ignore python builtin attributes
            continue
        if api == 'data_type_dict':
            # load data type
            if api not in mod.__dict__:
                raise ImportError('API "data_type_dict" is required but missing for'
                                  ' backend "%s".' % (mod_name))
            data_type_dict = mod.__dict__[api]()
            # Export each backend dtype (e.g. float32) as a module attribute.
            for name, dtype in data_type_dict.items():
                setattr(thismod, name, dtype)
            # override data type dict function
            setattr(thismod, 'data_type_dict', data_type_dict)
            setattr(thismod,
                    'reverse_data_type_dict',
                    {v: k for k, v in data_type_dict.items()})
            # log backend name
            setattr(thismod, 'backend_name', mod_name)
        else:
            # load functions
            if api in mod.__dict__:
                _enabled_apis.add(api)
                setattr(thismod, api, mod.__dict__[api])
            else:
                setattr(thismod, api, _gen_missing_api(api, mod_name))
# Select and load the backend at import time from $DGLBACKEND (default: pytorch).
load_backend(os.environ.get('DGLBACKEND', 'pytorch').lower())
def is_enabled(api):
    """Return true if the api is enabled by the current backend.

    Parameters
    ----------
    api : str
        The api name.

    Returns
    -------
    bool
        True if the API is enabled by the current backend.
    """
    # Stripped garbled dataset residue that was fused onto this line.
    return api in _enabled_apis
import six
import mock
import shutil
import sqlalchemy
from sqlalchemy.orm import exc
import tempfile
import testtools
import time
import kmip
from kmip.core import attributes
from kmip.core import enums
from kmip.core import exceptions
from kmip.core import misc
from kmip.core import objects
from kmip.core import secrets
from kmip.core.factories import attributes as factory
from kmip.core.messages import contents
from kmip.core.messages import messages
from kmip.core.messages.payloads import activate
from kmip.core.messages.payloads import create
from kmip.core.messages.payloads import create_key_pair
from kmip.core.messages.payloads import destroy
from kmip.core.messages.payloads import discover_versions
from kmip.core.messages.payloads import get
from kmip.core.messages.payloads import get_attribute_list
from kmip.core.messages.payloads import get_attributes
from kmip.core.messages.payloads import query
from kmip.core.messages.payloads import register
from kmip.core.messages.payloads import mac
from kmip.core.messages.payloads import locate
from kmip.pie import objects as pie_objects
from kmip.pie import sqltypes
from kmip.services.server import engine
class MockRegexString(str):
    """
    A comparator string for doing simple containment regex comparisons
    for mock asserts.
    """
    def __eq__(self, other):
        # Equal when this string occurs anywhere inside *other*.
        return self in other
    # Defining __eq__ sets __hash__ to None in Python 3, which would make
    # instances unhashable; restore str's hashing behavior.
    __hash__ = str.__hash__
class TestKmipEngine(testtools.TestCase):
"""
A test suite for the KmipEngine.
"""
    def setUp(self):
        """Create a fresh in-memory SQLite database and a scratch directory."""
        super(TestKmipEngine, self).setUp()
        self.engine = sqlalchemy.create_engine(
            'sqlite:///:memory:',
        )
        sqltypes.Base.metadata.create_all(self.engine)
        self.session_factory = sqlalchemy.orm.sessionmaker(
            bind=self.engine
        )
        # Scratch directory for per-test policy files; removed automatically.
        self.temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.temp_dir)
    def tearDown(self):
        """No per-test cleanup beyond the callbacks registered in setUp."""
        super(TestKmipEngine, self).tearDown()
    def _build_request(self):
        """Build a complete KMIP 1.0 DiscoverVersions request message.

        Used as a canned, well-formed request fixture by the tests below.
        """
        payload = discover_versions.DiscoverVersionsRequestPayload()
        batch = [
            messages.RequestBatchItem(
                operation=contents.Operation(
                    enums.Operation.DISCOVER_VERSIONS
                ),
                request_payload=payload
            )
        ]
        protocol = contents.ProtocolVersion.create(1, 0)
        max_size = contents.MaximumResponseSize(2 ** 20)
        asynch = contents.AsynchronousIndicator(False)
        # TODO (peterhamilton) Change this insanity in the substructs.
        username = objects.Credential.UsernamePasswordCredential.Username(
            "tester"
        )
        password = objects.Credential.UsernamePasswordCredential.Password(
            "password"
        )
        creds = objects.Credential.UsernamePasswordCredential(
            username=username,
            password=password
        )
        auth = contents.Authentication(creds)
        batch_error_option = contents.BatchErrorContinuationOption(
            enums.BatchErrorContinuationOption.STOP
        )
        batch_order_option = contents.BatchOrderOption(True)
        timestamp = contents.TimeStamp(int(time.time()))
        header = messages.RequestHeader(
            protocol_version=protocol,
            maximum_response_size=max_size,
            asynchronous_indicator=asynch,
            authentication=auth,
            batch_error_cont_option=batch_error_option,
            batch_order_option=batch_order_option,
            time_stamp=timestamp,
            batch_count=contents.BatchCount(len(batch))
        )
        return messages.RequestMessage(
            request_header=header,
            batch_items=batch
        )
    def test_init(self):
        """
        Test that a KmipEngine can be instantiated without any errors.
        """
        engine.KmipEngine()
    @mock.patch('sqlalchemy.create_engine')
    def test_init_create_engine(self, create_engine_mock):
        """
        Test that the right arguments are used to create the engine's SQLite
        backend.
        """
        engine.KmipEngine()
        # The engine is expected to use a file-backed SQLite database shared
        # across threads (hence check_same_thread=False).
        args = ("sqlite:////tmp/pykmip.database",)
        fargs = {
            'echo': False,
            'connect_args': {'check_same_thread': False}
        }
        create_engine_mock.assert_called_once_with(*args, **fargs)
    def test_load_operation_policies(self):
        """
        Test that the KmipEngine can correctly load operation policies.
        """
        e = engine.KmipEngine()
        e._logger = mock.MagicMock()
        policy_file = tempfile.NamedTemporaryFile(
            dir=self.temp_dir
        )
        # Write a single well-formed user policy into the scratch directory.
        with open(policy_file.name, 'w') as f:
            f.write(
                '{"test": {"CERTIFICATE": {"LOCATE": "ALLOW_ALL"}}}'
            )
        # Two reserved policies ship with the engine by default.
        self.assertEqual(2, len(e._operation_policies))
        e._load_operation_policies(self.temp_dir)
        e._logger.info.assert_any_call(
            "Loading user-defined operation policy files from: {0}".format(
                self.temp_dir
            )
        )
        e._logger.info.assert_any_call(
            "Loading user-defined operation policies from file: {0}".format(
                policy_file.name
            )
        )
        # The user policy is added and parsed into enum-keyed structures.
        self.assertEqual(3, len(e._operation_policies))
        self.assertIn('test', e._operation_policies.keys())
        test_policy = {
            enums.ObjectType.CERTIFICATE: {
                enums.Operation.LOCATE: enums.Policy.ALLOW_ALL
            }
        }
        self.assertEqual(test_policy, e._operation_policies.get('test'))
    def test_load_operation_policies_with_file_read_error(self):
        """
        Test that the KmipEngine can correctly handle load errors.
        """
        # NOTE(review): despite the name, this exercises an invalid policy
        # body ("INVALID" object type), not an OS-level file read error.
        e = engine.KmipEngine()
        e._logger = mock.MagicMock()
        policy_file = tempfile.NamedTemporaryFile(
            dir=self.temp_dir
        )
        with open(policy_file.name, 'w') as f:
            f.write(
                '{"test": {"INVALID": {"LOCATE": "ALLOW_ALL"}}}'
            )
        self.assertEqual(2, len(e._operation_policies))
        e._load_operation_policies(self.temp_dir)
        e._logger.info.assert_any_call(
            "Loading user-defined operation policy files from: {0}".format(
                self.temp_dir
            )
        )
        e._logger.info.assert_any_call(
            "Loading user-defined operation policies from file: {0}".format(
                policy_file.name
            )
        )
        # The bad policy is logged and discarded; the policy count is unchanged.
        e._logger.error.assert_called_once_with(
            "A failure occurred while loading policies."
        )
        e._logger.exception.assert_called_once()
        self.assertEqual(2, len(e._operation_policies))
    def test_load_operation_policies_with_reserved(self):
        """
        Test that the KmipEngine can correctly load operation policies, even
        when a policy attempts to overwrite a reserved one.
        """
        e = engine.KmipEngine()
        e._logger = mock.MagicMock()
        policy_file = tempfile.NamedTemporaryFile(
            dir=self.temp_dir
        )
        # 'public' is one of the engine's reserved policy names.
        with open(policy_file.name, 'w') as f:
            f.write(
                '{"public": {"CERTIFICATE": {"LOCATE": "ALLOW_ALL"}}}'
            )
        self.assertEqual(2, len(e._operation_policies))
        e._load_operation_policies(self.temp_dir)
        e._logger.info.assert_any_call(
            "Loading user-defined operation policy files from: {0}".format(
                self.temp_dir
            )
        )
        e._logger.info.assert_any_call(
            "Loading user-defined operation policies from file: {0}".format(
                policy_file.name
            )
        )
        # The conflicting policy is warned about and dropped.
        e._logger.warning.assert_called_once_with(
            "Loaded policy 'public' overwrites a reserved policy and will "
            "be thrown out."
        )
        self.assertEqual(2, len(e._operation_policies))
    def test_load_operation_policies_with_duplicate(self):
        """
        Test that the KmipEngine can correctly load operation policies, even
        when a policy is defined multiple times.
        """
        e = engine.KmipEngine()
        e._logger = mock.MagicMock()
        # Two files defining the same 'test' policy; the later load wins and
        # the earlier definition is discarded with a warning.
        policy_file_a = tempfile.NamedTemporaryFile(
            dir=self.temp_dir
        )
        with open(policy_file_a.name, 'w') as f:
            f.write(
                '{"test": {"CERTIFICATE": {"LOCATE": "ALLOW_ALL"}}}'
            )
        policy_file_b = tempfile.NamedTemporaryFile(
            dir=self.temp_dir
        )
        with open(policy_file_b.name, 'w') as f:
            f.write(
                '{"test": {"CERTIFICATE": {"LOCATE": "ALLOW_ALL"}}}'
            )
        self.assertEqual(2, len(e._operation_policies))
        e._load_operation_policies(self.temp_dir)
        e._logger.info.assert_any_call(
            "Loading user-defined operation policy files from: {0}".format(
                self.temp_dir
            )
        )
        e._logger.info.assert_any_call(
            "Loading user-defined operation policies from file: {0}".format(
                policy_file_a.name
            )
        )
        e._logger.info.assert_any_call(
            "Loading user-defined operation policies from file: {0}".format(
                policy_file_b.name
            )
        )
        e._logger.warning.assert_called_once_with(
            "Loaded policy 'test' overwrites a preexisting policy and will "
            "be thrown out."
        )
        # Exactly one copy of the duplicate policy survives.
        self.assertEqual(3, len(e._operation_policies))
        self.assertIn('test', e._operation_policies.keys())
        test_policy = {
            enums.ObjectType.CERTIFICATE: {
                enums.Operation.LOCATE: enums.Policy.ALLOW_ALL
            }
        }
        self.assertEqual(test_policy, e._operation_policies.get('test'))
def test_version_operation_match(self):
"""
Test that a valid response is generated when trying to invoke an
operation supported by a specific version of KMIP.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
payload = discover_versions.DiscoverVersionsRequestPayload()
e._process_discover_versions(payload)
def test_version_operation_mismatch(self):
"""
Test that an OperationNotSupported error is generated when trying to
invoke an operation unsupported by a specific version of KMIP.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
e._protocol_version = contents.ProtocolVersion.create(1, 0)
args = (None, )
regex = "DiscoverVersions is not supported by KMIP {0}".format(
e._protocol_version
)
six.assertRaisesRegex(
self,
exceptions.OperationNotSupported,
regex,
e._process_discover_versions,
*args
)
def test_process_request(self):
"""
Test that a basic request is processed correctly.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
protocol = contents.ProtocolVersion.create(1, 1)
header = messages.RequestHeader(
protocol_version=protocol,
maximum_response_size=contents.MaximumResponseSize(2 ** 20),
authentication=contents.Authentication(),
batch_error_cont_option=contents.BatchErrorContinuationOption(
enums.BatchErrorContinuationOption.STOP
),
batch_order_option=contents.BatchOrderOption(True),
time_stamp=contents.TimeStamp(int(time.time())),
batch_count=contents.BatchCount(1)
)
payload = discover_versions.DiscoverVersionsRequestPayload()
batch = list([
messages.RequestBatchItem(
operation=contents.Operation(
enums.Operation.DISCOVER_VERSIONS
),
request_payload=payload
)
])
request = messages.RequestMessage(
request_header=header,
batch_items=batch
)
response, max_size = e.process_request(request)
e._logger.info.assert_any_call(
MockRegexString("Received request at time:")
)
e._logger.info.assert_any_call(
"Processing operation: DiscoverVersions"
)
self.assertIsInstance(response, messages.ResponseMessage)
self.assertEqual(2 ** 20, max_size)
self.assertIsNotNone(response.response_header)
header = response.response_header
self.assertIsNotNone(header)
self.assertEqual(
contents.ProtocolVersion.create(1, 1),
header.protocol_version
)
self.assertIsInstance(header.time_stamp, contents.TimeStamp)
self.assertIsInstance(header.batch_count, contents.BatchCount)
self.assertEqual(1, header.batch_count.value)
batch = response.batch_items
self.assertNotEqual(list(), batch)
batch_item = batch[0]
self.assertIsInstance(batch_item.operation, contents.Operation)
self.assertEqual(
enums.Operation.DISCOVER_VERSIONS,
batch_item.operation.value
)
self.assertIsNone(batch_item.unique_batch_item_id)
self.assertEqual(
enums.ResultStatus.SUCCESS,
batch_item.result_status.value
)
self.assertIsNone(batch_item.result_reason)
self.assertIsNone(batch_item.result_message)
self.assertIsNone(batch_item.async_correlation_value)
self.assertIsInstance(
batch_item.response_payload,
discover_versions.DiscoverVersionsResponsePayload
)
self.assertIsNone(batch_item.message_extension)
def test_process_request_unsupported_version(self):
"""
Test that an InvalidMessage exception is raised when processing a
request using an unsupported KMIP version.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
protocol = contents.ProtocolVersion.create(0, 1)
header = messages.RequestHeader(
protocol_version=protocol
)
request = messages.RequestMessage(
request_header=header
)
args = (request, )
regex = "KMIP {0} is not supported by the server.".format(
protocol
)
six.assertRaisesRegex(
self,
exceptions.InvalidMessage,
regex,
e.process_request,
*args
)
def test_process_request_stale_timestamp(self):
"""
Test that an InvalidMessage exception is raised when processing a
request with a stale timestamp.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
protocol = contents.ProtocolVersion.create(1, 0)
header = messages.RequestHeader(
protocol_version=protocol,
time_stamp=contents.TimeStamp(0)
)
request = messages.RequestMessage(
request_header=header
)
args = (request, )
regex = "Stale request rejected by server."
six.assertRaisesRegex(
self,
exceptions.InvalidMessage,
regex,
e.process_request,
*args
)
e._logger.warning.assert_any_call(
MockRegexString(
"Received request with old timestamp. Possible replay attack."
)
)
def test_process_request_future_timestamp(self):
"""
Test that an InvalidMessage exception is raised when processing a
request with a future timestamp.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
protocol = contents.ProtocolVersion.create(1, 0)
header = messages.RequestHeader(
protocol_version=protocol,
time_stamp=contents.TimeStamp(10 ** 10)
)
request = messages.RequestMessage(
request_header=header
)
args = (request, )
regex = "Future request rejected by server."
six.assertRaisesRegex(
self,
exceptions.InvalidMessage,
regex,
e.process_request,
*args
)
e._logger.warning.assert_any_call(
MockRegexString(
"Received request with future timestamp."
)
)
def test_process_request_unsupported_async_indicator(self):
"""
Test than an InvalidMessage error is generated while processing a
batch with an unsupported asynchronous indicator option.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
protocol = contents.ProtocolVersion.create(1, 1)
header = messages.RequestHeader(
protocol_version=protocol,
asynchronous_indicator=contents.AsynchronousIndicator(True)
)
request = messages.RequestMessage(
request_header=header,
)
args = (request, )
regex = "Asynchronous operations are not supported."
six.assertRaisesRegex(
self,
exceptions.InvalidMessage,
regex,
e.process_request,
*args
)
def test_process_request_unsupported_batch_option(self):
"""
Test that an InvalidMessage error is generated while processing a
batch with an unsupported batch error continuation option.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
protocol = contents.ProtocolVersion.create(1, 1)
header = messages.RequestHeader(
protocol_version=protocol,
authentication=contents.Authentication(),
batch_error_cont_option=contents.BatchErrorContinuationOption(
enums.BatchErrorContinuationOption.UNDO
)
)
request = messages.RequestMessage(
request_header=header,
)
args = (request, )
regex = "Undo option for batch handling is not supported."
six.assertRaisesRegex(
self,
exceptions.InvalidMessage,
regex,
e.process_request,
*args
)
def test_process_request_missing_credential(self):
"""
Test that the engine does not immediately error out when retrieving
a non-existent credential from the request.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
protocol = contents.ProtocolVersion.create(1, 1)
header = messages.RequestHeader(
protocol_version=protocol,
authentication=None,
batch_error_cont_option=contents.BatchErrorContinuationOption(
enums.BatchErrorContinuationOption.STOP
),
batch_order_option=contents.BatchOrderOption(True),
time_stamp=contents.TimeStamp(int(time.time())),
batch_count=contents.BatchCount(1)
)
payload = discover_versions.DiscoverVersionsRequestPayload()
batch = list([
messages.RequestBatchItem(
operation=contents.Operation(
enums.Operation.DISCOVER_VERSIONS
),
request_payload=payload
)
])
request = messages.RequestMessage(
request_header=header,
batch_items=batch
)
e.process_request(request)
def test_build_error_response(self):
"""
Test that a bare bones response containing a single error result can
be constructed correctly.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
response = e.build_error_response(
contents.ProtocolVersion.create(1, 1),
enums.ResultReason.GENERAL_FAILURE,
"A general test failure occurred."
)
self.assertIsInstance(response, messages.ResponseMessage)
header = response.response_header
self.assertEqual(
contents.ProtocolVersion.create(1, 1),
header.protocol_version
)
self.assertIsNotNone(header.time_stamp)
self.assertIsNotNone(header.batch_count)
self.assertEqual(1, header.batch_count.value)
batch = response.batch_items
self.assertEqual(1, len(batch))
batch_item = batch[0]
self.assertIsNone(batch_item.operation)
self.assertIsNone(batch_item.unique_batch_item_id)
self.assertEqual(
enums.ResultStatus.OPERATION_FAILED,
batch_item.result_status.value
)
self.assertEqual(
enums.ResultReason.GENERAL_FAILURE,
batch_item.result_reason.value
)
self.assertEqual(
"A general test failure occurred.",
batch_item.result_message.value
)
self.assertIsNone(batch_item.async_correlation_value)
self.assertIsNone(batch_item.response_payload)
self.assertIsNone(batch_item.message_extension)
def test_process_batch(self):
"""
Test that a batch is processed correctly.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
payload = discover_versions.DiscoverVersionsRequestPayload()
batch = list([
messages.RequestBatchItem(
operation=contents.Operation(
enums.Operation.DISCOVER_VERSIONS
),
request_payload=payload
)
])
results = e._process_batch(
batch,
enums.BatchErrorContinuationOption.STOP,
True
)
self.assertIsNotNone(results)
self.assertEqual(1, len(results))
def test_process_multibatch(self):
"""
Test that a batch containing multiple operations is processed
correctly.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
payload = discover_versions.DiscoverVersionsRequestPayload()
batch = list([
messages.RequestBatchItem(
operation=contents.Operation(
enums.Operation.DISCOVER_VERSIONS
),
unique_batch_item_id=contents.UniqueBatchItemID(1),
request_payload=payload
),
messages.RequestBatchItem(
operation=contents.Operation(
enums.Operation.DISCOVER_VERSIONS
),
unique_batch_item_id=contents.UniqueBatchItemID(2),
request_payload=payload
)
])
results = e._process_batch(
batch,
enums.BatchErrorContinuationOption.STOP,
True
)
self.assertIsNotNone(results)
self.assertEqual(2, len(results))
def test_process_batch_missing_batch_id(self):
"""
Test that an InvalidMessage error is generated while processing a
batch with missing batch IDs.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
batch = list([
messages.RequestBatchItem(),
messages.RequestBatchItem()
])
args = (batch, None, None)
six.assertRaisesRegex(
self,
exceptions.InvalidMessage,
"Batch item ID is undefined.",
e._process_batch,
*args
)
def test_process_batch_expected_error(self):
"""
Test than an expected KMIP error is handled appropriately while
processing a batch of operations.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
e._protocol_version = contents.ProtocolVersion.create(1, 0)
batch = list([
messages.RequestBatchItem(
operation=contents.Operation(
enums.Operation.DISCOVER_VERSIONS
)
)
])
results = e._process_batch(
batch,
enums.BatchErrorContinuationOption.STOP,
True
)
self.assertIsNotNone(results)
self.assertEqual(1, len(results))
result = results[0]
self.assertIsInstance(result, messages.ResponseBatchItem)
self.assertIsNotNone(result.operation)
self.assertEqual(
enums.Operation.DISCOVER_VERSIONS,
result.operation.value
)
self.assertIsNone(result.unique_batch_item_id)
self.assertIsNotNone(result.result_status)
self.assertEqual(
enums.ResultStatus.OPERATION_FAILED,
result.result_status.value
)
self.assertIsNotNone(result.result_reason)
self.assertEqual(
enums.ResultReason.OPERATION_NOT_SUPPORTED,
result.result_reason.value
)
self.assertIsNotNone(result.result_message)
error_message = "DiscoverVersions is not supported by KMIP {0}".format(
e._protocol_version
)
self.assertEqual(error_message, result.result_message.value)
self.assertIsNone(result.async_correlation_value)
self.assertIsNone(result.response_payload)
self.assertIsNone(result.message_extension)
def test_process_batch_unexpected_error(self):
"""
Test that an unexpected, non-KMIP error is handled appropriately
while processing a batch of operations.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
test_exception = Exception("A general test failure occurred.")
e._process_operation = mock.MagicMock(side_effect=test_exception)
batch = list([
messages.RequestBatchItem(
operation=contents.Operation(
enums.Operation.DISCOVER_VERSIONS
)
)
])
results = e._process_batch(
batch,
enums.BatchErrorContinuationOption.STOP,
True
)
self.assertIsNotNone(results)
self.assertEqual(1, len(results))
result = results[0]
e._logger.warning.assert_called_with(
"Error occurred while processing operation."
)
e._logger.exception.assert_called_with(test_exception)
self.assertIsInstance(result, messages.ResponseBatchItem)
self.assertIsNotNone(result.operation)
self.assertEqual(
enums.Operation.DISCOVER_VERSIONS,
result.operation.value
)
self.assertIsNone(result.unique_batch_item_id)
self.assertIsNotNone(result.result_status)
self.assertEqual(
enums.ResultStatus.OPERATION_FAILED,
result.result_status.value
)
self.assertIsNotNone(result.result_reason)
self.assertEqual(
enums.ResultReason.GENERAL_FAILURE,
result.result_reason.value
)
self.assertIsNotNone(result.result_message)
self.assertEqual(
"Operation failed. See the server logs for more information.",
result.result_message.value
)
self.assertIsNone(result.async_correlation_value)
self.assertIsNone(result.response_payload)
self.assertIsNone(result.message_extension)
def test_process_operation(self):
"""
Test that the right subroutine is called when invoking operations
supported by the server.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
e._process_create = mock.MagicMock()
e._process_create_key_pair = mock.MagicMock()
e._process_register = mock.MagicMock()
e._process_get = mock.MagicMock()
e._process_get_attributes = mock.MagicMock()
e._process_get_attribute_list = mock.MagicMock()
e._process_activate = mock.MagicMock()
e._process_destroy = mock.MagicMock()
e._process_query = mock.MagicMock()
e._process_discover_versions = mock.MagicMock()
e._process_operation(enums.Operation.CREATE, None)
e._process_operation(enums.Operation.CREATE_KEY_PAIR, None)
e._process_operation(enums.Operation.REGISTER, None)
e._process_operation(enums.Operation.GET, None)
e._process_operation(enums.Operation.GET_ATTRIBUTES, None)
e._process_operation(enums.Operation.GET_ATTRIBUTE_LIST, None)
e._process_operation(enums.Operation.ACTIVATE, None)
e._process_operation(enums.Operation.DESTROY, None)
e._process_operation(enums.Operation.QUERY, None)
e._process_operation(enums.Operation.DISCOVER_VERSIONS, None)
e._process_create.assert_called_with(None)
e._process_create_key_pair.assert_called_with(None)
e._process_register.assert_called_with(None)
e._process_get.assert_called_with(None)
e._process_get_attributes.assert_called_with(None)
e._process_get_attribute_list.assert_called_with(None)
e._process_activate.assert_called_with(None)
e._process_destroy.assert_called_with(None)
e._process_query.assert_called_with(None)
e._process_discover_versions.assert_called_with(None)
def test_unsupported_operation(self):
"""
Test that an OperationNotSupported error is generated when invoking
an operation not supported by the server.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
args = (enums.Operation.POLL, None)
regex = "{0} operation is not supported by the server.".format(
args[0].name.title()
)
six.assertRaisesRegex(
self,
exceptions.OperationNotSupported,
regex,
e._process_operation,
*args
)
def test_get_object_type(self):
"""
Test that the object type of a stored object can be retrieved
correctly.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
e._data_session.add(obj_a)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
id_a = str(obj_a.unique_identifier)
object_type = e._get_object_type(id_a)
e._data_session.commit()
self.assertEqual(pie_objects.OpaqueObject, object_type)
def test_get_object_type_missing_object(self):
"""
Test that an ItemNotFound error is generated when attempting to
retrieve the object type of an object that does not exist.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
args = ('1', )
regex = "Could not locate object: 1"
six.assertRaisesRegex(
self,
exceptions.ItemNotFound,
regex,
e._get_object_type,
*args
)
e._data_session.commit()
e._logger.warning.assert_called_once_with(
"Could not identify object type for object: 1"
)
def test_get_object_type_multiple_objects(self):
"""
Test that a sqlalchemy.orm.exc.MultipleResultsFound error is generated
when getting the object type of multiple objects map to the same
object ID.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
test_exception = exc.MultipleResultsFound()
e._data_session.query = mock.MagicMock(side_effect=test_exception)
e._logger = mock.MagicMock()
args = ('1', )
self.assertRaises(
exc.MultipleResultsFound,
e._get_object_type,
*args
)
e._data_session.commit()
e._logger.warning.assert_called_once_with(
"Multiple objects found for ID: 1"
)
def test_get_object_type_unsupported_type(self):
"""
Test that an InvalidField error is generated when attempting to
get the object type of an object with an unsupported object type.
This should never happen by definition, but "Safety first!"
"""
e = engine.KmipEngine()
e._object_map = {enums.ObjectType.OPAQUE_DATA: None}
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
e._data_session.add(obj_a)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
id_a = str(obj_a.unique_identifier)
args = (id_a, )
name = enums.ObjectType.OPAQUE_DATA.name
regex = "The {0} object type is not supported.".format(
''.join(
[x.capitalize() for x in name.split('_')]
)
)
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._get_object_type,
*args
)
e._data_session.commit()
    def test_build_core_object(self):
        """
        Test that kmip.core objects can be built from simpler kmip.pie
        objects.

        Covers one representative of each supported managed object type:
        Certificate, SymmetricKey, PublicKey, PrivateKey, SecretData, and
        OpaqueObject. Each pie object is built with empty byte values; the
        test checks that type, algorithm, length, and value fields survive
        the conversion.
        """
        e = engine.KmipEngine()
        e._logger = mock.MagicMock()
        # Test building a Certificate.
        managed_object = pie_objects.X509Certificate(value=b'')
        core_object = e._build_core_object(managed_object)
        self.assertIsInstance(core_object, secrets.Certificate)
        self.assertEqual(
            b'',
            core_object.certificate_value.value
        )
        self.assertEqual(
            enums.CertificateTypeEnum.X_509,
            core_object.certificate_type.value
        )
        # Test building a Symmetric Key.
        managed_object = pie_objects.SymmetricKey(
            enums.CryptographicAlgorithm.AES,
            0,
            b''
        )
        core_object = e._build_core_object(managed_object)
        self.assertIsInstance(core_object, secrets.SymmetricKey)
        self.assertEqual(
            enums.CryptographicAlgorithm.AES,
            core_object.key_block.cryptographic_algorithm.value
        )
        self.assertEqual(
            0,
            core_object.key_block.cryptographic_length.value
        )
        self.assertEqual(
            b'',
            core_object.key_block.key_value.key_material.value
        )
        # Test building a Public Key.
        managed_object = pie_objects.PublicKey(
            enums.CryptographicAlgorithm.RSA,
            0,
            b''
        )
        core_object = e._build_core_object(managed_object)
        self.assertIsInstance(core_object, secrets.PublicKey)
        self.assertEqual(
            enums.CryptographicAlgorithm.RSA,
            core_object.key_block.cryptographic_algorithm.value
        )
        self.assertEqual(
            0,
            core_object.key_block.cryptographic_length.value
        )
        self.assertEqual(
            b'',
            core_object.key_block.key_value.key_material.value
        )
        # Test building a Private Key. Unlike the public key, the private
        # key also carries an explicit key format type (PKCS#8).
        managed_object = pie_objects.PrivateKey(
            enums.CryptographicAlgorithm.RSA,
            0,
            b'',
            enums.KeyFormatType.PKCS_8
        )
        core_object = e._build_core_object(managed_object)
        self.assertIsInstance(core_object, secrets.PrivateKey)
        self.assertEqual(
            enums.CryptographicAlgorithm.RSA,
            core_object.key_block.cryptographic_algorithm.value
        )
        self.assertEqual(
            0,
            core_object.key_block.cryptographic_length.value
        )
        self.assertEqual(
            b'',
            core_object.key_block.key_value.key_material.value
        )
        self.assertEqual(
            enums.KeyFormatType.PKCS_8,
            core_object.key_block.key_format_type.value
        )
        # Test building a Secret Data.
        managed_object = pie_objects.SecretData(
            b'',
            enums.SecretDataType.PASSWORD
        )
        core_object = e._build_core_object(managed_object)
        self.assertIsInstance(core_object, secrets.SecretData)
        self.assertEqual(
            enums.SecretDataType.PASSWORD,
            core_object.secret_data_type.value
        )
        self.assertEqual(
            b'',
            core_object.key_block.key_value.key_material.value
        )
        # Test building an Opaque Data.
        managed_object = pie_objects.OpaqueObject(
            b'',
            enums.OpaqueDataType.NONE
        )
        core_object = e._build_core_object(managed_object)
        self.assertIsInstance(core_object, secrets.OpaqueObject)
        self.assertEqual(
            enums.OpaqueDataType.NONE,
            core_object.opaque_data_type.value
        )
        self.assertEqual(
            b'',
            core_object.opaque_data_value.value
        )
def test_build_core_object_unsupported_type(self):
"""
Test that an InvalidField error is generated when building
kmip.core objects that are unsupported.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
args = (None, )
regex = "Cannot build an unsupported object type."
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._build_core_object,
*args
)
class DummyObject:
def __init__(self):
self._object_type = enums.ObjectType.SPLIT_KEY
args = (DummyObject(), )
regex = "The SplitKey object type is not supported."
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._build_core_object,
*args
)
    def test_process_template_attribute(self):
        """
        Test that a template attribute structure can be processed correctly.

        The processed result should be a dict keyed by attribute name, with
        the multivalued Name attribute collected into a list and the
        single-valued attributes stored directly.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        attribute_factory = factory.AttributeFactory()
        # Build one of each common attribute kind for a symmetric key.
        name = attribute_factory.create_attribute(
            enums.AttributeType.NAME,
            attributes.Name.create(
                'Test Symmetric Key',
                enums.NameType.UNINTERPRETED_TEXT_STRING
            )
        )
        algorithm = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
            enums.CryptographicAlgorithm.AES
        )
        length = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
            128
        )
        mask = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
            [
                enums.CryptographicUsageMask.ENCRYPT,
                enums.CryptographicUsageMask.DECRYPT
            ]
        )
        template_attribute = objects.TemplateAttribute(
            attributes=[name, algorithm, length, mask]
        )
        result = e._process_template_attribute(template_attribute)
        self.assertIsInstance(result, dict)
        self.assertEqual(4, len(result.keys()))
        self.assertIn('Name', result.keys())
        self.assertIn('Cryptographic Algorithm', result.keys())
        self.assertIn('Cryptographic Length', result.keys())
        self.assertIn('Cryptographic Usage Mask', result.keys())
        # Name is multivalued, so it is wrapped in a list; the rest map to
        # their attribute values directly.
        self.assertEqual([name.attribute_value], result.get('Name'))
        self.assertEqual(
            algorithm.attribute_value,
            result.get('Cryptographic Algorithm')
        )
        self.assertEqual(
            length.attribute_value,
            result.get('Cryptographic Length')
        )
        self.assertEqual(
            mask.attribute_value,
            result.get('Cryptographic Usage Mask')
        )
    def test_process_template_attribute_unsupported_features(self):
        """
        Test that the right errors are generated when unsupported features
        are referenced while processing a template attribute.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        attribute_factory = factory.AttributeFactory()
        # Test that providing template names generates an ItemNotFound
        # error (attribute templates are not supported by the engine).
        template_attribute = objects.TemplateAttribute(
            names=[
                attributes.Name.create(
                    'invalid',
                    enums.NameType.UNINTERPRETED_TEXT_STRING
                )
            ]
        )
        args = (template_attribute, )
        regex = "Attribute templates are not supported."
        six.assertRaisesRegex(
            self,
            exceptions.ItemNotFound,
            regex,
            e._process_template_attribute,
            *args
        )
        # Test that an unrecognized attribute generates an InvalidField error.
        name = attribute_factory.create_attribute(
            enums.AttributeType.NAME,
            attributes.Name.create(
                'Test Symmetric Key',
                enums.NameType.UNINTERPRETED_TEXT_STRING
            )
        )
        # Corrupt the attribute's name so the engine cannot recognize it.
        name.attribute_name.value = 'invalid'
        template_attribute = objects.TemplateAttribute(attributes=[name])
        args = (template_attribute, )
        regex = "The invalid attribute is unsupported."
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_template_attribute,
            *args
        )
        # Test that missing indices generate an InvalidField error.
        # Two Name attributes without indices are ambiguous.
        name_a = attribute_factory.create_attribute(
            enums.AttributeType.NAME,
            attributes.Name.create(
                'Test Symmetric Key',
                enums.NameType.UNINTERPRETED_TEXT_STRING
            )
        )
        name_b = attribute_factory.create_attribute(
            enums.AttributeType.NAME,
            attributes.Name.create(
                'Test Symmetric Key',
                enums.NameType.UNINTERPRETED_TEXT_STRING
            )
        )
        template_attribute = objects.TemplateAttribute(
            attributes=[name_a, name_b]
        )
        args = (template_attribute, )
        regex = "Attribute index missing from multivalued attribute."
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_template_attribute,
            *args
        )
        # Test that a non-zero index generates an InvalidField error.
        # The trailing 1 below is the attribute index.
        algorithm = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
            enums.CryptographicAlgorithm.AES,
            1
        )
        template_attribute = objects.TemplateAttribute(attributes=[algorithm])
        args = (template_attribute, )
        regex = "Non-zero attribute index found for single-valued attribute."
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_template_attribute,
            *args
        )
        # Test that setting multiple values for a single-value attribute
        # generates an IndexOutOfBounds error.
        algorithm_a = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
            enums.CryptographicAlgorithm.AES
        )
        algorithm_b = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
            enums.CryptographicAlgorithm.TRIPLE_DES
        )
        template_attribute = objects.TemplateAttribute(
            attributes=[algorithm_a, algorithm_b]
        )
        args = (template_attribute, )
        regex = (
            "Cannot set multiple instances of the Cryptographic Algorithm "
            "attribute."
        )
        six.assertRaisesRegex(
            self,
            exceptions.IndexOutOfBounds,
            regex,
            e._process_template_attribute,
            *args
        )
    def test_get_attributes_from_managed_object(self):
        """
        Test that multiple attributes can be retrieved from a given managed
        object.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        symmetric_key = pie_objects.SymmetricKey(
            enums.CryptographicAlgorithm.AES,
            0,
            b'',
            masks=[enums.CryptographicUsageMask.ENCRYPT,
                   enums.CryptographicUsageMask.DECRYPT]
        )
        symmetric_key.names = ['Name 1', 'Name 2']
        e._data_session.add(symmetric_key)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        # Request six attribute names, including one unknown ('invalid').
        result = e._get_attributes_from_managed_object(
            symmetric_key,
            ['Unique Identifier',
             'Name',
             'Cryptographic Algorithm',
             'Cryptographic Length',
             'Cryptographic Usage Mask',
             'invalid']
        )
        attribute_factory = factory.AttributeFactory()
        # Six attributes come back — presumably the two stored names each
        # yield their own Name attribute while 'invalid' yields nothing;
        # TODO confirm against _get_attributes_from_managed_object.
        self.assertEqual(6, len(result))
        attribute = attribute_factory.create_attribute(
            enums.AttributeType.UNIQUE_IDENTIFIER,
            '1'
        )
        self.assertIn(attribute, result)
        attribute = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
            enums.CryptographicAlgorithm.AES
        )
        self.assertIn(attribute, result)
        attribute = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
            0
        )
        self.assertIn(attribute, result)
        attribute = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
            [enums.CryptographicUsageMask.ENCRYPT,
             enums.CryptographicUsageMask.DECRYPT]
        )
        self.assertIn(attribute, result)
def test_get_attributes_from_managed_object_with_missing_attribute(self):
"""
Test that any exceptions are suppressed when attempting to retrieve
non-existent attributes from managed objects.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
symmetric_key = pie_objects.SymmetricKey(
enums.CryptographicAlgorithm.AES,
0,
b'',
masks=[enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
)
symmetric_key.names = ['Name 1', 'Name 2']
e._data_session.add(symmetric_key)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
e._get_attribute_from_managed_object = mock.Mock()
e._get_attribute_from_managed_object.side_effect = Exception
result = e._get_attributes_from_managed_object(
symmetric_key,
['Unique Identifier',
'Name',
'Cryptographic Algorithm',
'Cryptographic Length',
'Cryptographic Usage Mask',
'invalid']
)
self.assertEqual(0, len(result))
def test_get_attribute_from_managed_object(self):
"""
Test that an attribute can be retrieved from a given managed object.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
symmetric_key = pie_objects.SymmetricKey(
enums.CryptographicAlgorithm.AES,
0,
b'',
masks=[enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
)
certificate = pie_objects.X509Certificate(
b''
)
opaque_object = pie_objects.OpaqueObject(
b'',
enums.OpaqueDataType.NONE
)
e._data_session.add(symmetric_key)
e._data_session.add(certificate)
e._data_session.add(opaque_object)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
result = e._get_attribute_from_managed_object(
symmetric_key,
'Unique Identifier'
)
self.assertEqual('1', result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Name'
)
self.assertEqual(
[attributes.Name(
attributes.Name.NameValue('Symmetric Key'),
attributes.Name.NameType(
enums.NameType.UNINTERPRETED_TEXT_STRING
)
)],
result
)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Object Type'
)
self.assertEqual(enums.ObjectType.SYMMETRIC_KEY, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Cryptographic Algorithm'
)
self.assertEqual(enums.CryptographicAlgorithm.AES, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Cryptographic Length'
)
self.assertEqual(0, result)
result = e._get_attribute_from_managed_object(
certificate,
'Cryptographic Parameters'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'Cryptographic Domain Parameters'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'Certificate Type'
)
self.assertEqual(enums.CertificateTypeEnum.X_509, result)
result = e._get_attribute_from_managed_object(
certificate,
'Certificate Length'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'X.509 Certificate Identifier'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'X.509 Certificate Subject'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'X.509 Certificate Issuer'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'Certificate Identifier'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'Certificate Subject'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'Certificate Issuer'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'Digital Signature Algorithm'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
opaque_object,
'Digest'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Operation Policy Name'
)
self.assertEqual('default', result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Cryptographic Usage Mask'
)
self.assertEqual(
[enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT],
result
)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Lease Time'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Usage Limits'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'State'
)
self.assertEqual(enums.State.PRE_ACTIVE, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Initial Date'
)
self.assertIsNotNone(result)
self.assertIsInstance(result, six.integer_types)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Activation Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Process Start Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Protect Stop Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Deactivation Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Destroy Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Compromise Occurrence Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Compromise Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Revocation Reason'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Archive Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Object Group'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Fresh'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Link'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Application Specific Information'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Contact Information'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Last Change Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'invalid'
)
self.assertEqual(None, result)
def test_set_attributes_on_managed_object(self):
"""
Test that multiple attributes can be set on a given managed object.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
managed_object = pie_objects.SecretData(
b'',
enums.SecretDataType.PASSWORD
)
managed_object.names = []
attribute_factory = factory.AttributeFactory()
name = attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Secret Data',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
)
mask = attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
]
)
template_attribute = objects.TemplateAttribute(
attributes=[name, mask]
)
object_attributes = e._process_template_attribute(template_attribute)
self.assertEqual([], managed_object.names)
self.assertEqual([], managed_object.cryptographic_usage_masks)
e._set_attributes_on_managed_object(
managed_object,
object_attributes
)
self.assertEqual(['Test Secret Data'], managed_object.names)
self.assertEqual(
[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
],
managed_object.cryptographic_usage_masks
)
def test_set_attributes_on_managed_object_attribute_mismatch(self):
"""
Test that an InvalidField error is generated when attempting to set
an attribute that is not applicable for a given managed object.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
managed_object = pie_objects.OpaqueObject(
b'',
enums.OpaqueDataType.NONE
)
attribute_factory = factory.AttributeFactory()
mask = attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
]
)
template_attribute = objects.TemplateAttribute(attributes=[mask])
object_attributes = e._process_template_attribute(template_attribute)
args = (managed_object, object_attributes)
regex = (
"Cannot set Cryptographic Usage Mask attribute on OpaqueData "
"object."
)
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._set_attributes_on_managed_object,
*args
)
def test_set_attribute_on_managed_object(self):
"""
Test that various attributes can be set correctly on a given
managed object.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
attribute_factory = factory.AttributeFactory()
name = attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Symmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
)
algorithm = attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.AES
)
length = attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
0
)
mask = attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
]
)
managed_object = pie_objects.SymmetricKey(
enums.CryptographicAlgorithm.AES,
0,
b''
)
managed_object.names = []
self.assertEqual([], managed_object.names)
self.assertEqual(
enums.CryptographicAlgorithm.AES,
managed_object.cryptographic_algorithm
)
self.assertEqual(0, managed_object.cryptographic_length)
self.assertEqual([], managed_object.cryptographic_usage_masks)
e._set_attribute_on_managed_object(
managed_object,
('Name', [name.attribute_value])
)
self.assertEqual(['Test Symmetric Key'], managed_object.names)
e._set_attribute_on_managed_object(
managed_object,
('Cryptographic Algorithm', algorithm.attribute_value)
)
self.assertEqual(
enums.CryptographicAlgorithm.AES,
managed_object.cryptographic_algorithm
)
e._set_attribute_on_managed_object(
managed_object,
('Cryptographic Length', length.attribute_value)
)
self.assertEqual(0, managed_object.cryptographic_length)
e._set_attribute_on_managed_object(
managed_object,
('Cryptographic Usage Mask', mask.attribute_value)
)
self.assertEqual(
[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
],
managed_object.cryptographic_usage_masks
)
def test_set_attribute_on_managed_object_unsupported_features(self):
"""
Test that the right errors are generated when unsupported features
are referenced while setting managed object attributes.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
attribute_factory = factory.AttributeFactory()
managed_object = pie_objects.SymmetricKey(
enums.CryptographicAlgorithm.AES,
8,
b'\x00'
)
# Test that multiple duplicate names cannot be set on an object.
name_a = attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Symmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
)
name_b = attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Symmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
)
args = (
managed_object,
('Name', [name_a.attribute_value, name_b.attribute_value])
)
regex = "Cannot set duplicate name values."
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._set_attribute_on_managed_object,
*args
)
# Test that a multivalued, unsupported attribute cannot be set on an
# object.
name_a = attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Symmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
)
name_b = attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Symmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
)
args = (
managed_object,
('Digest', [name_a.attribute_value, name_b.attribute_value])
)
regex = "The Digest attribute is unsupported."
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._set_attribute_on_managed_object,
*args
)
# Test that a set attribute cannot be overwritten.
length = attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
128
)
args = (
managed_object,
('Cryptographic Length', length.attribute_value)
)
regex = "Cannot overwrite the Cryptographic Length attribute."
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._set_attribute_on_managed_object,
*args
)
# Test that an unsupported attribute cannot be set.
object_group = attribute_factory.create_attribute(
enums.AttributeType.OBJECT_GROUP,
'Test Group'
)
args = (
managed_object,
('Object Group', object_group.attribute_value)
)
regex = "The Object Group attribute is unsupported."
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._set_attribute_on_managed_object,
*args
)
def test_is_allowed_by_operation_policy(self):
"""
Test that an allowed operation is correctly allowed by the operation
policy.
"""
e = engine.KmipEngine()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.ALLOW_OWNER
}
}
}
is_allowed = e._is_allowed_by_operation_policy(
'test',
'test',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertTrue(is_allowed)
def test_is_allowed_by_operation_policy_blocked(self):
"""
Test that an unallowed operation is correctly blocked by the operation
policy.
"""
e = engine.KmipEngine()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.ALLOW_OWNER
}
}
}
is_allowed = e._is_allowed_by_operation_policy(
'test',
'random',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
def test_is_allowed_by_operation_public(self):
"""
Test that a public operation is allowed by the operation policy.
"""
e = engine.KmipEngine()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.ALLOW_ALL
}
}
}
is_allowed = e._is_allowed_by_operation_policy(
'test',
'test',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertTrue(is_allowed)
is_allowed = e._is_allowed_by_operation_policy(
'test',
'random',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertTrue(is_allowed)
def test_is_allowed_by_operation_block_all(self):
"""
Test that a blocked operation is blocked by the operation policy.
"""
e = engine.KmipEngine()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.DISALLOW_ALL
}
}
}
is_allowed = e._is_allowed_by_operation_policy(
'test',
'test',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
is_allowed = e._is_allowed_by_operation_policy(
'test',
'random',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
def test_is_allowed_by_operation_safety_check(self):
"""
Test that an unknown operation is blocked by the operation policy.
"""
e = engine.KmipEngine()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: 'unknown value'
}
}
}
is_allowed = e._is_allowed_by_operation_policy(
'test',
'test',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
is_allowed = e._is_allowed_by_operation_policy(
'test',
'random',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
def test_is_allowed_by_operation_policy_nonexistent_policy(self):
"""
Test that a check with a non-existent policy yields a logging warning
and a blocked operation.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
policy = 'nonexistent-policy'
is_allowed = e._is_allowed_by_operation_policy(
policy,
'test',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
e._logger.warning.assert_called_once_with(
"The '{0}' policy does not exist.".format(policy)
)
def test_is_allowed_by_operation_policy_not_object_applicable(self):
"""
Test that a check for an object with a non-applicable policy yields
a logging warning and a blocked operation.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.ALLOW_OWNER
}
}
}
policy = 'test'
object_type = enums.ObjectType.PRIVATE_KEY
is_allowed = e._is_allowed_by_operation_policy(
policy,
'test',
'test',
object_type,
enums.Operation.GET
)
self.assertFalse(is_allowed)
e._logger.warning.assert_called_once_with(
"The '{0}' policy does not apply to {1} objects.".format(
policy,
e._get_enum_string(object_type)
)
)
def test_is_allowed_by_operation_policy_not_applicable(self):
"""
Test that a check with a non-applicable policy yields a logging
warning and a blocked operation.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.ALLOW_OWNER
}
}
}
policy = 'test'
object_type = enums.ObjectType.SYMMETRIC_KEY
operation = enums.Operation.CREATE
is_allowed = e._is_allowed_by_operation_policy(
policy,
'test',
'test',
object_type,
operation
)
self.assertFalse(is_allowed)
e._logger.warning.assert_called_once_with(
"The '{0}' policy does not apply to {1} operations on {2} "
"objects.".format(
policy,
e._get_enum_string(operation),
e._get_enum_string(object_type)
)
)
def test_get_object_with_access_controls(self):
"""
Test that an unallowed object access request is handled correctly.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
e._client_identity = 'test'
obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
obj_a._owner = 'admin'
e._data_session.add(obj_a)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
id_a = str(obj_a.unique_identifier)
# Test by specifying the ID of the object to retrieve and the
# operation context.
args = [id_a, enums.Operation.GET]
six.assertRaisesRegex(
self,
exceptions.ItemNotFound,
"Could not locate object: {0}".format(id_a),
e._get_object_with_access_controls,
*args
)
def test_create(self):
"""
Test that a Create request can be processed correctly.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
attribute_factory = factory.AttributeFactory()
# Build Create request
object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
template_attribute = objects.TemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Symmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.AES
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
256
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
]
),
attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
'test'
)
]
)
payload = create.CreateRequestPayload(
object_type,
template_attribute
)
response_payload = e._process_create(payload)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
e._logger.info.assert_any_call(
"Processing operation: Create"
)
uid = response_payload.unique_identifier.value
self.assertEqual('1', uid)
# Retrieve the stored object and verify all attributes were set
# appropriately.
symmetric_key = e._data_session.query(
pie_objects.SymmetricKey
).filter(
pie_objects.ManagedObject.unique_identifier == uid
).one()
self.assertEqual(
enums.KeyFormatType.RAW,
symmetric_key.key_format_type
)
self.assertEqual(1, len(symmetric_key.names))
self.assertIn('Test Symmetric Key', symmetric_key.names)
self.assertEqual(256, len(symmetric_key.value) * 8)
self.assertEqual(
enums.CryptographicAlgorithm.AES,
symmetric_key.cryptographic_algorithm
)
self.assertEqual(256, symmetric_key.cryptographic_length)
self.assertEqual(2, len(symmetric_key.cryptographic_usage_masks))
self.assertIn(
enums.CryptographicUsageMask.ENCRYPT,
symmetric_key.cryptographic_usage_masks
)
self.assertIn(
enums.CryptographicUsageMask.DECRYPT,
symmetric_key.cryptographic_usage_masks
)
self.assertEqual('test', symmetric_key.operation_policy_name)
self.assertIsNotNone(symmetric_key.initial_date)
self.assertNotEqual(0, symmetric_key.initial_date)
self.assertEqual(uid, e._id_placeholder)
def test_create_unsupported_object_type(self):
"""
Test that an InvalidField error is generated when attempting to
create an unsupported object type.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
object_type = attributes.ObjectType(enums.ObjectType.PUBLIC_KEY)
payload = create.CreateRequestPayload(
object_type
)
args = (payload, )
regex = "Cannot create a PublicKey object with the Create operation."
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._process_create,
*args
)
e._logger.info.assert_any_call(
"Processing operation: Create"
)
def test_create_omitting_attributes(self):
"""
Test that InvalidField errors are generated when trying to create
a symmetric key without required attributes.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
attribute_factory = factory.AttributeFactory()
# Test the error for omitting the Cryptographic Algorithm
object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
template_attribute = objects.TemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Symmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
256
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
]
)
]
)
payload = create.CreateRequestPayload(
object_type,
template_attribute
)
args = (payload, )
regex = (
"The cryptographic algorithm must be specified as an attribute."
)
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._process_create,
*args
)
e._logger.info.assert_any_call(
"Processing operation: Create"
)
e._logger.reset_mock()
# Test the error for omitting the Cryptographic Length
object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
template_attribute = objects.TemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Symmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.AES
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
]
)
]
)
payload = create.CreateRequestPayload(
object_type,
template_attribute
)
args = (payload, )
regex = (
"The cryptographic length must be specified as an attribute."
)
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._process_create,
*args
)
e._logger.info.assert_any_call(
"Processing operation: Create"
)
e._logger.reset_mock()
# Test the error for omitting the Cryptographic Usage Mask
object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
template_attribute = objects.TemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Symmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.AES
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
256
)
]
)
payload = create.CreateRequestPayload(
object_type,
template_attribute
)
args = (payload, )
regex = (
"The cryptographic usage mask must be specified as an attribute."
)
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._process_create,
*args
)
e._logger.info.assert_any_call(
"Processing operation: Create"
)
e._logger.reset_mock()
def test_create_key_pair(self):
"""
Test that a CreateKeyPair request can be processed correctly.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
attribute_factory = factory.AttributeFactory()
common_template = objects.CommonTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Asymmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.RSA
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
2048
)
]
)
public_template = objects.PublicKeyTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT
]
)
]
)
private_template = objects.PrivateKeyTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.DECRYPT
]
)
]
)
payload = create_key_pair.CreateKeyPairRequestPayload(
common_template,
private_template,
public_template
)
response_payload = e._process_create_key_pair(payload)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
e._logger.info.assert_any_call(
"Processing operation: CreateKeyPair"
)
public_id = response_payload.public_key_uuid.value
self.assertEqual('1', public_id)
private_id = response_payload.private_key_uuid.value
self.assertEqual('2', private_id)
# Retrieve the stored public key and verify all attributes were set
# appropriately.
public_key = e._data_session.query(
pie_objects.PublicKey
).filter(
pie_objects.ManagedObject.unique_identifier == public_id
).one()
self.assertEqual(
enums.KeyFormatType.PKCS_1,
public_key.key_format_type
)
self.assertEqual(1, len(public_key.names))
self.assertIn('Test Asymmetric Key', public_key.names)
self.assertEqual(
enums.CryptographicAlgorithm.RSA,
public_key.cryptographic_algorithm
)
self.assertEqual(2048, public_key.cryptographic_length)
self.assertEqual(1, len(public_key.cryptographic_usage_masks))
self.assertIn(
enums.CryptographicUsageMask.ENCRYPT,
public_key.cryptographic_usage_masks
)
self.assertEqual('default', public_key.operation_policy_name)
self.assertIsNotNone(public_key.initial_date)
self.assertNotEqual(0, public_key.initial_date)
# Retrieve the stored private key and verify all attributes were set
# appropriately.
private_key = e._data_session.query(
pie_objects.PrivateKey
).filter(
pie_objects.ManagedObject.unique_identifier == private_id
).one()
self.assertEqual(
enums.KeyFormatType.PKCS_8,
private_key.key_format_type
)
self.assertEqual(1, len(private_key.names))
self.assertIn('Test Asymmetric Key', private_key.names)
self.assertEqual(
enums.CryptographicAlgorithm.RSA,
private_key.cryptographic_algorithm
)
self.assertEqual(2048, private_key.cryptographic_length)
self.assertEqual(1, len(private_key.cryptographic_usage_masks))
self.assertIn(
enums.CryptographicUsageMask.DECRYPT,
private_key.cryptographic_usage_masks
)
self.assertEqual('default', private_key.operation_policy_name)
self.assertIsNotNone(private_key.initial_date)
self.assertNotEqual(0, private_key.initial_date)
self.assertEqual(private_id, e._id_placeholder)
def test_create_key_pair_omitting_attributes(self):
"""
Test that the right errors are generated when required attributes
are missing from a CreateKeyPair request.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
attribute_factory = factory.AttributeFactory()
# Test that a missing PublicKey CryptographicAlgorithm raises an error
common_template = objects.CommonTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Asymmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
)
]
)
public_template = objects.PublicKeyTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
2048
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT
]
)
]
)
private_template = objects.PrivateKeyTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.RSA
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
2048
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.DECRYPT
]
)
]
)
payload = create_key_pair.CreateKeyPairRequestPayload(
common_template,
private_template,
public_template
)
args = (payload, )
regex = (
"The cryptographic algorithm must be specified as an attribute "
"for the public key."
)
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._process_create_key_pair,
*args
)
e._logger.info.assert_any_call(
"Processing operation: CreateKeyPair"
)
e._logger.reset_mock()
# Test that a missing PrivateKey CryptographicAlgorithm raises an error
common_template = objects.CommonTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Asymmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
)
]
)
public_template = objects.PublicKeyTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.RSA
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
2048
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT
]
)
]
)
private_template = objects.PrivateKeyTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
2048
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.DECRYPT
]
)
]
)
payload = create_key_pair.CreateKeyPairRequestPayload(
common_template,
private_template,
public_template
)
args = (payload, )
regex = (
"The cryptographic algorithm must be specified as an attribute "
"for the private key."
)
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._process_create_key_pair,
*args
)
e._logger.info.assert_any_call(
"Processing operation: CreateKeyPair"
)
e._logger.reset_mock()
# Test that a missing PublicKey CryptographicLength raises an error
common_template = objects.CommonTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Asymmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
)
]
)
public_template = objects.PublicKeyTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.RSA
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT
]
)
]
)
private_template = objects.PrivateKeyTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.RSA
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
2048
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.DECRYPT
]
)
]
)
payload = create_key_pair.CreateKeyPairRequestPayload(
common_template,
private_template,
public_template
)
args = (payload, )
regex = (
"The cryptographic length must be specified as an attribute for "
"the public key."
)
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._process_create_key_pair,
*args
)
e._logger.info.assert_any_call(
"Processing operation: CreateKeyPair"
)
e._logger.reset_mock()
# Test that a missing PrivateKey CryptographicLength raises an error
common_template = objects.CommonTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Asymmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
)
]
)
public_template = objects.PublicKeyTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.RSA
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
2048
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT
]
)
]
)
private_template = objects.PrivateKeyTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.RSA
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.DECRYPT
]
)
]
)
payload = create_key_pair.CreateKeyPairRequestPayload(
common_template,
private_template,
public_template
)
args = (payload, )
regex = (
"The cryptographic length must be specified as an attribute for "
"the private key."
)
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._process_create_key_pair,
*args
)
e._logger.info.assert_any_call(
"Processing operation: CreateKeyPair"
)
e._logger.reset_mock()
# Test that a missing PublicKey CryptographicUsageMask raises an error
common_template = objects.CommonTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Asymmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
)
]
)
public_template = objects.PublicKeyTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.RSA
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
2048
)
]
)
private_template = objects.PrivateKeyTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.RSA
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
2048
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.DECRYPT
]
)
]
)
payload = create_key_pair.CreateKeyPairRequestPayload(
common_template,
private_template,
public_template
)
args = (payload, )
regex = (
"The cryptographic usage mask must be specified as an attribute "
"for the public key."
)
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._process_create_key_pair,
*args
)
e._logger.info.assert_any_call(
"Processing operation: CreateKeyPair"
)
e._logger.reset_mock()
# Test that a missing PrivateKey CryptographicUsageMask raises an error
common_template = objects.CommonTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Asymmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
)
]
)
public_template = objects.PublicKeyTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.RSA
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
2048
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT
]
)
]
)
private_template = objects.PrivateKeyTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.RSA
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
2048
)
]
)
payload = create_key_pair.CreateKeyPairRequestPayload(
common_template,
private_template,
public_template
)
args = (payload, )
regex = (
"The cryptographic usage mask must be specified as an attribute "
"for the private key."
)
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._process_create_key_pair,
*args
)
e._logger.info.assert_any_call(
"Processing operation: CreateKeyPair"
)
e._logger.reset_mock()
    def test_create_key_pair_mismatched_attributes(self):
        """
        Test that the right errors are generated when required attributes
        are mismatched in a CreateKeyPair request.

        Covers two mismatch cases: differing cryptographic algorithms and
        differing cryptographic lengths between the public and private key
        templates.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        attribute_factory = factory.AttributeFactory()
        # Test that mismatched CryptographicAlgorithms raise an error.
        common_template = objects.CommonTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Asymmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                )
            ]
        )
        public_template = objects.PublicKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.ENCRYPT
                    ]
                )
            ]
        )
        # The private template deliberately uses DSA while the public
        # template above uses RSA, triggering the algorithm mismatch check.
        private_template = objects.PrivateKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.DSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.DECRYPT
                    ]
                )
            ]
        )
        payload = create_key_pair.CreateKeyPairRequestPayload(
            common_template,
            private_template,
            public_template
        )
        args = (payload, )
        regex = (
            "The public and private key algorithms must be the same."
        )
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_create_key_pair,
            *args
        )
        e._logger.info.assert_any_call(
            "Processing operation: CreateKeyPair"
        )
        e._logger.reset_mock()
        # Test that mismatched CryptographicLengths raise an error.
        common_template = objects.CommonTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Asymmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                )
            ]
        )
        public_template = objects.PublicKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.ENCRYPT
                    ]
                )
            ]
        )
        # Here both templates use RSA, but the private key requests 4096
        # bits versus the public key's 2048, triggering the length check.
        private_template = objects.PrivateKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    4096
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.DECRYPT
                    ]
                )
            ]
        )
        payload = create_key_pair.CreateKeyPairRequestPayload(
            common_template,
            private_template,
            public_template
        )
        args = (payload, )
        regex = (
            "The public and private key lengths must be the same."
        )
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_create_key_pair,
            *args
        )
        e._logger.info.assert_any_call(
            "Processing operation: CreateKeyPair"
        )
        e._logger.reset_mock()
    def test_register(self):
        """
        Test that a Register request can be processed correctly.

        Registers a raw AES symmetric key and verifies that the stored
        managed object carries every attribute from the request template,
        and that the ID placeholder is updated to the new object's ID.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        attribute_factory = factory.AttributeFactory()
        # Build a SymmetricKey for registration.
        object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
        template_attribute = objects.TemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Symmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.AES
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    128
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.ENCRYPT,
                        enums.CryptographicUsageMask.DECRYPT
                    ]
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.OPERATION_POLICY_NAME,
                    'test'
                )
            ]
        )
        # 16 zero bytes: a 128-bit key matching the CRYPTOGRAPHIC_LENGTH
        # attribute above.
        key_bytes = (
            b'\x00\x00\x00\x00\x00\x00\x00\x00'
            b'\x00\x00\x00\x00\x00\x00\x00\x00'
        )
        secret = secrets.SymmetricKey(
            key_block=objects.KeyBlock(
                key_format_type=misc.KeyFormatType(enums.KeyFormatType.RAW),
                key_value=objects.KeyValue(
                    key_material=objects.KeyMaterial(key_bytes)
                ),
                cryptographic_algorithm=attributes.CryptographicAlgorithm(
                    enums.CryptographicAlgorithm.AES
                ),
                cryptographic_length=attributes.CryptographicLength(128)
            )
        )
        payload = register.RegisterRequestPayload(
            object_type=object_type,
            template_attribute=template_attribute,
            secret=secret
        )
        response_payload = e._process_register(payload)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: Register"
        )
        # The first object registered against a fresh store gets ID '1'.
        uid = response_payload.unique_identifier.value
        self.assertEqual('1', uid)
        # Retrieve the stored object and verify all attributes were set
        # appropriately.
        symmetric_key = e._data_session.query(
            pie_objects.SymmetricKey
        ).filter(
            pie_objects.ManagedObject.unique_identifier == uid
        ).one()
        self.assertEqual(
            enums.KeyFormatType.RAW,
            symmetric_key.key_format_type
        )
        self.assertEqual(1, len(symmetric_key.names))
        self.assertIn('Test Symmetric Key', symmetric_key.names)
        self.assertEqual(key_bytes, symmetric_key.value)
        self.assertEqual(
            enums.CryptographicAlgorithm.AES,
            symmetric_key.cryptographic_algorithm
        )
        self.assertEqual(128, symmetric_key.cryptographic_length)
        self.assertEqual(2, len(symmetric_key.cryptographic_usage_masks))
        self.assertIn(
            enums.CryptographicUsageMask.ENCRYPT,
            symmetric_key.cryptographic_usage_masks
        )
        self.assertIn(
            enums.CryptographicUsageMask.DECRYPT,
            symmetric_key.cryptographic_usage_masks
        )
        self.assertEqual('test', symmetric_key.operation_policy_name)
        # The initial date should be stamped on registration.
        self.assertIsNotNone(symmetric_key.initial_date)
        self.assertNotEqual(0, symmetric_key.initial_date)
        self.assertEqual(uid, e._id_placeholder)
    def test_register_unsupported_object_type(self):
        """
        Test that an InvalidField error is generated when attempting to
        register an unsupported object type.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        # SplitKey is not a supported object type for registration.
        object_type = attributes.ObjectType(enums.ObjectType.SPLIT_KEY)
        payload = register.RegisterRequestPayload(object_type=object_type)
        args = (payload, )
        regex = "The SplitKey object type is not supported."
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_register,
            *args
        )
    def test_request_omitting_secret(self):
        """
        Test that an InvalidField error is generated when trying to register
        a secret in absentia.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        # The payload deliberately omits the secret itself.
        object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
        payload = register.RegisterRequestPayload(object_type=object_type)
        args = (payload, )
        regex = "Cannot register a secret in absentia."
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_register,
            *args
        )
    def test_locate(self):
        """
        Test that a Locate request can be processed correctly.

        Verifies that Locate returns no IDs for an empty store, then one ID
        after the first object is added, then both IDs after a second object
        is added.
        """
        # TODO Need add more extensive tests after locate operaton is
        # fully supported
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
        obj_b = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
        # locate should return nothing at beginning
        payload = locate.LocateRequestPayload()
        response_payload = e._process_locate(payload)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: Locate"
        )
        self.assertEqual(
            len(response_payload.unique_identifiers),
            0
        )
        # Add the first obj and test the locate
        e._data_session.add(obj_a)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        id_a = str(obj_a.unique_identifier)
        payload = locate.LocateRequestPayload()
        e._logger.reset_mock()
        response_payload = e._process_locate(payload)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: Locate"
        )
        self.assertEqual(
            len(response_payload.unique_identifiers),
            1
        )
        self.assertEqual(
            id_a,
            response_payload.unique_identifiers[0].value
        )
        # Add the second obj and test the locate
        e._data_session.add(obj_b)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        id_b = str(obj_b.unique_identifier)
        payload = locate.LocateRequestPayload()
        e._logger.reset_mock()
        response_payload = e._process_locate(payload)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: Locate"
        )
        self.assertEqual(
            len(response_payload.unique_identifiers),
            2
        )
        # Order of the returned identifiers is not guaranteed, so check
        # membership rather than position.
        self.assertIn(
            id_a,
            [uid.value for uid in response_payload.unique_identifiers]
        )
        self.assertIn(
            id_b,
            [uid.value for uid in response_payload.unique_identifiers]
        )
def test_get(self):
"""
Test that a Get request can be processed correctly.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
obj_b = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
e._data_session.add(obj_a)
e._data_session.add(obj_b)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
id_a = str(obj_a.unique_identifier)
id_b = str(obj_b.unique_identifier)
# Test by specifying the ID of the object to get.
payload = get.GetRequestPayload(
unique_identifier=attributes.UniqueIdentifier(id_a)
)
response_payload = e._process_get(payload)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
e._logger.info.assert_any_call(
"Processing operation: Get"
)
self.assertEqual(
enums.ObjectType.OPAQUE_DATA,
response_payload.object_type.value
)
self.assertEqual(str(id_a), response_payload.unique_identifier.value)
self.assertIsInstance(response_payload.secret, secrets.OpaqueObject)
self.assertEqual(
enums.OpaqueDataType.NONE,
response_payload.secret.opaque_data_type.value
)
self.assertEqual(
b'',
response_payload.secret.opaque_data_value.value
)
e._data_session.commit()
e._data_store_session_factory()
e._logger.reset_mock()
e._id_placeholder = str(id_b)
# Test by using the ID placeholder to specify the object to get.
payload = get.GetRequestPayload()
response_payload = e._process_get(payload)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
e._logger.info.assert_any_call(
"Processing operation: Get"
)
self.assertEqual(
enums.ObjectType.OPAQUE_DATA,
response_payload.object_type.value
)
self.assertEqual(str(id_b), response_payload.unique_identifier.value)
self.assertIsInstance(response_payload.secret, secrets.OpaqueObject)
self.assertEqual(
enums.OpaqueDataType.NONE,
response_payload.secret.opaque_data_type.value
)
self.assertEqual(
b'',
response_payload.secret.opaque_data_value.value
)
e._data_session.commit()
    def test_get_with_unsupported_features(self):
        """
        Test that the right errors are generated when unsupported features
        are used in a Get request.

        Covers key compression (KeyCompressionTypeNotSupported) and key
        wrapping (PermissionDenied).
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        # Test that specifying the key compression type generates an error.
        payload = get.GetRequestPayload(
            key_compression_type=get.GetRequestPayload.KeyCompressionType(
                enums.KeyCompressionType.EC_PUBLIC_KEY_TYPE_UNCOMPRESSED
            )
        )
        args = (payload, )
        regex = "Key compression is not supported."
        six.assertRaisesRegex(
            self,
            exceptions.KeyCompressionTypeNotSupported,
            regex,
            e._process_get,
            *args
        )
        e._logger.info.assert_any_call(
            "Processing operation: Get"
        )
        e._logger.reset_mock()
        # Test that specifying the key wrapping specification generates an
        # error.
        payload = get.GetRequestPayload(
            key_wrapping_specification=objects.KeyWrappingSpecification()
        )
        args = (payload, )
        regex = "Key wrapping is not supported."
        six.assertRaisesRegex(
            self,
            exceptions.PermissionDenied,
            regex,
            e._process_get,
            *args
        )
        e._logger.info.assert_any_call(
            "Processing operation: Get"
        )
    def test_get_with_key_format_type(self):
        """
        Test that the key format type is handled properly in a Get request.

        Covers three cases: a matching format (RAW on a raw key), a format
        that would require an unsupported conversion (RAW to OPAQUE), and a
        format request against an object type that has no key format.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        obj_a = pie_objects.SymmetricKey(
            enums.CryptographicAlgorithm.AES,
            0,
            b''
        )
        e._data_session.add(obj_a)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        id_a = str(obj_a.unique_identifier)
        # Test that a key can be retrieved with the right key format.
        payload = get.GetRequestPayload(
            unique_identifier=attributes.UniqueIdentifier(id_a),
            key_format_type=get.GetRequestPayload.KeyFormatType(
                enums.KeyFormatType.RAW
            )
        )
        response_payload = e._process_get(payload)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: Get"
        )
        self.assertIsInstance(response_payload.secret, secrets.SymmetricKey)
        self.assertEqual(
            enums.CryptographicAlgorithm.AES,
            response_payload.secret.key_block.cryptographic_algorithm.value
        )
        self.assertEqual(
            0,
            response_payload.secret.key_block.cryptographic_length.value
        )
        self.assertEqual(
            b'',
            response_payload.secret.key_block.key_value.key_material.value
        )
        self.assertEqual(
            enums.KeyFormatType.RAW,
            response_payload.secret.key_block.key_format_type.value
        )
        # Test that an error is generated when a key format conversion is
        # required.
        e._logger.reset_mock()
        payload = get.GetRequestPayload(
            unique_identifier=attributes.UniqueIdentifier(id_a),
            key_format_type=get.GetRequestPayload.KeyFormatType(
                enums.KeyFormatType.OPAQUE
            )
        )
        args = (payload, )
        regex = "Key format conversion from RAW to OPAQUE is unsupported."
        six.assertRaisesRegex(
            self,
            exceptions.KeyFormatTypeNotSupported,
            regex,
            e._process_get,
            *args
        )
        e._logger.info.assert_any_call(
            "Processing operation: Get"
        )
        # Test that an error is generated when a key format is requested but
        # does not apply to the given managed object.
        e._data_session = e._data_store_session_factory()
        e._logger.reset_mock()
        # Opaque objects have no key block, so no key format applies.
        obj_b = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
        e._data_session.add(obj_b)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        id_b = str(obj_b.unique_identifier)
        payload = get.GetRequestPayload(
            unique_identifier=attributes.UniqueIdentifier(id_b),
            key_format_type=get.GetRequestPayload.KeyFormatType(
                enums.KeyFormatType.RAW
            )
        )
        args = (payload, )
        regex = "Key format is not applicable to the specified object."
        six.assertRaisesRegex(
            self,
            exceptions.KeyFormatTypeNotSupported,
            regex,
            e._process_get,
            *args
        )
        e._logger.info.assert_any_call(
            "Processing operation: Get"
        )
    def test_get_not_allowed_by_policy(self):
        """
        Test that an unallowed request is handled correctly by Get.

        The object belongs to 'admin' while the client identity is 'test',
        so policy hides the object and the engine reports ItemNotFound
        rather than revealing that the object exists.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        e._client_identity = 'test'
        obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
        obj_a._owner = 'admin'
        e._data_session.add(obj_a)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        id_a = str(obj_a.unique_identifier)
        payload = get.GetRequestPayload(
            unique_identifier=attributes.UniqueIdentifier(id_a)
        )
        # Test by specifying the ID of the object to get.
        args = [payload]
        six.assertRaisesRegex(
            self,
            exceptions.ItemNotFound,
            "Could not locate object: {0}".format(id_a),
            e._process_get,
            *args
        )
    def test_get_attributes(self):
        """
        Test that a GetAttributes request can be processed correctly.

        Requests two named attributes from a stored symmetric key and
        verifies that exactly those two attributes come back.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        secret = pie_objects.SymmetricKey(
            enums.CryptographicAlgorithm.AES,
            0,
            b''
        )
        e._data_session.add(secret)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        payload = get_attributes.GetAttributesRequestPayload(
            unique_identifier='1',
            attribute_names=['Object Type', 'Cryptographic Algorithm']
        )
        response_payload = e._process_get_attributes(payload)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: GetAttributes"
        )
        self.assertEqual(
            '1',
            response_payload.unique_identifier
        )
        # Only the two requested attributes should be returned.
        self.assertEqual(
            2,
            len(response_payload.attributes)
        )
        attribute_factory = factory.AttributeFactory()
        attribute = attribute_factory.create_attribute(
            enums.AttributeType.OBJECT_TYPE,
            enums.ObjectType.SYMMETRIC_KEY
        )
        self.assertIn(attribute, response_payload.attributes)
        attribute = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
            enums.CryptographicAlgorithm.AES
        )
        self.assertIn(attribute, response_payload.attributes)
    def test_get_attributes_with_no_arguments(self):
        """
        Test that a GetAttributes request with no arguments can be processed
        correctly.

        With no unique identifier the engine falls back to the ID
        placeholder, and with no attribute names it returns all attributes
        of the object.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        secret = pie_objects.SymmetricKey(
            enums.CryptographicAlgorithm.AES,
            0,
            b''
        )
        e._data_session.add(secret)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._id_placeholder = '1'
        payload = get_attributes.GetAttributesRequestPayload()
        response_payload = e._process_get_attributes(payload)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: GetAttributes"
        )
        self.assertEqual(
            '1',
            response_payload.unique_identifier
        )
        # All nine attributes of a fresh symmetric key should be returned.
        self.assertEqual(
            9,
            len(response_payload.attributes)
        )
        attribute_factory = factory.AttributeFactory()
        attribute = attribute_factory.create_attribute(
            enums.AttributeType.OBJECT_TYPE,
            enums.ObjectType.SYMMETRIC_KEY
        )
        self.assertIn(attribute, response_payload.attributes)
        attribute = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
            enums.CryptographicAlgorithm.AES
        )
        self.assertIn(attribute, response_payload.attributes)
        attribute = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
            0
        )
        self.assertIn(attribute, response_payload.attributes)
        attribute = attribute_factory.create_attribute(
            enums.AttributeType.OPERATION_POLICY_NAME,
            'default'
        )
        self.assertIn(attribute, response_payload.attributes)
        attribute = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
            []
        )
        self.assertIn(attribute, response_payload.attributes)
        attribute = attribute_factory.create_attribute(
            enums.AttributeType.STATE,
            enums.State.PRE_ACTIVE
        )
        self.assertIn(attribute, response_payload.attributes)
        attribute = attribute_factory.create_attribute(
            enums.AttributeType.UNIQUE_IDENTIFIER,
            '1'
        )
        self.assertIn(attribute, response_payload.attributes)
    def test_get_attributes_not_allowed_by_policy(self):
        """
        Test that an unallowed request is handled correctly by GetAttributes.

        The object belongs to 'admin' while the client identity is 'test',
        so policy hides the object and ItemNotFound is raised.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        e._client_identity = 'test'
        obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
        obj_a._owner = 'admin'
        e._data_session.add(obj_a)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        id_a = str(obj_a.unique_identifier)
        payload = get_attributes.GetAttributesRequestPayload(
            unique_identifier=id_a
        )
        # Test by specifying the ID of the object whose attributes should
        # be retrieved.
        args = [payload]
        self.assertRaisesRegex(
            exceptions.ItemNotFound,
            "Could not locate object: {0}".format(id_a),
            e._process_get_attributes,
            *args
        )
    def test_get_attribute_list(self):
        """
        Test that a GetAttributeList request can be processed correctly.

        Verifies that all nine attribute names of a stored symmetric key
        are returned.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        secret = pie_objects.SymmetricKey(
            enums.CryptographicAlgorithm.AES,
            0,
            b''
        )
        e._data_session.add(secret)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        payload = get_attribute_list.GetAttributeListRequestPayload(
            unique_identifier='1'
        )
        response_payload = e._process_get_attribute_list(payload)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: GetAttributeList"
        )
        self.assertEqual(
            '1',
            response_payload.unique_identifier
        )
        self.assertEqual(
            9,
            len(response_payload.attribute_names)
        )
        self.assertIn(
            "Object Type",
            response_payload.attribute_names
        )
        self.assertIn(
            "Name",
            response_payload.attribute_names
        )
        self.assertIn(
            "Cryptographic Algorithm",
            response_payload.attribute_names
        )
        self.assertIn(
            "Cryptographic Length",
            response_payload.attribute_names
        )
        self.assertIn(
            "Operation Policy Name",
            response_payload.attribute_names
        )
        self.assertIn(
            "Cryptographic Usage Mask",
            response_payload.attribute_names
        )
        self.assertIn(
            "State",
            response_payload.attribute_names
        )
        self.assertIn(
            "Unique Identifier",
            response_payload.attribute_names
        )
        self.assertIn(
            "Initial Date",
            response_payload.attribute_names
        )
    def test_get_attribute_list_with_no_arguments(self):
        """
        Test that a GetAttributeList request with no arguments can be
        processed correctly.

        With no unique identifier the engine falls back to the ID
        placeholder and returns all attribute names of that object.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        secret = pie_objects.SymmetricKey(
            enums.CryptographicAlgorithm.AES,
            0,
            b''
        )
        e._data_session.add(secret)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._id_placeholder = '1'
        payload = get_attribute_list.GetAttributeListRequestPayload()
        response_payload = e._process_get_attribute_list(payload)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: GetAttributeList"
        )
        self.assertEqual(
            '1',
            response_payload.unique_identifier
        )
        self.assertEqual(
            9,
            len(response_payload.attribute_names)
        )
        self.assertIn(
            "Object Type",
            response_payload.attribute_names
        )
        self.assertIn(
            "Name",
            response_payload.attribute_names
        )
        self.assertIn(
            "Cryptographic Algorithm",
            response_payload.attribute_names
        )
        self.assertIn(
            "Cryptographic Length",
            response_payload.attribute_names
        )
        self.assertIn(
            "Operation Policy Name",
            response_payload.attribute_names
        )
        self.assertIn(
            "Cryptographic Usage Mask",
            response_payload.attribute_names
        )
        self.assertIn(
            "State",
            response_payload.attribute_names
        )
        self.assertIn(
            "Unique Identifier",
            response_payload.attribute_names
        )
        self.assertIn(
            "Initial Date",
            response_payload.attribute_names
        )
    def test_get_attribute_list_not_allowed_by_policy(self):
        """
        Test that an unallowed request is handled correctly by
        GetAttributeList.

        The object belongs to 'admin' while the client identity is 'test',
        so policy hides the object and ItemNotFound is raised.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        e._client_identity = 'test'
        obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
        obj_a._owner = 'admin'
        e._data_session.add(obj_a)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        id_a = str(obj_a.unique_identifier)
        payload = get_attribute_list.GetAttributeListRequestPayload(
            unique_identifier=id_a
        )
        # Test by specifying the ID of the object whose attributes should
        # be retrieved.
        args = [payload]
        self.assertRaisesRegex(
            exceptions.ItemNotFound,
            "Could not locate object: {0}".format(id_a),
            e._process_get_attribute_list,
            *args
        )
def test_activate(self):
"""
Test that an Activate request can be processed correctly.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
managed_object = pie_objects.SymmetricKey(
enums.CryptographicAlgorithm.AES,
0,
b''
)
e._data_session.add(managed_object)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
self.assertEqual(enums.State.PRE_ACTIVE, managed_object.state)
object_id = str(managed_object.unique_identifier)
# Test by specifying the ID of the object to activate.
payload = activate.ActivateRequestPayload(
unique_identifier=attributes.UniqueIdentifier(object_id)
)
response_payload = e._process_activate(payload)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
e._logger.info.assert_any_call(
"Processing operation: Activate"
)
self.assertEqual(
str(object_id),
response_payload.unique_identifier.value
)
symmetric_key = e._data_session.query(
pie_objects.SymmetricKey
).filter(
pie_objects.ManagedObject.unique_identifier == object_id
).one()
self.assertEqual(enums.State.ACTIVE, symmetric_key.state)
args = (payload,)
regex = "The object state is not pre-active and cannot be activated."
self.assertRaisesRegexp(
exceptions.PermissionDenied,
regex,
e._process_activate,
*args
)
# Test that the ID placeholder can also be used to specify activation.
e._id_placeholder = str(object_id)
payload = activate.ActivateRequestPayload()
args = (payload,)
regex = "The object state is not pre-active and cannot be activated."
self.assertRaisesRegexp(
exceptions.PermissionDenied,
regex,
e._process_activate,
*args
)
def test_activate_on_static_object(self):
"""
Test that the right error is generated when an activation request is
received for an object that cannot be activated.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
managed_object = pie_objects.OpaqueObject(
b'',
enums.OpaqueDataType.NONE
)
e._data_session.add(managed_object)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
object_id = str(managed_object.unique_identifier)
# Test by specifying the ID of the object to activate.
payload = activate.ActivateRequestPayload(
unique_identifier=attributes.UniqueIdentifier(object_id)
)
args = (payload,)
name = enums.ObjectType.OPAQUE_DATA.name
regex = "An {0} object has no state and cannot be activated.".format(
''.join(
[x.capitalize() for x in name.split('_')]
)
)
self.assertRaisesRegexp(
exceptions.IllegalOperation,
regex,
e._process_activate,
*args
)
def test_activate_on_active_object(self):
"""
Test that the right error is generated when an activation request is
received for an object that is not pre-active.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
managed_object = pie_objects.SymmetricKey(
enums.CryptographicAlgorithm.AES,
0,
b''
)
managed_object.state = enums.State.ACTIVE
e._data_session.add(managed_object)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
object_id = str(managed_object.unique_identifier)
# Test by specifying the ID of the object to activate.
payload = activate.ActivateRequestPayload(
unique_identifier=attributes.UniqueIdentifier(object_id)
)
args = (payload,)
regex = "The object state is not pre-active and cannot be activated."
self.assertRaisesRegexp(
exceptions.PermissionDenied,
regex,
e._process_activate,
*args
)
    def test_activate_not_allowed_by_policy(self):
        """
        Test that an unallowed request is handled correctly by Activate.

        The object belongs to 'admin' while the client identity is 'test',
        so policy hides the object and ItemNotFound is raised.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        e._client_identity = 'test'
        obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
        obj_a._owner = 'admin'
        e._data_session.add(obj_a)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        id_a = str(obj_a.unique_identifier)
        payload = activate.ActivateRequestPayload(
            unique_identifier=attributes.UniqueIdentifier(id_a)
        )
        # Test by specifying the ID of the object to activate.
        args = [payload]
        self.assertRaisesRegex(
            exceptions.ItemNotFound,
            "Could not locate object: {0}".format(id_a),
            e._process_activate,
            *args
        )
def test_destroy(self):
"""
Test that a Destroy request can be processed correctly.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
obj_b = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
e._data_session.add(obj_a)
e._data_session.add(obj_b)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
id_a = str(obj_a.unique_identifier)
id_b = str(obj_b.unique_identifier)
# Test by specifying the ID of the object to destroy.
payload = destroy.DestroyRequestPayload(
unique_identifier=attributes.UniqueIdentifier(id_a)
)
response_payload = e._process_destroy(payload)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
e._logger.info.assert_any_call(
"Processing operation: Destroy"
)
self.assertEqual(str(id_a), response_payload.unique_identifier.value)
args = (payload, )
regex = "Could not locate object: {0}".format(id_a)
six.assertRaisesRegex(
self,
exceptions.ItemNotFound,
regex,
e._process_destroy,
*args
)
e._data_session.commit()
e._data_store_session_factory()
e._logger.reset_mock()
e._id_placeholder = str(id_b)
# Test by using the ID placeholder to specify the object to destroy.
payload = destroy.DestroyRequestPayload()
response_payload = e._process_destroy(payload)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
e._logger.info.assert_any_call(
"Processing operation: Destroy"
)
self.assertEqual(str(id_b), response_payload.unique_identifier.value)
args = (payload, )
regex = "Could not locate object: {0}".format(id_b)
six.assertRaisesRegex(
self,
exceptions.ItemNotFound,
regex,
e._process_destroy,
*args
)
e._data_session.commit()
    def test_destroy_not_allowed_by_policy(self):
        """
        Test that an unallowed request is handled correctly by Destroy.

        The object belongs to 'admin' while the client identity is 'test',
        so policy hides the object and ItemNotFound is raised.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        e._client_identity = 'test'
        obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
        obj_a._owner = 'admin'
        e._data_session.add(obj_a)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        id_a = str(obj_a.unique_identifier)
        payload = destroy.DestroyRequestPayload(
            unique_identifier=attributes.UniqueIdentifier(id_a)
        )
        # Test by specifying the ID of the object to destroy.
        args = [payload]
        six.assertRaisesRegex(
            self,
            exceptions.ItemNotFound,
            "Could not locate object: {0}".format(id_a),
            e._process_destroy,
            *args
        )
def test_query(self):
"""
Test that a Query request can be processed correctly, for different
versions of KMIP.
"""
e = engine.KmipEngine()
# Test for KMIP 1.0.
e._logger = mock.MagicMock()
e._protocol_version = contents.ProtocolVersion.create(1, 0)
payload = query.QueryRequestPayload([
misc.QueryFunction(enums.QueryFunction.QUERY_OPERATIONS),
misc.QueryFunction(enums.QueryFunction.QUERY_OBJECTS),
misc.QueryFunction(
enums.QueryFunction.QUERY_SERVER_INFORMATION
),
misc.QueryFunction(
enums.QueryFunction.QUERY_APPLICATION_NAMESPACES
),
misc.QueryFunction(enums.QueryFunction.QUERY_EXTENSION_LIST),
misc.QueryFunction(enums.QueryFunction.QUERY_EXTENSION_MAP)
])
result = e._process_query(payload)
e._logger.info.assert_called_once_with("Processing operation: Query")
self.assertIsInstance(result, query.QueryResponsePayload)
self.assertIsNotNone(result.operations)
self.assertEqual(9, len(result.operations))
self.assertEqual(
enums.Operation.CREATE,
result.operations[0].value
)
self.assertEqual(
enums.Operation.CREATE_KEY_PAIR,
result.operations[1].value
)
self.assertEqual(
enums.Operation.REGISTER,
result.operations[2].value
)
self.assertEqual(
enums.Operation.GET,
result.operations[3].value
)
self.assertEqual(
enums.Operation.GET_ATTRIBUTES,
result.operations[4].value
)
self.assertEqual(
enums.Operation.GET_ATTRIBUTE_LIST,
result.operations[5].value
)
self.assertEqual(
enums.Operation.ACTIVATE,
result.operations[6].value
)
self.assertEqual(
enums.Operation.DESTROY,
result.operations[7].value
)
self.assertEqual(
enums.Operation.QUERY,
result.operations[8].value
)
self.assertEqual(list(), result.object_types)
self.assertIsNotNone(result.vendor_identification)
self.assertEqual(
"PyKMIP {0} Software Server".format(kmip.__version__),
result.vendor_identification.value
)
self.assertIsNone(result.server_information)
self.assertEqual(list(), result.application_namespaces)
self.assertEqual(list(), result.extension_information)
# Test for KMIP 1.1.
e._logger = mock.MagicMock()
e._protocol_version = contents.ProtocolVersion.create(1, 1)
result = e._process_query(payload)
e._logger.info.assert_called_once_with("Processing operation: Query")
self.assertIsNotNone(result.operations)
self.assertEqual(10, len(result.operations))
self.assertEqual(
enums.Operation.DISCOVER_VERSIONS,
result.operations[-1].value
)
def test_discover_versions(self):
"""
Test that a DiscoverVersions request can be processed correctly for
different inputs.
"""
e = engine.KmipEngine()
# Test default request.
e._logger = mock.MagicMock()
payload = discover_versions.DiscoverVersionsRequestPayload()
result = e._process_discover_versions(payload)
e._logger.info.assert_called_once_with(
"Processing operation: DiscoverVersions"
)
self.assertIsInstance(
result,
discover_versions.DiscoverVersionsResponsePayload
)
self.assertIsNotNone(result.protocol_versions)
self.assertEqual(3, len(result.protocol_versions))
self.assertEqual(
contents.ProtocolVersion.create(1, 2),
result.protocol_versions[0]
)
self.assertEqual(
contents.ProtocolVersion.create(1, 1),
result.protocol_versions[1]
)
self.assertEqual(
contents.ProtocolVersion.create(1, 0),
result.protocol_versions[2]
)
# Test detailed request.
e._logger = mock.MagicMock()
payload = discover_versions.DiscoverVersionsRequestPayload([
contents.ProtocolVersion.create(1, 0)
])
result = e._process_discover_versions(payload)
e._logger.info.assert_called_once_with(
"Processing operation: DiscoverVersions"
)
self.assertIsNotNone(result.protocol_versions)
self.assertEqual(1, len(result.protocol_versions))
self.assertEqual(
contents.ProtocolVersion.create(1, 0),
result.protocol_versions[0]
)
# Test disjoint request.
e._logger = mock.MagicMock()
payload = discover_versions.DiscoverVersionsRequestPayload([
contents.ProtocolVersion.create(0, 1)
])
result = e._process_discover_versions(payload)
e._logger.info.assert_called_once_with(
"Processing operation: DiscoverVersions"
)
self.assertEqual([], result.protocol_versions)
def test_mac(self):
"""
Test that a MAC request can be processed correctly.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
e._cryptography_engine.logger = mock.MagicMock()
key = (b'\<KEY>'
b'\x00\x00\x00\x00\x00')
data = (b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A'
b'\x0B\x0C\x0D\x0E\x0F')
algorithm_a = enums.CryptographicAlgorithm.AES
algorithm_b = enums.CryptographicAlgorithm.HMAC_SHA512
obj = pie_objects.SymmetricKey(algorithm_a, 128, key)
e._data_session.add(obj)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
uuid = str(obj.unique_identifier)
cryptographic_parameters = attributes.CryptographicParameters(
cryptographic_algorithm=attributes.
CryptographicAlgorithm(algorithm_b)
)
# Verify when cryptographic_parameters is specified in request
payload = mac.MACRequestPayload(
unique_identifier=attributes.UniqueIdentifier(uuid),
cryptographic_parameters=cryptographic_parameters,
data=objects.Data(data)
)
response_payload = e._process_mac(payload)
e._logger.info.assert_any_call(
"Processing operation: MAC"
)
e._cryptography_engine.logger.info.assert_any_call(
"Generating a hash-based message authentication code using {0}".
format(algorithm_b.name)
)
e._cryptography_engine.logger.reset_mock()
self.assertEqual(str(uuid), response_payload.unique_identifier.value)
self.assertIsInstance(response_payload.mac_data, objects.MACData)
# Verify when cryptographic_parameters is not specified in request
payload = mac.MACRequestPayload(
unique_identifier=attributes.UniqueIdentifier(uuid),
cryptographic_parameters=None,
data=objects.Data(data)
)
response_payload = e._process_mac(payload)
e._cryptography_engine.logger.info.assert_any_call(
"Generating a cipher-based message authentication code using {0}".
format(algorithm_a.name)
)
self.assertEqual(str(uuid), response_payload.unique_identifier.value)
self.assertIsInstance(response_payload.mac_data, objects.MACData)
def test_mac_with_missing_fields(self):
"""
Test that the right errors are generated when required fields
are missing.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
key = (b'\<KEY>'
b'\x00\x00\x00\x00')
data = (b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B'
b'\x0C\x0D\x0E\x0F')
algorithm = enums.CryptographicAlgorithm.AES
obj_no_key = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
obj_no_algorithm = pie_objects.OpaqueObject(
key, enums.OpaqueDataType.NONE)
e._data_session.add(obj_no_key)
e._data_session.add(obj_no_algorithm)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
uuid_no_key = str(obj_no_key.unique_identifier)
uuid_no_algorithm = str(obj_no_algorithm.unique_identifier)
cryptographic_parameters = attributes.CryptographicParameters(
cryptographic_algorithm=attributes.
CryptographicAlgorithm(algorithm))
payload_no_key = mac.MACRequestPayload(
unique_identifier=attributes.UniqueIdentifier(uuid_no_key),
cryptographic_parameters=cryptographic_parameters,
data=objects.Data(data)
)
args = (payload_no_key, )
regex = "A secret key value must be specified"
self.assertRaisesRegexp(
exceptions.InvalidField,
regex,
e._process_mac,
*args
)
payload_no_algorithm = mac.MACRequestPayload(
unique_identifier=attributes.UniqueIdentifier(uuid_no_algorithm),
cryptographic_parameters=None,
data=objects.Data(data)
)
args = (payload_no_algorithm, )
regex = "The cryptographic algorithm must be specified"
self.assertRaisesRegexp(
exceptions.InvalidField,
regex,
e._process_mac,
*args
)
payload_no_data = mac.MACRequestPayload(
unique_identifier=attributes.UniqueIdentifier(uuid_no_algorithm),
cryptographic_parameters=cryptographic_parameters,
data=None
)
args = (payload_no_data, )
regex = "No data to be MACed"
self.assertRaisesRegexp(
exceptions.InvalidField,
regex,
e._process_mac,
*args
)
    def test_create_get_destroy(self):
        """
        Test that a managed object can be created, retrieved, and destroyed
        without error.

        Exercises the full Create -> Get -> Destroy lifecycle of a
        256-bit AES symmetric key and verifies that a destroyed object
        can no longer be located.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        attribute_factory = factory.AttributeFactory()
        # Build the attribute template describing the key to create.
        object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
        template_attribute = objects.TemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Symmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.AES
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    256
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.ENCRYPT,
                        enums.CryptographicUsageMask.DECRYPT
                    ]
                )
            ]
        )
        # Create the symmetric key with the corresponding attributes
        payload = create.CreateRequestPayload(
            object_type=object_type,
            template_attribute=template_attribute
        )
        response_payload = e._process_create(payload)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: Create"
        )
        # The first object stored is assigned unique identifier '1'.
        uid = response_payload.unique_identifier.value
        self.assertEqual('1', uid)
        e._logger.reset_mock()
        # Retrieve the created key using Get and verify all fields set
        payload = get.GetRequestPayload(
            unique_identifier=attributes.UniqueIdentifier(uid)
        )
        response_payload = e._process_get(payload)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: Get"
        )
        self.assertEqual(
            enums.ObjectType.SYMMETRIC_KEY,
            response_payload.object_type.value
        )
        self.assertEqual(str(uid), response_payload.unique_identifier.value)
        self.assertIsInstance(response_payload.secret, secrets.SymmetricKey)
        key_block = response_payload.secret.key_block
        # The generated key material must be 256 bits long.
        self.assertEqual(
            256,
            len(key_block.key_value.key_material.value) * 8
        )
        self.assertEqual(
            enums.KeyFormatType.RAW,
            key_block.key_format_type.value
        )
        self.assertEqual(
            enums.CryptographicAlgorithm.AES,
            key_block.cryptographic_algorithm.value
        )
        self.assertEqual(
            256,
            key_block.cryptographic_length.value
        )
        e._logger.reset_mock()
        # Destroy the symmetric key and verify it cannot be accessed again
        payload = destroy.DestroyRequestPayload(
            unique_identifier=attributes.UniqueIdentifier(uid)
        )
        response_payload = e._process_destroy(payload)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: Destroy"
        )
        self.assertEqual(str(uid), response_payload.unique_identifier.value)
        args = (payload, )
        regex = "Could not locate object: {0}".format(uid)
        six.assertRaisesRegex(
            self,
            exceptions.ItemNotFound,
            regex,
            e._process_destroy,
            *args
        )
        e._data_session.commit()
        e._data_store_session_factory()
    def test_create_key_pair_get_destroy(self):
        """
        Test that a key pair can be created, retrieved, and destroyed without
        error.

        Exercises the CreateKeyPair -> Get -> Destroy lifecycle of a
        2048-bit RSA key pair, checking the public and private halves
        separately, and verifies that destroyed keys can no longer be
        located.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        attribute_factory = factory.AttributeFactory()
        # Attributes shared by both halves of the key pair.
        common_template = objects.CommonTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Asymmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                )
            ]
        )
        public_template = objects.PublicKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.ENCRYPT
                    ]
                )
            ]
        )
        private_template = objects.PrivateKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.DECRYPT
                    ]
                )
            ]
        )
        # Payload argument order is (common, private, public).
        payload = create_key_pair.CreateKeyPairRequestPayload(
            common_template,
            private_template,
            public_template
        )
        response_payload = e._process_create_key_pair(payload)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: CreateKeyPair"
        )
        # The public key is stored first ('1'), the private key second ('2').
        public_id = response_payload.public_key_uuid.value
        self.assertEqual('1', public_id)
        private_id = response_payload.private_key_uuid.value
        self.assertEqual('2', private_id)
        e._logger.reset_mock()
        # Retrieve the created public key using Get and verify all fields set
        payload = get.GetRequestPayload(
            unique_identifier=attributes.UniqueIdentifier(public_id)
        )
        response_payload = e._process_get(payload)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: Get"
        )
        self.assertEqual(
            enums.ObjectType.PUBLIC_KEY,
            response_payload.object_type.value
        )
        self.assertEqual(
            str(public_id),
            response_payload.unique_identifier.value
        )
        self.assertIsInstance(response_payload.secret, secrets.PublicKey)
        key_block = response_payload.secret.key_block
        self.assertEqual(
            enums.KeyFormatType.PKCS_1,
            key_block.key_format_type.value
        )
        self.assertEqual(
            enums.CryptographicAlgorithm.RSA,
            key_block.cryptographic_algorithm.value
        )
        self.assertEqual(
            2048,
            key_block.cryptographic_length.value
        )
        e._logger.reset_mock()
        # Retrieve the created private key using Get and verify all fields set
        payload = get.GetRequestPayload(
            unique_identifier=attributes.UniqueIdentifier(private_id)
        )
        response_payload = e._process_get(payload)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: Get"
        )
        self.assertEqual(
            enums.ObjectType.PRIVATE_KEY,
            response_payload.object_type.value
        )
        self.assertEqual(
            str(private_id),
            response_payload.unique_identifier.value
        )
        self.assertIsInstance(response_payload.secret, secrets.PrivateKey)
        key_block = response_payload.secret.key_block
        self.assertEqual(
            enums.KeyFormatType.PKCS_8,
            key_block.key_format_type.value
        )
        self.assertEqual(
            enums.CryptographicAlgorithm.RSA,
            key_block.cryptographic_algorithm.value
        )
        self.assertEqual(
            2048,
            key_block.cryptographic_length.value
        )
        e._data_session.commit()
        e._data_store_session_factory()
        e._logger.reset_mock()
        # Destroy the public key and verify it cannot be accessed again
        payload = destroy.DestroyRequestPayload(
            unique_identifier=attributes.UniqueIdentifier(public_id)
        )
        response_payload = e._process_destroy(payload)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: Destroy"
        )
        self.assertEqual(
            str(public_id),
            response_payload.unique_identifier.value
        )
        e._data_session.commit()
        e._data_store_session_factory()
        e._logger.reset_mock()
        args = (payload, )
        regex = "Could not locate object: {0}".format(public_id)
        six.assertRaisesRegex(
            self,
            exceptions.ItemNotFound,
            regex,
            e._process_destroy,
            *args
        )
        e._data_session.commit()
        e._data_store_session_factory()
        e._logger.reset_mock()
        # Destroy the private key and verify it cannot be accessed again
        payload = destroy.DestroyRequestPayload(
            unique_identifier=attributes.UniqueIdentifier(private_id)
        )
        response_payload = e._process_destroy(payload)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: Destroy"
        )
        self.assertEqual(
            str(private_id),
            response_payload.unique_identifier.value
        )
        e._data_session.commit()
        e._data_store_session_factory()
        e._logger.reset_mock()
        args = (payload, )
        regex = "Could not locate object: {0}".format(private_id)
        six.assertRaisesRegex(
            self,
            exceptions.ItemNotFound,
            regex,
            e._process_destroy,
            *args
        )
        e._data_session.commit()
        e._data_store_session_factory()
def test_register_get_destroy(self):
"""
Test that a managed object can be registered, retrieved, and destroyed
without error.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
attribute_factory = factory.AttributeFactory()
# Build a SymmetricKey for registration.
object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
template_attribute = objects.TemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Symmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.AES
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
128
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
]
)
]
)
key_bytes = (
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
)
secret = secrets.SymmetricKey(
key_block=objects.KeyBlock(
key_format_type=misc.KeyFormatType(enums.KeyFormatType.RAW),
key_value=objects.KeyValue(
key_material=objects.KeyMaterial(key_bytes)
),
cryptographic_algorithm=attributes.CryptographicAlgorithm(
enums.CryptographicAlgorithm.AES
),
cryptographic_length=attributes.CryptographicLength(128)
)
)
# Register the symmetric key with the corresponding attributes
payload = register.RegisterRequestPayload(
object_type=object_type,
template_attribute=template_attribute,
secret=secret
)
response_payload = e._process_register(payload)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
e._logger.info.assert_any_call(
"Processing operation: Register"
)
uid = response_payload.unique_identifier.value
self.assertEqual('1', uid)
e._logger.reset_mock()
# Retrieve the registered key using Get and verify all fields set
payload = get.GetRequestPayload(
unique_identifier=attributes.UniqueIdentifier(uid)
)
response_payload = e._process_get(payload)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
e._logger.info.assert_any_call(
"Processing operation: Get"
)
self.assertEqual(
enums.ObjectType.SYMMETRIC_KEY,
response_payload.object_type.value
)
self.assertEqual(str(uid), response_payload.unique_identifier.value)
self.assertIsInstance(response_payload.secret, secrets.SymmetricKey)
self.assertEqual(
key_bytes,
response_payload.secret.key_block.key_value.key_material.value
)
self.assertEqual(
enums.KeyFormatType.RAW,
response_payload.secret.key_block.key_format_type.value
)
self.assertEqual(
enums.CryptographicAlgorithm.AES,
response_payload.secret.key_block.cryptographic_algorithm.value
)
self.assertEqual(
128,
response_payload.secret.key_block.cryptographic_length.value
)
e._logger.reset_mock()
# Destroy the symmetric key and verify it cannot be accessed again
payload = destroy.DestroyRequestPayload(
unique_identifier=attributes.UniqueIdentifier(uid)
)
response_payload = e._process_destroy(payload)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
e._logger.info.assert_any_call(
"Processing operation: Destroy"
)
self.assertEqual(str(uid), response_payload.unique_identifier.value)
args = (payload, )
regex = "Could not locate object: {0}".format(uid)
six.assertRaisesRegex(
self,
exceptions.ItemNotFound,
regex,
e._process_destroy,
*args
)
e._data_session.commit()
e._data_store_session_factory() | kmip/tests/unit/services/server/test_engine.py |
import six
import mock
import shutil
import sqlalchemy
from sqlalchemy.orm import exc
import tempfile
import testtools
import time
import kmip
from kmip.core import attributes
from kmip.core import enums
from kmip.core import exceptions
from kmip.core import misc
from kmip.core import objects
from kmip.core import secrets
from kmip.core.factories import attributes as factory
from kmip.core.messages import contents
from kmip.core.messages import messages
from kmip.core.messages.payloads import activate
from kmip.core.messages.payloads import create
from kmip.core.messages.payloads import create_key_pair
from kmip.core.messages.payloads import destroy
from kmip.core.messages.payloads import discover_versions
from kmip.core.messages.payloads import get
from kmip.core.messages.payloads import get_attribute_list
from kmip.core.messages.payloads import get_attributes
from kmip.core.messages.payloads import query
from kmip.core.messages.payloads import register
from kmip.core.messages.payloads import mac
from kmip.core.messages.payloads import locate
from kmip.pie import objects as pie_objects
from kmip.pie import sqltypes
from kmip.services.server import engine
class MockRegexString(str):
    """
    A comparator string for doing simple containment regex comparisons
    for mock asserts.

    Equality is redefined as substring containment: an instance compares
    equal to any string that contains it, which lets it be passed to
    mock assert_* helpers as a loose matcher.
    """
    def __eq__(self, other):
        return self in other

    # Defining __eq__ alone sets __hash__ to None in Python 3, making
    # instances unhashable; keep str's hash so the class remains usable
    # as a dict key or set member.
    __hash__ = str.__hash__
class TestKmipEngine(testtools.TestCase):
"""
A test suite for the KmipEngine.
"""
    def setUp(self):
        """
        Create a fresh in-memory SQLite backend, a session factory bound
        to it, and a temporary scratch directory (removed automatically
        on cleanup) for each test.
        """
        super(TestKmipEngine, self).setUp()
        self.engine = sqlalchemy.create_engine(
            'sqlite:///:memory:',
        )
        sqltypes.Base.metadata.create_all(self.engine)
        self.session_factory = sqlalchemy.orm.sessionmaker(
            bind=self.engine
        )
        self.temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.temp_dir)
    def tearDown(self):
        """No extra cleanup beyond the base class; see addCleanup in setUp."""
        super(TestKmipEngine, self).tearDown()
    def _build_request(self):
        """
        Build and return a complete KMIP 1.0 RequestMessage containing a
        single DiscoverVersions batch item, with username/password
        authentication and synchronous processing requested.
        """
        payload = discover_versions.DiscoverVersionsRequestPayload()
        batch = [
            messages.RequestBatchItem(
                operation=contents.Operation(
                    enums.Operation.DISCOVER_VERSIONS
                ),
                request_payload=payload
            )
        ]
        protocol = contents.ProtocolVersion.create(1, 0)
        # Cap the response at 1 MiB.
        max_size = contents.MaximumResponseSize(2 ** 20)
        asynch = contents.AsynchronousIndicator(False)
        # TODO (peterhamilton) Change this insanity in the substructs.
        username = objects.Credential.UsernamePasswordCredential.Username(
            "tester"
        )
        password = objects.Credential.UsernamePasswordCredential.Password(
            "password"
        )
        creds = objects.Credential.UsernamePasswordCredential(
            username=username,
            password=password
        )
        auth = contents.Authentication(creds)
        batch_error_option = contents.BatchErrorContinuationOption(
            enums.BatchErrorContinuationOption.STOP
        )
        batch_order_option = contents.BatchOrderOption(True)
        timestamp = contents.TimeStamp(int(time.time()))
        header = messages.RequestHeader(
            protocol_version=protocol,
            maximum_response_size=max_size,
            asynchronous_indicator=asynch,
            authentication=auth,
            batch_error_cont_option=batch_error_option,
            batch_order_option=batch_order_option,
            time_stamp=timestamp,
            batch_count=contents.BatchCount(len(batch))
        )
        return messages.RequestMessage(
            request_header=header,
            batch_items=batch
        )
    def test_init(self):
        """
        Test that a KmipEngine can be instantiated without any errors.

        The constructor takes no arguments; any exception raised here
        fails the test.
        """
        engine.KmipEngine()
@mock.patch('sqlalchemy.create_engine')
def test_init_create_engine(self, create_engine_mock):
"""
Test that the right arguments are used to create the engine's SQLite
backend.
"""
engine.KmipEngine()
args = ("sqlite:////tmp/pykmip.database",)
fargs = {
'echo': False,
'connect_args': {'check_same_thread': False}
}
create_engine_mock.assert_called_once_with(*args, **fargs)
    def test_load_operation_policies(self):
        """
        Test that the KmipEngine can correctly load operation policies.

        Writes a valid single-policy JSON file into the scratch directory
        and checks the engine parses and registers it.
        """
        e = engine.KmipEngine()
        e._logger = mock.MagicMock()
        policy_file = tempfile.NamedTemporaryFile(
            dir=self.temp_dir
        )
        with open(policy_file.name, 'w') as f:
            f.write(
                '{"test": {"CERTIFICATE": {"LOCATE": "ALLOW_ALL"}}}'
            )
        # Two policies exist before loading — presumably the engine's
        # built-in/reserved ones (see the sibling reserved-policy test).
        self.assertEqual(2, len(e._operation_policies))
        e._load_operation_policies(self.temp_dir)
        e._logger.info.assert_any_call(
            "Loading user-defined operation policy files from: {0}".format(
                self.temp_dir
            )
        )
        e._logger.info.assert_any_call(
            "Loading user-defined operation policies from file: {0}".format(
                policy_file.name
            )
        )
        # The JSON strings must have been mapped onto the real enums.
        self.assertEqual(3, len(e._operation_policies))
        self.assertIn('test', e._operation_policies.keys())
        test_policy = {
            enums.ObjectType.CERTIFICATE: {
                enums.Operation.LOCATE: enums.Policy.ALLOW_ALL
            }
        }
        self.assertEqual(test_policy, e._operation_policies.get('test'))
    def test_load_operation_policies_with_file_read_error(self):
        """
        Test that the KmipEngine can correctly handle load errors.

        "INVALID" is not a real object type, so parsing the policy file
        should fail; the failure must be logged and no policies added.
        """
        e = engine.KmipEngine()
        e._logger = mock.MagicMock()
        policy_file = tempfile.NamedTemporaryFile(
            dir=self.temp_dir
        )
        with open(policy_file.name, 'w') as f:
            f.write(
                '{"test": {"INVALID": {"LOCATE": "ALLOW_ALL"}}}'
            )
        self.assertEqual(2, len(e._operation_policies))
        e._load_operation_policies(self.temp_dir)
        e._logger.info.assert_any_call(
            "Loading user-defined operation policy files from: {0}".format(
                self.temp_dir
            )
        )
        e._logger.info.assert_any_call(
            "Loading user-defined operation policies from file: {0}".format(
                policy_file.name
            )
        )
        e._logger.error.assert_called_once_with(
            "A failure occurred while loading policies."
        )
        # NOTE(review): assert_called_once only exists on newer mock
        # versions; on older ones this attribute access silently passes.
        # Confirm the pinned mock version actually enforces it.
        e._logger.exception.assert_called_once()
        # The failed file must not have contributed any policies.
        self.assertEqual(2, len(e._operation_policies))
    def test_load_operation_policies_with_reserved(self):
        """
        Test that the KmipEngine can correctly load operation policies, even
        when a policy attempts to overwrite a reserved one.

        A user-defined policy named 'public' must be rejected with a
        warning and must not change the policy set.
        """
        e = engine.KmipEngine()
        e._logger = mock.MagicMock()
        policy_file = tempfile.NamedTemporaryFile(
            dir=self.temp_dir
        )
        # 'public' is a reserved policy name.
        with open(policy_file.name, 'w') as f:
            f.write(
                '{"public": {"CERTIFICATE": {"LOCATE": "ALLOW_ALL"}}}'
            )
        self.assertEqual(2, len(e._operation_policies))
        e._load_operation_policies(self.temp_dir)
        e._logger.info.assert_any_call(
            "Loading user-defined operation policy files from: {0}".format(
                self.temp_dir
            )
        )
        e._logger.info.assert_any_call(
            "Loading user-defined operation policies from file: {0}".format(
                policy_file.name
            )
        )
        e._logger.warning.assert_called_once_with(
            "Loaded policy 'public' overwrites a reserved policy and will "
            "be thrown out."
        )
        # The reserved policy set is unchanged.
        self.assertEqual(2, len(e._operation_policies))
    def test_load_operation_policies_with_duplicate(self):
        """
        Test that the KmipEngine can correctly load operation policies, even
        when a policy is defined multiple times.

        Two files define the same 'test' policy; the duplicate must be
        rejected with a warning, leaving exactly one 'test' policy
        alongside the two preexisting ones.
        """
        e = engine.KmipEngine()
        e._logger = mock.MagicMock()
        policy_file_a = tempfile.NamedTemporaryFile(
            dir=self.temp_dir
        )
        with open(policy_file_a.name, 'w') as f:
            f.write(
                '{"test": {"CERTIFICATE": {"LOCATE": "ALLOW_ALL"}}}'
            )
        # Second file defines the same policy name again.
        policy_file_b = tempfile.NamedTemporaryFile(
            dir=self.temp_dir
        )
        with open(policy_file_b.name, 'w') as f:
            f.write(
                '{"test": {"CERTIFICATE": {"LOCATE": "ALLOW_ALL"}}}'
            )
        self.assertEqual(2, len(e._operation_policies))
        e._load_operation_policies(self.temp_dir)
        e._logger.info.assert_any_call(
            "Loading user-defined operation policy files from: {0}".format(
                self.temp_dir
            )
        )
        e._logger.info.assert_any_call(
            "Loading user-defined operation policies from file: {0}".format(
                policy_file_a.name
            )
        )
        e._logger.info.assert_any_call(
            "Loading user-defined operation policies from file: {0}".format(
                policy_file_b.name
            )
        )
        e._logger.warning.assert_called_once_with(
            "Loaded policy 'test' overwrites a preexisting policy and will "
            "be thrown out."
        )
        # Only one copy of 'test' survives: 2 preexisting + 1 loaded.
        self.assertEqual(3, len(e._operation_policies))
        self.assertIn('test', e._operation_policies.keys())
        test_policy = {
            enums.ObjectType.CERTIFICATE: {
                enums.Operation.LOCATE: enums.Policy.ALLOW_ALL
            }
        }
        self.assertEqual(test_policy, e._operation_policies.get('test'))
    def test_version_operation_match(self):
        """
        Test that a valid response is generated when trying to invoke an
        operation supported by a specific version of KMIP.
        """
        e = engine.KmipEngine()
        e._logger = mock.MagicMock()
        payload = discover_versions.DiscoverVersionsRequestPayload()
        # No exception means the operation/version pairing is accepted.
        e._process_discover_versions(payload)
    def test_version_operation_mismatch(self):
        """
        Test that an OperationNotSupported error is generated when trying to
        invoke an operation unsupported by a specific version of KMIP.

        Forcing the engine to KMIP 1.0 should make DiscoverVersions fail,
        per the error message the engine itself produces.
        """
        e = engine.KmipEngine()
        e._logger = mock.MagicMock()
        e._protocol_version = contents.ProtocolVersion.create(1, 0)
        args = (None, )
        regex = "DiscoverVersions is not supported by KMIP {0}".format(
            e._protocol_version
        )
        six.assertRaisesRegex(
            self,
            exceptions.OperationNotSupported,
            regex,
            e._process_discover_versions,
            *args
        )
    def test_process_request(self):
        """
        Test that a basic request is processed correctly.

        Sends a well-formed KMIP 1.1 DiscoverVersions request through the
        full process_request path and verifies the response header and
        the single successful batch item.
        """
        e = engine.KmipEngine()
        e._logger = mock.MagicMock()
        protocol = contents.ProtocolVersion.create(1, 1)
        header = messages.RequestHeader(
            protocol_version=protocol,
            maximum_response_size=contents.MaximumResponseSize(2 ** 20),
            authentication=contents.Authentication(),
            batch_error_cont_option=contents.BatchErrorContinuationOption(
                enums.BatchErrorContinuationOption.STOP
            ),
            batch_order_option=contents.BatchOrderOption(True),
            time_stamp=contents.TimeStamp(int(time.time())),
            batch_count=contents.BatchCount(1)
        )
        payload = discover_versions.DiscoverVersionsRequestPayload()
        batch = list([
            messages.RequestBatchItem(
                operation=contents.Operation(
                    enums.Operation.DISCOVER_VERSIONS
                ),
                request_payload=payload
            )
        ])
        request = messages.RequestMessage(
            request_header=header,
            batch_items=batch
        )
        # process_request returns the response plus the negotiated
        # maximum response size.
        response, max_size = e.process_request(request)
        e._logger.info.assert_any_call(
            MockRegexString("Received request at time:")
        )
        e._logger.info.assert_any_call(
            "Processing operation: DiscoverVersions"
        )
        self.assertIsInstance(response, messages.ResponseMessage)
        self.assertEqual(2 ** 20, max_size)
        self.assertIsNotNone(response.response_header)
        header = response.response_header
        self.assertIsNotNone(header)
        self.assertEqual(
            contents.ProtocolVersion.create(1, 1),
            header.protocol_version
        )
        self.assertIsInstance(header.time_stamp, contents.TimeStamp)
        self.assertIsInstance(header.batch_count, contents.BatchCount)
        self.assertEqual(1, header.batch_count.value)
        batch = response.batch_items
        self.assertNotEqual(list(), batch)
        # Verify the single batch item succeeded with no error details.
        batch_item = batch[0]
        self.assertIsInstance(batch_item.operation, contents.Operation)
        self.assertEqual(
            enums.Operation.DISCOVER_VERSIONS,
            batch_item.operation.value
        )
        self.assertIsNone(batch_item.unique_batch_item_id)
        self.assertEqual(
            enums.ResultStatus.SUCCESS,
            batch_item.result_status.value
        )
        self.assertIsNone(batch_item.result_reason)
        self.assertIsNone(batch_item.result_message)
        self.assertIsNone(batch_item.async_correlation_value)
        self.assertIsInstance(
            batch_item.response_payload,
            discover_versions.DiscoverVersionsResponsePayload
        )
        self.assertIsNone(batch_item.message_extension)
    def test_process_request_unsupported_version(self):
        """
        Test that an InvalidMessage exception is raised when processing a
        request using an unsupported KMIP version.
        """
        e = engine.KmipEngine()
        e._logger = mock.MagicMock()
        # KMIP 0.1 is not a version the engine supports.
        protocol = contents.ProtocolVersion.create(0, 1)
        header = messages.RequestHeader(
            protocol_version=protocol
        )
        request = messages.RequestMessage(
            request_header=header
        )
        args = (request, )
        regex = "KMIP {0} is not supported by the server.".format(
            protocol
        )
        six.assertRaisesRegex(
            self,
            exceptions.InvalidMessage,
            regex,
            e.process_request,
            *args
        )
    def test_process_request_stale_timestamp(self):
        """
        Test that an InvalidMessage exception is raised when processing a
        request with a stale timestamp.
        """
        e = engine.KmipEngine()
        e._logger = mock.MagicMock()
        protocol = contents.ProtocolVersion.create(1, 0)
        # A timestamp of 0 (the epoch) is far enough in the past to be
        # rejected as stale.
        header = messages.RequestHeader(
            protocol_version=protocol,
            time_stamp=contents.TimeStamp(0)
        )
        request = messages.RequestMessage(
            request_header=header
        )
        args = (request, )
        regex = "Stale request rejected by server."
        six.assertRaisesRegex(
            self,
            exceptions.InvalidMessage,
            regex,
            e.process_request,
            *args
        )
        # The rejection must also be logged as a possible replay attack.
        e._logger.warning.assert_any_call(
            MockRegexString(
                "Received request with old timestamp. Possible replay attack."
            )
        )
def test_process_request_future_timestamp(self):
"""
Test that an InvalidMessage exception is raised when processing a
request with a future timestamp.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
protocol = contents.ProtocolVersion.create(1, 0)
header = messages.RequestHeader(
protocol_version=protocol,
time_stamp=contents.TimeStamp(10 ** 10)
)
request = messages.RequestMessage(
request_header=header
)
args = (request, )
regex = "Future request rejected by server."
six.assertRaisesRegex(
self,
exceptions.InvalidMessage,
regex,
e.process_request,
*args
)
e._logger.warning.assert_any_call(
MockRegexString(
"Received request with future timestamp."
)
)
def test_process_request_unsupported_async_indicator(self):
"""
Test than an InvalidMessage error is generated while processing a
batch with an unsupported asynchronous indicator option.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
protocol = contents.ProtocolVersion.create(1, 1)
header = messages.RequestHeader(
protocol_version=protocol,
asynchronous_indicator=contents.AsynchronousIndicator(True)
)
request = messages.RequestMessage(
request_header=header,
)
args = (request, )
regex = "Asynchronous operations are not supported."
six.assertRaisesRegex(
self,
exceptions.InvalidMessage,
regex,
e.process_request,
*args
)
def test_process_request_unsupported_batch_option(self):
"""
Test that an InvalidMessage error is generated while processing a
batch with an unsupported batch error continuation option.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
protocol = contents.ProtocolVersion.create(1, 1)
header = messages.RequestHeader(
protocol_version=protocol,
authentication=contents.Authentication(),
batch_error_cont_option=contents.BatchErrorContinuationOption(
enums.BatchErrorContinuationOption.UNDO
)
)
request = messages.RequestMessage(
request_header=header,
)
args = (request, )
regex = "Undo option for batch handling is not supported."
six.assertRaisesRegex(
self,
exceptions.InvalidMessage,
regex,
e.process_request,
*args
)
def test_process_request_missing_credential(self):
"""
Test that the engine does not immediately error out when retrieving
a non-existent credential from the request.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
protocol = contents.ProtocolVersion.create(1, 1)
header = messages.RequestHeader(
protocol_version=protocol,
authentication=None,
batch_error_cont_option=contents.BatchErrorContinuationOption(
enums.BatchErrorContinuationOption.STOP
),
batch_order_option=contents.BatchOrderOption(True),
time_stamp=contents.TimeStamp(int(time.time())),
batch_count=contents.BatchCount(1)
)
payload = discover_versions.DiscoverVersionsRequestPayload()
batch = list([
messages.RequestBatchItem(
operation=contents.Operation(
enums.Operation.DISCOVER_VERSIONS
),
request_payload=payload
)
])
request = messages.RequestMessage(
request_header=header,
batch_items=batch
)
e.process_request(request)
def test_build_error_response(self):
"""
Test that a bare bones response containing a single error result can
be constructed correctly.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
response = e.build_error_response(
contents.ProtocolVersion.create(1, 1),
enums.ResultReason.GENERAL_FAILURE,
"A general test failure occurred."
)
self.assertIsInstance(response, messages.ResponseMessage)
header = response.response_header
self.assertEqual(
contents.ProtocolVersion.create(1, 1),
header.protocol_version
)
self.assertIsNotNone(header.time_stamp)
self.assertIsNotNone(header.batch_count)
self.assertEqual(1, header.batch_count.value)
batch = response.batch_items
self.assertEqual(1, len(batch))
batch_item = batch[0]
self.assertIsNone(batch_item.operation)
self.assertIsNone(batch_item.unique_batch_item_id)
self.assertEqual(
enums.ResultStatus.OPERATION_FAILED,
batch_item.result_status.value
)
self.assertEqual(
enums.ResultReason.GENERAL_FAILURE,
batch_item.result_reason.value
)
self.assertEqual(
"A general test failure occurred.",
batch_item.result_message.value
)
self.assertIsNone(batch_item.async_correlation_value)
self.assertIsNone(batch_item.response_payload)
self.assertIsNone(batch_item.message_extension)
def test_process_batch(self):
"""
Test that a batch is processed correctly.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
payload = discover_versions.DiscoverVersionsRequestPayload()
batch = list([
messages.RequestBatchItem(
operation=contents.Operation(
enums.Operation.DISCOVER_VERSIONS
),
request_payload=payload
)
])
results = e._process_batch(
batch,
enums.BatchErrorContinuationOption.STOP,
True
)
self.assertIsNotNone(results)
self.assertEqual(1, len(results))
def test_process_multibatch(self):
"""
Test that a batch containing multiple operations is processed
correctly.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
payload = discover_versions.DiscoverVersionsRequestPayload()
batch = list([
messages.RequestBatchItem(
operation=contents.Operation(
enums.Operation.DISCOVER_VERSIONS
),
unique_batch_item_id=contents.UniqueBatchItemID(1),
request_payload=payload
),
messages.RequestBatchItem(
operation=contents.Operation(
enums.Operation.DISCOVER_VERSIONS
),
unique_batch_item_id=contents.UniqueBatchItemID(2),
request_payload=payload
)
])
results = e._process_batch(
batch,
enums.BatchErrorContinuationOption.STOP,
True
)
self.assertIsNotNone(results)
self.assertEqual(2, len(results))
def test_process_batch_missing_batch_id(self):
"""
Test that an InvalidMessage error is generated while processing a
batch with missing batch IDs.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
batch = list([
messages.RequestBatchItem(),
messages.RequestBatchItem()
])
args = (batch, None, None)
six.assertRaisesRegex(
self,
exceptions.InvalidMessage,
"Batch item ID is undefined.",
e._process_batch,
*args
)
def test_process_batch_expected_error(self):
"""
Test than an expected KMIP error is handled appropriately while
processing a batch of operations.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
e._protocol_version = contents.ProtocolVersion.create(1, 0)
batch = list([
messages.RequestBatchItem(
operation=contents.Operation(
enums.Operation.DISCOVER_VERSIONS
)
)
])
results = e._process_batch(
batch,
enums.BatchErrorContinuationOption.STOP,
True
)
self.assertIsNotNone(results)
self.assertEqual(1, len(results))
result = results[0]
self.assertIsInstance(result, messages.ResponseBatchItem)
self.assertIsNotNone(result.operation)
self.assertEqual(
enums.Operation.DISCOVER_VERSIONS,
result.operation.value
)
self.assertIsNone(result.unique_batch_item_id)
self.assertIsNotNone(result.result_status)
self.assertEqual(
enums.ResultStatus.OPERATION_FAILED,
result.result_status.value
)
self.assertIsNotNone(result.result_reason)
self.assertEqual(
enums.ResultReason.OPERATION_NOT_SUPPORTED,
result.result_reason.value
)
self.assertIsNotNone(result.result_message)
error_message = "DiscoverVersions is not supported by KMIP {0}".format(
e._protocol_version
)
self.assertEqual(error_message, result.result_message.value)
self.assertIsNone(result.async_correlation_value)
self.assertIsNone(result.response_payload)
self.assertIsNone(result.message_extension)
def test_process_batch_unexpected_error(self):
"""
Test that an unexpected, non-KMIP error is handled appropriately
while processing a batch of operations.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
test_exception = Exception("A general test failure occurred.")
e._process_operation = mock.MagicMock(side_effect=test_exception)
batch = list([
messages.RequestBatchItem(
operation=contents.Operation(
enums.Operation.DISCOVER_VERSIONS
)
)
])
results = e._process_batch(
batch,
enums.BatchErrorContinuationOption.STOP,
True
)
self.assertIsNotNone(results)
self.assertEqual(1, len(results))
result = results[0]
e._logger.warning.assert_called_with(
"Error occurred while processing operation."
)
e._logger.exception.assert_called_with(test_exception)
self.assertIsInstance(result, messages.ResponseBatchItem)
self.assertIsNotNone(result.operation)
self.assertEqual(
enums.Operation.DISCOVER_VERSIONS,
result.operation.value
)
self.assertIsNone(result.unique_batch_item_id)
self.assertIsNotNone(result.result_status)
self.assertEqual(
enums.ResultStatus.OPERATION_FAILED,
result.result_status.value
)
self.assertIsNotNone(result.result_reason)
self.assertEqual(
enums.ResultReason.GENERAL_FAILURE,
result.result_reason.value
)
self.assertIsNotNone(result.result_message)
self.assertEqual(
"Operation failed. See the server logs for more information.",
result.result_message.value
)
self.assertIsNone(result.async_correlation_value)
self.assertIsNone(result.response_payload)
self.assertIsNone(result.message_extension)
def test_process_operation(self):
"""
Test that the right subroutine is called when invoking operations
supported by the server.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
e._process_create = mock.MagicMock()
e._process_create_key_pair = mock.MagicMock()
e._process_register = mock.MagicMock()
e._process_get = mock.MagicMock()
e._process_get_attributes = mock.MagicMock()
e._process_get_attribute_list = mock.MagicMock()
e._process_activate = mock.MagicMock()
e._process_destroy = mock.MagicMock()
e._process_query = mock.MagicMock()
e._process_discover_versions = mock.MagicMock()
e._process_operation(enums.Operation.CREATE, None)
e._process_operation(enums.Operation.CREATE_KEY_PAIR, None)
e._process_operation(enums.Operation.REGISTER, None)
e._process_operation(enums.Operation.GET, None)
e._process_operation(enums.Operation.GET_ATTRIBUTES, None)
e._process_operation(enums.Operation.GET_ATTRIBUTE_LIST, None)
e._process_operation(enums.Operation.ACTIVATE, None)
e._process_operation(enums.Operation.DESTROY, None)
e._process_operation(enums.Operation.QUERY, None)
e._process_operation(enums.Operation.DISCOVER_VERSIONS, None)
e._process_create.assert_called_with(None)
e._process_create_key_pair.assert_called_with(None)
e._process_register.assert_called_with(None)
e._process_get.assert_called_with(None)
e._process_get_attributes.assert_called_with(None)
e._process_get_attribute_list.assert_called_with(None)
e._process_activate.assert_called_with(None)
e._process_destroy.assert_called_with(None)
e._process_query.assert_called_with(None)
e._process_discover_versions.assert_called_with(None)
def test_unsupported_operation(self):
"""
Test that an OperationNotSupported error is generated when invoking
an operation not supported by the server.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
args = (enums.Operation.POLL, None)
regex = "{0} operation is not supported by the server.".format(
args[0].name.title()
)
six.assertRaisesRegex(
self,
exceptions.OperationNotSupported,
regex,
e._process_operation,
*args
)
def test_get_object_type(self):
"""
Test that the object type of a stored object can be retrieved
correctly.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
e._data_session.add(obj_a)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
id_a = str(obj_a.unique_identifier)
object_type = e._get_object_type(id_a)
e._data_session.commit()
self.assertEqual(pie_objects.OpaqueObject, object_type)
def test_get_object_type_missing_object(self):
"""
Test that an ItemNotFound error is generated when attempting to
retrieve the object type of an object that does not exist.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
args = ('1', )
regex = "Could not locate object: 1"
six.assertRaisesRegex(
self,
exceptions.ItemNotFound,
regex,
e._get_object_type,
*args
)
e._data_session.commit()
e._logger.warning.assert_called_once_with(
"Could not identify object type for object: 1"
)
def test_get_object_type_multiple_objects(self):
"""
Test that a sqlalchemy.orm.exc.MultipleResultsFound error is generated
when getting the object type of multiple objects map to the same
object ID.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
test_exception = exc.MultipleResultsFound()
e._data_session.query = mock.MagicMock(side_effect=test_exception)
e._logger = mock.MagicMock()
args = ('1', )
self.assertRaises(
exc.MultipleResultsFound,
e._get_object_type,
*args
)
e._data_session.commit()
e._logger.warning.assert_called_once_with(
"Multiple objects found for ID: 1"
)
def test_get_object_type_unsupported_type(self):
"""
Test that an InvalidField error is generated when attempting to
get the object type of an object with an unsupported object type.
This should never happen by definition, but "Safety first!"
"""
e = engine.KmipEngine()
e._object_map = {enums.ObjectType.OPAQUE_DATA: None}
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
e._data_session.add(obj_a)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
id_a = str(obj_a.unique_identifier)
args = (id_a, )
name = enums.ObjectType.OPAQUE_DATA.name
regex = "The {0} object type is not supported.".format(
''.join(
[x.capitalize() for x in name.split('_')]
)
)
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._get_object_type,
*args
)
e._data_session.commit()
    def test_build_core_object(self):
        """
        Test that kmip.core objects can be built from simpler kmip.pie
        objects.

        Covers one conversion per supported pie type: X.509 certificate,
        symmetric key, public key, private key, secret data, and opaque
        data. Each case checks the resulting core type and that the stored
        value/metadata round-trips into the core structure.
        """
        e = engine.KmipEngine()
        e._logger = mock.MagicMock()
        # Test building a Certificate.
        managed_object = pie_objects.X509Certificate(value=b'')
        core_object = e._build_core_object(managed_object)
        self.assertIsInstance(core_object, secrets.Certificate)
        self.assertEqual(
            b'',
            core_object.certificate_value.value
        )
        self.assertEqual(
            enums.CertificateTypeEnum.X_509,
            core_object.certificate_type.value
        )
        # Test building a Symmetric Key.
        managed_object = pie_objects.SymmetricKey(
            enums.CryptographicAlgorithm.AES,
            0,
            b''
        )
        core_object = e._build_core_object(managed_object)
        self.assertIsInstance(core_object, secrets.SymmetricKey)
        self.assertEqual(
            enums.CryptographicAlgorithm.AES,
            core_object.key_block.cryptographic_algorithm.value
        )
        self.assertEqual(
            0,
            core_object.key_block.cryptographic_length.value
        )
        self.assertEqual(
            b'',
            core_object.key_block.key_value.key_material.value
        )
        # Test building a Public Key.
        managed_object = pie_objects.PublicKey(
            enums.CryptographicAlgorithm.RSA,
            0,
            b''
        )
        core_object = e._build_core_object(managed_object)
        self.assertIsInstance(core_object, secrets.PublicKey)
        self.assertEqual(
            enums.CryptographicAlgorithm.RSA,
            core_object.key_block.cryptographic_algorithm.value
        )
        self.assertEqual(
            0,
            core_object.key_block.cryptographic_length.value
        )
        self.assertEqual(
            b'',
            core_object.key_block.key_value.key_material.value
        )
        # Test building a Private Key. Unlike the public key case, the key
        # format type is provided and must be preserved in the key block.
        managed_object = pie_objects.PrivateKey(
            enums.CryptographicAlgorithm.RSA,
            0,
            b'',
            enums.KeyFormatType.PKCS_8
        )
        core_object = e._build_core_object(managed_object)
        self.assertIsInstance(core_object, secrets.PrivateKey)
        self.assertEqual(
            enums.CryptographicAlgorithm.RSA,
            core_object.key_block.cryptographic_algorithm.value
        )
        self.assertEqual(
            0,
            core_object.key_block.cryptographic_length.value
        )
        self.assertEqual(
            b'',
            core_object.key_block.key_value.key_material.value
        )
        self.assertEqual(
            enums.KeyFormatType.PKCS_8,
            core_object.key_block.key_format_type.value
        )
        # Test building a Secret Data.
        managed_object = pie_objects.SecretData(
            b'',
            enums.SecretDataType.PASSWORD
        )
        core_object = e._build_core_object(managed_object)
        self.assertIsInstance(core_object, secrets.SecretData)
        self.assertEqual(
            enums.SecretDataType.PASSWORD,
            core_object.secret_data_type.value
        )
        self.assertEqual(
            b'',
            core_object.key_block.key_value.key_material.value
        )
        # Test building an Opaque Data.
        managed_object = pie_objects.OpaqueObject(
            b'',
            enums.OpaqueDataType.NONE
        )
        core_object = e._build_core_object(managed_object)
        self.assertIsInstance(core_object, secrets.OpaqueObject)
        self.assertEqual(
            enums.OpaqueDataType.NONE,
            core_object.opaque_data_type.value
        )
        self.assertEqual(
            b'',
            core_object.opaque_data_value.value
        )
def test_build_core_object_unsupported_type(self):
"""
Test that an InvalidField error is generated when building
kmip.core objects that are unsupported.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
args = (None, )
regex = "Cannot build an unsupported object type."
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._build_core_object,
*args
)
class DummyObject:
def __init__(self):
self._object_type = enums.ObjectType.SPLIT_KEY
args = (DummyObject(), )
regex = "The SplitKey object type is not supported."
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._build_core_object,
*args
)
    def test_process_template_attribute(self):
        """
        Test that a template attribute structure can be processed correctly.

        Builds a template attribute holding a name, an algorithm, a length,
        and a usage mask, then verifies the processed result is a dict keyed
        by attribute name. Name is multivalued, so its value comes back as a
        list; the single-valued attributes come back as bare values.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        attribute_factory = factory.AttributeFactory()
        name = attribute_factory.create_attribute(
            enums.AttributeType.NAME,
            attributes.Name.create(
                'Test Symmetric Key',
                enums.NameType.UNINTERPRETED_TEXT_STRING
            )
        )
        algorithm = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
            enums.CryptographicAlgorithm.AES
        )
        length = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
            128
        )
        mask = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
            [
                enums.CryptographicUsageMask.ENCRYPT,
                enums.CryptographicUsageMask.DECRYPT
            ]
        )
        template_attribute = objects.TemplateAttribute(
            attributes=[name, algorithm, length, mask]
        )
        result = e._process_template_attribute(template_attribute)
        # One dict entry per distinct attribute name.
        self.assertIsInstance(result, dict)
        self.assertEqual(4, len(result.keys()))
        self.assertIn('Name', result.keys())
        self.assertIn('Cryptographic Algorithm', result.keys())
        self.assertIn('Cryptographic Length', result.keys())
        self.assertIn('Cryptographic Usage Mask', result.keys())
        # Name is multivalued and is therefore wrapped in a list.
        self.assertEqual([name.attribute_value], result.get('Name'))
        self.assertEqual(
            algorithm.attribute_value,
            result.get('Cryptographic Algorithm')
        )
        self.assertEqual(
            length.attribute_value,
            result.get('Cryptographic Length')
        )
        self.assertEqual(
            mask.attribute_value,
            result.get('Cryptographic Usage Mask')
        )
    def test_process_template_attribute_unsupported_features(self):
        """
        Test that the right errors are generated when unsupported features
        are referenced while processing a template attribute.

        Covers five failure modes: template names (ItemNotFound), unknown
        attribute names (InvalidField), missing indices on multivalued
        attributes (InvalidField), a non-zero index on a single-valued
        attribute (InvalidField), and duplicate single-valued attributes
        (IndexOutOfBounds).
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        attribute_factory = factory.AttributeFactory()
        # Test that providing template names generates an ItemNotFound error.
        template_attribute = objects.TemplateAttribute(
            names=[
                attributes.Name.create(
                    'invalid',
                    enums.NameType.UNINTERPRETED_TEXT_STRING
                )
            ]
        )
        args = (template_attribute, )
        regex = "Attribute templates are not supported."
        six.assertRaisesRegex(
            self,
            exceptions.ItemNotFound,
            regex,
            e._process_template_attribute,
            *args
        )
        # Test that an unrecognized attribute generates an InvalidField error.
        name = attribute_factory.create_attribute(
            enums.AttributeType.NAME,
            attributes.Name.create(
                'Test Symmetric Key',
                enums.NameType.UNINTERPRETED_TEXT_STRING
            )
        )
        name.attribute_name.value = 'invalid'
        template_attribute = objects.TemplateAttribute(attributes=[name])
        args = (template_attribute, )
        regex = "The invalid attribute is unsupported."
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_template_attribute,
            *args
        )
        # Test that missing indices generate an InvalidField error.
        # Two Name attributes without indices are ambiguous.
        name_a = attribute_factory.create_attribute(
            enums.AttributeType.NAME,
            attributes.Name.create(
                'Test Symmetric Key',
                enums.NameType.UNINTERPRETED_TEXT_STRING
            )
        )
        name_b = attribute_factory.create_attribute(
            enums.AttributeType.NAME,
            attributes.Name.create(
                'Test Symmetric Key',
                enums.NameType.UNINTERPRETED_TEXT_STRING
            )
        )
        template_attribute = objects.TemplateAttribute(
            attributes=[name_a, name_b]
        )
        args = (template_attribute, )
        regex = "Attribute index missing from multivalued attribute."
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_template_attribute,
            *args
        )
        # Test that a non-zero index generates an InvalidField error.
        algorithm = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
            enums.CryptographicAlgorithm.AES,
            1
        )
        template_attribute = objects.TemplateAttribute(attributes=[algorithm])
        args = (template_attribute, )
        regex = "Non-zero attribute index found for single-valued attribute."
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_template_attribute,
            *args
        )
        # Test that setting multiple values for a single-value attribute
        # generates an IndexOutOfBounds error.
        algorithm_a = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
            enums.CryptographicAlgorithm.AES
        )
        algorithm_b = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
            enums.CryptographicAlgorithm.TRIPLE_DES
        )
        template_attribute = objects.TemplateAttribute(
            attributes=[algorithm_a, algorithm_b]
        )
        args = (template_attribute, )
        regex = (
            "Cannot set multiple instances of the Cryptographic Algorithm "
            "attribute."
        )
        six.assertRaisesRegex(
            self,
            exceptions.IndexOutOfBounds,
            regex,
            e._process_template_attribute,
            *args
        )
    def test_get_attributes_from_managed_object(self):
        """
        Test that multiple attributes can be retrieved from a given managed
        object.

        Requests six attribute names (including one invalid name) from a
        stored symmetric key and spot-checks that the expected attribute
        values appear in the result.
        """
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        symmetric_key = pie_objects.SymmetricKey(
            enums.CryptographicAlgorithm.AES,
            0,
            b'',
            masks=[enums.CryptographicUsageMask.ENCRYPT,
                   enums.CryptographicUsageMask.DECRYPT]
        )
        symmetric_key.names = ['Name 1', 'Name 2']
        e._data_session.add(symmetric_key)
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        result = e._get_attributes_from_managed_object(
            symmetric_key,
            ['Unique Identifier',
             'Name',
             'Cryptographic Algorithm',
             'Cryptographic Length',
             'Cryptographic Usage Mask',
             'invalid']
        )
        attribute_factory = factory.AttributeFactory()
        # Six attributes come back in total; the four single-valued ones are
        # checked explicitly below. (Presumably the two names account for the
        # remaining entries -- the 'invalid' name yields nothing.)
        self.assertEqual(6, len(result))
        attribute = attribute_factory.create_attribute(
            enums.AttributeType.UNIQUE_IDENTIFIER,
            '1'
        )
        self.assertIn(attribute, result)
        attribute = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
            enums.CryptographicAlgorithm.AES
        )
        self.assertIn(attribute, result)
        attribute = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
            0
        )
        self.assertIn(attribute, result)
        attribute = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
            [enums.CryptographicUsageMask.ENCRYPT,
             enums.CryptographicUsageMask.DECRYPT]
        )
        self.assertIn(attribute, result)
def test_get_attributes_from_managed_object_with_missing_attribute(self):
"""
Test that any exceptions are suppressed when attempting to retrieve
non-existent attributes from managed objects.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
symmetric_key = pie_objects.SymmetricKey(
enums.CryptographicAlgorithm.AES,
0,
b'',
masks=[enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
)
symmetric_key.names = ['Name 1', 'Name 2']
e._data_session.add(symmetric_key)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
e._get_attribute_from_managed_object = mock.Mock()
e._get_attribute_from_managed_object.side_effect = Exception
result = e._get_attributes_from_managed_object(
symmetric_key,
['Unique Identifier',
'Name',
'Cryptographic Algorithm',
'Cryptographic Length',
'Cryptographic Usage Mask',
'invalid']
)
self.assertEqual(0, len(result))
def test_get_attribute_from_managed_object(self):
"""
Test that an attribute can be retrieved from a given managed object.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
symmetric_key = pie_objects.SymmetricKey(
enums.CryptographicAlgorithm.AES,
0,
b'',
masks=[enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
)
certificate = pie_objects.X509Certificate(
b''
)
opaque_object = pie_objects.OpaqueObject(
b'',
enums.OpaqueDataType.NONE
)
e._data_session.add(symmetric_key)
e._data_session.add(certificate)
e._data_session.add(opaque_object)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
result = e._get_attribute_from_managed_object(
symmetric_key,
'Unique Identifier'
)
self.assertEqual('1', result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Name'
)
self.assertEqual(
[attributes.Name(
attributes.Name.NameValue('Symmetric Key'),
attributes.Name.NameType(
enums.NameType.UNINTERPRETED_TEXT_STRING
)
)],
result
)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Object Type'
)
self.assertEqual(enums.ObjectType.SYMMETRIC_KEY, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Cryptographic Algorithm'
)
self.assertEqual(enums.CryptographicAlgorithm.AES, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Cryptographic Length'
)
self.assertEqual(0, result)
result = e._get_attribute_from_managed_object(
certificate,
'Cryptographic Parameters'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'Cryptographic Domain Parameters'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'Certificate Type'
)
self.assertEqual(enums.CertificateTypeEnum.X_509, result)
result = e._get_attribute_from_managed_object(
certificate,
'Certificate Length'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'X.509 Certificate Identifier'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'X.509 Certificate Subject'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'X.509 Certificate Issuer'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'Certificate Identifier'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'Certificate Subject'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'Certificate Issuer'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
certificate,
'Digital Signature Algorithm'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
opaque_object,
'Digest'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Operation Policy Name'
)
self.assertEqual('default', result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Cryptographic Usage Mask'
)
self.assertEqual(
[enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT],
result
)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Lease Time'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Usage Limits'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'State'
)
self.assertEqual(enums.State.PRE_ACTIVE, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Initial Date'
)
self.assertIsNotNone(result)
self.assertIsInstance(result, six.integer_types)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Activation Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Process Start Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Protect Stop Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Deactivation Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Destroy Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Compromise Occurrence Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Compromise Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Revocation Reason'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Archive Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Object Group'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Fresh'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Link'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Application Specific Information'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Contact Information'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'Last Change Date'
)
self.assertEqual(None, result)
result = e._get_attribute_from_managed_object(
symmetric_key,
'invalid'
)
self.assertEqual(None, result)
def test_set_attributes_on_managed_object(self):
"""
Test that multiple attributes can be set on a given managed object.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
managed_object = pie_objects.SecretData(
b'',
enums.SecretDataType.PASSWORD
)
managed_object.names = []
attribute_factory = factory.AttributeFactory()
name = attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Secret Data',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
)
mask = attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
]
)
template_attribute = objects.TemplateAttribute(
attributes=[name, mask]
)
object_attributes = e._process_template_attribute(template_attribute)
self.assertEqual([], managed_object.names)
self.assertEqual([], managed_object.cryptographic_usage_masks)
e._set_attributes_on_managed_object(
managed_object,
object_attributes
)
self.assertEqual(['Test Secret Data'], managed_object.names)
self.assertEqual(
[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
],
managed_object.cryptographic_usage_masks
)
def test_set_attributes_on_managed_object_attribute_mismatch(self):
"""
Test that an InvalidField error is generated when attempting to set
an attribute that is not applicable for a given managed object.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
managed_object = pie_objects.OpaqueObject(
b'',
enums.OpaqueDataType.NONE
)
attribute_factory = factory.AttributeFactory()
mask = attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
]
)
template_attribute = objects.TemplateAttribute(attributes=[mask])
object_attributes = e._process_template_attribute(template_attribute)
args = (managed_object, object_attributes)
regex = (
"Cannot set Cryptographic Usage Mask attribute on OpaqueData "
"object."
)
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._set_attributes_on_managed_object,
*args
)
    def test_set_attribute_on_managed_object(self):
        """
        Test that various attributes can be set correctly on a given
        managed object.

        Exercises _set_attribute_on_managed_object once per attribute kind:
        Name (multivalued), Cryptographic Algorithm, Cryptographic Length,
        and Cryptographic Usage Mask.
        """
        # Engine wired to the test data store with a mocked logger.
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        attribute_factory = factory.AttributeFactory()
        name = attribute_factory.create_attribute(
            enums.AttributeType.NAME,
            attributes.Name.create(
                'Test Symmetric Key',
                enums.NameType.UNINTERPRETED_TEXT_STRING
            )
        )
        algorithm = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
            enums.CryptographicAlgorithm.AES
        )
        length = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
            0
        )
        mask = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
            [
                enums.CryptographicUsageMask.ENCRYPT,
                enums.CryptographicUsageMask.DECRYPT
            ]
        )
        # Length 0 / empty key bytes let the set calls below write values
        # without colliding with preexisting attribute state.
        managed_object = pie_objects.SymmetricKey(
            enums.CryptographicAlgorithm.AES,
            0,
            b''
        )
        managed_object.names = []
        # Baseline state before any attribute is set.
        self.assertEqual([], managed_object.names)
        self.assertEqual(
            enums.CryptographicAlgorithm.AES,
            managed_object.cryptographic_algorithm
        )
        self.assertEqual(0, managed_object.cryptographic_length)
        self.assertEqual([], managed_object.cryptographic_usage_masks)
        # Name is a multivalued attribute, so it is passed as a list.
        e._set_attribute_on_managed_object(
            managed_object,
            ('Name', [name.attribute_value])
        )
        self.assertEqual(['Test Symmetric Key'], managed_object.names)
        e._set_attribute_on_managed_object(
            managed_object,
            ('Cryptographic Algorithm', algorithm.attribute_value)
        )
        self.assertEqual(
            enums.CryptographicAlgorithm.AES,
            managed_object.cryptographic_algorithm
        )
        e._set_attribute_on_managed_object(
            managed_object,
            ('Cryptographic Length', length.attribute_value)
        )
        self.assertEqual(0, managed_object.cryptographic_length)
        e._set_attribute_on_managed_object(
            managed_object,
            ('Cryptographic Usage Mask', mask.attribute_value)
        )
        self.assertEqual(
            [
                enums.CryptographicUsageMask.ENCRYPT,
                enums.CryptographicUsageMask.DECRYPT
            ],
            managed_object.cryptographic_usage_masks
        )
    def test_set_attribute_on_managed_object_unsupported_features(self):
        """
        Test that the right errors are generated when unsupported features
        are referenced while setting managed object attributes.

        Covers four failure modes of _set_attribute_on_managed_object:
        duplicate names, an unsupported multivalued attribute, overwriting
        an already-set attribute, and an unsupported single-valued attribute.
        """
        # Engine wired to the test data store with a mocked logger.
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        attribute_factory = factory.AttributeFactory()
        managed_object = pie_objects.SymmetricKey(
            enums.CryptographicAlgorithm.AES,
            8,
            b'\x00'
        )
        # Test that multiple duplicate names cannot be set on an object.
        name_a = attribute_factory.create_attribute(
            enums.AttributeType.NAME,
            attributes.Name.create(
                'Test Symmetric Key',
                enums.NameType.UNINTERPRETED_TEXT_STRING
            )
        )
        name_b = attribute_factory.create_attribute(
            enums.AttributeType.NAME,
            attributes.Name.create(
                'Test Symmetric Key',
                enums.NameType.UNINTERPRETED_TEXT_STRING
            )
        )
        args = (
            managed_object,
            ('Name', [name_a.attribute_value, name_b.attribute_value])
        )
        regex = "Cannot set duplicate name values."
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._set_attribute_on_managed_object,
            *args
        )
        # Test that a multivalued, unsupported attribute cannot be set on an
        # object.
        name_a = attribute_factory.create_attribute(
            enums.AttributeType.NAME,
            attributes.Name.create(
                'Test Symmetric Key',
                enums.NameType.UNINTERPRETED_TEXT_STRING
            )
        )
        name_b = attribute_factory.create_attribute(
            enums.AttributeType.NAME,
            attributes.Name.create(
                'Test Symmetric Key',
                enums.NameType.UNINTERPRETED_TEXT_STRING
            )
        )
        args = (
            managed_object,
            ('Digest', [name_a.attribute_value, name_b.attribute_value])
        )
        regex = "The Digest attribute is unsupported."
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._set_attribute_on_managed_object,
            *args
        )
        # Test that a set attribute cannot be overwritten.
        # The key above was created with length 8, so setting 128 must fail.
        length = attribute_factory.create_attribute(
            enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
            128
        )
        args = (
            managed_object,
            ('Cryptographic Length', length.attribute_value)
        )
        regex = "Cannot overwrite the Cryptographic Length attribute."
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._set_attribute_on_managed_object,
            *args
        )
        # Test that an unsupported attribute cannot be set.
        object_group = attribute_factory.create_attribute(
            enums.AttributeType.OBJECT_GROUP,
            'Test Group'
        )
        args = (
            managed_object,
            ('Object Group', object_group.attribute_value)
        )
        regex = "The Object Group attribute is unsupported."
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._set_attribute_on_managed_object,
            *args
        )
def test_is_allowed_by_operation_policy(self):
"""
Test that an allowed operation is correctly allowed by the operation
policy.
"""
e = engine.KmipEngine()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.ALLOW_OWNER
}
}
}
is_allowed = e._is_allowed_by_operation_policy(
'test',
'test',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertTrue(is_allowed)
def test_is_allowed_by_operation_policy_blocked(self):
"""
Test that an unallowed operation is correctly blocked by the operation
policy.
"""
e = engine.KmipEngine()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.ALLOW_OWNER
}
}
}
is_allowed = e._is_allowed_by_operation_policy(
'test',
'random',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
def test_is_allowed_by_operation_public(self):
"""
Test that a public operation is allowed by the operation policy.
"""
e = engine.KmipEngine()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.ALLOW_ALL
}
}
}
is_allowed = e._is_allowed_by_operation_policy(
'test',
'test',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertTrue(is_allowed)
is_allowed = e._is_allowed_by_operation_policy(
'test',
'random',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertTrue(is_allowed)
def test_is_allowed_by_operation_block_all(self):
"""
Test that a blocked operation is blocked by the operation policy.
"""
e = engine.KmipEngine()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.DISALLOW_ALL
}
}
}
is_allowed = e._is_allowed_by_operation_policy(
'test',
'test',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
is_allowed = e._is_allowed_by_operation_policy(
'test',
'random',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
def test_is_allowed_by_operation_safety_check(self):
"""
Test that an unknown operation is blocked by the operation policy.
"""
e = engine.KmipEngine()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: 'unknown value'
}
}
}
is_allowed = e._is_allowed_by_operation_policy(
'test',
'test',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
is_allowed = e._is_allowed_by_operation_policy(
'test',
'random',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
def test_is_allowed_by_operation_policy_nonexistent_policy(self):
"""
Test that a check with a non-existent policy yields a logging warning
and a blocked operation.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
policy = 'nonexistent-policy'
is_allowed = e._is_allowed_by_operation_policy(
policy,
'test',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
e._logger.warning.assert_called_once_with(
"The '{0}' policy does not exist.".format(policy)
)
def test_is_allowed_by_operation_policy_not_object_applicable(self):
"""
Test that a check for an object with a non-applicable policy yields
a logging warning and a blocked operation.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.ALLOW_OWNER
}
}
}
policy = 'test'
object_type = enums.ObjectType.PRIVATE_KEY
is_allowed = e._is_allowed_by_operation_policy(
policy,
'test',
'test',
object_type,
enums.Operation.GET
)
self.assertFalse(is_allowed)
e._logger.warning.assert_called_once_with(
"The '{0}' policy does not apply to {1} objects.".format(
policy,
e._get_enum_string(object_type)
)
)
def test_is_allowed_by_operation_policy_not_applicable(self):
"""
Test that a check with a non-applicable policy yields a logging
warning and a blocked operation.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.ALLOW_OWNER
}
}
}
policy = 'test'
object_type = enums.ObjectType.SYMMETRIC_KEY
operation = enums.Operation.CREATE
is_allowed = e._is_allowed_by_operation_policy(
policy,
'test',
'test',
object_type,
operation
)
self.assertFalse(is_allowed)
e._logger.warning.assert_called_once_with(
"The '{0}' policy does not apply to {1} operations on {2} "
"objects.".format(
policy,
e._get_enum_string(operation),
e._get_enum_string(object_type)
)
)
def test_get_object_with_access_controls(self):
"""
Test that an unallowed object access request is handled correctly.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
e._client_identity = 'test'
obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
obj_a._owner = 'admin'
e._data_session.add(obj_a)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
id_a = str(obj_a.unique_identifier)
# Test by specifying the ID of the object to retrieve and the
# operation context.
args = [id_a, enums.Operation.GET]
six.assertRaisesRegex(
self,
exceptions.ItemNotFound,
"Could not locate object: {0}".format(id_a),
e._get_object_with_access_controls,
*args
)
    def test_create(self):
        """
        Test that a Create request can be processed correctly.

        Builds a Create request for an AES-256 symmetric key, processes it,
        and then reloads the stored object to verify every attribute was
        persisted as requested.
        """
        # Engine wired to the test data store with a mocked logger.
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        attribute_factory = factory.AttributeFactory()
        # Build Create request
        object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
        template_attribute = objects.TemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Symmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.AES
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    256
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.ENCRYPT,
                        enums.CryptographicUsageMask.DECRYPT
                    ]
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.OPERATION_POLICY_NAME,
                    'test'
                )
            ]
        )
        payload = create.CreateRequestPayload(
            object_type,
            template_attribute
        )
        response_payload = e._process_create(payload)
        # Commit, then reopen a fresh session so the verification below reads
        # persisted state rather than in-memory state.
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: Create"
        )
        uid = response_payload.unique_identifier.value
        self.assertEqual('1', uid)
        # Retrieve the stored object and verify all attributes were set
        # appropriately.
        symmetric_key = e._data_session.query(
            pie_objects.SymmetricKey
        ).filter(
            pie_objects.ManagedObject.unique_identifier == uid
        ).one()
        self.assertEqual(
            enums.KeyFormatType.RAW,
            symmetric_key.key_format_type
        )
        self.assertEqual(1, len(symmetric_key.names))
        self.assertIn('Test Symmetric Key', symmetric_key.names)
        # 256-bit key -> 32 bytes of generated key material.
        self.assertEqual(256, len(symmetric_key.value) * 8)
        self.assertEqual(
            enums.CryptographicAlgorithm.AES,
            symmetric_key.cryptographic_algorithm
        )
        self.assertEqual(256, symmetric_key.cryptographic_length)
        self.assertEqual(2, len(symmetric_key.cryptographic_usage_masks))
        self.assertIn(
            enums.CryptographicUsageMask.ENCRYPT,
            symmetric_key.cryptographic_usage_masks
        )
        self.assertIn(
            enums.CryptographicUsageMask.DECRYPT,
            symmetric_key.cryptographic_usage_masks
        )
        self.assertEqual('test', symmetric_key.operation_policy_name)
        self.assertIsNotNone(symmetric_key.initial_date)
        self.assertNotEqual(0, symmetric_key.initial_date)
        # The engine's ID placeholder should track the newly created object.
        self.assertEqual(uid, e._id_placeholder)
def test_create_unsupported_object_type(self):
"""
Test that an InvalidField error is generated when attempting to
create an unsupported object type.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
object_type = attributes.ObjectType(enums.ObjectType.PUBLIC_KEY)
payload = create.CreateRequestPayload(
object_type
)
args = (payload, )
regex = "Cannot create a PublicKey object with the Create operation."
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._process_create,
*args
)
e._logger.info.assert_any_call(
"Processing operation: Create"
)
    def test_create_omitting_attributes(self):
        """
        Test that InvalidField errors are generated when trying to create
        a symmetric key without required attributes.

        Each of the three required attributes (Cryptographic Algorithm,
        Cryptographic Length, Cryptographic Usage Mask) is omitted in turn
        and the matching error message is verified.
        """
        # Engine wired to the test data store with a mocked logger.
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        attribute_factory = factory.AttributeFactory()
        # Test the error for omitting the Cryptographic Algorithm
        object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
        template_attribute = objects.TemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Symmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    256
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.ENCRYPT,
                        enums.CryptographicUsageMask.DECRYPT
                    ]
                )
            ]
        )
        payload = create.CreateRequestPayload(
            object_type,
            template_attribute
        )
        args = (payload, )
        regex = (
            "The cryptographic algorithm must be specified as an attribute."
        )
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_create,
            *args
        )
        e._logger.info.assert_any_call(
            "Processing operation: Create"
        )
        # Reset between scenarios so assert_any_call only sees fresh calls.
        e._logger.reset_mock()
        # Test the error for omitting the Cryptographic Length
        object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
        template_attribute = objects.TemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Symmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.AES
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.ENCRYPT,
                        enums.CryptographicUsageMask.DECRYPT
                    ]
                )
            ]
        )
        payload = create.CreateRequestPayload(
            object_type,
            template_attribute
        )
        args = (payload, )
        regex = (
            "The cryptographic length must be specified as an attribute."
        )
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_create,
            *args
        )
        e._logger.info.assert_any_call(
            "Processing operation: Create"
        )
        e._logger.reset_mock()
        # Test the error for omitting the Cryptographic Usage Mask
        object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
        template_attribute = objects.TemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Symmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.AES
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    256
                )
            ]
        )
        payload = create.CreateRequestPayload(
            object_type,
            template_attribute
        )
        args = (payload, )
        regex = (
            "The cryptographic usage mask must be specified as an attribute."
        )
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_create,
            *args
        )
        e._logger.info.assert_any_call(
            "Processing operation: Create"
        )
        e._logger.reset_mock()
    def test_create_key_pair(self):
        """
        Test that a CreateKeyPair request can be processed correctly.

        Shared key attributes (name, algorithm, length) go in the common
        template; each key gets its own usage mask. Both stored keys are
        reloaded and verified afterwards.
        """
        # Engine wired to the test data store with a mocked logger.
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        attribute_factory = factory.AttributeFactory()
        common_template = objects.CommonTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Asymmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                )
            ]
        )
        public_template = objects.PublicKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.ENCRYPT
                    ]
                )
            ]
        )
        private_template = objects.PrivateKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.DECRYPT
                    ]
                )
            ]
        )
        payload = create_key_pair.CreateKeyPairRequestPayload(
            common_template,
            private_template,
            public_template
        )
        response_payload = e._process_create_key_pair(payload)
        # Commit, then reopen a fresh session so verification reads
        # persisted state.
        e._data_session.commit()
        e._data_session = e._data_store_session_factory()
        e._logger.info.assert_any_call(
            "Processing operation: CreateKeyPair"
        )
        # The public key is stored first ('1'), the private key second ('2').
        public_id = response_payload.public_key_uuid.value
        self.assertEqual('1', public_id)
        private_id = response_payload.private_key_uuid.value
        self.assertEqual('2', private_id)
        # Retrieve the stored public key and verify all attributes were set
        # appropriately.
        public_key = e._data_session.query(
            pie_objects.PublicKey
        ).filter(
            pie_objects.ManagedObject.unique_identifier == public_id
        ).one()
        self.assertEqual(
            enums.KeyFormatType.PKCS_1,
            public_key.key_format_type
        )
        self.assertEqual(1, len(public_key.names))
        self.assertIn('Test Asymmetric Key', public_key.names)
        self.assertEqual(
            enums.CryptographicAlgorithm.RSA,
            public_key.cryptographic_algorithm
        )
        self.assertEqual(2048, public_key.cryptographic_length)
        self.assertEqual(1, len(public_key.cryptographic_usage_masks))
        self.assertIn(
            enums.CryptographicUsageMask.ENCRYPT,
            public_key.cryptographic_usage_masks
        )
        self.assertEqual('default', public_key.operation_policy_name)
        self.assertIsNotNone(public_key.initial_date)
        self.assertNotEqual(0, public_key.initial_date)
        # Retrieve the stored private key and verify all attributes were set
        # appropriately.
        private_key = e._data_session.query(
            pie_objects.PrivateKey
        ).filter(
            pie_objects.ManagedObject.unique_identifier == private_id
        ).one()
        self.assertEqual(
            enums.KeyFormatType.PKCS_8,
            private_key.key_format_type
        )
        self.assertEqual(1, len(private_key.names))
        self.assertIn('Test Asymmetric Key', private_key.names)
        self.assertEqual(
            enums.CryptographicAlgorithm.RSA,
            private_key.cryptographic_algorithm
        )
        self.assertEqual(2048, private_key.cryptographic_length)
        self.assertEqual(1, len(private_key.cryptographic_usage_masks))
        self.assertIn(
            enums.CryptographicUsageMask.DECRYPT,
            private_key.cryptographic_usage_masks
        )
        self.assertEqual('default', private_key.operation_policy_name)
        self.assertIsNotNone(private_key.initial_date)
        self.assertNotEqual(0, private_key.initial_date)
        # The ID placeholder tracks the last object created (the private key).
        self.assertEqual(private_id, e._id_placeholder)
    def test_create_key_pair_omitting_attributes(self):
        """
        Test that the right errors are generated when required attributes
        are missing from a CreateKeyPair request.

        Each scenario below omits exactly one required attribute
        (Cryptographic Algorithm, Length, or Usage Mask) from either the
        public or private key template and verifies the matching
        InvalidField error message.
        """
        # Engine wired to the test data store with a mocked logger.
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        attribute_factory = factory.AttributeFactory()
        # Test that a missing PublicKey CryptographicAlgorithm raises an error
        common_template = objects.CommonTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Asymmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                )
            ]
        )
        public_template = objects.PublicKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.ENCRYPT
                    ]
                )
            ]
        )
        private_template = objects.PrivateKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.DECRYPT
                    ]
                )
            ]
        )
        payload = create_key_pair.CreateKeyPairRequestPayload(
            common_template,
            private_template,
            public_template
        )
        args = (payload, )
        regex = (
            "The cryptographic algorithm must be specified as an attribute "
            "for the public key."
        )
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_create_key_pair,
            *args
        )
        e._logger.info.assert_any_call(
            "Processing operation: CreateKeyPair"
        )
        # Reset between scenarios so assert_any_call only sees fresh calls.
        e._logger.reset_mock()
        # Test that a missing PrivateKey CryptographicAlgorithm raises an error
        common_template = objects.CommonTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Asymmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                )
            ]
        )
        public_template = objects.PublicKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.ENCRYPT
                    ]
                )
            ]
        )
        private_template = objects.PrivateKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.DECRYPT
                    ]
                )
            ]
        )
        payload = create_key_pair.CreateKeyPairRequestPayload(
            common_template,
            private_template,
            public_template
        )
        args = (payload, )
        regex = (
            "The cryptographic algorithm must be specified as an attribute "
            "for the private key."
        )
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_create_key_pair,
            *args
        )
        e._logger.info.assert_any_call(
            "Processing operation: CreateKeyPair"
        )
        e._logger.reset_mock()
        # Test that a missing PublicKey CryptographicLength raises an error
        common_template = objects.CommonTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Asymmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                )
            ]
        )
        public_template = objects.PublicKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.ENCRYPT
                    ]
                )
            ]
        )
        private_template = objects.PrivateKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.DECRYPT
                    ]
                )
            ]
        )
        payload = create_key_pair.CreateKeyPairRequestPayload(
            common_template,
            private_template,
            public_template
        )
        args = (payload, )
        regex = (
            "The cryptographic length must be specified as an attribute for "
            "the public key."
        )
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_create_key_pair,
            *args
        )
        e._logger.info.assert_any_call(
            "Processing operation: CreateKeyPair"
        )
        e._logger.reset_mock()
        # Test that a missing PrivateKey CryptographicLength raises an error
        common_template = objects.CommonTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Asymmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                )
            ]
        )
        public_template = objects.PublicKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.ENCRYPT
                    ]
                )
            ]
        )
        private_template = objects.PrivateKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.DECRYPT
                    ]
                )
            ]
        )
        payload = create_key_pair.CreateKeyPairRequestPayload(
            common_template,
            private_template,
            public_template
        )
        args = (payload, )
        regex = (
            "The cryptographic length must be specified as an attribute for "
            "the private key."
        )
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_create_key_pair,
            *args
        )
        e._logger.info.assert_any_call(
            "Processing operation: CreateKeyPair"
        )
        e._logger.reset_mock()
        # Test that a missing PublicKey CryptographicUsageMask raises an error
        common_template = objects.CommonTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Asymmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                )
            ]
        )
        public_template = objects.PublicKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                )
            ]
        )
        private_template = objects.PrivateKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.DECRYPT
                    ]
                )
            ]
        )
        payload = create_key_pair.CreateKeyPairRequestPayload(
            common_template,
            private_template,
            public_template
        )
        args = (payload, )
        regex = (
            "The cryptographic usage mask must be specified as an attribute "
            "for the public key."
        )
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_create_key_pair,
            *args
        )
        e._logger.info.assert_any_call(
            "Processing operation: CreateKeyPair"
        )
        e._logger.reset_mock()
        # Test that a missing PrivateKey CryptographicUsageMask raises an error
        common_template = objects.CommonTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Asymmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                )
            ]
        )
        public_template = objects.PublicKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.ENCRYPT
                    ]
                )
            ]
        )
        private_template = objects.PrivateKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                )
            ]
        )
        payload = create_key_pair.CreateKeyPairRequestPayload(
            common_template,
            private_template,
            public_template
        )
        args = (payload, )
        regex = (
            "The cryptographic usage mask must be specified as an attribute "
            "for the private key."
        )
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_create_key_pair,
            *args
        )
        e._logger.info.assert_any_call(
            "Processing operation: CreateKeyPair"
        )
        e._logger.reset_mock()
    def test_create_key_pair_mismatched_attributes(self):
        """
        Test that the right errors are generated when required attributes
        are mismatched in a CreateKeyPair request.

        Two scenarios: the public/private templates disagree on the
        cryptographic algorithm, then on the cryptographic length.
        """
        # Engine wired to the test data store with a mocked logger.
        e = engine.KmipEngine()
        e._data_store = self.engine
        e._data_store_session_factory = self.session_factory
        e._data_session = e._data_store_session_factory()
        e._logger = mock.MagicMock()
        attribute_factory = factory.AttributeFactory()
        # Test that mismatched CryptographicAlgorithms raise an error.
        common_template = objects.CommonTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Asymmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                )
            ]
        )
        public_template = objects.PublicKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.ENCRYPT
                    ]
                )
            ]
        )
        # Private key requests DSA while the public key requested RSA.
        private_template = objects.PrivateKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.DSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.DECRYPT
                    ]
                )
            ]
        )
        payload = create_key_pair.CreateKeyPairRequestPayload(
            common_template,
            private_template,
            public_template
        )
        args = (payload, )
        regex = (
            "The public and private key algorithms must be the same."
        )
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_create_key_pair,
            *args
        )
        e._logger.info.assert_any_call(
            "Processing operation: CreateKeyPair"
        )
        e._logger.reset_mock()
        # Test that mismatched CryptographicLengths raise an error.
        common_template = objects.CommonTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.NAME,
                    attributes.Name.create(
                        'Test Asymmetric Key',
                        enums.NameType.UNINTERPRETED_TEXT_STRING
                    )
                )
            ]
        )
        public_template = objects.PublicKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    2048
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.ENCRYPT
                    ]
                )
            ]
        )
        # Private key requests 4096 bits while the public key requested 2048.
        private_template = objects.PrivateKeyTemplateAttribute(
            attributes=[
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                    enums.CryptographicAlgorithm.RSA
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                    4096
                ),
                attribute_factory.create_attribute(
                    enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                    [
                        enums.CryptographicUsageMask.DECRYPT
                    ]
                )
            ]
        )
        payload = create_key_pair.CreateKeyPairRequestPayload(
            common_template,
            private_template,
            public_template
        )
        args = (payload, )
        regex = (
            "The public and private key lengths must be the same."
        )
        six.assertRaisesRegex(
            self,
            exceptions.InvalidField,
            regex,
            e._process_create_key_pair,
            *args
        )
        e._logger.info.assert_any_call(
            "Processing operation: CreateKeyPair"
        )
        e._logger.reset_mock()
def test_register(self):
    """
    Test that a Register request can be processed correctly.

    Registers a 128-bit AES symmetric key with a full set of template
    attributes, then re-reads the stored object from the data store and
    verifies every attribute round-tripped.
    """
    # Wire a fresh engine to the test fixture's data store/session factory.
    e = engine.KmipEngine()
    e._data_store = self.engine
    e._data_store_session_factory = self.session_factory
    e._data_session = e._data_store_session_factory()
    e._logger = mock.MagicMock()

    attribute_factory = factory.AttributeFactory()

    # Build a SymmetricKey for registration.
    object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
    template_attribute = objects.TemplateAttribute(
        attributes=[
            attribute_factory.create_attribute(
                enums.AttributeType.NAME,
                attributes.Name.create(
                    'Test Symmetric Key',
                    enums.NameType.UNINTERPRETED_TEXT_STRING
                )
            ),
            attribute_factory.create_attribute(
                enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                enums.CryptographicAlgorithm.AES
            ),
            attribute_factory.create_attribute(
                enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                128
            ),
            attribute_factory.create_attribute(
                enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                [
                    enums.CryptographicUsageMask.ENCRYPT,
                    enums.CryptographicUsageMask.DECRYPT
                ]
            ),
            attribute_factory.create_attribute(
                enums.AttributeType.OPERATION_POLICY_NAME,
                'test'
            )
        ]
    )
    # 16 zero bytes = 128-bit key material, matching CRYPTOGRAPHIC_LENGTH.
    key_bytes = (
        b'\x00\x00\x00\x00\x00\x00\x00\x00'
        b'\x00\x00\x00\x00\x00\x00\x00\x00'
    )
    secret = secrets.SymmetricKey(
        key_block=objects.KeyBlock(
            key_format_type=misc.KeyFormatType(enums.KeyFormatType.RAW),
            key_value=objects.KeyValue(
                key_material=objects.KeyMaterial(key_bytes)
            ),
            cryptographic_algorithm=attributes.CryptographicAlgorithm(
                enums.CryptographicAlgorithm.AES
            ),
            cryptographic_length=attributes.CryptographicLength(128)
        )
    )

    payload = register.RegisterRequestPayload(
        object_type=object_type,
        template_attribute=template_attribute,
        secret=secret
    )

    response_payload = e._process_register(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Register"
    )

    # First object registered in a clean store gets identifier '1'.
    uid = response_payload.unique_identifier.value
    self.assertEqual('1', uid)

    # Retrieve the stored object and verify all attributes were set
    # appropriately.
    symmetric_key = e._data_session.query(
        pie_objects.SymmetricKey
    ).filter(
        pie_objects.ManagedObject.unique_identifier == uid
    ).one()

    self.assertEqual(
        enums.KeyFormatType.RAW,
        symmetric_key.key_format_type
    )
    self.assertEqual(1, len(symmetric_key.names))
    self.assertIn('Test Symmetric Key', symmetric_key.names)
    self.assertEqual(key_bytes, symmetric_key.value)
    self.assertEqual(
        enums.CryptographicAlgorithm.AES,
        symmetric_key.cryptographic_algorithm
    )
    self.assertEqual(128, symmetric_key.cryptographic_length)
    self.assertEqual(2, len(symmetric_key.cryptographic_usage_masks))
    self.assertIn(
        enums.CryptographicUsageMask.ENCRYPT,
        symmetric_key.cryptographic_usage_masks
    )
    self.assertIn(
        enums.CryptographicUsageMask.DECRYPT,
        symmetric_key.cryptographic_usage_masks
    )
    self.assertEqual('test', symmetric_key.operation_policy_name)
    # The server stamps an initial date on registration.
    self.assertIsNotNone(symmetric_key.initial_date)
    self.assertNotEqual(0, symmetric_key.initial_date)

    # Register also updates the engine's ID placeholder.
    self.assertEqual(uid, e._id_placeholder)
def test_register_unsupported_object_type(self):
    """
    Test that an InvalidField error is generated when attempting to
    register an unsupported object type.
    """
    # Wire a fresh engine to the test fixture's data store.
    kmip_engine = engine.KmipEngine()
    kmip_engine._data_store = self.engine
    kmip_engine._data_store_session_factory = self.session_factory
    kmip_engine._data_session = kmip_engine._data_store_session_factory()
    kmip_engine._logger = mock.MagicMock()

    # SplitKey registration is not supported and must be rejected.
    payload = register.RegisterRequestPayload(
        object_type=attributes.ObjectType(enums.ObjectType.SPLIT_KEY)
    )

    six.assertRaisesRegex(
        self,
        exceptions.InvalidField,
        "The SplitKey object type is not supported.",
        kmip_engine._process_register,
        payload
    )
def test_request_omitting_secret(self):
    """
    Test that an InvalidField error is generated when trying to register
    a secret in absentia.
    """
    # Wire a fresh engine to the test fixture's data store.
    e = engine.KmipEngine()
    e._data_store = self.engine
    e._data_store_session_factory = self.session_factory
    e._data_session = e._data_store_session_factory()
    e._logger = mock.MagicMock()

    # A Register payload that names an object type but carries no secret.
    object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
    payload = register.RegisterRequestPayload(object_type=object_type)

    args = (payload, )
    regex = "Cannot register a secret in absentia."
    six.assertRaisesRegex(
        self,
        exceptions.InvalidField,
        regex,
        e._process_register,
        *args
    )
def test_locate(self):
    """
    Test that a Locate request can be processed correctly.

    Checks the result set as objects are added one at a time: first
    empty, then one identifier, then both identifiers.
    """
    # TODO: Add more extensive tests once the Locate operation is
    # fully supported.
    e = engine.KmipEngine()
    e._data_store = self.engine
    e._data_store_session_factory = self.session_factory
    e._data_session = e._data_store_session_factory()
    e._logger = mock.MagicMock()

    obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
    obj_b = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)

    # Locate should return nothing before any objects are stored.
    payload = locate.LocateRequestPayload()
    response_payload = e._process_locate(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Locate"
    )
    self.assertEqual(
        len(response_payload.unique_identifiers),
        0
    )

    # Add the first obj and test the locate
    e._data_session.add(obj_a)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()
    id_a = str(obj_a.unique_identifier)

    payload = locate.LocateRequestPayload()
    e._logger.reset_mock()
    response_payload = e._process_locate(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Locate"
    )
    self.assertEqual(
        len(response_payload.unique_identifiers),
        1
    )
    self.assertEqual(
        id_a,
        response_payload.unique_identifiers[0].value
    )

    # Add the second obj and test the locate
    e._data_session.add(obj_b)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()
    id_b = str(obj_b.unique_identifier)

    payload = locate.LocateRequestPayload()
    e._logger.reset_mock()
    response_payload = e._process_locate(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Locate"
    )
    # Both objects should now be located; order is not asserted.
    self.assertEqual(
        len(response_payload.unique_identifiers),
        2
    )
    self.assertIn(
        id_a,
        [uid.value for uid in response_payload.unique_identifiers]
    )
    self.assertIn(
        id_b,
        [uid.value for uid in response_payload.unique_identifiers]
    )
def test_get(self):
    """
    Test that a Get request can be processed correctly.

    Covers both ways of naming the target object: an explicit unique
    identifier in the payload, and the engine's ID placeholder.
    """
    e = engine.KmipEngine()
    e._data_store = self.engine
    e._data_store_session_factory = self.session_factory
    e._data_session = e._data_store_session_factory()
    e._logger = mock.MagicMock()

    obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
    obj_b = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)

    e._data_session.add(obj_a)
    e._data_session.add(obj_b)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    id_a = str(obj_a.unique_identifier)
    id_b = str(obj_b.unique_identifier)

    # Test by specifying the ID of the object to get.
    payload = get.GetRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(id_a)
    )

    response_payload = e._process_get(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Get"
    )
    self.assertEqual(
        enums.ObjectType.OPAQUE_DATA,
        response_payload.object_type.value
    )
    self.assertEqual(str(id_a), response_payload.unique_identifier.value)
    self.assertIsInstance(response_payload.secret, secrets.OpaqueObject)
    self.assertEqual(
        enums.OpaqueDataType.NONE,
        response_payload.secret.opaque_data_type.value
    )
    self.assertEqual(
        b'',
        response_payload.secret.opaque_data_value.value
    )

    e._data_session.commit()
    # Bug fix: the original called the session factory here and discarded
    # the result instead of assigning the fresh session.
    e._data_session = e._data_store_session_factory()
    e._logger.reset_mock()
    e._id_placeholder = str(id_b)

    # Test by using the ID placeholder to specify the object to get.
    payload = get.GetRequestPayload()

    response_payload = e._process_get(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Get"
    )
    self.assertEqual(
        enums.ObjectType.OPAQUE_DATA,
        response_payload.object_type.value
    )
    self.assertEqual(str(id_b), response_payload.unique_identifier.value)
    self.assertIsInstance(response_payload.secret, secrets.OpaqueObject)
    self.assertEqual(
        enums.OpaqueDataType.NONE,
        response_payload.secret.opaque_data_type.value
    )
    self.assertEqual(
        b'',
        response_payload.secret.opaque_data_value.value
    )

    e._data_session.commit()
def test_get_with_unsupported_features(self):
    """
    Test that the right errors are generated when unsupported features
    are used in a Get request.

    Covers two unsupported options: key compression and key wrapping.
    """
    e = engine.KmipEngine()
    e._data_store = self.engine
    e._data_store_session_factory = self.session_factory
    e._data_session = e._data_store_session_factory()
    e._logger = mock.MagicMock()

    # Test that specifying the key compression type generates an error.
    payload = get.GetRequestPayload(
        key_compression_type=get.GetRequestPayload.KeyCompressionType(
            enums.KeyCompressionType.EC_PUBLIC_KEY_TYPE_UNCOMPRESSED
        )
    )

    args = (payload, )
    regex = "Key compression is not supported."
    six.assertRaisesRegex(
        self,
        exceptions.KeyCompressionTypeNotSupported,
        regex,
        e._process_get,
        *args
    )
    e._logger.info.assert_any_call(
        "Processing operation: Get"
    )
    e._logger.reset_mock()

    # Test that specifying the key wrapping specification generates an
    # error.
    payload = get.GetRequestPayload(
        key_wrapping_specification=objects.KeyWrappingSpecification()
    )

    args = (payload, )
    regex = "Key wrapping is not supported."
    six.assertRaisesRegex(
        self,
        exceptions.PermissionDenied,
        regex,
        e._process_get,
        *args
    )
    e._logger.info.assert_any_call(
        "Processing operation: Get"
    )
def test_get_with_key_format_type(self):
    """
    Test that the key format type is handled properly in a Get request.

    Covers three cases: a matching format (RAW on a raw symmetric key),
    an unsupported format conversion, and a format that does not apply
    to the object type at all.
    """
    e = engine.KmipEngine()
    e._data_store = self.engine
    e._data_store_session_factory = self.session_factory
    e._data_session = e._data_store_session_factory()
    e._logger = mock.MagicMock()

    obj_a = pie_objects.SymmetricKey(
        enums.CryptographicAlgorithm.AES,
        0,
        b''
    )

    e._data_session.add(obj_a)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    id_a = str(obj_a.unique_identifier)

    # Test that a key can be retrieved with the right key format.
    payload = get.GetRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(id_a),
        key_format_type=get.GetRequestPayload.KeyFormatType(
            enums.KeyFormatType.RAW
        )
    )

    response_payload = e._process_get(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Get"
    )

    self.assertIsInstance(response_payload.secret, secrets.SymmetricKey)
    self.assertEqual(
        enums.CryptographicAlgorithm.AES,
        response_payload.secret.key_block.cryptographic_algorithm.value
    )
    self.assertEqual(
        0,
        response_payload.secret.key_block.cryptographic_length.value
    )
    self.assertEqual(
        b'',
        response_payload.secret.key_block.key_value.key_material.value
    )
    self.assertEqual(
        enums.KeyFormatType.RAW,
        response_payload.secret.key_block.key_format_type.value
    )

    # Test that an error is generated when a key format conversion is
    # required.
    e._logger.reset_mock()

    payload = get.GetRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(id_a),
        key_format_type=get.GetRequestPayload.KeyFormatType(
            enums.KeyFormatType.OPAQUE
        )
    )

    args = (payload, )
    regex = "Key format conversion from RAW to OPAQUE is unsupported."
    six.assertRaisesRegex(
        self,
        exceptions.KeyFormatTypeNotSupported,
        regex,
        e._process_get,
        *args
    )
    e._logger.info.assert_any_call(
        "Processing operation: Get"
    )

    # Test that an error is generated when a key format is requested but
    # does not apply to the given managed object.
    e._data_session = e._data_store_session_factory()
    e._logger.reset_mock()

    # Opaque objects have no key block, so any key format is invalid.
    obj_b = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)

    e._data_session.add(obj_b)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    id_b = str(obj_b.unique_identifier)

    payload = get.GetRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(id_b),
        key_format_type=get.GetRequestPayload.KeyFormatType(
            enums.KeyFormatType.RAW
        )
    )

    args = (payload, )
    regex = "Key format is not applicable to the specified object."
    six.assertRaisesRegex(
        self,
        exceptions.KeyFormatTypeNotSupported,
        regex,
        e._process_get,
        *args
    )
    e._logger.info.assert_any_call(
        "Processing operation: Get"
    )
def test_get_not_allowed_by_policy(self):
    """
    Test that an unallowed request is handled correctly by Get.
    """
    kmip_engine = engine.KmipEngine()
    kmip_engine._data_store = self.engine
    kmip_engine._data_store_session_factory = self.session_factory
    kmip_engine._data_session = kmip_engine._data_store_session_factory()
    kmip_engine._logger = mock.MagicMock()
    kmip_engine._client_identity = 'test'

    # Store an object owned by a different user.
    stored_object = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
    stored_object._owner = 'admin'

    kmip_engine._data_session.add(stored_object)
    kmip_engine._data_session.commit()
    kmip_engine._data_session = kmip_engine._data_store_session_factory()

    uid = str(stored_object.unique_identifier)

    # Test by specifying the ID of the object to get. Policy denial is
    # reported as if the object did not exist.
    payload = get.GetRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(uid)
    )
    six.assertRaisesRegex(
        self,
        exceptions.ItemNotFound,
        "Could not locate object: {0}".format(uid),
        kmip_engine._process_get,
        payload
    )
def test_get_attributes(self):
    """
    Test that a GetAttributes request can be processed correctly.

    Requests two named attributes of a stored symmetric key and checks
    that exactly those two come back.
    """
    e = engine.KmipEngine()
    e._data_store = self.engine
    e._data_store_session_factory = self.session_factory
    e._data_session = e._data_store_session_factory()
    e._logger = mock.MagicMock()

    secret = pie_objects.SymmetricKey(
        enums.CryptographicAlgorithm.AES,
        0,
        b''
    )

    e._data_session.add(secret)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    payload = get_attributes.GetAttributesRequestPayload(
        unique_identifier='1',
        attribute_names=['Object Type', 'Cryptographic Algorithm']
    )

    response_payload = e._process_get_attributes(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: GetAttributes"
    )
    self.assertEqual(
        '1',
        response_payload.unique_identifier
    )
    # Only the two requested attributes should be returned.
    self.assertEqual(
        2,
        len(response_payload.attributes)
    )

    # Build equivalent Attribute structures to compare against.
    attribute_factory = factory.AttributeFactory()

    attribute = attribute_factory.create_attribute(
        enums.AttributeType.OBJECT_TYPE,
        enums.ObjectType.SYMMETRIC_KEY
    )
    self.assertIn(attribute, response_payload.attributes)

    attribute = attribute_factory.create_attribute(
        enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
        enums.CryptographicAlgorithm.AES
    )
    self.assertIn(attribute, response_payload.attributes)
def test_get_attributes_with_no_arguments(self):
    """
    Test that a GetAttributes request with no arguments can be processed
    correctly.

    With no identifier the engine falls back to the ID placeholder, and
    with no attribute names it returns all attributes of the object.
    """
    e = engine.KmipEngine()
    e._data_store = self.engine
    e._data_store_session_factory = self.session_factory
    e._data_session = e._data_store_session_factory()
    e._logger = mock.MagicMock()

    secret = pie_objects.SymmetricKey(
        enums.CryptographicAlgorithm.AES,
        0,
        b''
    )

    e._data_session.add(secret)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    # The placeholder names the object since the payload is empty.
    e._id_placeholder = '1'

    payload = get_attributes.GetAttributesRequestPayload()

    response_payload = e._process_get_attributes(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: GetAttributes"
    )
    self.assertEqual(
        '1',
        response_payload.unique_identifier
    )
    # Nine attributes total; seven are verified individually below
    # (Name and Initial Date are not reconstructed here).
    self.assertEqual(
        9,
        len(response_payload.attributes)
    )

    attribute_factory = factory.AttributeFactory()

    attribute = attribute_factory.create_attribute(
        enums.AttributeType.OBJECT_TYPE,
        enums.ObjectType.SYMMETRIC_KEY
    )
    self.assertIn(attribute, response_payload.attributes)

    attribute = attribute_factory.create_attribute(
        enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
        enums.CryptographicAlgorithm.AES
    )
    self.assertIn(attribute, response_payload.attributes)

    attribute = attribute_factory.create_attribute(
        enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
        0
    )
    self.assertIn(attribute, response_payload.attributes)

    attribute = attribute_factory.create_attribute(
        enums.AttributeType.OPERATION_POLICY_NAME,
        'default'
    )
    self.assertIn(attribute, response_payload.attributes)

    attribute = attribute_factory.create_attribute(
        enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
        []
    )
    self.assertIn(attribute, response_payload.attributes)

    attribute = attribute_factory.create_attribute(
        enums.AttributeType.STATE,
        enums.State.PRE_ACTIVE
    )
    self.assertIn(attribute, response_payload.attributes)

    attribute = attribute_factory.create_attribute(
        enums.AttributeType.UNIQUE_IDENTIFIER,
        '1'
    )
    self.assertIn(attribute, response_payload.attributes)
def test_get_attributes_not_allowed_by_policy(self):
    """
    Test that an unallowed request is handled correctly by GetAttributes.
    """
    kmip_engine = engine.KmipEngine()
    kmip_engine._data_store = self.engine
    kmip_engine._data_store_session_factory = self.session_factory
    kmip_engine._data_session = kmip_engine._data_store_session_factory()
    kmip_engine._logger = mock.MagicMock()
    kmip_engine._client_identity = 'test'

    # Store an object owned by a different user.
    stored_object = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
    stored_object._owner = 'admin'

    kmip_engine._data_session.add(stored_object)
    kmip_engine._data_session.commit()
    kmip_engine._data_session = kmip_engine._data_store_session_factory()

    uid = str(stored_object.unique_identifier)

    # Test by specifying the ID of the object whose attributes should
    # be retrieved. Policy denial is reported as a missing object.
    payload = get_attributes.GetAttributesRequestPayload(
        unique_identifier=uid
    )
    self.assertRaisesRegex(
        exceptions.ItemNotFound,
        "Could not locate object: {0}".format(uid),
        kmip_engine._process_get_attributes,
        payload
    )
def test_get_attribute_list(self):
    """
    Test that a GetAttributeList request can be processed correctly.
    """
    kmip_engine = engine.KmipEngine()
    kmip_engine._data_store = self.engine
    kmip_engine._data_store_session_factory = self.session_factory
    kmip_engine._data_session = kmip_engine._data_store_session_factory()
    kmip_engine._logger = mock.MagicMock()

    stored_key = pie_objects.SymmetricKey(
        enums.CryptographicAlgorithm.AES,
        0,
        b''
    )
    kmip_engine._data_session.add(stored_key)
    kmip_engine._data_session.commit()
    kmip_engine._data_session = kmip_engine._data_store_session_factory()

    payload = get_attribute_list.GetAttributeListRequestPayload(
        unique_identifier='1'
    )

    response_payload = kmip_engine._process_get_attribute_list(payload)
    kmip_engine._data_session.commit()
    kmip_engine._data_session = kmip_engine._data_store_session_factory()

    kmip_engine._logger.info.assert_any_call(
        "Processing operation: GetAttributeList"
    )
    self.assertEqual(
        '1',
        response_payload.unique_identifier
    )

    # A symmetric key carries exactly these attribute names.
    expected_names = [
        "Object Type",
        "Name",
        "Cryptographic Algorithm",
        "Cryptographic Length",
        "Operation Policy Name",
        "Cryptographic Usage Mask",
        "State",
        "Unique Identifier",
        "Initial Date"
    ]
    self.assertEqual(
        len(expected_names),
        len(response_payload.attribute_names)
    )
    for attribute_name in expected_names:
        self.assertIn(attribute_name, response_payload.attribute_names)
def test_get_attribute_list_with_no_arguments(self):
    """
    Test that a GetAttributeList request with no arguments can be
    processed correctly.

    With no identifier in the payload, the engine falls back to the ID
    placeholder.
    """
    kmip_engine = engine.KmipEngine()
    kmip_engine._data_store = self.engine
    kmip_engine._data_store_session_factory = self.session_factory
    kmip_engine._data_session = kmip_engine._data_store_session_factory()
    kmip_engine._logger = mock.MagicMock()

    stored_key = pie_objects.SymmetricKey(
        enums.CryptographicAlgorithm.AES,
        0,
        b''
    )
    kmip_engine._data_session.add(stored_key)
    kmip_engine._data_session.commit()
    kmip_engine._data_session = kmip_engine._data_store_session_factory()

    # The placeholder names the target object.
    kmip_engine._id_placeholder = '1'

    payload = get_attribute_list.GetAttributeListRequestPayload()

    response_payload = kmip_engine._process_get_attribute_list(payload)
    kmip_engine._data_session.commit()
    kmip_engine._data_session = kmip_engine._data_store_session_factory()

    kmip_engine._logger.info.assert_any_call(
        "Processing operation: GetAttributeList"
    )
    self.assertEqual(
        '1',
        response_payload.unique_identifier
    )

    # A symmetric key carries exactly these attribute names.
    expected_names = [
        "Object Type",
        "Name",
        "Cryptographic Algorithm",
        "Cryptographic Length",
        "Operation Policy Name",
        "Cryptographic Usage Mask",
        "State",
        "Unique Identifier",
        "Initial Date"
    ]
    self.assertEqual(
        len(expected_names),
        len(response_payload.attribute_names)
    )
    for attribute_name in expected_names:
        self.assertIn(attribute_name, response_payload.attribute_names)
def test_get_attribute_list_not_allowed_by_policy(self):
    """
    Test that an unallowed request is handled correctly by
    GetAttributeList.
    """
    kmip_engine = engine.KmipEngine()
    kmip_engine._data_store = self.engine
    kmip_engine._data_store_session_factory = self.session_factory
    kmip_engine._data_session = kmip_engine._data_store_session_factory()
    kmip_engine._logger = mock.MagicMock()
    kmip_engine._client_identity = 'test'

    # Store an object owned by a different user.
    stored_object = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
    stored_object._owner = 'admin'

    kmip_engine._data_session.add(stored_object)
    kmip_engine._data_session.commit()
    kmip_engine._data_session = kmip_engine._data_store_session_factory()

    uid = str(stored_object.unique_identifier)

    # Test by specifying the ID of the object whose attributes should
    # be retrieved. Policy denial is reported as a missing object.
    payload = get_attribute_list.GetAttributeListRequestPayload(
        unique_identifier=uid
    )
    self.assertRaisesRegex(
        exceptions.ItemNotFound,
        "Could not locate object: {0}".format(uid),
        kmip_engine._process_get_attribute_list,
        payload
    )
def test_activate(self):
    """
    Test that an Activate request can be processed correctly.

    Activates a pre-active symmetric key by ID, verifies the state
    transition, and checks that re-activating (by ID and via the ID
    placeholder) is rejected.
    """
    e = engine.KmipEngine()
    e._data_store = self.engine
    e._data_store_session_factory = self.session_factory
    e._data_session = e._data_store_session_factory()
    e._logger = mock.MagicMock()

    managed_object = pie_objects.SymmetricKey(
        enums.CryptographicAlgorithm.AES,
        0,
        b''
    )
    e._data_session.add(managed_object)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    # Newly created keys start out pre-active.
    self.assertEqual(enums.State.PRE_ACTIVE, managed_object.state)

    object_id = str(managed_object.unique_identifier)

    # Test by specifying the ID of the object to activate.
    payload = activate.ActivateRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(object_id)
    )

    response_payload = e._process_activate(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Activate"
    )
    self.assertEqual(
        str(object_id),
        response_payload.unique_identifier.value
    )

    symmetric_key = e._data_session.query(
        pie_objects.SymmetricKey
    ).filter(
        pie_objects.ManagedObject.unique_identifier == object_id
    ).one()
    self.assertEqual(enums.State.ACTIVE, symmetric_key.state)

    # Activating the now-active object again must fail. Use
    # six.assertRaisesRegex for consistency with the rest of the suite;
    # assertRaisesRegexp is deprecated since Python 3.2.
    args = (payload,)
    regex = "The object state is not pre-active and cannot be activated."
    six.assertRaisesRegex(
        self,
        exceptions.PermissionDenied,
        regex,
        e._process_activate,
        *args
    )

    # Test that the ID placeholder can also be used to specify activation.
    e._id_placeholder = str(object_id)
    payload = activate.ActivateRequestPayload()
    args = (payload,)
    regex = "The object state is not pre-active and cannot be activated."
    six.assertRaisesRegex(
        self,
        exceptions.PermissionDenied,
        regex,
        e._process_activate,
        *args
    )
def test_activate_on_static_object(self):
    """
    Test that the right error is generated when an activation request is
    received for an object that cannot be activated.
    """
    e = engine.KmipEngine()
    e._data_store = self.engine
    e._data_store_session_factory = self.session_factory
    e._data_session = e._data_store_session_factory()
    e._logger = mock.MagicMock()

    # Opaque objects have no state machine and cannot be activated.
    managed_object = pie_objects.OpaqueObject(
        b'',
        enums.OpaqueDataType.NONE
    )
    e._data_session.add(managed_object)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    object_id = str(managed_object.unique_identifier)

    # Test by specifying the ID of the object to activate.
    payload = activate.ActivateRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(object_id)
    )

    args = (payload,)
    name = enums.ObjectType.OPAQUE_DATA.name
    # Build the CamelCase type name used in the error message
    # (e.g. OPAQUE_DATA -> OpaqueData).
    regex = "An {0} object has no state and cannot be activated.".format(
        ''.join(
            [x.capitalize() for x in name.split('_')]
        )
    )
    # six.assertRaisesRegex for suite consistency; assertRaisesRegexp is
    # deprecated since Python 3.2.
    six.assertRaisesRegex(
        self,
        exceptions.IllegalOperation,
        regex,
        e._process_activate,
        *args
    )
def test_activate_on_active_object(self):
    """
    Test that the right error is generated when an activation request is
    received for an object that is not pre-active.
    """
    e = engine.KmipEngine()
    e._data_store = self.engine
    e._data_store_session_factory = self.session_factory
    e._data_session = e._data_store_session_factory()
    e._logger = mock.MagicMock()

    managed_object = pie_objects.SymmetricKey(
        enums.CryptographicAlgorithm.AES,
        0,
        b''
    )
    # Force the object into the ACTIVE state before storing it.
    managed_object.state = enums.State.ACTIVE
    e._data_session.add(managed_object)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    object_id = str(managed_object.unique_identifier)

    # Test by specifying the ID of the object to activate.
    payload = activate.ActivateRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(object_id)
    )

    args = (payload,)
    regex = "The object state is not pre-active and cannot be activated."
    # six.assertRaisesRegex for suite consistency; assertRaisesRegexp is
    # deprecated since Python 3.2.
    six.assertRaisesRegex(
        self,
        exceptions.PermissionDenied,
        regex,
        e._process_activate,
        *args
    )
def test_activate_not_allowed_by_policy(self):
    """
    Test that an unallowed request is handled correctly by Activate.
    """
    kmip_engine = engine.KmipEngine()
    kmip_engine._data_store = self.engine
    kmip_engine._data_store_session_factory = self.session_factory
    kmip_engine._data_session = kmip_engine._data_store_session_factory()
    kmip_engine._logger = mock.MagicMock()
    kmip_engine._client_identity = 'test'

    # Store an object owned by a different user.
    stored_object = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
    stored_object._owner = 'admin'

    kmip_engine._data_session.add(stored_object)
    kmip_engine._data_session.commit()
    kmip_engine._data_session = kmip_engine._data_store_session_factory()

    uid = str(stored_object.unique_identifier)

    # Test by specifying the ID of the object to activate. Policy denial
    # is reported as a missing object.
    payload = activate.ActivateRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(uid)
    )
    self.assertRaisesRegex(
        exceptions.ItemNotFound,
        "Could not locate object: {0}".format(uid),
        kmip_engine._process_activate,
        payload
    )
def test_destroy(self):
    """
    Test that a Destroy request can be processed correctly.

    Destroys one object by explicit ID and one via the ID placeholder,
    and verifies each is gone afterwards.
    """
    e = engine.KmipEngine()
    e._data_store = self.engine
    e._data_store_session_factory = self.session_factory
    e._data_session = e._data_store_session_factory()
    e._logger = mock.MagicMock()

    obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
    obj_b = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)

    e._data_session.add(obj_a)
    e._data_session.add(obj_b)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    id_a = str(obj_a.unique_identifier)
    id_b = str(obj_b.unique_identifier)

    # Test by specifying the ID of the object to destroy.
    payload = destroy.DestroyRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(id_a)
    )

    response_payload = e._process_destroy(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Destroy"
    )
    self.assertEqual(str(id_a), response_payload.unique_identifier.value)

    # Destroying the same object again should fail: it is gone.
    args = (payload, )
    regex = "Could not locate object: {0}".format(id_a)
    six.assertRaisesRegex(
        self,
        exceptions.ItemNotFound,
        regex,
        e._process_destroy,
        *args
    )

    e._data_session.commit()
    # Bug fix: the original called the session factory here and discarded
    # the result instead of assigning the fresh session.
    e._data_session = e._data_store_session_factory()
    e._logger.reset_mock()
    e._id_placeholder = str(id_b)

    # Test by using the ID placeholder to specify the object to destroy.
    payload = destroy.DestroyRequestPayload()

    response_payload = e._process_destroy(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Destroy"
    )
    self.assertEqual(str(id_b), response_payload.unique_identifier.value)

    # The second object is gone as well.
    args = (payload, )
    regex = "Could not locate object: {0}".format(id_b)
    six.assertRaisesRegex(
        self,
        exceptions.ItemNotFound,
        regex,
        e._process_destroy,
        *args
    )

    e._data_session.commit()
def test_destroy_not_allowed_by_policy(self):
    """
    Test that an unallowed request is handled correctly by Destroy.
    """
    kmip_engine = engine.KmipEngine()
    kmip_engine._data_store = self.engine
    kmip_engine._data_store_session_factory = self.session_factory
    kmip_engine._data_session = kmip_engine._data_store_session_factory()
    kmip_engine._logger = mock.MagicMock()
    kmip_engine._client_identity = 'test'

    # Store an object owned by a different user.
    stored_object = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
    stored_object._owner = 'admin'

    kmip_engine._data_session.add(stored_object)
    kmip_engine._data_session.commit()
    kmip_engine._data_session = kmip_engine._data_store_session_factory()

    uid = str(stored_object.unique_identifier)

    # Test by specifying the ID of the object to destroy. Policy denial
    # is reported as a missing object.
    payload = destroy.DestroyRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(uid)
    )
    six.assertRaisesRegex(
        self,
        exceptions.ItemNotFound,
        "Could not locate object: {0}".format(uid),
        kmip_engine._process_destroy,
        payload
    )
def test_query(self):
    """
    Test that a Query request can be processed correctly, for different
    versions of KMIP.
    """
    e = engine.KmipEngine()

    # Test for KMIP 1.0.
    e._logger = mock.MagicMock()
    e._protocol_version = contents.ProtocolVersion.create(1, 0)

    payload = query.QueryRequestPayload([
        misc.QueryFunction(enums.QueryFunction.QUERY_OPERATIONS),
        misc.QueryFunction(enums.QueryFunction.QUERY_OBJECTS),
        misc.QueryFunction(
            enums.QueryFunction.QUERY_SERVER_INFORMATION
        ),
        misc.QueryFunction(
            enums.QueryFunction.QUERY_APPLICATION_NAMESPACES
        ),
        misc.QueryFunction(enums.QueryFunction.QUERY_EXTENSION_LIST),
        misc.QueryFunction(enums.QueryFunction.QUERY_EXTENSION_MAP)
    ])

    result = e._process_query(payload)

    e._logger.info.assert_called_once_with("Processing operation: Query")
    self.assertIsInstance(result, query.QueryResponsePayload)
    self.assertIsNotNone(result.operations)

    # KMIP 1.0 advertises exactly these operations, in this order.
    expected_operations = [
        enums.Operation.CREATE,
        enums.Operation.CREATE_KEY_PAIR,
        enums.Operation.REGISTER,
        enums.Operation.GET,
        enums.Operation.GET_ATTRIBUTES,
        enums.Operation.GET_ATTRIBUTE_LIST,
        enums.Operation.ACTIVATE,
        enums.Operation.DESTROY,
        enums.Operation.QUERY
    ]
    self.assertEqual(len(expected_operations), len(result.operations))
    for index, operation in enumerate(expected_operations):
        self.assertEqual(operation, result.operations[index].value)

    self.assertEqual(list(), result.object_types)
    self.assertIsNotNone(result.vendor_identification)
    self.assertEqual(
        "PyKMIP {0} Software Server".format(kmip.__version__),
        result.vendor_identification.value
    )
    self.assertIsNone(result.server_information)
    self.assertEqual(list(), result.application_namespaces)
    self.assertEqual(list(), result.extension_information)

    # Test for KMIP 1.1: DiscoverVersions is appended to the list.
    e._logger = mock.MagicMock()
    e._protocol_version = contents.ProtocolVersion.create(1, 1)

    result = e._process_query(payload)

    e._logger.info.assert_called_once_with("Processing operation: Query")
    self.assertIsNotNone(result.operations)
    self.assertEqual(10, len(result.operations))
    self.assertEqual(
        enums.Operation.DISCOVER_VERSIONS,
        result.operations[-1].value
    )
def test_discover_versions(self):
    """
    Test that a DiscoverVersions request can be processed correctly for
    different inputs.
    """
    e = engine.KmipEngine()

    # Test default request: all supported versions, newest first.
    e._logger = mock.MagicMock()
    payload = discover_versions.DiscoverVersionsRequestPayload()

    result = e._process_discover_versions(payload)

    e._logger.info.assert_called_once_with(
        "Processing operation: DiscoverVersions"
    )
    self.assertIsInstance(
        result,
        discover_versions.DiscoverVersionsResponsePayload
    )
    self.assertIsNotNone(result.protocol_versions)

    expected_versions = [
        contents.ProtocolVersion.create(1, 2),
        contents.ProtocolVersion.create(1, 1),
        contents.ProtocolVersion.create(1, 0)
    ]
    self.assertEqual(len(expected_versions), len(result.protocol_versions))
    for index, version in enumerate(expected_versions):
        self.assertEqual(version, result.protocol_versions[index])

    # Test detailed request: only the requested version is echoed back.
    e._logger = mock.MagicMock()
    payload = discover_versions.DiscoverVersionsRequestPayload([
        contents.ProtocolVersion.create(1, 0)
    ])

    result = e._process_discover_versions(payload)

    e._logger.info.assert_called_once_with(
        "Processing operation: DiscoverVersions"
    )
    self.assertIsNotNone(result.protocol_versions)
    self.assertEqual(1, len(result.protocol_versions))
    self.assertEqual(
        contents.ProtocolVersion.create(1, 0),
        result.protocol_versions[0]
    )

    # Test disjoint request: no supported overlap yields an empty list.
    e._logger = mock.MagicMock()
    payload = discover_versions.DiscoverVersionsRequestPayload([
        contents.ProtocolVersion.create(0, 1)
    ])

    result = e._process_discover_versions(payload)

    e._logger.info.assert_called_once_with(
        "Processing operation: DiscoverVersions"
    )
    self.assertEqual([], result.protocol_versions)
def test_mac(self):
    """
    Test that a MAC request can be processed correctly.

    Two paths are exercised: the request supplies its own MAC algorithm
    (HMAC-SHA512), and the request supplies none so the engine falls back
    to the stored key's algorithm (AES -> CMAC).
    """
    e = engine.KmipEngine()
    e._data_store = self.engine
    e._data_store_session_factory = self.session_factory
    e._data_session = e._data_store_session_factory()
    e._logger = mock.MagicMock()
    e._cryptography_engine.logger = mock.MagicMock()

    # NOTE(review): the key literal looks like a redacted export
    # placeholder (b'\<KEY>') -- confirm against the original fixture.
    key = (b'\<KEY>'
           b'\x00\x00\x00\x00\x00')
    data = (b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A'
            b'\x0B\x0C\x0D\x0E\x0F')

    algorithm_a = enums.CryptographicAlgorithm.AES
    algorithm_b = enums.CryptographicAlgorithm.HMAC_SHA512
    obj = pie_objects.SymmetricKey(algorithm_a, 128, key)

    e._data_session.add(obj)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    uuid = str(obj.unique_identifier)

    cryptographic_parameters = attributes.CryptographicParameters(
        cryptographic_algorithm=attributes.
        CryptographicAlgorithm(algorithm_b)
    )

    # Verify when cryptographic_parameters is specified in request:
    # the algorithm from the request wins, producing an HMAC.
    payload = mac.MACRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(uuid),
        cryptographic_parameters=cryptographic_parameters,
        data=objects.Data(data)
    )
    response_payload = e._process_mac(payload)

    e._logger.info.assert_any_call(
        "Processing operation: MAC"
    )
    e._cryptography_engine.logger.info.assert_any_call(
        "Generating a hash-based message authentication code using {0}".
        format(algorithm_b.name)
    )
    e._cryptography_engine.logger.reset_mock()
    self.assertEqual(str(uuid), response_payload.unique_identifier.value)
    self.assertIsInstance(response_payload.mac_data, objects.MACData)

    # Verify when cryptographic_parameters is not specified in request:
    # the engine uses the key's own algorithm, producing a CMAC.
    payload = mac.MACRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(uuid),
        cryptographic_parameters=None,
        data=objects.Data(data)
    )
    response_payload = e._process_mac(payload)

    e._cryptography_engine.logger.info.assert_any_call(
        "Generating a cipher-based message authentication code using {0}".
        format(algorithm_a.name)
    )
    self.assertEqual(str(uuid), response_payload.unique_identifier.value)
    self.assertIsInstance(response_payload.mac_data, objects.MACData)
def test_mac_with_missing_fields(self):
    """
    Test that the right errors are generated when required fields
    are missing.

    Three failure modes are exercised: an object with no key material,
    an object with no cryptographic algorithm (and none supplied in the
    request), and a request carrying no data to MAC. Each must raise
    InvalidField with a descriptive message.
    """
    e = engine.KmipEngine()
    e._data_store = self.engine
    e._data_store_session_factory = self.session_factory
    e._data_session = e._data_store_session_factory()
    e._logger = mock.MagicMock()

    # NOTE(review): the key literal looks like a redacted export
    # placeholder (b'\<KEY>') -- confirm against the original fixture.
    key = (b'\<KEY>'
           b'\x00\x00\x00\x00')
    data = (b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B'
            b'\x0C\x0D\x0E\x0F')
    algorithm = enums.CryptographicAlgorithm.AES

    # Opaque objects carry no algorithm attribute; the first also has an
    # empty value, i.e. no key material at all.
    obj_no_key = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
    obj_no_algorithm = pie_objects.OpaqueObject(
        key, enums.OpaqueDataType.NONE)

    e._data_session.add(obj_no_key)
    e._data_session.add(obj_no_algorithm)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    uuid_no_key = str(obj_no_key.unique_identifier)
    uuid_no_algorithm = str(obj_no_algorithm.unique_identifier)

    cryptographic_parameters = attributes.CryptographicParameters(
        cryptographic_algorithm=attributes.
        CryptographicAlgorithm(algorithm))

    # Missing key material on the managed object.
    payload_no_key = mac.MACRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(uuid_no_key),
        cryptographic_parameters=cryptographic_parameters,
        data=objects.Data(data)
    )
    args = (payload_no_key, )
    regex = "A secret key value must be specified"
    # six.assertRaisesRegex for consistency with the other tests in this
    # file; unittest's assertRaisesRegexp alias is deprecated.
    six.assertRaisesRegex(
        self,
        exceptions.InvalidField,
        regex,
        e._process_mac,
        *args
    )

    # Missing algorithm on the object and none in the request.
    payload_no_algorithm = mac.MACRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(uuid_no_algorithm),
        cryptographic_parameters=None,
        data=objects.Data(data)
    )
    args = (payload_no_algorithm, )
    regex = "The cryptographic algorithm must be specified"
    six.assertRaisesRegex(
        self,
        exceptions.InvalidField,
        regex,
        e._process_mac,
        *args
    )

    # Request carries no data to authenticate.
    payload_no_data = mac.MACRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(uuid_no_algorithm),
        cryptographic_parameters=cryptographic_parameters,
        data=None
    )
    args = (payload_no_data, )
    regex = "No data to be MACed"
    six.assertRaisesRegex(
        self,
        exceptions.InvalidField,
        regex,
        e._process_mac,
        *args
    )
def test_create_get_destroy(self):
    """
    Test that a managed object can be created, retrieved, and destroyed
    without error.
    """
    e = engine.KmipEngine()
    e._data_store = self.engine
    e._data_store_session_factory = self.session_factory
    e._data_session = e._data_store_session_factory()
    e._logger = mock.MagicMock()

    attribute_factory = factory.AttributeFactory()

    # Build a SymmetricKey for registration.
    object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
    template_attribute = objects.TemplateAttribute(
        attributes=[
            attribute_factory.create_attribute(
                enums.AttributeType.NAME,
                attributes.Name.create(
                    'Test Symmetric Key',
                    enums.NameType.UNINTERPRETED_TEXT_STRING
                )
            ),
            attribute_factory.create_attribute(
                enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                enums.CryptographicAlgorithm.AES
            ),
            attribute_factory.create_attribute(
                enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                256
            ),
            attribute_factory.create_attribute(
                enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                [
                    enums.CryptographicUsageMask.ENCRYPT,
                    enums.CryptographicUsageMask.DECRYPT
                ]
            )
        ]
    )

    # Create the symmetric key with the corresponding attributes
    payload = create.CreateRequestPayload(
        object_type=object_type,
        template_attribute=template_attribute
    )

    response_payload = e._process_create(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Create"
    )

    # The first object created gets ID '1'.
    uid = response_payload.unique_identifier.value
    self.assertEqual('1', uid)

    e._logger.reset_mock()

    # Retrieve the created key using Get and verify all fields set
    payload = get.GetRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(uid)
    )

    response_payload = e._process_get(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Get"
    )
    self.assertEqual(
        enums.ObjectType.SYMMETRIC_KEY,
        response_payload.object_type.value
    )
    self.assertEqual(str(uid), response_payload.unique_identifier.value)
    self.assertIsInstance(response_payload.secret, secrets.SymmetricKey)

    key_block = response_payload.secret.key_block
    # 256 bits of server-generated key material, in RAW format.
    self.assertEqual(
        256,
        len(key_block.key_value.key_material.value) * 8
    )
    self.assertEqual(
        enums.KeyFormatType.RAW,
        key_block.key_format_type.value
    )
    self.assertEqual(
        enums.CryptographicAlgorithm.AES,
        key_block.cryptographic_algorithm.value
    )
    self.assertEqual(
        256,
        key_block.cryptographic_length.value
    )

    e._logger.reset_mock()

    # Destroy the symmetric key and verify it cannot be accessed again
    payload = destroy.DestroyRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(uid)
    )

    response_payload = e._process_destroy(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Destroy"
    )
    self.assertEqual(str(uid), response_payload.unique_identifier.value)

    # A second Destroy for the same ID must fail.
    args = (payload, )
    regex = "Could not locate object: {0}".format(uid)
    six.assertRaisesRegex(
        self,
        exceptions.ItemNotFound,
        regex,
        e._process_destroy,
        *args
    )

    e._data_session.commit()
    # NOTE(review): factory result discarded (session not reassigned) --
    # looks unintentional; confirm against upstream.
    e._data_store_session_factory()
def test_create_key_pair_get_destroy(self):
    """
    Test that a key pair can be created, retrieved, and destroyed without
    error.
    """
    e = engine.KmipEngine()
    e._data_store = self.engine
    e._data_store_session_factory = self.session_factory
    e._data_session = e._data_store_session_factory()
    e._logger = mock.MagicMock()

    attribute_factory = factory.AttributeFactory()

    # Attributes shared by both halves of the pair: name, algorithm,
    # and length.
    common_template = objects.CommonTemplateAttribute(
        attributes=[
            attribute_factory.create_attribute(
                enums.AttributeType.NAME,
                attributes.Name.create(
                    'Test Asymmetric Key',
                    enums.NameType.UNINTERPRETED_TEXT_STRING
                )
            ),
            attribute_factory.create_attribute(
                enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                enums.CryptographicAlgorithm.RSA
            ),
            attribute_factory.create_attribute(
                enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                2048
            )
        ]
    )
    # The public half may only encrypt; the private half only decrypt.
    public_template = objects.PublicKeyTemplateAttribute(
        attributes=[
            attribute_factory.create_attribute(
                enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                [
                    enums.CryptographicUsageMask.ENCRYPT
                ]
            )
        ]
    )
    private_template = objects.PrivateKeyTemplateAttribute(
        attributes=[
            attribute_factory.create_attribute(
                enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                [
                    enums.CryptographicUsageMask.DECRYPT
                ]
            )
        ]
    )

    payload = create_key_pair.CreateKeyPairRequestPayload(
        common_template,
        private_template,
        public_template
    )

    response_payload = e._process_create_key_pair(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: CreateKeyPair"
    )

    # IDs are assigned sequentially: public key first, then private.
    public_id = response_payload.public_key_uuid.value
    self.assertEqual('1', public_id)
    private_id = response_payload.private_key_uuid.value
    self.assertEqual('2', private_id)

    e._logger.reset_mock()

    # Retrieve the created public key using Get and verify all fields set
    payload = get.GetRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(public_id)
    )

    response_payload = e._process_get(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Get"
    )
    self.assertEqual(
        enums.ObjectType.PUBLIC_KEY,
        response_payload.object_type.value
    )
    self.assertEqual(
        str(public_id),
        response_payload.unique_identifier.value
    )
    self.assertIsInstance(response_payload.secret, secrets.PublicKey)

    key_block = response_payload.secret.key_block
    self.assertEqual(
        enums.KeyFormatType.PKCS_1,
        key_block.key_format_type.value
    )
    self.assertEqual(
        enums.CryptographicAlgorithm.RSA,
        key_block.cryptographic_algorithm.value
    )
    self.assertEqual(
        2048,
        key_block.cryptographic_length.value
    )

    e._logger.reset_mock()

    # Retrieve the created private key using Get and verify all fields set
    payload = get.GetRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(private_id)
    )

    response_payload = e._process_get(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Get"
    )
    self.assertEqual(
        enums.ObjectType.PRIVATE_KEY,
        response_payload.object_type.value
    )
    self.assertEqual(
        str(private_id),
        response_payload.unique_identifier.value
    )
    self.assertIsInstance(response_payload.secret, secrets.PrivateKey)

    key_block = response_payload.secret.key_block
    self.assertEqual(
        enums.KeyFormatType.PKCS_8,
        key_block.key_format_type.value
    )
    self.assertEqual(
        enums.CryptographicAlgorithm.RSA,
        key_block.cryptographic_algorithm.value
    )
    self.assertEqual(
        2048,
        key_block.cryptographic_length.value
    )

    e._data_session.commit()
    # NOTE(review): factory result discarded (session not reassigned)
    # here and at each repetition below -- looks unintentional; confirm.
    e._data_store_session_factory()

    e._logger.reset_mock()

    # Destroy the public key and verify it cannot be accessed again
    payload = destroy.DestroyRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(public_id)
    )

    response_payload = e._process_destroy(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Destroy"
    )
    self.assertEqual(
        str(public_id),
        response_payload.unique_identifier.value
    )

    e._data_session.commit()
    e._data_store_session_factory()
    e._logger.reset_mock()

    args = (payload, )
    regex = "Could not locate object: {0}".format(public_id)
    six.assertRaisesRegex(
        self,
        exceptions.ItemNotFound,
        regex,
        e._process_destroy,
        *args
    )

    e._data_session.commit()
    e._data_store_session_factory()
    e._logger.reset_mock()

    # Destroy the private key and verify it cannot be accessed again
    payload = destroy.DestroyRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(private_id)
    )

    response_payload = e._process_destroy(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Destroy"
    )
    self.assertEqual(
        str(private_id),
        response_payload.unique_identifier.value
    )

    e._data_session.commit()
    e._data_store_session_factory()
    e._logger.reset_mock()

    args = (payload, )
    regex = "Could not locate object: {0}".format(private_id)
    six.assertRaisesRegex(
        self,
        exceptions.ItemNotFound,
        regex,
        e._process_destroy,
        *args
    )

    e._data_session.commit()
    e._data_store_session_factory()
def test_register_get_destroy(self):
    """
    Test that a managed object can be registered, retrieved, and destroyed
    without error.
    """
    e = engine.KmipEngine()
    e._data_store = self.engine
    e._data_store_session_factory = self.session_factory
    e._data_session = e._data_store_session_factory()
    e._logger = mock.MagicMock()

    attribute_factory = factory.AttributeFactory()

    # Build a SymmetricKey for registration.
    object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
    template_attribute = objects.TemplateAttribute(
        attributes=[
            attribute_factory.create_attribute(
                enums.AttributeType.NAME,
                attributes.Name.create(
                    'Test Symmetric Key',
                    enums.NameType.UNINTERPRETED_TEXT_STRING
                )
            ),
            attribute_factory.create_attribute(
                enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
                enums.CryptographicAlgorithm.AES
            ),
            attribute_factory.create_attribute(
                enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
                128
            ),
            attribute_factory.create_attribute(
                enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
                [
                    enums.CryptographicUsageMask.ENCRYPT,
                    enums.CryptographicUsageMask.DECRYPT
                ]
            )
        ]
    )

    # 128-bit all-zero key material for the registered secret.
    key_bytes = (
        b'\x00\x00\x00\x00\x00\x00\x00\x00'
        b'\x00\x00\x00\x00\x00\x00\x00\x00'
    )
    secret = secrets.SymmetricKey(
        key_block=objects.KeyBlock(
            key_format_type=misc.KeyFormatType(enums.KeyFormatType.RAW),
            key_value=objects.KeyValue(
                key_material=objects.KeyMaterial(key_bytes)
            ),
            cryptographic_algorithm=attributes.CryptographicAlgorithm(
                enums.CryptographicAlgorithm.AES
            ),
            cryptographic_length=attributes.CryptographicLength(128)
        )
    )

    # Register the symmetric key with the corresponding attributes
    payload = register.RegisterRequestPayload(
        object_type=object_type,
        template_attribute=template_attribute,
        secret=secret
    )

    response_payload = e._process_register(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Register"
    )

    # The first object registered gets ID '1'.
    uid = response_payload.unique_identifier.value
    self.assertEqual('1', uid)

    e._logger.reset_mock()

    # Retrieve the registered key using Get and verify all fields set
    payload = get.GetRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(uid)
    )

    response_payload = e._process_get(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Get"
    )
    self.assertEqual(
        enums.ObjectType.SYMMETRIC_KEY,
        response_payload.object_type.value
    )
    self.assertEqual(str(uid), response_payload.unique_identifier.value)
    self.assertIsInstance(response_payload.secret, secrets.SymmetricKey)
    # The returned key material must round-trip byte-for-byte.
    self.assertEqual(
        key_bytes,
        response_payload.secret.key_block.key_value.key_material.value
    )
    self.assertEqual(
        enums.KeyFormatType.RAW,
        response_payload.secret.key_block.key_format_type.value
    )
    self.assertEqual(
        enums.CryptographicAlgorithm.AES,
        response_payload.secret.key_block.cryptographic_algorithm.value
    )
    self.assertEqual(
        128,
        response_payload.secret.key_block.cryptographic_length.value
    )

    e._logger.reset_mock()

    # Destroy the symmetric key and verify it cannot be accessed again
    payload = destroy.DestroyRequestPayload(
        unique_identifier=attributes.UniqueIdentifier(uid)
    )

    response_payload = e._process_destroy(payload)
    e._data_session.commit()
    e._data_session = e._data_store_session_factory()

    e._logger.info.assert_any_call(
        "Processing operation: Destroy"
    )
    self.assertEqual(str(uid), response_payload.unique_identifier.value)

    # A second Destroy for the same ID must fail.
    args = (payload, )
    regex = "Could not locate object: {0}".format(uid)
    six.assertRaisesRegex(
        self,
        exceptions.ItemNotFound,
        regex,
        e._process_destroy,
        *args
    )

    e._data_session.commit()
    # NOTE(review): factory result discarded (session not reassigned) --
    # looks unintentional; confirm against upstream.
    e._data_store_session_factory()
import unittest
import sys
import os
sys.path.insert(0,"../src/")
import SVN
import shutil
class TestSVNbackend(unittest.TestCase):
    """Exercise the SVN-backed file store: add/get round-trips, updates,
    collection and object listing, file metadata, and reopening an
    existing repository from a second backend instance."""

    def setUp(self):
        # Size of the backend's pool of working directories.
        self.workerc = 4
        self.workdir = "testdir/workdir"
        self.repodir = "testdir/repo"
        self.text = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut metus massa,
sagittis consequat tincidunt non, sodales at quam. Cras bibendum, mauris eu
placerat condimentum, magna nisi laoreet massa, eget venenatis ligula velit eu
nisi. Aenean nec turpis vel nunc porta ornare. Donec dolor dolor, imperdiet vel
ultricies interdum, eleifend at lorem. Aliquam vitae nunc lacus. Suspendisse
vitae leo sed risus tempor fermentum quis ut odio. Nunc eu faucibus nunc.
Integer accumsan tempus eros, vitae placerat risus pulvinar ut. Quisque eu
congue ipsum. Fusce ultrices sapien erat, sed pulvinar erat faucibus ac. Nullam
sit amet lectus mauris. Donec et tincidunt justo. Fusce porttitor augue et
libero varius pretium. Sed aliquet metus nec quam bibendum commodo. Morbi
venenatis sagittis semper. Integer venenatis accumsan magna vel bibendum. Aenean
elementum lorem lacus, nec imperdiet velit sagittis quis. Praesent lorem metus,
consectetur et consequat sit amet, suscipit in velit. Etiam ornare augue enim.
Phasellus egestas nunc vitae nisi imperdiet, sed lacinia ante sollicitudin.
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut ut quam fringilla
est elementum fringilla et ut ligula. Nullam augue ipsum, porta ut turpis id,
facilisis lacinia eros. Nullam euismod fringilla massa, non lobortis tortor
placerat vitae. Cras risus mi, pulvinar quis augue at, convallis dignissim est.
Curabitur malesuada, massa a lacinia fermentum, ligula lorem molestie erat, in
consectetur risus purus ut justo. Aliquam lobortis laoreet enim, condimentum
consectetur felis. Aenean id scelerisque lectus, a placerat ex. Mauris felis
diam, interdum vitae augue sit amet, faucibus euismod velit. Vestibulum
malesuada augue at quam pharetra gravida. Vestibulum ante ipsum primis in
faucibus orci luctus et ultrices posuere cubilia Curae; Etiam tempus faucibus
justo vel vestibulum. Nulla ipsum lorem, blandit nec scelerisque ut, blandit at
"""

    def _prepareRepo(self):
        """Wipe any leftover state and build a fresh backend instance."""
        shutil.rmtree(self.workdir, ignore_errors=True)
        shutil.rmtree(self.repodir, ignore_errors=True)
        self.repo = SVN.SVNBackend(self.workdir, self.repodir, self.workerc)

    def _testCleanUp(self):
        """Assert every pooled working directory was left clean (only
        dot-entries remain) and that all workers returned to the queue."""
        for slot in range(self.workerc):
            slot_dir = os.path.join(self.repo.workdir, "wd%d" % slot)
            for entry in os.listdir(slot_dir):
                self.assertTrue(entry.startswith('.'))
        self.assertTrue(self.repo.workers.qsize() == self.workerc)

    def test_add_get(self):
        # A file that was added can be read back unchanged.
        self._prepareRepo()
        self.repo.addFile("test1", "file1", self.text)
        fetched = self.repo.getFile("test1", "file1")
        self.assertTrue(fetched == self.text)
        self._testCleanUp()

    def test_changefile(self):
        # Re-adding an existing file replaces its content.
        self._prepareRepo()
        self.repo.addFile("test1", "file1", self.text)
        fetched = self.repo.getFile("test1", "file1")
        self.assertTrue(fetched == self.text)
        truncated = self.text[10:]
        self.repo.addFile("test1", "file1", truncated)
        fetched = self.repo.getFile("test1", "file1")
        self.assertTrue(fetched == truncated)
        self._testCleanUp()

    def test_loadCollections(self):
        # Collections created via addFile are discoverable from a workdir.
        self._prepareRepo()
        self.repo.addFile("test1", "file1", self.text)
        self.repo.addFile("test2", "file1", self.text)
        handle = self.repo.getWorkdir()
        collections = handle.loadCollections()
        self.assertTrue(len(collections) == 2)
        self.assertTrue("test1" in collections)
        self.assertTrue("test2" in collections)
        self.repo.freeWorkdir(handle)
        self._testCleanUp()

    def test_loadObjects(self):
        # Every file inside a collection is listed.
        self._prepareRepo()
        self.repo.addFile("test2", "file1", self.text)
        self.repo.addFile("test2", "file2", self.text)
        handle = self.repo.getWorkdir()
        contents = handle.loadObjects("test2")
        self.assertTrue(len(contents) == 2)
        self.assertTrue("file2" in contents)
        self.assertTrue("file1" in contents)
        self.repo.freeWorkdir(handle)
        self._testCleanUp()

    def test_getFileInfo(self):
        # File metadata reports a 'changed' entry.
        self._prepareRepo()
        self.repo.addFile("test2", "file1", self.text)
        handle = self.repo.getWorkdir()
        stream = handle.openFile("test2", "file1", 'r')
        info = handle.getFileInfo(stream)
        stream.close()
        self.assertTrue("changed" in info)
        self.repo.freeWorkdir(handle)
        self._testCleanUp()

    def test_reopen(self):
        # A second backend pointed at the same repository sees the data.
        self._prepareRepo()
        self.repo.addFile("test3", "file1", self.text)
        reopened = SVN.SVNBackend(self.workdir + "2", self.repodir, 1)
        fetched = reopened.getFile("test3", "file1")
        self.assertTrue(fetched == self.text)
        self._testCleanUp()
# Run the suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
import sys
import os
sys.path.insert(0,"../src/")
import SVN
import shutil
class TestSVNbackend(unittest.TestCase):
def setUp(self):
self.workerc=4
self.workdir="testdir/workdir"
self.repodir="testdir/repo"
self.text="""
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut metus massa,
sagittis consequat tincidunt non, sodales at quam. Cras bibendum, mauris eu
placerat condimentum, magna nisi laoreet massa, eget venenatis ligula velit eu
nisi. Aenean nec turpis vel nunc porta ornare. Donec dolor dolor, imperdiet vel
ultricies interdum, eleifend at lorem. Aliquam vitae nunc lacus. Suspendisse
vitae leo sed risus tempor fermentum quis ut odio. Nunc eu faucibus nunc.
Integer accumsan tempus eros, vitae placerat risus pulvinar ut. Quisque eu
congue ipsum. Fusce ultrices sapien erat, sed pulvinar erat faucibus ac. Nullam
sit amet lectus mauris. Donec et tincidunt justo. Fusce porttitor augue et
libero varius pretium. Sed aliquet metus nec quam bibendum commodo. Morbi
venenatis sagittis semper. Integer venenatis accumsan magna vel bibendum. Aenean
elementum lorem lacus, nec imperdiet velit sagittis quis. Praesent lorem metus,
consectetur et consequat sit amet, suscipit in velit. Etiam ornare augue enim.
Phasellus egestas nunc vitae nisi imperdiet, sed lacinia ante sollicitudin.
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut ut quam fringilla
est elementum fringilla et ut ligula. Nullam augue ipsum, porta ut turpis id,
facilisis lacinia eros. Nullam euismod fringilla massa, non lobortis tortor
placerat vitae. Cras risus mi, pulvinar quis augue at, convallis dignissim est.
Curabitur malesuada, massa a lacinia fermentum, ligula lorem molestie erat, in
consectetur risus purus ut justo. Aliquam lobortis laoreet enim, condimentum
consectetur felis. Aenean id scelerisque lectus, a placerat ex. Mauris felis
diam, interdum vitae augue sit amet, faucibus euismod velit. Vestibulum
malesuada augue at quam pharetra gravida. Vestibulum ante ipsum primis in
faucibus orci luctus et ultrices posuere cubilia Curae; Etiam tempus faucibus
justo vel vestibulum. Nulla ipsum lorem, blandit nec scelerisque ut, blandit at
"""
def _prepareRepo(self):
shutil.rmtree(self.workdir, ignore_errors=True)
shutil.rmtree(self.repodir, ignore_errors=True)
self.repo=SVN.SVNBackend(self.workdir,self.repodir,self.workerc)
def _testCleanUp(self):
for a in range(self.workerc):
for f in os.listdir(os.path.join(self.repo.workdir,"wd%d"%a)):
self.assertTrue(f.startswith('.'))
self.assertTrue(self.repo.workers.qsize()==self.workerc)
def test_add_get(self):
self._prepareRepo()
self.repo.addFile("test1","file1",self.text)
res=self.repo.getFile("test1","file1")
self.assertTrue(self.text==res)
self._testCleanUp()
def test_changefile(self):
self._prepareRepo()
self.repo.addFile("test1","file1",self.text)
res=self.repo.getFile("test1","file1")
self.assertTrue(self.text==res)
self.repo.addFile("test1","file1",self.text[10:])
res=self.repo.getFile("test1","file1")
self.assertTrue(self.text[10:]==res)
self._testCleanUp()
def test_loadCollections(self):
self._prepareRepo()
self.repo.addFile("test1","file1",self.text)
self.repo.addFile("test2","file1",self.text)
wd=self.repo.getWorkdir()
res=wd.loadCollections()
self.assertTrue(len(res)==2)
self.assertTrue("test1" in res)
self.assertTrue("test2" in res)
self.repo.freeWorkdir(wd)
self._testCleanUp()
def test_loadObjects(self):
self._prepareRepo()
self.repo.addFile("test2","file1",self.text)
self.repo.addFile("test2","file2",self.text)
wd=self.repo.getWorkdir()
res=wd.loadObjects("test2")
self.assertTrue(len(res)==2)
self.assertTrue("file2" in res)
self.assertTrue("file1" in res)
self.repo.freeWorkdir(wd)
self._testCleanUp()
def test_getFileInfo(self):
self._prepareRepo()
self.repo.addFile("test2","file1",self.text)
wd=self.repo.getWorkdir()
fp=wd.openFile("test2","file1",'r')
res=wd.getFileInfo(fp)
fp.close()
self.assertTrue("changed" in res)
self.repo.freeWorkdir(wd)
self._testCleanUp()
def test_reopen(self):
self._prepareRepo()
self.repo.addFile("test3","file1",self.text)
repo2=SVN.SVNBackend(self.workdir+"2",self.repodir,1)
res=repo2.getFile("test3","file1")
self.assertTrue(self.text==res)
self._testCleanUp()
if __name__ == '__main__':
unittest.main() | 0.133049 | 0.289372 |
from datetime import datetime as dt
from . import processor
import pygogo as gogo
from riko.dotdict import DotDict
# Processor options: emit the built item directly into the output stream.
OPTS = {"emit": True}
# NOTE(review): dt.now() runs once at import time, so every item built by
# this pipe shares the module-load timestamp (naive local time) as its
# default pubDate -- confirm this is intentional.
DEFAULTS = {"pubDate": dt.now().isoformat()}
logger = gogo.Gogo(__name__, monolog=True).logger

# yahoo style rss items (dots are for sub-levels)
# Maps friendly conf keys to namespaced RSS field paths; keys absent from
# this table pass through unchanged (see RSS.get(k, k) in parser()).
RSS = {
    "title": "y:title",
    "guid": "y:id",
    "mediaThumbURL": "media:thumbnail.url",
    "mediaThumbHeight": "media:thumbnail.height",
    "mediaThumbWidth": "media:thumbnail.width",
    "mediaContentType": "media:content.type",
    "mediaContentURL": "media:content.url",
    "mediaContentHeight": "media:content.height",
    "mediaContentWidth": "media:content.width",
}
def parser(item, objconf, skip=False, **kwargs):
    """Build a single rss item from the pipe configuration.

    Args:
        item (obj): The entry to process (a DotDict instance)
        objconf (obj): The pipe configuration (an Objectify instance)
        skip (bool): Don't parse the content
        kwargs (dict): Keyword arguments

    Kwargs:
        stream (dict): The original item

    Returns:
        Iter[dict]: The stream of items

    Examples:
        >>> from riko.dotdict import DotDict
        >>> from meza.fntools import Objectify
        >>>
        >>> item = DotDict()
        >>> conf = {'guid': 'a1', 'mediaThumbURL': 'image.png'}
        >>> objconf = Objectify(conf)
        >>> kwargs = {'stream': item}
        >>> result = parser(item, objconf, **kwargs)
        >>> result == {'media:thumbnail': {'url': 'image.png'}, 'y:id': 'a1'}
        True
    """
    if skip:
        return kwargs["stream"]

    # Translate each conf key to its namespaced RSS path, resolving the
    # value against the source item (falling back to the literal value).
    pairs = [
        (RSS.get(key, key), item.get(value, value, **kwargs))
        for key, value in objconf.items()
    ]
    return DotDict(pairs)
@processor(DEFAULTS, isasync=True, **OPTS)
def async_pipe(*args, **kwargs):
    """A source that asynchronously builds an rss item.

    Args:
        item (dict): The entry to process
        kwargs (dict): The keyword arguments passed to the wrapper

    Kwargs:
        conf (dict): The pipe configuration. All keys are optional.
            title (str): The item title
            description (str): The item description
            author (str): The item author
            guid (str): The item guid
            pubdate (str): The item publication date
            link (str): The item url
            mediaContentType (str): The item media content type
            mediaContentURL (str): The item media content url
            mediaContentHeight (str): The item media content height
            mediaContentWidth (str): The item media content width
            mediaThumbURL (str): The item media thumbnail url
            mediaThumbHeight (str): The item media thumbnail height
            mediaThumbWidth (str): The item media thumbnail width

    Returns:
        dict: twisted.internet.defer.Deferred an iterator of items

    Examples:
        >>> from riko.bado import react
        >>> from riko.bado.mock import FakeReactor
        >>>
        >>> def run(reactor):
        ...     resp = {'url': 'image.png'}
        ...     callback = lambda x: print(next(x)['media:thumbnail'] == resp)
        ...     conf = {
        ...         'title': 'Hi', 'guid': 'a1', 'mediaThumbURL': 'image.png'}
        ...     d = async_pipe(conf=conf)
        ...     return d.addCallbacks(callback, logger.error)
        >>>
        >>> try:
        ...     react(run, _reactor=FakeReactor())
        ...     pass
        ... except SystemExit:
        ...     pass
        ...
        True
    """
    # All real work happens in parser(); the @processor decorator handles
    # conf parsing, item wiring, and the async plumbing.
    return parser(*args, **kwargs)
@processor(DEFAULTS, **OPTS)
def pipe(*args, **kwargs):
    """A source that builds an rss item.

    Args:
        item (dict): The entry to process
        kwargs (dict): The keyword arguments passed to the wrapper

    Kwargs:
        conf (dict): The pipe configuration. All keys are optional.
            title (str): The item title
            description (str): The item description
            author (str): The item author
            guid (str): The item guid
            pubdate (str): The item publication date
            link (str): The item url
            mediaContentType (str): The item media content type
            mediaContentURL (str): The item media content url
            mediaContentHeight (str): The item media content height
            mediaContentWidth (str): The item media content width
            mediaThumbURL (str): The item media thumbnail url
            mediaThumbHeight (str): The item media thumbnail height
            mediaThumbWidth (str): The item media thumbnail width

    Yields:
        dict: an rss item

    Examples:
        >>> # conf based
        >>> conf = {'title': 'Hi', 'guid': 'a1', 'mediaThumbURL': 'image.png'}
        >>> rss = next(pipe(conf=conf))
        >>> rss['media:thumbnail'] == {'url': 'image.png'}
        True
        >>> sorted(rss.keys()) == [
        ...     'media:thumbnail', 'pubDate', 'y:id', 'y:title']
        True
        >>>
        >>> # source based
        >>> # TODO: look into subkey
        >>> item = {'heading': 'Hi', 'id': 'a1', 'thumbnail': 'image.png'}
        >>> conf = {
        ...     'title': 'heading', 'guid': 'id', 'mediaThumbURL': 'thumbnail'}
        >>> next(pipe(item, conf=conf)) == rss
        True
    """
    # All real work happens in parser(); the @processor decorator handles
    # conf parsing and item wiring.
    return parser(*args, **kwargs)
from . import processor
import pygogo as gogo
from riko.dotdict import DotDict
OPTS = {"emit": True}
DEFAULTS = {"pubDate": dt.now().isoformat()}
logger = gogo.Gogo(__name__, monolog=True).logger
# yahoo style rss items (dots are for sub-levels)
RSS = {
"title": "y:title",
"guid": "y:id",
"mediaThumbURL": "media:thumbnail.url",
"mediaThumbHeight": "media:thumbnail.height",
"mediaThumbWidth": "media:thumbnail.width",
"mediaContentType": "media:content.type",
"mediaContentURL": "media:content.url",
"mediaContentHeight": "media:content.height",
"mediaContentWidth": "media:content.width",
}
def parser(item, objconf, skip=False, **kwargs):
"""Parses the pipe content
Args:
item (obj): The entry to process (a DotDict instance)
objconf (obj): The pipe configuration (an Objectify instance)
skip (bool): Don't parse the content
kwargs (dict): Keyword arguments
Kwargs:
stream (dict): The original item
Returns:
Iter[dict]: The stream of items
Examples:
>>> from riko.dotdict import DotDict
>>> from meza.fntools import Objectify
>>>
>>> item = DotDict()
>>> conf = {'guid': 'a1', 'mediaThumbURL': 'image.png'}
>>> objconf = Objectify(conf)
>>> kwargs = {'stream': item}
>>> result = parser(item, objconf, **kwargs)
>>> result == {'media:thumbnail': {'url': 'image.png'}, 'y:id': 'a1'}
True
"""
if skip:
stream = kwargs["stream"]
else:
items = objconf.items()
rdict = ((RSS.get(k, k), item.get(v, v, **kwargs)) for k, v in items)
stream = DotDict(rdict)
return stream
@processor(DEFAULTS, isasync=True, **OPTS)
def async_pipe(*args, **kwargs):
"""A source that asynchronously builds an rss item.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. All keys are optional.
title (str): The item title
description (str): The item description
author (str): The item author
guid (str): The item guid
pubdate (str): The item publication date
link (str): The item url
mediaContentType (str): The item media content type
mediaContentURL (str): The item media content url
mediaContentHeight (str): The item media content height
mediaContentWidth (str): The item media content width
mediaThumbURL (str): The item media thumbnail url
mediaThumbHeight (str): The item media thumbnail height
mediaThumbWidth (str): The item media thumbnail width
Returns:
dict: twisted.internet.defer.Deferred an iterator of items
Examples:
>>> from riko.bado import react
>>> from riko.bado.mock import FakeReactor
>>>
>>> def run(reactor):
... resp = {'url': 'image.png'}
... callback = lambda x: print(next(x)['media:thumbnail'] == resp)
... conf = {
... 'title': 'Hi', 'guid': 'a1', 'mediaThumbURL': 'image.png'}
... d = async_pipe(conf=conf)
... return d.addCallbacks(callback, logger.error)
>>>
>>> try:
... react(run, _reactor=FakeReactor())
... pass
... except SystemExit:
... pass
...
True
"""
return parser(*args, **kwargs)
@processor(DEFAULTS, **OPTS)
def pipe(*args, **kwargs):
"""A source that builds an rss item.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. All keys are optional.
title (str): The item title
description (str): The item description
author (str): The item author
guid (str): The item guid
pubdate (str): The item publication date
link (str): The item url
mediaContentType (str): The item media content type
mediaContentURL (str): The item media content url
mediaContentHeight (str): The item media content height
mediaContentWidth (str): The item media content width
mediaThumbURL (str): The item media thumbnail url
mediaThumbHeight (str): The item media thumbnail height
mediaThumbWidth (str): The item media thumbnail width
Yields:
dict: an rss item
Examples:
>>> # conf based
>>> conf = {'title': 'Hi', 'guid': 'a1', 'mediaThumbURL': 'image.png'}
>>> rss = next(pipe(conf=conf))
>>> rss['media:thumbnail'] == {'url': 'image.png'}
True
>>> sorted(rss.keys()) == [
... 'media:thumbnail', 'pubDate', 'y:id', 'y:title']
True
>>>
>>> # source based
>>> # TODO: look into subkey
>>> item = {'heading': 'Hi', 'id': 'a1', 'thumbnail': 'image.png'}
>>> conf = {
... 'title': 'heading', 'guid': 'id', 'mediaThumbURL': 'thumbnail'}
>>> next(pipe(item, conf=conf)) == rss
True
"""
return parser(*args, **kwargs) | 0.748536 | 0.155655 |
import numpy as np
from nibabel.volumeutils import Recoder
from nibabel.affines import voxel_sizes, from_matvec
from .base import (
BaseLinearTransformList,
StringBasedStruct,
LinearTransformStruct,
TransformFileError,
)
transform_codes = Recoder(
(
(0, "LINEAR_VOX_TO_VOX"),
(1, "LINEAR_RAS_TO_RAS"),
(2, "LINEAR_PHYSVOX_TO_PHYSVOX"),
(14, "REGISTER_DAT"),
(21, "LINEAR_COR_TO_COR"),
),
fields=("code", "label"),
)
class VolumeGeometry(StringBasedStruct):
"""Data structure for regularly gridded images."""
template_dtype = np.dtype(
[
("valid", "i4"), # Valid values: 0, 1
("volume", "i4", (3, )), # width, height, depth
("voxelsize", "f4", (3, )), # xsize, ysize, zsize
("xras", "f8", (3, 1)), # x_r, x_a, x_s
("yras", "f8", (3, 1)), # y_r, y_a, y_s
("zras", "f8", (3, 1)), # z_r, z_a, z_s
("cras", "f8", (3, )), # c_r, c_a, c_s
("filename", "U1024"),
]
) # Not conformant (may be >1024 bytes)
dtype = template_dtype
def as_affine(self):
"""Return the internal affine of this regular grid."""
sa = self.structarr
A = np.hstack((sa["xras"], sa["yras"], sa["zras"])) * sa["voxelsize"]
b = sa["cras"] - A @ sa["volume"] / 2
return from_matvec(A, b)
def __str__(self):
"""Format the structure as a text file."""
sa = self.structarr
lines = [
"valid = {} # volume info {:s}valid".format(
sa["valid"], "" if sa["valid"] else "in"
),
"filename = {}".format(sa["filename"]),
"volume = {:d} {:d} {:d}".format(*sa["volume"]),
"voxelsize = {:.15e} {:.15e} {:.15e}".format(*sa["voxelsize"]),
"xras = {:.15e} {:.15e} {:.15e}".format(*sa["xras"].flatten()),
"yras = {:.15e} {:.15e} {:.15e}".format(*sa["yras"].flatten()),
"zras = {:.15e} {:.15e} {:.15e}".format(*sa["zras"].flatten()),
"cras = {:.15e} {:.15e} {:.15e}".format(*sa["cras"].flatten()),
]
return "\n".join(lines)
def to_string(self):
"""Format the structure as a text file."""
return self.__str__()
@classmethod
def from_image(cls, img):
"""Create struct from an image."""
volgeom = cls()
sa = volgeom.structarr
sa["valid"] = 1
sa["volume"] = img.shape[:3] # Assumes xyzt-ordered image
sa["voxelsize"] = voxel_sizes(img.affine)[:3]
A = img.affine[:3, :3]
b = img.affine[:3, 3]
cols = A / sa["voxelsize"]
sa["xras"] = cols[:, [0]]
sa["yras"] = cols[:, [1]]
sa["zras"] = cols[:, [2]]
sa["cras"] = b + A @ sa["volume"] / 2
try:
sa["filename"] = img.file_map["image"].filename
except Exception:
pass
return volgeom
@classmethod
def from_string(cls, string):
"""Create a volume structure off of text."""
volgeom = cls()
sa = volgeom.structarr
lines = string.splitlines()
for key in (
"valid",
"filename",
"volume",
"voxelsize",
"xras",
"yras",
"zras",
"cras",
):
label, valstring = lines.pop(0).split(" =")
assert label.strip() == key
val = ""
if valstring.strip():
parsed = np.genfromtxt(
[valstring.encode()], autostrip=True, dtype=cls.dtype[key]
)
if parsed.size:
val = parsed.reshape(sa[key].shape)
sa[key] = val
return volgeom
class FSLinearTransform(LinearTransformStruct):
"""Represents a single LTA's transform structure."""
template_dtype = np.dtype(
[
("type", "i4"),
("mean", "f4", (3, 1)), # x0, y0, z0
("sigma", "f4"),
("m_L", "f8", (4, 4)),
("m_dL", "f8", (4, 4)),
("m_last_dL", "f8", (4, 4)),
("src", VolumeGeometry),
("dst", VolumeGeometry),
("label", "i4"),
]
)
dtype = template_dtype
def __getitem__(self, idx):
"""Implement dictionary access."""
val = super().__getitem__(idx)
if idx in ("src", "dst"):
val = VolumeGeometry(val)
return val
def set_type(self, new_type):
"""
Convert the internal transformation matrix to a different type inplace.
Parameters
----------
new_type : str, int
Tranformation type
"""
sa = self.structarr
current = sa["type"]
if isinstance(new_type, str):
new_type = transform_codes.code[new_type]
if current == new_type:
return
# VOX2VOX -> RAS2RAS
if (current, new_type) == (0, 1):
src = VolumeGeometry(sa["src"])
dst = VolumeGeometry(sa["dst"])
# See https://github.com/freesurfer/freesurfer/
# blob/bbb2ef78591dec2c1ede3faea47f8dd8a530e92e/utils/transform.cpp#L3696-L3705
# blob/bbb2ef78591dec2c1ede3faea47f8dd8a530e92e/utils/transform.cpp#L3548-L3568
M = dst.as_affine() @ sa["m_L"] @ np.linalg.inv(src.as_affine())
sa["m_L"] = M
sa["type"] = new_type
return
raise NotImplementedError(
"Converting {0} to {1} is not yet available".format(
transform_codes.label[current], transform_codes.label[new_type]
)
)
def to_ras(self, moving=None, reference=None):
"""
Return a nitransforms' internal RAS+ array.
Seemingly, the matrix of an LTA is defined such that it
maps coordinates from the ``dest volume`` to the ``src volume``.
Therefore, without inversion, the LTA matrix is appropiate
to move the information from ``src volume`` into the
``dest volume``'s grid.
.. important ::
The ``moving`` and ``reference`` parameters are dismissed
because ``VOX2VOX`` LTAs are converted to ``RAS2RAS`` type
before returning the RAS+ matrix, using the ``dest`` and
``src`` contained in the LTA. Both arguments are kept for
API compatibility.
Parameters
----------
moving : dismissed
The spatial reference of moving images.
reference : dismissed
The spatial reference of moving images.
Returns
-------
matrix : :obj:`numpy.ndarray`
The RAS+ affine matrix corresponding to the LTA.
"""
self.set_type(1)
return np.linalg.inv(self.structarr["m_L"])
def to_string(self, partial=False):
"""Convert this transform to text."""
sa = self.structarr
lines = [
"# LTA file created by NiTransforms",
"type = {}".format(sa["type"]),
"nxforms = 1",
] if not partial else []
# Standard preamble
lines += [
"mean = {:6.4f} {:6.4f} {:6.4f}".format(*sa["mean"].flatten()),
"sigma = {:6.4f}".format(float(sa["sigma"])),
"1 4 4",
]
# Format parameters matrix
lines += [
" ".join(f"{v:18.15e}" for v in sa["m_L"][i])
for i in range(4)
]
lines += [
"src volume info",
str(self["src"]),
"dst volume info",
str(self["dst"]),
]
lines += [] if partial else [""]
return "\n".join(lines)
@classmethod
def from_string(cls, string, partial=False):
"""Read a transform from text."""
lt = cls()
sa = lt.structarr
# Drop commented out lines
lines = _drop_comments(string).splitlines()
fields = ("type", "nxforms", "mean", "sigma")
for key in fields[partial * 2:]:
label, valstring = lines.pop(0).split(" = ")
assert label.strip() == key
if key != "nxforms":
val = np.genfromtxt([valstring.encode()], dtype=cls.dtype[key])
sa[key] = val.reshape(sa[key].shape)
else:
assert valstring.strip() == "1"
assert lines.pop(0) == "1 4 4" # xforms, shape + 1, shape + 1
val = np.genfromtxt([valstring.encode() for valstring in lines[:4]], dtype="f4")
sa["m_L"] = val
lines = lines[4:]
assert lines.pop(0) == "src volume info"
sa["src"] = np.asanyarray(VolumeGeometry.from_string("\n".join(lines[:8])))
lines = lines[8:]
assert lines.pop(0) == "dst volume info"
sa["dst"] = np.asanyarray(VolumeGeometry.from_string("\n".join(lines)))
return lt
@classmethod
def from_ras(cls, ras, moving=None, reference=None):
"""Create an affine from a nitransform's RAS+ matrix."""
lt = cls()
sa = lt.structarr
sa["sigma"] = 1.0
sa["mean"] = np.zeros((3, 1), dtype="float")
sa["type"] = 1 # RAS2RAS
# Just for reference, nitransforms does not write VOX2VOX
# PLEASE NOTE THAT LTA USES THE "POINTS" CONVENTION, therefore
# the source is the reference (coordinates for which we need
# to find a projection) and destination is the moving image
# (from which data is pulled-back).
if reference is not None:
sa["src"] = np.asanyarray(VolumeGeometry.from_image(reference))
if moving is not None:
sa["dst"] = np.asanyarray(VolumeGeometry.from_image(moving))
# However, the affine needs to be inverted
# (i.e., it is not a pure "points" convention).
# This inversion is consistent with self.to_ras()
sa["m_L"] = np.linalg.inv(ras)
# to make LTA file format
return lt
class FSLinearTransformArray(BaseLinearTransformList):
"""A list of linear transforms generated by FreeSurfer."""
template_dtype = np.dtype(
[("type", "i4"), ("nxforms", "i4"), ("subject", "U1024"), ("fscale", "f4")]
)
dtype = template_dtype
_inner_type = FSLinearTransform
def __getitem__(self, idx):
"""Allow dictionary access to the transforms."""
if idx == "xforms":
return self._xforms
if idx == "nxforms":
return len(self._xforms)
return self.structarr[idx]
def to_ras(self, moving=None, reference=None):
"""Set type to RAS2RAS and return the new matrix."""
self.structarr["type"] = 1
return [
xfm.to_ras(moving=moving, reference=reference)
for xfm in self.xforms
]
def to_string(self):
"""Convert this LTA into text format."""
code = int(self["type"])
header = [
"# LTA-array file created by NiTransforms",
"type = {} # {}".format(code, transform_codes.label[code]),
"nxforms = {}".format(self["nxforms"]),
]
xforms = [xfm.to_string(partial=True) for xfm in self._xforms]
footer = [
"subject {}".format(self["subject"]),
"fscale {:.6f}".format(float(self["fscale"])),
"",
]
return "\n".join(header + xforms + footer)
@classmethod
def from_string(cls, string):
"""Read this LTA from a text string."""
lta = cls()
sa = lta.structarr
# Drop commented out lines
lines = _drop_comments(string).splitlines()
if not lines or not lines[0].startswith("type"):
raise TransformFileError("Invalid LTA format")
for key in ("type", "nxforms"):
label, valstring = lines.pop(0).split(" = ")
assert label.strip() == key
val = np.genfromtxt([valstring.encode()], dtype=cls.dtype[key])
sa[key] = val.reshape(sa[key].shape) if val.size else ""
for _ in range(sa["nxforms"]):
lta._xforms.append(
cls._inner_type.from_string("\n".join(lines[:25]), partial=True)
)
lta._xforms[-1].structarr["type"] = sa["type"]
lines = lines[25:]
for key in ("subject", "fscale"):
# Optional keys
if not (lines and lines[0].startswith(key)):
continue
try:
label, valstring = lines.pop(0).split(" ")
except ValueError:
sa[key] = ""
else:
assert label.strip() == key
val = np.genfromtxt([valstring.encode()], dtype=cls.dtype[key])
sa[key] = val.reshape(sa[key].shape) if val.size else ""
assert len(lta._xforms) == sa["nxforms"]
return lta
@classmethod
def from_ras(cls, ras, moving=None, reference=None):
"""Create an affine from a nitransform's RAS+ matrix."""
if ras.ndim == 2:
return cls._inner_type.from_ras(ras, moving=moving, reference=reference)
lt = cls()
sa = lt.structarr
sa["type"] = 1
sa["nxforms"] = ras.shape[0]
for i in range(sa["nxforms"]):
lt._xforms.append(cls._inner_type.from_ras(
ras[i, ...], moving=moving, reference=reference
))
sa["subject"] = "unset"
sa["fscale"] = 0.0
return lt
def _drop_comments(string):
"""Drop comments."""
return "\n".join([
line.split("#")[0].strip()
for line in string.splitlines()
if line.split("#")[0].strip()
]) | nitransforms/io/lta.py | import numpy as np
from nibabel.volumeutils import Recoder
from nibabel.affines import voxel_sizes, from_matvec
from .base import (
BaseLinearTransformList,
StringBasedStruct,
LinearTransformStruct,
TransformFileError,
)
transform_codes = Recoder(
(
(0, "LINEAR_VOX_TO_VOX"),
(1, "LINEAR_RAS_TO_RAS"),
(2, "LINEAR_PHYSVOX_TO_PHYSVOX"),
(14, "REGISTER_DAT"),
(21, "LINEAR_COR_TO_COR"),
),
fields=("code", "label"),
)
class VolumeGeometry(StringBasedStruct):
"""Data structure for regularly gridded images."""
template_dtype = np.dtype(
[
("valid", "i4"), # Valid values: 0, 1
("volume", "i4", (3, )), # width, height, depth
("voxelsize", "f4", (3, )), # xsize, ysize, zsize
("xras", "f8", (3, 1)), # x_r, x_a, x_s
("yras", "f8", (3, 1)), # y_r, y_a, y_s
("zras", "f8", (3, 1)), # z_r, z_a, z_s
("cras", "f8", (3, )), # c_r, c_a, c_s
("filename", "U1024"),
]
) # Not conformant (may be >1024 bytes)
dtype = template_dtype
def as_affine(self):
"""Return the internal affine of this regular grid."""
sa = self.structarr
A = np.hstack((sa["xras"], sa["yras"], sa["zras"])) * sa["voxelsize"]
b = sa["cras"] - A @ sa["volume"] / 2
return from_matvec(A, b)
def __str__(self):
"""Format the structure as a text file."""
sa = self.structarr
lines = [
"valid = {} # volume info {:s}valid".format(
sa["valid"], "" if sa["valid"] else "in"
),
"filename = {}".format(sa["filename"]),
"volume = {:d} {:d} {:d}".format(*sa["volume"]),
"voxelsize = {:.15e} {:.15e} {:.15e}".format(*sa["voxelsize"]),
"xras = {:.15e} {:.15e} {:.15e}".format(*sa["xras"].flatten()),
"yras = {:.15e} {:.15e} {:.15e}".format(*sa["yras"].flatten()),
"zras = {:.15e} {:.15e} {:.15e}".format(*sa["zras"].flatten()),
"cras = {:.15e} {:.15e} {:.15e}".format(*sa["cras"].flatten()),
]
return "\n".join(lines)
def to_string(self):
"""Format the structure as a text file."""
return self.__str__()
@classmethod
def from_image(cls, img):
"""Create struct from an image."""
volgeom = cls()
sa = volgeom.structarr
sa["valid"] = 1
sa["volume"] = img.shape[:3] # Assumes xyzt-ordered image
sa["voxelsize"] = voxel_sizes(img.affine)[:3]
A = img.affine[:3, :3]
b = img.affine[:3, 3]
cols = A / sa["voxelsize"]
sa["xras"] = cols[:, [0]]
sa["yras"] = cols[:, [1]]
sa["zras"] = cols[:, [2]]
sa["cras"] = b + A @ sa["volume"] / 2
try:
sa["filename"] = img.file_map["image"].filename
except Exception:
pass
return volgeom
@classmethod
def from_string(cls, string):
"""Create a volume structure off of text."""
volgeom = cls()
sa = volgeom.structarr
lines = string.splitlines()
for key in (
"valid",
"filename",
"volume",
"voxelsize",
"xras",
"yras",
"zras",
"cras",
):
label, valstring = lines.pop(0).split(" =")
assert label.strip() == key
val = ""
if valstring.strip():
parsed = np.genfromtxt(
[valstring.encode()], autostrip=True, dtype=cls.dtype[key]
)
if parsed.size:
val = parsed.reshape(sa[key].shape)
sa[key] = val
return volgeom
class FSLinearTransform(LinearTransformStruct):
"""Represents a single LTA's transform structure."""
template_dtype = np.dtype(
[
("type", "i4"),
("mean", "f4", (3, 1)), # x0, y0, z0
("sigma", "f4"),
("m_L", "f8", (4, 4)),
("m_dL", "f8", (4, 4)),
("m_last_dL", "f8", (4, 4)),
("src", VolumeGeometry),
("dst", VolumeGeometry),
("label", "i4"),
]
)
dtype = template_dtype
def __getitem__(self, idx):
"""Implement dictionary access."""
val = super().__getitem__(idx)
if idx in ("src", "dst"):
val = VolumeGeometry(val)
return val
def set_type(self, new_type):
"""
Convert the internal transformation matrix to a different type inplace.
Parameters
----------
new_type : str, int
Tranformation type
"""
sa = self.structarr
current = sa["type"]
if isinstance(new_type, str):
new_type = transform_codes.code[new_type]
if current == new_type:
return
# VOX2VOX -> RAS2RAS
if (current, new_type) == (0, 1):
src = VolumeGeometry(sa["src"])
dst = VolumeGeometry(sa["dst"])
# See https://github.com/freesurfer/freesurfer/
# blob/bbb2ef78591dec2c1ede3faea47f8dd8a530e92e/utils/transform.cpp#L3696-L3705
# blob/bbb2ef78591dec2c1ede3faea47f8dd8a530e92e/utils/transform.cpp#L3548-L3568
M = dst.as_affine() @ sa["m_L"] @ np.linalg.inv(src.as_affine())
sa["m_L"] = M
sa["type"] = new_type
return
raise NotImplementedError(
"Converting {0} to {1} is not yet available".format(
transform_codes.label[current], transform_codes.label[new_type]
)
)
def to_ras(self, moving=None, reference=None):
"""
Return a nitransforms' internal RAS+ array.
Seemingly, the matrix of an LTA is defined such that it
maps coordinates from the ``dest volume`` to the ``src volume``.
Therefore, without inversion, the LTA matrix is appropiate
to move the information from ``src volume`` into the
``dest volume``'s grid.
.. important ::
The ``moving`` and ``reference`` parameters are dismissed
because ``VOX2VOX`` LTAs are converted to ``RAS2RAS`` type
before returning the RAS+ matrix, using the ``dest`` and
``src`` contained in the LTA. Both arguments are kept for
API compatibility.
Parameters
----------
moving : dismissed
The spatial reference of moving images.
reference : dismissed
The spatial reference of moving images.
Returns
-------
matrix : :obj:`numpy.ndarray`
The RAS+ affine matrix corresponding to the LTA.
"""
self.set_type(1)
return np.linalg.inv(self.structarr["m_L"])
def to_string(self, partial=False):
"""Convert this transform to text."""
sa = self.structarr
lines = [
"# LTA file created by NiTransforms",
"type = {}".format(sa["type"]),
"nxforms = 1",
] if not partial else []
# Standard preamble
lines += [
"mean = {:6.4f} {:6.4f} {:6.4f}".format(*sa["mean"].flatten()),
"sigma = {:6.4f}".format(float(sa["sigma"])),
"1 4 4",
]
# Format parameters matrix
lines += [
" ".join(f"{v:18.15e}" for v in sa["m_L"][i])
for i in range(4)
]
lines += [
"src volume info",
str(self["src"]),
"dst volume info",
str(self["dst"]),
]
lines += [] if partial else [""]
return "\n".join(lines)
@classmethod
def from_string(cls, string, partial=False):
"""Read a transform from text."""
lt = cls()
sa = lt.structarr
# Drop commented out lines
lines = _drop_comments(string).splitlines()
fields = ("type", "nxforms", "mean", "sigma")
for key in fields[partial * 2:]:
label, valstring = lines.pop(0).split(" = ")
assert label.strip() == key
if key != "nxforms":
val = np.genfromtxt([valstring.encode()], dtype=cls.dtype[key])
sa[key] = val.reshape(sa[key].shape)
else:
assert valstring.strip() == "1"
assert lines.pop(0) == "1 4 4" # xforms, shape + 1, shape + 1
val = np.genfromtxt([valstring.encode() for valstring in lines[:4]], dtype="f4")
sa["m_L"] = val
lines = lines[4:]
assert lines.pop(0) == "src volume info"
sa["src"] = np.asanyarray(VolumeGeometry.from_string("\n".join(lines[:8])))
lines = lines[8:]
assert lines.pop(0) == "dst volume info"
sa["dst"] = np.asanyarray(VolumeGeometry.from_string("\n".join(lines)))
return lt
@classmethod
def from_ras(cls, ras, moving=None, reference=None):
"""Create an affine from a nitransform's RAS+ matrix."""
lt = cls()
sa = lt.structarr
sa["sigma"] = 1.0
sa["mean"] = np.zeros((3, 1), dtype="float")
sa["type"] = 1 # RAS2RAS
# Just for reference, nitransforms does not write VOX2VOX
# PLEASE NOTE THAT LTA USES THE "POINTS" CONVENTION, therefore
# the source is the reference (coordinates for which we need
# to find a projection) and destination is the moving image
# (from which data is pulled-back).
if reference is not None:
sa["src"] = np.asanyarray(VolumeGeometry.from_image(reference))
if moving is not None:
sa["dst"] = np.asanyarray(VolumeGeometry.from_image(moving))
# However, the affine needs to be inverted
# (i.e., it is not a pure "points" convention).
# This inversion is consistent with self.to_ras()
sa["m_L"] = np.linalg.inv(ras)
# to make LTA file format
return lt
class FSLinearTransformArray(BaseLinearTransformList):
"""A list of linear transforms generated by FreeSurfer."""
template_dtype = np.dtype(
[("type", "i4"), ("nxforms", "i4"), ("subject", "U1024"), ("fscale", "f4")]
)
dtype = template_dtype
_inner_type = FSLinearTransform
def __getitem__(self, idx):
"""Allow dictionary access to the transforms."""
if idx == "xforms":
return self._xforms
if idx == "nxforms":
return len(self._xforms)
return self.structarr[idx]
def to_ras(self, moving=None, reference=None):
"""Set type to RAS2RAS and return the new matrix."""
self.structarr["type"] = 1
return [
xfm.to_ras(moving=moving, reference=reference)
for xfm in self.xforms
]
def to_string(self):
"""Convert this LTA into text format."""
code = int(self["type"])
header = [
"# LTA-array file created by NiTransforms",
"type = {} # {}".format(code, transform_codes.label[code]),
"nxforms = {}".format(self["nxforms"]),
]
xforms = [xfm.to_string(partial=True) for xfm in self._xforms]
footer = [
"subject {}".format(self["subject"]),
"fscale {:.6f}".format(float(self["fscale"])),
"",
]
return "\n".join(header + xforms + footer)
@classmethod
def from_string(cls, string):
"""Read this LTA from a text string."""
lta = cls()
sa = lta.structarr
# Drop commented out lines
lines = _drop_comments(string).splitlines()
if not lines or not lines[0].startswith("type"):
raise TransformFileError("Invalid LTA format")
for key in ("type", "nxforms"):
label, valstring = lines.pop(0).split(" = ")
assert label.strip() == key
val = np.genfromtxt([valstring.encode()], dtype=cls.dtype[key])
sa[key] = val.reshape(sa[key].shape) if val.size else ""
for _ in range(sa["nxforms"]):
lta._xforms.append(
cls._inner_type.from_string("\n".join(lines[:25]), partial=True)
)
lta._xforms[-1].structarr["type"] = sa["type"]
lines = lines[25:]
for key in ("subject", "fscale"):
# Optional keys
if not (lines and lines[0].startswith(key)):
continue
try:
label, valstring = lines.pop(0).split(" ")
except ValueError:
sa[key] = ""
else:
assert label.strip() == key
val = np.genfromtxt([valstring.encode()], dtype=cls.dtype[key])
sa[key] = val.reshape(sa[key].shape) if val.size else ""
assert len(lta._xforms) == sa["nxforms"]
return lta
@classmethod
def from_ras(cls, ras, moving=None, reference=None):
"""Create an affine from a nitransform's RAS+ matrix."""
if ras.ndim == 2:
return cls._inner_type.from_ras(ras, moving=moving, reference=reference)
lt = cls()
sa = lt.structarr
sa["type"] = 1
sa["nxforms"] = ras.shape[0]
for i in range(sa["nxforms"]):
lt._xforms.append(cls._inner_type.from_ras(
ras[i, ...], moving=moving, reference=reference
))
sa["subject"] = "unset"
sa["fscale"] = 0.0
return lt
def _drop_comments(string):
"""Drop comments."""
return "\n".join([
line.split("#")[0].strip()
for line in string.splitlines()
if line.split("#")[0].strip()
]) | 0.845081 | 0.482734 |
from __future__ import print_function
import sys
import os
from logging import getLogger
if os.name == "nt":
if sys.version.startswith("3."):
import winreg
else:
import _winreg as winreg
else:
# If Sphinx is running there is no need to raise any import errors as Sphinx is just generating documentation
if 'sphinx' not in sys.modules:
raise ImportError("This CDC module is for Windows only")
class CDC:
"""
Windows-only implementation of hid device to CDC serial port map.
"""
def __init__(self):
"""
Hook onto logger
"""
self.logger = getLogger(__name__)
def func_name(self):
"""
Get function name
"""
return "%s::%s" % (__name__, sys._getframe(1).f_code.co_name)
def iter_keys_as_str(self, key):
"""
Iterate over subkeys of a key returning subkey as string
"""
for i in range(winreg.QueryInfoKey(key)[0]):
yield winreg.EnumKey(key, i)
def iter_keys(self, key):
"""
Iterate over subkeys of a key
"""
for i in range(winreg.QueryInfoKey(key)[0]):
yield winreg.OpenKey(key, winreg.EnumKey(key, i))
def iter_vals(self, key):
"""
Iterate over values of a key
"""
for i in range(winreg.QueryInfoKey(key)[1]):
yield winreg.EnumValue(key, i)
def find_cdc_port(self, tool, serial):
"""
Find virtual serial port name based on tool name and serial number
"""
if tool == 'edbgc':
tool = 'edbg'
if tool == 'nedbg':
tool = 'Curiosity'
winreg.Enum = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SYSTEM\CurrentControlSet\Enum')
usb_devs = winreg.OpenKey(winreg.Enum, 'USB')
parentids = []
# Iterate over all devices to find the parent ID of the one with the correct serial number
for key in self.iter_keys(usb_devs):
for subkey in self.iter_keys(key):
try:
devparams = winreg.OpenKey(subkey, "Device Parameters")
symbname = winreg.QueryValueEx(devparams, 'SymbolicName')[0]
if serial in symbname:
parentids += [winreg.QueryValueEx(subkey, 'ParentIdPrefix')[0]]
except:
pass
# Iterate over all devices to find the COM port number matching the previously found parent ID
for key in self.iter_keys(usb_devs):
for subkey_string in self.iter_keys_as_str(key):
subkey = winreg.OpenKey(key, subkey_string)
for val in self.iter_vals(subkey):
if val[0] == 'FriendlyName' and '{} virtual com port'.format(tool.lower()) in val[1].lower():
for parentid in parentids:
if parentid in subkey_string:
self.logger.info("Found CDC port:")
self.logger.info("%s", val[1])
return val[1].split('(')[1].split(')')[0]
return None | pyedbglib/serialport/wincdc.py | from __future__ import print_function
import sys
import os
from logging import getLogger
if os.name == "nt":
if sys.version.startswith("3."):
import winreg
else:
import _winreg as winreg
else:
# If Sphinx is running there is no need to raise any import errors as Sphinx is just generating documentation
if 'sphinx' not in sys.modules:
raise ImportError("This CDC module is for Windows only")
class CDC:
"""
Windows-only implementation of hid device to CDC serial port map.
"""
def __init__(self):
"""
Hook onto logger
"""
self.logger = getLogger(__name__)
def func_name(self):
"""
Get function name
"""
return "%s::%s" % (__name__, sys._getframe(1).f_code.co_name)
def iter_keys_as_str(self, key):
"""
Iterate over subkeys of a key returning subkey as string
"""
for i in range(winreg.QueryInfoKey(key)[0]):
yield winreg.EnumKey(key, i)
def iter_keys(self, key):
"""
Iterate over subkeys of a key
"""
for i in range(winreg.QueryInfoKey(key)[0]):
yield winreg.OpenKey(key, winreg.EnumKey(key, i))
def iter_vals(self, key):
"""
Iterate over values of a key
"""
for i in range(winreg.QueryInfoKey(key)[1]):
yield winreg.EnumValue(key, i)
def find_cdc_port(self, tool, serial):
"""
Find virtual serial port name based on tool name and serial number
"""
if tool == 'edbgc':
tool = 'edbg'
if tool == 'nedbg':
tool = 'Curiosity'
winreg.Enum = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SYSTEM\CurrentControlSet\Enum')
usb_devs = winreg.OpenKey(winreg.Enum, 'USB')
parentids = []
# Iterate over all devices to find the parent ID of the one with the correct serial number
for key in self.iter_keys(usb_devs):
for subkey in self.iter_keys(key):
try:
devparams = winreg.OpenKey(subkey, "Device Parameters")
symbname = winreg.QueryValueEx(devparams, 'SymbolicName')[0]
if serial in symbname:
parentids += [winreg.QueryValueEx(subkey, 'ParentIdPrefix')[0]]
except:
pass
# Iterate over all devices to find the COM port number matching the previously found parent ID
for key in self.iter_keys(usb_devs):
for subkey_string in self.iter_keys_as_str(key):
subkey = winreg.OpenKey(key, subkey_string)
for val in self.iter_vals(subkey):
if val[0] == 'FriendlyName' and '{} virtual com port'.format(tool.lower()) in val[1].lower():
for parentid in parentids:
if parentid in subkey_string:
self.logger.info("Found CDC port:")
self.logger.info("%s", val[1])
return val[1].split('(')[1].split(')')[0]
return None | 0.384912 | 0.092647 |
from datetime import date, datetime, timedelta
from django.test import TestCase
from icalendar import Calendar
from rest_framework.exceptions import ParseError
from .renderers import ICalRenderer
from .utils import parse_date
class ICalSerializerTest(TestCase):
def test_data(self):
renderer = ICalRenderer()
events = [{'full_url': 'http://oxtalks.com/test/1',
'ics_feed_title': 'Talk 1',
'description': 'Description'},
{'full_url': 'http://oxtalks.com/test/2',
'ics_feed_title': 'Talk 2',
'description': ''}]
data = renderer.render(events)
cal = Calendar.from_ical(data)
print cal.subcomponents[1]
self.assertEquals(len(cal.subcomponents), 2)
self.assertEquals(cal.subcomponents[0]['SUMMARY'], 'Talk 1')
self.assertEquals(cal.subcomponents[0]['DESCRIPTION'], 'Description')
self.assertEquals(cal.subcomponents[0]['URL'], 'http://oxtalks.com/test/1')
self.assertEquals(cal.subcomponents[1]['SUMMARY'], 'Talk 2')
self.assertEquals(cal.subcomponents[1]['URL'], 'http://oxtalks.com/test/2')
def test_event_to_ics(self):
renderer = ICalRenderer()
events = {'full_url': 'http://oxtalks.com/test/1',
'ics_feed_title': 'Talk 1',
'description': 'Description'}
data = renderer.render(events)
cal = Calendar.from_ical(data)
self.assertEquals(len(cal.subcomponents), 1)
self.assertEquals(cal.subcomponents[0]['SUMMARY'], 'Talk 1')
self.assertEquals(cal.subcomponents[0]['DESCRIPTION'], 'Description')
self.assertEquals(cal.subcomponents[0]['URL'], 'http://oxtalks.com/test/1')
class UtilsParseDate(TestCase):
def test_today(self):
result = parse_date("today")
self.assertEquals(result, date.today())
def test_tomorrow(self):
result = parse_date("tomorrow")
self.assertEquals(result, date.today() + timedelta(days=1))
def test_invalid_date(self):
self.assertRaises(ParseError, parse_date, "01/01/aa")
def test_custom_date(self):
result = parse_date("13/02/15")
self.assertEquals(result, datetime(2015, 2, 13, 0, 0)) | talks/core/tests.py | from datetime import date, datetime, timedelta
from django.test import TestCase
from icalendar import Calendar
from rest_framework.exceptions import ParseError
from .renderers import ICalRenderer
from .utils import parse_date
class ICalSerializerTest(TestCase):
def test_data(self):
renderer = ICalRenderer()
events = [{'full_url': 'http://oxtalks.com/test/1',
'ics_feed_title': 'Talk 1',
'description': 'Description'},
{'full_url': 'http://oxtalks.com/test/2',
'ics_feed_title': 'Talk 2',
'description': ''}]
data = renderer.render(events)
cal = Calendar.from_ical(data)
print cal.subcomponents[1]
self.assertEquals(len(cal.subcomponents), 2)
self.assertEquals(cal.subcomponents[0]['SUMMARY'], 'Talk 1')
self.assertEquals(cal.subcomponents[0]['DESCRIPTION'], 'Description')
self.assertEquals(cal.subcomponents[0]['URL'], 'http://oxtalks.com/test/1')
self.assertEquals(cal.subcomponents[1]['SUMMARY'], 'Talk 2')
self.assertEquals(cal.subcomponents[1]['URL'], 'http://oxtalks.com/test/2')
def test_event_to_ics(self):
renderer = ICalRenderer()
events = {'full_url': 'http://oxtalks.com/test/1',
'ics_feed_title': 'Talk 1',
'description': 'Description'}
data = renderer.render(events)
cal = Calendar.from_ical(data)
self.assertEquals(len(cal.subcomponents), 1)
self.assertEquals(cal.subcomponents[0]['SUMMARY'], 'Talk 1')
self.assertEquals(cal.subcomponents[0]['DESCRIPTION'], 'Description')
self.assertEquals(cal.subcomponents[0]['URL'], 'http://oxtalks.com/test/1')
class UtilsParseDate(TestCase):
def test_today(self):
result = parse_date("today")
self.assertEquals(result, date.today())
def test_tomorrow(self):
result = parse_date("tomorrow")
self.assertEquals(result, date.today() + timedelta(days=1))
def test_invalid_date(self):
self.assertRaises(ParseError, parse_date, "01/01/aa")
def test_custom_date(self):
result = parse_date("13/02/15")
self.assertEquals(result, datetime(2015, 2, 13, 0, 0)) | 0.637821 | 0.271617 |
import os
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import assert_eq_with_retry
CLICKHOUSE_DATABASE = 'test'


def initialize_database(nodes, shard):
    """Create the test database and a replicated src/dest table pair on every node.

    Each node registers itself as a replica (keyed by ``node.name``) of the same
    ZooKeeper paths, so the nodes become replicas of one another. The aggressive
    log-cleanup SETTINGS force frequent replication-log pruning, which is what
    this test suite wants to exercise.
    """
    ddl = '''
        CREATE DATABASE {database};
        CREATE TABLE `{database}`.src (p UInt64, d UInt64)
        ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/test_consistent_shard1{shard}/replicated', '{replica}')
        ORDER BY d PARTITION BY p
        SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0;
        CREATE TABLE `{database}`.dest (p UInt64, d UInt64)
        ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/test_consistent_shard2{shard}/replicated', '{replica}')
        ORDER BY d PARTITION BY p
        SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0;
        '''
    for replica in nodes:
        replica.query(ddl.format(database=CLICKHOUSE_DATABASE,
                                 shard=shard,
                                 replica=replica.name))
# Two ClickHouse instances sharing one ZooKeeper (with_zookeeper=True), which
# ReplicatedMergeTree needs for replication; both load the same remote_servers.xml.
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
@pytest.fixture(scope="module")
def start_cluster():
try:
cluster.start()
initialize_database([node1, node2], 1)
yield cluster
except Exception as ex:
print ex
finally:
cluster.shutdown()
def test_consistent_part_after_move_partition(start_cluster):
# insert into all replicas
for i in range(100):
node1.query('INSERT INTO `{database}`.src VALUES ({value} % 2, {value})'.format(database=CLICKHOUSE_DATABASE,
value=i))
query_source = 'SELECT COUNT(*) FROM `{database}`.src'.format(database=CLICKHOUSE_DATABASE)
query_dest = 'SELECT COUNT(*) FROM `{database}`.dest'.format(database=CLICKHOUSE_DATABASE)
assert_eq_with_retry(node2, query_source, node1.query(query_source))
assert_eq_with_retry(node2, query_dest, node1.query(query_dest))
node1.query('ALTER TABLE `{database}`.src MOVE PARTITION 1 TO TABLE `{database}`.dest'.format(database=CLICKHOUSE_DATABASE))
assert_eq_with_retry(node2, query_source, node1.query(query_source))
assert_eq_with_retry(node2, query_dest, node1.query(query_dest)) | dbms/tests/integration/test_consistant_parts_after_move_partition/test.py | import os
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import assert_eq_with_retry
CLICKHOUSE_DATABASE = 'test'
def initialize_database(nodes, shard):
for node in nodes:
node.query('''
CREATE DATABASE {database};
CREATE TABLE `{database}`.src (p UInt64, d UInt64)
ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/test_consistent_shard1{shard}/replicated', '{replica}')
ORDER BY d PARTITION BY p
SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0;
CREATE TABLE `{database}`.dest (p UInt64, d UInt64)
ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/test_consistent_shard2{shard}/replicated', '{replica}')
ORDER BY d PARTITION BY p
SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0;
'''.format(shard=shard, replica=node.name, database=CLICKHOUSE_DATABASE))
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
@pytest.fixture(scope="module")
def start_cluster():
try:
cluster.start()
initialize_database([node1, node2], 1)
yield cluster
except Exception as ex:
print ex
finally:
cluster.shutdown()
def test_consistent_part_after_move_partition(start_cluster):
# insert into all replicas
for i in range(100):
node1.query('INSERT INTO `{database}`.src VALUES ({value} % 2, {value})'.format(database=CLICKHOUSE_DATABASE,
value=i))
query_source = 'SELECT COUNT(*) FROM `{database}`.src'.format(database=CLICKHOUSE_DATABASE)
query_dest = 'SELECT COUNT(*) FROM `{database}`.dest'.format(database=CLICKHOUSE_DATABASE)
assert_eq_with_retry(node2, query_source, node1.query(query_source))
assert_eq_with_retry(node2, query_dest, node1.query(query_dest))
node1.query('ALTER TABLE `{database}`.src MOVE PARTITION 1 TO TABLE `{database}`.dest'.format(database=CLICKHOUSE_DATABASE))
assert_eq_with_retry(node2, query_source, node1.query(query_source))
assert_eq_with_retry(node2, query_dest, node1.query(query_dest)) | 0.409221 | 0.136407 |
import sys
import os
import time
from typing import Dict, List, Tuple
import re
State = List[int]
Notes = Dict[int, int]
def get_state_value(index: int, state: State) -> int:
return sum([1 << i for i in range(5) if i + index - 2 in state])
def run_generations(state: State, notes: Notes, generations: int) -> State:
generation = 0
while generation < generations:
state = [index for index in range(
min(state) - 2, max(state) + 2) if notes[get_state_value(index, state)]]
generation += 1
return state
def part2(state: State, notes: Notes) -> int:
jump = 200
first_state = run_generations(state, notes, jump)
first_sum = sum(first_state)
second_state = run_generations(first_state, notes, jump)
diff = sum(second_state) - first_sum
target = 5 * 10 ** 10
return first_sum + diff * (target // jump - 1)
def solve(puzzle_input: Tuple[State, Notes]) -> Tuple[int, int]:
state, notes = puzzle_input
return (
sum(run_generations(state, notes, 20)),
part2(state, notes)
)
def parse_initial_state(line: str) -> List[int]:
return [index for index, c in enumerate(line[15:]) if c == "#"]
def compute_pattern(pattern: str) -> int:
return sum([1 << index for index, c in enumerate(pattern) if c == "#"])
notes_regex = re.compile(
r"^(?P<pattern>[#\.]{5})\s=>\s(?P<result>[#\.])$", re.MULTILINE)
def parse_notes(note_lines: str) -> Notes:
return {compute_pattern(match.group("pattern")): 1 if match.group("result") == "#" else 0
for match in notes_regex.finditer(note_lines)}
def get_input(file_path: str) -> Tuple[State, Notes]:
if not os.path.isfile(file_path):
raise FileNotFoundError(file_path)
with open(file_path, "r") as file:
initial_state_line, note_lines = file.read().split("\n\n")
return parse_initial_state(initial_state_line), parse_notes(note_lines)
def main():
if len(sys.argv) != 2:
raise Exception("Please, add input file path as parameter")
start = time.perf_counter()
part1_result, part2_result = solve(get_input(sys.argv[1]))
end = time.perf_counter()
print("P1:", part1_result)
print("P2:", part2_result)
print()
print(f"Time: {end - start:.7f}")
if __name__ == "__main__":
main() | 2018/12/py/run.py |
import sys
import os
import time
from typing import Dict, List, Tuple
import re
State = List[int]
Notes = Dict[int, int]
def get_state_value(index: int, state: State) -> int:
return sum([1 << i for i in range(5) if i + index - 2 in state])
def run_generations(state: State, notes: Notes, generations: int) -> State:
generation = 0
while generation < generations:
state = [index for index in range(
min(state) - 2, max(state) + 2) if notes[get_state_value(index, state)]]
generation += 1
return state
def part2(state: State, notes: Notes) -> int:
jump = 200
first_state = run_generations(state, notes, jump)
first_sum = sum(first_state)
second_state = run_generations(first_state, notes, jump)
diff = sum(second_state) - first_sum
target = 5 * 10 ** 10
return first_sum + diff * (target // jump - 1)
def solve(puzzle_input: Tuple[State, Notes]) -> Tuple[int, int]:
state, notes = puzzle_input
return (
sum(run_generations(state, notes, 20)),
part2(state, notes)
)
def parse_initial_state(line: str) -> List[int]:
return [index for index, c in enumerate(line[15:]) if c == "#"]
def compute_pattern(pattern: str) -> int:
return sum([1 << index for index, c in enumerate(pattern) if c == "#"])
notes_regex = re.compile(
r"^(?P<pattern>[#\.]{5})\s=>\s(?P<result>[#\.])$", re.MULTILINE)
def parse_notes(note_lines: str) -> Notes:
return {compute_pattern(match.group("pattern")): 1 if match.group("result") == "#" else 0
for match in notes_regex.finditer(note_lines)}
def get_input(file_path: str) -> Tuple[State, Notes]:
if not os.path.isfile(file_path):
raise FileNotFoundError(file_path)
with open(file_path, "r") as file:
initial_state_line, note_lines = file.read().split("\n\n")
return parse_initial_state(initial_state_line), parse_notes(note_lines)
def main():
if len(sys.argv) != 2:
raise Exception("Please, add input file path as parameter")
start = time.perf_counter()
part1_result, part2_result = solve(get_input(sys.argv[1]))
end = time.perf_counter()
print("P1:", part1_result)
print("P2:", part2_result)
print()
print(f"Time: {end - start:.7f}")
if __name__ == "__main__":
main() | 0.446012 | 0.517571 |
import random
import tkinter as tk
from tkinter import *
from PIL import ImageTk,Image
# https://www.youtube.com/watch?v=8QTsK1aVMI0&t=1133s - used as a guide on how to go about creating the game in the shell form
# Used to Create All the Cards - https://stackoverflow.com/questions/41970795/what-is-the-best-way-to-create-a-deck-of-cards/41970851
# How to use classes with functions, __init__, __str__, (sel)f - https://www.youtube.com/watch?v=wfcWRAxRVBA
# how to use tKinter: https://realpython.com/python-gui-tkinter/
# File with 52 Cards - https://boardgames.stackexchange.com/questions/51426/where-can-i-download-high-quality-images-of-poker-cards (names of cards were changed to match code)
# How to pip install PIL https://pillow.readthedocs.io/en/stable/installation.html
# How to get started with building a GUI with TKinter: https://www.youtube.com/watch?v=jE-SpRI3K5g
playing = True # We will use gloabal logic to coninuosly play the game
suits = ('Spades', 'Clubs', 'Hearts', 'Diamonds')
names = ('Ace', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King')
values = {'Ace': 11, 'Two': 2, 'Three': 3, 'Four': 4, 'Five': 5, 'Six': 6, 'Seven': 7, 'Eight': 8, 'Nine': 9, 'Ten': 10, 'Jack': 10, 'Queen': 10, 'King': 10}
class individualCardCreator:
def __init__ (self, suit, name):
self.suit = suit
self.name = name
def __str__ (self):
return self.name + ' of ' + self.suit
class Deck: # Creates a deck
def __init__(self):
self.deck = [] # Initializes deck list
for suit in suits:
for suitNumb in names:
self.deck.append(individualCardCreator(suit, suitNumb))
def shuffle(self):
random.shuffle(self.deck) # Shuffles deck
def deal(self):
dealt_card = self.deck.pop() # Picks a card from the deck, and removes it
return dealt_card # Returns picked card
class Hand:
def __init__(self):
self.cards = [] # Creats the hand
self.value = 0 # Initializes the hand value
self.aces = 0 # Need to account for aces b/c troublesome
def add_card(self, card): # adds card to the player's or dealer's hand
self.cards.append(card)
self.value += values[card.name]
if card.name == 'Ace':
self.aces += 1
def ace_adjust(self):
while self.value > 21 and self.aces:
self.value -= 10 # Accounts for Ace counting as 11, or 1 when the hand total is over 21
self.aces -=1 # Removes the count of ace after it is accounted for once, this allows for the loop to run twice just in case there is a senario of double aces
def hit(deck, hand):
hand.add_card(deck.deal())
hand.ace_adjust()
def HoS(deck, hand):
global playing
while True:
HorS = input("\nHit or Stand? (Enter 'h' to hit or 's' to stand) ")
if HorS[0].lower() == 'h':
hit(deck, hand)
elif HorS[0].lower() == 's':
print("You have chosen to stand. The deal will play his turn now.")
playing = False # Ends global 'True' on play, everything is now automatic
else:
print("Invalid input. Try again!")
continue
break # https://www.tutorialspoint.com/python/python_loop_control.htm
def hidden(player, dealer):
print("\nDealer's Hand: ")
print(" <card hidden>")
print("", dealer.cards[1])
print("\nPlayer's Hand: ", *player.cards, sep='\n ')
def shown(player, dealer):
print("\nDealer's Hand: ", *dealer.cards, sep='\n ')
print("Dealer's Hand =", dealer.value)
print("\nPlayer's Hand: ", *player.cards, sep='\n ')
print("Player's Hand =", player.value)
class Bank:
def __init__(self):
self.total = 1000
self.bet = 0
def betW(self):
self.total += self.bet
def betL(self):
self.total -= self.bet
def wager(cash):
while True:
try:
cash.bet = int(input("How much money would you like to wager? (Enter dollar amount) "))
except ValueError: # https://www.journaldev.com/33500/python-valueerror-exception-handling-examples#:~:text=Python%20ValueError%20is%20raised%20when,precise%20exception%20such%20as%20IndexError.
print("Invalid input. Please enter the amount of money you would like to bet: ")
else:
if cash.bet > cash.total:
print("Your bet cannot exceed: $1000!")
else:
break
# Game Situations
def playerL(player, dealer, cash): # Player Wins
print("PLAYER LOSES!")
cash.betL()
def playerW(player, dealer, cash): # Player Loses
print("PLAYER WINS!")
cash.betW()
def dealerW(player, dealer, cash): # Dealer Wins
print("DEALER WINS!")
cash.betL()
def dealerL(player, dealer, cash): # Dealer Loses
print("DEALER LOSES!")
cash.betW()
def tie(player, dealer): # Tie
print("Player and Dealer tie!")
# Game
while True:
print("WELCOME TO <NAME>!")
print("Bank Amount: $1000")
#----------------------------------------------------------------------------------------------
root = tk.Tk()
root.title("Black Jack")
canvas = tk.Canvas(root, height=900, width=1200, bg="#263D42")
canvas.create_text(600, 37, text="<NAME>", fill="white", font=('Helvetica 25 bold'))
canvas.create_text(600, 125, text="Dealer's Hand", fill="white", font=('Helvetica 15 bold'))
canvas.create_text(600, 525, text="Player's Hand", fill="white", font=('Helvetica 15 bold'))
canvas.pack()
middle = tk.Frame(root, bg="white")
middle.place(width=800, height=15, x=200, y=450)
canvas.pack()
#----------------------------------------------------------------------------------------------
deck = Deck() # Creates a deck with 52 cards
deck.shuffle() #Shuffles a 52 card deck
playerH = Hand() # Creates a player hand
dealerH = Hand() # Creates a dealer hand
playerH.add_card(deck.deal()) # 1st Player Card
playerH.add_card(deck.deal()) # 2nd Player Card
dealerH.add_card(deck.deal()) # 1st Dealer Card
dealerH.add_card(deck.deal()) # 2nd Dealer Card
player_bank = Bank() # Creates a bank for the player with an intial amount of $1000
wager(player_bank) # Allows for a way to take in wager
hidden(playerH, dealerH) # Prints hands, 1 of dealer card is hidden
while playing:
HoS(deck, playerH)
hidden(playerH, dealerH)
if playerH.value > 21:
playerL(playerH, dealerH, player_bank)
break
if playerH.value <= 21:
while dealerH.value < 17:
hit(deck, dealerH)
shown(playerH, dealerH)
if dealerH.value > 21:
dealerL(playerH, dealerH, player_bank)
elif dealerH.value > playerH.value:
dealerW(playerH, dealerH, player_bank)
elif dealerH.value < playerH.value:
playerW(playerH, dealerH, player_bank)
if playerH.value > 21:
playerL(playerH, dealerH, player_bank)
print("\nPlayer's bank: ", player_bank.total)
print("\nThank you for playing!")
aP = 0
imgs = {} # To create new variable names within loops: https://stackoverflow.com/questions/36767496/creating-multiple-variables-during-each-iteration-of-for-loop-and-executing-tk-c
imgs_r = {}
imgs_n = {}
for i in list(playerH.cards):
name = "img" + str(aP)
i = str(i)
imgs[name] = Image.open(i+".png") # How to get image from file and put onto GUI: https://www.c-sharpcorner.com/blogs/basics-for-displaying-image-in-tkinter-python
imgs_r[name] = imgs[name].resize((130,195), Image.ANTIALIAS)
imgs_n[name] = ImageTk.PhotoImage(imgs_r[name])
canvas.create_image((215+160*aP),597, anchor=NW, image=imgs_n[name])
aP = aP + 1
dimgs = {}
dimgs_r = {}
dimgs_n = {}
dP = 0
for z in list(dealerH.cards):
name = "dimg" + str(dP)
z = str(z)
dimgs[name] = Image.open(z+".png")
dimgs_r[name] = dimgs[name].resize((130,195), Image.ANTIALIAS)
dimgs_n[name] = ImageTk.PhotoImage(dimgs_r[name])
canvas.create_image((215+160*dP),197, anchor=NW, image=dimgs_n[name])
dP = dP + 1
root = mainloop()
break | AA_FINAL PROJECT.py | import random
import tkinter as tk
from tkinter import *
from PIL import ImageTk,Image
# https://www.youtube.com/watch?v=8QTsK1aVMI0&t=1133s - used as a guide on how to go about creating the game in the shell form
# Used to Create All the Cards - https://stackoverflow.com/questions/41970795/what-is-the-best-way-to-create-a-deck-of-cards/41970851
# How to use classes with functions, __init__, __str__, (sel)f - https://www.youtube.com/watch?v=wfcWRAxRVBA
# how to use tKinter: https://realpython.com/python-gui-tkinter/
# File with 52 Cards - https://boardgames.stackexchange.com/questions/51426/where-can-i-download-high-quality-images-of-poker-cards (names of cards were changed to match code)
# How to pip install PIL https://pillow.readthedocs.io/en/stable/installation.html
# How to get started with building a GUI with TKinter: https://www.youtube.com/watch?v=jE-SpRI3K5g
playing = True # We will use gloabal logic to coninuosly play the game
suits = ('Spades', 'Clubs', 'Hearts', 'Diamonds')
names = ('Ace', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King')
values = {'Ace': 11, 'Two': 2, 'Three': 3, 'Four': 4, 'Five': 5, 'Six': 6, 'Seven': 7, 'Eight': 8, 'Nine': 9, 'Ten': 10, 'Jack': 10, 'Queen': 10, 'King': 10}
class individualCardCreator:
def __init__ (self, suit, name):
self.suit = suit
self.name = name
def __str__ (self):
return self.name + ' of ' + self.suit
class Deck: # Creates a deck
def __init__(self):
self.deck = [] # Initializes deck list
for suit in suits:
for suitNumb in names:
self.deck.append(individualCardCreator(suit, suitNumb))
def shuffle(self):
random.shuffle(self.deck) # Shuffles deck
def deal(self):
dealt_card = self.deck.pop() # Picks a card from the deck, and removes it
return dealt_card # Returns picked card
class Hand:
def __init__(self):
self.cards = [] # Creats the hand
self.value = 0 # Initializes the hand value
self.aces = 0 # Need to account for aces b/c troublesome
def add_card(self, card): # adds card to the player's or dealer's hand
self.cards.append(card)
self.value += values[card.name]
if card.name == 'Ace':
self.aces += 1
def ace_adjust(self):
while self.value > 21 and self.aces:
self.value -= 10 # Accounts for Ace counting as 11, or 1 when the hand total is over 21
self.aces -=1 # Removes the count of ace after it is accounted for once, this allows for the loop to run twice just in case there is a senario of double aces
def hit(deck, hand):
hand.add_card(deck.deal())
hand.ace_adjust()
def HoS(deck, hand):
global playing
while True:
HorS = input("\nHit or Stand? (Enter 'h' to hit or 's' to stand) ")
if HorS[0].lower() == 'h':
hit(deck, hand)
elif HorS[0].lower() == 's':
print("You have chosen to stand. The deal will play his turn now.")
playing = False # Ends global 'True' on play, everything is now automatic
else:
print("Invalid input. Try again!")
continue
break # https://www.tutorialspoint.com/python/python_loop_control.htm
def hidden(player, dealer):
print("\nDealer's Hand: ")
print(" <card hidden>")
print("", dealer.cards[1])
print("\nPlayer's Hand: ", *player.cards, sep='\n ')
def shown(player, dealer):
print("\nDealer's Hand: ", *dealer.cards, sep='\n ')
print("Dealer's Hand =", dealer.value)
print("\nPlayer's Hand: ", *player.cards, sep='\n ')
print("Player's Hand =", player.value)
class Bank:
def __init__(self):
self.total = 1000
self.bet = 0
def betW(self):
self.total += self.bet
def betL(self):
self.total -= self.bet
def wager(cash):
while True:
try:
cash.bet = int(input("How much money would you like to wager? (Enter dollar amount) "))
except ValueError: # https://www.journaldev.com/33500/python-valueerror-exception-handling-examples#:~:text=Python%20ValueError%20is%20raised%20when,precise%20exception%20such%20as%20IndexError.
print("Invalid input. Please enter the amount of money you would like to bet: ")
else:
if cash.bet > cash.total:
print("Your bet cannot exceed: $1000!")
else:
break
# Game Situations
def playerL(player, dealer, cash): # Player Wins
print("PLAYER LOSES!")
cash.betL()
def playerW(player, dealer, cash): # Player Loses
print("PLAYER WINS!")
cash.betW()
def dealerW(player, dealer, cash): # Dealer Wins
print("DEALER WINS!")
cash.betL()
def dealerL(player, dealer, cash): # Dealer Loses
print("DEALER LOSES!")
cash.betW()
def tie(player, dealer): # Tie
print("Player and Dealer tie!")
# Game
while True:
print("WELCOME TO <NAME>!")
print("Bank Amount: $1000")
#----------------------------------------------------------------------------------------------
root = tk.Tk()
root.title("Black Jack")
canvas = tk.Canvas(root, height=900, width=1200, bg="#263D42")
canvas.create_text(600, 37, text="<NAME>", fill="white", font=('Helvetica 25 bold'))
canvas.create_text(600, 125, text="Dealer's Hand", fill="white", font=('Helvetica 15 bold'))
canvas.create_text(600, 525, text="Player's Hand", fill="white", font=('Helvetica 15 bold'))
canvas.pack()
middle = tk.Frame(root, bg="white")
middle.place(width=800, height=15, x=200, y=450)
canvas.pack()
#----------------------------------------------------------------------------------------------
deck = Deck() # Creates a deck with 52 cards
deck.shuffle() #Shuffles a 52 card deck
playerH = Hand() # Creates a player hand
dealerH = Hand() # Creates a dealer hand
playerH.add_card(deck.deal()) # 1st Player Card
playerH.add_card(deck.deal()) # 2nd Player Card
dealerH.add_card(deck.deal()) # 1st Dealer Card
dealerH.add_card(deck.deal()) # 2nd Dealer Card
player_bank = Bank() # Creates a bank for the player with an intial amount of $1000
wager(player_bank) # Allows for a way to take in wager
hidden(playerH, dealerH) # Prints hands, 1 of dealer card is hidden
while playing:
HoS(deck, playerH)
hidden(playerH, dealerH)
if playerH.value > 21:
playerL(playerH, dealerH, player_bank)
break
if playerH.value <= 21:
while dealerH.value < 17:
hit(deck, dealerH)
shown(playerH, dealerH)
if dealerH.value > 21:
dealerL(playerH, dealerH, player_bank)
elif dealerH.value > playerH.value:
dealerW(playerH, dealerH, player_bank)
elif dealerH.value < playerH.value:
playerW(playerH, dealerH, player_bank)
if playerH.value > 21:
playerL(playerH, dealerH, player_bank)
print("\nPlayer's bank: ", player_bank.total)
print("\nThank you for playing!")
aP = 0
imgs = {} # To create new variable names within loops: https://stackoverflow.com/questions/36767496/creating-multiple-variables-during-each-iteration-of-for-loop-and-executing-tk-c
imgs_r = {}
imgs_n = {}
for i in list(playerH.cards):
name = "img" + str(aP)
i = str(i)
imgs[name] = Image.open(i+".png") # How to get image from file and put onto GUI: https://www.c-sharpcorner.com/blogs/basics-for-displaying-image-in-tkinter-python
imgs_r[name] = imgs[name].resize((130,195), Image.ANTIALIAS)
imgs_n[name] = ImageTk.PhotoImage(imgs_r[name])
canvas.create_image((215+160*aP),597, anchor=NW, image=imgs_n[name])
aP = aP + 1
dimgs = {}
dimgs_r = {}
dimgs_n = {}
dP = 0
for z in list(dealerH.cards):
name = "dimg" + str(dP)
z = str(z)
dimgs[name] = Image.open(z+".png")
dimgs_r[name] = dimgs[name].resize((130,195), Image.ANTIALIAS)
dimgs_n[name] = ImageTk.PhotoImage(dimgs_r[name])
canvas.create_image((215+160*dP),197, anchor=NW, image=dimgs_n[name])
dP = dP + 1
root = mainloop()
break | 0.515864 | 0.262475 |
# DOCUMENTATION is parsed as YAML by ansible-doc; avoid bare ": " inside
# plain scalars, which would turn a description line into a YAML mapping.
DOCUMENTATION = '''
---
module: napalm_get_facts
author: "<NAME> (@fooelisa)"
version_added: "2.1"
short_description: "Gathers facts from a network device via napalm"
description:
    - "Gathers facts from a network device via the Python module napalm"
requirements:
    - napalm
options:
    hostname:
        description:
            - IP or FQDN of the device you want to connect to
        required: False
    username:
        description:
            - Username
        required: False
    password:
        description:
            - Password
        required: False
    dev_os:
        description:
            - OS of the device
        required: False
        choices: ['eos', 'junos', 'iosxr', 'fortios', 'ibm', 'ios', 'nxos', 'panos', 'vyos']
    provider:
        description:
            - Dictionary which acts as a collection of arguments used to define the characteristics
              of how to connect to the device.
              Note - hostname, username, password and dev_os must be defined in either provider
              or local param
              Note - local param takes precedence, e.g. hostname is preferred to provider['hostname']
        required: False
    timeout:
        description:
            - Time in seconds to wait for the device to respond
        required: False
        default: 60
    optional_args:
        description:
            - Dictionary of additional arguments passed to underlying driver
        required: False
        default: None
    ignore_notimplemented:
        description:
            - Ignores NotImplementedError for filters which aren't supported by the driver.
              Returns invalid filters in a list called not_implemented
        required: False
        default: False
        choices: [True, False]
    filter:
        description:
            - A list of facts to retrieve from a device and provided through C(ansible_facts)
              The list of facts available is maintained at http://napalm.readthedocs.io/en/latest/support/
              Note - not all getters are implemented on all supported device types
        required: False
        default: ['facts']
'''
EXAMPLES = '''
vars:
ios_provider:
hostname: "{{ inventory_hostname }}"
username: "napalm"
password: "<PASSWORD>"
dev_os: "ios"
- name: get facts from device
napalm_get_facts:
hostname={{ inventory_hostname }}
username={{ user }}
dev_os={{ os }}
password={{ <PASSWORD> }}
filter=['facts']
register: result
- name: print data
debug: var=result
- name: Getters
napalm_get_facts:
provider: "{{ ios_provider }}"
filter:
- "lldp_neighbors_detail"
- "interfaces"
'''
RETURN = '''
changed:
description: "whether the command has been executed on the device"
returned: always
type: bool
sample: True
ansible_facts:
description: "Facts gathered on the device provided via C(ansible_facts)"
returned: certain keys are returned depending on filter
type: dict
'''
# Import guard: record napalm availability instead of letting the module
# crash at import time, so main() can fail with a readable message.
napalm_found = True
try:
    from napalm_base import get_network_driver
except ImportError:
    napalm_found = False
def main():
    """Ansible module entry point.

    Connects to a network device through the napalm driver selected by
    ``dev_os``, runs every requested ``get_*`` getter named in ``filter``
    and returns the collected data via ``ansible_facts``.

    Connection parameters may be supplied directly or through the
    ``provider`` dictionary; direct parameters take precedence.  Any
    connection, retrieval or close error fails the module run.
    """
    os_choices = ['eos', 'junos', 'iosxr', 'fortios', 'ibm', 'ios', 'nxos', 'panos', 'vyos']
    module = AnsibleModule(
        argument_spec=dict(
            hostname=dict(type='str', required=False, aliases=['host']),
            username=dict(type='str', required=False),
            password=dict(type='str', required=False, no_log=True),
            provider=dict(type='dict', required=False, no_log=True),
            dev_os=dict(type='str', required=False, choices=os_choices),
            timeout=dict(type='int', required=False, default=60),
            ignore_notimplemented=dict(type='bool', required=False, default=False),
            optional_args=dict(type='dict', required=False, default=None),
            filter=dict(type='list', required=False, default=['facts']),
        ),
        supports_check_mode=True
    )

    if not napalm_found:
        module.fail_json(msg="the python module napalm is required")

    provider = module.params['provider'] or {}

    # allow either 'host' or 'hostname' inside the provider dict
    provider['hostname'] = provider.get('hostname', None) or provider.get('host', None)

    # allow local params to override provider values; an explicitly-set
    # False (or other value equal to False, e.g. 0) is kept as-is, which is
    # why this is `!= False` rather than an identity check
    for param, pvalue in provider.items():
        if module.params.get(param) != False:  # noqa: E712 (see comment above)
            module.params[param] = module.params.get(param) or pvalue

    hostname = module.params['hostname']
    username = module.params['username']
    dev_os = module.params['dev_os']
    password = module.params['password']
    timeout = module.params['timeout']
    filter_list = module.params['filter']
    ignore_notimplemented = module.params['ignore_notimplemented']
    implementation_errors = []

    # validate here instead of via argument_spec 'required', since the
    # values can come from either the provider dict or the local params
    argument_check = {'hostname': hostname, 'username': username, 'dev_os': dev_os, 'password': password}
    for key, val in argument_check.items():
        if val is None:
            module.fail_json(msg=str(key) + " is required")

    # re-check dev_os outside Ansible's own choices check, since it may
    # have arrived through the provider dict
    if dev_os not in os_choices:
        module.fail_json(msg="dev_os is not set to " + str(os_choices))

    if module.params['optional_args'] is None:
        optional_args = {}
    else:
        optional_args = module.params['optional_args']

    # open device connection
    try:
        network_driver = get_network_driver(dev_os)
        device = network_driver(hostname=hostname,
                                username=username,
                                password=password,
                                timeout=timeout,
                                optional_args=optional_args)
        device.open()
    except Exception as e:  # fixed: `except Exception, e:` is Python-2-only syntax
        module.fail_json(msg="cannot connect to device: " + str(e))

    # retrieve data from device, one getter per requested filter entry
    facts = {}
    napalm_getters = [getter for getter in dir(network_driver) if getter.startswith("get_")]
    for getter in filter_list:
        getter_function = "get_{}".format(getter)
        if getter_function not in napalm_getters:
            module.fail_json(msg="filter not recognized: " + getter)

        try:
            get_func = getattr(device, getter_function)
            facts[getter] = get_func()
        except NotImplementedError:
            # driver exists but does not implement this getter
            if ignore_notimplemented:
                implementation_errors.append(getter)
            else:
                module.fail_json(
                    msg="The filter {} is not supported in napalm-{} [get_{}()]".format(
                        getter, dev_os, getter))
        except Exception as e:
            module.fail_json(msg="[{}] cannot retrieve device data: ".format(getter) + str(e))

    # close device connection
    try:
        device.close()
    except Exception as e:
        module.fail_json(msg="cannot close device connection: " + str(e))

    results = {}
    results['ansible_facts'] = facts
    if ignore_notimplemented:
        results['not_implemented'] = sorted(implementation_errors)
    module.exit_json(**results)
# standard ansible module imports
from ansible.module_utils.basic import *
if __name__ == '__main__':
main() | napalm-ansible/library/napalm_get_facts.py | DOCUMENTATION = '''
---
module: napalm_get_facts
author: "<NAME> (@fooelisa)"
version_added: "2.1"
short_description: "Gathers facts from a network device via napalm"
description:
- "Gathers facts from a network device via the Python module napalm"
requirements:
- napalm
options:
hostname:
description:
- IP or FQDN of the device you want to connect to
required: False
username:
description:
- Username
required: False
password:
description:
- Password
required: False
dev_os:
description:
- OS of the device
required: False
choices: ['eos', 'junos', 'iosxr', 'fortios', 'ibm', 'ios', 'nxos', 'panos', 'vyos']
provider:
description:
- Dictionary which acts as a collection of arguments used to define the characteristics
of how to connect to the device.
Note - hostname, username, password and dev_os must be defined in either provider
or local param
Note - local param takes precedence, e.g. hostname is preferred to provider['hostname']
required: False
timeout:
description:
- Time in seconds to wait for the device to respond
required: False
default: 60
optional_args:
description:
- Dictionary of additional arguments passed to underlying driver
required: False
default: None
ignore_notimplemented:
description:
- Ignores NotImplementedError for filters which aren't supported by the driver. Returns
invalid filters in a list called: not_implemented
required: False
default: False
choices: [True, False]
filter:
description:
- A list of facts to retreive from a device and provided though C(ansible_facts)
The list of facts available are maintained at: http://napalm.readthedocs.io/en/latest/support/
Note- not all getters are implemented on all supported device types
required: False
default: ['facts']
'''
EXAMPLES = '''
vars:
ios_provider:
hostname: "{{ inventory_hostname }}"
username: "napalm"
password: "<PASSWORD>"
dev_os: "ios"
- name: get facts from device
napalm_get_facts:
hostname={{ inventory_hostname }}
username={{ user }}
dev_os={{ os }}
password={{ <PASSWORD> }}
filter=['facts']
register: result
- name: print data
debug: var=result
- name: Getters
napalm_get_facts:
provider: "{{ ios_provider }}"
filter:
- "lldp_neighbors_detail"
- "interfaces"
'''
RETURN = '''
changed:
description: "whether the command has been executed on the device"
returned: always
type: bool
sample: True
ansible_facts:
description: "Facts gathered on the device provided via C(ansible_facts)"
returned: certain keys are returned depending on filter
type: dict
'''
# Import guard: record napalm availability instead of letting the module
# crash at import time, so main() can fail with a readable message.
napalm_found = True
try:
    from napalm_base import get_network_driver
except ImportError:
    napalm_found = False
def main():
    """Ansible module entry point: gather NAPALM getter facts from a device.

    Builds the module spec, merges ``provider`` values into local params
    (local values win), opens the NAPALM driver connection, runs each
    requested ``get_*`` getter and returns results via ``ansible_facts``.
    Exits through ``module.exit_json`` / ``module.fail_json``.
    """
    os_choices = ['eos', 'junos', 'iosxr', 'fortios', 'ibm', 'ios', 'nxos', 'panos', 'vyos']
    module = AnsibleModule(
        argument_spec=dict(
            hostname=dict(type='str', required=False, aliases=['host']),
            username=dict(type='str', required=False),
            password=dict(type='str', required=False, no_log=True),
            provider=dict(type='dict', required=False, no_log=True),
            dev_os=dict(type='str', required=False, choices=os_choices),
            timeout=dict(type='int', required=False, default=60),
            ignore_notimplemented=dict(type='bool', required=False, default=False),
            optional_args=dict(type='dict', required=False, default=None),
            filter=dict(type='list', required=False, default=['facts']),
        ),
        supports_check_mode=True
    )
    if not napalm_found:
        module.fail_json(msg="the python module napalm is required")
    provider = module.params['provider'] or {}
    # allow 'host' as an alias for 'hostname' inside the provider dict
    provider['hostname'] = provider.get('hostname', None) or provider.get('host', None)
    # allow local params to override provider values (an explicit False is kept)
    for param, pvalue in provider.items():
        if module.params.get(param) != False:
            module.params[param] = module.params.get(param) or pvalue
    hostname = module.params['hostname']
    username = module.params['username']
    dev_os = module.params['dev_os']
    password = module.params['password']
    timeout = module.params['timeout']
    filter_list = module.params['filter']
    ignore_notimplemented = module.params['ignore_notimplemented']
    implementation_errors = []
    # These values may come from either provider or local params, so they are
    # validated here rather than with required=True in the argument_spec.
    argument_check = {'hostname': hostname, 'username': username,
                      'dev_os': dev_os, 'password': password}
    for key, val in argument_check.items():
        if val is None:
            module.fail_json(msg=str(key) + " is required")
    if dev_os not in os_choices:
        module.fail_json(msg="dev_os must be one of " + str(os_choices))
    optional_args = module.params['optional_args'] or {}
    # open device connection
    try:
        network_driver = get_network_driver(dev_os)
        device = network_driver(hostname=hostname,
                                username=username,
                                password=password,
                                timeout=timeout,
                                optional_args=optional_args)
        device.open()
    except Exception as e:  # py2/py3-compatible syntax (was `except Exception, e`)
        module.fail_json(msg="cannot connect to device: " + str(e))
    # retrieve data from device
    facts = {}
    NAPALM_GETTERS = [getter for getter in dir(network_driver) if getter.startswith("get_")]
    for getter in filter_list:
        getter_function = "get_{}".format(getter)
        if getter_function not in NAPALM_GETTERS:
            module.fail_json(msg="filter not recognized: " + getter)
        try:
            facts[getter] = getattr(device, getter_function)()
        except NotImplementedError:
            if ignore_notimplemented:
                implementation_errors.append(getter)
            else:
                module.fail_json(
                    msg="The filter {} is not supported in napalm-{} [get_{}()]".format(
                        getter, dev_os, getter))
        except Exception as e:
            module.fail_json(msg="[{}] cannot retrieve device data: ".format(getter) + str(e))
    # close device connection
    try:
        device.close()
    except Exception as e:
        module.fail_json(msg="cannot close device connection: " + str(e))
    results = {'ansible_facts': facts}
    if ignore_notimplemented:
        results['not_implemented'] = sorted(implementation_errors)
    module.exit_json(**results)
# standard ansible module imports
from ansible.module_utils.basic import *
if __name__ == '__main__':
    # fixed: this line carried fused dataset metadata, making it a SyntaxError
    main()
import sys
# Characters treated as word separators in addition to whitespace.
# Note: apostrophes and quotes are included, so "don't" tokenizes as "don", "t".
DELIMITERS = ". , ; : ? $ @ ^ < > # % ` ! * - = ( ) [ ] { } / \" '".split()
def load_text(filename):
    """Read a plain-text file and return its lines without trailing newlines."""
    with open(filename) as handle:
        return handle.read().splitlines()
def save_word_counts(filename, counts):
    """Write [word, count, percentage] rows to *filename*.

    Each row becomes one space-separated line: "word count percentage".
    """
    with open(filename, 'w') as sink:
        for row in counts:
            sink.write(" ".join(str(field) for field in row) + "\n")
def load_word_counts(filename):
    """Parse "word count percentage" lines into (str, int, float) tuples.

    Lines starting with '#' are treated as comments and skipped.
    """
    parsed = []
    with open(filename, "r") as source:
        for raw_line in source:
            if raw_line.startswith("#"):
                continue
            fields = raw_line.split()
            parsed.append((fields[0], int(fields[1]), float(fields[2])))
    return parsed
def update_word_counts(line, counts, delimiters=None):
    """Tally the words of *line* into the *counts* dict in place.

    Delimiter characters are replaced by spaces before splitting, and words
    are lower-cased, so counting is case-insensitive.

    :param line: text to tokenize
    :param counts: dict mapping word -> frequency, updated in place
    :param delimiters: iterable of separator strings; defaults to the
        module-level DELIMITERS (backward-compatible generalization)
    """
    if delimiters is None:
        delimiters = DELIMITERS
    for separator in delimiters:
        line = line.replace(separator, " ")
    for word in line.split():
        # str.split() already removes surrounding whitespace, so no strip needed
        word = word.lower()
        counts[word] = counts.get(word, 0) + 1
def calculate_word_counts(lines):
    """Build a word -> frequency dict from an iterable of strings.

    Tokenization is delegated to update_word_counts, so DELIMITERS are
    stripped and counting is case-insensitive (lower-cased keys).
    """
    frequencies = {}
    for text in lines:
        update_word_counts(text, frequencies)
    return frequencies
def word_count_dict_to_tuples(counts, decrease=True):
    """Convert a word->count dict into a list of (word, count) tuples.

    Sorted by count, descending by default; pass decrease=False for
    ascending order.
    """
    return sorted(counts.items(), key=lambda item: item[1], reverse=decrease)
def filter_word_counts(counts, min_length=1):
    """Keep only the (word, count) tuples whose word length is >= min_length."""
    return [(word, count) for word, count in counts if len(word) >= min_length]
def calculate_percentages(counts):
    """Annotate (word, count) tuples with each word's share of the total.

    Returns a list of (word, count, percentage) tuples where percentage is
    100 * count / total. An empty input (or all-zero counts) yields an empty
    list instead of raising ZeroDivisionError.
    """
    total = sum(count for _, count in counts)
    if total == 0:
        return []
    return [(word, count, (float(count) / total) * 100.0)
            for (word, count) in counts]
def word_count(input_file, output_file, min_length=1):
    """Compute word frequencies of *input_file* and save them to *output_file*.

    Output rows are "word count percentage", sorted by descending count;
    only words of length >= min_length are included.
    """
    frequencies = calculate_word_counts(load_text(input_file))
    ranked = filter_word_counts(word_count_dict_to_tuples(frequencies), min_length)
    save_word_counts(output_file, calculate_percentages(ranked))
if __name__ == '__main__':
    # CLI: countwords.py INPUT OUTPUT [MIN_LENGTH]
    input_file = sys.argv[1]
    output_file = sys.argv[2]
    min_length = 1
    if len(sys.argv) > 3:
        min_length = int(sys.argv[3])
    # fixed: the call line carried fused dataset metadata (SyntaxError)
    word_count(input_file, output_file, min_length)
import sys
DELIMITERS = ". , ; : ? $ @ ^ < > # % ` ! * - = ( ) [ ] { } / \" '".split()
def load_text(filename):
"""
Load lines from a plain-text file and return these as a list, with
trailing newlines stripped.
"""
with open(filename) as input_fd:
lines = input_fd.read().splitlines()
return lines
def save_word_counts(filename, counts):
"""
Save a list of [word, count, percentage] lists to a file, in the form
"word count percentage", one tuple per line.
"""
with open(filename, 'w') as output:
for count in counts:
output.write("%s\n" % " ".join(str(c) for c in count))
def load_word_counts(filename):
"""
Load a list of (word, count, percentage) tuples from a file where each
line is of the form "word count percentage". Lines starting with # are
ignored.
"""
counts = []
with open(filename, "r") as input_fd:
for line in input_fd:
if not line.startswith("#"):
fields = line.split()
counts.append((fields[0], int(fields[1]), float(fields[2])))
return counts
def update_word_counts(line, counts):
"""
Given a string, parse the string and update a dictionary of word
counts (mapping words to counts of their frequencies). DELIMITERS are
removed before the string is parsed. The function is case-insensitive
and words in the dictionary are in lower-case.
"""
for purge in DELIMITERS:
line = line.replace(purge, " ")
words = line.split()
for word in words:
word = word.lower().strip()
if word in counts:
counts[word] += 1
else:
counts[word] = 1
def calculate_word_counts(lines):
"""
Given a list of strings, parse each string and create a dictionary of
word counts (mapping words to counts of their frequencies). DELIMITERS
are removed before the string is parsed. The function is
case-insensitive and words in the dictionary are in lower-case.
"""
counts = {}
for line in lines:
update_word_counts(line, counts)
return counts
def word_count_dict_to_tuples(counts, decrease=True):
"""
Given a dictionary of word counts (mapping words to counts of their
frequencies), convert this into an ordered list of tuples (word,
count). The list is ordered by decreasing count, unless increase is
True.
"""
return sorted(list(counts.items()), key=lambda key_value: key_value[1],
reverse=decrease)
def filter_word_counts(counts, min_length=1):
"""
Given a list of (word, count) tuples, create a new list with only
those tuples whose word is >= min_length.
"""
stripped = []
for (word, count) in counts:
if len(word) >= min_length:
stripped.append((word, count))
return stripped
def calculate_percentages(counts):
"""
Given a list of (word, count) tuples, create a new list (word, count,
percentage) where percentage is the percentage number of occurrences
of this word compared to the total number of words.
"""
total = 0
for count in counts:
total += count[1]
tuples = [(word, count, (float(count) / total) * 100.0)
for (word, count) in counts]
return tuples
def word_count(input_file, output_file, min_length=1):
"""
Load a file, calculate the frequencies of each word in the file and
save in a new file the words, counts and percentages of the total in
descending order. Only words whose length is >= min_length are
included.
"""
lines = load_text(input_file)
counts = calculate_word_counts(lines)
sorted_counts = word_count_dict_to_tuples(counts)
sorted_counts = filter_word_counts(sorted_counts, min_length)
percentage_counts = calculate_percentages(sorted_counts)
save_word_counts(output_file, percentage_counts)
if __name__ == '__main__':
input_file = sys.argv[1]
output_file = sys.argv[2]
min_length = 1
if len(sys.argv) > 3:
min_length = int(sys.argv[3])
word_count(input_file, output_file, min_length) | 0.520253 | 0.4856 |
import autograd.numpy as np
from autograd import value_and_grad
from scipy.optimize import minimize
from util import get_median_inter_mnist, Kernel, load_data, ROOT_PATH,_sqdist,remove_outliers, nystrom_decomp, chol_inv
import time
import rpy2.robjects as robjects
import matplotlib.pyplot as plt
# Optimisation bookkeeping shared between LMO_err and the L-BFGS-B callback.
Nfeval = 1  # number of callback invocations so far
seed = 527  # RNG seed; also reused as the Nystrom landmark re-draw count
np.random.seed(seed)
JITTER = 1e-7  # diagonal jitter for numerically stable Cholesky inverses
nystr_M = 300  # number of landmarks for the Nystrom approximation
EYE_nystr = np.eye(nystr_M)
opt_params = None  # best (lowest-norm) kernel hyper-parameters seen so far
prev_norm = None  # RKHS norm of the previously accepted solution
opt_test_err = None  # mean squared error at opt_params
def experiment(nystr=True,IV=True):
    """Kernel (RKHS) regression on the VitD data with a leave-M-out objective.

    :param nystr: use a Nystrom low-rank approximation of the weighting
        matrix W (nystr_M landmarks) instead of exact inverses
    :param IV: if True, W is built from RBF kernels on Z (instrument-style
        weighting); if False, W is (scaled) identity
    Side effects: saves scatter/contour plots (VitD*.pdf), prints progress,
    and stores the best solution in the module globals opt_params /
    opt_test_err / prev_norm.
    """
    def LMO_err(params,M=2):
        # Leave-M-out error for log-scale hyper-parameters
        # params = log([al_0, al_1, bl]).
        params = np.exp(params)
        al,bl = params[:-1], params[-1]
        # Sum of per-column RBF kernels on X, plus a small ridge for stability.
        L = bl*bl*np.exp(-L0[0]/al[0]/al[0]/2)+bl*bl*np.exp(-L0[1]/al[1]/al[1]/2) +1e-6*EYEN # l(X,None,al,bl)# +1e-6*EYEN
        if nystr:
            # Woodbury-style correction using the Nystrom eigenpair of W.
            tmp_mat = L@eig_vec_K
            C = L-tmp_mat@np.linalg.inv(eig_vec_K.T@tmp_mat/N2+inv_eig_val_K)@tmp_mat.T/N2
            c = C@W_nystr_Y*N2
        else:
            LWL_inv = chol_inv(L@W@L+L/N2 +JITTER*EYEN)# chol_inv(W*N2+L_inv) # chol_inv(L@W@L+L/N2 +JITTER*EYEN)
            C = L@LWL_inv@L/N2
            c = C@W@Y*N2
        c_y = c-Y
        lmo_err = 0
        N = 0
        for ii in range(1):
            # One random partition of the rows into blocks of size M.
            permutation = np.random.permutation(X.shape[0])
            for i in range(0, X.shape[0], M):
                indices = permutation[i:i + M]
                K_i = W[np.ix_(indices,indices)]*N2
                C_i = C[np.ix_(indices,indices)]
                c_y_i = c_y[indices]
                b_y = np.linalg.inv(np.eye(C_i.shape[0])-C_i@K_i)@c_y_i
                # print(I_CW_inv.shape,c_y_i.shape)
                lmo_err += b_y.T@K_i@b_y
                N += 1
        return lmo_err[0,0]/N/M**2
    def callback0(params, timer=None):
        # L-BFGS-B callback: tracks the lowest-norm solution seen so far,
        # aborts (via a raised Exception) when the RKHS norm jumps by 3x,
        # and renders the current prediction surface as a contour plot.
        global Nfeval, prev_norm, opt_params, opt_test_err
        if Nfeval % 1 == 0:
            params = np.exp(params)
            al,bl = params[:-1], params[-1]
            L = bl*bl*np.exp(-L0[0]/al[0]/al[0]/2)+bl*bl*np.exp(-L0[1]/al[1]/al[1]/2) +1e-6*EYEN
            if nystr:
                alpha = EYEN-eig_vec_K@np.linalg.inv(eig_vec_K.T@L@eig_vec_K/N2+np.diag(1/eig_val_K/N2))@eig_vec_K.T@L/N2
                alpha = alpha@W_nystr@Y*N2
            else:
                LWL_inv = chol_inv(L@W@L+L/N2+JITTER*EYEN)
                alpha = LWL_inv@L@W@Y
            pred_mean = L@alpha
            if timer:
                return
            norm = alpha.T @ L @ alpha
            Nfeval += 1
            if prev_norm is not None:
                if norm[0,0]/prev_norm >=3:
                    # Norm tripled: treat as divergence and stop the optimiser.
                    if opt_params is None:
                        opt_params = params
                        opt_test_err = ((pred_mean-Y)**2).mean()
                    print(True,opt_params, opt_test_err,prev_norm)
                    raise Exception
            if prev_norm is None or norm[0,0]<= prev_norm:
                prev_norm = norm[0,0]
                opt_params = params
                opt_test_err = ((pred_mean-Y)**2).mean()
            print('params,test_err, norm:',opt_params, opt_test_err, prev_norm)
            # Evaluate the fitted surface on a 32x64 (age, vitamin D) grid
            # spanning the standardised X range, padded by 5%.
            ages = np.linspace(min(X[:, 0]) - abs(min(X[:, 0])) * 0.05, max(X[:, 0]) + abs(max(X[:, 0])) * 0.05, 32)
            vitd = np.linspace(min(X[:, 1]) - abs(min(X[:, 1])) * 0.05, max(X[:, 1]) + abs(max(X[:, 1])) * 0.05, 64)
            X_mesh, Y_mesh = np.meshgrid(ages, vitd)
            table = bl ** 2 * np.hstack([np.exp(
                -_sqdist(X_mesh[:, [i]], X[:, [0]]) / al[0] ** 2 / 2 - _sqdist(Y_mesh[:, [i]], X[:, [1]]) / al[
                    1] ** 2 / 2) @ alpha for i in range(X_mesh.shape[1])])
            maxv = np.max(table[:])
            minv = np.min(table[:])
            fig = plt.figure()
            ax = fig.add_subplot(111)
            # Generate a contour plot
            Y0 = data0[:, [4]]
            X0 = data0[:, [0, 2]]
            Z0 = data0[:, [0, 1]]
            # Axes are drawn in original (unstandardised) data units from data0.
            ages = np.linspace(min(X0[:, 0]) - abs(min(X0[:, 0])) * 0.05, max(X0[:, 0]) + abs(max(X0[:, 0])) * 0.05, 32)
            vitd = np.linspace(min(X0[:, 1]) - abs(min(X0[:, 1])) * 0.05, max(X0[:, 1]) + abs(max(X0[:, 1])) * 0.05, 64)
            X_mesh, Y_mesh = np.meshgrid(ages, vitd)
            cpf = ax.contourf(X_mesh, Y_mesh, (table - minv) / (maxv - minv))
            # cp = ax.contour(X_mesh, Y_mesh, table)
            plt.colorbar(cpf, ax=ax)
            plt.xlabel('Age', fontsize=12)
            plt.ylabel('Vitamin D', fontsize=12)
            plt.xticks(fontsize=12)
            plt.yticks(fontsize=12)
            if IV:
                plt.savefig('VitD_IV.pdf', bbox_inches='tight')
            else:
                plt.savefig('VitD.pdf', bbox_inches='tight')
            plt.close('all')
    # Load the VitD dataset exported from R; rows are observations.
    robjects.r['load'](ROOT_PATH+"/data/VitD.RData")
    data = np.array(robjects.r['VitD']).T
    # plot data
    fig = plt.figure()
    plt.scatter((data[:,0])[data[:,4]>0],(data[:,2])[data[:,4]>0],marker='s',s=3,c='r',label='dead')
    plt.scatter((data[:,0])[data[:,4]==0],(data[:,2])[data[:,4]==0],marker='o',s=1,c='b',label='alive')
    lgnd = plt.legend()
    lgnd.legendHandles[0]._sizes = [30]
    lgnd.legendHandles[1]._sizes = [30]
    plt.xlabel('Age',fontsize=12)
    plt.ylabel('Vitamin D',fontsize=12)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.savefig('VitD_data.pdf',bbox_inches='tight')
    plt.close('all')
    data0 = data.copy()  # keep an unstandardised copy for plot axes
    # Standardise every column to zero mean / unit variance.
    for i in range(data.shape[1]):
        data[:,i] = (data[:,i]-data[:,i].mean())/data[:,i].std()
    Y = data[:,[4]]  # outcome: column 4 (death indicator, per scatter labels above)
    X = data[:,[0,2]]  # regressors: columns 0 and 2 (age, vitamin D, per plot labels)
    Z = data[:,[0,1]]  # columns 0 and 1, used to build W when IV=True
    t0 = time.time()
    EYEN = np.eye(X.shape[0])
    N2 = X.shape[0]**2
    if IV:
        # Mixture of three RBF bandwidths around the median heuristic for Z.
        ak = get_median_inter_mnist(Z)
        W0 = _sqdist(Z,None)
        W = (np.exp(-W0/ak/ak/2)+np.exp(-W0/ak/ak/200)+np.exp(-W0/ak/ak*50))/3/N2
        del W0
    else:
        W = EYEN/N2
    L0 = np.array([_sqdist(X[:,[i]],None) for i in range(X.shape[1])])
    params0 =np.random.randn(3)/10
    bounds = None # [[0.01,10],[0.01,5]]
    if nystr:
        # Landmark indices are re-drawn seed+1 times; only the final draw is
        # used (ties the selection to the global RNG state reproducibly).
        for _ in range(seed+1):
            random_indices = np.sort(np.random.choice(range(W.shape[0]),nystr_M,replace=False))
        eig_val_K,eig_vec_K = nystrom_decomp(W*N2, random_indices)
        inv_eig_val_K = np.diag(1/eig_val_K/N2)
        W_nystr = eig_vec_K @ np.diag(eig_val_K)@eig_vec_K.T/N2
        W_nystr_Y = W_nystr@Y
    obj_grad = value_and_grad(lambda params: LMO_err(params))
    res = minimize(obj_grad, x0=params0,bounds=bounds, method='L-BFGS-B',jac=True,options={'maxiter':5000}, callback=callback0)
if __name__ == '__main__':
    experiment(IV=True)
    # fixed: this line carried fused dataset metadata and the next record's
    # first import line, making it a SyntaxError
    experiment(IV=False)
from autograd import value_and_grad
from scipy.optimize import minimize
from util import get_median_inter_mnist, Kernel, load_data, ROOT_PATH,_sqdist,remove_outliers, nystrom_decomp, chol_inv
import time
import rpy2.robjects as robjects
import matplotlib.pyplot as plt
Nfeval = 1
seed = 527
np.random.seed(seed)
JITTER = 1e-7
nystr_M = 300
EYE_nystr = np.eye(nystr_M)
opt_params = None
prev_norm = None
opt_test_err = None
def experiment(nystr=True,IV=True):
def LMO_err(params,M=2):
params = np.exp(params)
al,bl = params[:-1], params[-1]
L = bl*bl*np.exp(-L0[0]/al[0]/al[0]/2)+bl*bl*np.exp(-L0[1]/al[1]/al[1]/2) +1e-6*EYEN # l(X,None,al,bl)# +1e-6*EYEN
if nystr:
tmp_mat = L@eig_vec_K
C = L-tmp_mat@np.linalg.inv(eig_vec_K.T@tmp_mat/N2+inv_eig_val_K)@tmp_mat.T/N2
c = C@W_nystr_Y*N2
else:
LWL_inv = chol_inv(L@W@L+L/N2 +JITTER*EYEN)# chol_inv(W*N2+L_inv) # chol_inv(L@W@L+L/N2 +JITTER*EYEN)
C = L@LWL_inv@L/N2
c = C@W@Y*N2
c_y = c-Y
lmo_err = 0
N = 0
for ii in range(1):
permutation = np.random.permutation(X.shape[0])
for i in range(0, X.shape[0], M):
indices = permutation[i:i + M]
K_i = W[np.ix_(indices,indices)]*N2
C_i = C[np.ix_(indices,indices)]
c_y_i = c_y[indices]
b_y = np.linalg.inv(np.eye(C_i.shape[0])-C_i@K_i)@c_y_i
# print(I_CW_inv.shape,c_y_i.shape)
lmo_err += b_y.T@K_i@b_y
N += 1
return lmo_err[0,0]/N/M**2
def callback0(params, timer=None):
global Nfeval, prev_norm, opt_params, opt_test_err
if Nfeval % 1 == 0:
params = np.exp(params)
al,bl = params[:-1], params[-1]
L = bl*bl*np.exp(-L0[0]/al[0]/al[0]/2)+bl*bl*np.exp(-L0[1]/al[1]/al[1]/2) +1e-6*EYEN
if nystr:
alpha = EYEN-eig_vec_K@np.linalg.inv(eig_vec_K.T@L@eig_vec_K/N2+np.diag(1/eig_val_K/N2))@eig_vec_K.T@L/N2
alpha = alpha@W_nystr@Y*N2
else:
LWL_inv = chol_inv(L@W@L+L/N2+JITTER*EYEN)
alpha = LWL_inv@L@W@Y
pred_mean = L@alpha
if timer:
return
norm = alpha.T @ L @ alpha
Nfeval += 1
if prev_norm is not None:
if norm[0,0]/prev_norm >=3:
if opt_params is None:
opt_params = params
opt_test_err = ((pred_mean-Y)**2).mean()
print(True,opt_params, opt_test_err,prev_norm)
raise Exception
if prev_norm is None or norm[0,0]<= prev_norm:
prev_norm = norm[0,0]
opt_params = params
opt_test_err = ((pred_mean-Y)**2).mean()
print('params,test_err, norm:',opt_params, opt_test_err, prev_norm)
ages = np.linspace(min(X[:, 0]) - abs(min(X[:, 0])) * 0.05, max(X[:, 0]) + abs(max(X[:, 0])) * 0.05, 32)
vitd = np.linspace(min(X[:, 1]) - abs(min(X[:, 1])) * 0.05, max(X[:, 1]) + abs(max(X[:, 1])) * 0.05, 64)
X_mesh, Y_mesh = np.meshgrid(ages, vitd)
table = bl ** 2 * np.hstack([np.exp(
-_sqdist(X_mesh[:, [i]], X[:, [0]]) / al[0] ** 2 / 2 - _sqdist(Y_mesh[:, [i]], X[:, [1]]) / al[
1] ** 2 / 2) @ alpha for i in range(X_mesh.shape[1])])
maxv = np.max(table[:])
minv = np.min(table[:])
fig = plt.figure()
ax = fig.add_subplot(111)
# Generate a contour plot
Y0 = data0[:, [4]]
X0 = data0[:, [0, 2]]
Z0 = data0[:, [0, 1]]
ages = np.linspace(min(X0[:, 0]) - abs(min(X0[:, 0])) * 0.05, max(X0[:, 0]) + abs(max(X0[:, 0])) * 0.05, 32)
vitd = np.linspace(min(X0[:, 1]) - abs(min(X0[:, 1])) * 0.05, max(X0[:, 1]) + abs(max(X0[:, 1])) * 0.05, 64)
X_mesh, Y_mesh = np.meshgrid(ages, vitd)
cpf = ax.contourf(X_mesh, Y_mesh, (table - minv) / (maxv - minv))
# cp = ax.contour(X_mesh, Y_mesh, table)
plt.colorbar(cpf, ax=ax)
plt.xlabel('Age', fontsize=12)
plt.ylabel('Vitamin D', fontsize=12)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
if IV:
plt.savefig('VitD_IV.pdf', bbox_inches='tight')
else:
plt.savefig('VitD.pdf', bbox_inches='tight')
plt.close('all')
robjects.r['load'](ROOT_PATH+"/data/VitD.RData")
data = np.array(robjects.r['VitD']).T
# plot data
fig = plt.figure()
plt.scatter((data[:,0])[data[:,4]>0],(data[:,2])[data[:,4]>0],marker='s',s=3,c='r',label='dead')
plt.scatter((data[:,0])[data[:,4]==0],(data[:,2])[data[:,4]==0],marker='o',s=1,c='b',label='alive')
lgnd = plt.legend()
lgnd.legendHandles[0]._sizes = [30]
lgnd.legendHandles[1]._sizes = [30]
plt.xlabel('Age',fontsize=12)
plt.ylabel('Vitamin D',fontsize=12)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.savefig('VitD_data.pdf',bbox_inches='tight')
plt.close('all')
data0 = data.copy()
for i in range(data.shape[1]):
data[:,i] = (data[:,i]-data[:,i].mean())/data[:,i].std()
Y = data[:,[4]]
X = data[:,[0,2]]
Z = data[:,[0,1]]
t0 = time.time()
EYEN = np.eye(X.shape[0])
N2 = X.shape[0]**2
if IV:
ak = get_median_inter_mnist(Z)
W0 = _sqdist(Z,None)
W = (np.exp(-W0/ak/ak/2)+np.exp(-W0/ak/ak/200)+np.exp(-W0/ak/ak*50))/3/N2
del W0
else:
W = EYEN/N2
L0 = np.array([_sqdist(X[:,[i]],None) for i in range(X.shape[1])])
params0 =np.random.randn(3)/10
bounds = None # [[0.01,10],[0.01,5]]
if nystr:
for _ in range(seed+1):
random_indices = np.sort(np.random.choice(range(W.shape[0]),nystr_M,replace=False))
eig_val_K,eig_vec_K = nystrom_decomp(W*N2, random_indices)
inv_eig_val_K = np.diag(1/eig_val_K/N2)
W_nystr = eig_vec_K @ np.diag(eig_val_K)@eig_vec_K.T/N2
W_nystr_Y = W_nystr@Y
obj_grad = value_and_grad(lambda params: LMO_err(params))
res = minimize(obj_grad, x0=params0,bounds=bounds, method='L-BFGS-B',jac=True,options={'maxiter':5000}, callback=callback0)
if __name__ == '__main__':
experiment(IV=True)
experiment(IV=False) | 0.201892 | 0.428652 |
import scipp as sc
import numpy as np
import operator
def test_type():
    """Slicing over the full extent must still yield a Variable."""
    sliced = sc.Variable(dims=['x'],
                         values=np.arange(1, 10, dtype=float))['x', :]
    assert type(sliced) == sc.Variable
def test_astype():
    """An int64 slice converts to float32 via astype."""
    sliced = sc.Variable(dims=['x'],
                         values=np.arange(1, 10, dtype=np.int64))['x', :]
    assert sliced.dtype == sc.DType.int64
    converted = sliced.astype(sc.DType.float32)
    assert converted.dtype == sc.DType.float32
def apply_test_op(op, a, b, data):
    """Apply in-place *op* to (a, b) and to (data, b.values); results must agree."""
    op(a, b)
    # Assume numpy operations are correct as comparator
    op(data, b.values)
    assert np.array_equal(a.values, data)
def test_binary_operations():
    """Binary arithmetic on full slices agrees with numpy on the raw values."""
    _a = sc.Variable(dims=['x'], values=np.arange(1, 10, dtype=float))
    _b = sc.Variable(dims=['x'], values=np.arange(1, 10, dtype=float))
    a = _a['x', :]
    b = _b['x', :]
    data = np.copy(a.values)
    total = a + b
    assert type(total) == sc.Variable
    assert np.array_equal(total.values, data + data)
    assert np.array_equal((a - b).values, data - data)
    assert np.array_equal((a * b).values, data * data)
    assert np.array_equal((a / b).values, data / data)
    # In-place variants, checked against numpy via the shared helper.
    for inplace_op in (operator.iadd, operator.isub,
                       operator.imul, operator.itruediv):
        apply_test_op(inplace_op, a, b, data)
def test_binary_float_operations():
    """Arithmetic between a slice and a plain float, in both operand orders."""
    _a = sc.Variable(dims=['x'], values=np.arange(1, 10, dtype=float))
    a = _a['x', :]
    data = np.copy(a.values)
    cases = [
        (a + 2.0, data + 2.0),
        (a - 2.0, data - 2.0),
        (a * 2.0, data * 2.0),
        (a / 2.0, data / 2.0),
        (2.0 + a, data + 2.0),
        (2.0 - a, 2.0 - data),
        (2.0 * a, data * 2.0),
    ]
    for got, expected in cases:
        assert np.array_equal(got.values, expected)
def test_equal_not_equal():
    """identical() is symmetric: equal slices match, shifted ones do not."""
    _a = sc.Variable(dims=['x'], values=np.arange(1, 10, dtype=float))
    _b = sc.Variable(dims=['x'], values=np.arange(1, 10, dtype=float))
    a = _a['x', :]
    b = _b['x', :]
    shifted = a + 2.0
    assert sc.identical(a, b)
    assert sc.identical(b, a)
    assert not sc.identical(a, shifted)
    assert not sc.identical(shifted, a)
def test_correct_temporaries():
    """Slicing a temporary (the sqrt result) must keep its data alive."""
    v = sc.Variable(dims=['x'], values=np.arange(100.0))
    b = sc.sqrt(v)['x', 0:10]
    assert len(b.values) == 10
    b = b['x', 2:5]
    # fixed: this line carried fused dataset metadata (SyntaxError)
    assert len(b.values) == 3
import numpy as np
import operator
def test_type():
variable_slice = sc.Variable(dims=['x'], values=np.arange(1, 10,
dtype=float))['x', :]
assert type(variable_slice) == sc.Variable
def test_astype():
variable_slice = sc.Variable(dims=['x'], values=np.arange(1, 10,
dtype=np.int64))['x', :]
assert variable_slice.dtype == sc.DType.int64
var_as_float = variable_slice.astype(sc.DType.float32)
assert var_as_float.dtype == sc.DType.float32
def apply_test_op(op, a, b, data):
op(a, b)
# Assume numpy operations are correct as comparator
op(data, b.values)
assert np.array_equal(a.values, data)
def test_binary_operations():
_a = sc.Variable(dims=['x'], values=np.arange(1, 10, dtype=float))
_b = sc.Variable(dims=['x'], values=np.arange(1, 10, dtype=float))
a = _a['x', :]
b = _b['x', :]
data = np.copy(a.values)
c = a + b
assert type(c) == sc.Variable
assert np.array_equal(c.values, data + data)
c = a - b
assert np.array_equal(c.values, data - data)
c = a * b
assert np.array_equal(c.values, data * data)
c = a / b
assert np.array_equal(c.values, data / data)
apply_test_op(operator.iadd, a, b, data)
apply_test_op(operator.isub, a, b, data)
apply_test_op(operator.imul, a, b, data)
apply_test_op(operator.itruediv, a, b, data)
def test_binary_float_operations():
_a = sc.Variable(dims=['x'], values=np.arange(1, 10, dtype=float))
a = _a['x', :]
data = np.copy(a.values)
c = a + 2.0
assert np.array_equal(c.values, data + 2.0)
c = a - 2.0
assert np.array_equal(c.values, data - 2.0)
c = a * 2.0
assert np.array_equal(c.values, data * 2.0)
c = a / 2.0
assert np.array_equal(c.values, data / 2.0)
c = 2.0 + a
assert np.array_equal(c.values, data + 2.0)
c = 2.0 - a
assert np.array_equal(c.values, 2.0 - data)
c = 2.0 * a
assert np.array_equal(c.values, data * 2.0)
def test_equal_not_equal():
_a = sc.Variable(dims=['x'], values=np.arange(1, 10, dtype=float))
_b = sc.Variable(dims=['x'], values=np.arange(1, 10, dtype=float))
a = _a['x', :]
b = _b['x', :]
c = a + 2.0
assert sc.identical(a, b)
assert sc.identical(b, a)
assert not sc.identical(a, c)
assert not sc.identical(c, a)
def test_correct_temporaries():
v = sc.Variable(dims=['x'], values=np.arange(100.0))
b = sc.sqrt(v)['x', 0:10]
assert len(b.values) == 10
b = b['x', 2:5]
assert len(b.values) == 3 | 0.625209 | 0.870101 |
import logging
import os
import subprocess
import time
import luigi
from luigi.task import MixinNaiveBulkComplete
from spotify_tensorflow.luigi.utils import get_uri, run_with_logging
# Luigi's conventional logger name, so messages appear in scheduler/worker logs.
logger = logging.getLogger("luigi-interface")
class PythonDataflowTask(MixinNaiveBulkComplete, luigi.Task):
    """Luigi wrapper for a python Dataflow job.

    The following class attributes can be set:

    python_script          Python script for the dataflow task. (required)
    project                Name of the project owning the dataflow job. (required)
    staging_location       GCS path for staging code packages needed by workers. (required)
    zone                   GCE availability zone for launching workers.
    region                 GCE region for creating the dataflow job.
                           (Dataflow requires one and only one of zone/region.)
    temp_location          GCS path for saving temporary workflow jobs.
    num_workers            The number of workers to start the task with.
    autoscaling_algorithm  Set to "NONE" to disable autoscaling; `num_workers`
                           will then be used for the job.
    max_num_workers        Used if autoscaling is enabled.
    network                Network in GCE to be used for launching workers.
    subnetwork             Subnetwork in GCE to be used for launching workers.
    disk_size_gb           Remote worker disk size; if unset, uses the default size.
    worker_machine_type    Machine type for Dataflow worker VMs. If unset, the
                           Dataflow service chooses a reasonable default.
    worker_disk_type       Specify SSD for local disk, or defaults to hard disk.
    service_account        Service account of Dataflow VMs/workers. Default is
                           the default GCE service account.
    job_name               Name of the dataflow job.
    requirements_file      Path to a requirements file containing package dependencies.
    local_runner           If True the job uses DirectRunner, otherwise DataflowRunner.
    setup_file             Path to a setup Python file containing package dependencies.

    Example:

        class AwesomeJob(PythonDataflowTask):
            python_script = "/path/to/python_script"
            project = "gcp-project"
            staging_location = "gs://gcp-project-playground/user/staging"
            temp_location = "gs://gcp-project-playground/user/tmp"
            max_num_workers = 20
            region = "europe-west1"
            service_account = "<EMAIL>"

            def output(self):
                ...
    """
    # Required dataflow args
    python_script = None  # type: str
    project = None  # type: str
    staging_location = None  # type: str
    # Dataflow requires one and only one of:
    zone = None  # type: str
    region = None  # type: str
    # Optional dataflow args
    temp_location = None  # type: str
    num_workers = None  # type: int
    autoscaling_algorithm = None  # type: str
    max_num_workers = None  # type: int
    network = None  # type: str
    subnetwork = None  # type: str
    disk_size_gb = None  # type: int
    worker_machine_type = None  # type: str
    worker_disk_type = None  # type: str
    service_account = None  # type: str
    job_name = None  # type: str
    requirements_file = None  # type: str
    local_runner = False  # type: bool
    setup_file = None  # type: str
def __init__(self, *args, **kwargs):
super(PythonDataflowTask, self).__init__(*args, **kwargs)
self._output = self.output()
if isinstance(self._output, luigi.Target):
self._output = {"output": self._output}
if self.job_name is None:
# job_name must consist of only the characters [-a-z0-9]
cls_name = self.__class__.__name__.replace("_", "-").lower()
self.job_name = "{cls_name}-{timestamp}".format(cls_name=cls_name,
timestamp=str(time.time())[:-3])
    def on_successful_run(self):
        """Callback invoked right after the dataflow job finishes successfully,
        but before validate_output is run. Default is a no-op; override in
        subclasses to add post-run behaviour.
        """
        pass
    def validate_output(self):
        """Callback that can be used to validate your output before it is moved
        to its final location. Returning False here causes the job to fail and
        the output to be removed instead of published.

        :return: whether the output is valid
        :rtype: bool
        """
        return True
    def file_pattern(self):
        """Per-input overrides for the input file glob.

        If some input target's files are not named part-*, map the input's key
        to the correct glob here; any key absent from the dict falls back to
        "part-*" when the command line is built.

        :return: dict of input key -> overridden file pattern
        :rtype: dict of str to str
        """
        return {}
def run(self):
cmd_line = self._mk_cmd_line()
logger.info(" ".join(cmd_line))
try:
run_with_logging(cmd_line, logger)
except subprocess.CalledProcessError as e:
logging.error(e, exc_info=True)
# exit luigi with the same exit code as the python dataflow job proccess
# In this way users can easily exit the job with code 50 to avoid Styx retries
# https://github.com/spotify/styx/blob/master/doc/design-overview.md#workflow-state-graph
os._exit(e.returncode)
self.on_successful_run()
if self.validate_output():
self._publish_outputs()
else:
raise ValueError("Output is not valid")
def _publish_outputs(self):
for (name, target) in self._output.items():
if hasattr(target, "publish"):
target.publish(self._output_uris[name])
def _mk_cmd_line(self):
cmd_line = self._dataflow_executable()
cmd_line.extend(self._get_dataflow_args())
cmd_line.extend(self._get_input_args())
cmd_line.extend(self._get_output_args())
cmd_line.extend(self.args())
return cmd_line
    def _dataflow_executable(self):
        """
        Defines the executable (argv prefix) used to run the python dataflow job.
        """
        return ["python", self.python_script]
def _get_input_uri(self, file_pattern, target):
uri = get_uri(target)
uri = uri.rstrip("/") + "/" + file_pattern
return uri
def _get_file_pattern(self):
file_pattern = self.file_pattern()
if not isinstance(file_pattern, dict):
raise ValueError("file_pattern() must return a dict type")
return file_pattern
    def _get_input_args(self):
        """
        Collects outputs from requires() and converts them to input arguments.
        file_pattern() is called to construct the input file path glob, with
        default value "part-*" for any input key without an override.
        """
        job_input = self.input()
        # a bare Target becomes a single "input" entry
        if isinstance(job_input, luigi.Target):
            job_input = {"input": job_input}
        if not isinstance(job_input, dict):
            raise ValueError("Input (requires()) must be dict type")
        input_args = []
        file_pattern_dict = self._get_file_pattern()
        for (name, targets) in job_input.items():
            uri_targets = luigi.task.flatten(targets)
            pattern = file_pattern_dict.get(name, "part-*")
            uris = [self._get_input_uri(pattern, uri_target) for uri_target in uri_targets]
            if isinstance(targets, dict):
                # If targets is a dict that means it had multiple outputs.
                # Make the input args in that case "<input key>-<task output key>"
                names = ["%s-%s" % (name, key) for key in targets.keys()]
            else:
                names = [name] * len(uris)
            for (arg_name, uri) in zip(names, uris):
                input_args.append("--%s=%s" % (arg_name, uri))
        return input_args
def _get_output_args(self):
if not isinstance(self._output, dict):
raise ValueError("Output must be dict type")
output_args = []
self._output_uris = {}
for (name, target) in self._output.items():
uri = target.generate_uri() if hasattr(target, "generate_uri") else get_uri(target)
uri = uri.rstrip("/")
output_args.append("--%s=%s" % (name, uri))
self._output_uris[name] = uri
return output_args
def _get_runner(self):
return "DirectRunner" if self.local_runner else "DataflowRunner"
def _get_dataflow_args(self):
dataflow_args = []
_runner = self._get_runner()
if _runner:
dataflow_args += ["--runner={}".format(_runner)]
if self.project:
dataflow_args += ["--project={}".format(self.project)]
if self.staging_location:
dataflow_args += ["--staging_location={}".format(self.staging_location)]
if self.zone:
dataflow_args += ["--zone={}".format(self.zone)]
if self.region:
dataflow_args += ["--region={}".format(self.region)]
if self.temp_location:
dataflow_args += ["--temp_location={}".format(self.temp_location)]
if self.num_workers:
dataflow_args += ["--num_workers={}".format(self.num_workers)]
if self.autoscaling_algorithm:
dataflow_args += ["--autoscaling_algorithm={}".format(self.autoscaling_algorithm)]
if self.max_num_workers:
dataflow_args += ["--max_num_workers={}".format(self.max_num_workers)]
if self.network:
dataflow_args += ["--network={}".format(self.network)]
if self.subnetwork:
dataflow_args += ["--subnetwork={}".format(self.subnetwork)]
if self.disk_size_gb:
dataflow_args += ["--disk_size_gb={}".format(self.disk_size_gb)]
if self.worker_machine_type:
dataflow_args += ["--worker_machine_type={}".format(self.worker_machine_type)]
if self.job_name:
dataflow_args += ["--job_name={}".format(self.job_name)]
if self.worker_disk_type:
dataflow_args += ["--worker_disk_type={}".format(self.worker_disk_type)]
if self.service_account:
dataflow_args += ["--service_account_email={}".format(self.service_account)]
if self.requirements_file:
dataflow_args += ["--requirements_file={}".format(self.requirements_file)]
if self.setup_file:
dataflow_args += ["--setup_file={}".format(self.setup_file)]
return dataflow_args
def args(self):
    """Extra arguments that will be passed to your dataflow job.

    Example::

        return ["--project=my-gcp-project",
                "--zone=a-zone",
                "--staging_location=gs://my-gcp-project/dataflow"]

    Note that:
    * You "set" args by overriding this method in your subclass.
    * This function should return an iterable of strings.
    """
    extra_args = []
    return extra_args
def get_output_uris(self):
    """Return a dictionary that contains output uris.

    The key is the name of the output target defined in output(), and the
    value is the path/uri of that target.  It can be used to write data to
    different sub-directories under one output target.

    :return: a dictionary of output uris
    :rtype: dict of str to str
    """
    # FIX: dataset row-separator text was fused onto the return statement,
    # breaking the expression; restore the plain return.
    return self._output_uris
import logging
import os
import subprocess
import time
import luigi
from luigi.task import MixinNaiveBulkComplete
from spotify_tensorflow.luigi.utils import get_uri, run_with_logging
logger = logging.getLogger("luigi-interface")
class PythonDataflowTask(MixinNaiveBulkComplete, luigi.Task):
    """Luigi wrapper for a python dataflow job.

    The following properties can be set::

        python_script = None          # Python script for the dataflow task.
        project = None                # Name of the project owning the dataflow job.
        staging_location = None       # GCS path for staging code packages needed by workers.
        zone = None                   # GCE availability zone for launching workers.
        region = None                 # GCE region for creating the dataflow job.
        temp_location = None          # GCS path for saving temporary workflow jobs.
        num_workers = None            # The number of workers to start the task with.
        autoscaling_algorithm = None  # Set to "NONE" to disable autoscaling; `num_workers`
                                      # will then be used for the job.
        max_num_workers = None        # Used if the autoscaling is enabled.
        network = None                # Network in GCE to be used for launching workers.
        subnetwork = None             # Subnetwork in GCE to be used for launching workers.
        disk_size_gb = None           # Remote worker disk size; service default if unset.
        worker_machine_type = None    # Machine type for Dataflow worker VMs; the service
                                      # chooses a reasonable default if unset.
        worker_disk_type = None       # Specify SSD for local disk, or default to hard disk.
        service_account = None        # Service account of Dataflow VMs/workers; defaults
                                      # to the default GCE service account.
        job_name = None               # Name of the dataflow job.
        requirements_file = None      # Path to a requirements file of package dependencies.
        local_runner = False          # True -> DirectRunner, otherwise DataflowRunner.
        setup_file = None             # Path to a setup Python file of package dependencies.

    :Example:

        class AwesomeJob(PythonDataflowTask):
            python_script = "/path/to/python_script"
            project = "gcp-project"
            staging_location = "gs://gcp-project-playground/user/staging"
            temp_location = "gs://gcp-project-playground/user/tmp"
            max_num_workers = 20
            region = "europe-west1"
            service_account = "<EMAIL>"

            def output(self):
                ...
    """
    # Required dataflow args
    python_script = None  # type: str
    project = None  # type: str
    staging_location = None  # type: str
    # Dataflow requires one and only one of:
    zone = None  # type: str
    region = None  # type: str
    # Optional dataflow args
    temp_location = None  # type: str
    num_workers = None  # type: int
    autoscaling_algorithm = None  # type: str
    max_num_workers = None  # type: int
    network = None  # type: str
    subnetwork = None  # type: str
    disk_size_gb = None  # type: int
    worker_machine_type = None  # type: str
    worker_disk_type = None  # type: str
    service_account = None  # type: str
    job_name = None  # type: str
    requirements_file = None  # type: str
    local_runner = False  # type: bool
    setup_file = None  # type: str

    def __init__(self, *args, **kwargs):
        super(PythonDataflowTask, self).__init__(*args, **kwargs)
        self._output = self.output()
        if isinstance(self._output, luigi.Target):
            self._output = {"output": self._output}
        # Filled in by _get_output_args(); initialized here so that
        # get_output_uris() is safe to call before run().
        self._output_uris = {}
        if self.job_name is None:
            # job_name must consist of only the characters [-a-z0-9]
            cls_name = self.__class__.__name__.replace("_", "-").lower()
            # BUG FIX: str(time.time())[:-3] keeps a "." in the name, which
            # violates the [-a-z0-9] constraint; use integer milliseconds.
            self.job_name = "{cls_name}-{timestamp}".format(
                cls_name=cls_name, timestamp=str(int(time.time() * 1000)))

    def on_successful_run(self):
        """Callback invoked right after the dataflow job has finished
        successfully, but before validate_output is run.
        """
        pass

    def validate_output(self):
        """Callback that can be used to validate the output before it is
        published.  Returning False fails the job and the output is removed
        instead of published.

        :return: whether the output is valid or not
        :rtype: bool
        """
        return True

    def file_pattern(self):
        """Return per-input overrides for the input file glob.

        If some of the input target files are not named like ``part-*``,
        map the input key to the correct glob here; keys missing from the
        dict default to ``part-*``.

        :return: a dictionary of overridden file patterns
        :rtype: dict of str to str
        """
        return {}

    def run(self):
        cmd_line = self._mk_cmd_line()
        logger.info(" ".join(cmd_line))
        try:
            run_with_logging(cmd_line, logger)
        except subprocess.CalledProcessError as e:
            # FIX: use the module-level logger (was module logging.error).
            logger.error(e, exc_info=True)
            # Exit luigi with the same exit code as the dataflow job process.
            # This way users can easily exit the job with code 50 to avoid
            # Styx retries:
            # https://github.com/spotify/styx/blob/master/doc/design-overview.md#workflow-state-graph
            os._exit(e.returncode)
        self.on_successful_run()
        if self.validate_output():
            self._publish_outputs()
        else:
            raise ValueError("Output is not valid")

    def _publish_outputs(self):
        # Only targets that support publishing are published.
        for name, target in self._output.items():
            if hasattr(target, "publish"):
                target.publish(self._output_uris[name])

    def _mk_cmd_line(self):
        """Assemble the complete command line for the dataflow job."""
        cmd_line = self._dataflow_executable()
        cmd_line.extend(self._get_dataflow_args())
        cmd_line.extend(self._get_input_args())
        cmd_line.extend(self._get_output_args())
        cmd_line.extend(self.args())
        return cmd_line

    def _dataflow_executable(self):
        """Return the argv prefix used to run the python dataflow job."""
        return ["python", self.python_script]

    def _get_input_uri(self, file_pattern, target):
        """Append *file_pattern* to the target's URI to form an input glob."""
        uri = get_uri(target)
        uri = uri.rstrip("/") + "/" + file_pattern
        return uri

    def _get_file_pattern(self):
        """Return file_pattern() after validating that it is a dict."""
        file_pattern = self.file_pattern()
        if not isinstance(file_pattern, dict):
            raise ValueError("file_pattern() must return a dict type")
        return file_pattern

    def _get_input_args(self):
        """
        Collects outputs from requires() and converts them to input arguments.
        file_pattern() is called to construct input file path glob with
        default value "part-*".
        """
        job_input = self.input()
        if isinstance(job_input, luigi.Target):
            job_input = {"input": job_input}
        if not isinstance(job_input, dict):
            raise ValueError("Input (requires()) must be dict type")
        input_args = []
        file_pattern_dict = self._get_file_pattern()
        for name, targets in job_input.items():
            uri_targets = luigi.task.flatten(targets)
            pattern = file_pattern_dict.get(name, "part-*")
            uris = [self._get_input_uri(pattern, t) for t in uri_targets]
            if isinstance(targets, dict):
                # If targets is a dict that means it had multiple outputs.
                # Make the input args in that case "<input key>-<task output key>"
                names = ["%s-%s" % (name, key) for key in targets.keys()]
            else:
                names = [name] * len(uris)
            for arg_name, uri in zip(names, uris):
                input_args.append("--%s=%s" % (arg_name, uri))
        return input_args

    def _get_output_args(self):
        """Convert output targets to CLI args and record their URIs."""
        if not isinstance(self._output, dict):
            raise ValueError("Output must be dict type")
        output_args = []
        self._output_uris = {}
        for name, target in self._output.items():
            uri = target.generate_uri() if hasattr(target, "generate_uri") else get_uri(target)
            uri = uri.rstrip("/")
            output_args.append("--%s=%s" % (name, uri))
            self._output_uris[name] = uri
        return output_args

    def _get_runner(self):
        """Pick the Beam runner: DirectRunner locally, DataflowRunner on GCP."""
        return "DirectRunner" if self.local_runner else "DataflowRunner"

    def _get_dataflow_args(self):
        """Translate the configured properties into ``--flag=value`` options."""
        dataflow_args = ["--runner={}".format(self._get_runner())]
        # (flag name, configured value) pairs; only truthy values are
        # emitted, in this fixed order.
        options = [
            ("project", self.project),
            ("staging_location", self.staging_location),
            ("zone", self.zone),
            ("region", self.region),
            ("temp_location", self.temp_location),
            ("num_workers", self.num_workers),
            ("autoscaling_algorithm", self.autoscaling_algorithm),
            ("max_num_workers", self.max_num_workers),
            ("network", self.network),
            ("subnetwork", self.subnetwork),
            ("disk_size_gb", self.disk_size_gb),
            ("worker_machine_type", self.worker_machine_type),
            ("job_name", self.job_name),
            ("worker_disk_type", self.worker_disk_type),
            # NOTE: the attribute is service_account but the flag is
            # service_account_email (Dataflow's expected option name).
            ("service_account_email", self.service_account),
            ("requirements_file", self.requirements_file),
            ("setup_file", self.setup_file),
        ]
        for flag, value in options:
            if value:
                dataflow_args.append("--{}={}".format(flag, value))
        return dataflow_args

    def args(self):
        """Extra arguments that will be passed to your dataflow job.

        Example::

            return ["--project=my-gcp-project",
                    "--zone=a-zone",
                    "--staging_location=gs://my-gcp-project/dataflow"]

        Note that:
        * You "set" args by overriding this method in your subclass.
        * This function should return an iterable of strings.
        """
        return []

    def get_output_uris(self):
        """Return a dictionary that contains output uris.

        The key is the name of the output target defined in output(), and
        the value is the path/uri of that target.  It can be used to write
        data to different sub-directories under one output target.

        :return: a dictionary of output uris
        :rtype: dict of str to str
        """
        # FIX: dataset metadata was fused onto this return line; restored.
        return self._output_uris
import numpy as np
from mealpy.optimizer import Optimizer
class BaseCSA(Optimizer):
    """
    The original version of: Cuckoo Search Algorithm (CSA)
    (Cuckoo search via Levy flights)
    Link:
        https://doi.org/10.1109/NABIC.2009.5393690
    """

    def __init__(self, problem, epoch=10000, pop_size=100, p_a=0.3, **kwargs):
        """
        Args:
            problem (): the problem definition passed through to Optimizer
            epoch (int): maximum number of iterations, default = 10000
            pop_size (int): number of population size, default = 100
            p_a (float): probability of abandoning a nest, default = 0.3
            **kwargs (): extra keyword arguments forwarded to Optimizer
        """
        # NOTE(review): Optimizer appears to accept the raw kwargs dict
        # positionally (mealpy convention) — confirm against the base class.
        super().__init__(problem, kwargs)
        self.epoch = epoch
        self.pop_size = pop_size
        self.p_a = p_a
        # number of worst nests abandoned and rebuilt each epoch
        self.n_cut = int(self.p_a * self.pop_size)
        self.nfe_per_epoch = self.pop_size + self.n_cut
        self.sort_flag = False

    def evolve(self, epoch):
        """
        Evolve the population for one iteration.

        Args:
            epoch (int): The current iteration
        """
        ## Phase 1: generate levy-flight candidates around each nest
        pop_new = []
        for i in range(0, self.pop_size):
            ## Generate levy-flight solution
            levy_step = self.get_levy_flight_step(multiplier=0.001, case=-1)
            pos_new = self.pop[i][self.ID_POS] + 1.0 / np.sqrt(epoch + 1) * np.sign(np.random.random() - 0.5) * \
                      levy_step * (self.pop[i][self.ID_POS] - self.g_best[self.ID_POS])
            pos_new = self.amend_position_faster(pos_new)
            pop_new.append([pos_new, None])
        pop_new = self.update_fitness_population(pop_new)
        ## Phase 2: keep a randomly chosen old nest when it beats the candidate
        list_idx_rand = np.random.choice(list(range(0, self.pop_size)), self.pop_size, replace=True)
        for idx in range(self.pop_size):
            if self.compare_agent(self.pop[list_idx_rand[idx]], pop_new[idx]):
                pop_new[idx] = self.pop[list_idx_rand[idx]].copy()
        ## Phase 3: abandon the n_cut worst nests and rebuild them at random
        pop = self.get_sorted_strim_population(pop_new, self.pop_size)
        pop_new = []
        for i in range(0, self.n_cut):
            pos_new = np.random.uniform(self.problem.lb, self.problem.ub)
            pop_new.append([pos_new, None])
        pop_new = self.update_fitness_population(pop_new)
        # FIX: dataset row-separator text was fused onto this line; restored.
        self.pop = pop[:(self.pop_size - self.n_cut)] + pop_new
import numpy as np
from mealpy.optimizer import Optimizer
class BaseCSA(Optimizer):
    """
    The original version of: Cuckoo Search Algorithm (CSA)
    (Cuckoo search via Levy flights)
    Link:
        https://doi.org/10.1109/NABIC.2009.5393690
    """

    def __init__(self, problem, epoch=10000, pop_size=100, p_a=0.3, **kwargs):
        """
        Args:
            problem (): the problem definition passed through to Optimizer
            epoch (int): maximum number of iterations, default = 10000
            pop_size (int): number of population size, default = 100
            p_a (float): probability of abandoning a nest, default = 0.3
            **kwargs (): extra keyword arguments forwarded to Optimizer
        """
        # NOTE(review): Optimizer appears to accept the raw kwargs dict
        # positionally (mealpy convention) — confirm against the base class.
        super().__init__(problem, kwargs)
        self.epoch = epoch
        self.pop_size = pop_size
        self.p_a = p_a
        # number of worst nests abandoned and rebuilt each epoch
        self.n_cut = int(self.p_a * self.pop_size)
        self.nfe_per_epoch = self.pop_size + self.n_cut
        self.sort_flag = False

    def evolve(self, epoch):
        """
        Evolve the population for one iteration.

        Args:
            epoch (int): The current iteration
        """
        ## Phase 1: generate levy-flight candidates around each nest
        pop_new = []
        for i in range(0, self.pop_size):
            ## Generate levy-flight solution
            levy_step = self.get_levy_flight_step(multiplier=0.001, case=-1)
            pos_new = self.pop[i][self.ID_POS] + 1.0 / np.sqrt(epoch + 1) * np.sign(np.random.random() - 0.5) * \
                      levy_step * (self.pop[i][self.ID_POS] - self.g_best[self.ID_POS])
            pos_new = self.amend_position_faster(pos_new)
            pop_new.append([pos_new, None])
        pop_new = self.update_fitness_population(pop_new)
        ## Phase 2: keep a randomly chosen old nest when it beats the candidate
        list_idx_rand = np.random.choice(list(range(0, self.pop_size)), self.pop_size, replace=True)
        for idx in range(self.pop_size):
            if self.compare_agent(self.pop[list_idx_rand[idx]], pop_new[idx]):
                pop_new[idx] = self.pop[list_idx_rand[idx]].copy()
        ## Phase 3: abandon the n_cut worst nests and rebuild them at random
        pop = self.get_sorted_strim_population(pop_new, self.pop_size)
        pop_new = []
        for i in range(0, self.n_cut):
            pos_new = np.random.uniform(self.problem.lb, self.problem.ub)
            pop_new.append([pos_new, None])
        pop_new = self.update_fitness_population(pop_new)
        # FIX: dataset quality-score columns were fused onto this line; restored.
        self.pop = pop[:(self.pop_size - self.n_cut)] + pop_new
from collections import OrderedDict
from copy import deepcopy
import datetime
import os
from scrapy import signals
from scrapy.crawler import CrawlerRunner, Crawler
from scrapy.exceptions import DontCloseSpider
from scrapy.http import Request
from twisted.web.error import Error
from twisted.internet import defer
from . import log
from .conf import settings
from .conf.spider_settings import get_scrapyrt_settings, get_project_settings
from .decorators import deprecated
from .log import setup_spider_logging
class ScrapyrtCrawler(Crawler):
    """Main and only difference from base class -
    ScrapyrtCrawler allows us to call or not call
    start_requests.
    https://github.com/scrapy/scrapy/blob/master/scrapy/crawler.py#L52
    TODO: PR to scrapy - ability to set start_requests here.
    """

    def __init__(self, spidercls, crawler_settings, start_requests=False):
        # start_requests: when False, the spider's start_requests() is never
        # consumed and the engine is opened with an empty seed instead.
        super(ScrapyrtCrawler, self).__init__(spidercls, crawler_settings)
        self.start_requests = start_requests

    @defer.inlineCallbacks
    def crawl(self, *args, **kwargs):
        # Mirrors Crawler.crawl, except the start_requests iterable may be
        # replaced with an empty tuple depending on self.start_requests.
        assert not self.crawling, "Crawling already taking place"
        self.crawling = True
        try:
            self.spider = self._create_spider(*args, **kwargs)
            self.engine = self._create_engine()
            if self.start_requests:
                start_requests = iter(self.spider.start_requests())
            else:
                start_requests = ()
            yield self.engine.open_spider(self.spider, start_requests)
            yield defer.maybeDeferred(self.engine.start)
        except Exception:
            # reset the flag so a failed crawl can be retried, then re-raise
            self.crawling = False
            raise
class ScrapyrtCrawlerProcess(CrawlerRunner):
    """CrawlerRunner that builds a ScrapyrtCrawler and wires its signals
    to a CrawlManager instance."""

    def __init__(self, settings, scrapyrt_manager):
        super(ScrapyrtCrawlerProcess, self).__init__(settings)
        # the manager receives crawl signals and controls the crawl lifecycle
        self.scrapyrt_manager = scrapyrt_manager

    def crawl(self, spidercls, *args, **kwargs):
        # resolve a spider name to its class via the project's spider loader
        if isinstance(spidercls, str):
            spidercls = self.spider_loader.load(spidercls)
        # refuse crawl arguments that would shadow a spider method
        for kw in kwargs:
            attr_or_m = getattr(spidercls, kw, None)
            if attr_or_m and callable(attr_or_m):
                msg = 'Crawl argument cannot override spider method.'
                msg += ' Got argument {} that overrides spider method {}'
                raise Error('400', message=msg.format(kw, getattr(spidercls, kw)))
        # creating our own crawler that will allow us to disable start requests easily
        crawler = ScrapyrtCrawler(
            spidercls, self.settings, self.scrapyrt_manager.start_requests)
        self.scrapyrt_manager.crawler = crawler
        # Connecting signals to handlers that control crawl process
        crawler.signals.connect(self.scrapyrt_manager.get_item,
                                signals.item_scraped)
        crawler.signals.connect(self.scrapyrt_manager.collect_dropped,
                                signals.item_dropped)
        crawler.signals.connect(self.scrapyrt_manager.spider_idle,
                                signals.spider_idle)
        crawler.signals.connect(self.scrapyrt_manager.handle_spider_error,
                                signals.spider_error)
        crawler.signals.connect(self.scrapyrt_manager.handle_scheduling,
                                signals.request_scheduled)
        dfd = super(ScrapyrtCrawlerProcess, self).crawl(crawler, *args, **kwargs)
        # the per-spider log handler must be removed when the crawl ends,
        # whether it succeeds or fails
        _cleanup_handler = setup_spider_logging(crawler.spider, self.settings)

        def cleanup_logging(result):
            _cleanup_handler()
            return result
        return dfd.addBoth(cleanup_logging)
class CrawlManager(object):
    """
    Runs a single crawl: schedules the API request on the spider, enforces
    request-count and runtime limits, and accumulates items, dropped items,
    errors and stats for the response.
    """

    def __init__(self, spider_name, request_kwargs, max_requests=None, start_requests=False):
        self.spider_name = spider_name
        self.log_dir = settings.LOG_DIR
        self.items = []
        self.items_dropped = []
        self.errors = []
        # crawl limits enforced from handle_scheduling
        self.max_requests = int(max_requests) if max_requests else None
        self.timeout_limit = int(settings.TIMEOUT_LIMIT)
        self.request_count = 0
        self.debug = settings.DEBUG
        self.crawler_process = None
        self.crawler = None
        # callback will be added after instantiation of crawler object
        # because we need to know if spider has method available
        self.callback_name = request_kwargs.pop('callback', None) or 'parse'
        # do the same for errback
        self.errback_name = request_kwargs.pop('errback', None) or 'parse'
        if request_kwargs.get("url"):
            self.request = self.create_spider_request(deepcopy(request_kwargs))
        else:
            self.request = None
        self.start_requests = start_requests
        self._request_scheduled = False

    def crawl(self, *args, **kwargs):
        """Start the crawl; return a Deferred firing with the results dict."""
        self.crawler_process = ScrapyrtCrawlerProcess(
            self.get_project_settings(), self)
        try:
            dfd = self.crawler_process.crawl(self.spider_name, *args, **kwargs)
        except KeyError as e:
            # Spider not found.
            raise Error('404', message=str(e))
        dfd.addCallback(self.return_items)
        return dfd

    def _get_log_file_path(self):
        """Build a timestamped per-spider log file path, creating the dir."""
        log_dir = os.path.join(self.log_dir, self.spider_name)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        time_format = settings.SPIDER_LOG_FILE_TIMEFORMAT
        filename = datetime.datetime.now().strftime(time_format) + '.log'
        return os.path.join(log_dir, filename)

    def get_project_settings(self):
        """Return project settings with a job-specific log file applied."""
        log_file = self._get_log_file_path()
        custom_settings = get_scrapyrt_settings(log_file=log_file)
        return get_project_settings(custom_settings=custom_settings)

    @deprecated(use_instead='.crawl()')
    def create_crawler(self, **kwargs):
        return self.crawl()

    def spider_idle(self, spider):
        """Handler of spider_idle signal.
        Schedule request for url given to api, with optional callback
        and errback that can be passed as GET parameter.
        spider_idle signal is used because we want to optionally enable
        start_requests for the spider and if request is scheduled in
        spider_opened signal handler it's fired earlier then start_requests
        which is totally wrong.
        """
        if spider is self.crawler.spider and self.request and not self._request_scheduled:
            callback = getattr(self.crawler.spider, self.callback_name)
            assert callable(callback), 'Invalid callback'
            self.request = self.request.replace(callback=callback)
            errback = getattr(self.crawler.spider, self.errback_name)
            assert callable(errback), 'Invalid errback'
            self.request = self.request.replace(errback=errback)
            # give the spider a chance to rewrite the request before scheduling
            modify_request = getattr(
                self.crawler.spider, "modify_realtime_request", None)
            if callable(modify_request):
                self.request = modify_request(self.request)
            spider.crawler.engine.crawl(self.request, spider)
            self._request_scheduled = True
        raise DontCloseSpider

    def handle_scheduling(self, request, spider):
        """Handler of request_scheduled signal.
        For every scheduled request check if number of requests is less
        then limit and runtime doesn't exceed limit as well.
        """
        if spider is self.crawler.spider:
            self.limit_requests(spider)
            self.limit_runtime(spider)

    def limit_runtime(self, spider):
        """Stop crawl if it takes too long."""
        start_time = self.crawler.stats.get_value("start_time")
        time_now = datetime.datetime.utcnow()
        # BUG FIX: timedelta.seconds wraps around at 24 hours;
        # total_seconds() is the actual elapsed time.
        if (time_now - start_time).total_seconds() >= self.timeout_limit:
            spider.crawler.engine.close_spider(spider, reason="timeout")

    def limit_requests(self, spider):
        """Stop crawl after reaching max_requests."""
        if self.max_requests and self.max_requests <= self.request_count:
            reason = "stop generating requests, only {} requests allowed".format(
                self.max_requests)
            spider.crawler.engine.close_spider(spider, reason=reason)
        else:
            self.request_count += 1

    def handle_spider_error(self, failure, spider):
        # tracebacks are only exposed to the API caller in debug mode
        if spider is self.crawler.spider and self.debug:
            fail_data = failure.getTraceback()
            self.errors.append(fail_data)

    def get_item(self, item, response, spider):
        if spider is self.crawler.spider:
            self.items.append(item)

    def collect_dropped(self, item, response, exception, spider):
        if spider is self.crawler.spider:
            self.items_dropped.append({
                "item": item,
                "exception": str(exception),
                "response": response
            })

    def return_items(self, result):
        """Assemble the response payload once the crawl Deferred fires."""
        stats = self.crawler.stats.get_stats()
        # sort stats keys for a stable, readable response
        stats = OrderedDict((k, v) for k, v in sorted(stats.items()))
        results = {
            "items": self.items,
            "items_dropped": self.items_dropped,
            "stats": stats,
            "spider_name": self.spider_name,
        }
        if self.debug:
            results["errors"] = self.errors
        return results

    def create_spider_request(self, kwargs):
        """Build the scrapy Request for the url passed to the API."""
        url = kwargs.pop('url')
        try:
            req = Request(url, **kwargs)
        except (TypeError, ValueError) as e:
            msg = "Error while creating Scrapy Request, {}"
            message = msg.format(str(e))
            raise Error('400', message=message)
        req.dont_filter = True
        msg = u"Created request for spider {} with url {} and kwargs {}"
        msg = msg.format(self.spider_name, url, repr(kwargs))
        log.msg(msg)
        # FIX: dataset row-separator text was fused onto this line; restored.
        return req
from copy import deepcopy
import datetime
import os
from scrapy import signals
from scrapy.crawler import CrawlerRunner, Crawler
from scrapy.exceptions import DontCloseSpider
from scrapy.http import Request
from twisted.web.error import Error
from twisted.internet import defer
from . import log
from .conf import settings
from .conf.spider_settings import get_scrapyrt_settings, get_project_settings
from .decorators import deprecated
from .log import setup_spider_logging
class ScrapyrtCrawler(Crawler):
    """Main and only difference from base class -
    ScrapyrtCrawler allows us to call or not call
    start_requests.
    https://github.com/scrapy/scrapy/blob/master/scrapy/crawler.py#L52
    TODO: PR to scrapy - ability to set start_requests here.
    """

    def __init__(self, spidercls, crawler_settings, start_requests=False):
        # start_requests: when False, the spider's start_requests() is never
        # consumed and the engine is opened with an empty seed instead.
        super(ScrapyrtCrawler, self).__init__(spidercls, crawler_settings)
        self.start_requests = start_requests

    @defer.inlineCallbacks
    def crawl(self, *args, **kwargs):
        # Mirrors Crawler.crawl, except the start_requests iterable may be
        # replaced with an empty tuple depending on self.start_requests.
        assert not self.crawling, "Crawling already taking place"
        self.crawling = True
        try:
            self.spider = self._create_spider(*args, **kwargs)
            self.engine = self._create_engine()
            if self.start_requests:
                start_requests = iter(self.spider.start_requests())
            else:
                start_requests = ()
            yield self.engine.open_spider(self.spider, start_requests)
            yield defer.maybeDeferred(self.engine.start)
        except Exception:
            # reset the flag so a failed crawl can be retried, then re-raise
            self.crawling = False
            raise
class ScrapyrtCrawlerProcess(CrawlerRunner):
    """CrawlerRunner that builds a ScrapyrtCrawler and wires its signals
    to a CrawlManager instance."""

    def __init__(self, settings, scrapyrt_manager):
        super(ScrapyrtCrawlerProcess, self).__init__(settings)
        # the manager receives crawl signals and controls the crawl lifecycle
        self.scrapyrt_manager = scrapyrt_manager

    def crawl(self, spidercls, *args, **kwargs):
        # resolve a spider name to its class via the project's spider loader
        if isinstance(spidercls, str):
            spidercls = self.spider_loader.load(spidercls)
        # refuse crawl arguments that would shadow a spider method
        for kw in kwargs:
            attr_or_m = getattr(spidercls, kw, None)
            if attr_or_m and callable(attr_or_m):
                msg = 'Crawl argument cannot override spider method.'
                msg += ' Got argument {} that overrides spider method {}'
                raise Error('400', message=msg.format(kw, getattr(spidercls, kw)))
        # creating our own crawler that will allow us to disable start requests easily
        crawler = ScrapyrtCrawler(
            spidercls, self.settings, self.scrapyrt_manager.start_requests)
        self.scrapyrt_manager.crawler = crawler
        # Connecting signals to handlers that control crawl process
        crawler.signals.connect(self.scrapyrt_manager.get_item,
                                signals.item_scraped)
        crawler.signals.connect(self.scrapyrt_manager.collect_dropped,
                                signals.item_dropped)
        crawler.signals.connect(self.scrapyrt_manager.spider_idle,
                                signals.spider_idle)
        crawler.signals.connect(self.scrapyrt_manager.handle_spider_error,
                                signals.spider_error)
        crawler.signals.connect(self.scrapyrt_manager.handle_scheduling,
                                signals.request_scheduled)
        dfd = super(ScrapyrtCrawlerProcess, self).crawl(crawler, *args, **kwargs)
        # the per-spider log handler must be removed when the crawl ends,
        # whether it succeeds or fails
        _cleanup_handler = setup_spider_logging(crawler.spider, self.settings)

        def cleanup_logging(result):
            _cleanup_handler()
            return result
        return dfd.addBoth(cleanup_logging)
class CrawlManager(object):
    """
    Runs a single crawl: schedules the API request on the spider, enforces
    request-count and runtime limits, and accumulates items, dropped items,
    errors and stats for the response.
    """

    def __init__(self, spider_name, request_kwargs, max_requests=None, start_requests=False):
        self.spider_name = spider_name
        self.log_dir = settings.LOG_DIR
        self.items = []
        self.items_dropped = []
        self.errors = []
        # crawl limits enforced from handle_scheduling
        self.max_requests = int(max_requests) if max_requests else None
        self.timeout_limit = int(settings.TIMEOUT_LIMIT)
        self.request_count = 0
        self.debug = settings.DEBUG
        self.crawler_process = None
        self.crawler = None
        # callback will be added after instantiation of crawler object
        # because we need to know if spider has method available
        self.callback_name = request_kwargs.pop('callback', None) or 'parse'
        # do the same for errback
        self.errback_name = request_kwargs.pop('errback', None) or 'parse'
        if request_kwargs.get("url"):
            self.request = self.create_spider_request(deepcopy(request_kwargs))
        else:
            self.request = None
        self.start_requests = start_requests
        self._request_scheduled = False

    def crawl(self, *args, **kwargs):
        """Start the crawl; return a Deferred firing with the results dict."""
        self.crawler_process = ScrapyrtCrawlerProcess(
            self.get_project_settings(), self)
        try:
            dfd = self.crawler_process.crawl(self.spider_name, *args, **kwargs)
        except KeyError as e:
            # Spider not found.
            raise Error('404', message=str(e))
        dfd.addCallback(self.return_items)
        return dfd

    def _get_log_file_path(self):
        """Build a timestamped per-spider log file path, creating the dir."""
        log_dir = os.path.join(self.log_dir, self.spider_name)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        time_format = settings.SPIDER_LOG_FILE_TIMEFORMAT
        filename = datetime.datetime.now().strftime(time_format) + '.log'
        return os.path.join(log_dir, filename)

    def get_project_settings(self):
        """Return project settings with a job-specific log file applied."""
        log_file = self._get_log_file_path()
        custom_settings = get_scrapyrt_settings(log_file=log_file)
        return get_project_settings(custom_settings=custom_settings)

    @deprecated(use_instead='.crawl()')
    def create_crawler(self, **kwargs):
        return self.crawl()

    def spider_idle(self, spider):
        """Handler of spider_idle signal.
        Schedule request for url given to api, with optional callback
        and errback that can be passed as GET parameter.
        spider_idle signal is used because we want to optionally enable
        start_requests for the spider and if request is scheduled in
        spider_opened signal handler it's fired earlier then start_requests
        which is totally wrong.
        """
        if spider is self.crawler.spider and self.request and not self._request_scheduled:
            callback = getattr(self.crawler.spider, self.callback_name)
            assert callable(callback), 'Invalid callback'
            self.request = self.request.replace(callback=callback)
            errback = getattr(self.crawler.spider, self.errback_name)
            assert callable(errback), 'Invalid errback'
            self.request = self.request.replace(errback=errback)
            # give the spider a chance to rewrite the request before scheduling
            modify_request = getattr(
                self.crawler.spider, "modify_realtime_request", None)
            if callable(modify_request):
                self.request = modify_request(self.request)
            spider.crawler.engine.crawl(self.request, spider)
            self._request_scheduled = True
        raise DontCloseSpider

    def handle_scheduling(self, request, spider):
        """Handler of request_scheduled signal.
        For every scheduled request check if number of requests is less
        then limit and runtime doesn't exceed limit as well.
        """
        if spider is self.crawler.spider:
            self.limit_requests(spider)
            self.limit_runtime(spider)

    def limit_runtime(self, spider):
        """Stop crawl if it takes too long."""
        start_time = self.crawler.stats.get_value("start_time")
        time_now = datetime.datetime.utcnow()
        # BUG FIX: timedelta.seconds wraps around at 24 hours;
        # total_seconds() is the actual elapsed time.
        if (time_now - start_time).total_seconds() >= self.timeout_limit:
            spider.crawler.engine.close_spider(spider, reason="timeout")

    def limit_requests(self, spider):
        """Stop crawl after reaching max_requests."""
        if self.max_requests and self.max_requests <= self.request_count:
            reason = "stop generating requests, only {} requests allowed".format(
                self.max_requests)
            spider.crawler.engine.close_spider(spider, reason=reason)
        else:
            self.request_count += 1

    def handle_spider_error(self, failure, spider):
        # tracebacks are only exposed to the API caller in debug mode
        if spider is self.crawler.spider and self.debug:
            fail_data = failure.getTraceback()
            self.errors.append(fail_data)

    def get_item(self, item, response, spider):
        if spider is self.crawler.spider:
            self.items.append(item)

    def collect_dropped(self, item, response, exception, spider):
        if spider is self.crawler.spider:
            self.items_dropped.append({
                "item": item,
                "exception": str(exception),
                "response": response
            })

    def return_items(self, result):
        """Assemble the response payload once the crawl Deferred fires."""
        stats = self.crawler.stats.get_stats()
        # sort stats keys for a stable, readable response
        stats = OrderedDict((k, v) for k, v in sorted(stats.items()))
        results = {
            "items": self.items,
            "items_dropped": self.items_dropped,
            "stats": stats,
            "spider_name": self.spider_name,
        }
        if self.debug:
            results["errors"] = self.errors
        return results

    def create_spider_request(self, kwargs):
        """Build the scrapy Request for the url passed to the API."""
        url = kwargs.pop('url')
        try:
            req = Request(url, **kwargs)
        except (TypeError, ValueError) as e:
            msg = "Error while creating Scrapy Request, {}"
            message = msg.format(str(e))
            raise Error('400', message=message)
        req.dont_filter = True
        msg = u"Created request for spider {} with url {} and kwargs {}"
        msg = msg.format(self.spider_name, url, repr(kwargs))
        log.msg(msg)
        # FIX: dataset quality-score columns were fused onto this line; restored.
        return req
import os
import sys
from unittest import TestCase
# srcの下をパスに追加
sys.path.append(os.path.join(os.getcwd(), 'src'))
from fig_package.format.ynf import cYnfCircle, cYnfArc
from fig_package.common import IlligalParameterError
class TestCYnfCircle(TestCase):
    """Tests for cYnfCircle."""

    def test_1_create(self):
        """A circle is created from a complete parameter dict."""
        cYnfCircle({
            'center': [10, 20],
            'r': 5
        })

    def test_2_not_enough_parameter1(self):
        """Missing 'center' raises IlligalParameterError."""
        # assertRaises replaces the old try/except/assertTrue(False) pattern;
        # any other exception type still fails the test, as before.
        with self.assertRaises(IlligalParameterError):
            cYnfCircle({
                'r': 5
            })

    def test_3_not_enough_parameter2(self):
        """A 'center' with only one coordinate raises IlligalParameterError."""
        with self.assertRaises(IlligalParameterError):
            cYnfCircle({
                'center': [10],
                'r': 5
            })

    def test_4_not_enough_parameter3(self):
        """Missing 'r' raises IlligalParameterError."""
        with self.assertRaises(IlligalParameterError):
            cYnfCircle({
                'center': [10, 20]
            })

    def test_5_minmax(self):
        """get_minmax() returns the circle's axis-aligned bounding box."""
        center_pos = [100, 200]
        r = 5
        c = cYnfCircle({
            'center': center_pos,
            'r': r
        })
        mm = c.get_minmax()
        self.assertEqual(len(mm), 2, "min/maxの戻り値不正")
        self.assertEqual(len(mm[0]), 2, "min/maxの戻り値不正")
        self.assertEqual(len(mm[1]), 2, "min/maxの戻り値不正")
        self.assertEqual(mm[0][0], center_pos[0]-r,
                         "min/maxの戻り値不正")
        self.assertEqual(mm[0][1], center_pos[1]-r,
                         "min/maxの戻り値不正")
        self.assertEqual(mm[1][0], center_pos[0]+r,
                         "min/maxの戻り値不正")
        self.assertEqual(mm[1][1], center_pos[1]+r,
                         "min/maxの戻り値不正")
class TestCYnfArc(TestCase):
    """Tests for cYnfArc."""

    def test_1_create(self):
        """An arc is created from a complete parameter dict."""
        cYnfArc({
            'start': [100, 200],
            'end': [150, 200],
            'center': [125, 200],
            'r': 25,
            'clockwise': True
        })

    def test_2_error1(self):
        """Missing 'r' raises IlligalParameterError.

        (Arguably redundant input, but the implementation treats it as
        required and raises.)
        """
        # assertRaises replaces the old try/except/assertTrue(False) pattern;
        # any other exception type still fails the test, as before.
        with self.assertRaises(IlligalParameterError):
            cYnfArc({
                'start': [100, 200],
                'end': [150, 200],
                'center': [125, 200],
                'clockwise': True
            })

    def test_3_error2(self):
        """Missing 'center' raises IlligalParameterError."""
        with self.assertRaises(IlligalParameterError):
            cYnfArc({
                'start': [100, 200],
                'end': [150, 200],
                'r': 25,
                'clockwise': True
            })

    def test_4_error3(self):
        """Missing 'clockwise' raises IlligalParameterError."""
        with self.assertRaises(IlligalParameterError):
            cYnfArc({
                'start': [100, 200],
                'end': [150, 200],
                'center': [125, 200],
                'r': 25
            })

    def test_5_minmax(self):
        """get_minmax() returns the bounding box of a half-circle arc."""
        center_pos = [100, 200]
        r = 5
        start_pos = [center_pos[0]-r, center_pos[1]]
        end_pos = [center_pos[0]+r, center_pos[1]]
        a = cYnfArc({
            'start': start_pos,
            'end': end_pos,
            'center': center_pos,
            'r': r,
            'clockwise': True
        })
        mm = a.get_minmax()
        self.assertEqual(len(mm), 2, "min/maxの戻り値不正")
        self.assertEqual(len(mm[0]), 2, "min/maxの戻り値不正")
        self.assertEqual(len(mm[1]), 2, "min/maxの戻り値不正")
        self.assertEqual(mm[0][0], center_pos[0]-r,
                         "min/maxの戻り値不正")
        self.assertEqual(mm[0][1], center_pos[1]-r,
                         "min/maxの戻り値不正")
        self.assertEqual(mm[1][0], center_pos[0]+r,
                         "min/maxの戻り値不正")
        self.assertEqual(mm[1][1], center_pos[1]+r,
                         "min/maxの戻り値不正")
import sys
from unittest import TestCase
# srcの下をパスに追加
sys.path.append(os.path.join(os.getcwd(), 'src'))
from fig_package.format.ynf import cYnfCircle, cYnfArc
from fig_package.common import IlligalParameterError
class TestCYnfCircle(TestCase):
    """Tests for cYnfCircle."""

    def test_1_create(self):
        """A circle is created from a complete parameter dict."""
        cYnfCircle({
            'center': [10, 20],
            'r': 5
        })

    def test_2_not_enough_parameter1(self):
        """Missing 'center' raises IlligalParameterError."""
        # assertRaises replaces the old try/except/assertTrue(False) pattern;
        # any other exception type still fails the test, as before.
        with self.assertRaises(IlligalParameterError):
            cYnfCircle({
                'r': 5
            })

    def test_3_not_enough_parameter2(self):
        """A 'center' with only one coordinate raises IlligalParameterError."""
        with self.assertRaises(IlligalParameterError):
            cYnfCircle({
                'center': [10],
                'r': 5
            })

    def test_4_not_enough_parameter3(self):
        """Missing 'r' raises IlligalParameterError."""
        with self.assertRaises(IlligalParameterError):
            cYnfCircle({
                'center': [10, 20]
            })

    def test_5_minmax(self):
        """get_minmax() returns the circle's axis-aligned bounding box."""
        center_pos = [100, 200]
        r = 5
        c = cYnfCircle({
            'center': center_pos,
            'r': r
        })
        mm = c.get_minmax()
        self.assertEqual(len(mm), 2, "min/maxの戻り値不正")
        self.assertEqual(len(mm[0]), 2, "min/maxの戻り値不正")
        self.assertEqual(len(mm[1]), 2, "min/maxの戻り値不正")
        self.assertEqual(mm[0][0], center_pos[0]-r,
                         "min/maxの戻り値不正")
        self.assertEqual(mm[0][1], center_pos[1]-r,
                         "min/maxの戻り値不正")
        self.assertEqual(mm[1][0], center_pos[0]+r,
                         "min/maxの戻り値不正")
        self.assertEqual(mm[1][1], center_pos[1]+r,
                         "min/maxの戻り値不正")
class TestCYnfArc(TestCase):
    """Tests for cYnfArc."""

    def test_1_create(self):
        """An arc is created from a complete parameter dict."""
        cYnfArc({
            'start': [100, 200],
            'end': [150, 200],
            'center': [125, 200],
            'r': 25,
            'clockwise': True
        })

    def test_2_error1(self):
        """Missing 'r' raises IlligalParameterError.

        (Arguably redundant input, but the implementation treats it as
        required and raises.)
        """
        # assertRaises replaces the old try/except/assertTrue(False) pattern;
        # any other exception type still fails the test, as before.
        with self.assertRaises(IlligalParameterError):
            cYnfArc({
                'start': [100, 200],
                'end': [150, 200],
                'center': [125, 200],
                'clockwise': True
            })

    def test_3_error2(self):
        """Missing 'center' raises IlligalParameterError."""
        with self.assertRaises(IlligalParameterError):
            cYnfArc({
                'start': [100, 200],
                'end': [150, 200],
                'r': 25,
                'clockwise': True
            })

    def test_4_error3(self):
        """Missing 'clockwise' raises IlligalParameterError."""
        with self.assertRaises(IlligalParameterError):
            cYnfArc({
                'start': [100, 200],
                'end': [150, 200],
                'center': [125, 200],
                'r': 25
            })

    def test_5_minmax(self):
        """get_minmax() returns the bounding box of a half-circle arc."""
        center_pos = [100, 200]
        r = 5
        start_pos = [center_pos[0]-r, center_pos[1]]
        end_pos = [center_pos[0]+r, center_pos[1]]
        a = cYnfArc({
            'start': start_pos,
            'end': end_pos,
            'center': center_pos,
            'r': r,
            'clockwise': True
        })
        mm = a.get_minmax()
        self.assertEqual(len(mm), 2, "min/maxの戻り値不正")
        self.assertEqual(len(mm[0]), 2, "min/maxの戻り値不正")
        self.assertEqual(len(mm[1]), 2, "min/maxの戻り値不正")
        self.assertEqual(mm[0][0], center_pos[0]-r,
                         "min/maxの戻り値不正")
        self.assertEqual(mm[0][1], center_pos[1]-r,
                         "min/maxの戻り値不正")
        self.assertEqual(mm[1][0], center_pos[0]+r,
                         "min/maxの戻り値不正")
        self.assertEqual(mm[1][1], center_pos[1]+r,
                         "min/maxの戻り値不正")
from _winreg import *
import types
import collections
import os.path
# Privileges required for opening keys
from value import parse_value, ValueHandler
# Access masks used when opening keys (standard winreg security rights).
KEY_READ = 0x20019
KEY_WRITE = 0x20006
KEY_READ_WRITE = KEY_READ | KEY_WRITE
# Shape of QueryInfoKey's result: subkey/value counts and last-modified time.
KeyInfo = collections.namedtuple("KeyInfo", "subkeys values modified")
# A single registry value: its name, data and _winreg type constant.
NamedValue = collections.namedtuple("NamedValue", "name value value_type")
# Root HKEY handles mapped to their canonical path names (and the reverse).
ROOT_NAMES = {
    HKEY_CLASSES_ROOT: "HKEY_CLASSES_ROOT",
    HKEY_CURRENT_USER: "HKEY_CURRENT_USER",
    HKEY_LOCAL_MACHINE: "HKEY_LOCAL_MACHINE",
    HKEY_CURRENT_CONFIG: "HKEY_CURRENT_CONFIG",
    HKEY_USERS: "HKEY_USERS"
}
REVERSE_ROOT_NAMES = {value: key for key, value in ROOT_NAMES.iteritems()}
class RegistryKeyNotEditable(Exception):
    """Raised when a mutating operation is attempted on a read-only key."""
def require_editable(f):
    """Decorator: refuse to run the wrapped method on a non-editable key.

    The wrapped method is only executed when ``self._edit`` is truthy;
    otherwise RegistryKeyNotEditable is raised.
    """
    def guarded(self, *args, **kwargs):
        if self._edit:
            return f(self, *args, **kwargs)
        raise RegistryKeyNotEditable("The key is not set as editable.")
    return guarded
class RegistryKey(object):
    """A (possibly editable) view of a Windows registry key.

    ``key`` may be a root HKEY constant, a path string such as
    "HKEY_CURRENT_USER\\Software", or another RegistryKey instance.
    """

    def __init__(self, key, subkey=None, edit=False):
        """
        edit - Whether the key is editable or not. Will affect subkey access as well.
        """
        if subkey is None:
            subkey = ""
        self._open_key(key, subkey, edit=edit)
        base_path = self._get_key_path(key)
        self._path = os.path.join(base_path, subkey)
        self._edit = edit

    def __repr__(self):
        return "{}({}, edit={})".format(
            self.__class__.__name__,
            repr(self.path),
            self._edit
        )

    def __getitem__(self, subkey):
        # Indexed subkeys inherit this key's editability.
        return self.open_subkey(subkey, edit=self._edit)

    @require_editable
    def set_value(self, name, value):
        """Set a named value; `value` must expose .value and .value_type."""
        SetValueEx(self.key, name, 0, value.value_type, value.value)

    @require_editable
    def delete_value(self, value_name):
        """Delete a named value from this key."""
        DeleteValue(self._key, value_name)

    def _open_key(self, key, subkey, edit=False):
        """Open the underlying HKEY handle with the required permissions."""
        # Get a key-subkey pair, `key` can be a key, a string or an instance.
        hkey, subkey = self._parse_key(key, subkey)
        # Set key permissions
        if edit:
            options = KEY_READ_WRITE
        else:
            options = KEY_READ
        # Open the key
        self._key = OpenKeyEx(hkey, subkey, 0, options)

    def _get_key_path(self, key):
        """Return the string path of `key` (RegistryKey, string or root HKEY)."""
        if isinstance(key, RegistryKey):
            return key.path
        elif isinstance(key, types.StringTypes):
            return key
        else:
            return ROOT_NAMES[key]

    @property
    def path(self):
        """Full key path without leading/trailing separators."""
        return self._path.strip(os.path.sep)

    @property
    def key(self):
        """The raw _winreg key handle."""
        return self._key

    def open_subkey(self, subkey, edit=False):
        """Open a direct subkey; raise KeyError if it does not exist."""
        try:
            return RegistryKey(self, subkey, edit=edit)
        except WindowsError as e:
            # winerror 2 == ERROR_FILE_NOT_FOUND: the subkey is missing.
            if e.winerror == 2:
                raise KeyError("Subkey does not exist: {}".format(
                    os.path.join(self.path, subkey)),
                )
            raise

    @require_editable
    def add_subkey(self, name):
        """
        Add a new subkey and return it.
        """
        return CreateKeyEx(self.key, name, 0, KEY_READ)

    @require_editable
    def delete_subkey(self, subkey_path, recurse=False):
        """Delete a subkey, optionally deleting its own subkeys first."""
        # Delete the subkey's subkeys
        if recurse:
            subkey = self[subkey_path]
            for name in subkey.iter_subkey_names():
                subkey.delete_subkey(name)
        # Delete the subkey
        DeleteKey(self.key, subkey_path)

    def get_editable(self):
        """
        Get an editable copy of the key.
        Will open the key.
        """
        return RegistryKey(self, subkey=None, edit=True)

    def get_non_editable(self):
        """
        Get an non-editable copy of the key.
        Will open the key.
        """
        return RegistryKey(self, subkey=None, edit=False)

    def get_info(self):
        """Return KeyInfo(subkeys, values, modified) for this key."""
        return KeyInfo(*QueryInfoKey(self.key))

    def _enum_value(self, index):
        return parse_value(EnumValue(self.key, index))

    def _iter_values(self):
        """Yield every value stored directly under this key."""
        subkeys, values, modified = self.get_info()
        for index in xrange(values):
            yield self._enum_value(index)

    @property
    def values(self):
        return ValueHandler(self)

    def _get_key_by_index(self, index):
        return EnumKey(self.key, index)

    def iter_subkey_names(self):
        """Yield the name of every direct subkey."""
        subkeys, values, modified = self.get_info()
        for index in xrange(subkeys):
            yield self._get_key_by_index(index)

    def iter_subkeys(self, edit=False):
        """Yield every direct subkey as an opened RegistryKey.

        BUG FIX: this previously used ``return`` inside the loop, so it
        opened and returned only the first subkey instead of iterating.
        """
        for subkey_name in self.iter_subkey_names():
            yield self.open_subkey(subkey_name, edit=edit)

    def get_parent_key(self):
        """Return the parent of this key; raise ValueError at a root key."""
        path = self.path
        try:
            parent, current = path.rstrip(os.path.sep).rsplit(os.path.sep, 1)
        except ValueError:
            # rsplit yielded a single part: the key has no parent.
            # (Narrowed from a bare `except:` that hid unrelated errors.)
            raise ValueError("No parent key.")
        return RegistryKey(key=parent)

    def _parse_key(self, key, subkey):
        """Normalize (key, subkey) into an (HKEY handle, subkey path) pair."""
        if isinstance(key, RegistryKey):
            return key.key, subkey
        if not isinstance(key, types.StringTypes):
            return key, subkey
        # We got thus far, so `key` is a string.
        # Convert the root of the key-path to an HKEY value,
        # join the rest with the subkey path.
        parts = key.split(os.path.sep, 1)
        root = parts.pop(0)
        if parts:
            path = parts.pop(0)
        else:
            path = ""
        # (Removed leftover debug `print` statements from both branches.)
        if root:
            hkey = REVERSE_ROOT_NAMES[root]
            subkey_path = os.path.join(path, subkey)
        else:
            # Path began with a separator, so the root name landed in `path`.
            hkey = REVERSE_ROOT_NAMES[path]
            subkey_path = subkey
        return hkey, subkey_path
import types
import collections
import os.path
# Privileges required for opening keys
from value import parse_value, ValueHandler
# Access masks used when opening keys (standard winreg security rights).
KEY_READ = 0x20019
KEY_WRITE = 0x20006
KEY_READ_WRITE = KEY_READ | KEY_WRITE
# Shape of QueryInfoKey's result: subkey/value counts and last-modified time.
KeyInfo = collections.namedtuple("KeyInfo", "subkeys values modified")
# A single registry value: its name, data and _winreg type constant.
NamedValue = collections.namedtuple("NamedValue", "name value value_type")
# Root HKEY handles mapped to their canonical path names (and the reverse).
ROOT_NAMES = {
    HKEY_CLASSES_ROOT: "HKEY_CLASSES_ROOT",
    HKEY_CURRENT_USER: "HKEY_CURRENT_USER",
    HKEY_LOCAL_MACHINE: "HKEY_LOCAL_MACHINE",
    HKEY_CURRENT_CONFIG: "HKEY_CURRENT_CONFIG",
    HKEY_USERS: "HKEY_USERS"
}
REVERSE_ROOT_NAMES = {value: key for key, value in ROOT_NAMES.iteritems()}
class RegistryKeyNotEditable(Exception):
    """Raised when a mutating operation is attempted on a read-only key."""
def require_editable(f):
    """Decorator: refuse to run the wrapped method on a non-editable key.

    The wrapped method is only executed when ``self._edit`` is truthy;
    otherwise RegistryKeyNotEditable is raised.
    """
    def guarded(self, *args, **kwargs):
        if self._edit:
            return f(self, *args, **kwargs)
        raise RegistryKeyNotEditable("The key is not set as editable.")
    return guarded
class RegistryKey(object):
    """A (possibly editable) view of a Windows registry key.

    ``key`` may be a root HKEY constant, a path string such as
    "HKEY_CURRENT_USER\\Software", or another RegistryKey instance.
    """

    def __init__(self, key, subkey=None, edit=False):
        """
        edit - Whether the key is editable or not. Will affect subkey access as well.
        """
        if subkey is None:
            subkey = ""
        self._open_key(key, subkey, edit=edit)
        base_path = self._get_key_path(key)
        self._path = os.path.join(base_path, subkey)
        self._edit = edit

    def __repr__(self):
        return "{}({}, edit={})".format(
            self.__class__.__name__,
            repr(self.path),
            self._edit
        )

    def __getitem__(self, subkey):
        # Indexed subkeys inherit this key's editability.
        return self.open_subkey(subkey, edit=self._edit)

    @require_editable
    def set_value(self, name, value):
        """Set a named value; `value` must expose .value and .value_type."""
        SetValueEx(self.key, name, 0, value.value_type, value.value)

    @require_editable
    def delete_value(self, value_name):
        """Delete a named value from this key."""
        DeleteValue(self._key, value_name)

    def _open_key(self, key, subkey, edit=False):
        """Open the underlying HKEY handle with the required permissions."""
        # Get a key-subkey pair, `key` can be a key, a string or an instance.
        hkey, subkey = self._parse_key(key, subkey)
        # Set key permissions
        if edit:
            options = KEY_READ_WRITE
        else:
            options = KEY_READ
        # Open the key
        self._key = OpenKeyEx(hkey, subkey, 0, options)

    def _get_key_path(self, key):
        """Return the string path of `key` (RegistryKey, string or root HKEY)."""
        if isinstance(key, RegistryKey):
            return key.path
        elif isinstance(key, types.StringTypes):
            return key
        else:
            return ROOT_NAMES[key]

    @property
    def path(self):
        """Full key path without leading/trailing separators."""
        return self._path.strip(os.path.sep)

    @property
    def key(self):
        """The raw _winreg key handle."""
        return self._key

    def open_subkey(self, subkey, edit=False):
        """Open a direct subkey; raise KeyError if it does not exist."""
        try:
            return RegistryKey(self, subkey, edit=edit)
        except WindowsError as e:
            # winerror 2 == ERROR_FILE_NOT_FOUND: the subkey is missing.
            if e.winerror == 2:
                raise KeyError("Subkey does not exist: {}".format(
                    os.path.join(self.path, subkey)),
                )
            raise

    @require_editable
    def add_subkey(self, name):
        """
        Add a new subkey and return it.
        """
        return CreateKeyEx(self.key, name, 0, KEY_READ)

    @require_editable
    def delete_subkey(self, subkey_path, recurse=False):
        """Delete a subkey, optionally deleting its own subkeys first."""
        # Delete the subkey's subkeys
        if recurse:
            subkey = self[subkey_path]
            for name in subkey.iter_subkey_names():
                subkey.delete_subkey(name)
        # Delete the subkey
        DeleteKey(self.key, subkey_path)

    def get_editable(self):
        """
        Get an editable copy of the key.
        Will open the key.
        """
        return RegistryKey(self, subkey=None, edit=True)

    def get_non_editable(self):
        """
        Get an non-editable copy of the key.
        Will open the key.
        """
        return RegistryKey(self, subkey=None, edit=False)

    def get_info(self):
        """Return KeyInfo(subkeys, values, modified) for this key."""
        return KeyInfo(*QueryInfoKey(self.key))

    def _enum_value(self, index):
        return parse_value(EnumValue(self.key, index))

    def _iter_values(self):
        """Yield every value stored directly under this key."""
        subkeys, values, modified = self.get_info()
        for index in xrange(values):
            yield self._enum_value(index)

    @property
    def values(self):
        return ValueHandler(self)

    def _get_key_by_index(self, index):
        return EnumKey(self.key, index)

    def iter_subkey_names(self):
        """Yield the name of every direct subkey."""
        subkeys, values, modified = self.get_info()
        for index in xrange(subkeys):
            yield self._get_key_by_index(index)

    def iter_subkeys(self, edit=False):
        """Yield every direct subkey as an opened RegistryKey.

        BUG FIX: this previously used ``return`` inside the loop, so it
        opened and returned only the first subkey instead of iterating.
        """
        for subkey_name in self.iter_subkey_names():
            yield self.open_subkey(subkey_name, edit=edit)

    def get_parent_key(self):
        """Return the parent of this key; raise ValueError at a root key."""
        path = self.path
        try:
            parent, current = path.rstrip(os.path.sep).rsplit(os.path.sep, 1)
        except ValueError:
            # rsplit yielded a single part: the key has no parent.
            # (Narrowed from a bare `except:` that hid unrelated errors.)
            raise ValueError("No parent key.")
        return RegistryKey(key=parent)

    def _parse_key(self, key, subkey):
        """Normalize (key, subkey) into an (HKEY handle, subkey path) pair."""
        if isinstance(key, RegistryKey):
            return key.key, subkey
        if not isinstance(key, types.StringTypes):
            return key, subkey
        # We got thus far, so `key` is a string.
        # Convert the root of the key-path to an HKEY value,
        # join the rest with the subkey path.
        parts = key.split(os.path.sep, 1)
        root = parts.pop(0)
        if parts:
            path = parts.pop(0)
        else:
            path = ""
        # (Removed leftover debug `print` statements from both branches.)
        if root:
            hkey = REVERSE_ROOT_NAMES[root]
            subkey_path = os.path.join(path, subkey)
        else:
            # Path began with a separator, so the root name landed in `path`.
            hkey = REVERSE_ROOT_NAMES[path]
            subkey_path = subkey
        return hkey, subkey_path
import argparse
import difflib
import diff_match_patch as dmp_module
def getColourMap(line1, line2):
    """Diff two corresponding lines and return their colour pairing.

    Returns a tuple ``(template_tokens, theme_tokens)`` for the last deleted
    span found by diff-match-patch (each side split on commas), or an empty
    list when the lines do not differ.
    """
    # Removed a dead difflib.SequenceMatcher call whose result was
    # immediately discarded by the reassignment of `diff` below.
    dmp = dmp_module.diff_match_patch()
    dmp.Diff_Timeout = 10
    diff = dmp.diff_main(line1, line2)
    dmp.diff_cleanupSemantic(diff)
    colourMap = []
    # A deletion (-1) holds the template's text; the entry that follows it is
    # the theme's replacement text.  Iterate to len-1 so diff[index+1] can
    # never raise IndexError when a deletion is the final entry.
    for index in range(len(diff) - 1):
        if diff[index][0] == -1:
            colourMap = (tuple(diff[index][1].split(",")),
                         tuple(diff[index + 1][1].split(",")))
    return colourMap
def schemeSuggest(template, theme, mode):
    """Print template colour ids paired with the theme's concrete colours.

    template -- path to the base24 template file
    theme -- path to the matching theme file
    mode -- colour format of the theme (hex/rgb/dec, optionally 'reverse...')
    """
    # Read lines and ignore blank lines (other different lines will cause
    # issues).  `with` ensures the files are closed (they were leaked before).
    with open(template, "r") as f:
        template_lines = [line for line in f.readlines() if line.strip()]
    with open(theme, "r") as f:
        theme_lines = [line for line in f.readlines() if line.strip()]
    colours = []
    for index in range(len(theme_lines)):
        colours.append(getColourMap(template_lines[index], theme_lines[index]))
    # De-duplicate the (template, theme) pairs.
    unique = [list(x) for x in set(tuple(x) for x in colours)]
    for element in unique:
        if len(element) > 0:
            try:
                print(element[0][0][2:8], end=": ")
                if "reverse" in mode:
                    colour = element[1][::-1]
                else:
                    colour = element[1]
                if "rgb" in mode:
                    colour = "{:02x}".format(int(colour[0]))+"{:02x}".format(int(colour[1]))+"{:02x}".format(int(colour[2]))
                elif "dec" in mode:
                    colour = "{:02x}".format(int(colour[0])*255)+"{:02x}".format(int(colour[1])*255)+"{:02x}".format(int(colour[2])*255)
                print("\033["+ ";".join(["48", "2", str(int(colour[0:2], 16)),str(int(colour[2:4], 16)),str(int(colour[4:6], 16))]) + "m \033[0m #" + colour)
            except (IndexError, ValueError, TypeError):
                # Malformed pair: skip it, as before — but no longer swallow
                # every exception (the old bare `except:` hid e.g. Ctrl-C).
                pass
if __name__ == "__main__":
    # Parse command-line arguments and run schemeSuggest.  (This was a bare
    # string literal before — not a docstring in this position, just an
    # expression that was evaluated and discarded.)
    parser = argparse.ArgumentParser("schemeSuggest",
        description="Pair colours with base24 colour ids")
    parser.add_argument("template",
        help="relative or abs path to the base24 template")
    parser.add_argument("theme",
        help="relative or abs path to the theme file")
    parser.add_argument("--mode", action="store", default="hexhash",
        help="""color format: hex (ff00aa), reversehex (aa00ff), rgb (255,0,170),
    reversergb (170,0,255), dec (1.0,0,0.666), reversedec (0.666,0,1.0)""")
    args = parser.parse_args()
    schemeSuggest(args.template, args.theme, args.mode)
import difflib
import diff_match_patch as dmp_module
def getColourMap(line1, line2):
    """Diff two corresponding lines and return their colour pairing.

    Returns a tuple ``(template_tokens, theme_tokens)`` for the last deleted
    span found by diff-match-patch (each side split on commas), or an empty
    list when the lines do not differ.
    """
    # Removed a dead difflib.SequenceMatcher call whose result was
    # immediately discarded by the reassignment of `diff` below.
    dmp = dmp_module.diff_match_patch()
    dmp.Diff_Timeout = 10
    diff = dmp.diff_main(line1, line2)
    dmp.diff_cleanupSemantic(diff)
    colourMap = []
    # A deletion (-1) holds the template's text; the entry that follows it is
    # the theme's replacement text.  Iterate to len-1 so diff[index+1] can
    # never raise IndexError when a deletion is the final entry.
    for index in range(len(diff) - 1):
        if diff[index][0] == -1:
            colourMap = (tuple(diff[index][1].split(",")),
                         tuple(diff[index + 1][1].split(",")))
    return colourMap
def schemeSuggest(template, theme, mode):
    """Print template colour ids paired with the theme's concrete colours.

    template -- path to the base24 template file
    theme -- path to the matching theme file
    mode -- colour format of the theme (hex/rgb/dec, optionally 'reverse...')
    """
    # Read lines and ignore blank lines (other different lines will cause
    # issues).  `with` ensures the files are closed (they were leaked before).
    with open(template, "r") as f:
        template_lines = [line for line in f.readlines() if line.strip()]
    with open(theme, "r") as f:
        theme_lines = [line for line in f.readlines() if line.strip()]
    colours = []
    for index in range(len(theme_lines)):
        colours.append(getColourMap(template_lines[index], theme_lines[index]))
    # De-duplicate the (template, theme) pairs.
    unique = [list(x) for x in set(tuple(x) for x in colours)]
    for element in unique:
        if len(element) > 0:
            try:
                print(element[0][0][2:8], end=": ")
                if "reverse" in mode:
                    colour = element[1][::-1]
                else:
                    colour = element[1]
                if "rgb" in mode:
                    colour = "{:02x}".format(int(colour[0]))+"{:02x}".format(int(colour[1]))+"{:02x}".format(int(colour[2]))
                elif "dec" in mode:
                    colour = "{:02x}".format(int(colour[0])*255)+"{:02x}".format(int(colour[1])*255)+"{:02x}".format(int(colour[2])*255)
                print("\033["+ ";".join(["48", "2", str(int(colour[0:2], 16)),str(int(colour[2:4], 16)),str(int(colour[4:6], 16))]) + "m \033[0m #" + colour)
            except (IndexError, ValueError, TypeError):
                # Malformed pair: skip it, as before — but no longer swallow
                # every exception (the old bare `except:` hid e.g. Ctrl-C).
                pass
if __name__ == "__main__":
    # Parse command-line arguments and run schemeSuggest.  (This was a bare
    # string literal before — not a docstring in this position, just an
    # expression that was evaluated and discarded.)
    parser = argparse.ArgumentParser("schemeSuggest",
        description="Pair colours with base24 colour ids")
    parser.add_argument("template",
        help="relative or abs path to the base24 template")
    parser.add_argument("theme",
        help="relative or abs path to the theme file")
    parser.add_argument("--mode", action="store", default="hexhash",
        help="""color format: hex (ff00aa), reversehex (aa00ff), rgb (255,0,170),
    reversergb (170,0,255), dec (1.0,0,0.666), reversedec (0.666,0,1.0)""")
    args = parser.parse_args()
    schemeSuggest(args.template, args.theme, args.mode)
# Lint as: python3
from typing import Any
from .. import struct
import jax
import jax.numpy as jnp
import numpy as onp
from .base import OptimizerDef
@struct.dataclass
class _WeightNormHyperParams:
  # Hyper parameters of the wrapped (inner) optimizer.
  inner: Any
  # L2 decay applied to the unnormalized weight vector v.
  wn_decay: onp.ndarray
  # Additive epsilon for numerical stability of the normalization.
  wn_eps: onp.ndarray
@struct.dataclass
class _WeightNormParamState:
  # Wrapped optimizer's state for the direction component.
  direction_state: Any
  # Wrapped optimizer's state for the scale component.
  scale_state: Any
  # Multiplier scale/(eps+||v||) from the last merge; () for params that
  # were not decomposed (see WeightNorm._split_param).
  mult: onp.ndarray
class WeightNorm(OptimizerDef):
  """Adds weight normalization to an optimizer def.
  See https://arxiv.org/abs/1602.07868
  """

  def __init__(self, wrapped_optimizer, wn_decay=0, wn_eps=1e-8):
    """Constructor for a WeightNorm optimizer.
    Weight vectors are decomposed as w = g * v/||v||_2, for scalar
    scale parameter g, and raw weight vector v. The original optimizer is then
    applied to the (g,v) parameterization and the updated parameters are
    transformed back to w-space, i.e.
    w,state --> (g,v) --(original optimizer)--> (g',v') --> w',state'
    We assume the output axis of any kernel matrix is the last one,
    as per the Tensorflow convention.
    Args:
      wrapped_optimizer: another OptimizerDef
      wn_decay: apply l2 decay to the unnoralized weight vector
      wn_eps: additive constant for stability of
        the normalization (default: 1e-8).
    """
    hps = _WeightNormHyperParams(
        wrapped_optimizer.hyper_params, wn_decay, wn_eps)
    super().__init__(hps)
    self.wrapped_optimizer = wrapped_optimizer

  def update_hyper_params(self, **hyper_param_overrides):
    """Consume wn_* overrides here; forward the rest to the wrapped optimizer."""
    decay = hyper_param_overrides.pop('wn_decay', self.hyper_params.wn_decay)
    eps = hyper_param_overrides.pop('wn_eps', self.hyper_params.wn_eps)
    inner = self.wrapped_optimizer.update_hyper_params(
        **hyper_param_overrides)
    return self.hyper_params.replace(inner=inner, wn_decay=decay, wn_eps=eps)

  def init_state(self, params):
    """Split each param into (direction, scale) and initialize the wrapped
    optimizer's state on that parameterization."""
    leaves, treedef = jax.tree_flatten(params)
    directions, scales = zip(*(self._split_param(p) for p in leaves))
    directions = treedef.unflatten(directions)
    scales = treedef.unflatten(scales)
    wn_params = {'direction': directions, 'scale': scales}
    state = self.wrapped_optimizer.init_state(wn_params)
    direction_state = state.param_states['direction']
    scale_state = state.param_states['scale']
    # Zip the per-leaf direction/scale states back into one combined
    # _WeightNormParamState per original parameter.
    param_states = jax.tree_multimap(
        lambda _, *args: _WeightNormParamState(*args),
        params, direction_state, scale_state, scales)
    return state.replace(param_states=param_states)

  def apply_gradient(self, hyper_params, params, state, grads):
    """Apply `grads` in (g, v) space and map the result back to w-space."""
    p_leaves, treedef = jax.tree_flatten(params)
    s_leaves = treedef.flatten_up_to(state.param_states)
    g_leaves = treedef.flatten_up_to(grads)
    # Per-leaf split into direction/scale params, states and gradients.
    split_grads = zip(*(self._split_grad(p, s, g, hyper_params.wn_decay)
                        for p, s, g in zip(p_leaves, s_leaves, g_leaves)))
    d_p, d_s, d_g, s_p, s_s, s_g = [
        jax.tree_unflatten(treedef, x) for x in split_grads]
    wn_params = {'direction': d_p, 'scale': s_p}
    wn_state = {'direction': d_s, 'scale': s_s}
    wn_grads = {'direction': d_g, 'scale': s_g}
    new_wn_params, new_state = self.wrapped_optimizer.apply_gradient(
        hyper_params.inner, wn_params,
        state.replace(param_states=wn_state), wn_grads)
    directions = treedef.flatten_up_to(new_wn_params['direction'])
    scales = treedef.flatten_up_to(new_wn_params['scale'])
    # Recombine each (direction, scale) pair into a weight, keeping the
    # multiplier so the next split can recover the direction.
    new_params, mults = zip(*(self._merge_param(d, s, hyper_params.wn_eps)
                              for d, s in zip(directions, scales)))
    new_params = jax.tree_unflatten(treedef, new_params)
    mults = jax.tree_unflatten(treedef, mults)
    direction_state = new_state.param_states['direction']
    scale_state = new_state.param_states['scale']
    param_states = jax.tree_multimap(
        lambda _, *args: _WeightNormParamState(*args),
        params, direction_state, scale_state, mults)
    return new_params, new_state.replace(param_states=param_states)

  def _split_param(self, param):
    """Split `param` into (direction, scale); small params pass through.

    Params with size == shape[-1] (e.g. biases) are returned unchanged with
    an empty () scale marker.
    """
    if param.size > param.shape[-1]:
      # Norm over all axes except the last (output) axis.
      scale = jnp.sqrt(jnp.square(param).sum(
          tuple(range(param.ndim-1)), keepdims=True))
      direction = param / scale
      return direction, scale
    else:
      return param, ()

  def _merge_param(self, direction, scale, eps):
    """Inverse of _split_param: rebuild the weight from (direction, scale).

    Returns (param, mult); mult is the applied multiplier scale/(eps+norm),
    or () for pass-through params.
    """
    if direction.size > direction.shape[-1]:
      norm = jnp.sqrt(jnp.square(direction).sum(
          tuple(range(direction.ndim - 1)), keepdims=True))
      mult = scale / (eps + norm)
      param = direction * mult
      return param, mult
    else:
      return direction, ()

  def _split_grad(self, param, state, grad, decay):
    """Split the gradient for the direction and scale."""
    if param.size > param.shape[-1]:
      red_dims = tuple(range(param.ndim-1))
      # Recover the direction using the multiplier stored at merge time.
      direction = param / state.mult
      norm = jnp.sqrt(jnp.square(param).sum(red_dims, keepdims=True))
      scale = norm * jnp.sign(state.mult)
      # Project the gradient onto the direction to get the scale gradient;
      # the remaining (orthogonal) component drives the direction update.
      scale_grad = jnp.sum(
          grad * direction, axis=red_dims, keepdims=True)
      direction_grad = state.mult * (grad - scale_grad * direction)
      if decay != 0:
        direction_grad = direction_grad + decay * direction
      direction_info = direction, state.direction_state, direction_grad
      scale_info = scale, state.scale_state, scale_grad
      return direction_info + scale_info
    else:
      return (param, state.direction_state, grad, (), (), ())
# Lint as: python3
from typing import Any
from .. import struct
import jax
import jax.numpy as jnp
import numpy as onp
from .base import OptimizerDef
@struct.dataclass
class _WeightNormHyperParams:
  # Hyper parameters of the wrapped (inner) optimizer.
  inner: Any
  # L2 decay applied to the unnormalized weight vector v.
  wn_decay: onp.ndarray
  # Additive epsilon for numerical stability of the normalization.
  wn_eps: onp.ndarray
@struct.dataclass
class _WeightNormParamState:
  # Wrapped optimizer's state for the direction component.
  direction_state: Any
  # Wrapped optimizer's state for the scale component.
  scale_state: Any
  # Multiplier scale/(eps+||v||) from the last merge; () for params that
  # were not decomposed (see WeightNorm._split_param).
  mult: onp.ndarray
class WeightNorm(OptimizerDef):
  """Adds weight normalization to an optimizer def.
  See https://arxiv.org/abs/1602.07868
  """

  def __init__(self, wrapped_optimizer, wn_decay=0, wn_eps=1e-8):
    """Constructor for a WeightNorm optimizer.
    Weight vectors are decomposed as w = g * v/||v||_2, for scalar
    scale parameter g, and raw weight vector v. The original optimizer is then
    applied to the (g,v) parameterization and the updated parameters are
    transformed back to w-space, i.e.
    w,state --> (g,v) --(original optimizer)--> (g',v') --> w',state'
    We assume the output axis of any kernel matrix is the last one,
    as per the Tensorflow convention.
    Args:
      wrapped_optimizer: another OptimizerDef
      wn_decay: apply l2 decay to the unnoralized weight vector
      wn_eps: additive constant for stability of
        the normalization (default: 1e-8).
    """
    hps = _WeightNormHyperParams(
        wrapped_optimizer.hyper_params, wn_decay, wn_eps)
    super().__init__(hps)
    self.wrapped_optimizer = wrapped_optimizer

  def update_hyper_params(self, **hyper_param_overrides):
    """Consume wn_* overrides here; forward the rest to the wrapped optimizer."""
    decay = hyper_param_overrides.pop('wn_decay', self.hyper_params.wn_decay)
    eps = hyper_param_overrides.pop('wn_eps', self.hyper_params.wn_eps)
    inner = self.wrapped_optimizer.update_hyper_params(
        **hyper_param_overrides)
    return self.hyper_params.replace(inner=inner, wn_decay=decay, wn_eps=eps)

  def init_state(self, params):
    """Split each param into (direction, scale) and initialize the wrapped
    optimizer's state on that parameterization."""
    leaves, treedef = jax.tree_flatten(params)
    directions, scales = zip(*(self._split_param(p) for p in leaves))
    directions = treedef.unflatten(directions)
    scales = treedef.unflatten(scales)
    wn_params = {'direction': directions, 'scale': scales}
    state = self.wrapped_optimizer.init_state(wn_params)
    direction_state = state.param_states['direction']
    scale_state = state.param_states['scale']
    # Zip the per-leaf direction/scale states back into one combined
    # _WeightNormParamState per original parameter.
    param_states = jax.tree_multimap(
        lambda _, *args: _WeightNormParamState(*args),
        params, direction_state, scale_state, scales)
    return state.replace(param_states=param_states)

  def apply_gradient(self, hyper_params, params, state, grads):
    """Apply `grads` in (g, v) space and map the result back to w-space."""
    p_leaves, treedef = jax.tree_flatten(params)
    s_leaves = treedef.flatten_up_to(state.param_states)
    g_leaves = treedef.flatten_up_to(grads)
    # Per-leaf split into direction/scale params, states and gradients.
    split_grads = zip(*(self._split_grad(p, s, g, hyper_params.wn_decay)
                        for p, s, g in zip(p_leaves, s_leaves, g_leaves)))
    d_p, d_s, d_g, s_p, s_s, s_g = [
        jax.tree_unflatten(treedef, x) for x in split_grads]
    wn_params = {'direction': d_p, 'scale': s_p}
    wn_state = {'direction': d_s, 'scale': s_s}
    wn_grads = {'direction': d_g, 'scale': s_g}
    new_wn_params, new_state = self.wrapped_optimizer.apply_gradient(
        hyper_params.inner, wn_params,
        state.replace(param_states=wn_state), wn_grads)
    directions = treedef.flatten_up_to(new_wn_params['direction'])
    scales = treedef.flatten_up_to(new_wn_params['scale'])
    # Recombine each (direction, scale) pair into a weight, keeping the
    # multiplier so the next split can recover the direction.
    new_params, mults = zip(*(self._merge_param(d, s, hyper_params.wn_eps)
                              for d, s in zip(directions, scales)))
    new_params = jax.tree_unflatten(treedef, new_params)
    mults = jax.tree_unflatten(treedef, mults)
    direction_state = new_state.param_states['direction']
    scale_state = new_state.param_states['scale']
    param_states = jax.tree_multimap(
        lambda _, *args: _WeightNormParamState(*args),
        params, direction_state, scale_state, mults)
    return new_params, new_state.replace(param_states=param_states)

  def _split_param(self, param):
    """Split `param` into (direction, scale); small params pass through.

    Params with size == shape[-1] (e.g. biases) are returned unchanged with
    an empty () scale marker.
    """
    if param.size > param.shape[-1]:
      # Norm over all axes except the last (output) axis.
      scale = jnp.sqrt(jnp.square(param).sum(
          tuple(range(param.ndim-1)), keepdims=True))
      direction = param / scale
      return direction, scale
    else:
      return param, ()

  def _merge_param(self, direction, scale, eps):
    """Inverse of _split_param: rebuild the weight from (direction, scale).

    Returns (param, mult); mult is the applied multiplier scale/(eps+norm),
    or () for pass-through params.
    """
    if direction.size > direction.shape[-1]:
      norm = jnp.sqrt(jnp.square(direction).sum(
          tuple(range(direction.ndim - 1)), keepdims=True))
      mult = scale / (eps + norm)
      param = direction * mult
      return param, mult
    else:
      return direction, ()

  def _split_grad(self, param, state, grad, decay):
    """Split the gradient for the direction and scale."""
    if param.size > param.shape[-1]:
      red_dims = tuple(range(param.ndim-1))
      # Recover the direction using the multiplier stored at merge time.
      direction = param / state.mult
      norm = jnp.sqrt(jnp.square(param).sum(red_dims, keepdims=True))
      scale = norm * jnp.sign(state.mult)
      # Project the gradient onto the direction to get the scale gradient;
      # the remaining (orthogonal) component drives the direction update.
      scale_grad = jnp.sum(
          grad * direction, axis=red_dims, keepdims=True)
      direction_grad = state.mult * (grad - scale_grad * direction)
      if decay != 0:
        direction_grad = direction_grad + decay * direction
      direction_info = direction, state.direction_state, direction_grad
      scale_info = scale, state.scale_state, scale_grad
      return direction_info + scale_info
    else:
      return (param, state.direction_state, grad, (), (), ())
from abc import ABCMeta
from collections import deque
from ..types import SimpleSerializable
# Global registry of API instances, keyed by their unique name.
_apis_by_name = {}
class MetaAPI(ABCMeta):
    """
    Meta Class for APIs. It creates the BoundAPIQuery helpers.
    """
    def __new__(mcs, name, bases, attrs):
        cls = super(MetaAPI, mcs).__new__(mcs, name, bases, attrs)
        # Only create the helpers on subclasses of API
        # (the API base class is defined in the same module as this
        # metaclass, so its __module__ matches and it is skipped here).
        if mcs.__module__ != attrs['__module__']:
            # Local imports — presumably to avoid circular imports at
            # module load time (TODO confirm).
            from ..queries.base import Query, BoundAPIQuery
            from ..caches import DefaultCache
            from .parsers import Parser
            cls._default_cache = DefaultCache
            # Per-API Query and Parser subclasses bound to this API class.
            cls.Query = type('Query', (BoundAPIQuery, ), {'__module__': attrs['__module__'], 'API': cls})
            cls.Parser = type('Parser', (Parser, ), {'__module__': attrs['__module__'], 'API': cls})
            cls._supported_queries = {}
            # Breadth-first walk over all unbound Query subclasses; for each
            # one, attach a `<Name>Base` class bound to this API.
            base_queries = deque(q for q in Query.__subclasses__() if not issubclass(q, BoundAPIQuery))
            while base_queries:
                base_query = base_queries.popleft()
                base_queries.extend(q for q in base_query.__subclasses__() if not issubclass(q, BoundAPIQuery))
                setattr(cls, base_query.__name__+'Base', type(base_query.__name__+'Base', (cls.Query, base_query, ),
                                                              {'__module__': attrs['__module__']}))
        return cls

    @property
    def supported_queries(cls):
        # Immutable view of the models this API class can query.
        return frozenset(cls._supported_queries)
class API(SimpleSerializable, metaclass=MetaAPI):
    """
    An API subclass is a collection of Query implementations used by different networks.
    The instance of an API has a name and is usually a network.
    To start a query on an API instance use its properties:
    >>> api.stops.where(name='Essen')
    This may raise a NotImplementedError if the API does not implement this Query.
    """
    # Maps model classes to their Query implementations (filled by MetaAPI).
    _model_to_query = {}

    def __init__(self, name):
        """Register this API instance under a globally unique name."""
        if self.__class__ == API:
            raise TypeError('Only API subclasses can be initialized.')
        if name in _apis_by_name:
            raise TypeError('Duplicate API name: %s' % name)
        self.name = name
        _apis_by_name[name] = self

    @classmethod
    def _get_serialized_type_name(cls):
        return 'api'

    @classmethod
    def _model_to_plural_name(cls, model):
        """Return the plural attribute name used for a model class."""
        # Fix: removed an unreachable stray `pass` that followed the return.
        name = model.__name__
        return {'City': 'cities', 'POI': 'POIs', 'Address': 'addresses'}.get(name, name.lower()+'s')

    @classmethod
    def _register_model(cls, model):
        """Install `<plural name>` query properties on API and APIWithCache."""
        name = cls._model_to_plural_name(model)
        error = 'Querying '+name+' is not supported by this API.'
        def api_func(self):
            return self._query(model, APIWithCache(self, self._default_cache()), error)
        api_func.__name__ = name.lower()
        def api_with_cache_func(self):
            return self.api._query(model, self, error)
        api_with_cache_func.__name__ = name.lower()
        setattr(API, name.lower(), property(api_func))
        setattr(APIWithCache, name.lower(), property(api_with_cache_func))

    def start_model_query(self, model):
        """Start a query for `model` on this API instance."""
        return getattr(self, self._model_to_plural_name(model))

    def _simple_serialize(self):
        return self.name

    @classmethod
    def _simple_unserialize(cls, data):
        """Look up an API instance by its serialized name."""
        if data is None:
            return None
        try:
            return _apis_by_name[data]
        except (KeyError, TypeError):
            # KeyError: unknown name; TypeError: unhashable data.
            # Narrowed from a bare `except:` that also swallowed e.g.
            # KeyboardInterrupt.
            raise ValueError('API %s does not exist!' % data)

    def _query(self, model, api_with_cache, error):
        """Instantiate the Query registered for `model`, or raise."""
        query = self._supported_queries.get(model)
        if query is None:
            raise NotImplementedError(error)
        return query(api_with_cache)

    @classmethod
    def _register_query(cls, query_cls):
        """Register a Query implementation; duplicates are a TypeError."""
        if query_cls.Model in cls._supported_queries:
            raise TypeError('Duplicate %sQuery on %s API.' % (query_cls.Model.__name__, query_cls.API.__name__))
        cls._supported_queries[query_cls.Model] = query_cls
class APIWithCache:
def __init__(self, api, cache=None):
self.api = api
self.cache = cache | src/choo/apis/api.py | from abc import ABCMeta
from collections import deque
from ..types import SimpleSerializable
_apis_by_name = {}
class MetaAPI(ABCMeta):
"""
Meta Class for APIs. It creates the BoundAPIQuery helpers.
"""
def __new__(mcs, name, bases, attrs):
cls = super(MetaAPI, mcs).__new__(mcs, name, bases, attrs)
# Only create the helpers on subclasses of API
if mcs.__module__ != attrs['__module__']:
from ..queries.base import Query, BoundAPIQuery
from ..caches import DefaultCache
from .parsers import Parser
cls._default_cache = DefaultCache
cls.Query = type('Query', (BoundAPIQuery, ), {'__module__': attrs['__module__'], 'API': cls})
cls.Parser = type('Parser', (Parser, ), {'__module__': attrs['__module__'], 'API': cls})
cls._supported_queries = {}
base_queries = deque(q for q in Query.__subclasses__() if not issubclass(q, BoundAPIQuery))
while base_queries:
base_query = base_queries.popleft()
base_queries.extend(q for q in base_query.__subclasses__() if not issubclass(q, BoundAPIQuery))
setattr(cls, base_query.__name__+'Base', type(base_query.__name__+'Base', (cls.Query, base_query, ),
{'__module__': attrs['__module__']}))
return cls
@property
def supported_queries(cls):
return frozenset(cls._supported_queries)
class API(SimpleSerializable, metaclass=MetaAPI):
"""
An API subclass is a collection of Query implementations used by different networks.
The instance of an API has a name and is usually a network.
To start a query on an API instance use its properties:
>>> api.stops.where(name='Essen')
This may raise a NotImplementedError if the API does not implement this Query.
"""
_model_to_query = {}
def __init__(self, name):
if self.__class__ == API:
raise TypeError('Only API subclasses can be initialized.')
if name in _apis_by_name:
raise TypeError('Duplicate API name: %s' % name)
self.name = name
_apis_by_name[name] = self
@classmethod
def _get_serialized_type_name(cls):
return 'api'
@classmethod
def _model_to_plural_name(cls, model):
name = model.__name__
return {'City': 'cities', 'POI': 'POIs', 'Address': 'addresses'}.get(name, name.lower()+'s')
pass
@classmethod
def _register_model(cls, model):
name = cls._model_to_plural_name(model)
error = 'Querying '+name+' is not supported by this API.'
def api_func(self):
return self._query(model, APIWithCache(self, self._default_cache()), error)
api_func.__name__ = name.lower()
def api_with_cache_func(self):
return self.api._query(model, self, error)
api_with_cache_func.__name__ = name.lower()
setattr(API, name.lower(), property(api_func))
setattr(APIWithCache, name.lower(), property(api_with_cache_func))
def start_model_query(self, model):
return getattr(self, self._model_to_plural_name(model))
def _simple_serialize(self):
return self.name
@classmethod
def _simple_unserialize(cls, data):
if data is None:
return None
try:
return _apis_by_name[data]
except:
raise ValueError('API %s does not exist!' % data)
def _query(self, model, api_with_cache, error):
query = self._supported_queries.get(model)
if query is None:
raise NotImplementedError(error)
return query(api_with_cache)
@classmethod
def _register_query(cls, query_cls):
if query_cls.Model in cls._supported_queries:
raise TypeError('Duplicate %sQuery on %s API.' % (query_cls.Model.__name__, query_cls.API.__name__))
cls._supported_queries[query_cls.Model] = query_cls
class APIWithCache:
def __init__(self, api, cache=None):
self.api = api
self.cache = cache | 0.797872 | 0.083553 |
import random
from django.shortcuts import render
# Create your views here.
from django.views import View
from django_redis import get_redis_connection
from celery_tasks.sms.tasks import send_sms_code
from meiduo_mall.libs.captcha.captcha import captcha
from meiduo_mall.libs.yuntongxun.ccp_sms import CCP
from meiduo_mall.utils.response_code import RETCODE
from . import const
from django import http
import logging
logger = logging.getLogger('django')
class SMSCodeView(View):
    """Send an SMS verification code after checking the image captcha."""

    def get(self, request, mobile):
        """Validate mobile + uuid + image captcha; on success send an SMS code.

        :param request: GET request carrying ``image_code`` and ``image_code_id``
        :param mobile: target phone number (captured from the URL)
        :return: JsonResponse with a RETCODE and message
        """
        # Connect to the redis instance holding verification codes.
        redis_conn = get_redis_connection('verify_code')
        # Throttle: refuse if an SMS was already sent to this number recently.
        flag = redis_conn.get('send_flag_%s' % mobile)
        if flag:
            return http.JsonResponse({'code': RETCODE.THROTTLINGERR,
                                      'errmsg': '发送短信过于频繁'})
        # Receive parameters.
        image_code_client = request.GET.get('image_code')
        uuid = request.GET.get('image_code_id')
        # Validate parameters.
        if not all([image_code_client, uuid]):
            return http.JsonResponse({'code': RETCODE.NECESSARYPARAMERR,
                                      'errmsg': '缺少必传参数'})
        # Fetch the stored image-captcha text for this uuid.
        image_code_server = redis_conn.get('img_%s' % uuid)
        if image_code_server is None:
            return http.JsonResponse({'code': RETCODE.IMAGECODEERR,
                                      'errmsg': '验证码过期'})
        # Delete the captcha so it can only be used once (best effort).
        try:
            redis_conn.delete('img_%s' % uuid)
        except Exception as e:
            logger.error(e)
        # Compare client value against the stored value, case-insensitively.
        if image_code_client.lower() != image_code_server.decode().lower():
            # BUGFIX: a mismatch used to be reported as '验证码过期' (expired);
            # it is actually a wrong captcha, not an expired one.
            return http.JsonResponse({'code': RETCODE.IMAGECODEERR,
                                      'errmsg': '验证码错误'})
        # Generate a random, zero-padded, 6-digit SMS code.
        sms_code = '%06d' % random.randint(0, 999999)
        logger.info(sms_code)
        # Store the code and the 60s throttle flag atomically via a pipeline.
        pl = redis_conn.pipeline()
        pl.setex('send_sms_%s' % mobile,
                 const.SMS_CODE_REDIS_EXPIRES,
                 sms_code)
        pl.setex('send_flag_%s' % mobile,
                 60,
                 1)
        pl.execute()
        # Send the SMS asynchronously through celery.
        send_sms_code.delay(mobile, sms_code)
        return http.JsonResponse({'code': RETCODE.OK,
                                  'errmsg': '发送成功'})
class ImageCodeView(View):
def get(self, request, uuid):
'''
生成图形验证码, 保存到redis中, 另外返回图片
:param request:
:param uuid:
:return:
'''
# 1.生成图形验证码
text, image = captcha.generate_captcha()
# 2.链接redis, 获取链接对象
redis_conn = get_redis_connection('verify_code')
# 3.利用链接对象, 保存数据到redis
# redis_conn.setex('key', 'expire', 'value')
redis_conn.setex('img_%s' % uuid, const.IMAGE_CODE_REDIS_EXPIRES, text)
# 4.返回(图片)
return http.HttpResponse(image, content_type='image/jpg') | meiduo_mall/meiduo_mall/apps/verifications/views.py | import random
from django.shortcuts import render
# Create your views here.
from django.views import View
from django_redis import get_redis_connection
from celery_tasks.sms.tasks import send_sms_code
from meiduo_mall.libs.captcha.captcha import captcha
from meiduo_mall.libs.yuntongxun.ccp_sms import CCP
from meiduo_mall.utils.response_code import RETCODE
from . import const
from django import http
import logging
logger = logging.getLogger('django')
class SMSCodeView(View):
def get(self, request, mobile):
'''
接收手机号+uuid+图形验证码, 进行验证, 如果通过,发送短信验证码
:param request:
:param mobile:
:return:
'''
# 3.链接redis
redis_conn = get_redis_connection('verify_code')
# 0. 从redis中取值:
flag = redis_conn.get('send_flag_%s' % mobile)
if flag:
return http.JsonResponse({'code': RETCODE.THROTTLINGERR,
'errmsg': '发送短信过于频繁'})
# 1.接收参数
image_code_client = request.GET.get('image_code')
uuid = request.GET.get('image_code_id')
# 2.校验参数
if not all([image_code_client, uuid]):
return http.JsonResponse({'code': RETCODE.NECESSARYPARAMERR,
'errmsg': '缺少必传参数'})
# 4.从redis中取出图形验证码
image_code_server = redis_conn.get('img_%s' % uuid)
if image_code_server is None:
return http.JsonResponse({'code': RETCODE.IMAGECODEERR,
'errmsg': '验证码过期'})
# 5.删除redis中的图形验证码
try:
redis_conn.delete('img_%s' % uuid)
except Exception as e:
logger.error(e)
# logger.info(e)
# 6.把 前端传入的和redis中的进行对比
if image_code_client.lower() != image_code_server.decode().lower():
return http.JsonResponse({'code': RETCODE.IMAGECODEERR,
'errmsg': '验证码过期'})
# 7.生成一个随机数, 作为短信验证码(6)
sms_code = '%06d' % random.randint(0, 999999)
logger.info(sms_code)
pl = redis_conn.pipeline()
# 8.往redis中存储
pl.setex('send_sms_%s' % mobile,
const.SMS_CODE_REDIS_EXPIRES,
sms_code)
pl.setex('send_flag_%s' % mobile,
60,
1)
# 指定管道:
pl.execute()
# 9.调用云通讯, 发送短信验证码
# CCP().send_template_sms(mobile, [sms_code, 5], 1)
send_sms_code.delay(mobile, sms_code)
# 10.返回结果(json)
return http.JsonResponse({'code': RETCODE.OK,
'errmsg': '发送成功'})
# else:
# return http.JsonResponse({'code': RETCODE.OK,
# 'errmsg': 'ok'})
class ImageCodeView(View):
def get(self, request, uuid):
'''
生成图形验证码, 保存到redis中, 另外返回图片
:param request:
:param uuid:
:return:
'''
# 1.生成图形验证码
text, image = captcha.generate_captcha()
# 2.链接redis, 获取链接对象
redis_conn = get_redis_connection('verify_code')
# 3.利用链接对象, 保存数据到redis
# redis_conn.setex('key', 'expire', 'value')
redis_conn.setex('img_%s' % uuid, const.IMAGE_CODE_REDIS_EXPIRES, text)
# 4.返回(图片)
return http.HttpResponse(image, content_type='image/jpg') | 0.240061 | 0.069352 |
from pathlib import Path
from typing import Tuple
from typing import Union
import numpy as np
import torch
from typeguard import check_argument_types
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.inversible_interface import InversibleInterface
class GlobalMVN(AbsNormalize, InversibleInterface):
"""Apply global mean and variance normalization
TODO(kamo): Make this class portable somehow
Args:
stats_file: npy file
norm_means: Apply mean normalization
norm_vars: Apply var normalization
eps:
"""
def __init__(
    self,
    stats_file: Union[Path, str],
    norm_means: bool = True,
    norm_vars: bool = True,
    eps: float = 1.0e-20,
):
    """Load precomputed global statistics and register mean/std buffers.

    Args:
        stats_file: ``.npy`` file with Kaldi-like stacked stats, or an
            ``.npz`` file with ``count`` / ``sum`` / ``sum_square`` arrays.
        norm_means: Apply mean normalization
        norm_vars: Apply var normalization
        eps: variance floor applied before taking the square root
    """
    assert check_argument_types()
    super().__init__()
    self.norm_means = norm_means
    self.norm_vars = norm_vars
    self.eps = eps
    stats_file = Path(stats_file)
    self.stats_file = stats_file
    stats = np.load(stats_file)
    if isinstance(stats, np.ndarray):
        # Kaldi like stats
        # assumes stats[0] = [sum..., frame_count] and
        # stats[1] = [sum_square..., <unused>] — TODO confirm layout.
        count = stats[0].flatten()[-1]
        mean = stats[0, :-1] / count
        var = stats[1, :-1] / count - mean * mean
    else:
        # New style: Npz file with explicit count/sum/sum_square entries.
        count = stats["count"]
        sum_v = stats["sum"]
        sum_square_v = stats["sum_square"]
        mean = sum_v / count
        var = sum_square_v / count - mean * mean
    # Floor the variance with eps so std never hits zero.
    std = np.sqrt(np.maximum(var, eps))
    # Buffers move with the module (.to()/.cuda()) and appear in state_dict.
    self.register_buffer("mean", torch.from_numpy(mean))
    self.register_buffer("std", torch.from_numpy(std))
def extra_repr(self):
    """Summary string shown inside this module's repr()."""
    return "stats_file={}, norm_means={}, norm_vars={}".format(
        self.stats_file, self.norm_means, self.norm_vars
    )
def forward(
    self, x: torch.Tensor, ilens: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Forward function

    Normalizes ``x`` with the preloaded global statistics and zeroes
    out positions beyond each sequence length.

    Args:
        x: (B, L, ...)
        ilens: (B,)
    """
    if ilens is None:
        # No lengths given: treat every sequence as spanning the full L axis.
        ilens = x.new_full([x.size(0)], x.size(1))
    norm_means = self.norm_means
    norm_vars = self.norm_vars
    # Move the stat buffers to x's device/dtype (no-op when already there).
    self.mean = self.mean.to(x.device, x.dtype)
    self.std = self.std.to(x.device, x.dtype)
    mask = make_pad_mask(ilens, x, 1)
    # feat: (B, T, D)
    if norm_means:
        if x.requires_grad:
            # Out-of-place subtraction keeps the autograd graph valid.
            x = x - self.mean
        else:
            # In-place subtraction avoids an extra allocation at inference.
            x -= self.mean
    # Zero the padded frames (mean subtraction made them non-zero).
    if x.requires_grad:
        x = x.masked_fill(mask, 0.0)
    else:
        x.masked_fill_(mask, 0.0)
    if norm_vars:
        # Dividing by std leaves masked zeros at zero, so no re-masking needed.
        x /= self.std
    return x, ilens
def inverse(
self, x: torch.Tensor, ilens: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
if ilens is None:
ilens = x.new_full([x.size(0)], x.size(1))
norm_means = self.norm_means
norm_vars = self.norm_vars
self.mean = self.mean.to(x.device, x.dtype)
self.std = self.std.to(x.device, x.dtype)
mask = make_pad_mask(ilens, x, 1)
if x.requires_grad:
x = x.masked_fill(mask, 0.0)
else:
x.masked_fill_(mask, 0.0)
if norm_vars:
x *= self.std
# feat: (B, T, D)
if norm_means:
x += self.mean
x.masked_fill_(make_pad_mask(ilens, x, 1), 0.0)
return x, ilens | espnet2/layers/global_mvn.py | from pathlib import Path
from typing import Tuple
from typing import Union
import numpy as np
import torch
from typeguard import check_argument_types
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.inversible_interface import InversibleInterface
class GlobalMVN(AbsNormalize, InversibleInterface):
"""Apply global mean and variance normalization
TODO(kamo): Make this class portable somehow
Args:
stats_file: npy file
norm_means: Apply mean normalization
norm_vars: Apply var normalization
eps:
"""
def __init__(
self,
stats_file: Union[Path, str],
norm_means: bool = True,
norm_vars: bool = True,
eps: float = 1.0e-20,
):
assert check_argument_types()
super().__init__()
self.norm_means = norm_means
self.norm_vars = norm_vars
self.eps = eps
stats_file = Path(stats_file)
self.stats_file = stats_file
stats = np.load(stats_file)
if isinstance(stats, np.ndarray):
# Kaldi like stats
count = stats[0].flatten()[-1]
mean = stats[0, :-1] / count
var = stats[1, :-1] / count - mean * mean
else:
# New style: Npz file
count = stats["count"]
sum_v = stats["sum"]
sum_square_v = stats["sum_square"]
mean = sum_v / count
var = sum_square_v / count - mean * mean
std = np.sqrt(np.maximum(var, eps))
self.register_buffer("mean", torch.from_numpy(mean))
self.register_buffer("std", torch.from_numpy(std))
def extra_repr(self):
return (
f"stats_file={self.stats_file}, "
f"norm_means={self.norm_means}, norm_vars={self.norm_vars}"
)
def forward(
self, x: torch.Tensor, ilens: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Forward function
Args:
x: (B, L, ...)
ilens: (B,)
"""
if ilens is None:
ilens = x.new_full([x.size(0)], x.size(1))
norm_means = self.norm_means
norm_vars = self.norm_vars
self.mean = self.mean.to(x.device, x.dtype)
self.std = self.std.to(x.device, x.dtype)
mask = make_pad_mask(ilens, x, 1)
# feat: (B, T, D)
if norm_means:
if x.requires_grad:
x = x - self.mean
else:
x -= self.mean
if x.requires_grad:
x = x.masked_fill(mask, 0.0)
else:
x.masked_fill_(mask, 0.0)
if norm_vars:
x /= self.std
return x, ilens
def inverse(
self, x: torch.Tensor, ilens: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
if ilens is None:
ilens = x.new_full([x.size(0)], x.size(1))
norm_means = self.norm_means
norm_vars = self.norm_vars
self.mean = self.mean.to(x.device, x.dtype)
self.std = self.std.to(x.device, x.dtype)
mask = make_pad_mask(ilens, x, 1)
if x.requires_grad:
x = x.masked_fill(mask, 0.0)
else:
x.masked_fill_(mask, 0.0)
if norm_vars:
x *= self.std
# feat: (B, T, D)
if norm_means:
x += self.mean
x.masked_fill_(make_pad_mask(ilens, x, 1), 0.0)
return x, ilens | 0.826747 | 0.570391 |
import json
from dataclasses import dataclass
from io import BytesIO
import pyarrow as pa
import pyarrow.json as paj
import datasets
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    # Optional explicit feature schema; converted to a pyarrow schema below.
    features: datasets.Features = None
    # If set, only this top-level field of each JSON file is read.
    field: str = None
    # The three options below are passed straight to pyarrow's JSON reader.
    use_threads: bool = True
    block_size: int = None
    newlines_in_values: bool = None

    @property
    def pa_read_options(self):
        # pyarrow read options derived from this config.
        return paj.ReadOptions(use_threads=self.use_threads, block_size=self.block_size)

    @property
    def pa_parse_options(self):
        # pyarrow parse options; explicit_schema is None when features unset.
        return paj.ParseOptions(explicit_schema=self.schema, newlines_in_values=self.newlines_in_values)

    @property
    def schema(self):
        # None lets pyarrow infer the schema from the data.
        return pa.schema(self.features.type) if self.features is not None else None
class Json(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = JsonConfig
def _info(self):
    # Dataset metadata: only the (optional) user-supplied features.
    return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
    """We handle string, list and dicts in datafiles"""
    if not self.config.data_files:
        raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
    data_files = dl_manager.download_and_extract(self.config.data_files)
    if isinstance(data_files, (str, list, tuple)):
        # A bare path or sequence of paths: everything becomes the train split.
        train_files = [data_files] if isinstance(data_files, str) else data_files
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": train_files})]
    # Otherwise a mapping of split name -> file(s); normalize each to a list.
    return [
        datasets.SplitGenerator(
            name=split_name,
            gen_kwargs={"files": [files] if isinstance(files, str) else files},
        )
        for split_name, files in data_files.items()
    ]
def _generate_tables(self, files):
for i, file in enumerate(files):
if self.config.field is not None:
with open(file, encoding="utf-8") as f:
dataset = json.load(f)
# We keep only the field we are interested in
dataset = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(dataset, (list, tuple)):
pa_table = paj.read_json(
BytesIO("\n".join(json.dumps(row) for row in dataset).encode("utf-8")),
read_options=self.config.pa_read_options,
parse_options=self.config.pa_parse_options,
)
else:
pa_table = pa.Table.from_pydict(mapping=dataset, schema=self.config.schema)
else:
try:
pa_table = paj.read_json(
file,
read_options=self.config.pa_read_options,
parse_options=self.config.pa_parse_options,
)
except pa.ArrowInvalid:
with open(file, encoding="utf-8") as f:
dataset = json.load(f)
raise ValueError(
f"Not able to read records in the JSON file at {file}. "
f"You should probably indicate the field of the JSON file containing your records. "
f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
f"Select the correct one and provide it as `field='XXX'` to the `load_dataset` method. "
)
yield i, pa_table | datasets/json/json.py |
import json
from dataclasses import dataclass
from io import BytesIO
import pyarrow as pa
import pyarrow.json as paj
import datasets
@dataclass
class JsonConfig(datasets.BuilderConfig):
"""BuilderConfig for JSON."""
features: datasets.Features = None
field: str = None
use_threads: bool = True
block_size: int = None
newlines_in_values: bool = None
@property
def pa_read_options(self):
return paj.ReadOptions(use_threads=self.use_threads, block_size=self.block_size)
@property
def pa_parse_options(self):
return paj.ParseOptions(explicit_schema=self.schema, newlines_in_values=self.newlines_in_values)
@property
def schema(self):
return pa.schema(self.features.type) if self.features is not None else None
class Json(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = JsonConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _generate_tables(self, files):
for i, file in enumerate(files):
if self.config.field is not None:
with open(file, encoding="utf-8") as f:
dataset = json.load(f)
# We keep only the field we are interested in
dataset = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(dataset, (list, tuple)):
pa_table = paj.read_json(
BytesIO("\n".join(json.dumps(row) for row in dataset).encode("utf-8")),
read_options=self.config.pa_read_options,
parse_options=self.config.pa_parse_options,
)
else:
pa_table = pa.Table.from_pydict(mapping=dataset, schema=self.config.schema)
else:
try:
pa_table = paj.read_json(
file,
read_options=self.config.pa_read_options,
parse_options=self.config.pa_parse_options,
)
except pa.ArrowInvalid:
with open(file, encoding="utf-8") as f:
dataset = json.load(f)
raise ValueError(
f"Not able to read records in the JSON file at {file}. "
f"You should probably indicate the field of the JSON file containing your records. "
f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
f"Select the correct one and provide it as `field='XXX'` to the `load_dataset` method. "
)
yield i, pa_table | 0.655446 | 0.283347 |
import os
import unittest
import time
from mlcomp.persist import StorageGroup, Storage
from mlcomp.utils import TemporaryDirectory
class StorageGroupTestCase(unittest.TestCase):
def test_match_name(self):
with TemporaryDirectory() as tempdir:
sg = StorageGroup(tempdir)
# test create
s0 = sg.create_storage()
self.assertTrue('__' not in s0.name)
s1 = sg.create_storage(hostname='host1')
self.assertTrue(s1.name.endswith('__host1'))
s1_2 = sg.create_storage(hostname='host1')
self.assertTrue(s1_2.name.endswith('__host1'))
self.assertNotEquals(s1.name, s1_2.name)
time.sleep(0.01)
s2 = sg.create_storage('abc', hostname='host1')
self.assertTrue(s2.name.startswith('abc__'))
self.assertTrue(s2.name.endswith('__host1'))
time.sleep(0.01)
s3 = sg.create_storage(hostname='host__2')
self.assertTrue(s3.name.endswith('__host_2'))
time.sleep(0.01)
s4 = sg.create_storage('abc', hostname='host__2')
self.assertTrue(s4.name.startswith('abc__'))
self.assertTrue(s4.name.endswith('__host_2'))
time.sleep(0.01)
sbad = Storage(os.path.join(tempdir, 'badname'), mode='create')
self.assertEqual(sbad.name, 'badname')
# test match
f = lambda *args, **kwargs: (
sorted(str(s) for s in sg.iter_storage(*args, **kwargs))
)
self.assertEqual(
f(),
[s0.name, s1.name, s1_2.name, s3.name, s2.name, s4.name]
)
self.assertEqual(
f(well_defined=False),
[s0.name, s1.name, s1_2.name, s3.name, s2.name, s4.name,
sbad.name]
)
self.assertEqual(
f(hostname='host1'),
[s1.name, s1_2.name, s2.name]
)
self.assertEqual(
f(hostname='host1', well_defined=False),
[s1.name, s1_2.name, s2.name]
)
self.assertEqual(
f(hostname='host__2'),
[s3.name, s4.name]
)
self.assertEqual(
f(hostname='host3'),
[]
)
# test find latest
self.assertEqual(sg.open_latest_storage().name, s4.name)
self.assertEqual(sg.open_latest_storage('host1').name, s2.name)
self.assertEqual(sg.open_latest_storage('host__2').name, s4.name)
self.assertIsNone(sg.open_latest_storage('host3')) | tests/persist/test_storage_group.py | import os
import unittest
import time
from mlcomp.persist import StorageGroup, Storage
from mlcomp.utils import TemporaryDirectory
class StorageGroupTestCase(unittest.TestCase):
def test_match_name(self):
with TemporaryDirectory() as tempdir:
sg = StorageGroup(tempdir)
# test create
s0 = sg.create_storage()
self.assertTrue('__' not in s0.name)
s1 = sg.create_storage(hostname='host1')
self.assertTrue(s1.name.endswith('__host1'))
s1_2 = sg.create_storage(hostname='host1')
self.assertTrue(s1_2.name.endswith('__host1'))
self.assertNotEquals(s1.name, s1_2.name)
time.sleep(0.01)
s2 = sg.create_storage('abc', hostname='host1')
self.assertTrue(s2.name.startswith('abc__'))
self.assertTrue(s2.name.endswith('__host1'))
time.sleep(0.01)
s3 = sg.create_storage(hostname='host__2')
self.assertTrue(s3.name.endswith('__host_2'))
time.sleep(0.01)
s4 = sg.create_storage('abc', hostname='host__2')
self.assertTrue(s4.name.startswith('abc__'))
self.assertTrue(s4.name.endswith('__host_2'))
time.sleep(0.01)
sbad = Storage(os.path.join(tempdir, 'badname'), mode='create')
self.assertEqual(sbad.name, 'badname')
# test match
f = lambda *args, **kwargs: (
sorted(str(s) for s in sg.iter_storage(*args, **kwargs))
)
self.assertEqual(
f(),
[s0.name, s1.name, s1_2.name, s3.name, s2.name, s4.name]
)
self.assertEqual(
f(well_defined=False),
[s0.name, s1.name, s1_2.name, s3.name, s2.name, s4.name,
sbad.name]
)
self.assertEqual(
f(hostname='host1'),
[s1.name, s1_2.name, s2.name]
)
self.assertEqual(
f(hostname='host1', well_defined=False),
[s1.name, s1_2.name, s2.name]
)
self.assertEqual(
f(hostname='host__2'),
[s3.name, s4.name]
)
self.assertEqual(
f(hostname='host3'),
[]
)
# test find latest
self.assertEqual(sg.open_latest_storage().name, s4.name)
self.assertEqual(sg.open_latest_storage('host1').name, s2.name)
self.assertEqual(sg.open_latest_storage('host__2').name, s4.name)
self.assertIsNone(sg.open_latest_storage('host3')) | 0.385143 | 0.397529 |
import re
from base_preprocessors import BaseReader
from utils import parse_url, flatten, tidy_dict
class OpenSearchReader(BaseReader):
_service_descriptors = {
"title": "/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}ShortName",
"abstract": ["/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}LongName",
"/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}Description"],
"source": "/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}Attribution",
"contact": "/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}Developer",
"rights": "/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}SyndicationRight",
"subject": "/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}Tags"
}
_to_exclude = [
"/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}Url",
"/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}Language/"
]
_parameter_formats = {
"geo:box": "west, south, east, north",
"time:start": "YYYY-MM-DDTHH:mm:ssZ",
"time:stop": "YYYY-MM-DDTHH:mm:ssZ"
}
def return_exclude_descriptors(self):
    """Return every descriptor/exclusion xpath with its leading '/' stripped."""
    paths = flatten(self._service_descriptors.values()) + self._to_exclude
    return [path[1:] for path in paths]
def parse_endpoints(self):
    '''
    Extract the OSDD <Url> elements as endpoint dicts: HTTP method,
    URL template, the parsed template parameters, and an "actionable"
    flag (0 when the element has no ``rel`` attribute, 2 when it does).
    '''
    urls = self.parser.find("/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
                            "{http://a9.com/-/spec/opensearch/1.1/}Url")
    endpoints = [
        tidy_dict({
            "protocol": self._remap_http_method(url.get('type', '')),
            "url": url.get('template', ''),
            "parameters": self._extract_url_parameters(url.get('template', '')),
            # NOTE(review): 0 = no 'rel' attribute, 2 = has one — confirm the
            # intended meaning of this scale against the consumer.
            "actionable": 0 if 'rel' not in url.attrib.keys() else 2
        }) for url in urls
    ]
    return endpoints
def _extract_parameter_type(self, param):
    '''
    return prefix, type from a string as prefix:type or {prefix:type}
    as tuple (prefix, type); ('', param) when there is no prefix or the
    value cannot be split.
    '''
    # BUGFIX: raw string — the original non-raw '\{...' relied on Python
    # passing invalid escape sequences through verbatim, which has been a
    # DeprecationWarning since 3.6 and an error in newer versions.
    pattern = r'\{{0,1}(\S*):([\S][^}]*)'
    # TODO: this is probably a bad assumption (that there's just the
    # one item in the list, not that urlparse returns the terms as a list)
    if isinstance(param, list):
        param = param[0]
    if ':' not in param:
        return ('', param)
    m = re.search(pattern, param)
    if m is None:
        # Defensive: a ':' the pattern cannot split (e.g. a trailing ':')
        # used to raise AttributeError on m.groups().
        return ('', param)
    return m.groups()
def _extract_url_parameters(self, url):
'''
strip out the osdd url parameters
note: not always emitted correctly as param={thing?}. could also be param=thing
except the param=thing is probably a hardcoded term SO HOW DO WE MANAGE THAT?
TODO: manage that (ex: ?product=MOD021QA&collection={mp:collection?})
tuple: (parameter name, namespace(s), param namespace prefix, param type, format)
'''
assert url, 'No URL'
query_params = parse_url(url)
# deal with the namespaced parameters as [query param key, prefix, type]
query_params = [[k] + list(self._extract_parameter_type(v)) for k, v
in query_params.iteritems()]
return [
tidy_dict({
"name": qp[0],
"namespaces": self.parser._namespaces,
"prefix": qp[1],
"type": qp[2],
"format": self._parameter_formats.get(':'.join(qp[1:]))
})
for qp in query_params
] | demo/dlib/opensearch_preprocessors.py | import re
from base_preprocessors import BaseReader
from utils import parse_url, flatten, tidy_dict
class OpenSearchReader(BaseReader):
_service_descriptors = {
"title": "/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}ShortName",
"abstract": ["/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}LongName",
"/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}Description"],
"source": "/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}Attribution",
"contact": "/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}Developer",
"rights": "/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}SyndicationRight",
"subject": "/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}Tags"
}
_to_exclude = [
"/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}Url",
"/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}Language/"
]
_parameter_formats = {
"geo:box": "west, south, east, north",
"time:start": "YYYY-MM-DDTHH:mm:ssZ",
"time:stop": "YYYY-MM-DDTHH:mm:ssZ"
}
def return_exclude_descriptors(self):
return [e[1:] for e in (flatten(self._service_descriptors.values()) + self._to_exclude)]
def parse_endpoints(self):
'''
'''
urls = self.parser.find("/{http://a9.com/-/spec/opensearch/1.1/}OpenSearchDescription/" +
"{http://a9.com/-/spec/opensearch/1.1/}Url")
endpoints = [
tidy_dict({
"protocol": self._remap_http_method(url.get('type', '')),
"url": url.get('template', ''),
"parameters": self._extract_url_parameters(url.get('template', '')),
"actionable": 0 if 'rel' not in url.attrib.keys() else 2
}) for url in urls
]
return endpoints
def _extract_parameter_type(self, param):
'''
return prefix, type from a string as prefix:type or {prefix:type}
as tuple (prefix, type)
'''
pattern = '\{{0,1}(\S*):([\S][^}]*)'
# TODO: this is probably a bad assumption (that there's just the
# one item in the list, not that urlparse returns the terms as a list)
if isinstance(param, list):
param = param[0]
if ':' not in param:
return ('', param)
m = re.search(pattern, param)
return m.groups()
def _extract_url_parameters(self, url):
'''
strip out the osdd url parameters
note: not always emitted correctly as param={thing?}. could also be param=thing
except the param=thing is probably a hardcoded term SO HOW DO WE MANAGE THAT?
TODO: manage that (ex: ?product=MOD021QA&collection={mp:collection?})
tuple: (parameter name, namespace(s), param namespace prefix, param type, format)
'''
assert url, 'No URL'
query_params = parse_url(url)
# deal with the namespaced parameters as [query param key, prefix, type]
query_params = [[k] + list(self._extract_parameter_type(v)) for k, v
in query_params.iteritems()]
return [
tidy_dict({
"name": qp[0],
"namespaces": self.parser._namespaces,
"prefix": qp[1],
"type": qp[2],
"format": self._parameter_formats.get(':'.join(qp[1:]))
})
for qp in query_params
] | 0.317003 | 0.258607 |
import os
class DefaultConfig(object):
"""Default configuration holder."""
# ecosystem/language to work with
ecosystem = 'python'
# max age of a CVE, in days; older CVEs will be ignored.
# 0 = disable this option and process all CVEs
cve_age = 0
# location of the default NVD JSON feeds
feed_dir = 'nvd-data/'
# path to the default NVD JSON feed
feed_names = None
# range of CVES to process, all other CVEs will be ignored
date_range = None
# ID of a CVE to process, all other CVEs will be ignored
cve_id = None
# package name, requires also cve_id to be set
package_name = None
# location of the cpe2pkg Jar file
cpe2pkg_path = 'cpe2pkg.jar'
# directory where to find files containing package names
pkgfile_dir = 'data/'
# whether or not to use nvd-toolkit
use_nvdtoolkit = False
# directory where to find pretrained classifier for the nvd-toolkit
nvdtoolkit_export_dir = 'export/'
class RuntimeConfig(object):
"""Runtime configuration holder."""
def __init__(self):
"""Constructor."""
self._config = DefaultConfig()
ecosystem = os.environ.get('CVEJOB_ECOSYSTEM')
if ecosystem is not None:
self._config.ecosystem = ecosystem
cve_age = os.environ.get('CVEJOB_CVE_AGE')
if cve_age is not None:
self._config.cve_age = int(cve_age)
feed_dir = os.environ.get('CVEJOB_FEED_DIR')
if feed_dir is not None:
self._config.feed_dir = feed_dir
feed_names = os.environ.get('CVEJOB_FEED_NAMES')
if feed_names is not None:
self._config.feed_names = feed_names.split(',')
date_range = os.environ.get('CVEJOB_DATE_RANGE')
if date_range is not None:
self._config.date_range = date_range
cve_id = os.environ.get('CVEJOB_CVE_ID')
if cve_id is not None:
self._config.cve_id = cve_id
package_name = os.environ.get('CVEJOB_PACKAGE_NAME')
if package_name is not None:
self._config.package_name = package_name
cpe2pkg_path = os.environ.get('CVEJOB_CPE2PKG_PATH')
if cpe2pkg_path is not None:
self._config.cpe2pkg_path = cpe2pkg_path
pkgfile_dir = os.environ.get('CVEJOB_PKGFILE_DIR')
if pkgfile_dir is not None:
self._config.pkgfile_dir = pkgfile_dir
use_nvdtoolkit = os.environ.get('CVEJOB_USE_NVD_TOOLKIT')
if use_nvdtoolkit is not None:
self._config.use_nvdtoolkit = use_nvdtoolkit.lower() in ('true', '1', 'yes')
nvdtoolkit_export_dir = os.environ.get('CVEJOB_NVD_TOOLKIT_EXPORT_DIR')
if nvdtoolkit_export_dir is not None:
self._config.nvdtoolkit_export_dir = nvdtoolkit_export_dir
def __getattr__(self, item):
"""Get attribute."""
return getattr(self._config, item)
Config = RuntimeConfig() | cvejob/config.py |
import os
class DefaultConfig(object):
"""Default configuration holder."""
# ecosystem/language to work with
ecosystem = 'python'
# max age of a CVE, in days; older CVEs will be ignored.
# 0 = disable this option and process all CVEs
cve_age = 0
# location of the default NVD JSON feeds
feed_dir = 'nvd-data/'
# path to the default NVD JSON feed
feed_names = None
# range of CVES to process, all other CVEs will be ignored
date_range = None
# ID of a CVE to process, all other CVEs will be ignored
cve_id = None
# package name, requires also cve_id to be set
package_name = None
# location of the cpe2pkg Jar file
cpe2pkg_path = 'cpe2pkg.jar'
# directory where to find files containing package names
pkgfile_dir = 'data/'
# whether or not to use nvd-toolkit
use_nvdtoolkit = False
# directory where to find pretrained classifier for the nvd-toolkit
nvdtoolkit_export_dir = 'export/'
class RuntimeConfig(object):
"""Runtime configuration holder."""
def __init__(self):
"""Constructor."""
self._config = DefaultConfig()
ecosystem = os.environ.get('CVEJOB_ECOSYSTEM')
if ecosystem is not None:
self._config.ecosystem = ecosystem
cve_age = os.environ.get('CVEJOB_CVE_AGE')
if cve_age is not None:
self._config.cve_age = int(cve_age)
feed_dir = os.environ.get('CVEJOB_FEED_DIR')
if feed_dir is not None:
self._config.feed_dir = feed_dir
feed_names = os.environ.get('CVEJOB_FEED_NAMES')
if feed_names is not None:
self._config.feed_names = feed_names.split(',')
date_range = os.environ.get('CVEJOB_DATE_RANGE')
if date_range is not None:
self._config.date_range = date_range
cve_id = os.environ.get('CVEJOB_CVE_ID')
if cve_id is not None:
self._config.cve_id = cve_id
package_name = os.environ.get('CVEJOB_PACKAGE_NAME')
if package_name is not None:
self._config.package_name = package_name
cpe2pkg_path = os.environ.get('CVEJOB_CPE2PKG_PATH')
if cpe2pkg_path is not None:
self._config.cpe2pkg_path = cpe2pkg_path
pkgfile_dir = os.environ.get('CVEJOB_PKGFILE_DIR')
if pkgfile_dir is not None:
self._config.pkgfile_dir = pkgfile_dir
use_nvdtoolkit = os.environ.get('CVEJOB_USE_NVD_TOOLKIT')
if use_nvdtoolkit is not None:
self._config.use_nvdtoolkit = use_nvdtoolkit.lower() in ('true', '1', 'yes')
nvdtoolkit_export_dir = os.environ.get('CVEJOB_NVD_TOOLKIT_EXPORT_DIR')
if nvdtoolkit_export_dir is not None:
self._config.nvdtoolkit_export_dir = nvdtoolkit_export_dir
def __getattr__(self, item):
"""Get attribute."""
return getattr(self._config, item)
Config = RuntimeConfig() | 0.569733 | 0.074905 |
import re, pandas as pd
import os
from srcT.DataStruct.Code import Code
from srcT.Common import ConfigFile as CF, Helper as H
#region: Read error-IDs (post creation)
def getAllErrs(fname):
allErrs = {}
if not os.path.exists(fname):
saveAllErrs(allErrs)
df = pd.read_csv(fname, index_col=False)
for i, row in df.iterrows():
allErrs[row['msg']] = row['id']
return allErrs
def saveAllErrs(allErrs):
data = []
for msg in allErrs:
iden = allErrs[msg]
data.append((iden, msg))
df = pd.DataFrame(data=data, columns=['id', 'msg'])
df.to_csv(CF.fname_newErrIDs, index=False)
def replIDs(msg):
msg = re.sub(r'\'(.*?)\'', 'ID', msg)
msg = re.sub('\d+', 'NUM', msg)
return msg
def getErrIDs(allErrs, codeObj, lineNum=None):
eids = []
for ce in codeObj.getSevereErrors():
if lineNum is None or ce.line == lineNum: # If filter by lineNum
msg = replIDs(ce.msg)
if msg not in allErrs:
allErrs[msg] = len(allErrs)+1
saveAllErrs(allErrs)
eids.append(allErrs[msg])
return sorted(eids)
def getErrSet(allErrs, codeObj, lineNum=None):
eids = getErrIDs(allErrs, codeObj, lineNum)
return set(eids)
def getErrSetStr(allErrs, codeObj, lineNum=None):
errSet = getErrSet(allErrs, codeObj, lineNum)
return H.joinList(errSet, ';') + ';'
#endregion
#region: Create Error IDs for first time use
def createClass(fnameDataset):
'''Given a dataset (CSV) file, replace old error-IDs (obtained using regex) with new ones (obtained using Clang LLVM)'''
df = pd.read_csv(fnameDataset, encoding = "ISO-8859-1")
allErrs = getAllErrs(CF.fname_newErrIDs)
classes, classesRepeat, newErrSets = [], [], []
mult = 10
for i, row in df.iterrows():
oldClass = row['errSet_diffs']
codeObj = Code(row['sourceText'])
newErrsetStr = getErrSetStr(allErrs, codeObj)
newClass = newErrsetStr +'\n'+ H.joinList(oldClass.splitlines()[1:])
newErrSets.append(newErrsetStr)
classes.append(newClass)
if i >= len(df)*mult/100:
print(str(mult) +'%', end=' ', flush=True)
mult += 10
df['class'] = classes
df['newErrSet'] = newErrSets
df.to_csv(fnameDataset, index=False)
#endregion
#region: Main
if __name__=='__main__':
print('Creating Training-Set Classes')
createClass(CF.fnameSingleL_Train)
print('\nCreating Testing-Set Classes')
createClass(CF.fnameSingleL_Test)
#endregion | srcT/DataStruct/ClusterError.py | import re, pandas as pd
import os
from srcT.DataStruct.Code import Code
from srcT.Common import ConfigFile as CF, Helper as H
#region: Read error-IDs (post creation)
def getAllErrs(fname):
allErrs = {}
if not os.path.exists(fname):
saveAllErrs(allErrs)
df = pd.read_csv(fname, index_col=False)
for i, row in df.iterrows():
allErrs[row['msg']] = row['id']
return allErrs
def saveAllErrs(allErrs):
data = []
for msg in allErrs:
iden = allErrs[msg]
data.append((iden, msg))
df = pd.DataFrame(data=data, columns=['id', 'msg'])
df.to_csv(CF.fname_newErrIDs, index=False)
def replIDs(msg):
msg = re.sub(r'\'(.*?)\'', 'ID', msg)
msg = re.sub('\d+', 'NUM', msg)
return msg
def getErrIDs(allErrs, codeObj, lineNum=None):
eids = []
for ce in codeObj.getSevereErrors():
if lineNum is None or ce.line == lineNum: # If filter by lineNum
msg = replIDs(ce.msg)
if msg not in allErrs:
allErrs[msg] = len(allErrs)+1
saveAllErrs(allErrs)
eids.append(allErrs[msg])
return sorted(eids)
def getErrSet(allErrs, codeObj, lineNum=None):
eids = getErrIDs(allErrs, codeObj, lineNum)
return set(eids)
def getErrSetStr(allErrs, codeObj, lineNum=None):
errSet = getErrSet(allErrs, codeObj, lineNum)
return H.joinList(errSet, ';') + ';'
#endregion
#region: Create Error IDs for first time use
def createClass(fnameDataset):
'''Given a dataset (CSV) file, replace old error-IDs (obtained using regex) with new ones (obtained using Clang LLVM)'''
df = pd.read_csv(fnameDataset, encoding = "ISO-8859-1")
allErrs = getAllErrs(CF.fname_newErrIDs)
classes, classesRepeat, newErrSets = [], [], []
mult = 10
for i, row in df.iterrows():
oldClass = row['errSet_diffs']
codeObj = Code(row['sourceText'])
newErrsetStr = getErrSetStr(allErrs, codeObj)
newClass = newErrsetStr +'\n'+ H.joinList(oldClass.splitlines()[1:])
newErrSets.append(newErrsetStr)
classes.append(newClass)
if i >= len(df)*mult/100:
print(str(mult) +'%', end=' ', flush=True)
mult += 10
df['class'] = classes
df['newErrSet'] = newErrSets
df.to_csv(fnameDataset, index=False)
#endregion
#region: Main
if __name__=='__main__':
print('Creating Training-Set Classes')
createClass(CF.fnameSingleL_Train)
print('\nCreating Testing-Set Classes')
createClass(CF.fnameSingleL_Test)
#endregion | 0.235108 | 0.072145 |
from os import path
from Malt.GL import *
from Malt.Pipeline import Pipeline
from Malt.Mesh import Mesh
from Malt.Shader import Shader
from Malt.Texture import Texture
from Malt.RenderTarget import RenderTarget
from Malt.Parameter import Parameter
from Malt.UBO import UBO
from Malt.Render import Common
from Malt.Render import Lighting
from Malt.Render import Line
from Malt.Render import Sampling
_NPR_Pipeline_Common='''
#version 410 core
#extension GL_ARB_shading_language_include : enable
#include "Pipelines/NPR_Pipeline.glsl"
'''
_obj_composite_depth='''
#version 410 core
#extension GL_ARB_shading_language_include : enable
#include "Common.glsl"
#ifdef VERTEX_SHADER
void main()
{
DEFAULT_VERTEX_SHADER();
}
#endif
#ifdef PIXEL_SHADER
layout (location = 0) out float OUT_DEPTH;
void main()
{
OUT_DEPTH = -transform_point(CAMERA, POSITION).z;
}
#endif
'''
class PipelineTest(Pipeline):
def __init__(self):
super().__init__()
self.sampling_grid_size = 2
self.parameters.scene['Preview Samples'] = GLUniform(-1, GL.GL_INT, 4)
self.parameters.scene['Render Samples'] = GLUniform(-1, GL.GL_INT, 8)
self.parameters.scene['Line Width Max'] = GLUniform(-1, GL.GL_INT, 10)
self.parameters.world['Background Color'] = GLUniform(-1, GL_FLOAT_VEC4, (0.5,0.5,0.5,1))
self.default_shader = self.compile_material_from_source('') #Empty source will force defaults
self.composite_depth_shader = self.compile_shader_from_source(_obj_composite_depth)
self.common_buffer = Common.CommonBuffer()
self.lights_buffer = Lighting.LightsBuffer()
self.line_rendering = Line.LineRendering()
def compile_material_from_source(self, source, include_paths=[]):
source = _NPR_Pipeline_Common + source
return {
'PRE_PASS' : self.compile_shader_from_source(
source, 'COMMON_VERTEX_SHADER', 'PRE_PASS_PIXEL_SHADER', include_paths, ['PRE_PASS']
),
'MAIN_PASS' : self.compile_shader_from_source(
source, 'COMMON_VERTEX_SHADER', 'MAIN_PASS_PIXEL_SHADER', include_paths, ['MAIN_PASS']
)
}
def setup_render_targets(self, resolution):
self.t_depth = Texture(resolution, GL_DEPTH_COMPONENT32F)
self.t_prepass_normal_depth = Texture(resolution, GL_RGBA32F)
self.t_prepass_id = Texture(resolution, GL_R32F)
self.fbo_prepass = RenderTarget([self.t_prepass_normal_depth, self.t_prepass_id], self.t_depth)
self.t_main_color = Texture(resolution, GL_RGB32F)
self.t_line_color = Texture(resolution, GL_RGB32F)
self.t_line_data = Texture(resolution, GL_RGB32F)
self.fbo_main = RenderTarget([self.t_main_color, self.t_line_color, self.t_line_data], self.t_depth)
self.t_color_accumulate = Texture(resolution, GL_RGB32F)
self.fbo_accumulate = RenderTarget([self.t_color_accumulate])
self.t_composite_depth = Texture(resolution, GL_R32F)
self.fbo_composite_depth = RenderTarget([self.t_composite_depth], self.t_depth)
def get_samples(self):
return Sampling.get_RGSS_samples(self.sampling_grid_size)
def do_render(self, resolution, scene, is_final_render, is_new_frame):
#SETUP SAMPLING
if is_final_render:
self.sampling_grid_size = scene.parameters['Render Samples'][0]
else:
self.sampling_grid_size = scene.parameters['Preview Samples'][0]
sample_offset = self.get_samples()[self.sample_count]
#SETUP UNIFORM BLOCKS
self.common_buffer.load(scene, resolution, sample_offset, self.sample_count)
self.lights_buffer.load(scene)
UBOS = {
'COMMON_UNIFORMS' : self.common_buffer,
'SCENE_LIGHTS' : self.lights_buffer
}
#SETUP PER-OBJECT PARAMETERS
for i, obj in enumerate(scene.objects):
obj.parameters['MODEL'] = obj.matrix
obj.parameters['ID'] = i+1
#PRE-PASS
self.fbo_prepass.clear([(0,0,1,1), 0], 1, 0)
self.draw_scene_pass(self.fbo_prepass, scene.objects, 'PRE_PASS', self.default_shader['PRE_PASS'], UBOS)
#MAIN-PASS
textures = {
'IN_NORMAL_DEPTH': self.t_prepass_normal_depth,
'IN_ID': self.t_prepass_id,
}
self.fbo_main.clear([scene.world_parameters['Background Color'], (0,0,0,0), (-1,-1,-1,-1)])
self.draw_scene_pass(self.fbo_main, scene.objects, 'MAIN_PASS', self.default_shader['MAIN_PASS'], UBOS, {}, textures)
composited_line = self.line_rendering.composite_line(
scene.parameters['Line Width Max'], self, self.common_buffer,
self.t_main_color, self.t_depth, self.t_prepass_id, self.t_line_color, self.t_line_data)
# TEMPORAL SUPER-SAMPLING ACCUMULATION
# TODO: Should accumulate in display space ???
# https://therealmjp.github.io/posts/msaa-overview/#working-with-hdr-and-tone-mapping
self.blend_texture(composited_line, self.fbo_accumulate, 1.0 / (self.sample_count + 1))
#COMPOSITE DEPTH
if is_final_render:
self.fbo_composite_depth.clear((10.0e+32,1.0,1.0,1.0))
self.draw_scene_pass(self.fbo_composite_depth, scene.objects, None, self.composite_depth_shader, UBOS)
return {
'COLOR' : self.t_color_accumulate,
'DEPTH' : self.t_composite_depth,
} | BlenderMalt/MaltPath/Malt/PipelineTest.py |
from os import path
from Malt.GL import *
from Malt.Pipeline import Pipeline
from Malt.Mesh import Mesh
from Malt.Shader import Shader
from Malt.Texture import Texture
from Malt.RenderTarget import RenderTarget
from Malt.Parameter import Parameter
from Malt.UBO import UBO
from Malt.Render import Common
from Malt.Render import Lighting
from Malt.Render import Line
from Malt.Render import Sampling
_NPR_Pipeline_Common='''
#version 410 core
#extension GL_ARB_shading_language_include : enable
#include "Pipelines/NPR_Pipeline.glsl"
'''
_obj_composite_depth='''
#version 410 core
#extension GL_ARB_shading_language_include : enable
#include "Common.glsl"
#ifdef VERTEX_SHADER
void main()
{
DEFAULT_VERTEX_SHADER();
}
#endif
#ifdef PIXEL_SHADER
layout (location = 0) out float OUT_DEPTH;
void main()
{
OUT_DEPTH = -transform_point(CAMERA, POSITION).z;
}
#endif
'''
class PipelineTest(Pipeline):
def __init__(self):
super().__init__()
self.sampling_grid_size = 2
self.parameters.scene['Preview Samples'] = GLUniform(-1, GL.GL_INT, 4)
self.parameters.scene['Render Samples'] = GLUniform(-1, GL.GL_INT, 8)
self.parameters.scene['Line Width Max'] = GLUniform(-1, GL.GL_INT, 10)
self.parameters.world['Background Color'] = GLUniform(-1, GL_FLOAT_VEC4, (0.5,0.5,0.5,1))
self.default_shader = self.compile_material_from_source('') #Empty source will force defaults
self.composite_depth_shader = self.compile_shader_from_source(_obj_composite_depth)
self.common_buffer = Common.CommonBuffer()
self.lights_buffer = Lighting.LightsBuffer()
self.line_rendering = Line.LineRendering()
def compile_material_from_source(self, source, include_paths=[]):
source = _NPR_Pipeline_Common + source
return {
'PRE_PASS' : self.compile_shader_from_source(
source, 'COMMON_VERTEX_SHADER', 'PRE_PASS_PIXEL_SHADER', include_paths, ['PRE_PASS']
),
'MAIN_PASS' : self.compile_shader_from_source(
source, 'COMMON_VERTEX_SHADER', 'MAIN_PASS_PIXEL_SHADER', include_paths, ['MAIN_PASS']
)
}
def setup_render_targets(self, resolution):
self.t_depth = Texture(resolution, GL_DEPTH_COMPONENT32F)
self.t_prepass_normal_depth = Texture(resolution, GL_RGBA32F)
self.t_prepass_id = Texture(resolution, GL_R32F)
self.fbo_prepass = RenderTarget([self.t_prepass_normal_depth, self.t_prepass_id], self.t_depth)
self.t_main_color = Texture(resolution, GL_RGB32F)
self.t_line_color = Texture(resolution, GL_RGB32F)
self.t_line_data = Texture(resolution, GL_RGB32F)
self.fbo_main = RenderTarget([self.t_main_color, self.t_line_color, self.t_line_data], self.t_depth)
self.t_color_accumulate = Texture(resolution, GL_RGB32F)
self.fbo_accumulate = RenderTarget([self.t_color_accumulate])
self.t_composite_depth = Texture(resolution, GL_R32F)
self.fbo_composite_depth = RenderTarget([self.t_composite_depth], self.t_depth)
def get_samples(self):
return Sampling.get_RGSS_samples(self.sampling_grid_size)
def do_render(self, resolution, scene, is_final_render, is_new_frame):
#SETUP SAMPLING
if is_final_render:
self.sampling_grid_size = scene.parameters['Render Samples'][0]
else:
self.sampling_grid_size = scene.parameters['Preview Samples'][0]
sample_offset = self.get_samples()[self.sample_count]
#SETUP UNIFORM BLOCKS
self.common_buffer.load(scene, resolution, sample_offset, self.sample_count)
self.lights_buffer.load(scene)
UBOS = {
'COMMON_UNIFORMS' : self.common_buffer,
'SCENE_LIGHTS' : self.lights_buffer
}
#SETUP PER-OBJECT PARAMETERS
for i, obj in enumerate(scene.objects):
obj.parameters['MODEL'] = obj.matrix
obj.parameters['ID'] = i+1
#PRE-PASS
self.fbo_prepass.clear([(0,0,1,1), 0], 1, 0)
self.draw_scene_pass(self.fbo_prepass, scene.objects, 'PRE_PASS', self.default_shader['PRE_PASS'], UBOS)
#MAIN-PASS
textures = {
'IN_NORMAL_DEPTH': self.t_prepass_normal_depth,
'IN_ID': self.t_prepass_id,
}
self.fbo_main.clear([scene.world_parameters['Background Color'], (0,0,0,0), (-1,-1,-1,-1)])
self.draw_scene_pass(self.fbo_main, scene.objects, 'MAIN_PASS', self.default_shader['MAIN_PASS'], UBOS, {}, textures)
composited_line = self.line_rendering.composite_line(
scene.parameters['Line Width Max'], self, self.common_buffer,
self.t_main_color, self.t_depth, self.t_prepass_id, self.t_line_color, self.t_line_data)
# TEMPORAL SUPER-SAMPLING ACCUMULATION
# TODO: Should accumulate in display space ???
# https://therealmjp.github.io/posts/msaa-overview/#working-with-hdr-and-tone-mapping
self.blend_texture(composited_line, self.fbo_accumulate, 1.0 / (self.sample_count + 1))
#COMPOSITE DEPTH
if is_final_render:
self.fbo_composite_depth.clear((10.0e+32,1.0,1.0,1.0))
self.draw_scene_pass(self.fbo_composite_depth, scene.objects, None, self.composite_depth_shader, UBOS)
return {
'COLOR' : self.t_color_accumulate,
'DEPTH' : self.t_composite_depth,
} | 0.429908 | 0.16975 |
from typing import Optional
from aviral_api.base import api_caller
from aviral_api import urls, exceptions
class AviralAPI(api_caller):
def login(self, username : str, password : str) -> dict:
username = username.lower()
response = self._post_call(urls.aviral_login_url, {"username": username, "password": password})
if response["user_group"] == None:
raise exceptions.WrongLoginError("Sahi details daal le bhai")
return response
def get_subjectwise_marks(self, user: dict, session : str = None) -> dict:
token = user.get("jwt_token")
if session is None:
session = user.get("session_id")
if token is None:
raise exceptions.UserNotLoggedInError("Invalid Token, Try to login first")
if session is None:
raise exceptions.InvalidSessionError("No session specified")
response = self._get_call(urls.aviral_marks_url, {"Authorization" : token, "session" : session})
return response
def get_semesterwise_marks(self, user: dict) -> dict:
token = user.get("jwt_token")
if token is None:
raise exceptions.UserNotLoggedInError("Invalid Token, Try to login first")
response = self._get_call(urls.aviral_semester_result_url, {"Authorization" : token})
return response
def get_userdata(self, user: dict, session : str = None) -> dict:
token = user.get("jwt_token")
if session is None:
session = user.get("session_id")
if token is None:
raise exceptions.UserNotLoggedInError("Invalid Token, Try to login first")
if session is None:
raise exceptions.InvalidSessionError("No session specified")
response = self._get_call(urls.aviral_user_details_url, {"Authorization" : token, "session" : session})
return response
def get_sessions(self) -> dict:
response = self._get_call(urls.aviral_sessions_url)
return response
def get_mtech_specialization(self, user : dict) -> dict:
token = user.get("jwt_token")
if token is None:
raise exceptions.UserNotLoggedInError("Invalid Token, Try to login first")
response = self._get_call(urls.aviral_mtech_specialization_url, {"Authorization" : token})
return response | aviral_api/api.py | from typing import Optional
from aviral_api.base import api_caller
from aviral_api import urls, exceptions
class AviralAPI(api_caller):
def login(self, username : str, password : str) -> dict:
username = username.lower()
response = self._post_call(urls.aviral_login_url, {"username": username, "password": password})
if response["user_group"] == None:
raise exceptions.WrongLoginError("Sahi details daal le bhai")
return response
def get_subjectwise_marks(self, user: dict, session : str = None) -> dict:
token = user.get("jwt_token")
if session is None:
session = user.get("session_id")
if token is None:
raise exceptions.UserNotLoggedInError("Invalid Token, Try to login first")
if session is None:
raise exceptions.InvalidSessionError("No session specified")
response = self._get_call(urls.aviral_marks_url, {"Authorization" : token, "session" : session})
return response
def get_semesterwise_marks(self, user: dict) -> dict:
token = user.get("jwt_token")
if token is None:
raise exceptions.UserNotLoggedInError("Invalid Token, Try to login first")
response = self._get_call(urls.aviral_semester_result_url, {"Authorization" : token})
return response
def get_userdata(self, user: dict, session : str = None) -> dict:
token = user.get("jwt_token")
if session is None:
session = user.get("session_id")
if token is None:
raise exceptions.UserNotLoggedInError("Invalid Token, Try to login first")
if session is None:
raise exceptions.InvalidSessionError("No session specified")
response = self._get_call(urls.aviral_user_details_url, {"Authorization" : token, "session" : session})
return response
def get_sessions(self) -> dict:
response = self._get_call(urls.aviral_sessions_url)
return response
def get_mtech_specialization(self, user : dict) -> dict:
token = user.get("jwt_token")
if token is None:
raise exceptions.UserNotLoggedInError("Invalid Token, Try to login first")
response = self._get_call(urls.aviral_mtech_specialization_url, {"Authorization" : token})
return response | 0.755457 | 0.077622 |
DEBUG = True
LOG = False
import itertools
from collections import OrderedDict
numReads = 1618
def loadReads():
reads = []
for _ in range(numReads):
reads.append(input())
return reads
class Node:
def __init__ (self, label, id, low, up, chain_len):
self.label = label
self.id = id
self.Lbound = low
self.Ubound = up
self.chain_len = chain_len
self.branch = dict()
self.remain = None
class ReadNode:
def __init__ (self, id, read):
self.id = id
self.read = read
self.visited = False
self.overlaps = []
def addOverlap(self, size, id):
overlap = Overlap(size, id)
if len(self.overlaps) == 0:
self.overlaps.append(overlap)
else:
for i, over in enumerate(self.overlaps):
if overlap.size > over.size:
self.overlaps.insert(i, overlap)
break
self.overlaps.append(overlap)
def findOverlap(self, id):
for over in self.overlaps:
if over.id == id: return over.size
return None
class Overlap:
def __init__(self, size, id):
self.size = size
self.id = id
def FindPSPairs(G, PT):
for S in range(0, len(G)):
charPointer = 1
while charPointer < len(G[S])-1:
v = charPointer
currentNode = PT
path_len = 0
local_position = 1
while True:
if v == len(G[S])-1:
for i in range(currentNode.Lbound, currentNode.Ubound + 1):
if GR[i-1][:path_len+1] == G[S][charPointer:]:
ReadMap[S].addOverlap(len(G[S]) - charPointer, i-1)
break
if currentNode.chain_len >= local_position:
g1 = G[S][v]
g2 = G[currentNode.Lbound-1][local_position-1]
if g1 == g2:
local_position +=1
path_len +=1
v += 1
else:
break
else:
g1 = G[S][v]
if g1 in currentNode.branch.keys():
currentNode = currentNode.branch[g1]
local_position +=1
path_len +=1
v += 1
else:
break
charPointer += 1
def PrefixTree(G):
root = Node("root",-1, 1, len(G), 0)
for S in range(0, len(G)):
currentNode = root
local_position = 1
path_len = 0
charPointer = 0
c = G[S][charPointer]
while charPointer < len(G[S])-1:
if currentNode.chain_len >= local_position:
if c == currentNode.remain[local_position-1]:
local_position += 1
path_len += 1
charPointer += 1
c = G[S][charPointer]
else:
v2 =Node(currentNode.remain[local_position-1], currentNode.id, currentNode.Lbound, currentNode.Ubound - 1, currentNode.chain_len - local_position)
if v2.chain_len == 0:
v2.remain = None
else:
v2.remain = currentNode.remain[local_position:local_position + v2.chain_len]
v2.branch = currentNode.branch.copy()
currentNode.branch[v2.label] = v2
path_len +=1
v3 = Node(c,S, S+1, S+1, len(G[S]) - path_len)
if path_len >= len(G[S]):
v3.remain = None
else:
v3.remain = G[S][path_len:]
currentNode.branch[v3.label] = v3
currentNode.chain_len = local_position - 1
if currentNode.chain_len == 0:
currentNode.remain = None
else:
currentNode.remain = currentNode.remain[:currentNode.chain_len]
break
else:
if c in currentNode.branch.keys():
currentNode = currentNode.branch[c]
local_position = 1
path_len += 1
charPointer += 1
c = G[S][charPointer]
currentNode.Ubound = S + 1
else:
path_len += 1
chain_length = len(G[S]) - path_len
branch = Node(c, S, S+1, S+1, chain_length)
currentNode.branch[c] = branch
if branch.chain_len == 0:
branch.remain = None
else:
branch.remain = G[S][path_len:]
break
return root
def LSP(S):
n = len(S)
lps = [0] * n
L = 0
i = 1
while (i < n):
if S[i] == S[L]:
L +=1
lps[i] = L
i += 1
else:
if L != 0:
L = lps[L-1]
else:
lps[i] = 0
i += 1
global result
result = lps[n-1]
if result > n/2:
return n//2
else:
return result
def GreedyPath(ReadMap):
text = ""
idx = 0
pre = 0
while True:
node = ReadMap[idx]
if LOG: print(idx, "read", node.read)
overlap = node.overlaps[0]
if LOG: print("..overlap to", overlap.id, "weight", overlap.size)
text += node.read[pre:]
if LOG: print("text so far", text)
pre = overlap.size
idx = overlap.id
if idx == 0: break
lsp_size = LSP(text)
return text[lsp_size:]
if DEBUG: text = open("tests/110", "r", encoding="utf8")
if DEBUG:
GR = [line.strip() for line in text.readlines()]
GR = sorted(GR)
else:
GR = loadReads()
GR = list(OrderedDict.fromkeys(GR))
ReadMap = [ReadNode(i, read) for i, read in enumerate(GR)]
PT = PrefixTree(GR)
FindPSPairs(GR, PT)
solution = GreedyPath(ReadMap)
print(solution) | Capstone/Week1/Assemble Week1 Working v1.1.py | DEBUG = True
LOG = False
import itertools
from collections import OrderedDict
numReads = 1618
def loadReads():
reads = []
for _ in range(numReads):
reads.append(input())
return reads
class Node:
def __init__ (self, label, id, low, up, chain_len):
self.label = label
self.id = id
self.Lbound = low
self.Ubound = up
self.chain_len = chain_len
self.branch = dict()
self.remain = None
class ReadNode:
def __init__ (self, id, read):
self.id = id
self.read = read
self.visited = False
self.overlaps = []
def addOverlap(self, size, id):
overlap = Overlap(size, id)
if len(self.overlaps) == 0:
self.overlaps.append(overlap)
else:
for i, over in enumerate(self.overlaps):
if overlap.size > over.size:
self.overlaps.insert(i, overlap)
break
self.overlaps.append(overlap)
def findOverlap(self, id):
for over in self.overlaps:
if over.id == id: return over.size
return None
class Overlap:
def __init__(self, size, id):
self.size = size
self.id = id
def FindPSPairs(G, PT):
for S in range(0, len(G)):
charPointer = 1
while charPointer < len(G[S])-1:
v = charPointer
currentNode = PT
path_len = 0
local_position = 1
while True:
if v == len(G[S])-1:
for i in range(currentNode.Lbound, currentNode.Ubound + 1):
if GR[i-1][:path_len+1] == G[S][charPointer:]:
ReadMap[S].addOverlap(len(G[S]) - charPointer, i-1)
break
if currentNode.chain_len >= local_position:
g1 = G[S][v]
g2 = G[currentNode.Lbound-1][local_position-1]
if g1 == g2:
local_position +=1
path_len +=1
v += 1
else:
break
else:
g1 = G[S][v]
if g1 in currentNode.branch.keys():
currentNode = currentNode.branch[g1]
local_position +=1
path_len +=1
v += 1
else:
break
charPointer += 1
def PrefixTree(G):
root = Node("root",-1, 1, len(G), 0)
for S in range(0, len(G)):
currentNode = root
local_position = 1
path_len = 0
charPointer = 0
c = G[S][charPointer]
while charPointer < len(G[S])-1:
if currentNode.chain_len >= local_position:
if c == currentNode.remain[local_position-1]:
local_position += 1
path_len += 1
charPointer += 1
c = G[S][charPointer]
else:
v2 =Node(currentNode.remain[local_position-1], currentNode.id, currentNode.Lbound, currentNode.Ubound - 1, currentNode.chain_len - local_position)
if v2.chain_len == 0:
v2.remain = None
else:
v2.remain = currentNode.remain[local_position:local_position + v2.chain_len]
v2.branch = currentNode.branch.copy()
currentNode.branch[v2.label] = v2
path_len +=1
v3 = Node(c,S, S+1, S+1, len(G[S]) - path_len)
if path_len >= len(G[S]):
v3.remain = None
else:
v3.remain = G[S][path_len:]
currentNode.branch[v3.label] = v3
currentNode.chain_len = local_position - 1
if currentNode.chain_len == 0:
currentNode.remain = None
else:
currentNode.remain = currentNode.remain[:currentNode.chain_len]
break
else:
if c in currentNode.branch.keys():
currentNode = currentNode.branch[c]
local_position = 1
path_len += 1
charPointer += 1
c = G[S][charPointer]
currentNode.Ubound = S + 1
else:
path_len += 1
chain_length = len(G[S]) - path_len
branch = Node(c, S, S+1, S+1, chain_length)
currentNode.branch[c] = branch
if branch.chain_len == 0:
branch.remain = None
else:
branch.remain = G[S][path_len:]
break
return root
def LSP(S):
n = len(S)
lps = [0] * n
L = 0
i = 1
while (i < n):
if S[i] == S[L]:
L +=1
lps[i] = L
i += 1
else:
if L != 0:
L = lps[L-1]
else:
lps[i] = 0
i += 1
global result
result = lps[n-1]
if result > n/2:
return n//2
else:
return result
def GreedyPath(ReadMap):
text = ""
idx = 0
pre = 0
while True:
node = ReadMap[idx]
if LOG: print(idx, "read", node.read)
overlap = node.overlaps[0]
if LOG: print("..overlap to", overlap.id, "weight", overlap.size)
text += node.read[pre:]
if LOG: print("text so far", text)
pre = overlap.size
idx = overlap.id
if idx == 0: break
lsp_size = LSP(text)
return text[lsp_size:]
if DEBUG: text = open("tests/110", "r", encoding="utf8")
if DEBUG:
GR = [line.strip() for line in text.readlines()]
GR = sorted(GR)
else:
GR = loadReads()
GR = list(OrderedDict.fromkeys(GR))
ReadMap = [ReadNode(i, read) for i, read in enumerate(GR)]
PT = PrefixTree(GR)
FindPSPairs(GR, PT)
solution = GreedyPath(ReadMap)
print(solution) | 0.204104 | 0.192369 |
from django.db import migrations, models
import quantityfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="BigIntFieldSaveModel",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.BigIntegerQuantityField(
base_units="gram", unit_choices=["gram"]
),
),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="ChoicesDefinedInModel",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"weight",
quantityfield.fields.QuantityField(
base_units="kilogram", unit_choices=["milligram", "pounds"]
),
),
],
),
migrations.CreateModel(
name="ChoicesDefinedInModelInt",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"weight",
quantityfield.fields.IntegerQuantityField(
base_units="kilogram", unit_choices=["milligram", "pounds"]
),
),
],
),
migrations.CreateModel(
name="CustomUregDecimalHayBale",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"custom_decimal",
quantityfield.fields.DecimalQuantityField(
base_units="custom",
decimal_places=2,
max_digits=10,
unit_choices=["custom"],
),
),
],
),
migrations.CreateModel(
name="CustomUregHayBale",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"custom",
quantityfield.fields.QuantityField(
base_units="custom", unit_choices=["custom"]
),
),
(
"custom_int",
quantityfield.fields.IntegerQuantityField(
base_units="custom", unit_choices=["custom"]
),
),
(
"custom_bigint",
quantityfield.fields.BigIntegerQuantityField(
base_units="custom", unit_choices=["custom"]
),
),
],
),
migrations.CreateModel(
name="DecimalFieldSaveModel",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.DecimalQuantityField(
base_units="gram",
decimal_places=2,
max_digits=10,
unit_choices=["gram"],
),
),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="EmptyHayBaleBigInt",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.BigIntegerQuantityField(
base_units="gram", null=True, unit_choices=["gram"]
),
),
],
),
migrations.CreateModel(
name="EmptyHayBaleDecimal",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.DecimalQuantityField(
base_units="gram",
decimal_places=2,
max_digits=10,
null=True,
unit_choices=["gram"],
),
),
(
"compare",
models.DecimalField(decimal_places=2, max_digits=10, null=True),
),
],
),
migrations.CreateModel(
name="EmptyHayBaleFloat",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.QuantityField(
base_units="gram", null=True, unit_choices=["gram"]
),
),
],
),
migrations.CreateModel(
name="EmptyHayBaleInt",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.IntegerQuantityField(
base_units="gram", null=True, unit_choices=["gram"]
),
),
],
),
migrations.CreateModel(
name="EmptyHayBalePositiveInt",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.PositiveIntegerQuantityField(
base_units="gram", null=True, unit_choices=["gram"]
),
),
],
),
migrations.CreateModel(
name="FloatFieldSaveModel",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.QuantityField(
base_units="gram", unit_choices=["gram"]
),
),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="HayBale",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.QuantityField(
base_units="gram", unit_choices=["gram"]
),
),
(
"weight_int",
quantityfield.fields.IntegerQuantityField(
base_units="gram", blank=True, null=True, unit_choices=["gram"]
),
),
(
"weight_bigint",
quantityfield.fields.BigIntegerQuantityField(
base_units="gram", blank=True, null=True, unit_choices=["gram"]
),
),
],
),
migrations.CreateModel(
name="IntFieldSaveModel",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.IntegerQuantityField(
base_units="gram", unit_choices=["gram"]
),
),
],
options={
"abstract": False,
},
),
] | tests/dummyapp/migrations/0001_initial.py |
from django.db import migrations, models
import quantityfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="BigIntFieldSaveModel",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.BigIntegerQuantityField(
base_units="gram", unit_choices=["gram"]
),
),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="ChoicesDefinedInModel",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"weight",
quantityfield.fields.QuantityField(
base_units="kilogram", unit_choices=["milligram", "pounds"]
),
),
],
),
migrations.CreateModel(
name="ChoicesDefinedInModelInt",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"weight",
quantityfield.fields.IntegerQuantityField(
base_units="kilogram", unit_choices=["milligram", "pounds"]
),
),
],
),
migrations.CreateModel(
name="CustomUregDecimalHayBale",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"custom_decimal",
quantityfield.fields.DecimalQuantityField(
base_units="custom",
decimal_places=2,
max_digits=10,
unit_choices=["custom"],
),
),
],
),
migrations.CreateModel(
name="CustomUregHayBale",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"custom",
quantityfield.fields.QuantityField(
base_units="custom", unit_choices=["custom"]
),
),
(
"custom_int",
quantityfield.fields.IntegerQuantityField(
base_units="custom", unit_choices=["custom"]
),
),
(
"custom_bigint",
quantityfield.fields.BigIntegerQuantityField(
base_units="custom", unit_choices=["custom"]
),
),
],
),
migrations.CreateModel(
name="DecimalFieldSaveModel",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.DecimalQuantityField(
base_units="gram",
decimal_places=2,
max_digits=10,
unit_choices=["gram"],
),
),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="EmptyHayBaleBigInt",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.BigIntegerQuantityField(
base_units="gram", null=True, unit_choices=["gram"]
),
),
],
),
migrations.CreateModel(
name="EmptyHayBaleDecimal",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.DecimalQuantityField(
base_units="gram",
decimal_places=2,
max_digits=10,
null=True,
unit_choices=["gram"],
),
),
(
"compare",
models.DecimalField(decimal_places=2, max_digits=10, null=True),
),
],
),
migrations.CreateModel(
name="EmptyHayBaleFloat",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.QuantityField(
base_units="gram", null=True, unit_choices=["gram"]
),
),
],
),
migrations.CreateModel(
name="EmptyHayBaleInt",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.IntegerQuantityField(
base_units="gram", null=True, unit_choices=["gram"]
),
),
],
),
migrations.CreateModel(
name="EmptyHayBalePositiveInt",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.PositiveIntegerQuantityField(
base_units="gram", null=True, unit_choices=["gram"]
),
),
],
),
migrations.CreateModel(
name="FloatFieldSaveModel",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.QuantityField(
base_units="gram", unit_choices=["gram"]
),
),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="HayBale",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.QuantityField(
base_units="gram", unit_choices=["gram"]
),
),
(
"weight_int",
quantityfield.fields.IntegerQuantityField(
base_units="gram", blank=True, null=True, unit_choices=["gram"]
),
),
(
"weight_bigint",
quantityfield.fields.BigIntegerQuantityField(
base_units="gram", blank=True, null=True, unit_choices=["gram"]
),
),
],
),
migrations.CreateModel(
name="IntFieldSaveModel",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
(
"weight",
quantityfield.fields.IntegerQuantityField(
base_units="gram", unit_choices=["gram"]
),
),
],
options={
"abstract": False,
},
),
] | 0.650245 | 0.208441 |
import numpy as np
import matplotlib
matplotlib.rcParams['text.usetex'] = True # enable latex syntax in plots
import matplotlib.pyplot as plt
from ex1.utils import split_data, plot_ten_images, compute_ts_error
from ex1.classifiers import NearestMeanCentroid
from ex1.data_loaders import DataLoaderMNIST, DataLoaderLFW
from ex1.data_perturb import DataPerturbUniform, DataPerturbNormal
use_faces = False # True to use LFW, False for MNIST digits
if use_faces is True:
data_loader = DataLoaderLFW()
else:
data_loader = DataLoaderMNIST()
x, y = data_loader.load_data()
w = data_loader.width
h = data_loader.height
# split data into TR/TS
Xtr, ytr, Xts, yts = split_data(x, y, tr_fraction=0.6)
# train classifier on clean TR
clf = NearestMeanCentroid()
clf.fit(Xtr, ytr)
# test on clean TS
yc = clf.predict(Xts)
compute_ts_error(yc, yts) # prints test error
# perturb data
data_perturb_uniform = DataPerturbUniform()
data_perturb_normal = DataPerturbNormal()
xts_perturbed = data_perturb_uniform.perturb_data(Xts)
# test on clean TS
yc = clf.predict(xts_perturbed)
compute_ts_error(yc, yts) # prints test error
# plot unperturbed test data
titles = ['y: ' + str(i) for i in yts]
plt.figure(figsize=(10, 6))
plot_ten_images(Xts, w, h, titles)
plt.savefig('../figs/examples.pdf')
# plot the perturbed data (w/ uniform perturbation)
plt.figure(figsize=(10, 6))
plot_ten_images(xts_perturbed, w, h, titles)
plt.savefig('../figs/examples_perturbed.pdf')
param_values = np.linspace(0, 5, num=100)
test_error_uniform = np.zeros(shape=param_values.shape)
test_error_normal = np.zeros(shape=param_values.shape)
for i in range(param_values.size):
data_perturb_uniform.k = param_values[i]
data_perturb_normal.sigma = param_values[i]
xts_perturbed = data_perturb_uniform.perturb_data(Xts)
yc = clf.predict(xts_perturbed)
test_error_uniform[i] = compute_ts_error(yc, yts)
xts_perturbed = data_perturb_normal.perturb_data(Xts)
yc = clf.predict(xts_perturbed)
test_error_normal[i] = compute_ts_error(yc, yts)
# print(test_error)
plt.figure()
plt.plot(param_values, test_error_uniform, 'b', label="uniform")
plt.plot(param_values, test_error_normal, 'r', label="normal")
plt.title('Test error')
plt.xlabel(r'Perturbation size (K or $\sigma$)')
plt.legend()
plt.savefig('../figs/error_perturbed.pdf') | src/ex1/main_perturb.py | import numpy as np
import matplotlib
matplotlib.rcParams['text.usetex'] = True # enable latex syntax in plots
import matplotlib.pyplot as plt
from ex1.utils import split_data, plot_ten_images, compute_ts_error
from ex1.classifiers import NearestMeanCentroid
from ex1.data_loaders import DataLoaderMNIST, DataLoaderLFW
from ex1.data_perturb import DataPerturbUniform, DataPerturbNormal
use_faces = False # True to use LFW, False for MNIST digits
if use_faces is True:
data_loader = DataLoaderLFW()
else:
data_loader = DataLoaderMNIST()
x, y = data_loader.load_data()
w = data_loader.width
h = data_loader.height
# split data into TR/TS
Xtr, ytr, Xts, yts = split_data(x, y, tr_fraction=0.6)
# train classifier on clean TR
clf = NearestMeanCentroid()
clf.fit(Xtr, ytr)
# test on clean TS
yc = clf.predict(Xts)
compute_ts_error(yc, yts) # prints test error
# perturb data
data_perturb_uniform = DataPerturbUniform()
data_perturb_normal = DataPerturbNormal()
xts_perturbed = data_perturb_uniform.perturb_data(Xts)
# test on clean TS
yc = clf.predict(xts_perturbed)
compute_ts_error(yc, yts) # prints test error
# plot unperturbed test data
titles = ['y: ' + str(i) for i in yts]
plt.figure(figsize=(10, 6))
plot_ten_images(Xts, w, h, titles)
plt.savefig('../figs/examples.pdf')
# plot the perturbed data (w/ uniform perturbation)
plt.figure(figsize=(10, 6))
plot_ten_images(xts_perturbed, w, h, titles)
plt.savefig('../figs/examples_perturbed.pdf')
param_values = np.linspace(0, 5, num=100)
test_error_uniform = np.zeros(shape=param_values.shape)
test_error_normal = np.zeros(shape=param_values.shape)
for i in range(param_values.size):
data_perturb_uniform.k = param_values[i]
data_perturb_normal.sigma = param_values[i]
xts_perturbed = data_perturb_uniform.perturb_data(Xts)
yc = clf.predict(xts_perturbed)
test_error_uniform[i] = compute_ts_error(yc, yts)
xts_perturbed = data_perturb_normal.perturb_data(Xts)
yc = clf.predict(xts_perturbed)
test_error_normal[i] = compute_ts_error(yc, yts)
# print(test_error)
plt.figure()
plt.plot(param_values, test_error_uniform, 'b', label="uniform")
plt.plot(param_values, test_error_normal, 'r', label="normal")
plt.title('Test error')
plt.xlabel(r'Perturbation size (K or $\sigma$)')
plt.legend()
plt.savefig('../figs/error_perturbed.pdf') | 0.461502 | 0.737087 |
from fuzz_lightyear.request import FuzzingRequest
from fuzz_lightyear.response import ResponseSequence
from fuzz_lightyear.runner import validate_sequence
def test_basic(mock_client):
responses = validate_sequence(
[
FuzzingRequest(
tag='basic',
operation_id='get_private_listing',
id=1,
),
],
ResponseSequence(),
)
assert responses.data['session'] == 'victim_session'
assert responses.test_results['IDORPlugin']
def test_skipped_due_to_no_inputs(mock_client):
responses = validate_sequence(
[
FuzzingRequest(
tag='basic',
operation_id='get_no_inputs_required',
),
],
ResponseSequence(),
)
assert responses.data['session'] == 'victim_session'
assert responses.test_results == {}
def test_side_effect_unsafe(mock_api_client):
responses = validate_sequence(
[
FuzzingRequest(
tag='sequence',
operation_id='post_create_with_side_effect',
),
FuzzingRequest(
tag='user',
operation_id='get_get_user',
),
# This goes last, to test for IDOR.
FuzzingRequest(
tag='sequence',
operation_id='get_get_with_side_effect_unsafe',
),
],
ResponseSequence(),
)
assert responses.responses[1].created_resource
assert responses.test_results['IDORPlugin']
def test_side_effect_safe(mock_api_client):
responses = validate_sequence(
[
FuzzingRequest(
tag='sequence',
operation_id='post_create_with_side_effect',
),
FuzzingRequest(
tag='user',
operation_id='get_get_user',
),
# This goes last, to test for IDOR.
FuzzingRequest(
tag='sequence',
operation_id='get_get_with_side_effect_safe',
),
],
ResponseSequence(),
)
assert responses.responses[1].created_resource
assert not responses.test_results['IDORPlugin'] | tests/integration/plugins/idor_test.py | from fuzz_lightyear.request import FuzzingRequest
from fuzz_lightyear.response import ResponseSequence
from fuzz_lightyear.runner import validate_sequence
def test_basic(mock_client):
responses = validate_sequence(
[
FuzzingRequest(
tag='basic',
operation_id='get_private_listing',
id=1,
),
],
ResponseSequence(),
)
assert responses.data['session'] == 'victim_session'
assert responses.test_results['IDORPlugin']
def test_skipped_due_to_no_inputs(mock_client):
responses = validate_sequence(
[
FuzzingRequest(
tag='basic',
operation_id='get_no_inputs_required',
),
],
ResponseSequence(),
)
assert responses.data['session'] == 'victim_session'
assert responses.test_results == {}
def test_side_effect_unsafe(mock_api_client):
responses = validate_sequence(
[
FuzzingRequest(
tag='sequence',
operation_id='post_create_with_side_effect',
),
FuzzingRequest(
tag='user',
operation_id='get_get_user',
),
# This goes last, to test for IDOR.
FuzzingRequest(
tag='sequence',
operation_id='get_get_with_side_effect_unsafe',
),
],
ResponseSequence(),
)
assert responses.responses[1].created_resource
assert responses.test_results['IDORPlugin']
def test_side_effect_safe(mock_api_client):
responses = validate_sequence(
[
FuzzingRequest(
tag='sequence',
operation_id='post_create_with_side_effect',
),
FuzzingRequest(
tag='user',
operation_id='get_get_user',
),
# This goes last, to test for IDOR.
FuzzingRequest(
tag='sequence',
operation_id='get_get_with_side_effect_safe',
),
],
ResponseSequence(),
)
assert responses.responses[1].created_resource
assert not responses.test_results['IDORPlugin'] | 0.662469 | 0.45175 |
import os
import sys
import time
import yaml
import docopt
import shutil
import pkg_resources
import traceback
import signal
from .logger import Logger
from .notifiers import create_notifier
try:
from .thingpin import Thingpin
except ImportError:
Thingpin = None
def main():
args = docopt.docopt(__doc__)
if args['create-config']:
sample = pkg_resources.resource_filename('thingpin',
'thingpin-config.yml.sample')
config_file = 'thingpin-config.yml'
if os.path.exists(config_file):
print('config file {} already exists, not overwriting'.format(
config_file))
return 2
else:
shutil.copyfile(sample, config_file)
print('created config file: {}'.format(config_file))
return
config_file = os.path.expanduser(args['--config'])
with open(config_file) as f:
config = yaml.load(f)
if args['install-service']:
print('** coming soon - watch this space **')
return
log = get_logger(args)
if Thingpin is None:
log.error('must run on Raspberry Pi')
return 1
# TODO: support more than one
notifier_config = config['notifiers'].items()[0]
notifier = create_notifier(notifier_config[0], notifier_config[1])
service = Thingpin(notifier,
pin_mode=config['pin_mode'],
things=config['things'],
debug=config.get('debug', False))
pidfile = args.get('--pidfile')
if pidfile is not None:
with open(os.path.expanduser(pidfile), "w") as f:
f.write(str(os.getpid()))
try:
service.run()
except KeyboardInterrupt:
log.info('exiting on Ctrl-C...')
service.cleanup()
return
def get_logger(args):
log_file = args.get('--log')
if log_file is None and args.get('--pidfile'):
log_file = '/var/log/thingpin.log'
return Logger(log_file=log_file)
if __name__ == '__main__':
sys.exit(main()) | src/thingpin/main.py | import os
import sys
import time
import yaml
import docopt
import shutil
import pkg_resources
import traceback
import signal
from .logger import Logger
from .notifiers import create_notifier
try:
from .thingpin import Thingpin
except ImportError:
Thingpin = None
def main():
args = docopt.docopt(__doc__)
if args['create-config']:
sample = pkg_resources.resource_filename('thingpin',
'thingpin-config.yml.sample')
config_file = 'thingpin-config.yml'
if os.path.exists(config_file):
print('config file {} already exists, not overwriting'.format(
config_file))
return 2
else:
shutil.copyfile(sample, config_file)
print('created config file: {}'.format(config_file))
return
config_file = os.path.expanduser(args['--config'])
with open(config_file) as f:
config = yaml.load(f)
if args['install-service']:
print('** coming soon - watch this space **')
return
log = get_logger(args)
if Thingpin is None:
log.error('must run on Raspberry Pi')
return 1
# TODO: support more than one
notifier_config = config['notifiers'].items()[0]
notifier = create_notifier(notifier_config[0], notifier_config[1])
service = Thingpin(notifier,
pin_mode=config['pin_mode'],
things=config['things'],
debug=config.get('debug', False))
pidfile = args.get('--pidfile')
if pidfile is not None:
with open(os.path.expanduser(pidfile), "w") as f:
f.write(str(os.getpid()))
try:
service.run()
except KeyboardInterrupt:
log.info('exiting on Ctrl-C...')
service.cleanup()
return
def get_logger(args):
log_file = args.get('--log')
if log_file is None and args.get('--pidfile'):
log_file = '/var/log/thingpin.log'
return Logger(log_file=log_file)
if __name__ == '__main__':
sys.exit(main()) | 0.106261 | 0.052062 |
import platform
import re
import dateutil
from teamcity.config import TEAMCITY_API
class Utils(object):
"""Utility functionalities for teamcity
:param object: default inherited features
:type object: obj
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@classmethod
def python_version(cls, version_type="major"):
"""Get version of the python used
:param version_type: version type, defaults to "major"
:param version_type: str, optional
:return: version number
:rtype: str
"""
version = platform.python_version_tuple()
return version[0] if version_type == "major" else version[1]
@classmethod
def api_url(cls, url):
"""Get API URL
:param url: URL to be checked if proper in format
:type url: str
:return: Stripped URL
:rtype: str
"""
if not url.endswith(TEAMCITY_API):
fmt = None
if url.endswith(r"/"):
fmt = "{url}{api}"
else:
fmt = "{url}/{api}"
url = fmt.format(url=url, api=TEAMCITY_API)
return url
@classmethod
def strip_trailing_slash(cls, url):
"""Strip trailing slash from the url
:param url: URL to be checked for slash at the end
:type url: str
:return: Stripped URL
:rtype: str
"""
while url.endswith('/'):
url = url[:-1]
return url
@classmethod
def parse_date_str(cls, date_str):
"""Parse string to date
:param date_str: date in string
:type date_str: str
:return: date object
:rtype: obj
"""
return dateutil.parser.parse(date_str)
@classmethod
def cleanhtml(cls, raw_html):
"""clean html from response and retrieve text message
:param raw_html: raw html string
:type raw_html: str
:return: clean message string
:rtype: str
"""
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', raw_html)
return cleantext
if __name__=="__main__":
print(Utils.python_version("minor")) | teamcity/utils.py | import platform
import re
import dateutil
from teamcity.config import TEAMCITY_API
class Utils(object):
"""Utility functionalities for teamcity
:param object: default inherited features
:type object: obj
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@classmethod
def python_version(cls, version_type="major"):
"""Get version of the python used
:param version_type: version type, defaults to "major"
:param version_type: str, optional
:return: version number
:rtype: str
"""
version = platform.python_version_tuple()
return version[0] if version_type == "major" else version[1]
@classmethod
def api_url(cls, url):
"""Get API URL
:param url: URL to be checked if proper in format
:type url: str
:return: Stripped URL
:rtype: str
"""
if not url.endswith(TEAMCITY_API):
fmt = None
if url.endswith(r"/"):
fmt = "{url}{api}"
else:
fmt = "{url}/{api}"
url = fmt.format(url=url, api=TEAMCITY_API)
return url
@classmethod
def strip_trailing_slash(cls, url):
"""Strip trailing slash from the url
:param url: URL to be checked for slash at the end
:type url: str
:return: Stripped URL
:rtype: str
"""
while url.endswith('/'):
url = url[:-1]
return url
@classmethod
def parse_date_str(cls, date_str):
"""Parse string to date
:param date_str: date in string
:type date_str: str
:return: date object
:rtype: obj
"""
return dateutil.parser.parse(date_str)
@classmethod
def cleanhtml(cls, raw_html):
"""clean html from response and retrieve text message
:param raw_html: raw html string
:type raw_html: str
:return: clean message string
:rtype: str
"""
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', raw_html)
return cleantext
if __name__=="__main__":
print(Utils.python_version("minor")) | 0.64232 | 0.086016 |
import os
import sys
parent_dirname = '/'.join(os.path.dirname(__file__).split('/')[:-2])
sys.path.append(parent_dirname)
sys.path.append(parent_dirname + '/cloud_transformers')
from cloud_transformers.layers.multihead_ct_adain import MultiHeadUnionAdaIn, forward_style
from cloud_transformers.layers.utils import AdaIn1dUpd
import torch.nn.functional as F
from torch import nn
def sum_hack(lists):
start = lists[0]
for i in range(1, len(lists)):
start += lists[i]
return start
class ParamEncoder(nn.Module):
def __init__(self, input_dim, encoder_depth, output_dim, bn):
'''
`input_dim`: dimensionality of an outfit code (defaults to 8 in our experiments)
'''
super(ParamEncoder, self).__init__()
self.dims = [input_dim, 256, 512, 512, 512, 512, 512, 512, 512, 512] # 10 layers at most
layers = []
if encoder_depth == 1:
layers.append(nn.Linear(input_dim, output_dim)) # mapping
else:
i = 0
while (i < min(encoder_depth - 1, len(self.dims) - 1)):
layers.append(nn.Linear(self.dims[i], self.dims[i + 1]))
if bn:
layers.append(nn.BatchNorm1d(self.dims[i + 1]))
last_dim = self.dims[i + 1]
i += 1
layers.append(nn.Linear(last_dim, output_dim)) # mapping
if bn:
layers.append(nn.BatchNorm1d(output_dim))
self.mlps = nn.ModuleList(layers)
def forward(self, x):
for l in self.mlps:
x = F.relu(l(x), inplace=True)
return x
class DrapingNetwork(nn.Module):
def __init__(self, encoder_input_dim, encoder_depth, num_latent=512, encoder_bn=True, ct_heads=None,
ct_feats_dims=None):
super().__init__()
self.model_dim = 512
self.encoder = nn.Sequential(ParamEncoder(
input_dim=encoder_input_dim, encoder_depth=encoder_depth,
output_dim=num_latent, bn=encoder_bn))
self.start = nn.Sequential(nn.Conv1d(in_channels=3,
out_channels=self.model_dim, kernel_size=1, bias=False),
AdaIn1dUpd(self.model_dim, num_latent=num_latent),
nn.ReLU(True))
features_dims_vals = [(4, 4), (4 * 4, 4 * 4), (4 * 4, 4 * 8)] if ct_feats_dims is None else ct_feats_dims
heads_vals = [(16, 16), (16, 16), (16, 16)] if ct_heads is None else ct_heads
self.attentions_decoder = nn.ModuleList(sum_hack([[MultiHeadUnionAdaIn(model_dim=self.model_dim,
features_dims=features_dims_vals[0],
heads=heads_vals[0],
tensor_sizes=[128, 32],
model_dim_out=self.model_dim,
n_latent=num_latent,
tensor_dims=[2, 3],
scales=True),
MultiHeadUnionAdaIn(model_dim=self.model_dim,
features_dims=features_dims_vals[1],
heads=heads_vals[1],
tensor_sizes=[64, 16],
model_dim_out=self.model_dim,
n_latent=num_latent,
tensor_dims=[2, 3],
scales=True),
MultiHeadUnionAdaIn(model_dim=self.model_dim,
features_dims=features_dims_vals[2],
heads=heads_vals[2],
tensor_sizes=[16, 8],
model_dim_out=self.model_dim,
n_latent=num_latent,
tensor_dims=[2, 3],
scales=True)] for _ in range(4)]))
self.final = nn.Sequential(nn.Conv1d(in_channels=self.model_dim,
out_channels=self.model_dim, kernel_size=1, bias=False),
AdaIn1dUpd(self.model_dim, num_latent=num_latent),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=self.model_dim,
out_channels=3, kernel_size=1))
def forward(self, noise, input, return_lattice=False):
'''
`input` (torch.FloatTensor): tensor of outfit codes of shape (B, encoder_input_dim)
`noise` (torch.FloatTensor): tensor of point clouds of shape (B, 3, num_points)
'''
z = self.encoder(input)
x = forward_style(self.start, noise, z)
lattices_sizes = []
for i in range(len(self.attentions_decoder)):
x, lattice_size = self.attentions_decoder[i](x, z, noise)
lattices_sizes += lattice_size
x = forward_style(self.final, x, z)
return x.unsqueeze(2), lattices_sizes | src/models/draping_network.py | import os
import sys
parent_dirname = '/'.join(os.path.dirname(__file__).split('/')[:-2])
sys.path.append(parent_dirname)
sys.path.append(parent_dirname + '/cloud_transformers')
from cloud_transformers.layers.multihead_ct_adain import MultiHeadUnionAdaIn, forward_style
from cloud_transformers.layers.utils import AdaIn1dUpd
import torch.nn.functional as F
from torch import nn
def sum_hack(lists):
start = lists[0]
for i in range(1, len(lists)):
start += lists[i]
return start
class ParamEncoder(nn.Module):
def __init__(self, input_dim, encoder_depth, output_dim, bn):
'''
`input_dim`: dimensionality of an outfit code (defaults to 8 in our experiments)
'''
super(ParamEncoder, self).__init__()
self.dims = [input_dim, 256, 512, 512, 512, 512, 512, 512, 512, 512] # 10 layers at most
layers = []
if encoder_depth == 1:
layers.append(nn.Linear(input_dim, output_dim)) # mapping
else:
i = 0
while (i < min(encoder_depth - 1, len(self.dims) - 1)):
layers.append(nn.Linear(self.dims[i], self.dims[i + 1]))
if bn:
layers.append(nn.BatchNorm1d(self.dims[i + 1]))
last_dim = self.dims[i + 1]
i += 1
layers.append(nn.Linear(last_dim, output_dim)) # mapping
if bn:
layers.append(nn.BatchNorm1d(output_dim))
self.mlps = nn.ModuleList(layers)
def forward(self, x):
for l in self.mlps:
x = F.relu(l(x), inplace=True)
return x
class DrapingNetwork(nn.Module):
    """Map a point cloud plus an outfit-parameter code to per-point 3-D outputs.

    Pipeline (see ``forward``): the outfit code is encoded to a latent ``z``
    by a ``ParamEncoder``; the 3-channel point cloud is lifted to
    ``model_dim`` channels; a stack of ``MultiHeadUnionAdaIn`` attention
    blocks refines the features conditioned on ``z``; a final conv projects
    back to 3 channels.

    NOTE(review): attribute names (``encoder``, ``start``,
    ``attentions_decoder``, ``final``) and the ``ModuleList`` ordering define
    the checkpoint ``state_dict`` keys — do not rename or reorder without a
    weight-migration plan.
    """

    def __init__(self, encoder_input_dim, encoder_depth, num_latent=512, encoder_bn=True, ct_heads=None,
                 ct_feats_dims=None):
        super().__init__()
        # Internal channel width used throughout the decoder.
        self.model_dim = 512
        # Encodes the outfit-parameter vector into a num_latent-dim code z.
        self.encoder = nn.Sequential(ParamEncoder(
            input_dim=encoder_input_dim, encoder_depth=encoder_depth,
            output_dim=num_latent, bn=encoder_bn))
        # Lifts raw xyz (3 channels) to model_dim, modulated by z via AdaIN.
        self.start = nn.Sequential(nn.Conv1d(in_channels=3,
                                             out_channels=self.model_dim, kernel_size=1, bias=False),
                                   AdaIn1dUpd(self.model_dim, num_latent=num_latent),
                                   nn.ReLU(True))
        # Per-stage attention hyper-parameters; callers may override via
        # ct_feats_dims / ct_heads (must be 3-element sequences of pairs).
        features_dims_vals = [(4, 4), (4 * 4, 4 * 4), (4 * 4, 4 * 8)] if ct_feats_dims is None else ct_feats_dims
        heads_vals = [(16, 16), (16, 16), (16, 16)] if ct_heads is None else ct_heads
        # 4 repetitions of the same 3-stage attention group, flattened into a
        # single 12-element ModuleList (sum_hack concatenates the sublists).
        self.attentions_decoder = nn.ModuleList(sum_hack([[MultiHeadUnionAdaIn(model_dim=self.model_dim,
                                                                               features_dims=features_dims_vals[0],
                                                                               heads=heads_vals[0],
                                                                               tensor_sizes=[128, 32],
                                                                               model_dim_out=self.model_dim,
                                                                               n_latent=num_latent,
                                                                               tensor_dims=[2, 3],
                                                                               scales=True),
                                                           MultiHeadUnionAdaIn(model_dim=self.model_dim,
                                                                               features_dims=features_dims_vals[1],
                                                                               heads=heads_vals[1],
                                                                               tensor_sizes=[64, 16],
                                                                               model_dim_out=self.model_dim,
                                                                               n_latent=num_latent,
                                                                               tensor_dims=[2, 3],
                                                                               scales=True),
                                                           MultiHeadUnionAdaIn(model_dim=self.model_dim,
                                                                               features_dims=features_dims_vals[2],
                                                                               heads=heads_vals[2],
                                                                               tensor_sizes=[16, 8],
                                                                               model_dim_out=self.model_dim,
                                                                               n_latent=num_latent,
                                                                               tensor_dims=[2, 3],
                                                                               scales=True)] for _ in range(4)]))
        # Projects model_dim features back to 3 output channels.
        self.final = nn.Sequential(nn.Conv1d(in_channels=self.model_dim,
                                             out_channels=self.model_dim, kernel_size=1, bias=False),
                                   AdaIn1dUpd(self.model_dim, num_latent=num_latent),
                                   nn.ReLU(inplace=True),
                                   nn.Conv1d(in_channels=self.model_dim,
                                             out_channels=3, kernel_size=1))

    def forward(self, noise, input, return_lattice=False):
        '''
        `input` (torch.FloatTensor): tensor of outfit codes of shape (B, encoder_input_dim)
        `noise` (torch.FloatTensor): tensor of point clouds of shape (B, 3, num_points)

        Returns the final features with an extra axis inserted at dim 2, plus
        the list of lattice sizes collected from every attention block.
        NOTE(review): `return_lattice` is accepted but never read — the
        lattice sizes are always returned; confirm whether callers expect it
        to gate the second return value.
        '''
        z = self.encoder(input)
        # Style-conditioned lifting of the raw points.
        x = forward_style(self.start, noise, z)
        lattices_sizes = []
        # Each attention block also receives the original points (`noise`)
        # and reports its lattice size(s).
        for i in range(len(self.attentions_decoder)):
            x, lattice_size = self.attentions_decoder[i](x, z, noise)
            lattices_sizes += lattice_size
        x = forward_style(self.final, x, z)
        return x.unsqueeze(2), lattices_sizes
################################################################################
## Form generated from reading UI file 'main_ui.ui'
##
## Created by: Qt User Interface Compiler version 5.14.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import (QCoreApplication, QDate, QDateTime, QMetaObject,
QObject, QPoint, QRect, QSize, QTime, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont,
QFontDatabase, QIcon, QKeySequence, QLinearGradient, QPalette, QPainter,
QPixmap, QRadialGradient)
from PySide2.QtWidgets import *
class Ui_MainWindow(object):
    """Auto-generated (pyside2-uic) UI layer for the downloader main window.

    Builds three tabs — anime search, anime download, settings/help — on a
    fixed-size (1329x851) QMainWindow.  Per the file header, this module is
    regenerated from 'main_ui.ui'; do not hand-edit widget code here.
    """

    def setupUi(self, MainWindow):
        """Create all widgets, set their geometry/fonts, and wire the tabs."""
        if not MainWindow.objectName():
            MainWindow.setObjectName(u"MainWindow")
        # Fixed-size window: min == max == 1329x851.
        MainWindow.setEnabled(True)
        MainWindow.resize(1329, 851)
        MainWindow.setMinimumSize(QSize(1329, 851))
        MainWindow.setMaximumSize(QSize(1329, 851))
        self.centralwidget = QWidget(MainWindow)
        self.centralwidget.setObjectName(u"centralwidget")
        # Static label above the tabs showing the two supported site URLs.
        self.urlInfo = QLabel(self.centralwidget)
        self.urlInfo.setObjectName(u"urlInfo")
        self.urlInfo.setGeometry(QRect(430, 10, 481, 16))
        self.tabWidget = QTabWidget(self.centralwidget)
        self.tabWidget.setObjectName(u"tabWidget")
        self.tabWidget.setGeometry(QRect(11, 40, 1310, 781))
        self.tabWidget.setMaximumSize(QSize(4564665, 781))
        # ---- Tab 1: anime search ----
        self.aniSearchTab = QWidget()
        self.aniSearchTab.setObjectName(u"aniSearchTab")
        self.aniSearchBox = QLineEdit(self.aniSearchTab)
        self.aniSearchBox.setObjectName(u"aniSearchBox")
        self.aniSearchBox.setGeometry(QRect(340, 31, 821, 31))
        # `font`: 10pt bold — reused by several widgets below.
        font = QFont()
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.aniSearchBox.setFont(font)
        self.aniSearchBtn = QPushButton(self.aniSearchTab)
        self.aniSearchBtn.setObjectName(u"aniSearchBtn")
        self.aniSearchBtn.setGeometry(QRect(1180, 31, 111, 31))
        # Frame holding the site-selection radio buttons (exclusive group).
        self.setSiteFrame = QFrame(self.aniSearchTab)
        self.setSiteFrame.setObjectName(u"setSiteFrame")
        self.setSiteFrame.setGeometry(QRect(10, 51, 301, 61))
        font1 = QFont()
        font1.setBold(False)
        font1.setWeight(50)
        self.setSiteFrame.setFont(font1)
        self.setSiteFrame.setAutoFillBackground(False)
        self.setSiteFrame.setStyleSheet(u"")
        self.setSiteFrame.setFrameShape(QFrame.Box)
        self.setSiteFrame.setFrameShadow(QFrame.Raised)
        self.setSiteFrame.setLineWidth(1)
        self.setSiteYaAni24rBtn = QRadioButton(self.setSiteFrame)
        self.setSiteBtnGroup = QButtonGroup(MainWindow)
        self.setSiteBtnGroup.setObjectName(u"setSiteBtnGroup")
        self.setSiteBtnGroup.addButton(self.setSiteYaAni24rBtn)
        self.setSiteYaAni24rBtn.setObjectName(u"setSiteYaAni24rBtn")
        self.setSiteYaAni24rBtn.setGeometry(QRect(160, 20, 91, 19))
        font2 = QFont()
        font2.setFamily(u"Arial")
        font2.setBold(True)
        font2.setWeight(75)
        self.setSiteYaAni24rBtn.setFont(font2)
        self.setSiteAni24rBtn = QRadioButton(self.setSiteFrame)
        self.setSiteBtnGroup.addButton(self.setSiteAni24rBtn)
        self.setSiteAni24rBtn.setObjectName(u"setSiteAni24rBtn")
        self.setSiteAni24rBtn.setGeometry(QRect(19, 20, 91, 19))
        font3 = QFont()
        font3.setFamily(u"Arial")
        font3.setPointSize(10)
        font3.setBold(True)
        font3.setWeight(75)
        self.setSiteAni24rBtn.setFont(font3)
        # Frame holding the search-mode radio buttons (keyword / complete /
        # various Top20 lists / taste), all in one exclusive group.
        self.setWayFrame = QFrame(self.aniSearchTab)
        self.setWayFrame.setObjectName(u"setWayFrame")
        self.setWayFrame.setGeometry(QRect(10, 156, 301, 171))
        self.setWayFrame.setFrameShape(QFrame.Box)
        self.searchKeyWordRBtn = QRadioButton(self.setWayFrame)
        self.setSearchWayBtnGroup = QButtonGroup(MainWindow)
        self.setSearchWayBtnGroup.setObjectName(u"setSearchWayBtnGroup")
        self.setSearchWayBtnGroup.addButton(self.searchKeyWordRBtn)
        self.searchKeyWordRBtn.setObjectName(u"searchKeyWordRBtn")
        self.searchKeyWordRBtn.setGeometry(QRect(160, 20, 81, 21))
        self.searchKeyWordRBtn.setFont(font)
        self.completeRBtn = QRadioButton(self.setWayFrame)
        self.setSearchWayBtnGroup.addButton(self.completeRBtn)
        self.completeRBtn.setObjectName(u"completeRBtn")
        self.completeRBtn.setGeometry(QRect(19, 20, 61, 19))
        self.completeRBtn.setFont(font3)
        self.quarterTop20rBtn = QRadioButton(self.setWayFrame)
        self.setSearchWayBtnGroup.addButton(self.quarterTop20rBtn)
        self.quarterTop20rBtn.setObjectName(u"quarterTop20rBtn")
        self.quarterTop20rBtn.setGeometry(QRect(19, 100, 121, 19))
        self.quarterTop20rBtn.setFont(font)
        self.newTop20rBtn = QRadioButton(self.setWayFrame)
        self.setSearchWayBtnGroup.addButton(self.newTop20rBtn)
        self.newTop20rBtn.setObjectName(u"newTop20rBtn")
        self.newTop20rBtn.setGeometry(QRect(19, 60, 121, 19))
        self.newTop20rBtn.setFont(font)
        self.yearTop20rBtn = QRadioButton(self.setWayFrame)
        self.setSearchWayBtnGroup.addButton(self.yearTop20rBtn)
        self.yearTop20rBtn.setObjectName(u"yearTop20rBtn")
        self.yearTop20rBtn.setGeometry(QRect(160, 100, 121, 19))
        self.yearTop20rBtn.setFont(font)
        self.genreTop20rBtn = QRadioButton(self.setWayFrame)
        self.setSearchWayBtnGroup.addButton(self.genreTop20rBtn)
        self.genreTop20rBtn.setObjectName(u"genreTop20rBtn")
        self.genreTop20rBtn.setGeometry(QRect(160, 60, 121, 19))
        self.genreTop20rBtn.setFont(font)
        self.tasteRBtn = QRadioButton(self.setWayFrame)
        self.setSearchWayBtnGroup.addButton(self.tasteRBtn)
        self.tasteRBtn.setObjectName(u"tasteRBtn")
        self.tasteRBtn.setGeometry(QRect(19, 140, 108, 19))
        self.tasteRBtn.setFont(font3)
        # Frame holding the tag-filter combo boxes (genre, producer, year,
        # quarter, play-way, clothes, hair style/color, type).
        self.setTagFrame = QFrame(self.aniSearchTab)
        self.setTagFrame.setObjectName(u"setTagFrame")
        self.setTagFrame.setGeometry(QRect(10, 372, 301, 371))
        self.setTagFrame.setFrameShape(QFrame.Box)
        self.clothesComboBox = QComboBox(self.setTagFrame)
        self.clothesComboBox.setObjectName(u"clothesComboBox")
        self.clothesComboBox.setGeometry(QRect(118, 219, 161, 21))
        self.quarterLabel = QLabel(self.setTagFrame)
        self.quarterLabel.setObjectName(u"quarterLabel")
        self.quarterLabel.setGeometry(QRect(17, 140, 41, 21))
        self.clothesLabel = QLabel(self.setTagFrame)
        self.clothesLabel.setObjectName(u"clothesLabel")
        self.clothesLabel.setGeometry(QRect(17, 219, 91, 21))
        self.playWayLabel = QLabel(self.setTagFrame)
        self.playWayLabel.setObjectName(u"playWayLabel")
        self.playWayLabel.setGeometry(QRect(17, 179, 91, 21))
        self.playWayComboBox = QComboBox(self.setTagFrame)
        self.playWayComboBox.setObjectName(u"playWayComboBox")
        self.playWayComboBox.setGeometry(QRect(118, 179, 161, 21))
        self.hairColorLabel = QLabel(self.setTagFrame)
        self.hairColorLabel.setObjectName(u"hairColorLabel")
        self.hairColorLabel.setGeometry(QRect(17, 299, 91, 21))
        self.producerComboBox = QComboBox(self.setTagFrame)
        self.producerComboBox.setObjectName(u"producerComboBox")
        self.producerComboBox.setGeometry(QRect(118, 59, 161, 21))
        self.hairStyleLabel = QLabel(self.setTagFrame)
        self.hairStyleLabel.setObjectName(u"hairStyleLabel")
        self.hairStyleLabel.setGeometry(QRect(17, 259, 91, 21))
        self.hairStyleComboBox = QComboBox(self.setTagFrame)
        self.hairStyleComboBox.setObjectName(u"hairStyleComboBox")
        self.hairStyleComboBox.setGeometry(QRect(118, 259, 161, 21))
        self.quarterComboBox = QComboBox(self.setTagFrame)
        self.quarterComboBox.setObjectName(u"quarterComboBox")
        self.quarterComboBox.setGeometry(QRect(118, 139, 161, 21))
        self.hairColorComboBox = QComboBox(self.setTagFrame)
        self.hairColorComboBox.setObjectName(u"hairColorComboBox")
        self.hairColorComboBox.setGeometry(QRect(118, 299, 161, 21))
        self.producerLabel = QLabel(self.setTagFrame)
        self.producerLabel.setObjectName(u"producerLabel")
        self.producerLabel.setGeometry(QRect(17, 60, 61, 20))
        self.yearLabel = QLabel(self.setTagFrame)
        self.yearLabel.setObjectName(u"yearLabel")
        self.yearLabel.setGeometry(QRect(17, 100, 41, 21))
        # NOTE(review): "gerne" looks like a typo of "genre" in the .ui file;
        # kept as-is because the object name is part of the generated API.
        self.gerneComboBox = QComboBox(self.setTagFrame)
        self.gerneComboBox.setObjectName(u"gerneComboBox")
        self.gerneComboBox.setGeometry(QRect(118, 19, 161, 21))
        self.yearComboBox = QComboBox(self.setTagFrame)
        self.yearComboBox.setObjectName(u"yearComboBox")
        self.yearComboBox.setGeometry(QRect(118, 99, 161, 21))
        self.genreLabel = QLabel(self.setTagFrame)
        self.genreLabel.setObjectName(u"genreLabel")
        self.genreLabel.setGeometry(QRect(17, 20, 41, 21))
        self.typeLabel = QLabel(self.setTagFrame)
        self.typeLabel.setObjectName(u"typeLabel")
        self.typeLabel.setGeometry(QRect(17, 339, 91, 21))
        self.typeComboBox = QComboBox(self.setTagFrame)
        self.typeComboBox.setObjectName(u"typeComboBox")
        self.typeComboBox.setGeometry(QRect(118, 339, 161, 21))
        self.aniSiteCheckLabel = QLabel(self.aniSearchTab)
        self.aniSiteCheckLabel.setObjectName(u"aniSiteCheckLabel")
        self.aniSiteCheckLabel.setGeometry(QRect(10, 31, 131, 16))
        # `font4`: bold, default size — used for the small section labels.
        font4 = QFont()
        font4.setBold(True)
        font4.setWeight(75)
        self.aniSiteCheckLabel.setFont(font4)
        self.aniSearchWayLabel = QLabel(self.aniSearchTab)
        self.aniSearchWayLabel.setObjectName(u"aniSearchWayLabel")
        self.aniSearchWayLabel.setGeometry(QRect(10, 136, 111, 16))
        self.aniSearchWayLabel.setFont(font4)
        self.aniSelectTagLabel = QLabel(self.aniSearchTab)
        self.aniSelectTagLabel.setObjectName(u"aniSelectTagLabel")
        self.aniSelectTagLabel.setGeometry(QRect(10, 354, 111, 16))
        font5 = QFont()
        font5.setBold(True)
        font5.setItalic(False)
        font5.setUnderline(False)
        font5.setWeight(75)
        font5.setStrikeOut(False)
        font5.setKerning(True)
        self.aniSelectTagLabel.setFont(font5)
        # Results table for the search tab.
        self.searchTable = QTableWidget(self.aniSearchTab)
        self.searchTable.setObjectName(u"searchTable")
        self.searchTable.setGeometry(QRect(340, 83, 951, 661))
        self.tabWidget.addTab(self.aniSearchTab, "")
        # ---- Tab 2: anime download ----
        self.aniDownloadTab = QWidget()
        self.aniDownloadTab.setObjectName(u"aniDownloadTab")
        self.aniEpiLinkBox = QLineEdit(self.aniDownloadTab)
        self.aniEpiLinkBox.setObjectName(u"aniEpiLinkBox")
        self.aniEpiLinkBox.setGeometry(QRect(10, 20, 1151, 31))
        self.aniEpiLinkBox.setFont(font)
        self.aniEpiSearchBtn = QPushButton(self.aniDownloadTab)
        self.aniEpiSearchBtn.setObjectName(u"aniEpiSearchBtn")
        self.aniEpiSearchBtn.setGeometry(QRect(1170, 20, 121, 31))
        self.aniEpiList = QListWidget(self.aniDownloadTab)
        self.aniEpiList.setObjectName(u"aniEpiList")
        self.aniEpiList.setGeometry(QRect(10, 100, 421, 641))
        self.aniEpiListLabel = QLabel(self.aniDownloadTab)
        self.aniEpiListLabel.setObjectName(u"aniEpiListLabel")
        self.aniEpiListLabel.setGeometry(QRect(10, 80, 121, 16))
        self.aniEpiListLabel.setFont(font4)
        self.checkAllBox = QCheckBox(self.aniDownloadTab)
        self.checkAllBox.setObjectName(u"checkAllBox")
        self.checkAllBox.setGeometry(QRect(343, 80, 91, 19))
        self.aniDownloadLog = QListWidget(self.aniDownloadTab)
        self.aniDownloadLog.setObjectName(u"aniDownloadLog")
        self.aniDownloadLog.setGeometry(QRect(570, 100, 721, 601))
        self.downloadProgressBar = QProgressBar(self.aniDownloadTab)
        self.downloadProgressBar.setObjectName(u"downloadProgressBar")
        self.downloadProgressBar.setGeometry(QRect(570, 717, 731, 23))
        self.downloadProgressBar.setValue(0)
        self.aniDownloadLogLabel = QLabel(self.aniDownloadTab)
        self.aniDownloadLogLabel.setObjectName(u"aniDownloadLogLabel")
        self.aniDownloadLogLabel.setGeometry(QRect(570, 80, 141, 16))
        self.aniDownloadLogLabel.setFont(font4)
        self.aniDownloadBtn = QPushButton(self.aniDownloadTab)
        self.aniDownloadBtn.setObjectName(u"aniDownloadBtn")
        self.aniDownloadBtn.setGeometry(QRect(440, 390, 121, 31))
        self.tabWidget.addTab(self.aniDownloadTab, "")
        # ---- Tab 3: settings / help / developer info ----
        self.settingsTab = QWidget()
        self.settingsTab.setObjectName(u"settingsTab")
        self.settingsFrame = QFrame(self.settingsTab)
        self.settingsFrame.setObjectName(u"settingsFrame")
        self.settingsFrame.setGeometry(QRect(10, 60, 431, 681))
        self.settingsFrame.setFrameShape(QFrame.Box)
        self.settingsFrame.setFrameShadow(QFrame.Raised)
        self.label = QLabel(self.settingsFrame)
        self.label.setObjectName(u"label")
        self.label.setGeometry(QRect(20, 0, 221, 101))
        self.loadNewInfo = QPushButton(self.settingsFrame)
        self.loadNewInfo.setObjectName(u"loadNewInfo")
        self.loadNewInfo.setGeometry(QRect(260, 40, 141, 28))
        self.helpFrame = QFrame(self.settingsTab)
        self.helpFrame.setObjectName(u"helpFrame")
        self.helpFrame.setGeometry(QRect(480, 60, 371, 681))
        self.helpFrame.setFrameShape(QFrame.Box)
        self.helpFrame.setFrameShadow(QFrame.Raised)
        self.help1Label = QLabel(self.helpFrame)
        self.help1Label.setObjectName(u"help1Label")
        self.help1Label.setGeometry(QRect(20, 30, 301, 91))
        self.help1Label.setFrameShape(QFrame.StyledPanel)
        self.help1Label.setTextFormat(Qt.AutoText)
        self.help1Label.setWordWrap(False)
        self.settingsLabel = QLabel(self.settingsTab)
        self.settingsLabel.setObjectName(u"settingsLabel")
        self.settingsLabel.setGeometry(QRect(14, 30, 41, 31))
        # `font6`: 12pt bold — section headers on the settings tab.
        font6 = QFont()
        font6.setPointSize(12)
        font6.setBold(True)
        font6.setWeight(75)
        self.settingsLabel.setFont(font6)
        self.helpLabel = QLabel(self.settingsTab)
        self.helpLabel.setObjectName(u"helpLabel")
        self.helpLabel.setGeometry(QRect(480, 30, 71, 31))
        self.helpLabel.setFont(font6)
        self.developFrame = QFrame(self.settingsTab)
        self.developFrame.setObjectName(u"developFrame")
        self.developFrame.setGeometry(QRect(890, 58, 391, 681))
        self.developFrame.setFrameShape(QFrame.Box)
        self.developFrame.setFrameShadow(QFrame.Raised)
        self.label_7 = QLabel(self.developFrame)
        self.label_7.setObjectName(u"label_7")
        self.label_7.setGeometry(QRect(30, 40, 161, 41))
        self.label_7.setFont(font)
        self.label_8 = QLabel(self.developFrame)
        self.label_8.setObjectName(u"label_8")
        self.label_8.setGeometry(QRect(30, 90, 281, 41))
        self.label_8.setFont(font4)
        self.label_9 = QLabel(self.developFrame)
        self.label_9.setObjectName(u"label_9")
        self.label_9.setGeometry(QRect(30, 140, 331, 41))
        self.label_9.setFont(font4)
        self.label_10 = QLabel(self.developFrame)
        self.label_10.setObjectName(u"label_10")
        self.label_10.setGeometry(QRect(30, 190, 331, 41))
        self.label_10.setFont(font4)
        self.developLabel = QLabel(self.settingsTab)
        self.developLabel.setObjectName(u"developLabel")
        self.developLabel.setGeometry(QRect(892, 30, 121, 31))
        self.developLabel.setFont(font6)
        self.tabWidget.addTab(self.settingsTab, "")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QStatusBar(MainWindow)
        self.statusbar.setObjectName(u"statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        # Download tab (index 1) is the one shown on startup.
        self.tabWidget.setCurrentIndex(1)
        QMetaObject.connectSlotsByName(MainWindow)
    # setupUi

    def retranslateUi(self, MainWindow):
        """Apply all translatable (Korean) display strings to the widgets."""
        MainWindow.setWindowTitle(QCoreApplication.translate("MainWindow", u"MainWindow", None))
        self.urlInfo.setText(QCoreApplication.translate("MainWindow", u"\uc560\ub2c824: https://ani24do.com \uc57c\uc560\ub2c824: https://yaani24.net", None))
        self.aniSearchBtn.setText(QCoreApplication.translate("MainWindow", u"\uc560\ub2c8 \uac80\uc0c9", None))
        self.setSiteYaAni24rBtn.setText(QCoreApplication.translate("MainWindow", u"\uc57c\uc560\ub2c824", None))
        self.setSiteAni24rBtn.setText(QCoreApplication.translate("MainWindow", u"\uc560\ub2c824", None))
        self.searchKeyWordRBtn.setText(QCoreApplication.translate("MainWindow", u"\ud0a4\uc6cc\ub4dc", None))
        self.completeRBtn.setText(QCoreApplication.translate("MainWindow", u"\uc644\uacb0", None))
        self.quarterTop20rBtn.setText(QCoreApplication.translate("MainWindow", u"\ubd84\uae30 Top20", None))
        self.newTop20rBtn.setText(QCoreApplication.translate("MainWindow", u"\uc2e0\uc791 Top20", None))
        self.yearTop20rBtn.setText(QCoreApplication.translate("MainWindow", u"\uc62c\ud574 Top20", None))
        self.genreTop20rBtn.setText(QCoreApplication.translate("MainWindow", u"\uc7a5\ub974 Top20", None))
        self.tasteRBtn.setText(QCoreApplication.translate("MainWindow", u"\ucde8\ud5a5 \ub9de\ucda4", None))
        self.quarterLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\ubd84\uae30</span></p></body></html>", None))
        self.clothesLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\uc637</span></p></body></html>", None))
        self.playWayLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\ud50c\ub808\uc774\ubc29\uc2dd</span></p></body></html>", None))
        self.hairColorLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\uba38\ub9ac\uc0c9</span></p></body></html>", None))
        self.hairStyleLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\uba38\ub9ac\uc2a4\ud0c0\uc77c</span></p></body></html>", None))
        self.producerLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\uc81c\uc791\uc0ac</span></p></body></html>", None))
        self.yearLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\uc5f0\ub3c4</span></p></body></html>", None))
        self.genreLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\uc7a5\ub974</span></p></body></html>", None))
        self.typeLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\ud0c0\uc785</span></p></body></html>", None))
        self.aniSiteCheckLabel.setText(QCoreApplication.translate("MainWindow", u"\uac80\uc0c9 \uc0ac\uc774\ud2b8 \uc120\ud0dd", None))
        self.aniSearchWayLabel.setText(QCoreApplication.translate("MainWindow", u"\uac80\uc0c9 \ubc29\ubc95 \uc120\ud0dd", None))
        self.aniSelectTagLabel.setText(QCoreApplication.translate("MainWindow", u"\uac80\uc0c9 \ud0dc\uadf8 \uc120\ud0dd", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.aniSearchTab), QCoreApplication.translate("MainWindow", u"\uc560\ub2c8 \uac80\uc0c9", None))
        self.aniEpiSearchBtn.setText(QCoreApplication.translate("MainWindow", u"\uc560\ub2c8 \ud69f\ucc28 \uac80\uc0c9", None))
        self.aniEpiListLabel.setText(QCoreApplication.translate("MainWindow", u"\uc560\ub2c8 \ud69f\ucc28 \ub9ac\uc2a4\ud2b8", None))
        self.checkAllBox.setText(QCoreApplication.translate("MainWindow", u"\uc804\uccb4 \uc120\ud0dd", None))
        self.aniDownloadLogLabel.setText(QCoreApplication.translate("MainWindow", u"\uc560\ub2c8 \ub2e4\uc6b4\ub85c\ub4dc \ub85c\uadf8", None))
        self.aniDownloadBtn.setText(QCoreApplication.translate("MainWindow", u"\uc560\ub2c8 \ub2e4\uc6b4\ub85c\ub4dc", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.aniDownloadTab), QCoreApplication.translate("MainWindow", u"\uc560\ub2c8 \ub2e4\uc6b4\ub85c\ub4dc", None))
        self.label.setText(QCoreApplication.translate("MainWindow", u"Ani24 Info File\uc744 \uac00\uc838\uc635\ub2c8\ub2e4.\n"
"\ucc98\uc74c\uc774\ub098 \uc5c5\ub370\uc774\ud2b8 \ud558\uace0 \uc2f6\uc744\ub54c \n"
"\ubd88\ub7ec\uc624\uc2dc\uba74 \ub429\ub2c8\ub2e4.", None))
        self.loadNewInfo.setText(QCoreApplication.translate("MainWindow", u"\uc0c8 \uc815\ubcf4 \ubd88\ub7ec\uc624\uae30", None))
        self.help1Label.setText(QCoreApplication.translate("MainWindow", u"[\uac80\uc0c9 \ubc29\ubc95 \uc124\uba85]\n"
"\ud0a4\uc6cc\ub4dc\ub294 \uc606 \uac80\uc0c9\ubc15\uc2a4\uc5d0 \uac80\uc0c9\uc5b4\ub97c\n"
"\uc785\ub825\ud558\uba74 \ub429\ub2c8\ub2e4.\n"
"\uadf8\ub9ac\uace0 \uac80\uc0c9\ubc84\ud2bc\uc740 \uac80\uc0c9\ubc15\uc2a4 \uc606\n"
"[\uc560\ub2c8 \uac80\uc0c9] \ubc84\ud2bc\uc73c\ub85c \ud1b5\uc77c\ub418\uc5b4\uc788\uc2b5\ub2c8\ub2e4.", None))
        self.settingsLabel.setText(QCoreApplication.translate("MainWindow", u"\uc124\uc815", None))
        self.helpLabel.setText(QCoreApplication.translate("MainWindow", u"\ub3c4\uc6c0\ub9d0", None))
        self.label_7.setText(QCoreApplication.translate("MainWindow", u"\uc81c\uc791\uc790: \uae40\uacbd\ubbfckdr", None))
        self.label_8.setText(QCoreApplication.translate("MainWindow", u"Github: https://github.com/kdrkdrkdr", None))
        self.label_9.setText(QCoreApplication.translate("MainWindow", u"Blog: https://blog.naver.com/powerapollon", None))
        self.label_10.setText(QCoreApplication.translate("MainWindow", u"E-Mail: <EMAIL>", None))
        self.developLabel.setText(QCoreApplication.translate("MainWindow", u"\uac1c\ubc1c\uc790 \uc815\ubcf4", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.settingsTab), QCoreApplication.translate("MainWindow", u"\uc124\uc815 \ubc0f \ub3c4\uc6c0\ub9d0", None))
    # retranslateUi
if __name__ == "__main__":
    # Manual smoke test: build the generated UI on a bare QMainWindow and run
    # the Qt event loop until the window is closed.
    # Fix: stripped the stray "| Ani24-Downloader/ani24ui.py" residue that had
    # been fused onto the final line, which made the module a SyntaxError.
    import sys

    app = QApplication(sys.argv)
    form = QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(form)
    form.show()
    sys.exit(app.exec_())
################################################################################
## Form generated from reading UI file 'main_ui.ui'
##
## Created by: Qt User Interface Compiler version 5.14.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import (QCoreApplication, QDate, QDateTime, QMetaObject,
QObject, QPoint, QRect, QSize, QTime, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont,
QFontDatabase, QIcon, QKeySequence, QLinearGradient, QPalette, QPainter,
QPixmap, QRadialGradient)
from PySide2.QtWidgets import *
class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        """Create all widgets, set their geometry/fonts, and wire the tabs.

        NOTE(review): this file contains two concatenated copies of the
        generated UI module; this second ``Ui_MainWindow`` definition shadows
        the first at import time — confirm and deduplicate upstream.
        """
        if not MainWindow.objectName():
            MainWindow.setObjectName(u"MainWindow")
        # Fixed-size window: min == max == 1329x851.
        MainWindow.setEnabled(True)
        MainWindow.resize(1329, 851)
        MainWindow.setMinimumSize(QSize(1329, 851))
        MainWindow.setMaximumSize(QSize(1329, 851))
        self.centralwidget = QWidget(MainWindow)
        self.centralwidget.setObjectName(u"centralwidget")
        # Static label above the tabs showing the two supported site URLs.
        self.urlInfo = QLabel(self.centralwidget)
        self.urlInfo.setObjectName(u"urlInfo")
        self.urlInfo.setGeometry(QRect(430, 10, 481, 16))
        self.tabWidget = QTabWidget(self.centralwidget)
        self.tabWidget.setObjectName(u"tabWidget")
        self.tabWidget.setGeometry(QRect(11, 40, 1310, 781))
        self.tabWidget.setMaximumSize(QSize(4564665, 781))
        # ---- Tab 1: anime search ----
        self.aniSearchTab = QWidget()
        self.aniSearchTab.setObjectName(u"aniSearchTab")
        self.aniSearchBox = QLineEdit(self.aniSearchTab)
        self.aniSearchBox.setObjectName(u"aniSearchBox")
        self.aniSearchBox.setGeometry(QRect(340, 31, 821, 31))
        # `font`: 10pt bold — reused by several widgets below.
        font = QFont()
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.aniSearchBox.setFont(font)
        self.aniSearchBtn = QPushButton(self.aniSearchTab)
        self.aniSearchBtn.setObjectName(u"aniSearchBtn")
        self.aniSearchBtn.setGeometry(QRect(1180, 31, 111, 31))
        # Frame holding the site-selection radio buttons (exclusive group).
        self.setSiteFrame = QFrame(self.aniSearchTab)
        self.setSiteFrame.setObjectName(u"setSiteFrame")
        self.setSiteFrame.setGeometry(QRect(10, 51, 301, 61))
        font1 = QFont()
        font1.setBold(False)
        font1.setWeight(50)
        self.setSiteFrame.setFont(font1)
        self.setSiteFrame.setAutoFillBackground(False)
        self.setSiteFrame.setStyleSheet(u"")
        self.setSiteFrame.setFrameShape(QFrame.Box)
        self.setSiteFrame.setFrameShadow(QFrame.Raised)
        self.setSiteFrame.setLineWidth(1)
        self.setSiteYaAni24rBtn = QRadioButton(self.setSiteFrame)
        self.setSiteBtnGroup = QButtonGroup(MainWindow)
        self.setSiteBtnGroup.setObjectName(u"setSiteBtnGroup")
        self.setSiteBtnGroup.addButton(self.setSiteYaAni24rBtn)
        self.setSiteYaAni24rBtn.setObjectName(u"setSiteYaAni24rBtn")
        self.setSiteYaAni24rBtn.setGeometry(QRect(160, 20, 91, 19))
        font2 = QFont()
        font2.setFamily(u"Arial")
        font2.setBold(True)
        font2.setWeight(75)
        self.setSiteYaAni24rBtn.setFont(font2)
        self.setSiteAni24rBtn = QRadioButton(self.setSiteFrame)
        self.setSiteBtnGroup.addButton(self.setSiteAni24rBtn)
        self.setSiteAni24rBtn.setObjectName(u"setSiteAni24rBtn")
        self.setSiteAni24rBtn.setGeometry(QRect(19, 20, 91, 19))
        font3 = QFont()
        font3.setFamily(u"Arial")
        font3.setPointSize(10)
        font3.setBold(True)
        font3.setWeight(75)
        self.setSiteAni24rBtn.setFont(font3)
        # Frame holding the search-mode radio buttons, one exclusive group.
        self.setWayFrame = QFrame(self.aniSearchTab)
        self.setWayFrame.setObjectName(u"setWayFrame")
        self.setWayFrame.setGeometry(QRect(10, 156, 301, 171))
        self.setWayFrame.setFrameShape(QFrame.Box)
        self.searchKeyWordRBtn = QRadioButton(self.setWayFrame)
        self.setSearchWayBtnGroup = QButtonGroup(MainWindow)
        self.setSearchWayBtnGroup.setObjectName(u"setSearchWayBtnGroup")
        self.setSearchWayBtnGroup.addButton(self.searchKeyWordRBtn)
        self.searchKeyWordRBtn.setObjectName(u"searchKeyWordRBtn")
        self.searchKeyWordRBtn.setGeometry(QRect(160, 20, 81, 21))
        self.searchKeyWordRBtn.setFont(font)
        self.completeRBtn = QRadioButton(self.setWayFrame)
        self.setSearchWayBtnGroup.addButton(self.completeRBtn)
        self.completeRBtn.setObjectName(u"completeRBtn")
        self.completeRBtn.setGeometry(QRect(19, 20, 61, 19))
        self.completeRBtn.setFont(font3)
        self.quarterTop20rBtn = QRadioButton(self.setWayFrame)
        self.setSearchWayBtnGroup.addButton(self.quarterTop20rBtn)
        self.quarterTop20rBtn.setObjectName(u"quarterTop20rBtn")
        self.quarterTop20rBtn.setGeometry(QRect(19, 100, 121, 19))
        self.quarterTop20rBtn.setFont(font)
        self.newTop20rBtn = QRadioButton(self.setWayFrame)
        self.setSearchWayBtnGroup.addButton(self.newTop20rBtn)
        self.newTop20rBtn.setObjectName(u"newTop20rBtn")
        self.newTop20rBtn.setGeometry(QRect(19, 60, 121, 19))
        self.newTop20rBtn.setFont(font)
        self.yearTop20rBtn = QRadioButton(self.setWayFrame)
        self.setSearchWayBtnGroup.addButton(self.yearTop20rBtn)
        self.yearTop20rBtn.setObjectName(u"yearTop20rBtn")
        self.yearTop20rBtn.setGeometry(QRect(160, 100, 121, 19))
        self.yearTop20rBtn.setFont(font)
        self.genreTop20rBtn = QRadioButton(self.setWayFrame)
        self.setSearchWayBtnGroup.addButton(self.genreTop20rBtn)
        self.genreTop20rBtn.setObjectName(u"genreTop20rBtn")
        self.genreTop20rBtn.setGeometry(QRect(160, 60, 121, 19))
        self.genreTop20rBtn.setFont(font)
        self.tasteRBtn = QRadioButton(self.setWayFrame)
        self.setSearchWayBtnGroup.addButton(self.tasteRBtn)
        self.tasteRBtn.setObjectName(u"tasteRBtn")
        self.tasteRBtn.setGeometry(QRect(19, 140, 108, 19))
        self.tasteRBtn.setFont(font3)
        # Frame holding the tag-filter combo boxes and their labels.
        self.setTagFrame = QFrame(self.aniSearchTab)
        self.setTagFrame.setObjectName(u"setTagFrame")
        self.setTagFrame.setGeometry(QRect(10, 372, 301, 371))
        self.setTagFrame.setFrameShape(QFrame.Box)
        self.clothesComboBox = QComboBox(self.setTagFrame)
        self.clothesComboBox.setObjectName(u"clothesComboBox")
        self.clothesComboBox.setGeometry(QRect(118, 219, 161, 21))
        self.quarterLabel = QLabel(self.setTagFrame)
        self.quarterLabel.setObjectName(u"quarterLabel")
        self.quarterLabel.setGeometry(QRect(17, 140, 41, 21))
        self.clothesLabel = QLabel(self.setTagFrame)
        self.clothesLabel.setObjectName(u"clothesLabel")
        self.clothesLabel.setGeometry(QRect(17, 219, 91, 21))
        self.playWayLabel = QLabel(self.setTagFrame)
        self.playWayLabel.setObjectName(u"playWayLabel")
        self.playWayLabel.setGeometry(QRect(17, 179, 91, 21))
        self.playWayComboBox = QComboBox(self.setTagFrame)
        self.playWayComboBox.setObjectName(u"playWayComboBox")
        self.playWayComboBox.setGeometry(QRect(118, 179, 161, 21))
        self.hairColorLabel = QLabel(self.setTagFrame)
        self.hairColorLabel.setObjectName(u"hairColorLabel")
        self.hairColorLabel.setGeometry(QRect(17, 299, 91, 21))
        self.producerComboBox = QComboBox(self.setTagFrame)
        self.producerComboBox.setObjectName(u"producerComboBox")
        self.producerComboBox.setGeometry(QRect(118, 59, 161, 21))
        self.hairStyleLabel = QLabel(self.setTagFrame)
        self.hairStyleLabel.setObjectName(u"hairStyleLabel")
        self.hairStyleLabel.setGeometry(QRect(17, 259, 91, 21))
        self.hairStyleComboBox = QComboBox(self.setTagFrame)
        self.hairStyleComboBox.setObjectName(u"hairStyleComboBox")
        self.hairStyleComboBox.setGeometry(QRect(118, 259, 161, 21))
        self.quarterComboBox = QComboBox(self.setTagFrame)
        self.quarterComboBox.setObjectName(u"quarterComboBox")
        self.quarterComboBox.setGeometry(QRect(118, 139, 161, 21))
        self.hairColorComboBox = QComboBox(self.setTagFrame)
        self.hairColorComboBox.setObjectName(u"hairColorComboBox")
        self.hairColorComboBox.setGeometry(QRect(118, 299, 161, 21))
        self.producerLabel = QLabel(self.setTagFrame)
        self.producerLabel.setObjectName(u"producerLabel")
        self.producerLabel.setGeometry(QRect(17, 60, 61, 20))
        self.yearLabel = QLabel(self.setTagFrame)
        self.yearLabel.setObjectName(u"yearLabel")
        self.yearLabel.setGeometry(QRect(17, 100, 41, 21))
        # NOTE(review): "gerne" looks like a typo of "genre" in the .ui file;
        # kept as-is because the object name is part of the generated API.
        self.gerneComboBox = QComboBox(self.setTagFrame)
        self.gerneComboBox.setObjectName(u"gerneComboBox")
        self.gerneComboBox.setGeometry(QRect(118, 19, 161, 21))
        self.yearComboBox = QComboBox(self.setTagFrame)
        self.yearComboBox.setObjectName(u"yearComboBox")
        self.yearComboBox.setGeometry(QRect(118, 99, 161, 21))
        self.genreLabel = QLabel(self.setTagFrame)
        self.genreLabel.setObjectName(u"genreLabel")
        self.genreLabel.setGeometry(QRect(17, 20, 41, 21))
        self.typeLabel = QLabel(self.setTagFrame)
        self.typeLabel.setObjectName(u"typeLabel")
        self.typeLabel.setGeometry(QRect(17, 339, 91, 21))
        self.typeComboBox = QComboBox(self.setTagFrame)
        self.typeComboBox.setObjectName(u"typeComboBox")
        self.typeComboBox.setGeometry(QRect(118, 339, 161, 21))
        self.aniSiteCheckLabel = QLabel(self.aniSearchTab)
        self.aniSiteCheckLabel.setObjectName(u"aniSiteCheckLabel")
        self.aniSiteCheckLabel.setGeometry(QRect(10, 31, 131, 16))
        # `font4`: bold, default size — used for the small section labels.
        font4 = QFont()
        font4.setBold(True)
        font4.setWeight(75)
        self.aniSiteCheckLabel.setFont(font4)
        self.aniSearchWayLabel = QLabel(self.aniSearchTab)
        self.aniSearchWayLabel.setObjectName(u"aniSearchWayLabel")
        self.aniSearchWayLabel.setGeometry(QRect(10, 136, 111, 16))
        self.aniSearchWayLabel.setFont(font4)
        self.aniSelectTagLabel = QLabel(self.aniSearchTab)
        self.aniSelectTagLabel.setObjectName(u"aniSelectTagLabel")
        self.aniSelectTagLabel.setGeometry(QRect(10, 354, 111, 16))
        font5 = QFont()
        font5.setBold(True)
        font5.setItalic(False)
        font5.setUnderline(False)
        font5.setWeight(75)
        font5.setStrikeOut(False)
        font5.setKerning(True)
        self.aniSelectTagLabel.setFont(font5)
        # Results table for the search tab.
        self.searchTable = QTableWidget(self.aniSearchTab)
        self.searchTable.setObjectName(u"searchTable")
        self.searchTable.setGeometry(QRect(340, 83, 951, 661))
        self.tabWidget.addTab(self.aniSearchTab, "")
        # ---- Tab 2: anime download ----
        self.aniDownloadTab = QWidget()
        self.aniDownloadTab.setObjectName(u"aniDownloadTab")
        self.aniEpiLinkBox = QLineEdit(self.aniDownloadTab)
        self.aniEpiLinkBox.setObjectName(u"aniEpiLinkBox")
        self.aniEpiLinkBox.setGeometry(QRect(10, 20, 1151, 31))
        self.aniEpiLinkBox.setFont(font)
        self.aniEpiSearchBtn = QPushButton(self.aniDownloadTab)
        self.aniEpiSearchBtn.setObjectName(u"aniEpiSearchBtn")
        self.aniEpiSearchBtn.setGeometry(QRect(1170, 20, 121, 31))
        self.aniEpiList = QListWidget(self.aniDownloadTab)
        self.aniEpiList.setObjectName(u"aniEpiList")
        self.aniEpiList.setGeometry(QRect(10, 100, 421, 641))
        self.aniEpiListLabel = QLabel(self.aniDownloadTab)
        self.aniEpiListLabel.setObjectName(u"aniEpiListLabel")
        self.aniEpiListLabel.setGeometry(QRect(10, 80, 121, 16))
        self.aniEpiListLabel.setFont(font4)
        self.checkAllBox = QCheckBox(self.aniDownloadTab)
        self.checkAllBox.setObjectName(u"checkAllBox")
        self.checkAllBox.setGeometry(QRect(343, 80, 91, 19))
        self.aniDownloadLog = QListWidget(self.aniDownloadTab)
        self.aniDownloadLog.setObjectName(u"aniDownloadLog")
        self.aniDownloadLog.setGeometry(QRect(570, 100, 721, 601))
        self.downloadProgressBar = QProgressBar(self.aniDownloadTab)
        self.downloadProgressBar.setObjectName(u"downloadProgressBar")
        self.downloadProgressBar.setGeometry(QRect(570, 717, 731, 23))
        self.downloadProgressBar.setValue(0)
        self.aniDownloadLogLabel = QLabel(self.aniDownloadTab)
        self.aniDownloadLogLabel.setObjectName(u"aniDownloadLogLabel")
        self.aniDownloadLogLabel.setGeometry(QRect(570, 80, 141, 16))
        self.aniDownloadLogLabel.setFont(font4)
        self.aniDownloadBtn = QPushButton(self.aniDownloadTab)
        self.aniDownloadBtn.setObjectName(u"aniDownloadBtn")
        self.aniDownloadBtn.setGeometry(QRect(440, 390, 121, 31))
        self.tabWidget.addTab(self.aniDownloadTab, "")
        # ---- Tab 3: settings / help / developer info ----
        self.settingsTab = QWidget()
        self.settingsTab.setObjectName(u"settingsTab")
        self.settingsFrame = QFrame(self.settingsTab)
        self.settingsFrame.setObjectName(u"settingsFrame")
        self.settingsFrame.setGeometry(QRect(10, 60, 431, 681))
        self.settingsFrame.setFrameShape(QFrame.Box)
        self.settingsFrame.setFrameShadow(QFrame.Raised)
        self.label = QLabel(self.settingsFrame)
        self.label.setObjectName(u"label")
        self.label.setGeometry(QRect(20, 0, 221, 101))
        self.loadNewInfo = QPushButton(self.settingsFrame)
        self.loadNewInfo.setObjectName(u"loadNewInfo")
        self.loadNewInfo.setGeometry(QRect(260, 40, 141, 28))
        self.helpFrame = QFrame(self.settingsTab)
        self.helpFrame.setObjectName(u"helpFrame")
        self.helpFrame.setGeometry(QRect(480, 60, 371, 681))
        self.helpFrame.setFrameShape(QFrame.Box)
        self.helpFrame.setFrameShadow(QFrame.Raised)
        self.help1Label = QLabel(self.helpFrame)
        self.help1Label.setObjectName(u"help1Label")
        self.help1Label.setGeometry(QRect(20, 30, 301, 91))
        self.help1Label.setFrameShape(QFrame.StyledPanel)
        self.help1Label.setTextFormat(Qt.AutoText)
        self.help1Label.setWordWrap(False)
        self.settingsLabel = QLabel(self.settingsTab)
        self.settingsLabel.setObjectName(u"settingsLabel")
        self.settingsLabel.setGeometry(QRect(14, 30, 41, 31))
        # `font6`: 12pt bold — section headers on the settings tab.
        font6 = QFont()
        font6.setPointSize(12)
        font6.setBold(True)
        font6.setWeight(75)
        self.settingsLabel.setFont(font6)
        self.helpLabel = QLabel(self.settingsTab)
        self.helpLabel.setObjectName(u"helpLabel")
        self.helpLabel.setGeometry(QRect(480, 30, 71, 31))
        self.helpLabel.setFont(font6)
        self.developFrame = QFrame(self.settingsTab)
        self.developFrame.setObjectName(u"developFrame")
        self.developFrame.setGeometry(QRect(890, 58, 391, 681))
        self.developFrame.setFrameShape(QFrame.Box)
        self.developFrame.setFrameShadow(QFrame.Raised)
        self.label_7 = QLabel(self.developFrame)
        self.label_7.setObjectName(u"label_7")
        self.label_7.setGeometry(QRect(30, 40, 161, 41))
        self.label_7.setFont(font)
        self.label_8 = QLabel(self.developFrame)
        self.label_8.setObjectName(u"label_8")
        self.label_8.setGeometry(QRect(30, 90, 281, 41))
        self.label_8.setFont(font4)
        self.label_9 = QLabel(self.developFrame)
        self.label_9.setObjectName(u"label_9")
        self.label_9.setGeometry(QRect(30, 140, 331, 41))
        self.label_9.setFont(font4)
        self.label_10 = QLabel(self.developFrame)
        self.label_10.setObjectName(u"label_10")
        self.label_10.setGeometry(QRect(30, 190, 331, 41))
        self.label_10.setFont(font4)
        self.developLabel = QLabel(self.settingsTab)
        self.developLabel.setObjectName(u"developLabel")
        self.developLabel.setGeometry(QRect(892, 30, 121, 31))
        self.developLabel.setFont(font6)
        self.tabWidget.addTab(self.settingsTab, "")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QStatusBar(MainWindow)
        self.statusbar.setObjectName(u"statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        # Download tab (index 1) is the one shown on startup.
        self.tabWidget.setCurrentIndex(1)
        QMetaObject.connectSlotsByName(MainWindow)
    # setupUi
def retranslateUi(self, MainWindow):
    """Apply translated, human-readable text to every widget.

    Auto-generated by Qt Designer / pyside2-uic from a .ui file; the
    ``\\uXXXX`` escapes are Korean UI labels.  Do not edit by hand —
    regenerate from the .ui file instead.
    """
    MainWindow.setWindowTitle(QCoreApplication.translate("MainWindow", u"MainWindow", None))
    self.urlInfo.setText(QCoreApplication.translate("MainWindow", u"\uc560\ub2c824: https://ani24do.com \uc57c\uc560\ub2c824: https://yaani24.net", None))
    self.aniSearchBtn.setText(QCoreApplication.translate("MainWindow", u"\uc560\ub2c8 \uac80\uc0c9", None))
    self.setSiteYaAni24rBtn.setText(QCoreApplication.translate("MainWindow", u"\uc57c\uc560\ub2c824", None))
    self.setSiteAni24rBtn.setText(QCoreApplication.translate("MainWindow", u"\uc560\ub2c824", None))
    self.searchKeyWordRBtn.setText(QCoreApplication.translate("MainWindow", u"\ud0a4\uc6cc\ub4dc", None))
    self.completeRBtn.setText(QCoreApplication.translate("MainWindow", u"\uc644\uacb0", None))
    self.quarterTop20rBtn.setText(QCoreApplication.translate("MainWindow", u"\ubd84\uae30 Top20", None))
    self.newTop20rBtn.setText(QCoreApplication.translate("MainWindow", u"\uc2e0\uc791 Top20", None))
    self.yearTop20rBtn.setText(QCoreApplication.translate("MainWindow", u"\uc62c\ud574 Top20", None))
    self.genreTop20rBtn.setText(QCoreApplication.translate("MainWindow", u"\uc7a5\ub974 Top20", None))
    self.tasteRBtn.setText(QCoreApplication.translate("MainWindow", u"\ucde8\ud5a5 \ub9de\ucda4", None))
    self.quarterLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\ubd84\uae30</span></p></body></html>", None))
    self.clothesLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\uc637</span></p></body></html>", None))
    self.playWayLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\ud50c\ub808\uc774\ubc29\uc2dd</span></p></body></html>", None))
    self.hairColorLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\uba38\ub9ac\uc0c9</span></p></body></html>", None))
    self.hairStyleLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\uba38\ub9ac\uc2a4\ud0c0\uc77c</span></p></body></html>", None))
    self.producerLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\uc81c\uc791\uc0ac</span></p></body></html>", None))
    self.yearLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\uc5f0\ub3c4</span></p></body></html>", None))
    self.genreLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\uc7a5\ub974</span></p></body></html>", None))
    self.typeLabel.setText(QCoreApplication.translate("MainWindow", u"<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">\ud0c0\uc785</span></p></body></html>", None))
    self.aniSiteCheckLabel.setText(QCoreApplication.translate("MainWindow", u"\uac80\uc0c9 \uc0ac\uc774\ud2b8 \uc120\ud0dd", None))
    self.aniSearchWayLabel.setText(QCoreApplication.translate("MainWindow", u"\uac80\uc0c9 \ubc29\ubc95 \uc120\ud0dd", None))
    self.aniSelectTagLabel.setText(QCoreApplication.translate("MainWindow", u"\uac80\uc0c9 \ud0dc\uadf8 \uc120\ud0dd", None))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.aniSearchTab), QCoreApplication.translate("MainWindow", u"\uc560\ub2c8 \uac80\uc0c9", None))
    self.aniEpiSearchBtn.setText(QCoreApplication.translate("MainWindow", u"\uc560\ub2c8 \ud69f\ucc28 \uac80\uc0c9", None))
    self.aniEpiListLabel.setText(QCoreApplication.translate("MainWindow", u"\uc560\ub2c8 \ud69f\ucc28 \ub9ac\uc2a4\ud2b8", None))
    self.checkAllBox.setText(QCoreApplication.translate("MainWindow", u"\uc804\uccb4 \uc120\ud0dd", None))
    self.aniDownloadLogLabel.setText(QCoreApplication.translate("MainWindow", u"\uc560\ub2c8 \ub2e4\uc6b4\ub85c\ub4dc \ub85c\uadf8", None))
    self.aniDownloadBtn.setText(QCoreApplication.translate("MainWindow", u"\uc560\ub2c8 \ub2e4\uc6b4\ub85c\ub4dc", None))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.aniDownloadTab), QCoreApplication.translate("MainWindow", u"\uc560\ub2c8 \ub2e4\uc6b4\ub85c\ub4dc", None))
    self.label.setText(QCoreApplication.translate("MainWindow", u"Ani24 Info File\uc744 \uac00\uc838\uc635\ub2c8\ub2e4.\n"
        "\ucc98\uc74c\uc774\ub098 \uc5c5\ub370\uc774\ud2b8 \ud558\uace0 \uc2f6\uc744\ub54c \n"
        "\ubd88\ub7ec\uc624\uc2dc\uba74 \ub429\ub2c8\ub2e4.", None))
    self.loadNewInfo.setText(QCoreApplication.translate("MainWindow", u"\uc0c8 \uc815\ubcf4 \ubd88\ub7ec\uc624\uae30", None))
    self.help1Label.setText(QCoreApplication.translate("MainWindow", u"[\uac80\uc0c9 \ubc29\ubc95 \uc124\uba85]\n"
        "\ud0a4\uc6cc\ub4dc\ub294 \uc606 \uac80\uc0c9\ubc15\uc2a4\uc5d0 \uac80\uc0c9\uc5b4\ub97c\n"
        "\uc785\ub825\ud558\uba74 \ub429\ub2c8\ub2e4.\n"
        "\uadf8\ub9ac\uace0 \uac80\uc0c9\ubc84\ud2bc\uc740 \uac80\uc0c9\ubc15\uc2a4 \uc606\n"
        "[\uc560\ub2c8 \uac80\uc0c9] \ubc84\ud2bc\uc73c\ub85c \ud1b5\uc77c\ub418\uc5b4\uc788\uc2b5\ub2c8\ub2e4.", None))
    self.settingsLabel.setText(QCoreApplication.translate("MainWindow", u"\uc124\uc815", None))
    self.helpLabel.setText(QCoreApplication.translate("MainWindow", u"\ub3c4\uc6c0\ub9d0", None))
    self.label_7.setText(QCoreApplication.translate("MainWindow", u"\uc81c\uc791\uc790: \uae40\uacbd\ubbfckdr", None))
    self.label_8.setText(QCoreApplication.translate("MainWindow", u"Github: https://github.com/kdrkdrkdr", None))
    self.label_9.setText(QCoreApplication.translate("MainWindow", u"Blog: https://blog.naver.com/powerapollon", None))
    self.label_10.setText(QCoreApplication.translate("MainWindow", u"E-Mail: <EMAIL>", None))
    self.developLabel.setText(QCoreApplication.translate("MainWindow", u"\uac1c\ubc1c\uc790 \uc815\ubcf4", None))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.settingsTab), QCoreApplication.translate("MainWindow", u"\uc124\uc815 \ubc0f \ub3c4\uc6c0\ub9d0", None))
# retranslateUi
if __name__ == "__main__":
    # Manual smoke test: build the main window and run the Qt event loop.
    import sys

    app = QApplication(sys.argv)
    form = QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(form)
    form.show()
    # Fix: the original line carried a stray data-dump artifact
    # ("| 0.348645 | 0.075517 |") fused after exec_(), which is a syntax
    # error.  exec_() returns the event-loop exit code; propagate it.
    sys.exit(app.exec_())
from __future__ import print_function
import os
import subprocess
import sys
if sys.platform.startswith('java'):
from java.awt import Toolkit, Robot, Rectangle
from javax.imageio import ImageIO
from java.io import File
elif sys.platform == 'cli':
import clr
clr.AddReference('System.Windows.Forms')
clr.AddReference('System.Drawing')
from System.Drawing import Bitmap, Graphics, Imaging
from System.Windows.Forms import Screen
else:
try:
import wx
except ImportError:
wx = None
try:
from gtk import gdk
except ImportError:
gdk = None
try:
from PIL import ImageGrab # apparently available only on Windows
except ImportError:
ImageGrab = None
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from robot.version import get_version
from robot.utils import abspath, get_error_message, get_link_path, py2to3
class Screenshot(object):
    """Test library for taking screenshots on the machine where tests are run.

    Notice that successfully taking screenshots requires tests to be run with
    a physical or virtual display.

    = Using with Python =

    How screenshots are taken when using Python depends on the operating
    system. On OSX screenshots are taken using the built-in ``screencapture``
    utility. On other operating systems you need to have one of the following
    tools or Python modules installed. You can specify the tool/module to use
    when `importing` the library. If no tool or module is specified, the first
    one found will be used.

    - wxPython :: http://wxpython.org :: Required also by RIDE so many Robot
      Framework users already have this module installed.
    - PyGTK :: http://pygtk.org :: This module is available by default on most
      Linux distributions.
    - Pillow :: http://python-pillow.github.io ::
      Only works on Windows. Also the original PIL package is supported.
    - Scrot :: http://en.wikipedia.org/wiki/Scrot :: Not used on Windows.
      Install with ``apt-get install scrot`` or similar.

    Using ``screencapture`` on OSX and specifying explicit screenshot module
    are new in Robot Framework 2.9.2. The support for using ``scrot`` is new
    in Robot Framework 3.0.

    = Using with Jython and IronPython =

    With Jython and IronPython this library uses APIs provided by JVM and .NET
    platforms, respectively. These APIs are always available and thus no
    external modules are needed.

    = Where screenshots are saved =

    By default screenshots are saved into the same directory where the Robot
    Framework log file is written. If no log is created, screenshots are saved
    into the directory where the XML output file is written.

    It is possible to specify a custom location for screenshots using
    ``screenshot_directory`` argument when `importing` the library and
    using `Set Screenshot Directory` keyword during execution. It is also
    possible to save screenshots using an absolute path.

    = ScreenCapLibrary =

    [https://github.com/mihaiparvu/ScreenCapLibrary|ScreenCapLibrary] is an
    external Robot Framework library that can be used as an alternative,
    which additionally provides support for multiple formats, adjusting the
    quality, using GIFs and video capturing.
    """
    ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
    ROBOT_LIBRARY_VERSION = get_version()

    def __init__(self, screenshot_directory=None, screenshot_module=None):
        """Configure where screenshots are saved.

        If ``screenshot_directory`` is not given, screenshots are saved into
        same directory as the log file. The directory can also be set using
        `Set Screenshot Directory` keyword.

        ``screenshot_module`` specifies the module or tool to use when using
        this library on Python outside OSX. Possible values are ``wxPython``,
        ``PyGTK``, ``PIL`` and ``scrot``, case-insensitively. If no value is
        given, the first module/tool found is used in that order. See `Using
        with Python` for more information.

        Examples (use only one of these):
        | =Setting= | =Value= | =Value= |
        | Library | Screenshot | |
        | Library | Screenshot | ${TEMPDIR} |
        | Library | Screenshot | screenshot_module=PyGTK |

        Specifying explicit screenshot module is new in Robot Framework 2.9.2.
        """
        self._given_screenshot_dir = self._norm_path(screenshot_directory)
        self._screenshot_taker = ScreenshotTaker(screenshot_module)

    def _norm_path(self, path):
        # Allow '/' as a path separator on every OS; None/'' passes through
        # unchanged so callers can treat "not configured" uniformly.
        if not path:
            return path
        return os.path.normpath(path.replace('/', os.sep))

    @property
    def _screenshot_dir(self):
        # Explicitly configured directory wins; otherwise fall back to the
        # directory where the log file is written.
        return self._given_screenshot_dir or self._log_dir

    @property
    def _log_dir(self):
        # Derive the log directory from Robot's automatic variables. When no
        # log file is generated (${LOGFILE} == 'NONE'), use the output dir.
        variables = BuiltIn().get_variables()
        outdir = variables['${OUTPUTDIR}']
        log = variables['${LOGFILE}']
        log = os.path.dirname(log) if log != 'NONE' else '.'
        return self._norm_path(os.path.join(outdir, log))

    def set_screenshot_directory(self, path):
        """Sets the directory where screenshots are saved.

        It is possible to use ``/`` as a path separator in all operating
        systems. Path to the old directory is returned.

        The directory can also be set in `importing`.
        """
        path = self._norm_path(path)
        if not os.path.isdir(path):
            raise RuntimeError("Directory '%s' does not exist." % path)
        old = self._screenshot_dir
        self._given_screenshot_dir = path
        return old

    def take_screenshot(self, name="screenshot", width="800px"):
        """Takes a screenshot in JPEG format and embeds it into the log file.

        Name of the file where the screenshot is stored is derived from the
        given ``name``. If the ``name`` ends with extension ``.jpg`` or
        ``.jpeg``, the screenshot will be stored with that exact name.
        Otherwise a unique name is created by adding an underscore, a running
        index and an extension to the ``name``.

        The name will be interpreted to be relative to the directory where
        the log file is written. It is also possible to use absolute paths.
        Using ``/`` as a path separator works in all operating systems.

        ``width`` specifies the size of the screenshot in the log file.

        Examples: (LOGDIR is determined automatically by the library)
        | Take Screenshot | | | # LOGDIR/screenshot_1.jpg (index automatically incremented) |
        | Take Screenshot | mypic | | # LOGDIR/mypic_1.jpg (index automatically incremented) |
        | Take Screenshot | ${TEMPDIR}/mypic | | # /tmp/mypic_1.jpg (index automatically incremented) |
        | Take Screenshot | pic.jpg | | # LOGDIR/pic.jpg (always uses this file) |
        | Take Screenshot | images/login.jpg | 80% | # Specify both name and width. |
        | Take Screenshot | width=550px | | # Specify only width. |

        The path where the screenshot is saved is returned.
        """
        path = self._save_screenshot(name)
        self._embed_screenshot(path, width)
        return path

    def take_screenshot_without_embedding(self, name="screenshot"):
        """Takes a screenshot and links it from the log file.

        This keyword is otherwise identical to `Take Screenshot` but the saved
        screenshot is not embedded into the log file. The screenshot is linked
        so it is nevertheless easily available.
        """
        path = self._save_screenshot(name)
        self._link_screenshot(path)
        return path

    def _save_screenshot(self, basename, directory=None):
        path = self._get_screenshot_path(basename, directory)
        return self._screenshot_to_file(path)

    def _screenshot_to_file(self, path):
        path = self._validate_screenshot_path(path)
        logger.debug('Using %s module/tool for taking screenshot.'
                     % self._screenshot_taker.module)
        try:
            self._screenshot_taker(path)
        except Exception:
            # Fix: was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt.  Taking a screenshot is best-effort, so any
            # ordinary failure is only logged as a warning.
            logger.warn('Taking screenshot failed: %s\n'
                        'Make sure tests are run with a physical or virtual '
                        'display.' % get_error_message())
        return path

    def _validate_screenshot_path(self, path):
        path = abspath(self._norm_path(path))
        if not os.path.exists(os.path.dirname(path)):
            raise RuntimeError("Directory '%s' where to save the screenshot "
                               "does not exist" % os.path.dirname(path))
        return path

    def _get_screenshot_path(self, basename, directory):
        directory = self._norm_path(directory) if directory else self._screenshot_dir
        if basename.lower().endswith(('.jpg', '.jpeg')):
            # Explicit extension -> always (re)use exactly this file.
            return os.path.join(directory, basename)
        # Otherwise generate the first unused 'basename_<index>.jpg' name.
        index = 0
        while True:
            index += 1
            path = os.path.join(directory, "%s_%d.jpg" % (basename, index))
            if not os.path.exists(path):
                return path

    def _embed_screenshot(self, path, width):
        link = get_link_path(path, self._log_dir)
        logger.info('<a href="%s"><img src="%s" width="%s"></a>'
                    % (link, link, width), html=True)

    def _link_screenshot(self, path):
        link = get_link_path(path, self._log_dir)
        logger.info("Screenshot saved to '<a href=\"%s\">%s</a>'."
                    % (link, path), html=True)
@py2to3
class ScreenshotTaker(object):
    """Resolve and invoke the platform/module specific screenshot function."""

    def __init__(self, module_name=None):
        self._screenshot = self._get_screenshot_taker(module_name)
        # Taker methods are named '_<module>_screenshot'; the middle token
        # identifies the mechanism in use ('java', 'wx', 'scrot', 'no', ...).
        self.module = self._screenshot.__name__.split('_')[1]
        self._wx_app_reference = None

    def __call__(self, path):
        self._screenshot(path)

    def __nonzero__(self):
        # Truthy when a real screenshot mechanism was found. The py2to3
        # decorator (robot.utils) is expected to map this to __bool__ on
        # Python 3 -- defined elsewhere; verify if changing.
        return self.module != 'no'

    def test(self, path=None):
        """Manual self-test helper: prints status, returns True on success."""
        if not self:
            print("Cannot take screenshots.")
            return False
        print("Using '%s' to take screenshot." % self.module)
        if not path:
            print("Not taking test screenshot.")
            return True
        print("Taking test screenshot to '%s'." % path)
        try:
            self(path)
        except Exception:
            # Fix: was a bare 'except:'; narrowed to Exception so Ctrl-C
            # (KeyboardInterrupt) and SystemExit still propagate.
            print("Failed: %s" % get_error_message())
            return False
        else:
            print("Success!")
            return True

    def _get_screenshot_taker(self, module_name=None):
        # Platform dictates the mechanism on Jython/IronPython/OSX; elsewhere
        # an explicitly named module wins, then the first available default.
        if sys.platform.startswith('java'):
            return self._java_screenshot
        if sys.platform == 'cli':
            return self._cli_screenshot
        if sys.platform == 'darwin':
            return self._osx_screenshot
        if module_name:
            return self._get_named_screenshot_taker(module_name.lower())
        return self._get_default_screenshot_taker()

    def _get_named_screenshot_taker(self, name):
        # Each entry: (availability flag/module, taker method).
        screenshot_takers = {'wxpython': (wx, self._wx_screenshot),
                             'pygtk': (gdk, self._gtk_screenshot),
                             'pil': (ImageGrab, self._pil_screenshot),
                             'scrot': (self._scrot, self._scrot_screenshot)}
        if name not in screenshot_takers:
            raise RuntimeError("Invalid screenshot module or tool '%s'." % name)
        supported, screenshot_taker = screenshot_takers[name]
        if not supported:
            raise RuntimeError("Screenshot module or tool '%s' not installed."
                               % name)
        return screenshot_taker

    def _get_default_screenshot_taker(self):
        # Priority order; the final (True, _no_screenshot) entry guarantees a
        # fallback that raises a helpful error when nothing is available.
        for module, screenshot_taker in [(wx, self._wx_screenshot),
                                         (gdk, self._gtk_screenshot),
                                         (ImageGrab, self._pil_screenshot),
                                         (self._scrot, self._scrot_screenshot),
                                         (True, self._no_screenshot)]:
            if module:
                return screenshot_taker

    def _java_screenshot(self, path):
        size = Toolkit.getDefaultToolkit().getScreenSize()
        rectangle = Rectangle(0, 0, size.width, size.height)
        image = Robot().createScreenCapture(rectangle)
        ImageIO.write(image, 'jpg', File(path))

    def _cli_screenshot(self, path):
        bmp = Bitmap(Screen.PrimaryScreen.Bounds.Width,
                     Screen.PrimaryScreen.Bounds.Height)
        graphics = Graphics.FromImage(bmp)
        try:
            graphics.CopyFromScreen(0, 0, 0, 0, bmp.Size)
        finally:
            graphics.Dispose()
        bmp.Save(path, Imaging.ImageFormat.Jpeg)

    def _osx_screenshot(self, path):
        if self._call('screencapture', '-t', 'jpg', path) != 0:
            raise RuntimeError("Using 'screencapture' failed.")

    def _call(self, *command):
        try:
            return subprocess.call(command, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        except OSError:
            # Command not found -> non-zero sentinel, treated as failure.
            return -1

    @property
    def _scrot(self):
        # 'scrot' is only considered on POSIX systems, and only when runnable.
        return os.sep == '/' and self._call('scrot', '--version') == 0

    def _scrot_screenshot(self, path):
        if not path.endswith(('.jpg', '.jpeg')):
            raise RuntimeError("Scrot requires extension to be '.jpg' or "
                               "'.jpeg', got '%s'." % os.path.splitext(path)[1])
        if self._call('scrot', '--silent', path) != 0:
            raise RuntimeError("Using 'scrot' failed.")

    def _wx_screenshot(self, path):
        if not self._wx_app_reference:
            # wx needs a living App object; keep a reference so it is not
            # garbage collected between screenshots.
            self._wx_app_reference = wx.App(False)
        context = wx.ScreenDC()
        width, height = context.GetSize()
        # NOTE(review): lexicographic version compare; fine for wx 1-9.
        if wx.__version__ >= '4':
            bitmap = wx.Bitmap(width, height, -1)
        else:
            bitmap = wx.EmptyBitmap(width, height, -1)
        memory = wx.MemoryDC()
        memory.SelectObject(bitmap)
        memory.Blit(0, 0, width, height, context, -1, -1)
        memory.SelectObject(wx.NullBitmap)
        bitmap.SaveFile(path, wx.BITMAP_TYPE_JPEG)

    def _gtk_screenshot(self, path):
        window = gdk.get_default_root_window()
        if not window:
            raise RuntimeError('Taking screenshot failed.')
        width, height = window.get_size()
        pb = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, width, height)
        pb = pb.get_from_drawable(window, window.get_colormap(),
                                  0, 0, 0, 0, width, height)
        if not pb:
            raise RuntimeError('Taking screenshot failed.')
        pb.save(path, 'jpeg')

    def _pil_screenshot(self, path):
        ImageGrab.grab().save(path, 'JPEG')

    def _no_screenshot(self, path):
        raise RuntimeError('Taking screenshots is not supported on this platform '
                           'by default. See library documentation for details.')
if __name__ == "__main__":
    # Command-line self test:  Screenshot.py <path>|test [wx|pygtk|pil|scrot]
    # Fix: the original last line carried a stray data-dump artifact
    # ("| src/robot/libraries/Screenshot.py |") fused after the call, which
    # is a syntax error; it has been removed.
    if len(sys.argv) not in [2, 3]:
        sys.exit("Usage: %s <path>|test [wx|pygtk|pil|scrot]"
                 % os.path.basename(sys.argv[0]))
    path = sys.argv[1] if sys.argv[1] != 'test' else None
    module = sys.argv[2] if len(sys.argv) > 2 else None
    ScreenshotTaker(module).test(path)
from __future__ import print_function
import os
import subprocess
import sys
if sys.platform.startswith('java'):
from java.awt import Toolkit, Robot, Rectangle
from javax.imageio import ImageIO
from java.io import File
elif sys.platform == 'cli':
import clr
clr.AddReference('System.Windows.Forms')
clr.AddReference('System.Drawing')
from System.Drawing import Bitmap, Graphics, Imaging
from System.Windows.Forms import Screen
else:
try:
import wx
except ImportError:
wx = None
try:
from gtk import gdk
except ImportError:
gdk = None
try:
from PIL import ImageGrab # apparently available only on Windows
except ImportError:
ImageGrab = None
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from robot.version import get_version
from robot.utils import abspath, get_error_message, get_link_path, py2to3
class Screenshot(object):
    """Test library for taking screenshots on the machine where tests are run.

    Notice that successfully taking screenshots requires tests to be run with
    a physical or virtual display.

    = Using with Python =

    How screenshots are taken when using Python depends on the operating
    system. On OSX screenshots are taken using the built-in ``screencapture``
    utility. On other operating systems you need to have one of the following
    tools or Python modules installed. You can specify the tool/module to use
    when `importing` the library. If no tool or module is specified, the first
    one found will be used.

    - wxPython :: http://wxpython.org :: Required also by RIDE so many Robot
      Framework users already have this module installed.
    - PyGTK :: http://pygtk.org :: This module is available by default on most
      Linux distributions.
    - Pillow :: http://python-pillow.github.io ::
      Only works on Windows. Also the original PIL package is supported.
    - Scrot :: http://en.wikipedia.org/wiki/Scrot :: Not used on Windows.
      Install with ``apt-get install scrot`` or similar.

    Using ``screencapture`` on OSX and specifying explicit screenshot module
    are new in Robot Framework 2.9.2. The support for using ``scrot`` is new
    in Robot Framework 3.0.

    = Using with Jython and IronPython =

    With Jython and IronPython this library uses APIs provided by JVM and .NET
    platforms, respectively. These APIs are always available and thus no
    external modules are needed.

    = Where screenshots are saved =

    By default screenshots are saved into the same directory where the Robot
    Framework log file is written. If no log is created, screenshots are saved
    into the directory where the XML output file is written.

    It is possible to specify a custom location for screenshots using
    ``screenshot_directory`` argument when `importing` the library and
    using `Set Screenshot Directory` keyword during execution. It is also
    possible to save screenshots using an absolute path.

    = ScreenCapLibrary =

    [https://github.com/mihaiparvu/ScreenCapLibrary|ScreenCapLibrary] is an
    external Robot Framework library that can be used as an alternative,
    which additionally provides support for multiple formats, adjusting the
    quality, using GIFs and video capturing.
    """
    ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
    ROBOT_LIBRARY_VERSION = get_version()

    def __init__(self, screenshot_directory=None, screenshot_module=None):
        """Configure where screenshots are saved.

        If ``screenshot_directory`` is not given, screenshots are saved into
        same directory as the log file. The directory can also be set using
        `Set Screenshot Directory` keyword.

        ``screenshot_module`` specifies the module or tool to use when using
        this library on Python outside OSX. Possible values are ``wxPython``,
        ``PyGTK``, ``PIL`` and ``scrot``, case-insensitively. If no value is
        given, the first module/tool found is used in that order. See `Using
        with Python` for more information.

        Examples (use only one of these):
        | =Setting= | =Value= | =Value= |
        | Library | Screenshot | |
        | Library | Screenshot | ${TEMPDIR} |
        | Library | Screenshot | screenshot_module=PyGTK |

        Specifying explicit screenshot module is new in Robot Framework 2.9.2.
        """
        self._given_screenshot_dir = self._norm_path(screenshot_directory)
        self._screenshot_taker = ScreenshotTaker(screenshot_module)

    def _norm_path(self, path):
        # Allow '/' as a path separator on every OS; None/'' passes through.
        if not path:
            return path
        return os.path.normpath(path.replace('/', os.sep))

    @property
    def _screenshot_dir(self):
        # Explicitly configured directory wins; else the log file directory.
        return self._given_screenshot_dir or self._log_dir

    @property
    def _log_dir(self):
        # Derive the log directory from Robot's automatic variables. When no
        # log file is generated (${LOGFILE} == 'NONE'), use the output dir.
        variables = BuiltIn().get_variables()
        outdir = variables['${OUTPUTDIR}']
        log = variables['${LOGFILE}']
        log = os.path.dirname(log) if log != 'NONE' else '.'
        return self._norm_path(os.path.join(outdir, log))

    def set_screenshot_directory(self, path):
        """Sets the directory where screenshots are saved.

        It is possible to use ``/`` as a path separator in all operating
        systems. Path to the old directory is returned.

        The directory can also be set in `importing`.
        """
        path = self._norm_path(path)
        if not os.path.isdir(path):
            raise RuntimeError("Directory '%s' does not exist." % path)
        old = self._screenshot_dir
        self._given_screenshot_dir = path
        return old

    def take_screenshot(self, name="screenshot", width="800px"):
        """Takes a screenshot in JPEG format and embeds it into the log file.

        Name of the file where the screenshot is stored is derived from the
        given ``name``. If the ``name`` ends with extension ``.jpg`` or
        ``.jpeg``, the screenshot will be stored with that exact name.
        Otherwise a unique name is created by adding an underscore, a running
        index and an extension to the ``name``.

        The name will be interpreted to be relative to the directory where
        the log file is written. It is also possible to use absolute paths.
        Using ``/`` as a path separator works in all operating systems.

        ``width`` specifies the size of the screenshot in the log file.

        Examples: (LOGDIR is determined automatically by the library)
        | Take Screenshot | | | # LOGDIR/screenshot_1.jpg (index automatically incremented) |
        | Take Screenshot | mypic | | # LOGDIR/mypic_1.jpg (index automatically incremented) |
        | Take Screenshot | ${TEMPDIR}/mypic | | # /tmp/mypic_1.jpg (index automatically incremented) |
        | Take Screenshot | pic.jpg | | # LOGDIR/pic.jpg (always uses this file) |
        | Take Screenshot | images/login.jpg | 80% | # Specify both name and width. |
        | Take Screenshot | width=550px | | # Specify only width. |

        The path where the screenshot is saved is returned.
        """
        path = self._save_screenshot(name)
        self._embed_screenshot(path, width)
        return path

    def take_screenshot_without_embedding(self, name="screenshot"):
        """Takes a screenshot and links it from the log file.

        This keyword is otherwise identical to `Take Screenshot` but the saved
        screenshot is not embedded into the log file. The screenshot is linked
        so it is nevertheless easily available.
        """
        path = self._save_screenshot(name)
        self._link_screenshot(path)
        return path

    def _save_screenshot(self, basename, directory=None):
        path = self._get_screenshot_path(basename, directory)
        return self._screenshot_to_file(path)

    def _screenshot_to_file(self, path):
        path = self._validate_screenshot_path(path)
        logger.debug('Using %s module/tool for taking screenshot.'
                     % self._screenshot_taker.module)
        try:
            self._screenshot_taker(path)
        # NOTE(review): bare except also traps SystemExit/KeyboardInterrupt;
        # consider narrowing to 'except Exception:'.
        except:
            logger.warn('Taking screenshot failed: %s\n'
                        'Make sure tests are run with a physical or virtual '
                        'display.' % get_error_message())
        return path

    def _validate_screenshot_path(self, path):
        path = abspath(self._norm_path(path))
        if not os.path.exists(os.path.dirname(path)):
            raise RuntimeError("Directory '%s' where to save the screenshot "
                               "does not exist" % os.path.dirname(path))
        return path

    def _get_screenshot_path(self, basename, directory):
        directory = self._norm_path(directory) if directory else self._screenshot_dir
        if basename.lower().endswith(('.jpg', '.jpeg')):
            # Explicit extension -> always (re)use exactly this file.
            return os.path.join(directory, basename)
        # Otherwise generate the first unused 'basename_<index>.jpg' name.
        index = 0
        while True:
            index += 1
            path = os.path.join(directory, "%s_%d.jpg" % (basename, index))
            if not os.path.exists(path):
                return path

    def _embed_screenshot(self, path, width):
        link = get_link_path(path, self._log_dir)
        logger.info('<a href="%s"><img src="%s" width="%s"></a>'
                    % (link, link, width), html=True)

    def _link_screenshot(self, path):
        link = get_link_path(path, self._log_dir)
        logger.info("Screenshot saved to '<a href=\"%s\">%s</a>'."
                    % (link, path), html=True)
@py2to3
class ScreenshotTaker(object):
    """Resolve and invoke the platform/module specific screenshot function."""

    def __init__(self, module_name=None):
        self._screenshot = self._get_screenshot_taker(module_name)
        # Taker methods are named '_<module>_screenshot'; the middle token
        # identifies the mechanism in use ('java', 'wx', 'scrot', 'no', ...).
        self.module = self._screenshot.__name__.split('_')[1]
        self._wx_app_reference = None

    def __call__(self, path):
        self._screenshot(path)

    def __nonzero__(self):
        # Truthy when a real screenshot mechanism was found. The py2to3
        # decorator (robot.utils) presumably aliases this to __bool__ on
        # Python 3 -- defined elsewhere; verify before changing.
        return self.module != 'no'

    def test(self, path=None):
        """Manual self-test helper: prints status, returns True on success."""
        if not self:
            print("Cannot take screenshots.")
            return False
        print("Using '%s' to take screenshot." % self.module)
        if not path:
            print("Not taking test screenshot.")
            return True
        print("Taking test screenshot to '%s'." % path)
        try:
            self(path)
        # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit.
        except:
            print("Failed: %s" % get_error_message())
            return False
        else:
            print("Success!")
            return True

    def _get_screenshot_taker(self, module_name=None):
        # Platform dictates the mechanism on Jython/IronPython/OSX; elsewhere
        # an explicitly named module wins, then the first available default.
        if sys.platform.startswith('java'):
            return self._java_screenshot
        if sys.platform == 'cli':
            return self._cli_screenshot
        if sys.platform == 'darwin':
            return self._osx_screenshot
        if module_name:
            return self._get_named_screenshot_taker(module_name.lower())
        return self._get_default_screenshot_taker()

    def _get_named_screenshot_taker(self, name):
        # Each entry: (availability flag/module, taker method).
        screenshot_takers = {'wxpython': (wx, self._wx_screenshot),
                             'pygtk': (gdk, self._gtk_screenshot),
                             'pil': (ImageGrab, self._pil_screenshot),
                             'scrot': (self._scrot, self._scrot_screenshot)}
        if name not in screenshot_takers:
            raise RuntimeError("Invalid screenshot module or tool '%s'." % name)
        supported, screenshot_taker = screenshot_takers[name]
        if not supported:
            raise RuntimeError("Screenshot module or tool '%s' not installed."
                               % name)
        return screenshot_taker

    def _get_default_screenshot_taker(self):
        # Priority order; the final (True, _no_screenshot) entry guarantees a
        # fallback that raises a helpful error when nothing is available.
        for module, screenshot_taker in [(wx, self._wx_screenshot),
                                         (gdk, self._gtk_screenshot),
                                         (ImageGrab, self._pil_screenshot),
                                         (self._scrot, self._scrot_screenshot),
                                         (True, self._no_screenshot)]:
            if module:
                return screenshot_taker

    def _java_screenshot(self, path):
        size = Toolkit.getDefaultToolkit().getScreenSize()
        rectangle = Rectangle(0, 0, size.width, size.height)
        image = Robot().createScreenCapture(rectangle)
        ImageIO.write(image, 'jpg', File(path))

    def _cli_screenshot(self, path):
        bmp = Bitmap(Screen.PrimaryScreen.Bounds.Width,
                     Screen.PrimaryScreen.Bounds.Height)
        graphics = Graphics.FromImage(bmp)
        try:
            graphics.CopyFromScreen(0, 0, 0, 0, bmp.Size)
        finally:
            graphics.Dispose()
        bmp.Save(path, Imaging.ImageFormat.Jpeg)

    def _osx_screenshot(self, path):
        if self._call('screencapture', '-t', 'jpg', path) != 0:
            raise RuntimeError("Using 'screencapture' failed.")

    def _call(self, *command):
        try:
            return subprocess.call(command, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        except OSError:
            # Command not found -> non-zero sentinel, treated as failure.
            return -1

    @property
    def _scrot(self):
        # 'scrot' is only considered on POSIX systems, and only when runnable.
        return os.sep == '/' and self._call('scrot', '--version') == 0

    def _scrot_screenshot(self, path):
        if not path.endswith(('.jpg', '.jpeg')):
            raise RuntimeError("Scrot requires extension to be '.jpg' or "
                               "'.jpeg', got '%s'." % os.path.splitext(path)[1])
        if self._call('scrot', '--silent', path) != 0:
            raise RuntimeError("Using 'scrot' failed.")

    def _wx_screenshot(self, path):
        if not self._wx_app_reference:
            # wx needs a living App object; keep a reference so it is not
            # garbage collected between screenshots.
            self._wx_app_reference = wx.App(False)
        context = wx.ScreenDC()
        width, height = context.GetSize()
        # NOTE(review): lexicographic version compare; fine for wx 1-9.
        if wx.__version__ >= '4':
            bitmap = wx.Bitmap(width, height, -1)
        else:
            bitmap = wx.EmptyBitmap(width, height, -1)
        memory = wx.MemoryDC()
        memory.SelectObject(bitmap)
        memory.Blit(0, 0, width, height, context, -1, -1)
        memory.SelectObject(wx.NullBitmap)
        bitmap.SaveFile(path, wx.BITMAP_TYPE_JPEG)

    def _gtk_screenshot(self, path):
        window = gdk.get_default_root_window()
        if not window:
            raise RuntimeError('Taking screenshot failed.')
        width, height = window.get_size()
        pb = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, width, height)
        pb = pb.get_from_drawable(window, window.get_colormap(),
                                  0, 0, 0, 0, width, height)
        if not pb:
            raise RuntimeError('Taking screenshot failed.')
        pb.save(path, 'jpeg')

    def _pil_screenshot(self, path):
        ImageGrab.grab().save(path, 'JPEG')

    def _no_screenshot(self, path):
        raise RuntimeError('Taking screenshots is not supported on this platform '
                           'by default. See library documentation for details.')
if __name__ == "__main__":
    # Command-line self test:  Screenshot.py <path>|test [wx|pygtk|pil|scrot]
    # Fix: the original last line carried a stray data-dump artifact
    # ("| 0.728169 | 0.241579 |") fused after the call, which is a syntax
    # error; it has been removed.
    if len(sys.argv) not in [2, 3]:
        sys.exit("Usage: %s <path>|test [wx|pygtk|pil|scrot]"
                 % os.path.basename(sys.argv[0]))
    path = sys.argv[1] if sys.argv[1] != 'test' else None
    module = sys.argv[2] if len(sys.argv) > 2 else None
    ScreenshotTaker(module).test(path)
from __future__ import print_function
import qt
import shutil
import sys
import os
import time
from constants import *
def copy_script(once):
    """Copy the running script next to the measurement data file.

    Called once per run (guarded by *once*) so the exact script version is
    archived with the data. Relies on the module-level ``data`` (qt.Data)
    being created before the first call.
    """
    if once:
        # os.path.dirname is equivalent to the old manual slice
        # filepath[:-(len(filename) + 1)] but does not assume a
        # one-character path separator.
        destination = os.path.join(os.path.dirname(data.get_filepath()),
                                   os.path.basename(sys.argv[0]))
        shutil.copy2(sys.argv[0], destination)
#############################
# Measurement Parameters
#############################
# VNA sweep parameters: a single-point, 1 Hz wide "sweep", i.e. the VNA
# probes at a fixed frequency (probe_center).
probe_center = 7.414*GHz
probe_span = 1*Hz
probe_start_freq = probe_center - probe_span/2
probe_stop_freq = probe_center + probe_span/2
probe_numpoints = 1
if_bw = 1*Hz
probe_start_power = -50  # dBm, outer-sweep start
probe_stop_power = -60  # dBm, outer-sweep stop
probe_power_numpoint = 3
s_params = ['S21']  # NOTE(review): defined but apparently unused below
filename = raw_input('Filename : ')
avg_point = 1  # repeated triggers averaged per drive frequency

# SMF sweep parameters (the two-tone "drive" frequency axis)
drive_start_freq = 4.63*GHz
drive_stop_freq = 4.68*GHz
resolution = 0.25*MHz
drive_numpoints = int(abs(drive_stop_freq - drive_start_freq)/resolution + 1)
drive_power = -20  # dBm

#############################
# Initialize Instruments
#############################
znb = qt.instruments.create('ZNB20', 'RhodeSchwartz_ZNB20', address=ZNB20_ADDRESS, reset=True)
smf = qt.instruments.create('SMF100', 'RhodeSchwartz_SMF100', address = SMF100_ADDRESS, reset=True)
rigol = qt.instruments.create('DP832A', 'Rigol_DP832A', address='TCPIP0::192.168.1.5::INSTR')

# setup SMF100 as the drive source
smf.set_frequency(drive_start_freq)
smf.set_source_power(drive_power)

# Setup VNA as the probe source
znb.set_external_reference(True)
znb.set_external_reference_frequency(10)
znb.set_start_frequency(probe_start_freq)
znb.set_stop_frequency(probe_stop_freq)
znb.set_numpoints(probe_numpoints)
znb.set_if_bandwidth(if_bw)
znb.set_source_power(probe_start_power)
znb.add_trace('S21')

# Turn on sources
znb.rf_on()
smf.rf_on()
rigol.output_on(1)

# Test trigger before committing to the full sweep
znb.send_trigger(wait=True)
# znb.autoscale()
go_on = raw_input('Continue? [y/n] ')
assert go_on.strip().upper() != 'N'

### SETTING UP DATA FILE
data=qt.Data(name=filename)
# data.add_comment('No. of repeated measurements for average is 60')
data.add_coordinate('Drive Power', units='dBm')
data.add_coordinate('Drive Frequency', units='Hz')
data.add_value('S21 real')
data.add_value('S21 imag')
data.add_value('S21 abs')
data.add_value('S21 phase')

drive_freq_array = np.linspace(drive_start_freq, drive_stop_freq, drive_numpoints)
probe_power_array = np.linspace(probe_start_power, probe_stop_power, probe_power_numpoint)
in_meta = [drive_start_freq, drive_stop_freq, drive_numpoints, 'Drive (Hz)']
out_meta = [probe_start_power, probe_stop_power, probe_power_numpoint,'Probe power (dBm)']

qt.mstart()
once = True
# Outer loop: probe power. Inner loop: drive frequency.
for prob_power in probe_power_array:
    start_time = time.time()
    znb.set_source_power(prob_power)
    # Constant column of the current probe power, one entry per drive point.
    power_list = np.linspace(prob_power, prob_power, num=drive_numpoints)
    traces=[[],[],[],[]]  # real, imag, abs, phase — one entry per drive freq
    for index, drive_freq in enumerate(drive_freq_array):
        traces_sum=[0,0]
        print('%d/%d'%(index+1,len(drive_freq_array)), end='\r')
        smf.set_frequency(drive_freq)
        for i in range(avg_point):
            znb.send_trigger(wait=True)
            trace = znb.get_data('S21')
            traces_sum[0]+=np.real(trace)
            traces_sum[1]+=np.imag(trace)
        # Single-point trace: element [0] is the only frequency point.
        traces[0].append(traces_sum[0][0]/avg_point)
        traces[1].append(traces_sum[1][0]/avg_point)
        traces[2].append(np.abs(traces[0][-1] + 1j*traces[1][-1]))
        traces[3].append(np.angle(traces[0][-1] + 1j*traces[1][-1]))
    end_time = time.time()
    data.add_data_point(power_list, drive_freq_array, traces[0], traces[1], traces[2], traces[3])
    copy_script(once);once = False
    data.metagen2D(in_meta, out_meta)
    print(end_time - start_time)
data.close_file()
smf.rf_off()
rigol.output_off(1) | scripts/vibhor/Qubit/Two_tone_dynamic_probe_power.py | from __future__ import print_function
import qt
import shutil
import sys
import os
import time
from constants import *
def copy_script(once):
if once:
shutil.copy2('%s'%sys.argv[0],'%s/%s'%(data.get_filepath()[:-(len(data.get_filename())+1)],os.path.basename(sys.argv[0])))
#############################
# Measurement Parameters
#############################
# VNA sweep parameters
probe_center = 7.414*GHz
probe_span = 1*Hz
probe_start_freq = probe_center - probe_span/2
probe_stop_freq = probe_center + probe_span/2
probe_numpoints = 1
if_bw = 1*Hz
probe_start_power = -50 #
probe_stop_power = -60 #
probe_power_numpoint = 3
s_params = ['S21']
filename = raw_input('Filename : ')
avg_point = 1
# SMF sweep parameters
drive_start_freq = 4.63*GHz
drive_stop_freq = 4.68*GHz
resolution = 0.25*MHz
drive_numpoints = int(abs(drive_stop_freq - drive_start_freq)/resolution + 1)
drive_power = -20
#############################
# Initialize Instruments
#############################
znb = qt.instruments.create('ZNB20', 'RhodeSchwartz_ZNB20', address=ZNB20_ADDRESS, reset=True)
smf = qt.instruments.create('SMF100', 'RhodeSchwartz_SMF100', address = SMF100_ADDRESS, reset=True)
rigol = qt.instruments.create('DP832A', 'Rigol_DP832A', address='TCPIP0::192.168.1.5::INSTR')
# setup SMF100 as source
smf.set_frequency(drive_start_freq)
smf.set_source_power(drive_power)
# Setup VNA as source
znb.set_external_reference(True)
znb.set_external_reference_frequency(10)
znb.set_start_frequency(probe_start_freq)
znb.set_stop_frequency(probe_stop_freq)
znb.set_numpoints(probe_numpoints)
znb.set_if_bandwidth(if_bw)
znb.set_source_power(probe_start_power)
znb.add_trace('S21')
# Turn on sources
znb.rf_on()
smf.rf_on()
rigol.output_on(1)
# Test trigger
znb.send_trigger(wait=True)
# znb.autoscale()
go_on = raw_input('Continue? [y/n] ')
assert go_on.strip().upper() != 'N'
### SETTING UP DATA FILE
data=qt.Data(name=filename)
# data.add_comment('No. of repeated measurements for average is 60')
data.add_coordinate('Drive Power', units='dBm')
data.add_coordinate('Drive Frequency', units='Hz')
data.add_value('S21 real')
data.add_value('S21 imag')
data.add_value('S21 abs')
data.add_value('S21 phase')
drive_freq_array = np.linspace(drive_start_freq, drive_stop_freq, drive_numpoints)
probe_power_array = np.linspace(probe_start_power, probe_stop_power, probe_power_numpoint)
in_meta = [drive_start_freq, drive_stop_freq, drive_numpoints, 'Drive (Hz)']
out_meta = [probe_start_power, probe_stop_power, probe_power_numpoint,'Probe power (dBm)']
qt.mstart()
once = True
for prob_power in probe_power_array:
start_time = time.time()
znb.set_source_power(prob_power)
power_list = np.linspace(prob_power, prob_power, num=drive_numpoints)
traces=[[],[],[],[]]
for index, drive_freq in enumerate(drive_freq_array):
traces_sum=[0,0]
print('%d/%d'%(index+1,len(drive_freq_array)), end='\r')
smf.set_frequency(drive_freq)
for i in range(avg_point):
znb.send_trigger(wait=True)
trace = znb.get_data('S21')
traces_sum[0]+=np.real(trace)
traces_sum[1]+=np.imag(trace)
traces[0].append(traces_sum[0][0]/avg_point)
traces[1].append(traces_sum[1][0]/avg_point)
traces[2].append(np.abs(traces[0][-1] + 1j*traces[1][-1]))
traces[3].append(np.angle(traces[0][-1] + 1j*traces[1][-1]))
end_time = time.time()
data.add_data_point(power_list, drive_freq_array, traces[0], traces[1], traces[2], traces[3])
copy_script(once);once = False
data.metagen2D(in_meta, out_meta)
print(end_time - start_time)
data.close_file()
smf.rf_off()
rigol.output_off(1) | 0.222531 | 0.194444 |
from corehq.apps.sms.models import SMSLog, SMS
from custom.fri.models import FRISMSLog
from dimagi.utils.couch.database import iter_docs
from django.core.management.base import BaseCommand
from optparse import make_option
class Command(BaseCommand):
    """One-off management command that syncs couch SMSLog docs into SQL SMS."""
    args = ""
    help = ("Migrates SMSLog to SMS")
    option_list = BaseCommand.option_list + (
        make_option("--balance_only",
                    action="store_true",
                    dest="balance_only",
                    default=False,
                    help="Include this option to only run the balancing step."),
    )

    def get_sms_couch_ids(self):
        """Return the couch _ids of all SMSLog documents."""
        result = SMSLog.view(
            'sms/by_domain',
            include_docs=False,
            reduce=False,
        ).all()
        # The view key is [domain, doc_type]; keep only SMSLog rows.
        return [row['id'] for row in result if row['key'][1] == 'SMSLog']

    def clean_doc(self, doc):
        """
        Some old docs apparently have +00:00Z at the end of the date string,
        which is not a valid timezone specification.

        Also, because of http://manage.dimagi.com/default.asp?111189, there's
        9 docs with very long phone numbers that should just be replaced
        with null because there was no recipient to those sms.
        """
        date = doc.get('date')
        if isinstance(date, basestring) and date.endswith('+00:00Z'):
            # Drop the redundant '+00:00' offset, keeping the trailing 'Z'.
            date = date[:-7] + 'Z'
            doc['date'] = date
        phone_number = doc.get('phone_number')
        if isinstance(phone_number, basestring) and len(phone_number) > 126:
            doc['phone_number'] = None

    def run_migration(self):
        """Sync every couch SMSLog into SQL, logging failures and progress."""
        count = 0
        ids = self.get_sms_couch_ids()
        total_count = len(ids)
        for doc in iter_docs(FRISMSLog.get_db(), ids):
            try:
                self.clean_doc(doc)
                couch_sms = FRISMSLog.wrap(doc)
                couch_sms._migration_do_sync()
            except Exception as e:
                # Best-effort migration: report the failure and keep going
                # rather than abort the whole run.
                print 'Could not sync SMSLog %s: %s' % (doc['_id'], e)
            count += 1
            if (count % 10000) == 0:
                print 'Processed %s / %s documents' % (count, total_count)

    def balance(self):
        """Print SQL vs couch document counts as a post-migration sanity check."""
        sql_count = SMS.objects.count()
        couch_count = len(self.get_sms_couch_ids())
        print "SQL Count: %s, Couch Count: %s" % (sql_count, couch_count)

    def handle(self, *args, **options):
        """Entry point: run the migration unless --balance_only, then balance."""
        if not options['balance_only']:
            self.run_migration()
        self.balance()
from custom.fri.models import FRISMSLog
from dimagi.utils.couch.database import iter_docs
from django.core.management.base import BaseCommand
from optparse import make_option
class Command(BaseCommand):
args = ""
help = ("Migrates SMSLog to SMS")
option_list = BaseCommand.option_list + (
make_option("--balance_only",
action="store_true",
dest="balance_only",
default=False,
help="Include this option to only run the balancing step."),
)
def get_sms_couch_ids(self):
result = SMSLog.view(
'sms/by_domain',
include_docs=False,
reduce=False,
).all()
return [row['id'] for row in result if row['key'][1] == 'SMSLog']
def clean_doc(self, doc):
"""
Some old docs apparently have +00:00Z at the end of the date string,
which is not a valid timezone specification.
Also, because of http://manage.dimagi.com/default.asp?111189, there's
9 docs with very long phone numbers that should just be replaced
with null because there was no recipient to those sms.
"""
date = doc.get('date')
if isinstance(date, basestring) and date.endswith('+00:00Z'):
date = date[:-7] + 'Z'
doc['date'] = date
phone_number = doc.get('phone_number')
if isinstance(phone_number, basestring) and len(phone_number) > 126:
doc['phone_number'] = None
def run_migration(self):
count = 0
ids = self.get_sms_couch_ids()
total_count = len(ids)
for doc in iter_docs(FRISMSLog.get_db(), ids):
try:
self.clean_doc(doc)
couch_sms = FRISMSLog.wrap(doc)
couch_sms._migration_do_sync()
except Exception as e:
print 'Could not sync SMSLog %s: %s' % (doc['_id'], e)
count += 1
if (count % 10000) == 0:
print 'Processed %s / %s documents' % (count, total_count)
def balance(self):
sql_count = SMS.objects.count()
couch_count = len(self.get_sms_couch_ids())
print "SQL Count: %s, Couch Count: %s" % (sql_count, couch_count)
def handle(self, *args, **options):
if not options['balance_only']:
self.run_migration()
self.balance() | 0.494629 | 0.123921 |
# Placeholder for Google-internal load statements.
load("@build_bazel_rules_apple//apple:ios.bzl", "ios_static_framework")

# Minimum iOS version the TFLite frameworks built here support.
TFL_MINIMUM_OS_VERSION = "9.0"

# Default tags for filtering iOS targets. Targets are restricted to Apple platforms.
TFL_DEFAULT_TAGS = [
    "apple",
]

# Following sanitizer tests are not supported by iOS test targets.
TFL_DISABLED_SANITIZER_TAGS = [
    "noasan",
    "nomsan",
    "notsan",
]
# iOS static framework with symbol whitelist. Exported C++ symbols might cause
# symbol collisions with other libraries. The list of symbols to whitelist can
# be generated by running `nm -m -g FRAMEWORK_LIBRARY | grep _TfLite` on a
# framework built with the `ios_static_framework` rule.
def tflite_ios_static_framework(
        name,
        bundle_name,
        whitelist_symbols_file,
        exclude_resources = True,
        **kwargs):
    """TFLite variant of ios_static_framework with symbol hiding.

    Args:
      name: The name of the target.
      bundle_name: The name to give to the framework bundle, without the
          ".framework" extension. If omitted, the target's name will be used.
      whitelist_symbols_file: a file including a list of whitelisted symbols,
          one symbol per line.
      exclude_resources: Indicates whether resources should be excluded from the
          bundle. This can be used to avoid unnecessarily bundling resources if
          the static framework is being distributed in a different fashion, such
          as a Cocoapod.
      **kwargs: Pass-through arguments.
    """

    # Step 1: build an ordinary static framework to post-process.
    preprocessed_name = "Preprocessed_" + name
    ios_static_framework(
        name = preprocessed_name,
        bundle_name = bundle_name,
        exclude_resources = exclude_resources,
        **kwargs
    )

    hide_symbols_tool = "//tensorflow/lite/experimental/ios:hide_symbols_with_whitelist"
    framework_target = ":{}.zip".format(preprocessed_name)

    # Step 2: re-package the framework with non-whitelisted symbols hidden.
    # The script reads its inputs from environment variables.
    cmd = ("INPUT_FRAMEWORK=\"$(location {})\" ".format(framework_target) +
           "BUNDLE_NAME=\"{}\" ".format(bundle_name) +
           "WHITELIST_FILE_PATH=\"$(location {})\" ".format(whitelist_symbols_file) +
           "OUTPUT=\"$(OUTS)\" " +
           "\"$(location {})\"".format(hide_symbols_tool))
    native.genrule(
        name = name,
        srcs = [framework_target, whitelist_symbols_file],
        outs = [name + ".zip"],
        cmd = cmd,
        tools = [hide_symbols_tool],
    )
# Placeholder for Google-internal load statements.
load("@build_bazel_rules_apple//apple:ios.bzl", "ios_static_framework")
TFL_MINIMUM_OS_VERSION = "9.0"
# Default tags for filtering iOS targets. Targets are restricted to Apple platforms.
TFL_DEFAULT_TAGS = [
"apple",
]
# Following sanitizer tests are not supported by iOS test targets.
TFL_DISABLED_SANITIZER_TAGS = [
"noasan",
"nomsan",
"notsan",
]
# iOS static framework with symbol whitelist. Exported C++ symbbols might cause
# symbol collision with other libraries. List of symbols to whitelist can be
# generated by running `nm -m -g FRAMEWORK_LIBRARY | grep _TfLite` for framework
# built with `ios_static_framework` rule.
def tflite_ios_static_framework(
name,
bundle_name,
whitelist_symbols_file,
exclude_resources = True,
**kwargs):
"""TFLite variant of ios_static_framework with symbol hiding.
Args:
name: The name of the target.
bundle_name: The name to give to the framework bundle, without the
".framework" extension. If omitted, the target's name will be used.
whitelist_symbols_file: a file including a list of whitelisted symbols,
one symbol per line.
exclude_resources: Indicates whether resources should be excluded from the
bundle. This can be used to avoid unnecessarily bundling resources if
the static framework is being distributed in a different fashion, such
as a Cocoapod.
**kwargs: Pass-through arguments.
"""
preprocessed_name = "Preprocessed_" + name
ios_static_framework(
name = preprocessed_name,
bundle_name = bundle_name,
exclude_resources = exclude_resources,
**kwargs
)
framework_target = ":{}.zip".format(preprocessed_name)
srcs = [
framework_target,
whitelist_symbols_file,
]
cmd = ("INPUT_FRAMEWORK=\"$(location " + framework_target + ")\" " +
"BUNDLE_NAME=\"" + bundle_name + "\" " +
"WHITELIST_FILE_PATH=\"$(location " + whitelist_symbols_file + ")\" " +
"OUTPUT=\"$(OUTS)\" " +
"\"$(location //tensorflow/lite/experimental/ios:hide_symbols_with_whitelist)\"")
native.genrule(
name = name,
srcs = srcs,
outs = [name + ".zip"],
cmd = cmd,
tools = [
"//tensorflow/lite/experimental/ios:hide_symbols_with_whitelist",
],
) | 0.725454 | 0.188997 |
import requests
from lxml import etree
from kth_timeoutdecorator import *
import json
SAUCENAO_KEY = "<KEY>"  # SauceNAO API key
TIMELIMIT_IMAGE = 7  # time limit (seconds) for one reverse-image lookup
# Base query parameters sent with every SauceNAO request
# (output_type=2 requests a JSON response).
params = {
    'api_key' : SAUCENAO_KEY,
    'output_type' : 2
}
def Pixiv_Msg(id, user_name, user_id):
    """Format a pixiv attribution line: artwork id plus author name and id."""
    return 'PixivID: {}\n[作者]{}: {}'.format(id, user_name, user_id)
def from_saucenao(url):
    """Look *url* up on SauceNAO and return pixiv attribution strings.

    Only results that have a pixiv id and similarity above 80 are kept.
    """
    # Build a per-call query instead of mutating the shared module-level
    # `params` dict: the old code leaked the previous URL between calls.
    query = dict(params)
    query['url'] = url
    response = requests.get('https://saucenao.com/search.php', params=query)
    data = response.json()
    matches = []
    for res in data['results']:
        similarity = float(res['header']['similarity'])
        pixiv_id = res['data'].get('pixiv_id')
        if pixiv_id and similarity > 80:
            info = res['data']
            matches.append(Pixiv_Msg(pixiv_id, info['member_name'],
                                     info['member_id']))
    return matches
def from_ascii2d(url):
    """Look *url* up on ascii2d.net and return pixiv/twitter attributions.

    Queries both the color-hash search page and the bovw (feature) search
    page and takes the top 3 hits from each.
    """
    clolr_res = requests.get(f"https://ascii2d.net/search/url/{url}")
    html_index = etree.HTML(clolr_res.text)
    # The color-search page links to the equivalent bovw search; follow it.
    neet_div = html_index.xpath('//div[@class="detail-link pull-xs-right hidden-sm-down gray-link"]')
    url_bovw = "https://ascii2d.net" + neet_div[0].xpath('./span/a/@href')[1]
    bovw_res = requests.get(url_bovw)
    html_index2 = etree.HTML(bovw_res.text)
    res_ = []
    for html in [html_index, html_index2]:
        all_data = html.xpath('//div[@class="detail-box gray-link"]/h6')
        for data in all_data[:3]:
            # Each hit carries two links/texts: the artwork and its author.
            artworks_id, users_id = data.xpath(".//a/@href")
            artworks, users = data.xpath(".//a/text()")
            if ('pixiv' in artworks_id):
                # Keep only the numeric tail of the pixiv URLs.
                artworks_id = artworks_id.split('/')[-1]
                users_id = users_id.split('/')[-1]
                res_.append(Pixiv_Msg(artworks_id,users,users_id))
            if ('twitter' in artworks_id):
                res_.append(f'twitter: {artworks_id}')
    return res_
@timeout(TIMELIMIT_IMAGE)
async def get_view(sc, image_url: str) -> str:
    """Run searcher *sc* on *image_url* under the TIMELIMIT_IMAGE timeout.

    NOTE(review): *sc* (from_saucenao / from_ascii2d) is synchronous and
    returns a list of strings, so the ``-> str`` annotation looks
    inaccurate — confirm before relying on it.
    """
    return sc(image_url)
async def get_image_data(image_url: str, api_key: str=SAUCENAO_KEY):
    """Query all reverse-image sources for *image_url* and merge the results.

    Returns the de-duplicated attribution lines joined by a separator, or an
    empty string when nothing was found.

    NOTE(review): *api_key* is accepted for backward compatibility but is not
    used — the sources read the module-level SAUCENAO_KEY instead.
    """
    putline = []
    for sc in [from_saucenao, from_ascii2d]:
        try:
            putline += await get_view(sc, image_url)
        except Exception:
            # Best-effort: one source timing out or erroring must not
            # break the other. (Was a bare `except:` before, which also
            # swallowed KeyboardInterrupt/SystemExit.)
            pass
    # De-duplicate while keeping first-seen order; the old `set()` pass
    # shuffled the results nondeterministically.
    unique = list(dict.fromkeys(putline))
    return '\n----------\n'.join(unique)
from lxml import etree
from kth_timeoutdecorator import *
import json
SAUCENAO_KEY = "<KEY>" # SauceNAO 的 API key
TIMELIMIT_IMAGE = 7 # 识图功能的时间限制
params = {
'api_key' : SAUCENAO_KEY,
'output_type' : 2
}
def Pixiv_Msg(id, user_name, user_id):
return (f'PixivID: {id}\n[作者]{user_name}: {user_id}')
def from_saucenao(url):
params['url'] = url
response = requests.get('https://saucenao.com/search.php', params=params)
data = response.json()
res_ = []
for res in data['results']:
similarity = res['header']['similarity']
id = res['data'].get('pixiv_id')
if id and float(similarity) > 80:
title = res['data']['title']
user_name = res['data']['member_name']
user_id = res['data']['member_id']
res_.append(Pixiv_Msg(id, user_name, user_id))
return res_
def from_ascii2d(url):
clolr_res = requests.get(f"https://ascii2d.net/search/url/{url}")
html_index = etree.HTML(clolr_res.text)
neet_div = html_index.xpath('//div[@class="detail-link pull-xs-right hidden-sm-down gray-link"]')
url_bovw = "https://ascii2d.net" + neet_div[0].xpath('./span/a/@href')[1]
bovw_res = requests.get(url_bovw)
html_index2 = etree.HTML(bovw_res.text)
res_ = []
for html in [html_index, html_index2]:
all_data = html.xpath('//div[@class="detail-box gray-link"]/h6')
for data in all_data[:3]:
artworks_id, users_id = data.xpath(".//a/@href")
artworks, users = data.xpath(".//a/text()")
if ('pixiv' in artworks_id):
artworks_id = artworks_id.split('/')[-1]
users_id = users_id.split('/')[-1]
res_.append(Pixiv_Msg(artworks_id,users,users_id))
if ('twitter' in artworks_id):
res_.append(f'twitter: {artworks_id}')
return res_
@timeout(TIMELIMIT_IMAGE)
async def get_view(sc, image_url: str) -> str:
return sc(image_url)
async def get_image_data(image_url: str, api_key: str=SAUCENAO_KEY):
putline = []
repass = ''
for sc in [from_saucenao, from_ascii2d]:
try:
putline += await get_view(sc, image_url)
except :
pass
for msg in list(set(putline)):
if repass:
repass = repass + '\n----------\n' + msg
else:
repass += msg
return repass | 0.106174 | 0.086439 |
from typing import overload, Union
from pykotor.common.stream import BinaryReader
from pykotor.resource.formats.ssf import SSF, SSFBinaryReader, SSFXMLReader, SSFBinaryWriter
from pykotor.resource.formats.ssf.io_xml import SSFXMLWriter
from pykotor.resource.type import FileFormat, SOURCE_TYPES, TARGET_TYPES
# Typed overloads for detect_ssf; the shared implementation follows below.
@overload
def detect_ssf(source: str, offset: int = 0) -> FileFormat:
    ...


@overload
def detect_ssf(source: bytes, offset: int = 0) -> FileFormat:
    ...


@overload
def detect_ssf(source: bytearray, offset: int = 0) -> FileFormat:
    ...


@overload
def detect_ssf(source: BinaryReader, offset: int = 0) -> FileFormat:
    ...
def detect_ssf(source: SOURCE_TYPES, offset: int = 0) -> FileFormat:
    """
    Returns what format the SSF data is believed to be in. This function
    performs a basic check and does not guarantee accuracy of the result or
    integrity of the data.

    Args:
        source: Source of the SSF data.
        offset: Offset into the data.

    Returns:
        The format of the SSF data.
    """
    def _from_signature(signature: str) -> FileFormat:
        # A binary SSF file starts with the 4-byte magic "SSF ".
        return FileFormat.BINARY if signature == "SSF " else FileFormat.XML

    try:
        if isinstance(source, str):
            with BinaryReader.from_file(source, offset) as reader:
                return _from_signature(reader.read_string(4))
        if isinstance(source, (bytes, bytearray)):
            return _from_signature(source[:4].decode('ascii', 'ignore'))
        if isinstance(source, BinaryReader):
            file_format = _from_signature(source.read_string(4))
            # Rewind so the caller's reader position is unchanged.
            source.skip(-4)
            return file_format
        return FileFormat.INVALID
    except IOError:
        return FileFormat.INVALID
def load_ssf(source: SOURCE_TYPES, offset: int = 0) -> SSF:
    """
    Returns an SSF instance from the source. The file format (binary or xml) is
    automatically determined before parsing the data.

    Args:
        source: The source of the data.
        offset: The byte offset of the file inside the data.

    Raises:
        ValueError: If the file was corrupted or in an unsupported format.

    Returns:
        An SSF instance.
    """
    file_format = detect_ssf(source, offset)
    try:
        if file_format == FileFormat.BINARY:
            return SSFBinaryReader(source, offset).load()
        if file_format == FileFormat.XML:
            return SSFXMLReader(source).load()
        raise ValueError
    except (IOError, ValueError) as e:
        # Chain the underlying error so the root cause stays visible in the
        # traceback (the old code discarded it entirely).
        raise ValueError("Tried to load an unsupported or corrupted SSF file.") from e
def write_ssf(ssf: SSF, target: TARGET_TYPES, file_format: FileFormat = FileFormat.BINARY) -> None:
    """
    Writes the SSF data to the target location with the specified format (binary or xml).

    Args:
        ssf: The SSF file being written.
        target: The location to write the data to.
        file_format: The file format.

    Raises:
        ValueError: If an unsupported FileFormat is passed.
    """
    # Select the writer for the requested format, then write in one place.
    if file_format == FileFormat.BINARY:
        writer = SSFBinaryWriter(ssf, target)
    elif file_format == FileFormat.XML:
        writer = SSFXMLWriter(ssf, target)
    else:
        raise ValueError("Unsupported format specified; use BINARY or XML.")
    writer.write()
from pykotor.common.stream import BinaryReader
from pykotor.resource.formats.ssf import SSF, SSFBinaryReader, SSFXMLReader, SSFBinaryWriter
from pykotor.resource.formats.ssf.io_xml import SSFXMLWriter
from pykotor.resource.type import FileFormat, SOURCE_TYPES, TARGET_TYPES
@overload
def detect_ssf(source: str, offset: int = 0):
...
@overload
def detect_ssf(source: bytes, offset: int = 0):
...
@overload
def detect_ssf(source: bytearray, offset: int = 0):
...
@overload
def detect_ssf(source: BinaryReader, offset: int = 0):
...
def detect_ssf(source: SOURCE_TYPES, offset: int = 0) -> FileFormat:
"""
Returns what format the SSF data is believed to be in. This function performs a basic check and does not guarantee
accuracy of the result or integrity of the data.
Args:
source: Source of the SSF data.
offset: Offset into the data.
Returns:
The format of the SSF data.
"""
try:
if isinstance(source, str):
with BinaryReader.from_file(source, offset) as reader:
file_format = FileFormat.BINARY if reader.read_string(4) == "SSF " else FileFormat.XML
elif isinstance(source, bytes) or isinstance(source, bytearray):
file_format = FileFormat.BINARY if source[:4].decode('ascii', 'ignore') == "SSF " else FileFormat.XML
elif isinstance(source, BinaryReader):
file_format = FileFormat.BINARY if source.read_string(4) == "SSF " else FileFormat.XML
source.skip(-4)
else:
file_format = FileFormat.INVALID
except IOError:
file_format = FileFormat.INVALID
return file_format
def load_ssf(source: SOURCE_TYPES, offset: int = 0) -> SSF:
"""
Returns an SSF instance from the source. The file format (binary or xml) is automatically determined before parsing
the data.
Args:
source: The source of the data.
offset: The byte offset of the file inside the data.
Raises:
ValueError: If the file was corrupted or in an unsupported format.
Returns:
An SSF instance.
"""
file_format = detect_ssf(source, offset)
try:
if file_format == FileFormat.BINARY:
return SSFBinaryReader(source, offset).load()
elif file_format == FileFormat.XML:
return SSFXMLReader(source).load()
else:
raise ValueError
except (IOError, ValueError):
raise ValueError("Tried to load an unsupported or corrupted SSF file.")
def write_ssf(ssf: SSF, target: TARGET_TYPES, file_format: FileFormat = FileFormat.BINARY) -> None:
"""
Writes the SSF data to the target location with the specified format (binary or xml).
Args:
ssf: The SSF file being written.
target: The location to write the data to.
file_format: The file format.
Raises:
ValueError: If an unsupported FileFormat is passed.
"""
if file_format == FileFormat.BINARY:
SSFBinaryWriter(ssf, target).write()
elif file_format == FileFormat.XML:
SSFXMLWriter(ssf, target).write()
else:
raise ValueError("Unsupported format specified; use BINARY or XML.") | 0.890642 | 0.385837 |
import pytest
from jina import Flow, __default_host__
from jina.enums import SocketType
from jina.helper import get_internal_ip, get_public_ip
@pytest.mark.parametrize('local_ip, on_public', [(get_internal_ip(), False),
                                                 (get_public_ip(), True)])
def test_remote_pod_local_gateway(local_ip, on_public):
    """Single remote pod: the pod binds locally, the gateway connects to it."""
    # BIND socket's host must always be 0.0.0.0
    remote_ip = '172.16.17.32'
    f = Flow(expose_public=on_public).add(host=remote_ip)
    f.build()
    for k, v in f:
        print(f'{v.name}\tIN: {v.address_in}\t{v.address_out}')
    assert f['pod0'].host_in == __default_host__
    assert f['pod0'].host_out == __default_host__
    assert f['gateway'].host_in == remote_ip
    assert f['gateway'].host_out == remote_ip
@pytest.mark.parametrize('local_ip, on_public', [(get_internal_ip(), False),
                                                 (get_public_ip(), True)])
def test_remote_pod_local_pod_local_gateway_input_socket_pull_connect_from_remote(local_ip, on_public):
    """Remote pod0 then local pod1: only pod1's input connects to the remote."""
    remote_ip = '172.16.17.32'
    f = Flow(expose_public=on_public).add(host=remote_ip).add().build()
    for k, v in f:
        print(f'{v.name}\tIN: {v.address_in}\t{v.address_out}')
    assert f['pod0'].host_in == __default_host__
    assert f['pod0'].host_out == __default_host__
    assert f['pod1'].host_in == remote_ip
    assert f['pod1'].host_out == __default_host__
    assert f['gateway'].host_in == __default_host__
    assert f['gateway'].host_out == remote_ip
@pytest.mark.parametrize('local_ip, on_public', [(get_internal_ip(), False),
                                                 (get_public_ip(), True)])
def test_remote_pod_local_pod_local_gateway(local_ip, on_public):
    """Remote pod0 then local pod1: host resolution for each hop.

    NOTE(review): this test is byte-identical to
    test_remote_pod_local_pod_local_gateway_input_socket_pull_connect_from_remote
    above — likely a copy that was meant to diverge or be removed.
    """
    remote_ip = '172.16.17.32'
    f = Flow(expose_public=on_public).add(host=remote_ip).add().build()
    for k, v in f:
        print(f'{v.name}\tIN: {v.address_in}\t{v.address_out}')
    assert f['pod0'].host_in == __default_host__
    assert f['pod0'].host_out == __default_host__
    assert f['pod1'].host_in == remote_ip
    assert f['pod1'].host_out == __default_host__
    assert f['gateway'].host_in == __default_host__
    assert f['gateway'].host_out == remote_ip
@pytest.mark.parametrize('local_ip, on_public', [(get_internal_ip(), False),
                                                 (get_public_ip(), True)])
def test_remote_pod_local_pod_remote_pod_local_gateway_input_socket_pull_connect_from_remote(local_ip, on_public):
    """Remote-local-remote chain: the local pod1 connects to both remotes."""
    remote1 = '172.16.17.32'
    remote2 = '172.16.58.3'
    f = Flow(expose_public=on_public).add(host=remote1).add().add(
        host=remote2).build()
    for k, v in f:
        print(f'{v.name}\tIN: {v.address_in}\t{v.address_out}')
    assert f['pod0'].host_in == __default_host__
    assert f['pod0'].host_out == __default_host__
    assert f['pod1'].host_in == remote1
    assert f['pod1'].host_out == remote2
    assert f['pod2'].host_in == __default_host__
    assert f['pod2'].host_out == __default_host__
    assert f['gateway'].host_in == remote2
    assert f['gateway'].host_out == remote1
@pytest.mark.parametrize('local_ip, on_public', [(get_internal_ip(), False),
                                                 (get_public_ip(), True)])
def test_remote_pod_local_pod_remote_pod_local_gateway(local_ip, on_public):
    """Remote-local-remote chain: host resolution for each hop.

    NOTE(review): byte-identical to the *_input_socket_pull_connect_from_remote
    variant above — likely a copy that was meant to diverge or be removed.
    """
    remote1 = '172.16.17.32'
    remote2 = '172.16.58.3'
    f = Flow(expose_public=on_public).add(host=remote1).add().add(
        host=remote2).build()
    for k, v in f:
        print(f'{v.name}\tIN: {v.address_in}\t{v.address_out}')
    assert f['pod0'].host_in == __default_host__
    assert f['pod0'].host_out == __default_host__
    assert f['pod1'].host_in == remote1
    assert f['pod1'].host_out == remote2
    assert f['pod2'].host_in == __default_host__
    assert f['pod2'].host_out == __default_host__
    assert f['gateway'].host_in == remote2
    assert f['gateway'].host_out == remote1
@pytest.mark.parametrize('local_ip, on_public', [(get_internal_ip(), False),
                                                 (get_public_ip(), True)])
def test_local_pod_remote_pod_remote_pod_local_gateway(local_ip, on_public):
    """Local-remote-remote chain: locals connect outward, remotes bind."""
    remote1 = '172.16.17.32'
    remote2 = '172.16.58.3'
    f = Flow(expose_public=on_public).add().add(host=remote1).add(host=remote2)
    f.build()
    for k, v in f:
        print(f'{v.name}\tIN: {v.address_in}\t{v.address_out}')
    assert f['pod0'].host_in == __default_host__
    assert f['pod0'].host_out == remote1
    assert f['pod1'].host_in == __default_host__
    assert f['pod1'].host_out == remote2
    assert f['pod2'].host_in == __default_host__
    assert f['pod2'].host_out == __default_host__
    assert f['gateway'].host_in == remote2
    assert f['gateway'].host_out == __default_host__
def test_gateway_remote():
    """Single remote pod: the pod binds both sockets, the gateway connects."""
    remote1 = '172.16.17.32'
    f = Flow().add(host=remote1).build()
    for k, v in f:
        print(f'{v.name}\tIN: {v.address_in}\t{v.address_out}')
    assert f['pod0'].host_in == __default_host__
    assert f['pod0'].host_out == __default_host__
    assert f['pod0'].args.socket_in.is_bind
    assert f['pod0'].args.socket_out.is_bind
    assert f['gateway'].host_in == remote1
    assert f['gateway'].host_out == remote1
def test_gateway_remote_local():
    """Remote pod0 then local pod1: checks hosts AND exact socket types.

    remote  IN: 0.0.0.0:61913 (PULL_BIND)       internal_ip:61914 (PUSH_CONNECT)
    pod1    IN: 0.0.0.0:61914 (PULL_BIND)       0.0.0.0:61918 (PUSH_BIND)
    gateway IN: 0.0.0.0:61918 (PULL_CONNECT)    172.16.17.32:61913 (PUSH_CONNECT)

    :return:
    """
    remote1 = '172.16.17.32'
    f = Flow().add(host=remote1).add().build()
    for k, v in f:
        print(f'{v.name}\tIN: {v.address_in}\t{v.address_out}')
    assert f['pod0'].host_in == __default_host__
    assert f['pod0'].host_out == __default_host__
    assert f['pod0'].args.socket_in == SocketType.PULL_BIND
    assert f['pod0'].args.socket_out == SocketType.PUSH_BIND
    assert f['pod1'].host_in == remote1
    assert f['pod1'].host_out == __default_host__
    assert f['pod1'].args.socket_in == SocketType.PULL_CONNECT
    assert f['pod1'].args.socket_out == SocketType.PUSH_BIND
    assert f['gateway'].host_in == __default_host__
    assert f['gateway'].host_out == remote1
    assert f['gateway'].args.socket_in == SocketType.PULL_CONNECT
    assert f['gateway'].args.socket_out == SocketType.PUSH_CONNECT
def test_gateway_remote_local_input_socket_pull_connect_from_remote():
    """Remote pod0 then local pod1: checks hosts and bind/connect direction only.

    remote  IN: 0.0.0.0:61913 (PULL_BIND)       0.0.0.0:61914 (PUSH_BIND)
    pod1    IN: 172.16.17.32:61914 (PULL_CONNECT)   0.0.0.0:61918 (PUSH_BIND)
    gateway IN: 0.0.0.0:61918 (PULL_CONNECT)    172.16.17.32:61913 (PUSH_CONNECT)

    :return:
    """
    remote1 = '172.16.17.32'
    f = Flow().add(host=remote1).add().build()
    for k, v in f:
        print(f'{v.name}\tIN: {v.address_in}\t{v.address_out}')
    assert f['pod0'].host_in == __default_host__
    assert f['pod0'].host_out == __default_host__
    assert f['pod0'].args.socket_in.is_bind
    assert f['pod0'].args.socket_out.is_bind
    assert f['pod1'].host_in == remote1
    assert f['pod1'].host_out == __default_host__
    assert not f['pod1'].args.socket_in.is_bind
    assert f['pod1'].args.socket_out.is_bind
    assert f['gateway'].host_in == __default_host__
    assert f['gateway'].host_out == remote1
def test_gateway_local_remote():
"""
pod0 IN: 0.0.0.0:62322 (PULL_BIND) 172.16.17.32:62326 (PUSH_CONNECT)
remote IN: 0.0.0.0:62326 (PULL_BIND) 0.0.0.0:62327 (PUSH_BIND)
gateway IN: 172.16.17.32:62327 (PULL_CONNECT) 0.0.0.0:62322 (PUSH_CONNECT)
:return:
"""
remote1 = '172.16.17.32'
f = Flow().add().add(host=remote1).build()
for k, v in f:
print(f'{v.name}\tIN: {v.address_in}\t{v.address_out}')
assert f['pod0'].host_in == __default_host__
assert f['pod0'].host_out == remote1
assert f['pod0'].args.socket_in.is_bind
assert not f['pod0'].args.socket_out.is_bind
assert f['pod1'].host_in == __default_host__
assert f['pod1'].host_out == __default_host__
assert f['pod1'].args.socket_in.is_bind
assert f['pod1'].args.socket_out.is_bind
assert f['gateway'].host_in == remote1
assert f['gateway'].host_out == __default_host__ | tests/unit/flow/test_remote_orchestrate.py | import pytest
from jina import Flow, __default_host__
from jina.enums import SocketType
from jina.helper import get_internal_ip, get_public_ip
@pytest.mark.parametrize('local_ip, on_public', [(get_internal_ip(), False),
                                                 (get_public_ip(), True)])
def test_remote_pod_local_gateway(local_ip, on_public):
    """A single remote pod: the gateway must address it via the remote IP."""
    # BIND socket's host must always be 0.0.0.0
    # NOTE(review): ``local_ip`` is never referenced in the body; the
    # parametrize effectively only varies ``expose_public`` — confirm intent.
    remote_ip = '172.16.17.32'
    f = Flow(expose_public=on_public).add(host=remote_ip)
    f.build()
    for k, v in f:
        print(f'{v.name}\tIN: {v.address_in}\t{v.address_out}')
    # the remote pod binds locally on its own machine (0.0.0.0)
    assert f['pod0'].host_in == __default_host__
    assert f['pod0'].host_out == __default_host__
    # the gateway connects out to the remote host on both ends
    assert f['gateway'].host_in == remote_ip
    assert f['gateway'].host_out == remote_ip
@pytest.mark.parametrize('local_ip, on_public', [(get_internal_ip(), False),
                                                 (get_public_ip(), True)])
def test_remote_pod_local_pod_local_gateway_input_socket_pull_connect_from_remote(local_ip, on_public):
    """Remote head pod followed by a local pod: pod1 connects to the remote IP."""
    # NOTE(review): ``local_ip`` is unused in the body — confirm the fixture
    # value is still needed beyond driving ``expose_public``.
    remote_ip = '172.16.17.32'
    f = Flow(expose_public=on_public).add(host=remote_ip).add().build()
    for k, v in f:
        print(f'{v.name}\tIN: {v.address_in}\t{v.address_out}')
    assert f['pod0'].host_in == __default_host__
    assert f['pod0'].host_out == __default_host__
    # pod1 must reach pod0 via the remote address
    assert f['pod1'].host_in == remote_ip
    assert f['pod1'].host_out == __default_host__
    assert f['gateway'].host_in == __default_host__
    assert f['gateway'].host_out == remote_ip
@pytest.mark.parametrize('local_ip, on_public', [(get_internal_ip(), False),
                                                 (get_public_ip(), True)])
def test_remote_pod_local_pod_local_gateway(local_ip, on_public):
    """Remote head pod followed by a local pod: host resolution check.

    NOTE(review): body is identical to
    ``test_remote_pod_local_pod_local_gateway_input_socket_pull_connect_from_remote``
    — consider deduplicating.
    """
    remote_ip = '172.16.17.32'
    f = Flow(expose_public=on_public).add(host=remote_ip).add().build()
    for k, v in f:
        print(f'{v.name}\tIN: {v.address_in}\t{v.address_out}')
    assert f['pod0'].host_in == __default_host__
    assert f['pod0'].host_out == __default_host__
    assert f['pod1'].host_in == remote_ip
    assert f['pod1'].host_out == __default_host__
    assert f['gateway'].host_in == __default_host__
    assert f['gateway'].host_out == remote_ip
@pytest.mark.parametrize('local_ip, on_public', [(get_internal_ip(), False),
                                                 (get_public_ip(), True)])
def test_remote_pod_local_pod_remote_pod_local_gateway_input_socket_pull_connect_from_remote(local_ip, on_public):
    """remote pod0 -> local pod1 -> remote pod2: hosts resolve across two remotes."""
    # NOTE(review): ``local_ip`` is unused in the body.
    remote1 = '172.16.17.32'
    remote2 = '172.16.58.3'
    f = Flow(expose_public=on_public).add(host=remote1).add().add(
        host=remote2).build()
    for k, v in f:
        print(f'{v.name}\tIN: {v.address_in}\t{v.address_out}')
    assert f['pod0'].host_in == __default_host__
    assert f['pod0'].host_out == __default_host__
    # the middle (local) pod bridges the two remotes
    assert f['pod1'].host_in == remote1
    assert f['pod1'].host_out == remote2
    assert f['pod2'].host_in == __default_host__
    assert f['pod2'].host_out == __default_host__
    assert f['gateway'].host_in == remote2
    assert f['gateway'].host_out == remote1
@pytest.mark.parametrize('local_ip, on_public', [(get_internal_ip(), False),
                                                 (get_public_ip(), True)])
def test_remote_pod_local_pod_remote_pod_local_gateway(local_ip, on_public):
    """remote pod0 -> local pod1 -> remote pod2 host resolution.

    NOTE(review): body is identical to
    ``test_remote_pod_local_pod_remote_pod_local_gateway_input_socket_pull_connect_from_remote``
    — consider deduplicating.
    """
    remote1 = '172.16.17.32'
    remote2 = '172.16.58.3'
    f = Flow(expose_public=on_public).add(host=remote1).add().add(
        host=remote2).build()
    for k, v in f:
        print(f'{v.name}\tIN: {v.address_in}\t{v.address_out}')
    assert f['pod0'].host_in == __default_host__
    assert f['pod0'].host_out == __default_host__
    assert f['pod1'].host_in == remote1
    assert f['pod1'].host_out == remote2
    assert f['pod2'].host_in == __default_host__
    assert f['pod2'].host_out == __default_host__
    assert f['gateway'].host_in == remote2
    assert f['gateway'].host_out == remote1
@pytest.mark.parametrize('local_ip, on_public', [(get_internal_ip(), False),
                                                 (get_public_ip(), True)])
def test_local_pod_remote_pod_remote_pod_local_gateway(local_ip, on_public):
    """local pod0 -> remote pod1 -> remote pod2; gateway pulls from remote2."""
    # NOTE(review): ``local_ip`` is unused in the body.
    remote1 = '172.16.17.32'
    remote2 = '172.16.58.3'
    f = Flow(expose_public=on_public).add().add(host=remote1).add(host=remote2)
    f.build()
    for k, v in f:
        print(f'{v.name}\tIN: {v.address_in}\t{v.address_out}')
    # local head pod pushes out to the first remote
    assert f['pod0'].host_in == __default_host__
    assert f['pod0'].host_out == remote1
    assert f['pod1'].host_in == __default_host__
    assert f['pod1'].host_out == remote2
    assert f['pod2'].host_in == __default_host__
    assert f['pod2'].host_out == __default_host__
    assert f['gateway'].host_in == remote2
    assert f['gateway'].host_out == __default_host__
def test_gateway_remote():
    """A single remote pod binds both sockets; the gateway connects to it on both ends."""
    remote_host = '172.16.17.32'
    flow = Flow().add(host=remote_host).build()
    for _, pod in flow:
        print(f'{pod.name}\tIN: {pod.address_in}\t{pod.address_out}')
    head = flow['pod0']
    assert head.host_in == __default_host__
    assert head.host_out == __default_host__
    assert head.args.socket_in.is_bind
    assert head.args.socket_out.is_bind
    gateway = flow['gateway']
    assert gateway.host_in == remote_host
    assert gateway.host_out == remote_host
def test_gateway_remote_local():
    """
    remote IN: 0.0.0.0:61913 (PULL_BIND) internal_ip:61914 (PUSH_CONNECT)
    pod1 IN: 0.0.0.0:61914 (PULL_BIND) 0.0.0.0:61918 (PUSH_BIND)
    gateway IN: 0.0.0.0:61918 (PULL_CONNECT) 172.16.17.32:61913 (PUSH_CONNECT)
    :return:
    """
    remote1 = '172.16.17.32'
    f = Flow().add(host=remote1).add().build()
    for k, v in f:
        print(f'{v.name}\tIN: {v.address_in}\t{v.address_out}')
    # remote pod0 binds on both ends
    assert f['pod0'].host_in == __default_host__
    assert f['pod0'].host_out == __default_host__
    assert f['pod0'].args.socket_in == SocketType.PULL_BIND
    assert f['pod0'].args.socket_out == SocketType.PUSH_BIND
    # pod1 connects its input to the remote, binds its output
    assert f['pod1'].host_in == remote1
    assert f['pod1'].host_out == __default_host__
    assert f['pod1'].args.socket_in == SocketType.PULL_CONNECT
    assert f['pod1'].args.socket_out == SocketType.PUSH_BIND
    # the gateway connects on both ends, pushing back to the remote
    assert f['gateway'].host_in == __default_host__
    assert f['gateway'].host_out == remote1
    assert f['gateway'].args.socket_in == SocketType.PULL_CONNECT
    assert f['gateway'].args.socket_out == SocketType.PUSH_CONNECT
def test_gateway_remote_local_input_socket_pull_connect_from_remote():
    """Remote head pod binds both sockets; the local pod1 connects back to it.

    Expected topology::

        remote   IN: 0.0.0.0:61913 (PULL_BIND)          0.0.0.0:61914 (PUSH_BIND)
        pod1     IN: 172.16.17.32:61914 (PULL_CONNECT)  0.0.0.0:61918 (PUSH_BIND)
        gateway  IN: 0.0.0.0:61918 (PULL_CONNECT)       172.16.17.32:61913 (PUSH_CONNECT)
    """
    remote_host = '172.16.17.32'
    flow = Flow().add(host=remote_host).add().build()
    for _, pod in flow:
        print(f'{pod.name}\tIN: {pod.address_in}\t{pod.address_out}')
    # pod0 lives on the remote machine and binds on both ends
    head = flow['pod0']
    assert head.host_in == __default_host__
    assert head.host_out == __default_host__
    assert head.args.socket_in.is_bind
    assert head.args.socket_out.is_bind
    # pod1 pulls from the remote pod0 and binds its own output
    middle = flow['pod1']
    assert middle.host_in == remote_host
    assert middle.host_out == __default_host__
    assert not middle.args.socket_in.is_bind
    assert middle.args.socket_out.is_bind
    # the gateway closes the loop back to the remote host
    gateway = flow['gateway']
    assert gateway.host_in == __default_host__
    assert gateway.host_out == remote_host
def test_gateway_local_remote():
    """Local head pod pushes out to a remote pod1; gateway pulls from the remote.

    Expected topology::

        pod0     IN: 0.0.0.0:62322 (PULL_BIND)          172.16.17.32:62326 (PUSH_CONNECT)
        remote   IN: 0.0.0.0:62326 (PULL_BIND)          0.0.0.0:62327 (PUSH_BIND)
        gateway  IN: 172.16.17.32:62327 (PULL_CONNECT)  0.0.0.0:62322 (PUSH_CONNECT)
    """
    remote_host = '172.16.17.32'
    flow = Flow().add().add(host=remote_host).build()
    for _, pod in flow:
        print(f'{pod.name}\tIN: {pod.address_in}\t{pod.address_out}')
    # pod0 binds its input locally and connects its output to the remote
    head = flow['pod0']
    assert head.host_in == __default_host__
    assert head.host_out == remote_host
    assert head.args.socket_in.is_bind
    assert not head.args.socket_out.is_bind
    # remote pod1 binds on both ends
    tail = flow['pod1']
    assert tail.host_in == __default_host__
    assert tail.host_out == __default_host__
    assert tail.args.socket_in.is_bind
    assert tail.args.socket_out.is_bind
    # the gateway pulls from the remote host and pushes back locally
    gateway = flow['gateway']
    assert gateway.host_in == remote_host
    assert gateway.host_out == __default_host__
def main():
    """Build the state/capital data and run the quiz over it."""
    determine_user_answer(create_dictionary())
def create_dictionary():
    """Return a dict mapping each of the 50 U.S. states to its capital city.

    Fixes the original data set, which misspelled several names
    ('Carlifornia', 'Delware', 'Alanta', 'BOise', 'TOpeka', 'Mississipi')
    and omitted six states (Montana, Nebraska, Nevada, New Hampshire,
    New Jersey, New Mexico), leaving only 44 entries.
    """
    us_states = {
        'Alabama': 'Montgomery', 'Alaska': 'Juneau', 'Arizona': 'Phoenix',
        'Arkansas': 'Little Rock', 'California': 'Sacramento',
        'Colorado': 'Denver', 'Connecticut': 'Hartford',
        'Delaware': 'Dover', 'Florida': 'Tallahassee', 'Georgia': 'Atlanta',
        'Hawaii': 'Honolulu', 'Idaho': 'Boise', 'Illinois': 'Springfield',
        'Indiana': 'Indianapolis', 'Iowa': 'Des Moines', 'Kansas': 'Topeka',
        'Kentucky': 'Frankfort', 'Louisiana': 'Baton Rouge',
        'Maine': 'Augusta', 'Maryland': 'Annapolis',
        'Massachusetts': 'Boston', 'Michigan': 'Lansing',
        'Minnesota': 'St. Paul', 'Mississippi': 'Jackson',
        'Missouri': 'Jefferson City', 'Montana': 'Helena',
        'Nebraska': 'Lincoln', 'Nevada': 'Carson City',
        'New Hampshire': 'Concord', 'New Jersey': 'Trenton',
        'New Mexico': 'Santa Fe', 'New York': 'Albany',
        'North Carolina': 'Raleigh', 'North Dakota': 'Bismarck',
        'Ohio': 'Columbus', 'Oklahoma': 'Oklahoma City', 'Oregon': 'Salem',
        'Pennsylvania': 'Harrisburg', 'Rhode Island': 'Providence',
        'South Carolina': 'Columbia', 'South Dakota': 'Pierre',
        'Tennessee': 'Nashville', 'Texas': 'Austin',
        'Utah': 'Salt Lake City', 'Vermont': 'Montpelier',
        'Virginia': 'Richmond', 'Washington': 'Olympia',
        'West Virginia': 'Charleston', 'Wisconsin': 'Madison',
        'Wyoming': 'Cheyenne'}
    return us_states
def determine_user_answer(us_states_and_capitals):
    """Quiz the user on state capitals and report the score.

    :param us_states_and_capitals: dict of state -> capital; NOTE this dict
        is consumed via ``popitem()`` (mutated in place).
    """
    correct_answers = 0    # counts the number of correct answers
    incorrect_answers = 0  # counts the number of wrong answers
    # Asking user for the number of questions
    number_of_questions = int(input('Enter your preferred '
                                    'number of questions: '))
    # Clamp the request to the number of available states (50).
    # BUG FIX: the original assigned to a misspelled name
    # ('number_of_questons'), so the clamp never took effect and popitem()
    # could raise KeyError once the dict was exhausted.
    if number_of_questions > len(us_states_and_capitals):
        number_of_questions = len(us_states_and_capitals)
    print()
    # Ask the questions; popitem() removes each asked state from the dict
    for _ in range(number_of_questions):
        key, value = us_states_and_capitals.popitem()
        print('Enter the capital of', key, end='')
        capital = input(': ')
        if capital == value:
            correct_answers += 1
        else:
            incorrect_answers += 1
    print()
    print('You answered', correct_answers, 'questions correct')
    print('You had', incorrect_answers, 'questions wrong')
main() | chapter-9/2. capital_quiz.py |
def main():
    """Build the state/capital data and run the quiz over it."""
    determine_user_answer(create_dictionary())
def create_dictionary():
    """Return a dict mapping each of the 50 U.S. states to its capital city.

    Fixes the original data set, which misspelled several names
    ('Carlifornia', 'Delware', 'Alanta', 'BOise', 'TOpeka', 'Mississipi')
    and omitted six states (Montana, Nebraska, Nevada, New Hampshire,
    New Jersey, New Mexico), leaving only 44 entries.
    """
    us_states = {
        'Alabama': 'Montgomery', 'Alaska': 'Juneau', 'Arizona': 'Phoenix',
        'Arkansas': 'Little Rock', 'California': 'Sacramento',
        'Colorado': 'Denver', 'Connecticut': 'Hartford',
        'Delaware': 'Dover', 'Florida': 'Tallahassee', 'Georgia': 'Atlanta',
        'Hawaii': 'Honolulu', 'Idaho': 'Boise', 'Illinois': 'Springfield',
        'Indiana': 'Indianapolis', 'Iowa': 'Des Moines', 'Kansas': 'Topeka',
        'Kentucky': 'Frankfort', 'Louisiana': 'Baton Rouge',
        'Maine': 'Augusta', 'Maryland': 'Annapolis',
        'Massachusetts': 'Boston', 'Michigan': 'Lansing',
        'Minnesota': 'St. Paul', 'Mississippi': 'Jackson',
        'Missouri': 'Jefferson City', 'Montana': 'Helena',
        'Nebraska': 'Lincoln', 'Nevada': 'Carson City',
        'New Hampshire': 'Concord', 'New Jersey': 'Trenton',
        'New Mexico': 'Santa Fe', 'New York': 'Albany',
        'North Carolina': 'Raleigh', 'North Dakota': 'Bismarck',
        'Ohio': 'Columbus', 'Oklahoma': 'Oklahoma City', 'Oregon': 'Salem',
        'Pennsylvania': 'Harrisburg', 'Rhode Island': 'Providence',
        'South Carolina': 'Columbia', 'South Dakota': 'Pierre',
        'Tennessee': 'Nashville', 'Texas': 'Austin',
        'Utah': 'Salt Lake City', 'Vermont': 'Montpelier',
        'Virginia': 'Richmond', 'Washington': 'Olympia',
        'West Virginia': 'Charleston', 'Wisconsin': 'Madison',
        'Wyoming': 'Cheyenne'}
    return us_states
def determine_user_answer(us_states_and_capitals):
    """Quiz the user on state capitals and report the score.

    :param us_states_and_capitals: dict of state -> capital; NOTE this dict
        is consumed via ``popitem()`` (mutated in place).
    """
    correct_answers = 0    # counts the number of correct answers
    incorrect_answers = 0  # counts the number of wrong answers
    # Asking user for the number of questions
    number_of_questions = int(input('Enter your preferred '
                                    'number of questions: '))
    # Clamp the request to the number of available states (50).
    # BUG FIX: the original assigned to a misspelled name
    # ('number_of_questons'), so the clamp never took effect and popitem()
    # could raise KeyError once the dict was exhausted.
    if number_of_questions > len(us_states_and_capitals):
        number_of_questions = len(us_states_and_capitals)
    print()
    # Ask the questions; popitem() removes each asked state from the dict
    for _ in range(number_of_questions):
        key, value = us_states_and_capitals.popitem()
        print('Enter the capital of', key, end='')
        capital = input(': ')
        if capital == value:
            correct_answers += 1
        else:
            incorrect_answers += 1
    print()
    print('You answered', correct_answers, 'questions correct')
    print('You had', incorrect_answers, 'questions wrong')
main() | 0.244724 | 0.328099 |
from dabam import dabam
import copy
from numpy.testing import assert_almost_equal
def data1(): # some scratch data
    """Return a small whitespace-separated data table as one raw string.

    The first line is a header; each following row is an abscissa followed
    by several profile columns.  Used as the external-data fixture for the
    ``initialize_from_external_data`` tests (column 0 = abscissas in mm,
    column 1 = ordinates in nm — see the callers' ``to_SI_*`` factors).
    """
    return """Line1 Line2 Center Line3 Line4
21.1 -0.871333347 -1.536403196 -1.826784168 -2.013347664 -2.058588716
21.15 -0.876013666 -1.51561079 -1.868885951 -2.014787719 -1.968877289
21.2 -0.981523985 -1.708348394 -1.928961741 -1.979515764 -2.158752833
21.25 -1.087034305 -1.901102009 -1.989037538 -1.944227798 -2.348627347
21.3 -1.254296626 -1.936110635 -2.020671342 -1.791950822 -2.355610831
21.35 -1.421559948 -1.971134271 -2.052305152 -1.639659835 -2.362594286
21.4 -1.399064269 -1.861595918 -2.10292097 -1.713075837 -2.203912711
21.45 -1.376568592 -1.752073576 -2.153552794 -1.78649183 -2.045231106
21.5 -1.036460915 -1.787997245 -2.159108625 -1.864530811 -2.085005472
21.55 -0.696369238 -1.823905924 -2.164680463 -1.942569782 -2.124763808
21.6 -0.780425563 -1.778026614 -2.098917308 -1.897776743 -2.114199114
21.65 -0.864466887 -1.732164315 -2.03315516 -1.852982693 -2.103650391
21.7 -0.940757213 -1.772101026 -2.044830018 -1.900870632 -2.265402638
21.75 -1.017046539 -1.812052748 -2.056520884 -1.948758561 -2.427155856
21.8 -0.989683865 -1.840515481 -2.067112757 -2.05128848 -2.448787044
21.85 -0.962320192 -1.868978225 -2.077720636 -2.153833388 -2.470418202
21.9 -1.01848352 -1.887124979 -1.841470522 -2.060653285 -2.310500331
21.95 -1.074646848 -1.905287744 -1.605221415 -1.967489172 -2.15056743
22 -1.200116177 -1.95608952 -1.666137315 -1.785961048 -2.117633499
22.05 -1.325584506 -2.006875306 -1.727037222 -1.604417914 -2.084699539
22.1 -1.108096836 -1.908324104 -1.757405136 -1.56421777 -2.053077549
22.15 -0.890594166 -1.809756912 -1.787772056 -1.524000614 -2.021439529
22.2 -0.942897497 -1.81974073 -1.763009984 -1.757771449 -2.05019648
22.25 -0.995215829 -1.82972456 -1.738246918 -1.991542273 -2.078953401
22.3 -1.071337161 -1.8554104 -1.976499859 -2.095033086 -2.194563292
22.35 -1.147459494 -1.88108025 -2.214768808 -2.198523889 -2.310189154
22.4 -0.956460827 -1.821011112 -2.387439763 -2.240735681 -2.213289986
22.45 -0.765463161 -1.760941984 -2.560125725 -2.282946463 -2.116405789
"""
def test_dabam_names():
    """
    Tests that the I/O methods work well for the list of input values
    :return:
    """
    print("------------------- test_dabam_names ------------------------------")
    dm = dabam()
    number_of_input_fields = len(dm.inputs)
    argsdict = dm.inputs
    # harvest the current input names and values in parallel lists
    names = []
    values = []
    for i, j in argsdict.items():
        names.append(i)
        values.append(j)
    # change values and reinsert in object: double every non-None input
    values2 = copy.copy(values)
    for i in range(number_of_input_fields):
        if values[i] != None:
            values2[i] = 2 * values[i]
    print("-----------------------------------------------------")
    print("--input_name value value2")
    for i in range(number_of_input_fields):
        print(i, names[i], values[i], values2[i])
        dm.inputs[names[i]] = values2[i]
    print("-----------------------------------------------------")
    print("-----------------------------------------------------")
    print("--input_name input_name_short stored_value2, help")
    # exercise every per-field accessor
    for i in range(number_of_input_fields):
        print(names[i],
              dm.get_input_value(names[i]),
              dm.get_input_value_short_name(names[i]),
              dm.inputs[names[i]], "\n",
              dm.get_input_value_help(names[i]),
              )
    print("-----------------------------------------------------")
    # back to initial values via the dictionary round-trip API
    dict2 = dm.get_inputs_as_dictionary()
    for i in range(number_of_input_fields):
        dict2[names[i]] = values[i]
        dm.inputs[names[i]] = values2[i]
    dm.set_inputs_from_dictionary(dict2)
    print("--back to initial value")
    # the round-trip must restore the pristine defaults exactly
    if (dm.inputs == dabam().inputs):
        print("Back to initial value: OK")
    else:
        raise Exception("Back to initial value: error returning to initial state")
def test_dabam_stdev_slopes(nmax=9):
    """
    Tests the slope error value for the nmax first profiles (from remote server)

    For each entry the slope stdev is computed both from the profile and from
    the PSD and compared against the reference value stored in the entry
    metadata (entries without a stored value are skipped).
    :return:
    """
    print("------------------- test_dabam_slopes ------------------------------")
    # NOTE: the original kept a hard-coded 25-element reference list here that
    # was immediately overwritten by ``stdev_ok = []`` — dead data, removed.
    tmp_profile = []  # stdev computed from the profile
    tmp_psd = []      # stdev computed from the PSD
    stdev_ok = []     # reference value from the entry metadata (0 = missing)
    for i in range(nmax):
        print(">> testing slopes stdev from profile number: ", i)
        dm = dabam()
        dm.set_input_silent(True)
        dm.set_input_entryNumber(i + 1)
        dm.load()
        tmp_profile.append(dm.stdev_profile_slopes())
        tmp_psd.append(dm.stdev_psd_slopes())
        # missing/unparsable metadata falls back to 0 so the entry is skipped
        # below (the original used a bare ``except:`` — narrowed)
        try:
            tmp = float(dm.metadata["CALC_SLOPE_RMS"]) * float(dm.metadata["CALC_SLOPE_RMS_FACTOR"])
        except (KeyError, TypeError, ValueError):
            tmp = 0
        stdev_ok.append(tmp)
    for i in range(nmax):
        print("Entry, stdev from profile, stdev from psd, stdev OK (stored): %03d %8.3g %8.3g %8.3g" %
              (i + 1, tmp_profile[i], tmp_psd[i], stdev_ok[i]))
    for i in range(nmax):
        if stdev_ok[i] != 0.0:
            print("Checking correctness of dabam-entry: %d" % (1 + i))
            print("  Check slopes profile urad:  StDev=%f, FromPSD=%f, stored=%f " % (1e6 * tmp_profile[i], 1e6 * tmp_psd[i], 1e6 * stdev_ok[i]))
            assert abs(tmp_profile[i] - stdev_ok[i]) < 1e-6
            assert abs(tmp_psd[i] - stdev_ok[i]) < 1e-6
def test_entry():
    """Entry 80: slope stdev from profile and from PSD agree and equal 0.158 urad."""
    dm = dabam.initialize_from_entry_number(80)
    stdev_profile = dm.stdev_profile_slopes()
    stdev_psd = dm.stdev_psd_slopes()
    assert_almost_equal(stdev_profile, stdev_psd)
    assert_almost_equal(stdev_profile, 0.158e-6)
    assert_almost_equal(stdev_psd, 0.158e-6)
def test_entry_elliptical():
    """Entry 4: calculations must also succeed with detrending disabled."""
    dm = dabam.initialize_from_entry_number(4)
    # recalculate without detrending
    dm.set_input_setDetrending(-1)
    dm.make_calculations()
def test_entry_text():
    """Initialize dabam from in-memory text lines (mm abscissas, nm heights)."""
    txt = data1().split("\n")
    dm = dabam.initialize_from_external_data(txt,
                                             column_index_abscisa=0,
                                             column_index_ordinates=1,
                                             skiprows=1,
                                             useHeightsOrSlopes=0,
                                             to_SI_abscissas=1e-3,
                                             to_SI_ordinates=1e-9,
                                             detrending_flag=-1)
def test_entry_file():
    """Initialize dabam from an external two-column data file on disk."""
    filename = "tmp.dat"
    # context manager guarantees the handle is closed (the original left it
    # open if write() raised)
    with open(filename, "w") as f:
        f.write(data1())
    print("File written to disk: %s" % filename)
    dm = dabam.initialize_from_external_data(filename,
                                             column_index_abscisa=0,
                                             column_index_ordinates=1,
                                             skiprows=1,
                                             useHeightsOrSlopes=0,
                                             to_SI_abscissas=1e-3,
                                             to_SI_ordinates=1e-9,
                                             detrending_flag=-1)
def test_write_dabam_formatted_files():
    """Write DABAM-formatted output files from externally supplied data."""
    txt = data1().split("\n")
    dm = dabam.initialize_from_external_data(txt,
                                             column_index_abscisa=0,
                                             column_index_ordinates=1,
                                             skiprows=1,
                                             useHeightsOrSlopes=0,
                                             to_SI_abscissas=1e-3,
                                             to_SI_ordinates=1e-9,
                                             detrending_flag=-1)
    # first write echoes the raw input lines, second relies on metadata only
    dm.write_output_dabam_files(filename_root="tmp-DABAM-XXX", loaded_from_file=txt)
    dm.metadata_set_info(YEAR_FABRICATION=2019)  # fill metadata info
    dm.write_output_dabam_files(filename_root="tmp-DABAM-YYY")
def test_local_server():
    """Moments computed via a local file server must match the remote server."""
    import urllib.request
    # fetch entry 81 (metadata .txt + data .dat) into /tmp to act as the local server
    urllib.request.urlretrieve("http://ftp.esrf.eu/pub/scisoft/dabam/data/dabam-081.txt", "/tmp/dabam-081.txt")
    urllib.request.urlretrieve("http://ftp.esrf.eu/pub/scisoft/dabam/data/dabam-081.dat", "/tmp/dabam-081.dat")
    dm = dabam.initialize_from_local_server(81, "/tmp")
    m0 = dm.momentsHeights, dm.momentsSlopes
    # reset to remote server
    dm.set_default_server()
    dm.set_input_entryNumber(81)
    dm.make_calculations()
    m1 = dm.momentsHeights, dm.momentsSlopes
    # set again to local server
    dm = dabam.initialize_from_local_server(81, "/tmp")
    m2 = dm.momentsHeights, dm.momentsSlopes
    assert_almost_equal(m0, m1)
    assert_almost_equal(m0, m2)
if __name__ == "__main__":
    # Individual checks can be toggled by (un)commenting; only the
    # local-server comparison runs by default.
    # test_dabam_names()
    # test_dabam_stdev_slopes()
    # test_entry()
    # test_entry_elliptical()
    # test_entry_text()
    # test_entry_file()
    # test_write_dabam_formatted_files()
    test_local_server()
from dabam import dabam
import copy
from numpy.testing import assert_almost_equal
def data1(): # some scratch data
    """Return a small whitespace-separated data table as one raw string.

    The first line is a header; each following row is an abscissa followed
    by several profile columns.  Used as the external-data fixture for the
    ``initialize_from_external_data`` tests (column 0 = abscissas in mm,
    column 1 = ordinates in nm — see the callers' ``to_SI_*`` factors).
    """
    return """Line1 Line2 Center Line3 Line4
21.1 -0.871333347 -1.536403196 -1.826784168 -2.013347664 -2.058588716
21.15 -0.876013666 -1.51561079 -1.868885951 -2.014787719 -1.968877289
21.2 -0.981523985 -1.708348394 -1.928961741 -1.979515764 -2.158752833
21.25 -1.087034305 -1.901102009 -1.989037538 -1.944227798 -2.348627347
21.3 -1.254296626 -1.936110635 -2.020671342 -1.791950822 -2.355610831
21.35 -1.421559948 -1.971134271 -2.052305152 -1.639659835 -2.362594286
21.4 -1.399064269 -1.861595918 -2.10292097 -1.713075837 -2.203912711
21.45 -1.376568592 -1.752073576 -2.153552794 -1.78649183 -2.045231106
21.5 -1.036460915 -1.787997245 -2.159108625 -1.864530811 -2.085005472
21.55 -0.696369238 -1.823905924 -2.164680463 -1.942569782 -2.124763808
21.6 -0.780425563 -1.778026614 -2.098917308 -1.897776743 -2.114199114
21.65 -0.864466887 -1.732164315 -2.03315516 -1.852982693 -2.103650391
21.7 -0.940757213 -1.772101026 -2.044830018 -1.900870632 -2.265402638
21.75 -1.017046539 -1.812052748 -2.056520884 -1.948758561 -2.427155856
21.8 -0.989683865 -1.840515481 -2.067112757 -2.05128848 -2.448787044
21.85 -0.962320192 -1.868978225 -2.077720636 -2.153833388 -2.470418202
21.9 -1.01848352 -1.887124979 -1.841470522 -2.060653285 -2.310500331
21.95 -1.074646848 -1.905287744 -1.605221415 -1.967489172 -2.15056743
22 -1.200116177 -1.95608952 -1.666137315 -1.785961048 -2.117633499
22.05 -1.325584506 -2.006875306 -1.727037222 -1.604417914 -2.084699539
22.1 -1.108096836 -1.908324104 -1.757405136 -1.56421777 -2.053077549
22.15 -0.890594166 -1.809756912 -1.787772056 -1.524000614 -2.021439529
22.2 -0.942897497 -1.81974073 -1.763009984 -1.757771449 -2.05019648
22.25 -0.995215829 -1.82972456 -1.738246918 -1.991542273 -2.078953401
22.3 -1.071337161 -1.8554104 -1.976499859 -2.095033086 -2.194563292
22.35 -1.147459494 -1.88108025 -2.214768808 -2.198523889 -2.310189154
22.4 -0.956460827 -1.821011112 -2.387439763 -2.240735681 -2.213289986
22.45 -0.765463161 -1.760941984 -2.560125725 -2.282946463 -2.116405789
"""
def test_dabam_names():
    """
    Tests that the I/O methods work well for the list of input values
    :return:
    """
    print("------------------- test_dabam_names ------------------------------")
    dm = dabam()
    number_of_input_fields = len(dm.inputs)
    argsdict = dm.inputs
    # harvest the current input names and values in parallel lists
    names = []
    values = []
    for i, j in argsdict.items():
        names.append(i)
        values.append(j)
    # change values and reinsert in object: double every non-None input
    values2 = copy.copy(values)
    for i in range(number_of_input_fields):
        if values[i] != None:
            values2[i] = 2 * values[i]
    print("-----------------------------------------------------")
    print("--input_name value value2")
    for i in range(number_of_input_fields):
        print(i, names[i], values[i], values2[i])
        dm.inputs[names[i]] = values2[i]
    print("-----------------------------------------------------")
    print("-----------------------------------------------------")
    print("--input_name input_name_short stored_value2, help")
    # exercise every per-field accessor
    for i in range(number_of_input_fields):
        print(names[i],
              dm.get_input_value(names[i]),
              dm.get_input_value_short_name(names[i]),
              dm.inputs[names[i]], "\n",
              dm.get_input_value_help(names[i]),
              )
    print("-----------------------------------------------------")
    # back to initial values via the dictionary round-trip API
    dict2 = dm.get_inputs_as_dictionary()
    for i in range(number_of_input_fields):
        dict2[names[i]] = values[i]
        dm.inputs[names[i]] = values2[i]
    dm.set_inputs_from_dictionary(dict2)
    print("--back to initial value")
    # the round-trip must restore the pristine defaults exactly
    if (dm.inputs == dabam().inputs):
        print("Back to initial value: OK")
    else:
        raise Exception("Back to initial value: error returning to initial state")
def test_dabam_stdev_slopes(nmax=9):
    """
    Tests the slope error value for the nmax first profiles (from remote server)

    For each entry the slope stdev is computed both from the profile and from
    the PSD and compared against the reference value stored in the entry
    metadata (entries without a stored value are skipped).
    :return:
    """
    print("------------------- test_dabam_slopes ------------------------------")
    # NOTE: the original kept a hard-coded 25-element reference list here that
    # was immediately overwritten by ``stdev_ok = []`` — dead data, removed.
    tmp_profile = []  # stdev computed from the profile
    tmp_psd = []      # stdev computed from the PSD
    stdev_ok = []     # reference value from the entry metadata (0 = missing)
    for i in range(nmax):
        print(">> testing slopes stdev from profile number: ", i)
        dm = dabam()
        dm.set_input_silent(True)
        dm.set_input_entryNumber(i + 1)
        dm.load()
        tmp_profile.append(dm.stdev_profile_slopes())
        tmp_psd.append(dm.stdev_psd_slopes())
        # missing/unparsable metadata falls back to 0 so the entry is skipped
        # below (the original used a bare ``except:`` — narrowed)
        try:
            tmp = float(dm.metadata["CALC_SLOPE_RMS"]) * float(dm.metadata["CALC_SLOPE_RMS_FACTOR"])
        except (KeyError, TypeError, ValueError):
            tmp = 0
        stdev_ok.append(tmp)
    for i in range(nmax):
        print("Entry, stdev from profile, stdev from psd, stdev OK (stored): %03d %8.3g %8.3g %8.3g" %
              (i + 1, tmp_profile[i], tmp_psd[i], stdev_ok[i]))
    for i in range(nmax):
        if stdev_ok[i] != 0.0:
            print("Checking correctness of dabam-entry: %d" % (1 + i))
            print("  Check slopes profile urad:  StDev=%f, FromPSD=%f, stored=%f " % (1e6 * tmp_profile[i], 1e6 * tmp_psd[i], 1e6 * stdev_ok[i]))
            assert abs(tmp_profile[i] - stdev_ok[i]) < 1e-6
            assert abs(tmp_psd[i] - stdev_ok[i]) < 1e-6
def test_entry():
    """Entry 80: slope stdev from profile and from PSD agree and equal 0.158 urad."""
    dm = dabam.initialize_from_entry_number(80)
    stdev_profile = dm.stdev_profile_slopes()
    stdev_psd = dm.stdev_psd_slopes()
    assert_almost_equal(stdev_profile, stdev_psd)
    assert_almost_equal(stdev_profile, 0.158e-6)
    assert_almost_equal(stdev_psd, 0.158e-6)
def test_entry_elliptical():
    """Entry 4: calculations must also succeed with detrending disabled."""
    dm = dabam.initialize_from_entry_number(4)
    # recalculate without detrending
    dm.set_input_setDetrending(-1)
    dm.make_calculations()
def test_entry_text():
    """Initialize dabam from in-memory text lines (mm abscissas, nm heights)."""
    txt = data1().split("\n")
    dm = dabam.initialize_from_external_data(txt,
                                             column_index_abscisa=0,
                                             column_index_ordinates=1,
                                             skiprows=1,
                                             useHeightsOrSlopes=0,
                                             to_SI_abscissas=1e-3,
                                             to_SI_ordinates=1e-9,
                                             detrending_flag=-1)
def test_entry_file():
    """Initialize dabam from an external two-column data file on disk."""
    filename = "tmp.dat"
    # context manager guarantees the handle is closed (the original left it
    # open if write() raised)
    with open(filename, "w") as f:
        f.write(data1())
    print("File written to disk: %s" % filename)
    dm = dabam.initialize_from_external_data(filename,
                                             column_index_abscisa=0,
                                             column_index_ordinates=1,
                                             skiprows=1,
                                             useHeightsOrSlopes=0,
                                             to_SI_abscissas=1e-3,
                                             to_SI_ordinates=1e-9,
                                             detrending_flag=-1)
def test_write_dabam_formatted_files():
    """Write DABAM-formatted output files from externally supplied data."""
    txt = data1().split("\n")
    dm = dabam.initialize_from_external_data(txt,
                                             column_index_abscisa=0,
                                             column_index_ordinates=1,
                                             skiprows=1,
                                             useHeightsOrSlopes=0,
                                             to_SI_abscissas=1e-3,
                                             to_SI_ordinates=1e-9,
                                             detrending_flag=-1)
    # first write echoes the raw input lines, second relies on metadata only
    dm.write_output_dabam_files(filename_root="tmp-DABAM-XXX", loaded_from_file=txt)
    dm.metadata_set_info(YEAR_FABRICATION=2019)  # fill metadata info
    dm.write_output_dabam_files(filename_root="tmp-DABAM-YYY")
def test_local_server():
    """Moments computed via a local file server must match the remote server."""
    import urllib.request
    # fetch entry 81 (metadata .txt + data .dat) into /tmp to act as the local server
    urllib.request.urlretrieve("http://ftp.esrf.eu/pub/scisoft/dabam/data/dabam-081.txt", "/tmp/dabam-081.txt")
    urllib.request.urlretrieve("http://ftp.esrf.eu/pub/scisoft/dabam/data/dabam-081.dat", "/tmp/dabam-081.dat")
    dm = dabam.initialize_from_local_server(81, "/tmp")
    m0 = dm.momentsHeights, dm.momentsSlopes
    # reset to remote server
    dm.set_default_server()
    dm.set_input_entryNumber(81)
    dm.make_calculations()
    m1 = dm.momentsHeights, dm.momentsSlopes
    # set again to local server
    dm = dabam.initialize_from_local_server(81, "/tmp")
    m2 = dm.momentsHeights, dm.momentsSlopes
    assert_almost_equal(m0, m1)
    assert_almost_equal(m0, m2)
if __name__ == "__main__":
    # Individual checks can be toggled by (un)commenting; only the
    # local-server comparison runs by default.
    # test_dabam_names()
    # test_dabam_stdev_slopes()
    # test_entry()
    # test_entry_elliptical()
    # test_entry_text()
    # test_entry_file()
    # test_write_dabam_formatted_files()
    test_local_server()
from django.conf import settings
from django.template import Library
from django.utils.translation import npgettext_lazy, pgettext_lazy
from django_prices.templatetags import prices_i18n
from prices import Money
from ...order import events
from ...order.models import OrderEvent
register = Library()

# Human-readable, translatable labels for each OrderEventsEmails value;
# used when rendering EMAIL_SENT events in display_order_event().
EMAIL_CHOICES = {
    events.OrderEventsEmails.PAYMENT: pgettext_lazy(
        'Email type', 'Payment confirmation'),
    events.OrderEventsEmails.SHIPPING: pgettext_lazy(
        'Email type', 'Shipping confirmation'),
    events.OrderEventsEmails.FULFILLMENT: pgettext_lazy(
        'Email type', 'Fulfillment confirmation'),
    events.OrderEventsEmails.ORDER: pgettext_lazy(
        'Email type', 'Order confirmation')}
def get_money_from_params(amount):
    """Coerce a legacy event parameter into a ``Money`` instance.

    Money serialization changed at one point; as for now it's serialized as a
    dict, but older events may carry a bare amount or an actual ``Money``.
    Can be safely removed after migrating to Dashboard 2.0.
    """
    if isinstance(amount, dict):
        return Money(amount=amount['amount'], currency=amount['currency'])
    if isinstance(amount, Money):
        return amount
    # legacy bare number: assume the store's default currency
    return Money(amount, settings.DEFAULT_CURRENCY)
@register.simple_tag
def display_order_event(order_event: OrderEvent):
    """Return a human-readable, translated description of an order event.

    This function is used to keep the backwards compatibility with the old
    dashboard and the new type of order events (storing enums instead of
    messages).

    BUG FIX: several branches interpolated ``%`` *inside* ``pgettext_lazy``,
    which bakes runtime values into the msgid so the translation catalog can
    never match it. Interpolation is now applied to the lazy result, as the
    other branches already do (Django lazy strings support ``%``).
    """
    event_type = order_event.type
    params = order_event.parameters
    if event_type == events.OrderEvents.PLACED_FROM_DRAFT:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Order placed from draft order')
    if event_type == events.OrderEvents.PAYMENT_VOIDED:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Payment was voided by %(user_name)s') % {
                'user_name': order_event.user}
    if event_type == events.OrderEvents.PAYMENT_REFUNDED:
        amount = get_money_from_params(params['amount'])
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Successfully refunded: %(amount)s') % {
                'amount': prices_i18n.amount(amount)}
    if event_type == events.OrderEvents.PAYMENT_CAPTURED:
        amount = get_money_from_params(params['amount'])
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Successfully captured: %(amount)s') % {
                'amount': prices_i18n.amount(amount)}
    if event_type == events.OrderEvents.ORDER_MARKED_AS_PAID:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Order manually marked as paid')
    if event_type == events.OrderEvents.CANCELED:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Order was canceled')
    if event_type == events.OrderEvents.FULFILLMENT_RESTOCKED_ITEMS:
        return npgettext_lazy(
            'Dashboard message related to an order',
            'We restocked %(quantity)d item',
            'We restocked %(quantity)d items',
            number='quantity') % {'quantity': params['quantity']}
    if event_type == events.OrderEvents.NOTE_ADDED:
        return pgettext_lazy(
            'Dashboard message related to an order',
            '%(user_name)s added note: %(note)s') % {
                'note': params['message'],
                'user_name': order_event.user}
    if event_type == events.OrderEvents.FULFILLMENT_CANCELED:
        return pgettext_lazy(
            'Dashboard message',
            'Fulfillment #%(fulfillment)s canceled by %(user_name)s') % {
                'fulfillment': params['composed_id'],
                'user_name': order_event.user}
    if event_type == events.OrderEvents.FULFILLMENT_FULFILLED_ITEMS:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Fulfilled some items')
    if event_type == events.OrderEvents.PLACED:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Order was placed')
    if event_type == events.OrderEvents.ORDER_FULLY_PAID:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Order was fully paid')
    if event_type == events.OrderEvents.EMAIL_SENT:
        return pgettext_lazy(
            'Dashboard message related to an order',
            '%(email_type)s email was sent to the customer '
            '(%(email)s)') % {
                'email_type': EMAIL_CHOICES[params['email_type']],
                'email': params['email']}
    if event_type == events.OrderEvents.TRACKING_UPDATED:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Fulfillment #%(fulfillment)s tracking was updated to'
            ' %(tracking_number)s by %(user_name)s') % {
                'fulfillment': params['composed_id'],
                'tracking_number': params['tracking_number'],
                'user_name': order_event.user}
    if event_type == events.OrderEvents.DRAFT_CREATED:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'The draft was created by %(user_name)s') % {
                'user_name': order_event.user}
    if event_type == events.OrderEvents.DRAFT_ADDED_PRODUCTS:
        return pgettext_lazy(
            'Dashboard message related to an order',
            '%(user_name)s added some products') % {
                'user_name': order_event.user}
    if event_type == events.OrderEvents.DRAFT_REMOVED_PRODUCTS:
        return pgettext_lazy(
            'Dashboard message related to an order',
            '%(user_name)s removed some products') % {
                'user_name': order_event.user}
    if event_type == events.OrderEvents.OVERSOLD_ITEMS:
        return pgettext_lazy(
            'Dashboard message related to an order',
            '%(user_name)s placed the order by bypassing oversold items') % {
                'user_name': order_event.user}
    if event_type == events.OrderEvents.UPDATED_ADDRESS:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'The order address was updated by %(user_name)s') % {
                'user_name': order_event.user}
    if event_type == events.OrderEvents.PAYMENT_FAILED:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'The payment was failed by %(user_name)s') % {
                'user_name': order_event.user}
    if event_type == events.OrderEvents.OTHER:
        return order_event.parameters['message']
    raise ValueError('Not supported event type: %s' % (event_type))
from django.template import Library
from django.utils.translation import npgettext_lazy, pgettext_lazy
from django_prices.templatetags import prices_i18n
from prices import Money
from ...order import events
from ...order.models import OrderEvent
register = Library()
# Human-readable, translatable labels for each order-email event type.
# Used by display_order_event() when rendering EMAIL_SENT events.
EMAIL_CHOICES = {
    events.OrderEventsEmails.PAYMENT: pgettext_lazy(
        'Email type', 'Payment confirmation'),
    events.OrderEventsEmails.SHIPPING: pgettext_lazy(
        'Email type', 'Shipping confirmation'),
    events.OrderEventsEmails.FULFILLMENT: pgettext_lazy(
        'Email type', 'Fulfillment confirmation'),
    events.OrderEventsEmails.ORDER: pgettext_lazy(
        'Email type', 'Order confirmation')}
def get_money_from_params(amount):
    """Coerce a serialized event amount into a ``Money`` instance.

    Money serialization changed at one point; it is currently stored as a
    dict, but legacy event rows may hold an actual ``Money`` object or a
    bare number (interpreted in the default currency).
    Can be safely removed after migrating to Dashboard 2.0.
    """
    if isinstance(amount, Money):
        return amount
    if not isinstance(amount, dict):
        # Legacy scalar value: assume the site-wide default currency.
        return Money(amount, settings.DEFAULT_CURRENCY)
    return Money(amount=amount['amount'], currency=amount['currency'])
@register.simple_tag
def display_order_event(order_event: OrderEvent):
    """Return a human-readable message for a stored order event.

    Kept for backwards compatibility between the old dashboard and the new
    type of order events (which store enums instead of messages).

    :param order_event: the ``OrderEvent`` row to render
    :raises ValueError: if the event type is not recognized
    """
    event_type = order_event.type
    params = order_event.parameters
    if event_type == events.OrderEvents.PLACED_FROM_DRAFT:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Order placed from draft order')
    # NOTE: interpolation ("% {...}") is applied *after* pgettext_lazy in every
    # branch below.  Interpolating inside the call (as a few branches used to)
    # bakes the values into the msgid, so it never matches the translation
    # catalog and the message silently stays untranslated.
    if event_type == events.OrderEvents.PAYMENT_VOIDED:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Payment was voided by %(user_name)s') % {
                'user_name': order_event.user}
    if event_type == events.OrderEvents.PAYMENT_REFUNDED:
        amount = get_money_from_params(params['amount'])
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Successfully refunded: %(amount)s') % {
                'amount': prices_i18n.amount(amount)}
    if event_type == events.OrderEvents.PAYMENT_CAPTURED:
        amount = get_money_from_params(params['amount'])
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Successfully captured: %(amount)s') % {
                'amount': prices_i18n.amount(amount)}
    if event_type == events.OrderEvents.ORDER_MARKED_AS_PAID:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Order manually marked as paid')
    if event_type == events.OrderEvents.CANCELED:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Order was canceled')
    if event_type == events.OrderEvents.FULFILLMENT_RESTOCKED_ITEMS:
        # number='quantity' lets Django pick the plural form lazily from the
        # interpolated 'quantity' value.
        return npgettext_lazy(
            'Dashboard message related to an order',
            'We restocked %(quantity)d item',
            'We restocked %(quantity)d items',
            number='quantity') % {'quantity': params['quantity']}
    if event_type == events.OrderEvents.NOTE_ADDED:
        return pgettext_lazy(
            'Dashboard message related to an order',
            '%(user_name)s added note: %(note)s') % {
                'note': params['message'],
                'user_name': order_event.user}
    if event_type == events.OrderEvents.FULFILLMENT_CANCELED:
        # NOTE(review): this branch uses the shorter 'Dashboard message'
        # context while every other branch uses 'Dashboard message related to
        # an order' -- confirm against the translation catalog before unifying.
        return pgettext_lazy(
            'Dashboard message',
            'Fulfillment #%(fulfillment)s canceled by %(user_name)s') % {
                'fulfillment': params['composed_id'],
                'user_name': order_event.user}
    if event_type == events.OrderEvents.FULFILLMENT_FULFILLED_ITEMS:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Fulfilled some items')
    if event_type == events.OrderEvents.PLACED:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Order was placed')
    if event_type == events.OrderEvents.ORDER_FULLY_PAID:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Order was fully paid')
    if event_type == events.OrderEvents.EMAIL_SENT:
        return pgettext_lazy(
            'Dashboard message related to an order',
            '%(email_type)s email was sent to the customer '
            '(%(email)s)') % {
                'email_type': EMAIL_CHOICES[params['email_type']],
                'email': params['email']}
    if event_type == events.OrderEvents.TRACKING_UPDATED:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'Fulfillment #%(fulfillment)s tracking was updated to'
            ' %(tracking_number)s by %(user_name)s') % {
                'fulfillment': params['composed_id'],
                'tracking_number': params['tracking_number'],
                'user_name': order_event.user}
    if event_type == events.OrderEvents.DRAFT_CREATED:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'The draft was created by %(user_name)s') % {
                'user_name': order_event.user}
    if event_type == events.OrderEvents.DRAFT_ADDED_PRODUCTS:
        return pgettext_lazy(
            'Dashboard message related to an order',
            '%(user_name)s added some products') % {
                'user_name': order_event.user}
    if event_type == events.OrderEvents.DRAFT_REMOVED_PRODUCTS:
        return pgettext_lazy(
            'Dashboard message related to an order',
            '%(user_name)s removed some products') % {
                'user_name': order_event.user}
    if event_type == events.OrderEvents.OVERSOLD_ITEMS:
        return pgettext_lazy(
            'Dashboard message related to an order',
            '%(user_name)s placed the order by bypassing oversold items') % {
                'user_name': order_event.user}
    if event_type == events.OrderEvents.UPDATED_ADDRESS:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'The order address was updated by %(user_name)s') % {
                'user_name': order_event.user}
    if event_type == events.OrderEvents.PAYMENT_FAILED:
        return pgettext_lazy(
            'Dashboard message related to an order',
            'The payment was failed by %(user_name)s') % {
                'user_name': order_event.user}
    if event_type == events.OrderEvents.OTHER:
        # Free-form events carry their display message directly.
        return order_event.parameters['message']
    raise ValueError('Not supported event type: %s' % event_type)
import argparse
from datetime import date
from landsatlinks import __version__
def parse_cli_arguments():
    """Define and parse the command line interface.

    :return: argparse.Namespace holding the parsed arguments
    """
    current_date = date.today().strftime('%Y-%m-%d')
    parser = argparse.ArgumentParser(
        description='Creating download URLs for Landsat Collection 2 Level 1 data using the USGS machine-to-machine API'
    )
    # positional arguments
    parser.add_argument(
        'results',
        help='Path to the file containing the search result. This will be created during the first '
             'search. This file should be in the download directory to allow creating new download '
             'links when the old ones expired. The text file containing the download links will be '
             'stored in the same directory.'
    )
    parser.add_argument(
        'sensor',
        choices=['TM', 'ETM', 'OLI'],
        help='The sensor that scenes are requested for. Choose TM for Landsat 4/5, ETM for Landsat 7, '
             'or OLI for Landsat 8.'
    )
    parser.add_argument(
        'pathrowlist',
        help='Path to text file containing allowed pathrows, one per line. Format: PPPRRR (keep padding zeroes!).'
    )
    # optional arguments
    parser.add_argument(
        '-d', '--daterange',
        default=f'1970-01-01,{current_date}',
        help='Start date and end date = date range to be considered. Format: YYYY-MM-DD,YYYY-MM-DD. '
             'Default: full archive until today.'
    )
    parser.add_argument(
        '-c', '--cloudcover',
        default='0,100',
        help='Percent (land) cloud cover range to be considered. \nDefault: 0,100')
    parser.add_argument(
        '-m', '--months',
        default='1,2,3,4,5,6,7,8,9,10,11,12',
        help='Seasonal filter: define the months to be considered. \nDefault: 1,2,3,4,5,6,7,8,9,10,11,12'
    )
    parser.add_argument(
        '-t', '--tier',
        # 'RT' (real-time) added: the help text promises T1,T2,RT but the
        # choices list previously rejected RT with a usage error.
        choices=['T1', 'T2', 'RT'],
        default='T1',
        help='Landsat collection tier level. Valid tiers: T1,T2,RT \nDefault: T1'
    )
    parser.add_argument(
        '-l', '--level',
        choices=['L1TP', 'L1GT', 'L1GS'],
        default='L1TP',
        help='Landsat level of processing. Valid levels: L1TP, L1GT, L1GS'
    )
    parser.add_argument(
        '-v', '--version',
        action='version',
        version=f'landsatlinks version {__version__} https://github.com/ernstste/landsatlinks'
    )
    parser.add_argument(
        '-s', '--secret',
        help='Path to a file containing the username and password for the USGS EarthExplorer.'
    )
    return parser.parse_args()
from datetime import date
from landsatlinks import __version__
def parse_cli_arguments():
    """Define and parse the command line interface.

    :return: argparse.Namespace holding the parsed arguments
    """
    current_date = date.today().strftime('%Y-%m-%d')
    parser = argparse.ArgumentParser(
        description='Creating download URLs for Landsat Collection 2 Level 1 data using the USGS machine-to-machine API'
    )
    # positional arguments
    parser.add_argument(
        'results',
        help='Path to the file containing the search result. This will be created during the first '
             'search. This file should be in the download directory to allow creating new download '
             'links when the old ones expired. The text file containing the download links will be '
             'stored in the same directory.'
    )
    parser.add_argument(
        'sensor',
        choices=['TM', 'ETM', 'OLI'],
        help='The sensor that scenes are requested for. Choose TM for Landsat 4/5, ETM for Landsat 7, '
             'or OLI for Landsat 8.'
    )
    parser.add_argument(
        'pathrowlist',
        help='Path to text file containing allowed pathrows, one per line. Format: PPPRRR (keep padding zeroes!).'
    )
    # optional arguments
    parser.add_argument(
        '-d', '--daterange',
        default=f'1970-01-01,{current_date}',
        help='Start date and end date = date range to be considered. Format: YYYY-MM-DD,YYYY-MM-DD. '
             'Default: full archive until today.'
    )
    parser.add_argument(
        '-c', '--cloudcover',
        default='0,100',
        help='Percent (land) cloud cover range to be considered. \nDefault: 0,100')
    parser.add_argument(
        '-m', '--months',
        default='1,2,3,4,5,6,7,8,9,10,11,12',
        help='Seasonal filter: define the months to be considered. \nDefault: 1,2,3,4,5,6,7,8,9,10,11,12'
    )
    parser.add_argument(
        '-t', '--tier',
        # 'RT' (real-time) added: the help text promises T1,T2,RT but the
        # choices list previously rejected RT with a usage error.
        choices=['T1', 'T2', 'RT'],
        default='T1',
        help='Landsat collection tier level. Valid tiers: T1,T2,RT \nDefault: T1'
    )
    parser.add_argument(
        '-l', '--level',
        choices=['L1TP', 'L1GT', 'L1GS'],
        default='L1TP',
        help='Landsat level of processing. Valid levels: L1TP, L1GT, L1GS'
    )
    parser.add_argument(
        '-v', '--version',
        action='version',
        version=f'landsatlinks version {__version__} https://github.com/ernstste/landsatlinks'
    )
    parser.add_argument(
        '-s', '--secret',
        help='Path to a file containing the username and password for the USGS EarthExplorer.'
    )
    return parser.parse_args()
from __future__ import print_function
import numpy as np
import math
import os
from util.misc import norm_poly_dists, calc_tols
from util.measure import BaselineMeasure
from util.geometry import Polygon
class BaselineMeasureEval(object):
    """Evaluate detected (RECO) baseline polygons against TRUTH polygons.

    Results for a range of distance tolerances are accumulated in a
    ``BaselineMeasure`` structure (precision/recall per tolerance tick).
    """

    def __init__(self, min_tol=10, max_tol=30, rel_tol=0.25, poly_tick_dist=5):
        """
        Initialize BaselineMeasureEval object.
        :param min_tol: MINIMUM distance tolerance which is not penalized
        :param max_tol: MAXIMUM distance tolerance which is not penalized
        :param rel_tol: fraction of estimated interline distance as tolerance values
        :param poly_tick_dist: desired distance of points of the baseline
        """
        assert type(min_tol) == int and type(max_tol) == int, "min_tol and max_tol have to be ints"
        assert min_tol <= max_tol, "min_tol can't exceed max_tol"
        assert 0.0 < rel_tol <= 1.0, "rel_tol has to be in the range (0,1]"
        assert type(poly_tick_dist) == int, "poly_tick_dist has to be int"
        self.max_tols = np.arange(min_tol, max_tol + 1)
        self.rel_tol = rel_tol
        self.poly_tick_dist = poly_tick_dist
        self.truth_line_tols = None  # per-truth-line tolerances, set per page
        self.measure = BaselineMeasure()

    def calc_measure_for_page_baseline_polys(self, polys_truth, polys_reco):
        """
        Calculate the BaselinMeasure stats for given truth and reco polygons of
        a single page and adds the results to the BaselineMeasure structure.
        :param polys_truth: list of TRUTH polygons corresponding to a single page
        :param polys_reco: list of RECO polygons corresponding to a single page
        """
        assert type(polys_truth) == list and type(polys_reco) == list, "polys_truth and polys_reco have to be lists"
        assert all([isinstance(poly, Polygon) for poly in polys_truth + polys_reco]), \
            "elements of polys_truth and polys_reco have to be Polygons"
        # Normalize baselines, so that poly points have a desired "distance"
        polys_truth_norm = norm_poly_dists(polys_truth, self.poly_tick_dist)
        polys_reco_norm = norm_poly_dists(polys_reco, self.poly_tick_dist)
        # Optionally calculate tolerances
        if self.max_tols[0] < 0:
            # Negative min_tol is the sentinel for "derive tolerances from the
            # estimated interline distance" instead of using fixed ticks.
            tols = calc_tols(polys_truth_norm, self.poly_tick_dist, 250, self.rel_tol)
            self.truth_line_tols = np.expand_dims(tols, axis=1)
        else:
            self.truth_line_tols = np.tile(self.max_tols, [len(polys_truth_norm), 1])
        # For each reco poly calculate the precision values for all tolerances
        precision = self.calc_precision(polys_truth_norm, polys_reco_norm)
        # For each truth_poly calculate the recall values for all tolerances
        recall = self.calc_recall(polys_truth_norm, polys_reco_norm)
        # add results
        self.measure.add_per_dist_tol_tick_per_line_precision(precision)
        self.measure.add_per_dist_tol_tick_per_line_recall(recall)
        self.truth_line_tols = None

    def calc_precision(self, polys_truth, polys_reco):
        """
        Calculates and returns precision values for given truth and reco polygons for all tolerances.
        :param polys_truth: list of TRUTH polygons
        :param polys_reco: list of RECO polygons
        :return: precision values
        """
        assert type(polys_truth) == list and type(polys_reco) == list, "polys_truth and polys_reco have to be lists"
        assert all([isinstance(poly, Polygon) for poly in polys_truth + polys_reco]), \
            "elements of polys_truth and polys_reco have to be Polygons"
        # relative hits per tolerance value over all reco and truth polygons
        rel_hits = np.zeros([self.max_tols.shape[0], len(polys_reco), len(polys_truth)])
        for i, poly_reco in enumerate(polys_reco):
            for j, poly_truth in enumerate(polys_truth):
                rel_hits[:, i, j] = self.count_rel_hits(poly_reco, poly_truth, self.truth_line_tols[j])
        # calculate alignment: greedily match the best reco/truth pair, then
        # remove both from further consideration (row/column set to -1)
        precision = np.zeros([self.max_tols.shape[0], len(polys_reco)])
        for i, hits_per_tol in enumerate(np.split(rel_hits, rel_hits.shape[0])):
            hits_per_tol = np.squeeze(hits_per_tol, 0)
            while True:
                # calculate indices for maximum alignment
                max_idx_x, max_idx_y = np.unravel_index(np.argmax(hits_per_tol), hits_per_tol.shape)
                # finish if all polys_reco have been aligned
                if hits_per_tol[max_idx_x, max_idx_y] < 0:
                    break
                # set precision to max alignment
                precision[i, max_idx_x] = hits_per_tol[max_idx_x, max_idx_y]
                # set row and column to -1
                hits_per_tol[max_idx_x, :] = -1.0
                hits_per_tol[:, max_idx_y] = -1.0
        return precision

    def calc_recall(self, polys_truth, polys_reco):
        """
        Calculates and returns recall values for given truth and reco polygons for all tolerances.
        :param polys_truth: list of TRUTH polygons
        :param polys_reco: list of RECO polygons
        :return: recall values
        """
        assert type(polys_truth) == list and type(polys_reco) == list, "polys_truth and polys_reco have to be lists"
        assert all([isinstance(poly, Polygon) for poly in polys_truth + polys_reco]), \
            "elements of polys_truth and polys_reco have to be Polygons"
        recall = np.zeros([self.max_tols.shape[0], len(polys_truth)])
        for i, poly_truth in enumerate(polys_truth):
            recall[:, i] = self.count_rel_hits_list(poly_truth, polys_reco, self.truth_line_tols[i])
        return recall

    def count_rel_hits(self, poly_to_count, poly_ref, tols):
        """
        Counts the relative hits per tolerance value over all points of the polygon and corresponding
        nearest points of the reference polygon.
        :param poly_to_count: Polygon to count over
        :param poly_ref: reference Polygon
        :param tols: vector of tolerances
        :return: vector of relative hits for every tolerance value
        """
        assert isinstance(poly_to_count, Polygon) and isinstance(poly_ref, Polygon), \
            "poly_to_count and poly_ref have to be Polygons"
        assert type(tols) == np.ndarray, "tols has to be np.ndarray"
        assert len(tols.shape) == 1, "tols has to be 1d vector"
        assert tols.dtype == float, "tols has to be float"
        poly_to_count_bb = poly_to_count.get_bounding_box()
        poly_ref_bb = poly_ref.get_bounding_box()
        intersection = poly_to_count_bb.intersection(poly_ref_bb)
        rel_hits = np.zeros_like(tols)
        # Early stopping criterion
        if min(intersection.width, intersection.height) < -3.0 * tols[-1]:
            return rel_hits
        # Build and expand numpy arrays from points
        poly_to_count_x = np.array(poly_to_count.x_points)
        poly_to_count_y = np.array(poly_to_count.y_points)
        poly_ref_x = np.expand_dims(np.asarray(poly_ref.x_points), axis=1)
        poly_ref_y = np.expand_dims(np.asarray(poly_ref.y_points), axis=1)
        # Calculate minimum distances (L1 metric over the reference points)
        dist_x = abs(poly_to_count_x - poly_ref_x)
        dist_y = abs(poly_to_count_y - poly_ref_y)
        min_dist = np.amin(dist_x + dist_y, axis=0)
        # Calculate masks for two tolerance cases: full hit within tol,
        # linearly decaying partial hit within 3*tol
        tols_t = np.expand_dims(np.asarray(tols), axis=1)
        mask1 = (min_dist <= tols_t).astype(float)
        mask2 = (min_dist <= 3.0 * tols_t).astype(float)
        mask2 = mask2 - mask1
        # Calculate relative hits
        rel_hits = mask1 + mask2 * ((3.0 * tols_t - min_dist) / (2.0 * tols_t))
        rel_hits = np.sum(rel_hits, axis=1)
        rel_hits /= poly_to_count.n_points
        return rel_hits

    def count_rel_hits_list(self, poly_to_count, polys_ref, tols):
        """
        Counts the relative hits per tolerance value over all points of the polygon
        against the nearest point of ANY reference polygon.
        :param poly_to_count: Polygon to count over
        :param polys_ref: list of reference Polygons
        :param tols: vector of tolerances
        :return: vector of relative hits for every tolerance value
        """
        assert isinstance(poly_to_count, Polygon), "poly_to_count has to be Polygon"
        assert type(polys_ref) == list, "polys_ref has to be list"
        assert all([isinstance(poly, Polygon) for poly in polys_ref]), "elements of polys_ref have to Polygons"
        assert type(tols) == np.ndarray, "tols has to be np.ndarray"
        assert len(tols.shape) == 1, "tols has to be 1d vector"
        assert tols.dtype == float, "tols has to be float"
        poly_to_count_bb = poly_to_count.get_bounding_box()
        all_inf = True
        min_dist = np.full((poly_to_count.n_points,), np.inf)
        for poly_ref in polys_ref:
            poly_ref_bb = poly_ref.get_bounding_box()
            intersection = poly_to_count_bb.intersection(poly_ref_bb)
            # Early stopping criterion
            if min(intersection.width, intersection.height) < -3.0 * tols[-1]:
                continue
            # Build and expand numpy arrays from points
            poly_to_count_x = np.array(poly_to_count.x_points)
            poly_to_count_y = np.array(poly_to_count.y_points)
            poly_ref_x = np.expand_dims(np.asarray(poly_ref.x_points), axis=1)
            poly_ref_y = np.expand_dims(np.asarray(poly_ref.y_points), axis=1)
            # Calculate minimum distances
            dist_x = abs(poly_to_count_x - poly_ref_x)
            dist_y = abs(poly_to_count_y - poly_ref_y)
            if all_inf:
                all_inf = False
                min_dist = np.amin(dist_x + dist_y, axis=0)
            else:
                min_dist = np.minimum(min_dist, np.amin(dist_x + dist_y, axis=0))
        # Calculate masks for two tolerance cases
        tols_t = np.expand_dims(np.asarray(tols), axis=1)
        mask1 = (min_dist <= tols_t).astype(float)
        mask2 = (min_dist <= 3.0 * tols_t).astype(float)
        mask2 = mask2 - mask1
        # Calculate relative hits
        rel_hits = np.zeros(mask1.shape)
        if not all_inf:
            for i in range(mask1.shape[0]):
                for j in range(mask1.shape[1]):
                    if np.isinf(min_dist[j]):
                        continue
                    # BUGFIX: tols_t has shape (num_tols, 1), so the column
                    # index must be 0 -- "tols_t[i, i]" raised IndexError for
                    # every i >= 1.
                    rel_hits[i, j] = mask1[i, j] + \
                        mask2[i, j] * ((3.0 * tols_t[i, 0] - min_dist[j]) / (2.0 * tols_t[i, 0]))
        rel_hits = np.sum(rel_hits, axis=1)
        rel_hits /= poly_to_count.n_points
        return rel_hits
if __name__ == '__main__':
    # Debug scrap: show the configured module search path and sanity-check numpy.
    # .get avoids a KeyError when PYTHONPATH is not set in the environment.
    print(os.environ.get("PYTHONPATH", "<PYTHONPATH not set>"))
    z = np.zeros([1, 2])
    if isinstance(z, np.ndarray):  # isinstance is the idiomatic type check
        print("is np.ndarray")
import numpy as np
import math
import os
from util.misc import norm_poly_dists, calc_tols
from util.measure import BaselineMeasure
from util.geometry import Polygon
class BaselineMeasureEval(object):
    """Evaluate detected (RECO) baseline polygons against TRUTH polygons.

    Results for a range of distance tolerances are accumulated in a
    ``BaselineMeasure`` structure (precision/recall per tolerance tick).
    """

    def __init__(self, min_tol=10, max_tol=30, rel_tol=0.25, poly_tick_dist=5):
        """
        Initialize BaselineMeasureEval object.
        :param min_tol: MINIMUM distance tolerance which is not penalized
        :param max_tol: MAXIMUM distance tolerance which is not penalized
        :param rel_tol: fraction of estimated interline distance as tolerance values
        :param poly_tick_dist: desired distance of points of the baseline
        """
        assert type(min_tol) == int and type(max_tol) == int, "min_tol and max_tol have to be ints"
        assert min_tol <= max_tol, "min_tol can't exceed max_tol"
        assert 0.0 < rel_tol <= 1.0, "rel_tol has to be in the range (0,1]"
        assert type(poly_tick_dist) == int, "poly_tick_dist has to be int"
        self.max_tols = np.arange(min_tol, max_tol + 1)
        self.rel_tol = rel_tol
        self.poly_tick_dist = poly_tick_dist
        self.truth_line_tols = None  # per-truth-line tolerances, set per page
        self.measure = BaselineMeasure()

    def calc_measure_for_page_baseline_polys(self, polys_truth, polys_reco):
        """
        Calculate the BaselinMeasure stats for given truth and reco polygons of
        a single page and adds the results to the BaselineMeasure structure.
        :param polys_truth: list of TRUTH polygons corresponding to a single page
        :param polys_reco: list of RECO polygons corresponding to a single page
        """
        assert type(polys_truth) == list and type(polys_reco) == list, "polys_truth and polys_reco have to be lists"
        assert all([isinstance(poly, Polygon) for poly in polys_truth + polys_reco]), \
            "elements of polys_truth and polys_reco have to be Polygons"
        # Normalize baselines, so that poly points have a desired "distance"
        polys_truth_norm = norm_poly_dists(polys_truth, self.poly_tick_dist)
        polys_reco_norm = norm_poly_dists(polys_reco, self.poly_tick_dist)
        # Optionally calculate tolerances
        if self.max_tols[0] < 0:
            # Negative min_tol is the sentinel for "derive tolerances from the
            # estimated interline distance" instead of using fixed ticks.
            tols = calc_tols(polys_truth_norm, self.poly_tick_dist, 250, self.rel_tol)
            self.truth_line_tols = np.expand_dims(tols, axis=1)
        else:
            self.truth_line_tols = np.tile(self.max_tols, [len(polys_truth_norm), 1])
        # For each reco poly calculate the precision values for all tolerances
        precision = self.calc_precision(polys_truth_norm, polys_reco_norm)
        # For each truth_poly calculate the recall values for all tolerances
        recall = self.calc_recall(polys_truth_norm, polys_reco_norm)
        # add results
        self.measure.add_per_dist_tol_tick_per_line_precision(precision)
        self.measure.add_per_dist_tol_tick_per_line_recall(recall)
        self.truth_line_tols = None

    def calc_precision(self, polys_truth, polys_reco):
        """
        Calculates and returns precision values for given truth and reco polygons for all tolerances.
        :param polys_truth: list of TRUTH polygons
        :param polys_reco: list of RECO polygons
        :return: precision values
        """
        assert type(polys_truth) == list and type(polys_reco) == list, "polys_truth and polys_reco have to be lists"
        assert all([isinstance(poly, Polygon) for poly in polys_truth + polys_reco]), \
            "elements of polys_truth and polys_reco have to be Polygons"
        # relative hits per tolerance value over all reco and truth polygons
        rel_hits = np.zeros([self.max_tols.shape[0], len(polys_reco), len(polys_truth)])
        for i, poly_reco in enumerate(polys_reco):
            for j, poly_truth in enumerate(polys_truth):
                rel_hits[:, i, j] = self.count_rel_hits(poly_reco, poly_truth, self.truth_line_tols[j])
        # calculate alignment: greedily match the best reco/truth pair, then
        # remove both from further consideration (row/column set to -1)
        precision = np.zeros([self.max_tols.shape[0], len(polys_reco)])
        for i, hits_per_tol in enumerate(np.split(rel_hits, rel_hits.shape[0])):
            hits_per_tol = np.squeeze(hits_per_tol, 0)
            while True:
                # calculate indices for maximum alignment
                max_idx_x, max_idx_y = np.unravel_index(np.argmax(hits_per_tol), hits_per_tol.shape)
                # finish if all polys_reco have been aligned
                if hits_per_tol[max_idx_x, max_idx_y] < 0:
                    break
                # set precision to max alignment
                precision[i, max_idx_x] = hits_per_tol[max_idx_x, max_idx_y]
                # set row and column to -1
                hits_per_tol[max_idx_x, :] = -1.0
                hits_per_tol[:, max_idx_y] = -1.0
        return precision

    def calc_recall(self, polys_truth, polys_reco):
        """
        Calculates and returns recall values for given truth and reco polygons for all tolerances.
        :param polys_truth: list of TRUTH polygons
        :param polys_reco: list of RECO polygons
        :return: recall values
        """
        assert type(polys_truth) == list and type(polys_reco) == list, "polys_truth and polys_reco have to be lists"
        assert all([isinstance(poly, Polygon) for poly in polys_truth + polys_reco]), \
            "elements of polys_truth and polys_reco have to be Polygons"
        recall = np.zeros([self.max_tols.shape[0], len(polys_truth)])
        for i, poly_truth in enumerate(polys_truth):
            recall[:, i] = self.count_rel_hits_list(poly_truth, polys_reco, self.truth_line_tols[i])
        return recall

    def count_rel_hits(self, poly_to_count, poly_ref, tols):
        """
        Counts the relative hits per tolerance value over all points of the polygon and corresponding
        nearest points of the reference polygon.
        :param poly_to_count: Polygon to count over
        :param poly_ref: reference Polygon
        :param tols: vector of tolerances
        :return: vector of relative hits for every tolerance value
        """
        assert isinstance(poly_to_count, Polygon) and isinstance(poly_ref, Polygon), \
            "poly_to_count and poly_ref have to be Polygons"
        assert type(tols) == np.ndarray, "tols has to be np.ndarray"
        assert len(tols.shape) == 1, "tols has to be 1d vector"
        assert tols.dtype == float, "tols has to be float"
        poly_to_count_bb = poly_to_count.get_bounding_box()
        poly_ref_bb = poly_ref.get_bounding_box()
        intersection = poly_to_count_bb.intersection(poly_ref_bb)
        rel_hits = np.zeros_like(tols)
        # Early stopping criterion
        if min(intersection.width, intersection.height) < -3.0 * tols[-1]:
            return rel_hits
        # Build and expand numpy arrays from points
        poly_to_count_x = np.array(poly_to_count.x_points)
        poly_to_count_y = np.array(poly_to_count.y_points)
        poly_ref_x = np.expand_dims(np.asarray(poly_ref.x_points), axis=1)
        poly_ref_y = np.expand_dims(np.asarray(poly_ref.y_points), axis=1)
        # Calculate minimum distances (L1 metric over the reference points)
        dist_x = abs(poly_to_count_x - poly_ref_x)
        dist_y = abs(poly_to_count_y - poly_ref_y)
        min_dist = np.amin(dist_x + dist_y, axis=0)
        # Calculate masks for two tolerance cases: full hit within tol,
        # linearly decaying partial hit within 3*tol
        tols_t = np.expand_dims(np.asarray(tols), axis=1)
        mask1 = (min_dist <= tols_t).astype(float)
        mask2 = (min_dist <= 3.0 * tols_t).astype(float)
        mask2 = mask2 - mask1
        # Calculate relative hits
        rel_hits = mask1 + mask2 * ((3.0 * tols_t - min_dist) / (2.0 * tols_t))
        rel_hits = np.sum(rel_hits, axis=1)
        rel_hits /= poly_to_count.n_points
        return rel_hits

    def count_rel_hits_list(self, poly_to_count, polys_ref, tols):
        """
        Counts the relative hits per tolerance value over all points of the polygon
        against the nearest point of ANY reference polygon.
        :param poly_to_count: Polygon to count over
        :param polys_ref: list of reference Polygons
        :param tols: vector of tolerances
        :return: vector of relative hits for every tolerance value
        """
        assert isinstance(poly_to_count, Polygon), "poly_to_count has to be Polygon"
        assert type(polys_ref) == list, "polys_ref has to be list"
        assert all([isinstance(poly, Polygon) for poly in polys_ref]), "elements of polys_ref have to Polygons"
        assert type(tols) == np.ndarray, "tols has to be np.ndarray"
        assert len(tols.shape) == 1, "tols has to be 1d vector"
        assert tols.dtype == float, "tols has to be float"
        poly_to_count_bb = poly_to_count.get_bounding_box()
        all_inf = True
        min_dist = np.full((poly_to_count.n_points,), np.inf)
        for poly_ref in polys_ref:
            poly_ref_bb = poly_ref.get_bounding_box()
            intersection = poly_to_count_bb.intersection(poly_ref_bb)
            # Early stopping criterion
            if min(intersection.width, intersection.height) < -3.0 * tols[-1]:
                continue
            # Build and expand numpy arrays from points
            poly_to_count_x = np.array(poly_to_count.x_points)
            poly_to_count_y = np.array(poly_to_count.y_points)
            poly_ref_x = np.expand_dims(np.asarray(poly_ref.x_points), axis=1)
            poly_ref_y = np.expand_dims(np.asarray(poly_ref.y_points), axis=1)
            # Calculate minimum distances
            dist_x = abs(poly_to_count_x - poly_ref_x)
            dist_y = abs(poly_to_count_y - poly_ref_y)
            if all_inf:
                all_inf = False
                min_dist = np.amin(dist_x + dist_y, axis=0)
            else:
                min_dist = np.minimum(min_dist, np.amin(dist_x + dist_y, axis=0))
        # Calculate masks for two tolerance cases
        tols_t = np.expand_dims(np.asarray(tols), axis=1)
        mask1 = (min_dist <= tols_t).astype(float)
        mask2 = (min_dist <= 3.0 * tols_t).astype(float)
        mask2 = mask2 - mask1
        # Calculate relative hits
        rel_hits = np.zeros(mask1.shape)
        if not all_inf:
            for i in range(mask1.shape[0]):
                for j in range(mask1.shape[1]):
                    if np.isinf(min_dist[j]):
                        continue
                    # BUGFIX: tols_t has shape (num_tols, 1), so the column
                    # index must be 0 -- "tols_t[i, i]" raised IndexError for
                    # every i >= 1.
                    rel_hits[i, j] = mask1[i, j] + \
                        mask2[i, j] * ((3.0 * tols_t[i, 0] - min_dist[j]) / (2.0 * tols_t[i, 0]))
        rel_hits = np.sum(rel_hits, axis=1)
        rel_hits /= poly_to_count.n_points
        return rel_hits
if __name__ == '__main__':
    # Debug scrap: show the configured module search path and sanity-check numpy.
    # .get avoids a KeyError when PYTHONPATH is not set in the environment.
    print(os.environ.get("PYTHONPATH", "<PYTHONPATH not set>"))
    z = np.zeros([1, 2])
    if isinstance(z, np.ndarray):  # isinstance is the idiomatic type check
        print("is np.ndarray")
from app.models import *
def create_registrant(db_session):
    """Persist and return a minimal Registrant fixture for the /vr/ flow tests."""
    fixture = Registrant(
        registration_value={
            "name_first": "foo",
            "name_last": "bar",
            "dob": "01/01/2000",
            "email": "<EMAIL>",
        },
        county="TEST",
        reg_lookup_complete=True,
        is_citizen=True,
    )
    db_session.add(fixture)
    db_session.commit()
    return fixture
def test_vr_3_no_address_provided(app, db_session, client):
    """
    An existing user tries to post without an address provided
    """
    registrant = create_registrant(db_session)
    with client.session_transaction() as http_session:
        http_session['session_id'] = str(registrant.session_id)
    # Empty payload: the endpoint must not redirect onward in the flow.
    response = client.post('/vr/address', data={}, follow_redirects=False)
    assert response.status_code != 302
def test_vr_3_single_valid_address(app, db_session, client):
    """
    An existing user provides a valid address, but no previous address or mailing address.
    """
    registrant = create_registrant(db_session)
    with client.session_transaction() as http_session:
        http_session['session_id'] = str(registrant.session_id)
    form_payload = {
        'addr': "707 Vermont St",
        'unit': "Room A",
        'city': "Lawrence",
        'state': "KANSAS",
        'zip': '66044'
    }
    response = client.post('/vr/address', data=form_payload, follow_redirects=False)
    redirect_data = response.data.decode()
    assert response.status_code == 302
    # Assert membership directly; "(expr) == True" hides the useful failure message.
    assert '/vr/party' in redirect_data
    updated_registrant = db_session.query(Registrant).filter_by(session_id=registrant.session_id).first()
    assert updated_registrant.registration_value.get('addr') == '707 Vermont St'
    assert 'validated_addresses' in updated_registrant.registration_value
    assert updated_registrant.registration_value['validated_addresses']['current_address']['state'] == 'KS'
def test_vr_3_single_address_no_county(app, db_session, client):
    """
    A valid address with no county on file should derive the county from the address.
    """
    registrant = create_registrant(db_session)
    registrant.county = None
    registrant.save(db_session)
    with client.session_transaction() as http_session:
        http_session['session_id'] = str(registrant.session_id)
    form_payload = {
        'addr': "707 Vermont St",
        'unit': "Room A",
        'city': "Lawrence",
        'state': "KANSAS",
        'zip': '66044'
    }
    response = client.post('/vr/address', data=form_payload, follow_redirects=False)
    redirect_data = response.data.decode()
    assert response.status_code == 302
    # Assert membership directly; "(expr) == True" hides the useful failure message.
    assert '/vr/party' in redirect_data
    updated_registrant = Registrant.lookup_by_session_id(registrant.session_id)
    assert updated_registrant.county == 'Douglas'
def test_vr_3_single_address_wrong_zip(app, db_session, client):
    """
    A slightly wrong ZIP should still validate and resolve the correct county.
    """
    registrant = create_registrant(db_session)
    with client.session_transaction() as http_session:
        http_session['session_id'] = str(registrant.session_id)
    form_payload = {
        'addr': "707 Vermont St",
        'unit': "Room A",
        'city': "Lawrence",
        'state': "KANSAS",
        'zip': '66043'
    }
    response = client.post('/vr/address', data=form_payload, follow_redirects=False)
    redirect_data = response.data.decode()
    assert response.status_code == 302
    # Assert membership directly; "(expr) == True" hides the useful failure message.
    assert '/vr/party' in redirect_data
    updated_registrant = Registrant.lookup_by_session_id(registrant.session_id)
    assert updated_registrant.county == 'Douglas'
def test_vr_3_single_invalid_address(app, db_session, client):
    """
    An existing user provides an invalid address, but no previous address or mailing address. Should still redirect.
    """
    registrant = create_registrant(db_session)
    with client.session_transaction() as http_session:
        http_session['session_id'] = str(registrant.session_id)
    form_payload = {
        'addr': "123 Fake St",
        'city': "FakeTown",
        'state': "NA",
        'zip': '00000'
    }
    response = client.post('/vr/address', data=form_payload, follow_redirects=False)
    redirect_data = response.data.decode()
    assert response.status_code == 302
    # Assert membership directly; "(expr) == True" hides the useful failure message.
    assert '/vr/party' in redirect_data
    updated_registrant = db_session.query(Registrant).filter_by(session_id=registrant.session_id).first()
    assert updated_registrant.registration_value.get('addr') == '123 Fake St'
    assert 'validated_addresses' in updated_registrant.registration_value
    # Validation failure is stored as the literal False, so use an identity check.
    assert updated_registrant.registration_value['validated_addresses'] is False
def test_vr_3_with_prev_address(app, db_session, client):
    """
    An existing user provides a valid address and valid prev address
    """
    reg = create_registrant(db_session)
    with client.session_transaction() as http_session:
        http_session['session_id'] = str(reg.session_id)
    payload = {
        'addr': "707 Vermont St",
        'unit': "Room A",
        'city': "Lawrence",
        'state': "KANSAS",
        'zip': '66044',
        'has_prev_addr': True,
        'prev_addr': "707 Vermont St",
        'prev_unit': "Room B",
        'prev_city': "Lawrence",
        'prev_state': "KANSAS",
        'prev_zip': '66044',
    }
    resp = client.post('/vr/address', data=payload, follow_redirects=False)
    assert resp.status_code == 302
    assert '/vr/party' in resp.data.decode()
    refreshed = db_session.query(Registrant).filter_by(session_id=reg.session_id).first()
    reg_value = refreshed.registration_value
    assert reg_value.get('addr') == '707 Vermont St'
    assert 'validated_addresses' in reg_value
    validated = reg_value['validated_addresses']
    # Both addresses come back normalized (state and unit abbreviated).
    assert validated['current_address']['state'] == 'KS'
    assert validated['prev_addr']['unit'] == 'RM B'
def test_vr_3_with_invalid_prev_address(app, db_session, client):
    """
    An existing user provides a valid address and invalid prev address
    """
    reg = create_registrant(db_session)
    with client.session_transaction() as http_session:
        http_session['session_id'] = str(reg.session_id)
    payload = {
        'addr': "707 Vermont St",
        'unit': "Room A",
        'city': "Lawrence",
        'state': "KANSAS",
        'zip': '66044',
        'has_prev_addr': True,
        'prev_addr': "123 Fake St",
        'prev_city': "FakeTown",
        'prev_state': "NA",
        'prev_zip': '00000',
    }
    resp = client.post('/vr/address', data=payload, follow_redirects=False)
    assert resp.status_code == 302
    assert '/vr/party' in resp.data.decode()
    refreshed = db_session.query(Registrant).filter_by(session_id=reg.session_id).first()
    reg_value = refreshed.registration_value
    assert reg_value.get('addr') == '707 Vermont St'
    assert 'validated_addresses' in reg_value
    validated = reg_value['validated_addresses']
    # Current address validates; the bad previous address carries an error.
    assert validated['current_address']['state'] == 'KS'
    assert 'error' in validated['prev_addr']
def test_vr_3_with_mail_address(app, db_session, client):
    """
    An existing user provides a valid address and a valid mailing address.
    """
    registrant = create_registrant(db_session)
    with client.session_transaction() as http_session:
        http_session['session_id'] = str(registrant.session_id)
    form_payload = {
        'addr': "707 Vermont St",
        'unit': "Room A",
        'city': "Lawrence",
        'state': "KANSAS",
        'zip': '66044',
        'has_mail_addr': True,
        'mail_addr': "707 Vermont St",
        'mail_unit': "Room B",
        'mail_city': "Lawrence",
        'mail_state': "KANSAS",
        'mail_zip': '66044',
    }
    response = client.post('/vr/address', data=form_payload, follow_redirects=False)
    redirect_data = response.data.decode()
    assert response.status_code == 302
    assert ('/vr/party' in redirect_data) == True
    updated_registrant = db_session.query(Registrant).filter_by(session_id = registrant.session_id).first()
    assert updated_registrant.registration_value.get('addr') == '707 Vermont St'
    assert 'validated_addresses' in updated_registrant.registration_value
    # Both addresses are stored normalized (abbreviated state / unit).
    assert updated_registrant.registration_value['validated_addresses']['current_address']['state'] == 'KS'
    assert updated_registrant.registration_value['validated_addresses']['mail_addr']['unit'] == 'RM B'
def create_registrant(db_session):
    """Persist and return a minimal Registrant ready for the /vr/address step."""
    reg = Registrant(
        registration_value={
            "name_first": "foo",
            "name_last": "bar",
            "dob": "01/01/2000",
            "email": "<EMAIL>",
        },
        county="TEST",
        reg_lookup_complete=True,
        is_citizen=True,
    )
    db_session.add(reg)
    db_session.commit()
    return reg
def test_vr_3_no_address_provided(app, db_session, client):
    """
    An existing user tries to post without an address provided
    """
    reg = create_registrant(db_session)
    with client.session_transaction() as http_session:
        http_session['session_id'] = str(reg.session_id)
    resp = client.post('/vr/address', data={}, follow_redirects=False)
    # Without an address the step must not advance (no redirect).
    assert resp.status_code != 302
def test_vr_3_single_valid_address(app, db_session, client):
    """
    An existing user provides a valid address, but no previous address or
    mailing address.
    """
    reg = create_registrant(db_session)
    with client.session_transaction() as http_session:
        http_session['session_id'] = str(reg.session_id)
    payload = {
        'addr': "707 Vermont St",
        'unit': "Room A",
        'city': "Lawrence",
        'state': "KANSAS",
        'zip': '66044',
    }
    resp = client.post('/vr/address', data=payload, follow_redirects=False)
    assert resp.status_code == 302
    assert '/vr/party' in resp.data.decode()
    refreshed = db_session.query(Registrant).filter_by(session_id=reg.session_id).first()
    reg_value = refreshed.registration_value
    assert reg_value.get('addr') == '707 Vermont St'
    assert 'validated_addresses' in reg_value
    # The validated copy is normalized ("KANSAS" -> "KS").
    assert reg_value['validated_addresses']['current_address']['state'] == 'KS'
def test_vr_3_single_address_no_county(app, db_session, client):
    """A registrant with no county posts a valid address; after the step the
    county is populated (asserted to be 'Douglas' for this address)."""
    registrant = create_registrant(db_session)
    # Clear the fixture's placeholder county so the step has to fill it in.
    registrant.county = None
    registrant.save(db_session)
    with client.session_transaction() as http_session:
        http_session['session_id'] = str(registrant.session_id)
    form_payload = {
        'addr': "707 Vermont St",
        'unit': "Room A",
        'city': "Lawrence",
        'state': "KANSAS",
        'zip': '66044'
    }
    response = client.post('/vr/address', data=form_payload, follow_redirects=False)
    redirect_data = response.data.decode()
    assert response.status_code == 302
    assert ('/vr/party' in redirect_data) == True
    updated_registrant = Registrant.lookup_by_session_id(registrant.session_id)
    assert updated_registrant.county == 'Douglas'
def test_vr_3_single_address_wrong_zip(app, db_session, client):
    """A valid street address with a non-matching ZIP (66043 instead of 66044)
    still redirects, and the county is still populated afterwards."""
    registrant = create_registrant(db_session)
    with client.session_transaction() as http_session:
        http_session['session_id'] = str(registrant.session_id)
    form_payload = {
        'addr': "707 Vermont St",
        'unit': "Room A",
        'city': "Lawrence",
        'state': "KANSAS",
        'zip': '66043'
    }
    response = client.post('/vr/address', data=form_payload, follow_redirects=False)
    redirect_data = response.data.decode()
    assert response.status_code == 302
    assert ('/vr/party' in redirect_data) == True
    updated_registrant = Registrant.lookup_by_session_id(registrant.session_id)
    assert updated_registrant.county == 'Douglas'
def test_vr_3_single_invalid_address(app, db_session, client):
    """
    An existing user provides an invalid address, but no previous address or mailing address. Should still redirect.
    """
    registrant = create_registrant(db_session)
    with client.session_transaction() as http_session:
        http_session['session_id'] = str(registrant.session_id)
    form_payload = {
        'addr': "123 Fake St",
        'city': "FakeTown",
        'state': "NA",
        'zip': '00000'
    }
    response = client.post('/vr/address', data=form_payload, follow_redirects=False)
    redirect_data = response.data.decode()
    assert response.status_code == 302
    assert ('/vr/party' in redirect_data) == True
    updated_registrant = db_session.query(Registrant).filter_by(session_id = registrant.session_id).first()
    assert updated_registrant.registration_value.get('addr') == '123 Fake St'
    assert 'validated_addresses' in updated_registrant.registration_value
    # Validation failed, so the stored value is the literal False flag.
    assert updated_registrant.registration_value['validated_addresses'] == False
def test_vr_3_with_prev_address(app, db_session, client):
    """
    An existing user provides a valid address and valid prev address
    """
    registrant = create_registrant(db_session)
    with client.session_transaction() as http_session:
        http_session['session_id'] = str(registrant.session_id)
    form_payload = {
        'addr': "707 Vermont St",
        'unit': "Room A",
        'city': "Lawrence",
        'state': "KANSAS",
        'zip': '66044',
        'has_prev_addr': True,
        'prev_addr': "707 Vermont St",
        'prev_unit': "Room B",
        'prev_city': "Lawrence",
        'prev_state': "KANSAS",
        'prev_zip': '66044',
    }
    response = client.post('/vr/address', data=form_payload, follow_redirects=False)
    redirect_data = response.data.decode()
    assert response.status_code == 302
    assert ('/vr/party' in redirect_data) == True
    updated_registrant = db_session.query(Registrant).filter_by(session_id = registrant.session_id).first()
    assert updated_registrant.registration_value.get('addr') == '707 Vermont St'
    assert 'validated_addresses' in updated_registrant.registration_value
    # Both current and previous addresses come back normalized.
    assert updated_registrant.registration_value['validated_addresses']['current_address']['state'] == 'KS'
    assert updated_registrant.registration_value['validated_addresses']['prev_addr']['unit'] == 'RM B'
def test_vr_3_with_invalid_prev_address(app, db_session, client):
    """
    An existing user provides a valid address and invalid prev address
    """
    registrant = create_registrant(db_session)
    with client.session_transaction() as http_session:
        http_session['session_id'] = str(registrant.session_id)
    form_payload = {
        'addr': "707 Vermont St",
        'unit': "Room A",
        'city': "Lawrence",
        'state': "KANSAS",
        'zip': '66044',
        'has_prev_addr': True,
        'prev_addr': "123 Fake St",
        'prev_city': "FakeTown",
        'prev_state': "NA",
        'prev_zip': '00000'
    }
    response = client.post('/vr/address', data=form_payload, follow_redirects=False)
    redirect_data = response.data.decode()
    assert response.status_code == 302
    assert ('/vr/party' in redirect_data) == True
    updated_registrant = db_session.query(Registrant).filter_by(session_id = registrant.session_id).first()
    assert updated_registrant.registration_value.get('addr') == '707 Vermont St'
    assert 'validated_addresses' in updated_registrant.registration_value
    # The good current address validates; the bad previous one records an error.
    assert updated_registrant.registration_value['validated_addresses']['current_address']['state'] == 'KS'
    assert ('error' in updated_registrant.registration_value['validated_addresses']['prev_addr'])
def test_vr_3_with_mail_address(app, db_session, client):
    """
    An existing user provides a valid address and a valid mailing address.
    """
    registrant = create_registrant(db_session)
    with client.session_transaction() as http_session:
        http_session['session_id'] = str(registrant.session_id)
    form_payload = {
        'addr': "707 Vermont St",
        'unit': "Room A",
        'city': "Lawrence",
        'state': "KANSAS",
        'zip': '66044',
        'has_mail_addr': True,
        'mail_addr': "707 Vermont St",
        'mail_unit': "Room B",
        'mail_city': "Lawrence",
        'mail_state': "KANSAS",
        'mail_zip': '66044',
    }
    response = client.post('/vr/address', data=form_payload, follow_redirects=False)
    redirect_data = response.data.decode()
    assert response.status_code == 302
    assert ('/vr/party' in redirect_data) == True
    updated_registrant = db_session.query(Registrant).filter_by(session_id = registrant.session_id).first()
    assert updated_registrant.registration_value.get('addr') == '707 Vermont St'
    assert 'validated_addresses' in updated_registrant.registration_value
    # Both addresses are stored normalized (abbreviated state / unit).
    assert updated_registrant.registration_value['validated_addresses']['current_address']['state'] == 'KS'
    assert updated_registrant.registration_value['validated_addresses']['mail_addr']['unit'] == 'RM B'
import os
import io
import sys
import argparse
from jinja2 import Environment, BaseLoader, select_autoescape
try:
import yaml
except ImportError as e:
print (str(e) + " To install it, type:")
print ("pip install PyYAML")
exit(1)
donotedit_warning = \
"/*********** " + \
"WARNING: This is an auto-generated file. Do not edit!" + \
" ***********/"
DEFAULT_MANIFEST_LIST = os.path.join('tools', 'tfm_manifest_list.yaml')
DEFAULT_GEN_FILE_LIST = os.path.join('tools', 'tfm_generated_file_list.yaml')
OUT_DIR = None # The root directory that files are generated to
class TemplateLoader(BaseLoader):
    """
    Template loader class.

    An instance of this class is passed to the template engine. It is
    responsible for reading the template file.
    """
    def __init__(self):
        pass

    def get_source(self, environment, template):
        """
        Read the template file named by ``template``.

        For detailed documentation see:
        http://jinja.pocoo.org/docs/2.10/api/#jinja2.BaseLoader.get_source

        Please note that this function always returns 'False' as the
        'uptodate' value, so the output file will always be generated.
        """
        # Fix: TemplateNotFound was never imported at module level, so a
        # missing template raised NameError instead of the jinja2 error the
        # engine expects. Import it locally to keep the fix self-contained.
        from jinja2 import TemplateNotFound
        if not os.path.isfile(template):
            raise TemplateNotFound(template)
        with open(template) as f:
            source = f.read()
        return source, template, False
def process_manifest(manifest_list_file, append):
    """
    Parse the input manifest list, generate the database for the generated
    files and generate the per-manifest header files.

    Parameters
    ----------
    manifest_list_file:
        The manifest list to parse.
    append:
        Whether to append the given manifest list to the default one.

    Returns
    -------
    The manifest header list and the database.
    """
    db = []
    manifest_header_list = []
    manifest_list = []

    if append:
        # Load the default manifest first
        with open(DEFAULT_MANIFEST_LIST) as default_manifest_list_yaml_file:
            manifest_dic = yaml.safe_load(default_manifest_list_yaml_file)
            manifest_list.extend(manifest_dic["manifest_list"])

    with open(manifest_list_file) as manifest_list_yaml_file:
        manifest_dic = yaml.safe_load(manifest_list_yaml_file)
        manifest_list.extend(manifest_dic["manifest_list"])

    templatefile_name = 'secure_fw/services/manifestfilename.template'
    template = ENV.get_template(templatefile_name)

    for manifest_item in manifest_list:
        manifest_path = os.path.expandvars(manifest_item['manifest'])
        # Fix: the original opened the manifest without ever closing it,
        # leaking one file handle per manifest.
        with open(manifest_path) as manifest_file:
            manifest = yaml.safe_load(manifest_file)
        db.append({"manifest": manifest, "attr": manifest_item})

        utilities = {'donotedit_warning': donotedit_warning}
        context = {
            'manifest': manifest,
            'attr': manifest_item,
            'utilities': utilities,
        }

        manifest_dir, manifest_name = os.path.split(manifest_path)
        outfile_name = manifest_name.replace('yaml', 'h').replace('json', 'h')
        context['file_name'] = outfile_name.replace('.h', '')
        outfile_name = os.path.join(manifest_dir, "psa_manifest",
                                    outfile_name).replace('\\', '/')
        manifest_header_list.append(outfile_name)

        if OUT_DIR is not None:
            outfile_name = os.path.join(OUT_DIR, outfile_name)

        outfile_path = os.path.dirname(outfile_name)
        if not os.path.exists(outfile_path):
            os.makedirs(outfile_path)

        print("Generating " + outfile_name)
        # newline='\n' keeps LF endings on all platforms; the 'with' block
        # guarantees the header is flushed/closed even if rendering fails.
        with io.open(outfile_name, "w", newline='\n') as outfile:
            outfile.write(template.render(context))

    return manifest_header_list, db
def gen_files(context, gen_file_list, append):
    """
    Generate files according to the gen_file_list.

    Parameters
    ----------
    context:
        Template rendering context (manifest database plus utilities).
    gen_file_list:
        YAML file describing the list of files to generate.
    append:
        Whether to append the given file list to the default one.
    """
    file_list = []

    if append:
        # read default file list first
        with open(DEFAULT_GEN_FILE_LIST) as file_list_yaml_file:
            file_list_yaml = yaml.safe_load(file_list_yaml_file)
            file_list.extend(file_list_yaml["file_list"])

    with open(gen_file_list) as file_list_yaml_file:
        # read list of files that need to be generated from templates using db
        file_list_yaml = yaml.safe_load(file_list_yaml_file)
        file_list.extend(file_list_yaml["file_list"])

    for file in file_list:
        outfile_name = os.path.expandvars(file["output"])
        templatefile_name = os.path.expandvars(file["template"])

        if OUT_DIR is not None:
            outfile_name = os.path.join(OUT_DIR, outfile_name)

        outfile_path = os.path.dirname(outfile_name)
        if not os.path.exists(outfile_path):
            os.makedirs(outfile_path)

        template = ENV.get_template(templatefile_name)
        # Fix: use 'with' so the output file is closed even when rendering
        # raises; the original leaked the handle on any template error.
        with io.open(outfile_name, "w", newline='\n') as outfile:
            outfile.write(template.render(context))

    print("Generation of files done")
def parse_args():
    """Parse command line arguments.

    Returns the argparse namespace. Prints help and exits with status 1
    when the -m/-f argument lists are malformed (more than two values, or
    a second value other than 'append'/'').
    """
    parser = argparse.ArgumentParser(description='Parse secure partition manifest list and generate files listed by the file list')
    parser.add_argument('-o', '--outdir',
                        dest='outdir',
                        required=False,
                        default=None,
                        metavar='out_dir',
                        help='The root directory for generated files, the default is TF-M root folder.')
    parser.add_argument('-m', '--manifest',
                        nargs='*',
                        dest='manifest_args',
                        required=False,
                        default=[],
                        metavar='manifest',
                        help='The secure partition manifest list file to parse, the default is '
                             + DEFAULT_MANIFEST_LIST + '. '
                             'Or the manifest can be append to the default one by explicitly '
                             '"append" it: -m manifest_to_append append')
    # Typo fix in user-facing help: "descripes" -> "describes".
    parser.add_argument('-f', '--file-list',
                        nargs='*',
                        dest='gen_file_args',
                        required=False,
                        default=[],
                        metavar='file-list',
                        help='The file describes the file list to generate, the default is '
                             + DEFAULT_GEN_FILE_LIST + '. '
                             'Or the file list can be append to the default one by explicitly '
                             '"append" it: -f files_to_append append')
    args = parser.parse_args()

    manifest_args = args.manifest_args
    gen_file_args = args.gen_file_args

    if len(manifest_args) > 2 or len(gen_file_args) > 2:
        parser.print_help()
        exit(1)
    if len(manifest_args) == 2 and (manifest_args[1] != 'append' and manifest_args[1] != ''):
        parser.print_help()
        exit(1)
    if len(gen_file_args) == 2 and (gen_file_args[1] != 'append' and gen_file_args[1] != ''):
        parser.print_help()
        exit(1)

    return args
# Shared template engine. lstrip_blocks/trim_blocks keep rendered output
# clean, and keep_trailing_newline preserves the final newline of every
# generated file.
ENV = Environment(
    loader = TemplateLoader(),
    autoescape = select_autoescape(['html', 'xml']),
    lstrip_blocks = True,
    trim_blocks = True,
    keep_trailing_newline = True
)
def main():
    """
    The entry point of the script.

    Generates the output files based on the templates and the manifests.
    """
    global OUT_DIR

    args = parse_args()
    manifest_args = args.manifest_args
    gen_file_args = args.gen_file_args
    OUT_DIR = args.outdir

    append_manifest = len(manifest_args) == 2 and manifest_args[1] == 'append'
    append_gen_file = len(gen_file_args) == 2 and gen_file_args[1] == 'append'

    # Only convert to an absolute path when the value is not the default.
    # The default is a fixed path relative to the TF-M root folder and would
    # resolve differently depending on the execution directory if made
    # absolute here. The same applies to gen_file_list.
    if manifest_args:
        manifest_list = os.path.abspath(manifest_args[0])
    else:
        manifest_list = DEFAULT_MANIFEST_LIST

    if gen_file_args:
        gen_file_list = os.path.abspath(gen_file_args[0])
    else:
        gen_file_list = DEFAULT_GEN_FILE_LIST

    # Arguments could be relative paths.
    # Convert to absolute paths as we are going to change directory later.
    if OUT_DIR is not None:
        OUT_DIR = os.path.abspath(OUT_DIR)

    # Relative paths in the manifests and the default list locations are
    # interpreted from the TF-M root folder, so change directory there first;
    # this lets the script be executed from anywhere. The script lives in
    # <TF-M root>/tools, so sys.path[0]/.. is the TF-M root.
    os.chdir(os.path.join(sys.path[0], ".."))

    manifest_header_list, db = process_manifest(manifest_list, append_manifest)

    utilities = {
        'donotedit_warning': donotedit_warning,
        'manifest_header_list': manifest_header_list,
    }
    context = {
        'manifests': db,
        'utilities': utilities,
    }

    gen_files(context, gen_file_list, append_gen_file)

if __name__ == "__main__":
    main()
import os
import io
import sys
import argparse
from jinja2 import Environment, BaseLoader, select_autoescape
try:
import yaml
except ImportError as e:
print (str(e) + " To install it, type:")
print ("pip install PyYAML")
exit(1)
donotedit_warning = \
"/*********** " + \
"WARNING: This is an auto-generated file. Do not edit!" + \
" ***********/"
DEFAULT_MANIFEST_LIST = os.path.join('tools', 'tfm_manifest_list.yaml')
DEFAULT_GEN_FILE_LIST = os.path.join('tools', 'tfm_generated_file_list.yaml')
OUT_DIR = None # The root directory that files are generated to
class TemplateLoader(BaseLoader):
    """
    Template loader class.

    An instance of this class is passed to the template engine. It is
    responsible for reading the template file.
    """
    def __init__(self):
        pass

    def get_source(self, environment, template):
        """
        Read the template file named by ``template``.

        For detailed documentation see:
        http://jinja.pocoo.org/docs/2.10/api/#jinja2.BaseLoader.get_source

        Please note that this function always returns 'False' as the
        'uptodate' value, so the output file will always be generated.
        """
        # Fix: TemplateNotFound was never imported at module level, so a
        # missing template raised NameError instead of the jinja2 error the
        # engine expects. Import it locally to keep the fix self-contained.
        from jinja2 import TemplateNotFound
        if not os.path.isfile(template):
            raise TemplateNotFound(template)
        with open(template) as f:
            source = f.read()
        return source, template, False
def process_manifest(manifest_list_file, append):
    """
    Parse the input manifest list, generate the database for the generated
    files and generate the per-manifest header files.

    Parameters
    ----------
    manifest_list_file:
        The manifest list to parse.
    append:
        Whether to append the given manifest list to the default one.

    Returns
    -------
    The manifest header list and the database.
    """
    db = []
    manifest_header_list = []
    manifest_list = []

    if append:
        # Load the default manifest first
        with open(DEFAULT_MANIFEST_LIST) as default_manifest_list_yaml_file:
            manifest_dic = yaml.safe_load(default_manifest_list_yaml_file)
            manifest_list.extend(manifest_dic["manifest_list"])

    with open(manifest_list_file) as manifest_list_yaml_file:
        manifest_dic = yaml.safe_load(manifest_list_yaml_file)
        manifest_list.extend(manifest_dic["manifest_list"])

    templatefile_name = 'secure_fw/services/manifestfilename.template'
    template = ENV.get_template(templatefile_name)

    for manifest_item in manifest_list:
        manifest_path = os.path.expandvars(manifest_item['manifest'])
        # Fix: the original opened the manifest without ever closing it,
        # leaking one file handle per manifest.
        with open(manifest_path) as manifest_file:
            manifest = yaml.safe_load(manifest_file)
        db.append({"manifest": manifest, "attr": manifest_item})

        utilities = {'donotedit_warning': donotedit_warning}
        context = {
            'manifest': manifest,
            'attr': manifest_item,
            'utilities': utilities,
        }

        manifest_dir, manifest_name = os.path.split(manifest_path)
        outfile_name = manifest_name.replace('yaml', 'h').replace('json', 'h')
        context['file_name'] = outfile_name.replace('.h', '')
        outfile_name = os.path.join(manifest_dir, "psa_manifest",
                                    outfile_name).replace('\\', '/')
        manifest_header_list.append(outfile_name)

        if OUT_DIR is not None:
            outfile_name = os.path.join(OUT_DIR, outfile_name)

        outfile_path = os.path.dirname(outfile_name)
        if not os.path.exists(outfile_path):
            os.makedirs(outfile_path)

        print("Generating " + outfile_name)
        # newline='\n' keeps LF endings on all platforms; the 'with' block
        # guarantees the header is flushed/closed even if rendering fails.
        with io.open(outfile_name, "w", newline='\n') as outfile:
            outfile.write(template.render(context))

    return manifest_header_list, db
def gen_files(context, gen_file_list, append):
    """
    Generate files according to the gen_file_list.

    Parameters
    ----------
    context:
        Template rendering context (manifest database plus utilities).
    gen_file_list:
        YAML file describing the list of files to generate.
    append:
        Whether to append the given file list to the default one.
    """
    file_list = []

    if append:
        # read default file list first
        with open(DEFAULT_GEN_FILE_LIST) as file_list_yaml_file:
            file_list_yaml = yaml.safe_load(file_list_yaml_file)
            file_list.extend(file_list_yaml["file_list"])

    with open(gen_file_list) as file_list_yaml_file:
        # read list of files that need to be generated from templates using db
        file_list_yaml = yaml.safe_load(file_list_yaml_file)
        file_list.extend(file_list_yaml["file_list"])

    for file in file_list:
        outfile_name = os.path.expandvars(file["output"])
        templatefile_name = os.path.expandvars(file["template"])

        if OUT_DIR is not None:
            outfile_name = os.path.join(OUT_DIR, outfile_name)

        outfile_path = os.path.dirname(outfile_name)
        if not os.path.exists(outfile_path):
            os.makedirs(outfile_path)

        template = ENV.get_template(templatefile_name)
        # Fix: use 'with' so the output file is closed even when rendering
        # raises; the original leaked the handle on any template error.
        with io.open(outfile_name, "w", newline='\n') as outfile:
            outfile.write(template.render(context))

    print("Generation of files done")
def parse_args():
    """Parse command line arguments.

    Returns the argparse namespace. Prints help and exits with status 1
    when the -m/-f argument lists are malformed (more than two values, or
    a second value other than 'append'/'').
    """
    parser = argparse.ArgumentParser(description='Parse secure partition manifest list and generate files listed by the file list')
    parser.add_argument('-o', '--outdir',
                        dest='outdir',
                        required=False,
                        default=None,
                        metavar='out_dir',
                        help='The root directory for generated files, the default is TF-M root folder.')
    parser.add_argument('-m', '--manifest',
                        nargs='*',
                        dest='manifest_args',
                        required=False,
                        default=[],
                        metavar='manifest',
                        help='The secure partition manifest list file to parse, the default is '
                             + DEFAULT_MANIFEST_LIST + '. '
                             'Or the manifest can be append to the default one by explicitly '
                             '"append" it: -m manifest_to_append append')
    # Typo fix in user-facing help: "descripes" -> "describes".
    parser.add_argument('-f', '--file-list',
                        nargs='*',
                        dest='gen_file_args',
                        required=False,
                        default=[],
                        metavar='file-list',
                        help='The file describes the file list to generate, the default is '
                             + DEFAULT_GEN_FILE_LIST + '. '
                             'Or the file list can be append to the default one by explicitly '
                             '"append" it: -f files_to_append append')
    args = parser.parse_args()

    manifest_args = args.manifest_args
    gen_file_args = args.gen_file_args

    if len(manifest_args) > 2 or len(gen_file_args) > 2:
        parser.print_help()
        exit(1)
    if len(manifest_args) == 2 and (manifest_args[1] != 'append' and manifest_args[1] != ''):
        parser.print_help()
        exit(1)
    if len(gen_file_args) == 2 and (gen_file_args[1] != 'append' and gen_file_args[1] != ''):
        parser.print_help()
        exit(1)

    return args
# Shared template engine. lstrip_blocks/trim_blocks keep rendered output
# clean, and keep_trailing_newline preserves the final newline of every
# generated file.
ENV = Environment(
    loader = TemplateLoader(),
    autoescape = select_autoescape(['html', 'xml']),
    lstrip_blocks = True,
    trim_blocks = True,
    keep_trailing_newline = True
)
def main():
    """
    The entry point of the script.

    Generates the output files based on the templates and the manifests.
    """
    global OUT_DIR

    args = parse_args()
    manifest_args = args.manifest_args
    gen_file_args = args.gen_file_args
    OUT_DIR = args.outdir

    append_manifest = len(manifest_args) == 2 and manifest_args[1] == 'append'
    append_gen_file = len(gen_file_args) == 2 and gen_file_args[1] == 'append'

    # Only convert to an absolute path when the value is not the default.
    # The default is a fixed path relative to the TF-M root folder and would
    # resolve differently depending on the execution directory if made
    # absolute here. The same applies to gen_file_list.
    if manifest_args:
        manifest_list = os.path.abspath(manifest_args[0])
    else:
        manifest_list = DEFAULT_MANIFEST_LIST

    if gen_file_args:
        gen_file_list = os.path.abspath(gen_file_args[0])
    else:
        gen_file_list = DEFAULT_GEN_FILE_LIST

    # Arguments could be relative paths.
    # Convert to absolute paths as we are going to change directory later.
    if OUT_DIR is not None:
        OUT_DIR = os.path.abspath(OUT_DIR)

    # Relative paths in the manifests and the default list locations are
    # interpreted from the TF-M root folder, so change directory there first;
    # this lets the script be executed from anywhere. The script lives in
    # <TF-M root>/tools, so sys.path[0]/.. is the TF-M root.
    os.chdir(os.path.join(sys.path[0], ".."))

    manifest_header_list, db = process_manifest(manifest_list, append_manifest)

    utilities = {
        'donotedit_warning': donotedit_warning,
        'manifest_header_list': manifest_header_list,
    }
    context = {
        'manifests': db,
        'utilities': utilities,
    }

    gen_files(context, gen_file_list, append_gen_file)

if __name__ == "__main__":
    main()
import json
import os
from firecloud import api as fapi
from firecloud.errors import FireCloudServerError
from firecloud.entity import Entity
class Workspace(object):
"""A FireCloud Workspace.
Attributes:
api_url (str): API root used to interact with FireCloud,
normally https://api.firecloud.org/api
namespace (str): Google project for this workspace
name (str): Workspace name
"""
    def __init__(self, namespace, name, api_url=fapi.PROD_API_ROOT):
        """Get an existing workspace from Firecloud by name.

        This method assumes that a workspace with the given name and
        namespace is present at the api_url given, and raises an error
        if it does not exist. To create a new workspace, use
        Workspace.new()

        Args:
            namespace (str): Google project for this workspace
            name (str): Workspace name
            api_url (str): API root used to interact with FireCloud

        Raises:
            FireCloudServerError: Workspace does not exist, or
                API call fails
        """
        self.api_url = api_url
        self.namespace = namespace
        self.name = name
        ## Call out to FireCloud
        r = fapi.get_workspace(namespace, name, api_url)
        fapi._check_response_code(r, 200)
        # Cache the workspace metadata; refresh() re-fetches it.
        self.data = r.json()
@staticmethod
def new(namespace, name, protected=False,
attributes=dict(), api_url=fapi.PROD_API_ROOT):
"""Create a new FireCloud workspace.
Returns:
Workspace: A new FireCloud workspace
Raises:
FireCloudServerError: API call failed.
"""
r = fapi.create_workspace(namespace, name, protected, attributes, api_url)
fapi._check_response_code(r, 201)
return Workspace(namespace, name, api_url)
def refresh(self):
"""Reload workspace metadata from firecloud.
Workspace metadata is cached in the data attribute of a Workspace,
and may become stale, requiring a refresh().
"""
r = fapi.get_workspace(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 200)
self.data = r.json()
return self
def delete(self):
"""Delete the workspace from FireCloud.
Note:
This action cannot be undone. Be careful!
"""
r = fapi.delete_workspace(self.namespace, self.name)
fapi._check_response_code(r, 202)
# Getting useful information out of the bucket
    def __str__(self):
        """Return a pretty-printed JSON representation of the cached workspace metadata."""
        return json.dumps(self.data, indent=2)
def bucket(self):
"""Return google bucket id for this workspace."""
return str(self.data["workspace"]["bucketName"])
def lock(self):
"""Lock this Workspace.
This causes the workspace to behave in a read-only way,
regardless of access permissions.
"""
r = fapi.lock_workspace(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 204)
self.data['workspace']['isLocked'] = True
return self
def unlock(self):
"""Unlock this Workspace."""
r = fapi.unlock_workspace(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 204)
self.data['workspace']['isLocked'] = False
return self
def attributes(self):
"""Return a dictionary of workspace attributes"""
return self.data["workspace"]["attributes"]
def get_attribute(self, attr):
"""Return value of workspace attribute.
If the attribute does not exist, return None
"""
return self.data["workspace"]["attributes"].get(attr, None)
    def update_attribute(self, attr, value):
        """Set the value of a workspace attribute.

        Args:
            attr (str): attribute name
            value: new attribute value

        Note:
            Only the server-side copy is updated here; the local cache in
            self.data is not modified — call refresh() to pick up the change.
        """
        update = [fapi._attr_up(attr, value)]
        r = fapi.update_workspace_attributes(self.namespace, self.name,
                                             update, self.api_url)
        fapi._check_response_code(r, 200)
def remove_attribute(self, attr):
"""Remove attribute from a workspace.
Args:
attr (str): attribute name
"""
update = [fapi._attr_rem(attr)]
r = fapi.update_workspace_attributes(self.namespace, self.name,
update, self.api_url)
self.data["workspace"]["attributes"].pop(attr, None)
fapi._check_response_code(r, 200)
def import_tsv(self, tsv_file):
"""Upload entity data to workspace from tsv loadfile.
Args:
tsv_file (file): Tab-delimited file of entity data
"""
r = fapi.upload_entities_tsv(self.namespace, self.name,
self.tsv_file, self.api_url)
fapi._check_response_code(r, 201)
    def get_entity(self, etype, entity_id):
        """Return entity in this workspace.

        Args:
            etype (str): Entity type
            entity_id (str): Entity name/unique id

        Returns:
            Entity: the entity with its attributes populated from the API.
        """
        r = fapi.get_entity(self.namespace, self.name, etype,
                            entity_id, self.api_url)
        fapi._check_response_code(r, 200)
        dresp = r.json()
        return Entity(etype, entity_id, dresp['attributes'])
    def delete_entity(self, etype, entity_id):
        """Delete an entity in this workspace.

        Args:
            etype (str): Entity type
            entity_id (str): Entity name/unique id
        """
        r = fapi.delete_entity(self.namespace, self.name, etype,
                               entity_id, self.api_url)
        fapi._check_response_code(r, 202)
    def import_entities(self, entities):
        """Upload entity objects.

        Args:
            entities: iterable of firecloud.Entity objects.
        """
        # Serialize the entities into the tsv-style payload the API expects.
        edata = Entity.create_payload(entities)
        r = fapi.upload_entities(self.namespace, self.name,
                                 edata, self.api_url)
        fapi._check_response_code(r, 201)
    def create_set(self, set_id, etype, entities):
        """Create a set of entities and upload to FireCloud.

        Args:
            set_id (str): name of the new set
            etype (str): one of {"sample", "pair", "participant"}
            entities: iterable of firecloud.Entity objects.

        Raises:
            ValueError: etype is unsupported, or an entity's type does not
                match etype.
        """
        if etype not in {"sample", "pair", "participant"}:
            raise ValueError("Unsupported entity type:" + str(etype))

        # Build a tsv membership payload: header row, then one row per member.
        payload = "membership:" + etype + "_set_id\t" + etype + "_id\n"
        for e in entities:
            if e.etype != etype:
                msg = "Entity type '" + e.etype + "' does not match "
                msg += "set type '" + etype + "'"
                raise ValueError(msg)
            payload += set_id + '\t' + e.entity_id + '\n'

        r = fapi.upload_entities(self.namespace, self.name,
                                 payload, self.api_url)
        fapi._check_response_code(r, 201)
def create_sample_set(self, sset_id, samples):
"""Create FireCloud sample_set"""
return self.create_set(sset_id, "sample", samples)
def create_pair_set(self, pset_id, pairs):
"""Create FireCloud pair_set"""
return self.create_set(pset_id, "pair", pairs)
def create_participant_set(self, pset_id, participants):
"""Create FireCloud participant_set"""
return self.create_set(pset_id, "participant", participants)
def submissions(self):
"""List job submissions in workspace."""
r = fapi.get_submissions(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 200)
return r.json()
def entity_types(self):
"""List entity types in workspace."""
r = fapi.get_entity_types(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 200)
return r.json().keys()
def entities(self):
"""List all entities in workspace."""
r = fapi.get_entities_with_type(self.namespace,
self.name, self.api_url)
fapi._check_response_code(r, 200)
edicts = r.json()
return [Entity(e['entityType'], e['name'], e['attributes'])
for e in edicts]
def __get_entities(self, etype):
"""Helper to get entities for a given type."""
r = fapi.get_entities(self.namespace, self.name,
etype, self.api_url)
fapi._check_response_code(r, 200)
return [Entity(e['entityType'], e['name'], e['attributes'])
for e in r.json()]
def samples(self):
"""List samples in a workspace."""
return self.__get_entities("sample")
def participants(self):
"""List participants in a workspace."""
return self.__get_entities("participant")
def pairs(self):
"""List pairs in a workspace."""
return self.__get_entities("pair")
def sample_sets(self):
"""List sample sets in a workspace."""
return self.__get_entities("sample_set")
def participant_sets(self):
"""List participant sets in a workspace."""
return self.__get_entities("participant_set")
def pair_sets(self):
"""List pair sets in a workspace."""
return self.__get_entities("pair_set")
def copy_entities(self, from_namespace, from_workspace, etype, enames):
"""Copy entities from another workspace.
Args:
from_namespace (str): Source workspace namespace
from_workspace (str): Source workspace name
etype (str): Entity type
enames (list(str)): List of entity names to copy
"""
r = fapi.copy_entities(from_namespace, from_workspace,
self.namespace, self.name, etype, enames,
self.api_url)
fapi._check_response_code(r, 201)
def configs(self):
"""Get method configurations in a workspace."""
raise NotImplementedError
r = fapi.get_configs(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 200)
cdata = r.json()
configs = []
for c in cdata:
cnamespace = c['namespace']
cname = c['name']
root_etype = c['rootEntityType']
method_namespace = c['methodRepoMethod']['methodNamespace']
method_name = c['methodRepoMethod']['methodName']
method_version = c['methodRepoMethod']['methodVersion']
def acl(self):
"""Get the access control list for this workspace."""
r = fapi.get_workspace_acl(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 200)
return r.json()
def set_acl(self, role, users):
"""Set access permissions for this workspace
Args:
role (str): Access level
one of {one of "OWNER", "READER", "WRITER", "NO ACCESS"}
users (list(str)): List of users to give role to
"""
acl_updates = [{"email": user, "accessLevel": role} for user in users]
r = fapi.update_workspace_acl(self.namespace, self.name,
acl_updates, self.api_url)
fapi._check_response_code(r, 200)
def clone(self, to_namespace, to_name):
"""Clone this workspace.
Args:
to_namespace (str): Target workspace namespace
to_name (str): Target workspace name
"""
r = fapi.clone_workspace(self.namespace, self.name,
to_namespace, to_name, self.api_url)
fapi._check_response_code(r, 201)
return Workspace(to_namespace, to_name, self.api_url) | firecloud/workspace.py | import json
import os
from firecloud import api as fapi
from firecloud.errors import FireCloudServerError
from firecloud.entity import Entity
class Workspace(object):
"""A FireCloud Workspace.
Attributes:
api_url (str): API root used to interact with FireCloud,
normally https://api.firecloud.org/api
namespace (str): Google project for this workspace
name (str): Workspace name
"""
def __init__(self, namespace, name, api_url=fapi.PROD_API_ROOT):
"""Get an existing workspace from Firecloud by name.
This method assumes that a workspace with the given name and
namespace is present at the api_url given, and raises an error
if it does not exist. To create a new workspace, use
Workspace.new()
Raises:
FireCloudServerError: Workspace does not exist, or
API call fails
"""
self.api_url = api_url
self.namespace = namespace
self.name = name
## Call out to FireCloud
r = fapi.get_workspace(namespace, name, api_url)
fapi._check_response_code(r, 200)
self.data = r.json()
@staticmethod
def new(namespace, name, protected=False,
attributes=dict(), api_url=fapi.PROD_API_ROOT):
"""Create a new FireCloud workspace.
Returns:
Workspace: A new FireCloud workspace
Raises:
FireCloudServerError: API call failed.
"""
r = fapi.create_workspace(namespace, name, protected, attributes, api_url)
fapi._check_response_code(r, 201)
return Workspace(namespace, name, api_url)
def refresh(self):
"""Reload workspace metadata from firecloud.
Workspace metadata is cached in the data attribute of a Workspace,
and may become stale, requiring a refresh().
"""
r = fapi.get_workspace(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 200)
self.data = r.json()
return self
def delete(self):
"""Delete the workspace from FireCloud.
Note:
This action cannot be undone. Be careful!
"""
r = fapi.delete_workspace(self.namespace, self.name)
fapi._check_response_code(r, 202)
# Getting useful information out of the bucket
def __str__(self):
"""Return a JSON representation of the bucket."""
return json.dumps(self.data, indent=2)
def bucket(self):
"""Return google bucket id for this workspace."""
return str(self.data["workspace"]["bucketName"])
def lock(self):
"""Lock this Workspace.
This causes the workspace to behave in a read-only way,
regardless of access permissions.
"""
r = fapi.lock_workspace(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 204)
self.data['workspace']['isLocked'] = True
return self
def unlock(self):
"""Unlock this Workspace."""
r = fapi.unlock_workspace(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 204)
self.data['workspace']['isLocked'] = False
return self
def attributes(self):
"""Return a dictionary of workspace attributes"""
return self.data["workspace"]["attributes"]
def get_attribute(self, attr):
"""Return value of workspace attribute.
If the attribute does not exist, return None
"""
return self.data["workspace"]["attributes"].get(attr, None)
def update_attribute(self, attr, value):
"""Set the value of a workspace attribute."""
update = [fapi._attr_up(attr, value)]
r = fapi.update_workspace_attributes(self.namespace, self.name,
update, self.api_url)
fapi._check_response_code(r, 200)
def remove_attribute(self, attr):
"""Remove attribute from a workspace.
Args:
attr (str): attribute name
"""
update = [fapi._attr_rem(attr)]
r = fapi.update_workspace_attributes(self.namespace, self.name,
update, self.api_url)
self.data["workspace"]["attributes"].pop(attr, None)
fapi._check_response_code(r, 200)
def import_tsv(self, tsv_file):
"""Upload entity data to workspace from tsv loadfile.
Args:
tsv_file (file): Tab-delimited file of entity data
"""
r = fapi.upload_entities_tsv(self.namespace, self.name,
self.tsv_file, self.api_url)
fapi._check_response_code(r, 201)
def get_entity(self, etype, entity_id):
"""Return entity in this workspace.
Args:
etype (str): Entity type
entity_id (str): Entity name/unique id
"""
r = fapi.get_entity(self.namespace, self.name, etype,
entity_id, self.api_url)
fapi._check_response_code(r, 200)
dresp = r.json()
return Entity(etype, entity_id, dresp['attributes'])
def delete_entity(self, etype, entity_id):
"""Delete an entity in this workspace.
Args:
etype (str): Entity type
entity_id (str): Entity name/unique id
"""
r = fapi.delete_entity(self.namespace, self.name, etype,
entity_id, self.api_url)
fapi._check_response_code(r, 202)
def import_entities(self, entities):
"""Upload entity objects.
Args:
entities: iterable of firecloud.Entity objects.
"""
edata = Entity.create_payload(entities)
r = fapi.upload_entities(self.namespace, self.name,
edata, self.api_url)
fapi._check_response_code(r, 201)
def create_set(self, set_id, etype, entities):
"""Create a set of entities and upload to FireCloud.
Args
etype (str): one of {"sample, "pair", "participant"}
entities: iterable of firecloud.Entity objects.
"""
if etype not in {"sample", "pair", "participant"}:
raise ValueError("Unsupported entity type:" + str(etype))
payload = "membership:" + etype + "_set_id\t" + etype + "_id\n"
for e in entities:
if e.etype != etype:
msg = "Entity type '" + e.etype + "' does not match "
msg += "set type '" + etype + "'"
raise ValueError(msg)
payload += set_id + '\t' + e.entity_id + '\n'
r = fapi.upload_entities(self.namespace, self.name,
payload, self.api_url)
fapi._check_response_code(r, 201)
def create_sample_set(self, sset_id, samples):
"""Create FireCloud sample_set"""
return self.create_set(sset_id, "sample", samples)
def create_pair_set(self, pset_id, pairs):
"""Create FireCloud pair_set"""
return self.create_set(pset_id, "pair", pairs)
def create_participant_set(self, pset_id, participants):
"""Create FireCloud participant_set"""
return self.create_set(pset_id, "participant", participants)
def submissions(self):
"""List job submissions in workspace."""
r = fapi.get_submissions(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 200)
return r.json()
def entity_types(self):
"""List entity types in workspace."""
r = fapi.get_entity_types(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 200)
return r.json().keys()
def entities(self):
"""List all entities in workspace."""
r = fapi.get_entities_with_type(self.namespace,
self.name, self.api_url)
fapi._check_response_code(r, 200)
edicts = r.json()
return [Entity(e['entityType'], e['name'], e['attributes'])
for e in edicts]
def __get_entities(self, etype):
"""Helper to get entities for a given type."""
r = fapi.get_entities(self.namespace, self.name,
etype, self.api_url)
fapi._check_response_code(r, 200)
return [Entity(e['entityType'], e['name'], e['attributes'])
for e in r.json()]
def samples(self):
"""List samples in a workspace."""
return self.__get_entities("sample")
def participants(self):
"""List participants in a workspace."""
return self.__get_entities("participant")
def pairs(self):
"""List pairs in a workspace."""
return self.__get_entities("pair")
def sample_sets(self):
"""List sample sets in a workspace."""
return self.__get_entities("sample_set")
def participant_sets(self):
"""List participant sets in a workspace."""
return self.__get_entities("participant_set")
def pair_sets(self):
"""List pair sets in a workspace."""
return self.__get_entities("pair_set")
def copy_entities(self, from_namespace, from_workspace, etype, enames):
"""Copy entities from another workspace.
Args:
from_namespace (str): Source workspace namespace
from_workspace (str): Source workspace name
etype (str): Entity type
enames (list(str)): List of entity names to copy
"""
r = fapi.copy_entities(from_namespace, from_workspace,
self.namespace, self.name, etype, enames,
self.api_url)
fapi._check_response_code(r, 201)
def configs(self):
"""Get method configurations in a workspace."""
raise NotImplementedError
r = fapi.get_configs(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 200)
cdata = r.json()
configs = []
for c in cdata:
cnamespace = c['namespace']
cname = c['name']
root_etype = c['rootEntityType']
method_namespace = c['methodRepoMethod']['methodNamespace']
method_name = c['methodRepoMethod']['methodName']
method_version = c['methodRepoMethod']['methodVersion']
def acl(self):
"""Get the access control list for this workspace."""
r = fapi.get_workspace_acl(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 200)
return r.json()
def set_acl(self, role, users):
"""Set access permissions for this workspace
Args:
role (str): Access level
one of {one of "OWNER", "READER", "WRITER", "NO ACCESS"}
users (list(str)): List of users to give role to
"""
acl_updates = [{"email": user, "accessLevel": role} for user in users]
r = fapi.update_workspace_acl(self.namespace, self.name,
acl_updates, self.api_url)
fapi._check_response_code(r, 200)
def clone(self, to_namespace, to_name):
"""Clone this workspace.
Args:
to_namespace (str): Target workspace namespace
to_name (str): Target workspace name
"""
r = fapi.clone_workspace(self.namespace, self.name,
to_namespace, to_name, self.api_url)
fapi._check_response_code(r, 201)
return Workspace(to_namespace, to_name, self.api_url) | 0.699049 | 0.249767 |
import socket
from socket import socket as sock
PROGRAM_VERSION = 'V1.04'
NEW_GAME = '%MOVE -1000'
PLAY_BLACK = '%MOVE -1001'
PLAY_WHITE = '%MOVE -1002'
CONNECTED_WITH = ['connected with {} ({})', 'mit {} ({}) verbunden']
PROTOCOL_ERROR = ['Protocol Error', 'Protokollfehler']
INCORRECT_PW = ['Incorrect password', '<PASSWORD>s Passwort. Vielleicht wird der Name schon verwendet.']
WAIT_FOR_PLAYER = ['...waiting for player...', '...warte auf Spieler...']
ALREADY_ASSIGNED = ['\'{}\' is already assigned. Please choose a different name',
'Der Name \'{}\' ist schon vergeben. Waehle einen anderen Namen']
NOT_LINKED = ['You are not linked with any player.', 'Du bist mit keinem Spieler verbunden.']
TIMEOUT_ERROR = ['Error: connection timeout', 'Fehler: Zeitüberschreitung']
AUTH_ERROR = ['Authentication failed', 'Fehler bei der Authentifizierung']
TOO_MANY_IP = ['Too many users with same ip address', 'Zu viele Nutzer mit derselben IP-Adresse']
BUFFER_SIZE = 256
ETX = chr(0x03) # ASCII 'end of text'
SEPARATOR_LF = '---------------------------------------\n'
SEPARATOR = '---------------------------------------'
class Language:
EN = 0
DE = 1
__language = DE
@staticmethod
def set_en():
Language.__language = Language.EN
@staticmethod
def set_de():
Language.__language = Language.DE
@staticmethod
def set_lang(lang: int):
Language.__language = lang % 2
@staticmethod
def get(item) -> str:
if isinstance(item, list):
return item[Language.__language]
else:
return str(item)
def string(item) -> str:
return Language.get(item)
def get_local_ip():
s = sock(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
s.close()
return ip
def elo_rating(rating_a: int, rating_b: int, result: float, weight: int) -> int:
expectancy = 1 / (1 + 10 ** ((rating_b - rating_a) / 400))
return round(rating_a + weight * (result - expectancy)) | chessServer/shared.py | import socket
from socket import socket as sock
PROGRAM_VERSION = 'V1.04'
NEW_GAME = '%MOVE -1000'
PLAY_BLACK = '%MOVE -1001'
PLAY_WHITE = '%MOVE -1002'
CONNECTED_WITH = ['connected with {} ({})', 'mit {} ({}) verbunden']
PROTOCOL_ERROR = ['Protocol Error', 'Protokollfehler']
INCORRECT_PW = ['Incorrect password', '<PASSWORD>s Passwort. Vielleicht wird der Name schon verwendet.']
WAIT_FOR_PLAYER = ['...waiting for player...', '...warte auf Spieler...']
ALREADY_ASSIGNED = ['\'{}\' is already assigned. Please choose a different name',
'Der Name \'{}\' ist schon vergeben. Waehle einen anderen Namen']
NOT_LINKED = ['You are not linked with any player.', 'Du bist mit keinem Spieler verbunden.']
TIMEOUT_ERROR = ['Error: connection timeout', 'Fehler: Zeitüberschreitung']
AUTH_ERROR = ['Authentication failed', 'Fehler bei der Authentifizierung']
TOO_MANY_IP = ['Too many users with same ip address', 'Zu viele Nutzer mit derselben IP-Adresse']
BUFFER_SIZE = 256
ETX = chr(0x03) # ASCII 'end of text'
SEPARATOR_LF = '---------------------------------------\n'
SEPARATOR = '---------------------------------------'
class Language:
EN = 0
DE = 1
__language = DE
@staticmethod
def set_en():
Language.__language = Language.EN
@staticmethod
def set_de():
Language.__language = Language.DE
@staticmethod
def set_lang(lang: int):
Language.__language = lang % 2
@staticmethod
def get(item) -> str:
if isinstance(item, list):
return item[Language.__language]
else:
return str(item)
def string(item) -> str:
return Language.get(item)
def get_local_ip():
s = sock(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
s.close()
return ip
def elo_rating(rating_a: int, rating_b: int, result: float, weight: int) -> int:
expectancy = 1 / (1 + 10 ** ((rating_b - rating_a) / 400))
return round(rating_a + weight * (result - expectancy)) | 0.467332 | 0.107204 |
import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_exec as h2e, h2o_hosts, h2o_import as h2i
zeroList = [
'Result.hex = Result0 = c(0)',
'Result.hex = Result = c(0)',
]
# randomBitVector
# randomFilter
# factor
# bug?
exprList = [
# 'Result.hex = Result<n> = slice(c.hex[<col1>],<row>)',
'Result<n> = c.hex[,<col1>] = ((c.hex[,2]==0))',
'Result<n> = c.hex[,<col1>]',
# 'Result<n> = min(c.hex[,<col1>], c.hex[,<col1>])',
# 'Result<n> = max(c.hex[,<col1>], c.hex[,<col1>]) + Result.hex[,0]',
### 'Result.hex = Result<n> = mean(c.hex[,<col1>]) + Result.hex[0]',
# 'Result<n> = sum(c.hex[,<col1>], c.hex[,<col1>]) + Result.hex[,0]',
# have to figure out how to avoid infinity results
# 'Result<n> = log(c.hex[<col1>]) + Result.hex[0]',
]
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1)
else:
h2o_hosts.build_cloud_with_hosts(1)
@classmethod
def tearDownClass(cls):
# wait while I inspect things
# time.sleep(1500)
h2o.tear_down_cloud()
def test_exec2_covtype_rand1(self):
h2o.beta_features = True
csvPathname = 'standard/covtype.data'
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='local', hex_key='c.hex', timeoutSecs=15)
print "\nParse key is:", parseResult['destination_key']
### h2b.browseTheCloud()
h2e.exec_zero_list(zeroList)
start = time.time()
h2e.exec_expr_list_rand(len(h2o.nodes), exprList, 'c.hex',
maxCol=54, maxRow=400000, maxTrials=200, timeoutSecs=10)
h2o.check_sandbox_for_errors()
print "exec end on ", "covtype.data" , 'took', time.time() - start, 'seconds'
if __name__ == '__main__':
h2o.unit_main() | py/testdir_single_jvm_fvec/test_exec2_covtype_rand1.py | import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_exec as h2e, h2o_hosts, h2o_import as h2i
zeroList = [
'Result.hex = Result0 = c(0)',
'Result.hex = Result = c(0)',
]
# randomBitVector
# randomFilter
# factor
# bug?
exprList = [
# 'Result.hex = Result<n> = slice(c.hex[<col1>],<row>)',
'Result<n> = c.hex[,<col1>] = ((c.hex[,2]==0))',
'Result<n> = c.hex[,<col1>]',
# 'Result<n> = min(c.hex[,<col1>], c.hex[,<col1>])',
# 'Result<n> = max(c.hex[,<col1>], c.hex[,<col1>]) + Result.hex[,0]',
### 'Result.hex = Result<n> = mean(c.hex[,<col1>]) + Result.hex[0]',
# 'Result<n> = sum(c.hex[,<col1>], c.hex[,<col1>]) + Result.hex[,0]',
# have to figure out how to avoid infinity results
# 'Result<n> = log(c.hex[<col1>]) + Result.hex[0]',
]
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1)
else:
h2o_hosts.build_cloud_with_hosts(1)
@classmethod
def tearDownClass(cls):
# wait while I inspect things
# time.sleep(1500)
h2o.tear_down_cloud()
def test_exec2_covtype_rand1(self):
h2o.beta_features = True
csvPathname = 'standard/covtype.data'
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='local', hex_key='c.hex', timeoutSecs=15)
print "\nParse key is:", parseResult['destination_key']
### h2b.browseTheCloud()
h2e.exec_zero_list(zeroList)
start = time.time()
h2e.exec_expr_list_rand(len(h2o.nodes), exprList, 'c.hex',
maxCol=54, maxRow=400000, maxTrials=200, timeoutSecs=10)
h2o.check_sandbox_for_errors()
print "exec end on ", "covtype.data" , 'took', time.time() - start, 'seconds'
if __name__ == '__main__':
h2o.unit_main() | 0.207857 | 0.295306 |
import logging
import os
import environ
env = environ.Env()
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '<KEY>'
DEBUG = True
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'accounts',
'item_master',
'unit_master',
'company_master',
'salt_master',
'godown_master',
'home',
'sales',
'purchase',
"compressor",
]
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
},
'rest_backend': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
CACHE_MIDDLEWARE_KEY_PREFIX = 'croma'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# other finders..
'compressor.finders.CompressorFinder',
)
COMPRESS_ENABLED = True
COMPRESS_OFFLINE = True
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'croma.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'croma.wsgi.application'
backup_dir = "E:\\backup"
if not os.path.exists(backup_dir):
os.makedirs(backup_dir)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db/db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
log_dir = os.path.join(os.path.dirname(BASE_DIR), "logs")
if not os.path.exists(log_dir):
os.makedirs(log_dir)
PRINTINV_FILEPATH = os.path.join(os.path.dirname(BASE_DIR), "logs/print_inv.txt")
log_file = os.path.join(os.path.dirname(BASE_DIR), "logs/views.log")
logging.basicConfig(format='%(asctime)s %(levelname)-8s [%(name)s: %(lineno)d] %(message)s',
datefmt='%d-%m-%Y:%H:%M',
level=logging.ERROR, filename=log_file)
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
# USE_TZ = True
USE_L10N = True
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_cdn")
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media_cdn") | src/croma/settings.py |
import logging
import os
import environ
env = environ.Env()
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '<KEY>'
DEBUG = True
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'accounts',
'item_master',
'unit_master',
'company_master',
'salt_master',
'godown_master',
'home',
'sales',
'purchase',
"compressor",
]
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
},
'rest_backend': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
CACHE_MIDDLEWARE_KEY_PREFIX = 'croma'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# other finders..
'compressor.finders.CompressorFinder',
)
COMPRESS_ENABLED = True
COMPRESS_OFFLINE = True
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'croma.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'croma.wsgi.application'
backup_dir = "E:\\backup"
if not os.path.exists(backup_dir):
os.makedirs(backup_dir)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db/db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
log_dir = os.path.join(os.path.dirname(BASE_DIR), "logs")
if not os.path.exists(log_dir):
os.makedirs(log_dir)
PRINTINV_FILEPATH = os.path.join(os.path.dirname(BASE_DIR), "logs/print_inv.txt")
log_file = os.path.join(os.path.dirname(BASE_DIR), "logs/views.log")
logging.basicConfig(format='%(asctime)s %(levelname)-8s [%(name)s: %(lineno)d] %(message)s',
datefmt='%d-%m-%Y:%H:%M',
level=logging.ERROR, filename=log_file)
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
# USE_TZ = True
USE_L10N = True
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_cdn")
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media_cdn") | 0.127381 | 0.053626 |