# Repository: BlooAM/Day-ahead-prices
from datetime import datetime
import pandas as pd
from src.data_generator.day_ahead_extractors.pse.base_day_ahead_extractor import PseDataDayAheadExtractor
__all__ = ('RealUnitsOutagesDayAheadExtractor',)
class RealUnitsOutagesDayAheadExtractor(PseDataDayAheadExtractor):
def extract(self) -> pd.DataFrame:
raw_data_copy = self.raw_df.copy()
for column in ['Wielkość ubytku elektrownianego', 'Wielkość ubytku sieciowego',
'Dostępne zdolności wytwórcze']:
raw_data_copy[column] = \
raw_data_copy[column].apply(self.delete_unnecessary_commas_and_add_dot)
raw_data_copy[column] = raw_data_copy[column].astype('float')
data_with_complete_columns = self._handle_time_shift(data=raw_data_copy)
data_transformed = self._get_datetime_from_dates_and_hours(data_with_complete_columns)
data_transformed = data_transformed.rename(
columns={'Kod JW': 'code', 'Wielkość ubytku elektrownianego': 'outage'}
)
data_transformed = data_transformed[['date', 'code', 'outage']]
unit_static_data = pd.read_csv('resources/unit_codes_by_sources.csv')
        installed_capacity = unit_static_data[['unit_code_pse', 'capacity_installed', 'unit_type']].copy()
data_with_capacity_installed = \
pd.merge(data_transformed, unit_static_data, left_on='code', right_on='unit_code_pse')
data_with_capacity_classes = \
self.__divide_into_unit_classes(data=data_with_capacity_installed)
data_by_capacity_and_type = \
data_with_capacity_classes.groupby(by=['date', 'unit_type', 'capacity_class']).sum()
data_by_capacity_and_type = data_by_capacity_and_type.reset_index()
data_by_capacity_and_type = \
data_by_capacity_and_type[['date', 'unit_type', 'capacity_class', 'outage']]
installed_capacity_with_capacity_classes = \
self.__divide_into_unit_classes(data=installed_capacity)
installed_capacity_by_capacity_and_type = installed_capacity_with_capacity_classes.groupby(
by=['unit_type', 'capacity_class']
).sum().reset_index()
data_transformed = pd.merge(data_by_capacity_and_type,
installed_capacity_by_capacity_and_type,
on=['unit_type', 'capacity_class'],
)
data_transformed['available_capacity'] = \
data_transformed['capacity_installed'] - data_transformed['outage']
available_capacity = data_transformed.pivot(
index='date',
columns=['unit_type', 'capacity_class'],
values='available_capacity',
)
total_capacity = installed_capacity_by_capacity_and_type.pivot(
columns=['unit_type', 'capacity_class'],
values='capacity_installed',
).bfill()
        for column in total_capacity.columns:
            capacity_installed = total_capacity[column].iloc[0]
            if column not in available_capacity.columns:
                available_capacity[column] = capacity_installed
            available_capacity[column] = available_capacity[column].fillna(capacity_installed)
columns = [' '.join(col) + ' available' for col in available_capacity.columns]
available_capacity.columns = columns
return available_capacity
def _get_prediction_flag(self) -> bool:
return True
def __divide_into_unit_classes(self, data: pd.DataFrame) -> pd.DataFrame:
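        # Capacity is binned into nominal classes: (0, 100] -> '100', (100, 300] -> '200',
        # (300, 400] -> '300', (400, 700] -> '500', (700, 9999] -> '1000';
        # e.g. a 250 MW unit is labelled '200'.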
data['capacity_class'] = pd.cut(
x=data['capacity_installed'],
bins=[0, 100, 300, 400, 700, 9999],
labels=['100', '200', '300', '500', '1000'],
).astype(str)
return data
if __name__ == '__main__':
from src.data_generator.extractors.pse.real_units_outages import RealUnitsOutagesExtractor
df = RealUnitsOutagesExtractor().extract(date=datetime(2020, 3, 29))
transformed = RealUnitsOutagesDayAheadExtractor(
start_date=datetime(2020, 3, 29),
end_date=datetime(2020, 6, 30),
).extract()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__email__ = "<EMAIL>"
from prettytable import PrettyTable
from pycompss.api.api import compss_wait_on
from ddf_library.utils import _gen_uuid
from ddf_library.utils import delete_result, merge_info
import networkx as nx
class Status(object):
STATUS_WAIT = 'WAIT'
STATUS_DELETED = 'DELETED'
STATUS_COMPLETED = 'COMPLETED'
STATUS_PERSISTED = 'PERSISTED' # to persist in order to reuse later
class OPTGroup(object):
    OPT_SERIAL = 'serial'  # it can be grouped with other operations
    OPT_OTHER = 'other'  # no kind of task optimization can be performed on it
OPT_LAST = 'last' # it contains two or more stages,
# but only the last stage can be grouped
class CatalogTask(object):
dag = nx.DiGraph()
catalog_tasks = dict()
    # task_map: a dictionary that stores the following information about a task:
    #  - name: task name
    #  - parameters: a dictionary with the function's input parameters;
    #  - status: WAIT, COMPLETED, PERSISTED
    #  - result: if status is COMPLETED, a dictionary with the results.
    #    The keys of this dictionary are indexes representing each output
    #    (to handle multiple outputs, e.g. from a split task);
    #  - schema:
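    # Illustrative example of one entry (the exact keys are set by the calling code):
    #   catalog_tasks[uuid] = {'name': 'task_name', 'operation': <operation object>,
    #                          'status': Status.STATUS_WAIT, 'result': {0: partition},
    #                          'schema': {...}}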
# to speedup the searching for completed tasks:
completed_tasks = list()
def clear(self): # TODO
for id_task in list(self.catalog_tasks):
data = self.get_task_return(id_task)
if check_serialization(data):
delete_result(data)
self.catalog_tasks = dict()
self.completed_tasks = list()
self.dag = nx.DiGraph()
def show_tasks(self):
"""
Show all tasks in the current code. Only to debug.
:return:
"""
t = PrettyTable(['uuid', 'Task name', 'STATUS', 'Example result'])
for uuid in self.catalog_tasks:
r = self.get_task_return(uuid)
if isinstance(r, list):
if len(r) > 0:
r = r[0]
else:
r = ''
t.add_row([uuid[:8], self.get_task_name(uuid),
self.get_task_status(uuid), r])
print("\nList of all tasks:\n", t, '\n')
def gen_new_uuid(self):
new_state_uuid = _gen_uuid()
while new_state_uuid in self.catalog_tasks:
new_state_uuid = _gen_uuid()
return new_state_uuid
def add_completed(self, uuid):
self.completed_tasks.append(uuid)
def rm_completed(self, uuid):
# to prevent multiple occurrences
self.completed_tasks = \
list(filter(lambda a: a != uuid,
self.completed_tasks))
def list_completed(self):
return self.completed_tasks
def list_all(self):
return list(self.catalog_tasks)
def get_all_schema(self, uuid):
return self.catalog_tasks[uuid].get('schema', {})
def get_merged_schema(self, uuid):
sc = self.get_all_schema(uuid)
if isinstance(sc, list):
sc = merge_info(sc)
sc = compss_wait_on(sc)
self.set_schema(uuid, sc.copy())
return sc
def set_schema(self, uuid, schema):
self.catalog_tasks[uuid]['schema'] = schema
def rm_schema(self, uuid):
self.catalog_tasks[uuid].pop('schema', None)
def set_new_task(self, uuid_task, args):
self.catalog_tasks[uuid_task] = args
def get_task_name(self, uuid_task):
return self.catalog_tasks[uuid_task].get('name', '')
def get_task_opt_type(self, uuid_task):
return self.catalog_tasks[uuid_task]['operation'].phi_category
def get_task_operation(self, uuid_task):
return self.catalog_tasks[uuid_task]['operation']
def get_task_parameters(self, uuid_task):
return self.catalog_tasks[uuid_task]['operation'].settings
def set_task_parameters(self, uuid_task, settings):
self.catalog_tasks[uuid_task]['operation'].settings = settings
def get_task_return(self, uuid_task):
return self.catalog_tasks[uuid_task].get('result', [])
def set_task_return(self, uuid_task, data):
self.catalog_tasks[uuid_task]['result'] = data
def rm_task_return(self, uuid_task):
data = self.get_task_return(uuid_task)
if check_serialization(data):
delete_result(data)
self.catalog_tasks[uuid_task]['result'] = None
def get_task_status(self, uuid_task):
return self.catalog_tasks[uuid_task].get('status', Status.STATUS_WAIT)
def set_task_status(self, uuid_task, status):
self.catalog_tasks[uuid_task]['status'] = status
if status == Status.STATUS_COMPLETED:
self.completed_tasks.append(uuid_task)
def get_task_parents(self, uuid_task):
return [i for i, o in self.dag.in_edges(uuid_task)]
def get_task_children(self, uuid_task):
return [o for i, o in self.dag.out_edges(uuid_task)]
def topological_sort(self):
return list(nx.algorithms.topological_sort(self.dag))
def add_task_parent(self, uuid_task, uuid_p):
self.dag.add_edge(uuid_p, uuid_task)
def remove_intermediate_node(self, node1, node2, node3):
self.dag.add_edge(node1, node3)
self.dag.remove_node(node2)
def remove_node(self, node1):
self.dag.remove_node(node1)
def change_node_position(self, node1, node2):
"""
Move node1 to above node2
:param node1:
:param node2:
:return:
"""
for p in self.get_task_parents(node1):
for c in self.get_task_children(node1):
self.dag.add_edge(p, c)
self.dag.remove_node(node1) # remove all edges
self.dag.add_node(node1)
for old_p in self.get_task_parents(node2):
self.dag.add_edge(old_p, node1)
self.dag.remove_edge(old_p, node2)
self.dag.add_edge(node1, node2)
def move_node_to_up(self, node1, node2):
"""
Move node2 to the node1's place
:param node1:
:param node2:
:return:
"""
for p in self.get_task_parents(node2):
self.dag.remove_edge(p, node2)
parents = self.get_task_parents(node1)
for p in parents:
self.dag.remove_edge(p, node1)
self.dag.add_edge(p, node2)
self.dag.add_edge(node2, node1)
def move_node_to_down(self, node1, node2):
"""
Move node2 to the node1's place
:param node1:
:param node2:
:return:
"""
for p in self.get_task_parents(node2):
for c in self.get_task_children(node2):
self.dag.add_edge(p, c)
self.dag.remove_node(node2)
self.dag.add_node(node2)
self.dag.add_edge(node1, node2)
def get_n_input(self, uuid_task):
return len(self.get_task_parents(uuid_task))
def get_task_sibling(self, uuid_task):
return self.catalog_tasks[uuid_task].get('sibling', [uuid_task])
def set_task_sibling(self, uuid_task, siblings):
if uuid_task not in self.catalog_tasks:
raise Exception('uuid "{}" not in '
'catalog_tasks'.format(uuid_task[:8]))
self.catalog_tasks[uuid_task]['sibling'] = siblings
def get_input_data(self, id_parents):
return [self.get_task_return(id_p) for id_p in id_parents]
# def get_info_condition(self, uuid_task):
# name_task = self.get_task_name(uuid_task)
# return self.task_definitions[name_task].get('schema', False)
def check_serialization(data):
"""
Check if output is a str file object (Future) or is a BufferIO.
:param data:
:return:
"""
if isinstance(data, list):
if len(data) > 0:
return isinstance(data[0], str)
return False
import random
import pickle
from pathlib import Path
from itertools import chain, islice, tee
from collections import deque
from pipelib import parallel
from pipelib import iterators
class Dataset:
def __init__(self, dataset):
if isinstance(dataset, Dataset):
self._dataset = dataset._dataset
else:
self._dataset = dataset
def __iter__(self):
yield from self._dataset
def get_prefetch_iterator(self, n_prefetch=1):
return iterators.PrefetchIterator(self, n_prefetch)
def apply(self, func):
return PipelinedDataset(self, func)
def repeat(self):
def f(dataset):
while True:
yield from dataset
return PipelinedDataset(self, f)
def batch(self, batch_size):
def f(dataset):
iterator = iter(dataset)
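            # iter(callable, sentinel): keep pulling chunks of up to batch_size items
            # until an empty list (the sentinel) is produced, yielding each chunk as a batch.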
yield from iter(lambda: list(islice(iterator, batch_size)), [])
return PipelinedDataset(self, f)
def shuffle(self, shuffle_size):
def f(dataset):
iterator = iter(dataset)
for chunk in iter(lambda: list(islice(iterator, shuffle_size)), []):
random.shuffle(chunk)
yield from chunk
return PipelinedDataset(self, f)
def window(self, window_size):
def f(dataset):
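            # tee() creates window_size copies of the iterator; the i-th copy is advanced by i elements
            # (the throwaway deque with maxlen=0 just consumes islice(it, i)), and zip() then yields
            # sliding windows, e.g. window(3) over [0, 1, 2, 3, 4] gives (0, 1, 2), (1, 2, 3), (2, 3, 4).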
yield from zip(*(deque(islice(it, i), 0) or it
for i, it in enumerate(tee(dataset, window_size))))
return PipelinedDataset(self, f)
def map(self, map_func):
def f(dataset):
return map(map_func, dataset)
return PipelinedDataset(self, f)
def flat_map(self, map_func):
def f(dataset):
return chain.from_iterable(map(map_func, dataset))
return PipelinedDataset(self, f)
def filter(self, predicate):
def f(dataset):
return filter(predicate, dataset)
return PipelinedDataset(self, f)
def zip(self, *others):
assert all(isinstance(other, Dataset) for other in others)
def f(dataset):
yield from zip(dataset, *others)
return PipelinedDataset(self, f)
def concat(self, *others):
assert all(isinstance(other, Dataset) for other in others)
def f(dataset):
yield from chain(dataset, *others)
return PipelinedDataset(self, f)
def map_parallel(self, map_func, n=None, chunksize=1, unordered=False):
return PipelinedDataset(
self, parallel.MapParallel(map_func, n, chunksize, unordered))
def flat_map_parallel(self, map_func, n=None, chunksize=1, unordered=False):
return PipelinedDataset(
self, parallel.FlatMapParallel(map_func, n, chunksize, unordered))
def filter_parallel(self, predicate, n=None, chunksize=1, unordered=False):
return PipelinedDataset(
self, parallel.FilterParallel(predicate, n, chunksize, unordered))
def all(self):
return list(self)
def take(self, n):
return list(islice(self, n))
def first(self):
return next(iter(self))
def save(self, filename):
cache = list(self)
with open(filename, 'wb') as f:
pickle.dump(cache, f)
return CacheDataset(self, cache)
@staticmethod
def load(filename):
with open(filename, 'rb') as f:
dataset = pickle.load(f)
return Dataset(dataset)
class _NestedFunc:
__slots__ = ['_prev_func', '_func']
def __init__(self, prev_func, func):
self._prev_func = prev_func
self._func = func
def _flatten_func(self, func):
if isinstance(func, _NestedFunc):
yield from self._flatten_func(func._prev_func)
yield from self._flatten_func(func._func)
else:
yield func
def __call__(self, dataset):
for func in self._flatten_func(self):
dataset = func(dataset)
return dataset
class PipelinedDataset(Dataset):
def __init__(self, dataset, func):
if not isinstance(dataset, PipelinedDataset):
self._func = func
else:
self._func = _NestedFunc(dataset._func, func)
super().__init__(dataset)
def __iter__(self):
yield from self._func(self._dataset)
class CacheDataset(PipelinedDataset):
def __init__(self, dataset, cache):
super().__init__(dataset, lambda x: x)
self._cache = cache
def __iter__(self):
yield from self._cache
class _Repeated:
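    # Wraps a generator function and its arguments so a dataset can be iterated more than once:
    # every call to __iter__ creates a fresh generator.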
__slots__ = ['_generator', '_args', '_kwargs']
def __init__(self, generator, *args, **kwargs):
self._generator = generator
self._args = args
self._kwargs = kwargs
def __iter__(self):
return self._generator(*self._args, **self._kwargs)
class TextDataset(Dataset):
def __init__(self, filepath, encoding='utf-8'):
filepath = Path(filepath)
assert filepath.is_file()
self._filepath = filepath
self._encoding = encoding
@property
def _dataset(self):
def g(filepath, encoding):
with filepath.open(encoding=encoding) as f:
for line in f:
yield line.rstrip()
return _Repeated(g, filepath=self._filepath, encoding=self._encoding)
class DirDataset(Dataset):
def __init__(self, dirpath, pattern='*'):
dirpath = Path(dirpath)
assert dirpath.is_dir()
self._dirpath = dirpath
self._pattern = pattern
@property
def _dataset(self):
def g(dirpath, pattern):
for path in dirpath.glob(pattern):
yield str(path)
return _Repeated(g, dirpath=self._dirpath, pattern=self._pattern)
# Import packages
import math
import time
from code_w.recommand.chapter4.database import Dataset
from code_w.recommand.chapter4.metric import Metric
# Define a decorator to monitor running time
def timmer(func):
def wrapper(*args, **kwargs):
start_time = time.time()
res = func(*args, **kwargs)
stop_time = time.time()
print('Func %s, run time: %s' % (func.__name__, stop_time - start_time))
return res
return wrapper
# 1. Recommendation based on popular tags
def SimpleTagBased(train, N):
    '''
    :params: train, training dataset
    :params: N, hyperparameter, the number of Top-N items to recommend
    :return: GetRecommendation, the recommendation interface function
    '''
    # Count user_tags and tag_items
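    # train is expected to be a nested dict of the form {user: {item: [tag, ...]}}
    # (inferred from how it is iterated below).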
user_tags, tag_items = {}, {}
for user in train:
user_tags[user] = {}
for item in train[user]:
for tag in train[user][item]:
if tag not in user_tags[user]:
user_tags[user][tag] = 0
user_tags[user][tag] += 1
if tag not in tag_items:
tag_items[tag] = {}
if item not in tag_items[tag]:
tag_items[tag][item] = 0
tag_items[tag][item] += 1
def GetRecommendation(user):
        # Recommend the Top-N unseen items ranked by score
if user not in user_tags:
return []
seen_items = set(train[user])
item_score = {}
for tag in user_tags[user]:
for item in tag_items[tag]:
if item in seen_items:
continue
if item not in item_score:
item_score[item] = 0
item_score[item] += user_tags[user][tag] * tag_items[tag][item]
item_score = list(sorted(item_score.items(), key=lambda x: x[1], reverse=True))
return item_score[:N]
return GetRecommendation
# 2. Improvement 1: add a penalty term for popular tags
def TagBasedTFIDF(train, N):
    '''
    :params: train, training dataset
    :params: N, hyperparameter, the number of Top-N items to recommend
    :return: GetRecommendation, the recommendation interface function
    '''
    # Count user_tags and tag_items
    user_tags, tag_items = {}, {}
    # Count tag popularity, i.e. the number of distinct users who applied each tag
tag_pop = {}
for user in train:
user_tags[user] = {}
for item in train[user]:
for tag in train[user][item]:
if tag not in user_tags[user]:
user_tags[user][tag] = 0
user_tags[user][tag] += 1
if tag not in tag_items:
tag_items[tag] = {}
if item not in tag_items[tag]:
tag_items[tag][item] = 0
tag_items[tag][item] += 1
if tag not in tag_pop:
tag_pop[tag] = set()
tag_pop[tag].add(user)
tag_pop = {k: len(v) for k, v in tag_pop.items()}
# tag_pop = {k: math.log(1 + len(v))for k, v in tag_pop.items()}
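    # The scores below are divided by tag_pop[tag], so tags used by many different users
    # contribute less to the score (a TF-IDF-style penalty on popular tags).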
def GetRecommendation(user):
        # Recommend the Top-N unseen items ranked by score
if user not in user_tags:
return []
seen_items = set(train[user])
item_score = {}
for tag in user_tags[user]:
for item in tag_items[tag]:
if item in seen_items:
continue
if item not in item_score:
item_score[item] = 0
item_score[item] += user_tags[user][tag] * tag_items[tag][item] / tag_pop[tag]
item_score = list(sorted(item_score.items(), key=lambda x: x[1], reverse=True))
return item_score[:N]
return GetRecommendation
# 3. Improvement 2: also add a penalty term for popular items
def TagBasedTFIDF_Improved(train, N):
    '''
    :params: train, training dataset
    :params: N, hyperparameter, the number of Top-N items to recommend
    :return: GetRecommendation, the recommendation interface function
    '''
    # Count user_tags and tag_items
    user_tags, tag_items = {}, {}
    # Count tag and item popularity: the number of distinct users per tag, and the number of users who tagged each item
tag_pop, item_pop = {}, {}
for user in train:
user_tags[user] = {}
for item in train[user]:
if item not in item_pop:
item_pop[item] = 0
item_pop[item] += 1
for tag in train[user][item]:
if tag not in user_tags[user]:
user_tags[user][tag] = 0
user_tags[user][tag] += 1
if tag not in tag_items:
tag_items[tag] = {}
if item not in tag_items[tag]:
tag_items[tag][item] = 0
tag_items[tag][item] += 1
if tag not in tag_pop:
tag_pop[tag] = set()
tag_pop[tag].add(user)
tag_pop = {k: len(v) for k, v in tag_pop.items()}
# tag_pop = {k: math.log(1+len(v)) for k, v in tag_pop.items()}
def GetRecommendation(user):
        # Recommend the Top-N unseen items ranked by score
if user not in user_tags:
return []
seen_items = set(train[user])
item_score = {}
for tag in user_tags[user]:
for item in tag_items[tag]:
if item in seen_items:
continue
if item not in item_score:
item_score[item] = 0
# item_score[item] += user_tags[user][tag] * tag_items[tag][item] / tag_pop[tag] / math.log(item_pop[item]+1)
item_score[item] += user_tags[user][tag] * tag_items[tag][item] / tag_pop[tag] / item_pop[item]
item_score = list(sorted(item_score.items(), key=lambda x: x[1], reverse=True))
return item_score[:N]
return GetRecommendation
# 4. Recommendation based on tag expansion
def ExpandTagBased(train, N, M=20):
    '''
    :params: train, training dataset
    :params: N, hyperparameter, the number of Top-N items to recommend
    :params: M, hyperparameter, pad users that have fewer than M tags using the Top-M most similar tags
    :return: GetRecommendation, the recommendation interface function
    '''
    # 1. Compute tag-to-tag similarity
item_tag = {}
for user in train:
for item in train[user]:
if item not in item_tag:
item_tag[item] = set()
for tag in train[user][item]:
item_tag[item].add(tag)
tag_sim, tag_cnt = {}, {}
for item in item_tag:
for u in item_tag[item]:
if u not in tag_cnt:
tag_cnt[u] = 0
tag_cnt[u] += 1
if u not in tag_sim:
tag_sim[u] = {}
for v in item_tag[item]:
if u == v:
continue
if v not in tag_sim[u]:
tag_sim[u][v] = 0
tag_sim[u][v] += 1
for u in tag_sim:
for v in tag_sim[u]:
tag_sim[u][v] /= math.sqrt(tag_cnt[u] * tag_cnt[v])
    # 2. Expand the tag set of each user
user_tags = {}
for user in train:
if user not in user_tags:
user_tags[user] = {}
for item in train[user]:
for tag in train[user][item]:
if tag not in user_tags[user]:
user_tags[user][tag] = 0
user_tags[user][tag] += 1
expand_tags = {}
for user in user_tags:
if len(user_tags[user]) >= M:
expand_tags[user] = user_tags[user]
continue
        # Expand tags for users with fewer than M tags
expand_tags[user] = {}
seen_tags = set(user_tags[user])
for tag in user_tags[user]:
for t in tag_sim[tag]:
if t in seen_tags:
continue
if t not in expand_tags[user]:
expand_tags[user][t] = 0
                expand_tags[user][t] += user_tags[user][tag] * tag_sim[tag][t]  # weight by similarity to produce the new tag weights
expand_tags[user].update(user_tags[user])
expand_tags[user] = dict(list(sorted(expand_tags[user].items(), key=lambda x: x[1], reverse=True))[:M])
    # 3. Apply the SimpleTagBased scoring
tag_items = {}
for user in train:
for item in train[user]:
for tag in train[user][item]:
if tag not in tag_items:
tag_items[tag] = {}
if item not in tag_items[tag]:
tag_items[tag][item] = 0
tag_items[tag][item] += 1
def GetRecommendation(user):
        # Recommend the Top-N unseen items ranked by score
if user not in user_tags:
return []
seen_items = set(train[user])
item_score = {}
for tag in expand_tags[user]:
for item in tag_items[tag]:
if item in seen_items:
continue
if item not in item_score:
item_score[item] = 0
item_score[item] += expand_tags[user][tag] * tag_items[tag][item]
item_score = list(sorted(item_score.items(), key=lambda x: x[1], reverse=True))
return item_score[:N]
return GetRecommendation
class Experiment():
def __init__(self, M, N, fp='../../../data/hetrec2011-delicious-2k/user_taggedbookmarks.dat', rt='SimpleTagBased'):
        '''
        :params: M, number of experiments (data folds)
        :params: N, number of Top-N items to recommend
        :params: fp, data file path
        :params: rt, recommendation algorithm type
        '''
self.M = M
self.N = N
self.fp = fp
self.rt = rt
self.alg = {'SimpleTagBased': SimpleTagBased, 'TagBasedTFIDF': TagBasedTFIDF, \
'TagBasedTFIDF_Improved': TagBasedTFIDF_Improved, 'ExtendTagBased': ExpandTagBased}
    # Define a single experiment run
@timmer
def worker(self, train, test):
        '''
        :params: train, training dataset
        :params: test, test dataset
        :return: the value of each metric
        '''
getRecommendation = self.alg[self.rt](train, self.N)
metric = Metric(train, test, getRecommendation)
return metric.eval()
    # Run multiple experiments and average the metrics
@timmer
def run(self):
metrics = {'Precision': 0, 'Recall': 0,
'Coverage': 0, 'Diversity': 0,
'Popularity': 0}
dataset = Dataset(self.fp)
for ii in range(self.M):
train, test = dataset.splitData(self.M, ii)
print('Experiment {}:'.format(ii))
metric = self.worker(train, test)
metrics = {k: metrics[k] + metric[k] for k in metrics}
metrics = {k: metrics[k] / self.M for k in metrics}
print('Average Result (M={}, N={}): {}'.format(self.M, self.N, metrics))
# # 1. SimpleTagBased experiment
# M, N = 10, 10
# exp = Experiment(M, N, rt='SimpleTagBased')
# exp.run()
# 2. TagBasedTFIDF experiment
# M, N = 10, 10
# exp = Experiment(M, N, rt='TagBasedTFIDF')
# exp.run()
# 3. TagBasedTFIDF++ experiment
M, N = 10, 10
exp = Experiment(M, N, rt='TagBasedTFIDF_Improved')
exp.run()
# # 4. TagExtend experiment
# M, N = 10, 10
# exp = Experiment(M, N, rt='ExtendTagBased')
# exp.run()
import re
import json
from datetime import datetime
import urllib.request
import urllib.parse
import itertools
from functools import reduce
# story types
CHORE = "chore"
BUG = "bug"
FEATURE = "feature"
# story states
UNSCHEDULED = "unscheduled"
UNSTARTED = "unstarted"
STARTED = "started"
FINISHED = "finished"
DELIVERED = "delivered"
REJECTED = "rejected"
ACCEPTED = "accepted"
api_base = "https://www.pivotaltracker.com/services/v5/"
token = None
def set_token(t):
global token
token = t
def parse_date(date_str):
    curr = None
    try:
        curr = datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%SZ")
    except (ValueError, TypeError):
        pass
if curr is None:
print("Possibly invalid date format: %s" % date_str)
return curr
class UnknownPropertyException(Exception):
pass
class NoTokenException(Exception):
pass
class Base():
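    # Thin wrapper around a Tracker JSON payload: attributes that are not defined on the class
    # are looked up in the underlying JSON dict via __getattr__.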
json = None
def __init__(self, json):
self.json = json
def __getattr__(self, attr):
if attr in self.json.keys():
return self.json[attr]
raise UnknownPropertyException("%s has no attribute '%s'" % (self.__class__.__name__, attr))
@staticmethod
def fetch(path, parser=None):
if not token:
raise NoTokenException('No token had been set!')
opener = urllib.request.build_opener()
opener.addheaders = [('X-TrackerToken', token)]
urllib.request.install_opener(opener)
response = urllib.request.urlopen(api_base + path).read()
json_object = json.loads(response.decode('utf-8'))
return json_object if parser is None else parser(json_object)
class Project(Base):
_epics = None
def __getattr__(self, attr):
if attr in ['epics']:
if self._epics is None:
self._epics = Epic.fetch_all(self.id)
return self._epics
return super().__getattr__(attr)
@staticmethod
def fetch_all():
return Project.fetch('projects', lambda ps: [Project(p) for p in ps])
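# Example usage (the token value is a placeholder):
#   set_token("YOUR_API_TOKEN")
#   projects = Project.fetch_all()
#   epics = projects[0].epics  # fetched lazily via Project.__getattr__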
class Epic(Base):
_priority = None
_stories = None
_activities = None
_label = None
def __init__(self, priority, *args, **kwargs):
super().__init__(*args, **kwargs)
self._priority = priority
def __getattr__(self, attr):
if attr in ['priority']:
return self._priority
if attr in ['created_at', 'updated_at']:
return parse_date(self.json[attr])
if attr in ['label']:
if self._label is None:
self._label = Label(self.json['label'])
return self._label
if attr in ['stories']:
if self._stories is None:
self._stories = Story.fetch_all(self.project_id, self.label.name)
return self._stories
if attr in ['activities']:
if self._activities is None:
self._activities = Activity.fetch_all(self.project_id, self.id)
return self._activities
return super().__getattr__(attr)
def get_estimate(self):
total = 0
ongoing = 0
accepted = 0
        # Note: we aren't concerned about unscheduled (i.e. iceboxed) stories
for s in self.stories:
if not s.is_feature():
continue
k = s.current_state
if k != UNSCHEDULED:
total = total + s.get_points()
if k in [STARTED, FINISHED, DELIVERED]:
ongoing = ongoing + s.get_points()
if k == ACCEPTED:
accepted = accepted + s.get_points()
return (total, ongoing, accepted)
def get_most_recent_estimate(self):
if self.engman_data is None or 'estimates' not in self.engman_data.keys():
return None
est = 0
        curr = datetime.strptime("1982-05-03 23:00", "%Y-%m-%d %H:%M")  # arbitrary old date
        for estimate in self.engman_data['estimates']:
            est_date = parse_date(estimate['datetime'])
if curr < est_date:
est = estimate['size']
curr = est_date
return est
def get_launch(self):
if self.engman_data is None or 'launch' not in self.engman_data.keys():
return None
        return parse_date(self.engman_data['launch'])
@staticmethod
def fetch_all(project_id):
"""Fetches all epics in project"""
return Epic.fetch('projects/' + str(project_id) + '/epics', lambda es: [Epic(i + 1, e) for i, e in enumerate(es)])
    def has_outstanding_stories(self, story_fetcher=None):
        # self.stories is fetched lazily on first access (see __getattr__ above)
        for s in self.stories:
            if s.current_state not in [UNSCHEDULED, ACCEPTED]:
                return True
        return False
def get_story_distribution(self):
stats = {}
stats[UNSCHEDULED] = 0
stats[UNSTARTED] = 0
stats[STARTED] = 0
stats[FINISHED] = 0
stats[DELIVERED] = 0
stats[REJECTED] = 0
stats[ACCEPTED] = 0
for s in self.stories:
stats[s.current_state] = stats[s.current_state] + 1
return stats
def get_links(self):
if self.engman_data is None or 'links' not in self.engman_data.keys():
return None
return [ "%s: %s" % (name, self.engman_data['links'][name]) for name in self.engman_data['links']]
class Activity(Base):
_person = None
@staticmethod
def fetch_all(project_id, epic_id):
endpoint = '/projects/%s/epics/%s/activity' % (project_id, epic_id)
return Activity.fetch(endpoint, lambda aa: [Activity(a) for a in aa])
def __getattr__(self, attr):
if attr in ['performed_by']:
if self._person is None:
self._person = Person(super().__getattr__('performed_by'))
return self._person
return super().__getattr__(attr)
class Person(Base):
pass
class Label(Base):
pass
class Story(Base):
def __getattr__(self, attr):
if attr in ['accepted_at']:
return parse_date(self.json[attr])
return super().__getattr__(attr)
@staticmethod
def fetch_all(project_id, epic_label):
endpoint = 'projects/%s/stories?with_label=%s' % (str(project_id), urllib.parse.quote(epic_label))
return Story.fetch(endpoint, lambda ss: [Story(s) for s in ss])
def is_active(self):
pass
def get_points(self):
return self.estimate if self.has_estimate() else 0
def is_chore(self):
return self.story_type == CHORE
def is_bug(self):
return self.story_type == BUG
def is_feature(self):
return self.story_type == FEATURE
def has_estimate(self):
return 'estimate' in self.json.keys()
import knocker
import vk_api
from bs4 import BeautifulSoup
import requests
import datetime
import time
import os
import Farseer
import json
class SheduleBot:
def __init__(self):
self.days = ['', '']
self.group = None
pass
def main(self, targetUrl: str):
if targetUrl == "":
url = "http://time-rtu.ru/?group=%D0%91%D0%91%D0%91%D0%9E-05-18"
else:
            d = datetime.date.today()
            a = ''  # build today's date as a DD.MM.YYYY string
url = targetUrl
if d.day < 10:
a += f'0{str(d.day)}.'
else:
a += f'{str(d.day)}.'
if d.month < 10:
a += f'0{str(d.month)}.'
else:
a += f'{str(d.month)}.'
a += f'{str(d.year)}'
url += f'#{a}'
self.date = a
headers = {'accept': "*/*",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0",
"referer": url}
session = requests.Session()
rawHtml = session.get(url=url, headers=headers)
preParsedHtml = BeautifulSoup(rawHtml.content, "html.parser")
i = 0
cards = preParsedHtml.find_all('div', attrs={"id": "card"})
for card in preParsedHtml.find_all('div', attrs={"id": "card"}):
if i < 2:
try:
if self.days[i] == "":
if card.contents[1].attrs['name'] == str(self.date):
date = card.contents[1].attrs['name']
self.days[i] += (date + "\n")
print(date)
for _lesson in card.contents[5].contents[1]:
if _lesson != "\n" and _lesson != 'Выходной':
print (_lesson.text)
time = _lesson.contents[1].text.replace('\n', '').replace(" ", '')
print(time)
self.days[i] += (time + "\n")
for lesson in _lesson.contents:
if (lesson != '\n') and (len(lesson.contents) > 1):
subject = lesson.contents[1].text.replace('\n', '').replace(" ", '')
auditory = lesson.contents[3].text.replace('\n', '').replace(" ", '')
teacher = lesson.contents[5].text.replace('\n', '').replace(" ", '')
border = ''
if len(lesson.contents) == 9:
border = lesson.contents[7].text.replace('\n', '').replace(" ", '')
self.days[i] += (subject + '\n' + auditory +
'\n' + teacher + '\n' + border + '\n')
print(subject)
print(auditory)
print(teacher)
print(border)
if subject == '- - - - - - - - - - - - - - - - - - - - - - - ':
if self.group["finalClassEnds"] == "":
self.group["finalClassEnds"] = time.replace(" ", "")
# self.group["finalClassEnds"] = "10:44"
else:
pass
if time == '18:10':
if self.group['finalClassEnds'] == '':
self.group['finalClassEnds'] = "19:40"
pass
elif _lesson == 'Выходной':
if i == 1:
self.days[i] = "Завтра пар нет. Кути, бухай, еби гусей!\n\n"
elif i == 0:
self.days[i] = "Сегодня пар нет. Кути, бухай, еби гусей!\n\n"
self.group['finalClassEnds'] = "18:00"
i += 1
d = datetime.date.today()
a = f""
url = targetUrl
if d.day+1 < 10:
a += f'0{str(d.day+1)}.'
else:
a += f'{str(d.day+1)}.'
if d.month < 10:
a += f'0{str(d.month)}.'
else:
a += f'{str(d.month)}.'
a += f'{str(d.year)}'
url += f'#{a}'
self.date = a
except Exception as e:
print(str(e))
pass
pass
else:
break
pass
print(os.getpid())
token = open("./token.token", 'r').readline()
bot = knocker.Knocker(token=token)
Farseer.SpawnConfig(name = "SheduleBot", peerId = 160500068)
sheduleBot = SheduleBot()
while True:
m = datetime.datetime.now().minute
if m >= 10:
currentTime = str(datetime.datetime.now().hour) + ":" + str(datetime.datetime.now().minute)
else:
currentTime = str(datetime.datetime.now().hour) + ":0" + str(datetime.datetime.now().minute)
config = json.load(open('./config.json', 'r'))
for group in config["Groups"]:
sheduleBot.group = group
if currentTime == "6:00" and group["finalClassEnds"] == '':
sheduleBot.main(targetUrl= group['url'])
for peer in group['peers']:
print("sent to " + str(peer))
bot.SendMsg(messageText=sheduleBot.days[0], peerId=peer)
pass
elif currentTime == group["finalClassEnds"]:
sheduleBot.main(group['url'])
for peer in group['peers']:
bot.SendMsg(messageText=sheduleBot.days[1], peerId=peer)
pass
group["finalClassEnds"] = ""
json.dump(config, open("./config.json", "w"))
time.sleep(10)
# Repository: DanielMiao1/ChessGraphics
# -*- coding: utf-8 -*-
"""
board.py
Chess Board Graphics
"""
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from chess.functions import *
class Promotion(QLabel):
def __init__(self, parent, piece_symbol, piece, color):
super(Promotion, self).__init__(parent)
self.setCursor(Qt.PointingHandCursor)
self.piece_symbol, self.piece, self.color = piece_symbol, piece, color
self.hide()
def mouseReleaseEvent(self, event):
if self.parent().piece is not None:
			self.parent().move_name.name = self.parent().move_name.name[:-1] + self.piece_symbol
			self.parent().move_name.promotion = self.piece_symbol
self.parent().piece.movePiece(self.parent().move_name, promotion=True)
super(Promotion, self).mouseReleaseEvent(event)
def resizeEvent(self, event):
self.setPixmap(QPixmap("images/standard/" + self.color + "_" + self.piece + ".png").scaled(event.size().width(), event.size().width()))
super(Promotion, self).resizeEvent(event)
class Promotions(QPushButton):
def __init__(self, parent, color):
super(Promotions, self).__init__(parent)
self.piece = self.move_name = None
self.promotions = []
self.color = color
for x, y in self.parent().game.properties["promotions"].items():
self.promotions.append(Promotion(self, x, y, color))
self.setStyleSheet("border: none; background: rgba(0, 0, 0, 0.2);")
self.hide()
def showEvent(self, event):
for i in self.promotions:
i.show()
super(Promotions, self).showEvent(event)
def hideEvent(self, event):
for i in self.promotions:
i.hide()
super(Promotions, self).hideEvent(event)
def updatePosition(self, position):
self.move(position)
self.resize(QSize((self.parent().parent().width() // 25), (self.parent().parent().width() // 25) * len(self.promotions)))
for x, y in enumerate(self.promotions):
y.move(QPoint(0, 0 + (self.parent().parent().width() // 25 * x)))
y.resize(QSize(self.parent().parent().width() // 25, self.parent().parent().width() // 25))
self.raise_()
class MoveBullet(QLabel):
def __init__(self, parent, piece, move, position) -> None:
super(MoveBullet, self).__init__(parent)
self.hide()
self.piece = piece
self.position = position
self.setCursor(Qt.PointingHandCursor)
self.setPixmap(QPixmap("images/bullet.png").scaled(self.parent().parent().width() // 25, self.parent().parent().width() // 25))
self.resize(QSize(self.parent().parent().width() // 25, self.parent().parent().width() // 25))
self.move((coordinateToIndex(self.position)[1] + 1) * (self.parent().parent().width() // 25), (coordinateToIndex(self.position)[0] + 1) * (self.parent().parent().width() // 25))
self.move_ = self.move
self.move = move
def enterEvent(self, event: QHoverEvent) -> None:
self.setStyleSheet("background-color: rgba(12, 36, 255, 0.5)")
super(MoveBullet, self).enterEvent(event)
def leaveEvent(self, event: QHoverEvent) -> None:
self.setStyleSheet("background-color: transparent")
super(MoveBullet, self).leaveEvent(event)
def mousePressEvent(self, event) -> None:
if event.button() == Qt.LeftButton:
self.piece.movePiece(self.move)
super(MoveBullet, self).mousePressEvent(event)
def resizeEvent(self, event):
self.setPixmap(QPixmap("images/bullet.png").scaled(event.size().width(), event.size().width()))
self.move_((coordinateToIndex(self.position)[1] + 1) * event.size().width(), (coordinateToIndex(self.position)[0] + 1) * event.size().width())
super(MoveBullet, self).resizeEvent(event)
class Piece(QLabel):
def __init__(self, parent, position, color, piece) -> None:
super(Piece, self).__init__(parent=parent)
self.piece, self.color = piece, color
self.position = position
self.move_animation = None
self.moves_loaded = True
self.moves = [MoveBullet(self.parent(), self, i, i.new_position) for i in self.parent().game.pieceAt(self.position).moves(show_data=True, evaluate_checks=self.parent().parent().variant == "Three Check")]
self.showing_moves = False
self.dragging = False
self.setCursor(Qt.PointingHandCursor)
self.setPixmap(QPixmap("images/standard/" + color + "_" + piece).scaled(self.parent().parent().width() // 25, self.parent().parent().width() // 25))
self.resize(QSize(self.parent().parent().width() // 25, self.parent().parent().width() // 25))
self.setFocusPolicy(Qt.ClickFocus)
def showMoves(self, change_background=True):
if self.parent() is None:
return
if self.parent().game.game_over:
return
if change_background:
self.setStyleSheet("background-color: rgba(86, 12, 255, 0.5);")
for x in self.parent().pieces:
if x.showing_moves:
x.showing_moves = False
x.setStyleSheet("background-color: transparent;")
for y in x.moves:
y.hide()
if self.parent().game.turn == self.color:
if not self.moves_loaded:
self.moves = [MoveBullet(self.parent(), self, i, i.new_position) for i in self.parent().game.pieceAt(self.position).moves(show_data=True, evaluate_checks=self.parent().parent().variant == "Three Check")]
self.moves_loaded = True
for i in self.moves:
i.show()
def moveEvent(self, event):
if not self.dragging:
self.original_position = event.pos()
super(Piece, self).moveEvent(event)
def mousePressEvent(self, event) -> None:
if event.button() == Qt.LeftButton:
if self.color != self.parent().game.turn or (self.parent().parent().type_ == "computer" and self.color != self.parent().parent().player_color) or (self.parent().parent().type_ == "computer" and self.parent().parent().computer_moving):
for x in self.parent().pieces:
if x.showing_moves:
for y in x.moves:
y.hide()
x.showing_moves = False
x.setStyleSheet("background-color: transparent;")
self.setStyleSheet("background-color: rgba(86, 12, 255, 0.5);")
super(Piece, self).mousePressEvent(event)
self.mouse_event_start = self.mouse_event_position = None
return
if event.button() == Qt.MouseButton.RightButton:
self.showing_moves = False
self.setStyleSheet("background-color: transparent;")
for i in self.moves:
i.hide()
for i in self.parent().squares:
if i.position == self.position:
i.mousePressEvent(event, remove_move_bullets=False)
break
self.mouse_event_start = self.mouse_event_position = None
if event.button() == Qt.LeftButton:
self.mouse_event_start, self.mouse_event_position = event.globalPos(), event.globalPos()
super(Piece, self).mousePressEvent(event)
def mouseMoveEvent(self, event) -> None:
if event.buttons() == Qt.LeftButton and not self.parent().parent().game_over and self.mouse_event_start is not None and self.mouse_event_position is not None:
			if not self.dragging:
self.dragging = True
self.parent().drag_square = Square(self.parent(), "rgba(86, 12, 255, 0.5);", self.position)
self.parent().drag_square.move(self.pos())
self.parent().drag_square.show()
self.raise_()
if self.showing_moves:
self.setStyleSheet("background-color: transparent;")
else:
self.showMoves(False)
self.showing_moves = True
self.raise_()
self.move(self.mapFromGlobal(self.mapToGlobal(self.pos()) + event.globalPos() - self.mouse_event_position))
self.mouse_event_position = event.globalPos()
super(Piece, self).mouseMoveEvent(event)
def mouseReleaseEvent(self, event) -> None:
		super(Piece, self).mouseReleaseEvent(event)
if self.dragging:
self.parent().drag_square.deleteLater()
self.parent().drag_square = None
self.dragging = False
if (event.globalPos() - self.mouse_event_start).manhattanLength() < 50:
self.move(self.original_position)
self.showMoves()
self.showing_moves = True
self.setStyleSheet("background-color: rgba(86, 12, 255, 0.5);")
return
for i in self.moves:
if event.globalPos() in QRect(self.parent().mapToGlobal(i.pos()), i.size()):
if i.move.name in self.parent().game.legal_moves():
self.movePiece(i.move, animate=False)
self.showing_moves = False
for i in self.moves:
i.hide()
self.move(self.original_position)
return
if event.button() == Qt.LeftButton:
if self.showing_moves:
self.setStyleSheet("background-color: transparent;")
if not (self.parent().parent().type_ == "computer" and self.color != self.parent().parent().player_color) and not (self.parent().parent().type_ == "computer" and self.parent().parent().computer_moving):
for i in self.moves:
i.hide()
else:
if not (self.parent().parent().type_ == "computer" and self.color != self.parent().parent().player_color) and not (self.parent().parent().type_ == "computer" and self.parent().parent().computer_moving):
self.showMoves()
self.showing_moves = not self.showing_moves
def movePiece(self, move, animate=True, promotion=False) -> None:
if self.parent() is None:
return
if self.parent().parent().game_over:
return
self.setStyleSheet("background-color: transparent;")
if self.piece == "pawn":
if self.color == "white":
if move.new_position[1] == "8":
if promotion:
self.parent().promotion_dialog_white.hide()
else:
self.parent().promotion_dialog_white.move_name = move
self.parent().promotion_dialog_white.piece = self
self.parent().promotion_dialog_white.updatePosition(QPoint((coordinateToIndex(move.new_position)[1] + 1) * (self.parent().parent().width() // 25), (coordinateToIndex(move.new_position)[0] + 1) * (self.parent().parent().width() // 25)))
self.parent().promotion_dialog_white.show()
return
else:
if move.new_position[1] == "1":
if promotion:
self.parent().promotion_dialog_black.hide()
else:
self.parent().promotion_dialog_black.move_name = move
self.parent().promotion_dialog_black.piece = self
self.parent().promotion_dialog_black.updatePosition(QPoint((coordinateToIndex(move.new_position)[1] + 1) * (self.parent().parent().width() // 25), (coordinateToIndex(move.new_position)[0] + 1) * (self.parent().parent().width() // 25)))
self.parent().promotion_dialog_black.show()
return
if move.is_capture:
for i in self.parent().pieces:
i.moves_loaded = False
if i.position == move.captured_piece.position:
i.setParent(None)
else:
for i in self.parent().pieces:
i.moves_loaded = False
self.position = move.new_position
if animate:
self.move_animation = QPropertyAnimation(self, b"pos")
self.move_animation.setEndValue(QPoint((coordinateToIndex(self.position)[1] + 1) * (self.parent().parent().width() // 25), (coordinateToIndex(self.position)[0] + 1) * (self.parent().parent().width() // 25)))
self.move_animation.setDuration({"Default": 100, "Slow": 225, "Fast": 50}[self.parent().parent().settings_values["piece-animation-speed"]])
self.move_animation.start()
else:
self.move(QPoint((coordinateToIndex(self.position)[1] + 1) * (self.parent().parent().width() // 25), (coordinateToIndex(self.position)[0] + 1) * (self.parent().parent().width() // 25)))
if move.castle is not None:
if animate:
self.parent().castle_rook_animation = QPropertyAnimation(self.parent().pieceAt(move.castle_rook.position), b"pos")
if move.castle == "kingside":
if animate:
self.parent().castle_rook_animation.setEndValue(QPoint(6 * (self.parent().parent().width() // 25), (coordinateToIndex(move.castle_rook.position)[0] + 1) * (self.parent().parent().width() // 25)))
else:
self.parent().pieceAt(move.castle_rook.position).move(QPoint(6 * (self.parent().parent().width() // 25), (coordinateToIndex(move.castle_rook.position)[0] + 1) * (self.parent().parent().width() // 25)))
self.parent().pieceAt(move.castle_rook.position).position = "f" + move.old_position[1]
else:
if animate:
self.parent().castle_rook_animation.setEndValue(QPoint(4 * (self.parent().parent().width() // 25), (coordinateToIndex(move.castle_rook.position)[0] + 1) * (self.parent().parent().width() // 25)))
else:
self.parent().pieceAt(move.castle_rook.position).move(QPoint(4 * (self.parent().parent().width() // 25), (coordinateToIndex(move.castle_rook.position)[0] + 1) * (self.parent().parent().width() // 25)))
self.parent().pieceAt(move.castle_rook.position).position = "d" + move.old_position[1]
if animate:
self.parent().castle_rook_animation.setDuration({"Default": 100, "Slow": 225, "Fast": 50}[self.parent().parent().settings_values["piece-animation-speed"]])
self.parent().castle_rook_animation.start()
self.parent().game.move(move)
if self.parent().parent().variant == "Atomic":
if move.is_capture:
squares = self.parent().parent().game.generateExplosionRadius(move.new_position)
if squares:
for i in self.parent().pieces[:]:
if i.piece == "pawn":
continue
if i.position in squares:
self.parent().pieces.remove(i)
i.deleteLater()
self.parent().white_king.setStyleSheet("background-color: transparent;")
self.parent().black_king.setStyleSheet("background-color: transparent;")
if self.parent().game.in_check:
if self.parent().game.turn == "white":
self.parent().white_king.setStyleSheet("background-color: #e96160;")
else:
self.parent().black_king.setStyleSheet("background-color: #e96160;")
for i in self.moves:
i.deleteLater()
self.moves = []
self.parent().parent().parent().parent().setWindowTitle("2-Player Chess Game: " + self.parent().game.turn.title() + " to move")
self.parent().parent().addMove(move.name)
if promotion:
self.piece = move.piece.piece_type
self.setPixmap(QPixmap("images/standard/" + self.color + "_" + self.piece).scaled(self.width(), self.width()))
def resizeEvent(self, event):
self.setPixmap(QPixmap("images/standard/" + self.color + "_" + self.piece).scaled(event.size().width(), event.size().width()))
super(Piece, self).resizeEvent(event)
class Square(QPushButton):
def __init__(self, parent, color, position) -> None:
super(Square, self).__init__(parent=parent)
self.resize(QSize((self.parent().parent().width() // 25), (self.parent().parent().width() // 25)))
self.setStyleSheet(f"background-color: {color}; border: none;")
self.position = position
self.highlight_square = QPushButton(self.parent())
self.highlight_square.setStyleSheet("background-color: rgba(0, 174, 255, 0.5); border: none;")
self.highlight_square.resize(self.size())
self.highlight_square.mousePressEvent = self.mousePressEvent
self.highlight_square.hide()
self.setFocusPolicy(Qt.ClickFocus)
def highlight(self):
if self.highlight_square.isHidden():
self.highlight_square.show()
else:
self.highlight_square.hide()
def mousePressEvent(self, event, remove_move_bullets=True) -> None:
if event.button() == Qt.MouseButton.LeftButton:
for i in self.parent().squares:
if not i.highlight_square.isHidden():
i.highlight_square.hide()
if remove_move_bullets:
for x in self.parent().pieces:
if x.showing_moves:
for y in x.moves:
y.hide()
x.showing_moves = False
x.setStyleSheet("background-color: transparent;")
else:
self.highlight()
super(Square, self).mousePressEvent(event)
def moveEvent(self, event):
self.highlight_square.move(event.pos())
super(Square, self).moveEvent(event)
def resizeEvent(self, event):
self.highlight_square.resize(event.size())
super(Square, self).resizeEvent(event)
class Board(QWidget):
def __init__(self, parent, game) -> None:
super(Board, self).__init__(parent=parent)
self.game = game
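		# Layout note: each square is self.parent().width() // 25 pixels wide; squares and pieces are
		# placed at ((column + 1) * square_size, (row + 1) * square_size), leaving a one-square margin.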
self.squares, self.pieces = [], []
self.drag_square = None
self.castle_rook_animation = None
self.promotion_dialog_white, self.promotion_dialog_black = Promotions(self, "white"), Promotions(self, "black")
for x in self.game.squares:
for y in x:
self.squares.append(Square(self, parent.settings_values["light-square-color"] if y.color == "white" else parent.settings_values["dark-square-color"], y.position))
self.squares[-1].move((coordinateToIndex(y.position)[1] + 1) * (self.parent().width() // 25), (coordinateToIndex(y.position)[0] + 1) * (self.parent().width() // 25))
for i in self.game.pieces:
self.pieces.append(Piece(self, i.position, i.color, i.piece_type))
self.pieces[-1].move((coordinateToIndex(i.position)[1] + 1) * (self.parent().width() // 25), (coordinateToIndex(i.position)[0] + 1) * (self.parent().width() // 25))
if i.piece_type == "king":
if i.color == "white":
self.white_king = self.pieces[-1]
if i.color == "black":
self.black_king = self.pieces[-1]
self.setFocusPolicy(Qt.ClickFocus)
def updatePieces(self):
for i in self.pieces:
i.deleteLater()
self.pieces = []
for i in self.game.pieces:
self.pieces.append(Piece(self, i.position, i.color, i.piece_type))
self.pieces[-1].move((coordinateToIndex(i.position)[1] + 1) * (self.parent().width() // 25), (coordinateToIndex(i.position)[0] + 1) * (self.parent().width() // 25))
self.pieces[-1].show()
if i.piece_type == "king":
if i.color == "white":
self.white_king = self.pieces[-1]
if i.color == "black":
self.black_king = self.pieces[-1]
def evaluateMove(self, string):
try:
string = toSAN(string, self.game)
except:
return False
legal_moves = self.game.legal_moves(show_data=True)
for i in legal_moves:
if i.name == string:
self.pieceAt(i.old_position).movePiece(i)
return True
return False
@staticmethod
def generateTemporaryValidCharacters(string):
if not string:
return ["a", "b", "c", "d", "e", "f", "g", "h", "P", "N", "B", "R", "Q", "K"]
if string[-1] == "x":
return ["a", "b", "c", "d", "e", "f", "g", "h"]
if string[-1] in ["a", "b", "c", "d", "e", "f", "g", "h"]:
return ["1", "2", "3", "4", "5", "6", "7", "8", "x"]
if string[-1].isnumeric() and len([True for i in string if i.isnumeric()]) <= 1:
return ["a", "b", "c", "d", "e", "f", "g", "h", "x"]
if string[-1] in ["P", "N", "B", "R", "Q", "K"]:
return ["a", "b", "c", "d", "e", "f", "g", "h", "x"]
if string[-1].upper() == "O" and len(string) <= 4:
return ["-"]
if string[-1] == "-" and "O" in string.upper():
return ["O"]
return []
def keyPressEvent(self, event):
if self.parent().temporary_move is None:
if event.text() in ["a", "b", "c", "d", "e", "f", "g", "h", "P", "N", "B", "R", "Q", "K", "O"]:
self.parent().addTemporaryMove(event.text())
if event.text() in ["O", "0"]:
return
if event.text() in ["a", "b", "c", "d", "e", "f", "g", "h"]:
for i in self.squares:
if i.position[0] == event.text():
if i.highlight_square.isHidden():
i.highlight_square.show()
else:
positions = list(map(lambda x: str(x.position), self.game.pieceType({"P": "pawn", "N": "knight", "B": "bishop", "R": "rook", "Q": "queen", "K": "king"}[event.text()], color=self.game.turn)))
for i in self.squares:
if i.position in positions:
if i.highlight_square.isHidden():
i.highlight_square.show()
else:
if not i.highlight_square.isHidden():
i.highlight_square.hide()
elif event.key() in [Qt.Key.Key_Backspace, Qt.Key.Key_Delete]:
self.parent().temporary_move.setText(self.parent().temporary_move.text()[:-1])
elif event.key() in [Qt.Key.Key_Return, Qt.Key.Key_Enter] and self.parent().temporary_move is not None:
temporary_move_text = self.parent().temporary_move.text()
self.parent().moves_layout.removeWidget(self.parent().temporary_move)
self.parent().move_buttons.remove(self.parent().temporary_move)
self.parent().temporary_move.deleteLater()
if self.evaluateMove(self.parent().temporary_move.text().replace("P", "")):
self.parent().temporary_move = None
for i in self.squares:
if not i.highlight_square.isHidden():
i.highlight_square.hide()
else:
self.parent().addTemporaryMove(temporary_move_text)
elif event.key() == Qt.Key.Key_Escape:
self.parent().moves_layout.removeWidget(self.parent().temporary_move)
self.parent().move_buttons.remove(self.parent().temporary_move)
self.parent().temporary_move.deleteLater()
self.parent().temporary_move = None
for i in self.squares:
if not i.highlight_square.isHidden():
i.highlight_square.hide()
elif event.text() in self.generateTemporaryValidCharacters(self.parent().temporary_move.text()):
self.parent().temporary_move.setText(self.parent().temporary_move.text() + event.text())
if event.text() in ["P", "N", "B", "R", "Q", "K"]:
positions = list(map(lambda x: str(x.position), self.game.pieceType({"P": "pawn", "N": "knight", "B": "bishop", "R": "rook", "Q": "queen", "K": "king"}[event.text()], color=self.game.turn)))
for i in self.squares:
if i.position in positions:
if i.highlight_square.isHidden():
i.highlight_square.show()
else:
if not i.highlight_square.isHidden():
i.highlight_square.hide()
elif event.text() in ["a", "b", "c", "d", "e", "f", "g", "h"]:
for i in self.squares:
if i.position[0] == event.text():
if i.highlight_square.isHidden():
i.highlight_square.show()
else:
if not i.highlight_square.isHidden():
i.highlight_square.hide()
elif event.text().isnumeric():
if self.pieceAt(self.parent().temporary_move.text()[-2:]) and self.pieceAt(self.parent().temporary_move.text()[-2:]).color == self.game.turn:
for i in self.squares:
if not i.highlight_square.isHidden():
i.highlight_square.hide()
self.pieceAt(self.parent().temporary_move.text()[-2:]).showMoves()
self.pieceAt(self.parent().temporary_move.text()[-2:]).showing_moves = True
self.pieceAt(self.parent().temporary_move.text()[-2:]).setStyleSheet("background-color: rgba(86, 12, 255, 0.5);")
else:
for i in self.squares:
if i.position == self.parent().temporary_move.text()[-2:]:
if i.highlight_square.isHidden():
i.highlight_square.show()
else:
if not i.highlight_square.isHidden():
i.highlight_square.hide()
super(Board, self).keyPressEvent(event)
def pieceAt(self, position):
for i in self.pieces:
if i.position == position:
return i
return False
def resizeComponents(self):
self.resize(QSize(self.parent().width() // 25 * 9, self.parent().width() // 25 * 9))
for i in self.squares:
i.resize(QSize(self.parent().width() // 25, self.parent().width() // 25))
i.move((coordinateToIndex(i.position)[1] + 1) * (self.parent().width() // 25), (coordinateToIndex(i.position)[0] + 1) * (self.parent().width() // 25))
for x in self.pieces:
x.resize(QSize(self.parent().width() // 25, self.parent().width() // 25))
x.move((coordinateToIndex(x.position)[1] + 1) * (self.parent().width() // 25), (coordinateToIndex(x.position)[0] + 1) * (self.parent().width() // 25))
for y in x.moves:
y.resize(QSize(self.parent().width() // 25, self.parent().width() // 25))
#!/usr/bin/env python3
import argparse
import os
import sys
import shutil
import subprocess
dir_path = os.path.dirname(os.path.realpath(__file__))
spack_version = 'v0.15.4'
spack_repo = 'https://github.com/spack/spack.git'
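# Example invocation (the script and machine names below are placeholders):
#   python config.py -i $SCRATCH/spack-root -m tsa -u ON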
def main():
parser = argparse.ArgumentParser(
description=
'Small config script which can be used to install a spack instance with the correct configuration files and mch spack packages.'
)
parser.add_argument(
'-i',
'--idir',
type=str,
default=dir_path,
required=True,
help=
'Where the Spack instance is installed or you want it to be installed')
parser.add_argument('-m',
'--machine',
type=str,
required=True,
help='Required: machine name')
parser.add_argument('-u',
'--upstreams',
type=str,
default='ON',
choices=('ON', 'OFF'),
help='ON or OFF, install upstreams.yaml file')
parser.add_argument('-v',
'--version',
type=str,
default=spack_version,
help='Spack version, Default: ' + spack_version)
parser.add_argument('-r',
'--reposdir',
type=str,
help='repos.yaml install directory')
parser.add_argument(
'-p',
'--pckgidir',
type=str,
help=
'Define spack package, modules installation directory. Default: tsa; /scratch/$USER/spack, daint; /scratch/snx3000/$USER/spack'
)
parser.add_argument(
'-s',
'--stgidir',
type=str,
help=
'Define spack stages directory. Default: tsa; /scratch/$USER/spack, daint; /scratch/snx3000/$USER/spack'
)
parser.add_argument(
'-c',
'--cacheidir',
type=str,
help=
'Define spack caches (source and misc) directories. Default: ~/.spack/machine/source_cache and ~/.spack/machine/cache'
)
args = parser.parse_args()
if not os.path.isdir(args.idir + '/spack'):
print('Cloning spack instance to: ' + args.idir)
if args.version is None:
args.version = spack_version
cmd = 'git clone {repo} -b {branch} {dest_dir}'.format(
repo=spack_repo,
branch=args.version,
dest_dir=os.path.join(args.idir, 'spack'))
subprocess.run(cmd.split(), check=True)
print('Installing custom dev-build command')
shutil.copy('./tools/spack-scripting/scripting/cmd/dev_build.py',
args.idir + '/spack/lib/spack/spack/cmd/')
sys.path.insert(1, os.path.join(args.idir, 'spack/lib/spack/external'))
from ruamel import yaml
print('Installing mch packages & ' + args.machine + ' config files.')
if not args.reposdir:
args.reposdir = args.idir + '/spack/etc/spack'
# installing repos.yaml
if not os.path.isdir(args.reposdir):
raise OSError(
"repository directory requested with -r does not exists: " +
args.reposdir)
print('Installing repos.yaml on ' + args.reposdir)
shutil.copy(dir_path + '/sysconfigs/repos.yaml', args.reposdir)
reposfile = os.path.join(args.reposdir, 'repos.yaml')
repos_data = yaml.safe_load(open(reposfile, 'r'))
repos_data['repos'] = [dir_path]
yaml.safe_dump(repos_data, open(reposfile, 'w'), default_flow_style=False)
# configure config.yaml
# copy config.yaml file in site scope of spack instance
configfile = args.idir + '/spack/etc/spack' + '/config.yaml'
shutil.copy(
'sysconfigs/' + args.machine.replace('admin-', '') + '/config.yaml',
configfile)
config_data = yaml.safe_load(open(configfile, 'r'))
if not args.pckgidir:
if 'admin' in args.machine:
args.pckgidir = '/project/g110'
else:
args.pckgidir = '$SCRATCH'
if not args.stgidir:
args.stgidir = '$SCRATCH'
if not args.cacheidir:
args.cacheidir = '~/.spack'
def to_spack_abs_path(path: str) -> str:
# Spack paths support environment variables and `~` in paths, so we need to handle them separately.
# (see: https://spack.readthedocs.io/en/latest/configuration.html#config-file-variables )
# It's enough to check only the start
# (environment variables in the middle of a path are fine):
if path.startswith(("$", "~")):
# We assume environment variables to be absolute.
# (we can't really fix them anyways, since they could change)
return path
# convert to absolute path
return os.path.realpath(path)
config_data['config']['install_tree'] = (
to_spack_abs_path(args.pckgidir) + '/spack-install/' +
args.machine.replace('admin-', ''))
config_data['config']['source_cache'] = (
to_spack_abs_path(args.cacheidir) + '/' +
args.machine.replace('admin-', '') + '/source_cache')
config_data['config']['misc_cache'] = (to_spack_abs_path(args.cacheidir) +
'/' +
args.machine.replace('admin-', '') +
'/cache')
config_data['config']['build_stage'] = [
to_spack_abs_path(args.stgidir) + '/spack-stages/' + args.machine
]
config_data['config']['module_roots']['tcl'] = (
to_spack_abs_path(args.pckgidir) + '/modules/' + args.machine)
config_data['config']['extensions'] = [dir_path + '/tools/spack-scripting']
yaml.safe_dump(config_data,
open(configfile, 'w'),
default_flow_style=False)
    # copy modified upstreams.yaml if requested (-u ON)
if args.upstreams == 'ON':
upstreamfile = args.idir + '/spack/etc/spack' + '/upstreams.yaml'
shutil.copy('sysconfigs/upstreams.yaml', upstreamfile)
upstreams_data = yaml.safe_load(open(upstreamfile, 'r'))
upstreams_data['upstreams']['spack-instance-1']['install_tree'] = '/project/g110/spack-install/' + \
args.machine.replace('admin-', '')
yaml.safe_dump(upstreams_data,
open(upstreamfile, 'w'),
default_flow_style=False)
    # copy modules.yaml, packages.yaml and compilers.yaml files into the site scope of the spack instance
config_files = ["compilers.yaml", "modules.yaml", "packages.yaml"]
for afile in config_files:
cmd = 'cp ' + dir_path + '/sysconfigs/' + args.machine.replace(
'admin-', '') + '/' + afile + ' ' + args.idir + '/spack/etc/spack/'
subprocess.run(cmd.split(), check=True)
print('Spack successfully installed. \nSource ' + args.idir +
'/spack/share/spack/setup-env.sh for setting up the instance.')
if __name__ == "__main__":
main()
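# Hypothetical invocation (the script name and the -i/--idir flag are assumed from the
# surrounding code, not confirmed here):
#   python config.py -i $SCRATCH -m tsa -u ON -v <spack_version>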
|
<gh_stars>0
#!/usr/bin/env python
"""
udocker unit tests: OciLocalFileAPI
"""
from unittest import TestCase, main
from udocker.oci import OciLocalFileAPI
try:
from unittest.mock import patch, Mock
except ImportError:
from mock import patch, Mock
class OciLocalFileAPITestCase(TestCase):
"""Test OciLocalFileAPI()."""
def setUp(self):
str_local = 'udocker.container.localrepo.LocalRepository'
self.lrepo = patch(str_local)
self.local = self.lrepo.start()
self.mock_lrepo = Mock()
self.local.return_value = self.mock_lrepo
def tearDown(self):
self.lrepo.stop()
# def test_01__init(self):
# """Test01 OciLocalFileAPI() constructor."""
@patch('udocker.oci.FileUtil.isdir')
@patch('udocker.oci.os.listdir')
@patch('udocker.container.localrepo.LocalRepository.load_json', autospec=True)
def test_02__load_structure(self, mock_ljson, mock_oslist, mock_isdir):
"""Test02 OciLocalFileAPI()._load_structure."""
mock_ljson.side_effect = [[], []]
status = OciLocalFileAPI(self.local)._load_structure('tmpimg')
self.assertEqual(status, {})
out_res = {'repolayers': {},
'manifest': {},
'oci-layout': 'oci_lay1',
'index': 'idx1'}
mock_ljson.side_effect = ['oci_lay1', 'idx1']
mock_oslist.return_value = ['f1']
mock_isdir.return_value = False
status = OciLocalFileAPI(self.local)._load_structure('tmpimg')
self.assertEqual(status, out_res)
out_res = {'repolayers': {'f1:f2': {'layer_a': 'f1',
'layer_f': 'tmpimg/blobs/f1/f2',
'layer_h': 'f2'}},
'manifest': {},
'oci-layout': 'oci_lay1',
'index': 'idx1'}
mock_ljson.side_effect = ['oci_lay1', 'idx1']
mock_oslist.side_effect = [['f1'], ['f2']]
mock_isdir.return_value = True
status = OciLocalFileAPI(self.local)._load_structure('tmpimg')
self.assertEqual(status, out_res)
def test_03__get_from_manifest(self):
"""Test03 OciLocalFileAPI()._get_from_manifest."""
imgtag = '123'
struct = {'manifest': {'123': {'json': {'layers': [{'digest': 'd1'},
{'digest': 'd2'}],
'config': {'digest': 'dgt'}}}}}
lay_out = ['d2', 'd1']
conf_out = 'dgt'
status = OciLocalFileAPI(self.local)._get_from_manifest(struct, imgtag)
self.assertEqual(status, (conf_out, lay_out))
# @patch('udocker.oci.Unique.imagename')
# @patch('udocker.oci.Unique.imagetag')
# @patch('udocker.container.localrepo.LocalRepository.load_json', autospec=True)
# def test_04__load_manifest(self, mock_ljson, mock_uniqtag, mock_uniqname):
# """Test04 OciLocalFileAPI()._load_manifest."""
# manifest = {'annotations': {'org.opencontainers.image.ref.name': '123'}}
# mock_uniqtag.return_value = '123'
# mock_uniqname.return_value = 'imgname'
# mock_ljson.return_value = {'layers': [{'digest': 'd1'},
# {'digest': 'd2'}],
# 'config': {'digest': 'dgt'}}
# struct = {'manifest': {'123': {'json': mock_ljson}},
# 'repolayers': {'digest': {'layer_a': 'f1',
# 'layer_f': 'tmpimg/blobs/f1/f2',
# 'layer_h': 'f2'}}
# }
# status = OciLocalFileAPI(self.local)._load_manifest(struct, manifest)
# self.assertEqual(status, ['123'])
# def test_05__load_repositories(self):
# """Test05 OciLocalFileAPI()._load_repositories."""
# def test_06__load_image_step2(self):
# """Test07 OciLocalFileAPI()._load_image_step2."""
@patch('udocker.oci.Msg.err')
@patch.object(OciLocalFileAPI, '_load_repositories')
@patch.object(OciLocalFileAPI, '_load_structure')
def test_07_load(self, mock_loadstruct, mock_loadrepo, mock_msg):
"""Test07 OciLocalFileAPI().load."""
tmpdir = '/ROOT'
imgrepo = 'somerepo'
mock_loadstruct.return_value = {}
status = OciLocalFileAPI(self.local).load(tmpdir, imgrepo)
self.assertTrue(mock_msg.called)
self.assertEqual(status, [])
tmpdir = '/ROOT'
imgrepo = 'somerepo'
mock_loadstruct.return_value = {'repolayers':
{'f1:f2': {'layer_a': 'f1',
'layer_f': 'tmpimg/blobs/f1/f2',
'layer_h': 'f2'}},
'manifest': {},
'oci-layout': 'oci_lay1',
'index': 'idx1'}
mock_loadrepo.return_value = ['r1', 'r2']
status = OciLocalFileAPI(self.local).load(tmpdir, imgrepo)
self.assertEqual(status, ['r1', 'r2'])
if __name__ == '__main__':
main()
|
from typing import (
List, Optional, Sequence, Dict, Tuple, Any,
Generator, ClassVar, Callable, Union,
)
from enum import Enum
import os
from dataclasses import dataclass
from html.parser import HTMLParser
from collections import defaultdict
import json
from abc import ABCMeta, abstractmethod
try:
import requests
_session: requests.Session = requests.Session()
def get_json(url: str) -> Optional[dict]:
response = _session.get(url)
if not (200 <= response.status_code < 300):
return None
return response.json()
except ImportError:
    from urllib import request, error
    def get_json(url: str) -> Optional[dict]:
        # urlopen raises HTTPError/URLError on failure rather than returning a response,
        # so treat those (and any non-2xx status) as "no data".
        try:
            response = request.urlopen(url)
        except error.URLError:
            return None
        if not (200 <= response.status < 300):
            return None
        return json.loads(response.read())
if os.name == "posix":
import sys
import tty
import termios
def getch() -> str:
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
else:
print("Sorry, but only posix systems are supported for now")
exit(1)
############################################################
ENABLE_CACHE: bool = True # Can't be disabled, will break things
PERSISTENT_CACHE: bool = True # Everything is lost upon restart if False
HN_API_BASE_URL: str = "https://hacker-news.firebaseio.com/v0"
SAVE_FILE: str = "hnjobs.json"
COLORS: bool = True
class ItemType(str, Enum):
JOB = "job"
STORY = "story"
COMMENT = "comment"
POLL = "poll"
POLLOPT = "pollopt"
@dataclass(kw_only=True, slots=False)
class HNItem(object):
id: int
type: ItemType
time: int # Unix timestamp
title: Optional[str] = None
text: Optional[str] = None # In HTML
parent: Optional[int] = None
kids: Sequence[int] = tuple()
descendants: Optional[int] = None
deleted: Optional[bool] = None
by: Optional[str] = None
url: Optional[str] = None
dead: Optional[bool] = None
score: Optional[int] = None
poll: Optional[int] = None
parts: Optional[List[int]] = None
def _get_item(id_: int) -> Optional[HNItem]:
dict_item = get_json(HN_API_BASE_URL + f"/item/{id_}.json")
if dict_item is None:
return None
return HNItem(**dict_item)
_item_cache: Dict[int, HNItem] = {}
def _get_item_cached(id_: int) -> Optional[HNItem]:
if (item := _item_cache.get(id_, None)) is not None:
return item
item = _get_item(id_)
if item is not None:
_item_cache[id_] = item
return item
get_item = _get_item_cached if ENABLE_CACHE else _get_item
class CustomHTMLParser(HTMLParser):
__slots__ = ("parts",)
parts: List[str]
def reset(self) -> None:
self.parts = []
super().reset()
def get_text(self) -> str:
return "".join(self.parts)
def handle_starttag(self, tag: str, attrs: List[Tuple[str, Any]]):
match tag:
case "br":
self.parts.append("\n")
case "p":
self.parts.append("\n\n")
def handle_endtag(self, tag: str):
pass
def handle_data(self, data: str):
self.parts.append(data)
def html_to_text(html: Optional[str]) -> str:
if html is None:
return ""
parser = CustomHTMLParser()
parser.feed(html)
parser.close()
return parser.get_text()
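# For instance, html_to_text("Hi,<p>we&#x27;re hiring") yields "Hi,\n\nwe're hiring":
# HTMLParser decodes character references by default, and <p>/<br> become line breaks.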
def display_item(item: HNItem) -> None:
os.system("clear")
if item.title is not None:
print(f"{item.title}\n")
print(html_to_text(item.text))
def get_all_kids(base_item: HNItem) -> Generator[HNItem, None, None]:
for id_ in base_item.kids:
if (item := get_item(id_)) is not None:
yield item
def command(arg: Union[Callable, str]) -> Callable:
if callable(arg):
key = arg.__name__[0]
elif isinstance(arg, str):
if len(arg) != 1:
raise Exception("Command shortcuts must be a single character")
key = arg
def annotate_func(func: Callable) -> Callable:
func.__shortcut__ = key
return func
if callable(arg):
return annotate_func(arg)
return annotate_func
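# Usage sketch: the decorator tags a UI method with a one-key shortcut that
# UserInterface.__init_subclass__ (below) collects into the tooltip/dispatch table, e.g.
#   @command          # shortcut "q" (first letter of the method name)
#   def quit(self): ...
#   @command("t")     # explicit shortcut "t"
#   def add_tags(self): ...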
class UserInterface(object, metaclass=ABCMeta):
__slots__ = ("_run",)
_tooltips_line: ClassVar[str]
_tooltips_dict: ClassVar[Dict[str, Callable[[], None]]]
_run: bool
def __init__(self):
super().__init__()
self._run = True
@staticmethod
def register_command(
dct: Dict[str, Callable],
shortcut: str,
command: Callable
) -> None:
if shortcut in dct:
shortcut = shortcut.swapcase()
if shortcut in dct:
raise Exception(f"Cannot register command {command}"
f" with shortcut {shortcut}")
dct[shortcut] = command
@classmethod
def __init_subclass__(cls, /, **kwargs) -> None:
super().__init_subclass__(**kwargs)
ttd = {}
for attr_name in dir(cls):
try:
attr = getattr(cls, attr_name)
except AttributeError:
continue
if (sc := getattr(attr, "__shortcut__", None)) is not None:
cls.register_command(ttd, sc, attr)
cls._tooltips_dict = ttd
tooltips: List[str] = []
for k, v in ttd.items():
name = v.__name__
if k.lower() == name[0].lower():
name = name[1:]
tooltips.append(f"({k}){name}")
cls._tooltips_line = " ".join(tooltips)
def wait_command(self) -> None:
while True:
ch = getch()
if ch not in self._tooltips_dict:
continue
break
self._tooltips_dict[ch](self)
@abstractmethod
def update_display(self) -> str:
raise NotImplementedError
@classmethod
def print_tooltips(cls) -> None:
print(f"{cls._tooltips_line}\n\n")
def refresh(self) -> None:
os.system("clear")
self.print_tooltips()
print(self.update_display())
def loop(self) -> None:
while self._run:
self.refresh()
self.wait_command()
def stop(self) -> None:
self._run = False
_item_user_tags: Dict[int, List[str]] = defaultdict(list)
_item_user_ratings: Dict[int, int] = {}
class MainInterface(UserInterface):
__slots__ = ("display",)
def __init__(self):
super().__init__()
self.display = ""
def update_display(self) -> str:
return "Main interface\n\n" + self.display
def display_now(self, s: str) -> None:
self.display = s
self.refresh()
@command
def quit(self) -> None:
self.stop()
@command
def update_jobs(self) -> None:
self.display_now("Please enter WhoIsHiring link or item id: ")
i = input()
try:
i = i.split("id=")[-1]
i = i.split("&")[0]
id_ = int(i, 10)
except Exception:
self.display_now("Bad link or id")
return
self.display_now("Fetching...")
item = get_item(id_)
if not item:
self.display_now("Could not fetch HN post!\n")
return
if item.by != "whoishiring":
self.display_now("This does not seem to be a WhoIsHiring post!\n")
return
self.display_now(f"There are {len(item.kids)} comments here,"
" fetch them? [y/n]")
while True:
c = getch()
if c.lower() == "n":
return
if c.lower() == "y":
break
self.display += "\nplease enter y or n"
self.refresh()
total = len(item.kids)
n = 0
self.display_now(f"fetching {total} items...")
for id_ in item.kids:
self.display_now(f"{n}/{total} comments fetched...")
n += 1
get_item(id_)
self.display += "\ndone."
@command
def select_some_items(self) -> None:
SelectorInterface().loop()
class InvalidFilterOrSorter(Exception):
pass
FILTER_FUNCS = {
"tag": lambda tag: lambda item: tag in _item_user_tags[item.id],
"rated": lambda _: lambda item: item.id in _item_user_ratings,
"contains": lambda s: lambda item: item.text and (s.lower() in item.text.lower()),
}
def filter_from_str(s: str) -> Callable:
inverted = False
if s.startswith("!"):
inverted = True
s = s[1:]
parts = s.split(":")
if len(parts) == 1:
filter_name, arg = parts[0], None
elif len(parts) == 2:
filter_name, arg = parts
else:
raise InvalidFilterOrSorter("Too many ':'")
try:
func = FILTER_FUNCS[filter_name](arg)
if inverted:
return lambda x: not func(x)
return func
except Exception as e:
raise InvalidFilterOrSorter(e)
SORTER_FUNCS = {
"tag": lambda tag: lambda item: 0 if tag in _item_user_tags[item.id] else 1,
"recent": lambda _: lambda item: -item.time,
    # Mixing ints and floats in the key is fine here: unrated items default to -inf,
    # so after negation they sort after every rated item.
"rating": lambda _: lambda item: -_item_user_ratings.get(item.id, float("-inf")),
"contains": lambda s: lambda item: 0 if (item.text and (s.lower() in item.text.lower())) else 1,
}
def sorter_from_str(s: str) -> Callable:
inverted = False
if s.startswith("!"):
inverted = True
s = s[1:]
parts = s.split(":")
if len(parts) == 1:
sorter_name, arg = parts[0], None
elif len(parts) == 2:
sorter_name, arg = parts
else:
raise InvalidFilterOrSorter("Too many ':'")
try:
func = SORTER_FUNCS[sorter_name](arg)
if inverted:
return lambda x: -func(x)
return func
except Exception as e:
raise InvalidFilterOrSorter(e)
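# Examples matching the "(!)<name>(:value)" format described in SelectorInterface.help below:
#   filter_from_str("tag:remote")    -> keeps items the user tagged "remote"
#   filter_from_str("!contains:php") -> keeps items whose text does not mention "php"
#   sorter_from_str("rating")        -> sorts highest user rating first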
class SelectorInterface(UserInterface):
__slots__ = "display", "filters", "sorters"
display: str
help: ClassVar[str] = (
"Filter/sorter format: (!)<name>(:value)\n"
"'!' inverts a filter/sorter\n"
f"Available filters: {', '.join(FILTER_FUNCS.keys())}\n"
f"Available sorters: {', '.join(SORTER_FUNCS.keys())}\n"
)
filters: List[str]
sorters: List[str]
def _summary(self) -> None:
self.display = (
f"{self.help}\n"
f"Current filters: {self.filters}\n"
f"Current sorters: {self.sorters}\n"
)
self.refresh()
def __init__(self):
super().__init__()
self.filters = []
self.sorters = []
self._summary()
def update_display(self) -> str:
return self.display
def display_now(self, s: str) -> None:
self.display = s
self.refresh()
@command("f")
def update_filters(self) -> None:
new_filters_str = input("\nEnter new filters separated with ','\n")
new_filters = new_filters_str.replace(" ", "").split(",")
for f in new_filters:
try:
filter_from_str(f)
except InvalidFilterOrSorter as e:
self.display += f"Filter {f} is invalid: {e}"
return
self.filters = new_filters
self._summary()
@command("s")
def update_sorters(self) -> None:
new_sorters_str = input("\nEnter new sorters separated with ','\n")
new_sorters = new_sorters_str.replace(" ", "").split(",")
for s in new_sorters:
try:
sorter_from_str(s)
except InvalidFilterOrSorter as e:
self.display += f"Sorter {s} is invalid: {e}"
return
self.sorters = new_sorters
self._summary()
def _get_selected(self) -> List[HNItem]:
items = filter(lambda item: item.type == ItemType.COMMENT, _item_cache.values())
for f in self.filters:
items = filter(filter_from_str(f), items)
items = list(items)
for s in self.sorters[::-1]:
items.sort(key=sorter_from_str(s))
return items
@command
def review_selected(self) -> None:
self.stop()
ReviewInterface(self._get_selected()).loop()
@command
def tag_selected(self) -> None:
tag = input("\nEnter the tag to add to matching items: ")
for item in self._get_selected():
tags = _item_user_tags[item.id]
if tag not in tags:
tags.append(tag)
@command
def quit(self) -> None:
self.stop()
class ReviewInterface(UserInterface):
__slots__ = "current_index", "items"
items: List[HNItem]
current_index: int
def __init__(self, items: List[HNItem]):
self.items = items
if not items:
self.stop()
return
self.current_index = 0
super().__init__()
@property
def current_item(self) -> HNItem:
return self.items[self.current_index]
def update_display(self) -> str:
item = self.current_item
return (
f"Item {self.current_index + 1}/{len(self.items)}\n"
f"Rating: {_item_user_ratings.get(item.id, '???')}\n"
f"Tags: {_item_user_tags[item.id]}\n"
"===============================================================\n"
f"{html_to_text(item.text)}"
)
@command
def next(self) -> None:
self.current_index += 1
self.current_index = min(len(self.items) - 1, self.current_index)
@command
def previous(self) -> None:
self.current_index -= 1
self.current_index = max(0, self.current_index)
@command("t")
def add_tags(self) -> None:
tags = _item_user_tags[self.current_item.id]
        new_tags = input("Enter new tags separated by ',':\n").split(",")
for tag in new_tags:
if tag not in tags:
tags.append(tag)
@command
def rate(self):
try:
rating = int(input("Enter new rating:\n"), 10)
_item_user_ratings[self.current_item.id] = rating
except ValueError:
return
@command
def quit(self) -> None:
self.stop()
def save() -> None:
to_save = {
"tags": _item_user_tags,
"ratings": _item_user_ratings,
}
if PERSISTENT_CACHE:
to_save["cache"] = dict(
(k, v.__dict__) for k, v in _item_cache.items())
json.dump(to_save, open(SAVE_FILE, "w"))
def load() -> None:
global _item_user_tags
global _item_user_ratings
global _item_cache
loaded: dict = json.load(open(SAVE_FILE, "r"))
_item_user_tags = defaultdict(
list,
((int(k), v) for k, v in loaded["tags"].items())
)
_item_user_ratings = dict(
(int(k), v) for k, v in loaded["ratings"].items()
)
if PERSISTENT_CACHE:
_item_cache = dict(
(int(k, 10), HNItem(**v))
for k, v in
loaded.get("cache", {}).items())
def main() -> None:
interface = MainInterface()
interface.loop()
if __name__ == "__main__":
try:
load()
except FileNotFoundError:
pass
try:
main()
except KeyboardInterrupt:
pass
finally:
save()
|
import numpy as np
from tqdm import trange
from mlutils.models._fm import _sgd_update
from sklearn.base import BaseEstimator, ClassifierMixin
class FactorizationMachineClassifier(BaseEstimator, ClassifierMixin):
"""
Factorization Machine [1]_ using Stochastic Gradient Descent.
For binary classification only.
    Warning: not ready for use yet.
Parameters
----------
n_iters : int, default 10
Number of iterations to train the algorithm.
n_factors : int, default 10
Number/dimension of features' latent factors.
learning_rate : float, default 0.1
Learning rate for the gradient descent optimizer.
reg_coef : float, default 0.01
Regularization strength for weights/coefficients.
reg_factors : float, default 0.01
Regularization strength for features' latent factors.
random_state : int, default 1234
Seed for the randomly initialized features latent factors
verbose : bool, default True
Whether to print progress bar while training.
Attributes
----------
intercept_ : double
Intercept term, w0 based on the original notations.
coef_ : 1d ndarray, shape [n_features,]
Coefficients, w based on the original notations.
feature_factors_ : 2d ndarray, shape [n_factors, n_features]
Latent factors for all features. v based on the original
notations. The learned factors can be viewed as the
embeddings for each features. If a pair of features tends
to co-occur often, then their embeddings should be
close/similar (in terms of cosine similarity) to each other.
history_ : list
Loss function's history at each iteration, useful
for evaluating whether the algorithm converged or not.
References
----------
.. [1] `S. Rendle Factorization Machines (2010)
<http://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf>`_
Ideas for improvements
1. summed use pointer and memset
2. hogwild for samples
3. cache exp (gensim word2vec)
4. lambda_t ??, do we need the learning rate weight decay
5. early stopping for the fit function
6. allow freezing embeddings
"""
def __init__(self, n_iters = 10, n_factors = 10,
learning_rate = 0.1, reg_coef = 0.01,
reg_factors = 0.01, random_state = 1234, verbose = False):
self.n_iters = n_iters
self.verbose = verbose
self.reg_coef = reg_coef
self.n_factors = n_factors
self.reg_factors = reg_factors
self.random_state = random_state
self.learning_rate = learning_rate
def fit(self, X, y):
"""
Fit the model to the input data and label.
Parameters
----------
X : scipy sparse csr_matrix, shape [n_samples, n_features]
Data in sparse matrix format.
y : 1d ndarray, shape [n_samples,]
Training data's corresponding label.
Returns
-------
self
"""
n_features = X.shape[1]
self.coef_ = np.zeros(n_features)
self.intercept_ = 0.0
        # the factors are often initialized with a mean of 0 and a standard deviation
        # of 1 / sqrt(number of latent factors specified)
np.random.seed(self.random_state)
self.feature_factors_ = np.random.normal(
scale = 1 / np.sqrt(self.n_factors), size = (self.n_factors, n_features))
# the gradient is implemented in a way that requires
# the negative class to be labeled as -1 instead of 0
y = y.copy().astype(np.int32)
y[y == 0] = -1
loop = range(self.n_iters)
if self.verbose:
loop = trange(self.n_iters)
self.history_ = []
for n_iter in loop:
loss = _sgd_update(X, y, self.intercept_, self.coef_,
self.feature_factors_, self.n_factors,
self.learning_rate, self.reg_coef, self.reg_factors)
self.history_.append(loss)
return self
def predict_proba(self, X):
"""
Probability estimates. The returned estimates for
all classes are ordered by the label of classes.
        Parameters
        ----------
X : scipy sparse csr_matrix, shape [n_samples, n_features]
Data in sparse matrix format.
Returns
-------
proba : 2d ndarray, shape [n_samples, n_classes]
The probability of the sample for each class in the model.
"""
pred = self._predict(X)
pred_proba = 1.0 / (1.0 + np.exp(-pred))
proba = np.vstack((1 - pred_proba, pred_proba)).T
return proba
def _predict(self, X):
"""Similar to _predict_instance but vectorized for all samples"""
linear_output = X * self.coef_
v = self.feature_factors_.T
term = (X * v) ** 2 - (X.power(2) * (v ** 2))
factor_output = 0.5 * np.sum(term, axis = 1)
return self.intercept_ + linear_output + factor_output
def predict(self, X):
"""
Predict class labels for samples in X.
Parameters
----------
X : scipy sparse csr_matrix, shape [n_samples, n_features]
Data in sparse matrix format.
Returns
-------
Predicted class label per sample.
"""
pred_proba = self.predict_proba(X)[:, 1]
        # NumPy removed the deprecated ``np.int`` alias; the builtin ``int`` is the drop-in.
        return pred_proba.round().astype(int)
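# Minimal usage sketch (not part of the library; assumes scipy is available and that
# mlutils.models._fm provides the _sgd_update imported above):
#   from scipy.sparse import csr_matrix
#   X = csr_matrix(np.array([[1., 0., 1.], [0., 1., 1.], [1., 1., 0.]]))
#   y = np.array([1, 0, 1])
#   clf = FactorizationMachineClassifier(n_iters=5, n_factors=2).fit(X, y)
#   clf.predict_proba(X)  # shape (3, 2), columns ordered [P(y=0), P(y=1)]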
|
import logging
import sys
from collections import namedtuple
class KanjiIterator:
def __init__(self, kanji):
self._values = iter(vars(kanji).values())
def __iter__(self):
return self
def __next__(self):
v = next(self._values)
if type(v) == list:
return ", ".join(v)
return v
class Kanji:
def __init__(self):
self.characters = ""
self.level = 0
self.sort_field = ""
self.meanings = [] # List of str.
self.readings_onyomi = [] # List of str.
self.readings_kunyomi = [] # List of str.
self.readings_nanori = [] # List of str.
self.meaning_mnemonic = ""
self.meaning_hint = ""
self.reading_mnemonic = ""
self.reading_hint = ""
def csv_iter(self):
return KanjiIterator(self)
@classmethod
def from_wanikani(cls, json):
"""json is the r["data"][n]["data"] of a response from WaniKani."""
c = cls()
c.characters = json["characters"]
c.level = json["level"]
c.sort_field = f"{c.level:02}" + "_1"
c.meanings = [m["meaning"] for m in json["meanings"]]
c.readings_onyomi = [r["reading"] for r in json["readings"] if r["type"] == "onyomi"]
c.readings_kunyomi = [r["reading"] for r in json["readings"] if r["type"] == "kunyomi"]
c.readings_nanori = [r["reading"] for r in json["readings"] if r["type"] == "nanori"]
c.meaning_mnemonic = json["meaning_mnemonic"]
c.meaning_hint = json["meaning_hint"]
c.reading_mnemonic = json["reading_mnemonic"]
c.reading_hint = json["reading_hint"]
return c
def __str__(self):
return f"{self.characters}\n" +\
f"{self.level}\n" +\
f"{self.meanings}\n" +\
f"{self.readings_onyomi}\n" +\
f"{self.readings_kunyomi}\n" +\
f"{self.readings_nanori}\n" +\
f"{self.meaning_mnemonic}\n" +\
f"{self.meaning_hint}\n" +\
f"{self.reading_mnemonic}\n" +\
f"{self.reading_hint}"
class RadicalIterator:
def __init__(self, radical):
self._values = iter(vars(radical).values())
def __iter__(self):
return self
def __next__(self):
v = next(self._values)
if type(v) == list:
return ", ".join(v)
return v
class Radical:
def __init__(self):
self.slug = ""
self.characters = ""
self.character_svg = ""
self.level = 0
self.sort_field = ""
self.meanings = [] # List of str.
self.meaning_mnemonic = ""
def csv_iter(self):
return RadicalIterator(self)
@classmethod
def from_wanikani(cls, json):
"""json is the r["data"][n]["data"] of a response from WaniKani."""
c = cls()
c.slug = json["slug"]
c.characters = json["characters"]
for i in json["character_images"]:
if i["content_type"] == "image/svg+xml" and\
"inline_styles" in i["metadata"] and\
i["metadata"]["inline_styles"] == False:
c.character_svg = i["url"]
break
if c.character_svg == "":
logging.warning(f"Radical with slug '{json['slug']}' has no svg")
if c.characters is None or c.characters == "":
logging.warning(f"Radical with slug '{json['slug']}' has no characters")
c.level = json["level"]
c.sort_field = f"{c.level:02}" + "_0"
c.meanings = [m["meaning"] for m in json["meanings"]]
c.meaning_mnemonic = json["meaning_mnemonic"]
return c
def __str__(self):
return f"{self.characters}\n" +\
f"{self.character_svg}\n" +\
f"{self.level}\n" +\
f"{self.meanings}\n" +\
f"{self.meaning_mnemonic}"
class VocabularyIterator:
def __init__(self, vocabulary):
self._values = iter(vars(vocabulary).values())
def __iter__(self):
return self
def __next__(self):
v = next(self._values)
        if type(v) == list:
            # Guard against empty lists (e.g. no context sentences) before
            # inspecting the first element.
            if v and type(v[0]) == SentencePair:
                div_en = '<div class="context-sentence-en">'
                div_jp = '<div class="context-sentence-jp">'
                div_end = "</div>"
                string = ""
                for pair in v:
                    string += div_en + pair.en + div_end + div_jp + pair.jp + div_end
                return string
            else:
                return ", ".join(v)
return v
SentencePair = namedtuple("SentencePair", ["en", "jp"])
class Vocabulary:
def __init__(self):
self.characters = ""
self.level = 0
self.sort_field = ""
self.meanings = [] # List of str.
self.readings = [] # List of str.
self.parts_of_speech = [] # List of str.
self.meaning_mnemonic = ""
self.reading_mnemonic = ""
self.context_sentences = [] # List of SentencePair.
def csv_iter(self):
return VocabularyIterator(self)
@classmethod
def from_wanikani(cls, json):
c = cls()
c.characters = json["characters"]
c.level = json["level"]
c.sort_field = f"{c.level:02}" + "_2"
c.meanings = [m["meaning"] for m in json["meanings"]]
c.readings = [r["reading"] for r in json["readings"]]
c.parts_of_speech = json["parts_of_speech"]
c.meaning_mnemonic = json["meaning_mnemonic"]
c.reading_mnemonic = json["reading_mnemonic"]
for cs in json["context_sentences"]:
c.context_sentences.append(SentencePair(cs["en"], cs["ja"]))
return c
def __str__(self):
return f"{self.characters}\n" +\
f"{self.level}\n" +\
f"{self.meanings}\n" +\
f"{self.readings}\n" +\
f"{self.parts_of_speech}\n" +\
f"{self.meaning_mnemonic}\n" +\
f"{self.reading_mnemonic}\n" +\
f"{[[cs.en, cs.jp] for cs in self.context_sentences]}"
|
<reponame>Jp29tkDg79/samplewebsite<gh_stars>0
from flask import Flask
from flask import render_template
from flask import request
from flask import url_for
import os
from database import person
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
return render_template('login.html', info='')
if request.method == 'POST':
# get data and check data
username = request.form.get('username')
password = request.form.get('password')
info = ''
        if username == '':
            info = 'ユーザ名が未入力です'  # "Username has not been entered"
        elif password == '':
            info = 'パスワードが未入力です'  # "Password has not been entered"
else:
# create persons object
persondb = person.persondb()
# check login data
match_count = persondb.check_login(username, password)
if match_count == 1:
return viewhome(username)
else:
                info = '登録されていません'  # "Not registered"
return render_template('login.html', info=info)
@app.route('/newentry', methods=['GET', 'POST'])
def newuser():
if request.method == 'GET':
return render_template('newentry.html', info='')
if request.method == 'POST':
# get data and check data
username = request.form.get('username')
password = request.form.get('password')
info = ''
        if username == '':
            info = 'ユーザ名が未入力です'  # "Username has not been entered"
        elif 14 < len(username):
            info = 'ユーザ名は14文字内で入力してください'  # "Username must be at most 14 characters"
        elif password == '':
            info = 'パスワードが未入力です'  # "Password has not been entered"
        elif password != request.form.get('re<PASSWORD>'):
            info = '入力したパスワードが異なります 再度入力してください'  # "Passwords do not match, please re-enter"
else:
# create persons object
persondb = person.persondb()
# insert data
err = persondb.insert(username, password)
if err == '':
return viewhome(username)
else:
                info = '既に登録されています'  # "Already registered"
return render_template('newentry.html', info=info)
@app.route('/change_pw/<username>', methods=['GET', 'POST'])
def change_pw(username):
if request.method == 'GET':
return render_template('change_pw.html', username=username, info='')
if request.method == 'POST':
befor_pw = request.form.get('befor_pw')
after_pw = request.form.get('after_pw')
info = ''
        if befor_pw == '':
            info = '変更前のパスワードが入力されていません'  # "Current password has not been entered"
        elif after_pw == '':
            info = '変更後のパスワードが入力されていません'  # "New password has not been entered"
        # check password
        elif after_pw != request.form.get('check_pw'):
            info = '変更後と再確認のパスワードが相違しています'  # "New password and confirmation do not match"
else:
# create person object
persondb = person.persondb()
err = persondb.update(username, befor_pw, after_pw)
if err == '':
return viewhome(username)
else:
                info = '変更前のパスワードが誤っています'  # "Current password is incorrect"
return render_template('change_pw.html', username=username, info=info)
@app.route('/home/<username>', methods=['GET'])
def viewhome(username):
return render_template('home.html', username=username)
@app.context_processor
def override_url_for():
return dict(url_for=dated_url_for)
def dated_url_for(endpoint, **values):
if endpoint == 'static':
filename = values.get('filename', None)
if filename:
file_path = os.path.join(app.root_path, endpoint, filename)
values['q'] = int(os.stat(file_path).st_mtime)
return url_for(endpoint, **values)
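# The override above makes url_for('static', ...) append ?q=<file mtime> to every static
# URL (e.g. /static/style.css?q=1589212800), so browsers re-fetch files whenever they
# change on disk instead of serving stale cached copies.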
def main(debug=False):
app.run(host='0.0.0.0', port='5000', debug=debug)
|
<reponame>colour-science/trimesh
try:
from . import generic as g
except BaseException:
import generic as g
class RepairTests(g.unittest.TestCase):
def test_fill_holes(self):
for mesh_name in ['unit_cube.STL',
'machinist.XAML',
'round.stl',
'sphere.ply',
'teapot.stl',
'soup.stl',
'featuretype.STL',
'angle_block.STL',
'quadknot.obj']:
mesh = g.get_mesh(mesh_name)
if not mesh.is_watertight:
# output of fill_holes should match watertight status
returned = mesh.fill_holes()
assert returned == mesh.is_watertight
continue
hashes = [{mesh._data.crc(),
mesh._data.md5(),
mesh._data.fast_hash()}]
mesh.faces = mesh.faces[1:-1]
assert not mesh.is_watertight
assert not mesh.is_volume
# color some faces
g.trimesh.repair.broken_faces(mesh,
color=[255, 0, 0, 255])
hashes.append({mesh._data.crc(),
mesh._data.md5(),
mesh._data.fast_hash()})
assert hashes[0] != hashes[1]
            # running fill_holes should now succeed
assert mesh.fill_holes()
# should be a superset of the last two
assert mesh.is_volume
assert mesh.is_watertight
assert mesh.is_winding_consistent
hashes.append({mesh._data.crc(),
mesh._data.md5(),
mesh._data.fast_hash()})
assert hashes[1] != hashes[2]
def test_fix_normals(self):
for mesh in g.get_meshes(5):
mesh.fix_normals()
def test_winding(self):
"""
Reverse some faces and make sure fix_face_winding flips
them back.
"""
meshes = [g.get_mesh(i) for i in
['unit_cube.STL',
'machinist.XAML',
'round.stl',
'quadknot.obj',
'soup.stl']]
for i, mesh in enumerate(meshes):
# turn scenes into multibody meshes
if g.trimesh.util.is_instance_named(mesh, 'Scene'):
meta = mesh.metadata
meshes[i] = mesh.dump().sum()
meshes[i].metadata = meta
timing = {}
for mesh in meshes:
# save the initial state
is_volume = mesh.is_volume
winding = mesh.is_winding_consistent
tic = g.time.time()
# flip faces to break winding
mesh.faces[:4] = g.np.fliplr(mesh.faces[:4])
# run the operation
mesh.fix_normals()
# make sure mesh is repaired to former glory
assert mesh.is_volume == is_volume
assert mesh.is_winding_consistent == winding
# save timings
timing[mesh.metadata['file_name']] = g.time.time() - tic
# print timings as a warning
g.log.warning(g.json.dumps(timing, indent=4))
def test_multi(self):
"""
Try repairing a multibody geometry
"""
# create a multibody mesh with two cubes
a = g.get_mesh('unit_cube.STL')
b = a.copy()
b.apply_translation([2, 0, 0])
m = a + b
# should be a volume: watertight, correct winding
assert m.is_volume
# flip one face of A
a.faces[:1] = g.np.fliplr(a.faces[:1])
# flip every face of A
a.invert()
# flip one face of B
b.faces[:1] = g.np.fliplr(b.faces[:1])
m = a + b
# not a volume
assert not m.is_volume
m.fix_normals(multibody=False)
# shouldn't fix inversion of one cube
assert not m.is_volume
# run fix normal with multibody mode
m.fix_normals()
# should be volume again
assert m.is_volume
# mesh should be volume of two boxes, and positive
assert g.np.isclose(m.volume, 2.0)
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
|
'''texplain
Create a clean output directory with only included files/citations.
Usage:
texplain [options] <input.tex> <output-directory>
Options:
--version Show version.
-h, --help Show help.
(c - MIT) <NAME> | <EMAIL> | www.geus.me | github.com/tdegeus/texplain
'''
__version__ = '0.3.4'
import os
import re
import sys
import docopt
import click
from copy import deepcopy
from shutil import copyfile
from shutil import rmtree
class TeX:
def __init__(self, filename):
if not os.path.isfile(filename):
raise IOError('"{0:s}" does not exist'.format(filename))
self.tex = open(filename, 'r').read()
self.dirname = os.path.dirname(filename)
self.filename = os.path.split(filename)[1]
if len(self.dirname) == 0:
self.dirname = '.'
has_input = re.search(r'(.*)(\\input\{)(.*)(\})', self.tex, re.MULTILINE)
has_include = re.search(r'(.*)(\\include\{)(.*)(\})', self.tex, re.MULTILINE)
if has_input or has_include:
raise IOError(r'TeX-files with \input{...} or \include{...} not yet supported')
def read_float(self, cmd=r'\includegraphics'):
r'''
Extract the keys of 'float' commands (e.g. "\includegraphics{...}", "\bibliography{...}") and
reconstruct their file-names.
:options:
**cmd** ([``r'\includegraphics'``] | ``<str>``)
The command to look for.
:returns:
A list ``[('key', 'filename'), (...), ...]`` in order of appearance.
'''
import numpy as np
# mimic the LaTeX behaviour where an extension is automatically added to a
# file-name without any extension
def filename(dirname, name):
if os.path.isfile(os.path.join(dirname, name)):
return os.path.relpath(os.path.join(dirname, name), dirname)
if os.path.isfile(os.path.join(dirname, name) + '.pdf'):
return os.path.relpath(os.path.join(dirname, name) + '.pdf', dirname)
if os.path.isfile(os.path.join(dirname, name) + '.eps'):
return os.path.relpath(os.path.join(dirname, name) + '.eps', dirname)
if os.path.isfile(os.path.join(dirname, name) + '.png'):
return os.path.relpath(os.path.join(dirname, name) + '.png', dirname)
if os.path.isfile(os.path.join(dirname, name) + '.jpg'):
return os.path.relpath(os.path.join(dirname, name) + '.jpg', dirname)
if os.path.isfile(os.path.join(dirname, name) + '.tex'):
return os.path.relpath(os.path.join(dirname, name) + '.tex', dirname)
if os.path.isfile(os.path.join(dirname, name) + '.bib'):
return os.path.relpath(os.path.join(dirname, name) + '.bib', dirname)
raise IOError('Cannot find {0:s}'.format(name))
# read the contents of the command
# - "\includegraphics" accepts "\includegraphics[...]{...}"
# - "\bibliography" rejects "\bibliographystyle{...}"
include = []
for i in self.tex.split(cmd)[1:]:
if i[0] in ['[', '{']:
include += [i.split('{')[1].split('}')[0]]
# extract the filename
out = [(i, filename(self.dirname, i)) for i in include]
# check for duplicates
filenames = [i[1] for i in out]
assert(np.unique(np.array(filenames)).size == len(filenames))
return out
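    # For a document containing "\includegraphics{figures/stress}" next to a file
    # "figures/stress.pdf", read_float(r'\includegraphics') would return
    # [('figures/stress', 'figures/stress.pdf')] (file names here are hypothetical).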
def rename_float(self, old, new, cmd=r'\includegraphics'):
r'''
Rename a key of a 'float' command (e.g. "\includegraphics{...}", "\bibliography{...}").
:arguments:
**old, new** (``<str>``)
The old and the new key.
:options:
**cmd** ([``r'\includegraphics'``] | ``<str>``)
The command to look for.
'''
text = self.tex.split(cmd)
for i in range(1, len(text)):
pre, key = text[i].split('{', 1)
key, post = key.split('}', 1)
if key != old:
continue
if text[i][0] not in ['[', '{']:
continue
text[i] = pre + '{' + new + '}' + post
self.tex = cmd.join(text)
def read_citation_keys(self):
r'''
Read the citation keys in the TeX file (those keys in "\cite{...}", "\citet{...}", ...).
        Note that the output is unique, in the order of appearance.
'''
# extract keys from "cite"
def extract(string):
try:
return list(re.split(
r'([pt])?(\[.*\]\[.*\])?(\{[a-zA-Z0-9\.\,\-\ \_]*\})',
string)[3][1: -1].split(','))
        except Exception:
if len(string) >= 100:
string = string[:100]
raise IOError('Error in interpreting\n {0:s} ...'.format(string))
# read all keys in "cite", "citet", "citep" commands
cite = [extract(i) for i in self.tex.split(r'\cite')[1:]]
    # deduplicate while preserving the order of appearance, as documented
    cite = list(dict.fromkeys(item for sublist in cite for item in sublist))
cite = [i.replace(' ', '') for i in cite]
return cite
def find_by_extension(self, ext):
r'''
Find all files with a certain extensions in the directory of the TeX-file.
'''
filenames = os.listdir(self.dirname)
return [i for i in filenames if os.path.splitext(i)[1] == ext]
def read_config(self):
r'''
        Read configuration files in the directory of the TeX-file. A possible extension would be
        to check whether the files are actually used.
'''
ext = ['.sty', '.cls', '.bst']
out = []
for e in ext:
out += self.find_by_extension(e)
return out
def bib_select(text, keys):
r'''
Limit a BibTeX file to a list of keys.
:arguments:
        **text** (``<str>``)
The BibTeX file, opened and read.
**keys** (``<list<str>>``)
The list of keys to select.
:returns:
The (reduced) BibTeX file, as string.
'''
text = '\n' + text
bib = list(filter(None, text.split('@')))[1:]
out = []
for i in bib:
if re.match(r'(string\{)(.*)', i):
continue
if re.match(r'(Comment\ )(.*)', i, re.IGNORECASE):
continue
if re.match(r'(comment\{)(.*)', i, re.IGNORECASE):
continue
if re.split(r'(.*\{)(.*)(,\n.*)', i)[2] in keys:
out += [i]
out = '\n@' + '\n@'.join(out)
while '\n\n\n' in out:
out = out.replace('\n\n\n', '\n\n')
return out
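# A small illustration: given a .bib file with entries "@article{Rendle2010, ...}" and
# "@book{Knuth1997, ...}", bib_select(text, ['Rendle2010']) keeps only the Rendle2010
# entry (keys and entries here are made up for the example).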
def from_commandline():
r'''
Main function (see command-line help)
'''
args = docopt.docopt(__doc__, version=__version__)
newdir = args['<output-directory>']
if not os.path.isfile(args['<input.tex>']):
raise IOError('"{0:s}" does not exist'.format(args['<input.tex>']))
if os.path.isdir(newdir):
if os.listdir(newdir):
raise IOError('"{0:s}" is not empty, please provide a new or empty directory'.format(newdir))
else:
os.makedirs(newdir)
old = TeX(args['<input.tex>'])
new = deepcopy(old)
new.dirname = newdir
includegraphics = old.read_float(r'\includegraphics')
bibfiles = old.read_float(r'\bibliography')
bibkeys = old.read_citation_keys()
config_files = old.read_config()
# Copy configuration files
for ofile in config_files:
copyfile(
os.path.join(old.dirname, ofile),
os.path.join(new.dirname, ofile))
# Copy/rename figures
if len(includegraphics) > 0:
new_includegraphics = []
for i, (okey, ofile) in enumerate(includegraphics):
nkey = 'figure_{0:d}'.format(i + 1)
ext = os.path.splitext(ofile)[1]
nfile = ofile.replace(os.path.normpath(okey), nkey)
if len(os.path.splitext(nfile)[1]) == 0:
nfile += ext
new_includegraphics += [(nkey, nfile)]
for (okey, ofile), (nkey, nfile) in zip(includegraphics, new_includegraphics):
new.rename_float(
okey,
nkey,
r'\includegraphics')
copyfile(
os.path.join(old.dirname, ofile),
os.path.join(new.dirname, nfile))
# Copy/reduce BibTeX files
if len(bibfiles) > 0:
if len(bibfiles) > 1:
raise IOError('texplain is only implemented for one BibTeX file')
okey, ofile = bibfiles[0]
nkey = 'library'
nfile = ofile.replace(os.path.normpath(okey), nkey)
bib = bib_select(
open(os.path.join(old.dirname, ofile), 'r').read(),
bibkeys)
new.rename_float(
okey,
nkey,
r'\bibliography')
open(os.path.join(new.dirname, nfile), 'w').write(bib)
# Write modified TeX file
output = os.path.join(new.dirname, 'main.tex')
if os.path.isfile(output):
output = os.path.join(new.dirname, new.filename)
open(output, 'w').write(new.tex)
def main():
try:
from_commandline()
except Exception as e:
print(e)
return 1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import time
import uuid
from .runners import create_node_runner
from .expr import AlgoCollectionExpr, ODPSModelExpr, ModelDataCollectionExpr, MetricsResultExpr
from .utils import is_temp_table
from ..df.backends.context import context
from ..df.backends.analyzer import BaseAnalyzer
from ..df.backends.engine import Engine
from ..df.backends.odpssql import types
from ..df.backends.odpssql.types import df_schema_to_odps_schema
from ..df.backends.errors import CompileError
from ..df.backends.utils import refresh_dynamic
from ..df import DataFrame
from ..df.expr.collections import Node, CollectionExpr, Scalar
from ..df.expr.core import ExprDAG
from ..df.expr.dynamic import DynamicMixin
from ..df.utils import is_source_collection, is_constant_scalar
from .. import options, tempobj, utils
from ..compat import six, futures
from ..errors import ODPSError
from ..models import Partition, Schema
from ..ui import fetch_instance_group, reload_instance_status
class OdpsAlgoContext(object):
def __init__(self, odps):
self._odps = odps
self._node_caches = dict()
def register_exec(self, idx, parameters):
pass
class OdpsAlgoAnalyzer(BaseAnalyzer):
def visit_algo(self, expr):
pass
class OdpsAlgoEngine(Engine):
def __init__(self, odps):
self._odps = odps
self._ctx = OdpsAlgoContext(odps)
self._instances = []
def _dispatch(self, expr_dag, expr, ctx):
if expr._need_cache and not ctx.is_cached(expr):
# when the expr should be disk-persisted, skip
if expr is expr_dag.root and not expr._mem_cache:
return None
return super(OdpsAlgoEngine, self)._dispatch(expr_dag, expr, ctx)
def stop(self):
for inst in self._instances:
try:
self._odps.stop_instance(inst.id)
except ODPSError:
pass
def _gen_table_name(self, expr):
if options.ml.dry_run:
if isinstance(expr, Node):
node_name = expr.node_name
else:
node_name = str(expr)
return '%s_%s' % (utils.TEMP_TABLE_PREFIX, utils.camel_to_underline(node_name))
table_name = '%s%s_%s' % (utils.TEMP_TABLE_PREFIX, int(time.time()),
str(uuid.uuid4()).replace('-', '_'))
tempobj.register_temp_table(self._odps, table_name)
return table_name
def _gen_model_name(self, expr):
from .utils import TEMP_MODEL_PREFIX
if options.ml.dry_run:
if isinstance(expr, Node):
node_name = expr.node_name
else:
node_name = str(expr)
return '%s%s' % (utils.TEMP_TABLE_PREFIX, utils.camel_to_underline(node_name))
model_id_str = utils.to_binary(str(int(time.time())) + '_' + str(uuid.uuid4()).replace('-', '_'))
digest = hashlib.md5(model_id_str).hexdigest()
model_name = TEMP_MODEL_PREFIX + digest[-(32 - len(TEMP_MODEL_PREFIX)):]
tempobj.register_temp_model(self._odps, model_name)
return model_name
def _reload_ui(self, group, instance, ui):
if group:
reload_instance_status(self._odps, group, instance.id)
ui.update_group()
return fetch_instance_group(group).instances.get(instance.id)
def _run(self, algo_name, params, metas, engine_kw, ui, **kw):
runner = create_node_runner(self, algo_name, params, metas, engine_kw, ui, **kw)
runner.execute()
def _new_analyzer(self, expr_dag, on_sub=None):
return OdpsAlgoAnalyzer(expr_dag, on_sub=on_sub)
def _build_model(self, expr, model_name):
if expr._is_offline_model:
model = self._odps.get_offline_model(model_name)
return ODPSModelExpr(_source_data=model, _is_offline_model=True)
model_params = expr._model_params.copy()
for meta in ['predictor', 'recommender']:
meta_val = getattr(expr, '_' + meta, None)
if meta_val:
model_params[meta] = meta_val
model = self._odps.get_tables_model(model_name, tables=list(six.iterkeys(expr._model_collections)))
model._params = model_params
sub = ODPSModelExpr(_source_data=model, _is_offline_model=False,
_model_params=expr._model_params.copy(), _predictor=expr._predictor)
data_exprs = dict()
for k, v in six.iteritems(expr._model_collections):
data_exprs[k] = ModelDataCollectionExpr(_mlattr_model=sub, _data_item=k)
data_exprs[k]._source_data = self._odps.get_table(data_exprs[k].table_name())
sub._model_collections = data_exprs
return sub
def _cache(self, expr_dag, dag, expr, **kwargs):
is_source_model = isinstance(expr, ODPSModelExpr) and expr_dag.root._source_data is not None
# prevent the `partition` and `partitions` kwargs come from `persist`
kwargs.pop('partition', None)
kwargs.pop('partitions', None)
if is_source_collection(expr_dag.root) or \
is_constant_scalar(expr_dag.root) or \
is_source_model:
return
execute_dag = ExprDAG(expr_dag.root, dag=expr_dag)
if isinstance(expr, CollectionExpr):
table_name = self._gen_table_name(expr)
table = self._odps.get_table(table_name)
root = expr_dag.root
sub = CollectionExpr(_source_data=table, _schema=expr.schema)
sub.add_deps(root)
expr_dag.substitute(root, sub)
kw = dict(kwargs)
kw['lifecycle'] = options.temp_lifecycle
execute_node = self._persist(table_name, execute_dag, dag, expr, **kw)
def callback(result):
if getattr(expr, 'is_extra_expr', False):
sub._source_data = result._source_data
if isinstance(expr, DynamicMixin):
sub._schema = types.odps_schema_to_df_schema(table.schema)
refresh_dynamic(sub, expr_dag)
execute_node.callback = callback
elif isinstance(expr, ODPSModelExpr):
model_name = self._gen_model_name(expr)
sub = self._build_model(expr, model_name)
root = expr_dag.root
sub.add_deps(root)
expr_dag.substitute(root, sub)
kw = dict(kwargs)
if 'lifecycle' in kw:
del kw['lifecycle']
execute_node = self._persist(model_name, execute_dag, dag, expr, **kw)
else:
assert isinstance(expr, Scalar) # sequence is not cache-able
class ValueHolder(object): pass
sub = Scalar(_value_type=expr.dtype)
sub._value = ValueHolder()
execute_node = self._execute(execute_dag, dag, expr, **kwargs)
def callback(res):
sub._value = res
execute_node.callback = callback
return sub, execute_node
def _write_persist_kw(self, name, expr, **kwargs):
if isinstance(expr, CollectionExpr):
persist_kw = kwargs.copy()
persist_kw['_table'] = name
project = persist_kw.pop('project', None)
if self._odps.project != project:
persist_kw['_project'] = project
expr.persist_kw = persist_kw
elif isinstance(expr, ODPSModelExpr):
persist_kw = kwargs.copy()
persist_kw['_model'] = name
project = persist_kw.pop('project', None)
if project is not None and self._odps.project != project:
persist_kw['_project'] = project
expr.persist_kw = persist_kw
def _persist(self, name, expr_dag, dag, expr, **kwargs):
self._write_persist_kw(name, expr, **kwargs)
return super(OdpsAlgoEngine, self)._persist(name, expr_dag, dag, expr, **kwargs)
@staticmethod
def _is_output_model_only(src_expr):
if isinstance(src_expr, MetricsResultExpr):
return False
output_exprs = src_expr.outputs()
return not any(1 for out_expr in six.itervalues(output_exprs) if isinstance(out_expr, CollectionExpr))
def _build_output_tables(self, expr):
from .expr.exporters import get_output_table_name
if not utils.str_to_bool(expr.algo_meta.get('buildTables', False)):
return
def create_output_table(table_name, table_schema):
lifecycle = options.temp_lifecycle if is_temp_table(table_name) else options.lifecycle
self._odps.create_table(table_name, table_schema, lifecycle=lifecycle)
table_names, table_schemas = [], []
for out_name, out_expr in six.iteritems(expr.outputs()):
if getattr(out_expr, '_algo', None) is None:
continue
tn = get_output_table_name(expr, out_name)
if tn:
ts = getattr(out_expr, '_algo_schema', None) or out_expr._schema
table_names.append(tn)
table_schemas.append(df_schema_to_odps_schema(ts))
executor = futures.ThreadPoolExecutor(10)
list(executor.map(create_output_table, table_names, table_schemas))
def _do_execute(self, expr_dag, src_expr, **kwargs):
expr = expr_dag.root
kwargs['_output_models_only'] = self._is_output_model_only(src_expr)
kw = kwargs.copy()
if isinstance(src_expr, ODPSModelExpr):
ui = kw.pop('ui')
progress_proportion = kw.pop('progress_proportion', 1)
download_progress = progress_proportion
ui_group = kw.pop('group', None)
if hasattr(src_expr, '_source_data'):
result_expr = src_expr
else:
if not context.is_cached(src_expr):
temp_name = self._gen_model_name(src_expr)
download_progress = 0.1 * progress_proportion
self._do_persist(expr_dag, src_expr, temp_name, ui=ui,
progress_proportion=0.9 * progress_proportion, group=ui_group, **kw)
result_expr = src_expr.get_cached(context.get_cached(src_expr))
if result_expr._is_offline_model:
from .expr.models.pmml import PmmlResult
from .runners import XFlowNodeRunner
model = result_expr._source_data
if not options.ml.use_model_transfer:
pmml = model.get_model()
return PmmlResult(pmml)
else:
volume_name = options.ml.model_volume
if not self._odps.exist_volume(volume_name):
self._odps.create_parted_volume(volume_name)
vol_part = hashlib.md5(utils.to_binary(model.name)).hexdigest()
tempobj.register_temp_volume_partition(self._odps, (volume_name, vol_part))
algo_params = {
'modelName': model.name,
'volumeName': volume_name,
'partition': vol_part,
'format': 'pmml'
}
runner = XFlowNodeRunner(self, 'modeltransfer', algo_params, {}, {},
ui=ui, progress_proportion=download_progress, group=ui_group)
runner.execute()
pmml = self._odps.open_volume_reader(volume_name, vol_part, model.name + '.xml').read()
self._odps.delete_volume_partition(volume_name, vol_part)
return PmmlResult(utils.to_str(pmml))
else:
from .expr.models.base import TablesModelResult
results = dict()
frac = 1.0 / len(result_expr._model_collections)
for key, item in six.iteritems(result_expr._model_collections):
result = item.execute(ui=ui, progress_proportion=frac * 0.1 * progress_proportion,
group=ui_group)
results[key] = result
return TablesModelResult(result_expr._model_params, results)
elif isinstance(src_expr, MetricsResultExpr):
if not src_expr.executed:
expr.tables = dict((pt.name, self._gen_table_name(src_expr)) for pt in src_expr.output_ports)
gen_params = expr.convert_params(src_expr)
ui = kw.pop('ui')
progress_proportion = kw.pop('progress_proportion', 1)
ui_group = kw.pop('group', None)
engine_kw = getattr(src_expr, '_engine_kw', {})
engine_kw['lifecycle'] = options.temp_lifecycle
if hasattr(src_expr, '_cases'):
kw['_cases'] = src_expr._cases
self._run(src_expr._algo, gen_params, src_expr.algo_meta, engine_kw, ui,
progress_proportion=progress_proportion, group=ui_group, **kw)
src_expr.executed = True
if options.ml.dry_run:
return None
else:
if hasattr(src_expr, '_result_callback'):
callback = src_expr._result_callback
else:
callback = lambda v: v
return callback(expr.calculator(self._odps))
else:
temp_name = self._gen_table_name(src_expr)
persist_kw = kwargs.copy()
persist_kw['_table'] = temp_name
expr.persist_kw = persist_kw
ui = kw.pop('ui')
progress_proportion = kw.pop('progress_proportion', 1)
ui_group = kw.pop('group', None)
kw['lifecycle'] = options.temp_lifecycle
df = self._do_persist(expr_dag, src_expr, temp_name, ui=ui,
progress_proportion=0.9 * progress_proportion, group=ui_group, **kw)
return df.execute(ui=ui, progress_proportion=0.1 * progress_proportion, group=ui_group)
def _handle_expr_persist(self, out_expr):
from ..df.backends.engine import ODPSSQLEngine
class ODPSEngine(ODPSSQLEngine):
def compile(self, expr, prettify=True, libraries=None):
expr = self._convert_table(expr)
expr_dag = expr.to_dag()
self._analyze(expr_dag, expr)
new_expr = self._rewrite(expr_dag)
sql = self._compile(new_expr, prettify=prettify, libraries=libraries)
if isinstance(sql, list):
return '\n'.join(sql)
return sql
if isinstance(out_expr, CollectionExpr):
partition = out_expr.persist_kw.get('partition')
partitions = out_expr.persist_kw.get('partitions')
drop_table = out_expr.persist_kw.get('drop_table', False)
create_table = out_expr.persist_kw.get('create_table', True)
drop_partition = out_expr.persist_kw.get('drop_partition', False)
create_partition = out_expr.persist_kw.get('create_partition', False)
overwrite = out_expr.persist_kw.get('overwrite', True)
cast = out_expr.persist_kw.get('cast', False)
expr_table = out_expr.persist_kw['_table']
expr_project = out_expr.persist_kw.get('_project')
expr_table_path = expr_table if expr_project is None else expr_project + '.' + expr_table
if partitions is None and partition is None:
if drop_table:
self._odps.delete_table(expr_table, project=expr_project, if_exists=True)
if self._odps.exist_table(expr_table):
temp_table_name = self._gen_table_name(out_expr)
out_expr.persist_kw['_table'] = temp_table_name
out_expr.persist_kw['_project'] = None
def callback():
t = self._odps.get_table(expr_table)
if t.schema.partitions:
                            raise CompileError('Cannot insert into partition table %s without specifying '
                                               '`partition` or `partitions`.' % expr_table_path)
expr = self._odps.get_table(temp_table_name).to_df()
expr = self._reorder(expr, t, cast=cast)
sql = ODPSEngine(self._odps).compile(expr, prettify=False)
action_str = 'OVERWRITE' if overwrite else 'INTO'
return 'INSERT {0} TABLE {1} \n{2}'.format(action_str, expr_table_path, sql)
return callback
else:
return None
elif partition is not None:
temp_table_name = self._gen_table_name(out_expr)
out_expr.persist_kw['_table'] = temp_table_name
out_expr.persist_kw['_project'] = None
def callback():
t = self._odps.get_table(temp_table_name)
for col in out_expr.schema.columns:
if col.name.lower() not in t.schema:
raise CompileError('Column(%s) does not exist in target table %s, '
'writing cannot be performed.' % (col.name, t.name))
if drop_partition:
t.delete_partition(partition, if_exists=True)
if create_partition:
t.create_partition(partition, if_not_exists=True)
expr = t.to_df()
expr = self._reorder(expr, t, cast=cast)
sql = ODPSEngine(self._odps).compile(expr, prettify=False)
action_str = 'OVERWRITE' if overwrite else 'INTO'
return 'INSERT {0} TABLE {1} PARTITION({2}) {3}'.format(
action_str, expr_table_path, partition, sql,
)
return callback
else:
temp_table_name = self._gen_table_name(out_expr)
out_expr.persist_kw['_table'] = temp_table_name
out_expr.persist_kw['_project'] = None
if isinstance(partitions, tuple):
partitions = list(partitions)
if not isinstance(partitions, list):
partitions = [partitions, ]
def callback():
t = self._odps.get_table(temp_table_name)
schema = t.schema
columns = [c for c in schema.columns if c.name not in partitions]
ps = [Partition(name=pt, type=schema.get_type(pt)) for pt in partitions]
if drop_table:
self._odps.delete_table(expr_table, project=expr_project, if_exists=True)
if create_table:
self._odps.create_table(expr_table, Schema(columns=columns, partitions=ps),
project=expr_project)
expr = t.to_df()
expr = self._reorder(expr, t, cast=cast, with_partitions=True)
sql = ODPSEngine(self._odps).compile(expr, prettify=False)
action_str = 'OVERWRITE' if overwrite else 'INTO'
return 'INSERT {0} TABLE {1} PARTITION({2}) {3}'.format(
action_str, expr_table_path, ', '.join(partitions), sql,
)
return callback
elif isinstance(out_expr, ODPSModelExpr):
drop_model = out_expr.persist_kw.get('drop_model', False)
expr_model = out_expr.persist_kw['_model']
if drop_model:
if out_expr._is_offline_model:
self._odps.delete_offline_model(expr_model, if_exists=True)
else:
self._odps.delete_tables_model(expr_model, if_exists=True)
def _do_persist(self, expr_dag, src_expr, name, partitions=None, partition=None, project=None,
drop_table=False, create_table=True, drop_partition=False, create_partition=False,
**kwargs):
from .runners import SQLNodeRunner
from .enums import PortType
expr = expr_dag.root
kwargs['_output_models_only'] = self._is_output_model_only(src_expr)
output_exprs = src_expr.outputs()
shared_kw = src_expr.shared_kw
shared_kw['required_outputs'] = dict()
if hasattr(src_expr, 'output_ports'):
for out_port in src_expr.output_ports:
if not out_port.required and out_port.name not in output_exprs:
continue
if out_port.name in output_exprs:
out_expr = output_exprs[out_port.name]
if not getattr(out_expr, 'persist_kw', None):
expr_name = self._gen_table_name(out_expr) if isinstance(out_expr, CollectionExpr) \
else self._gen_model_name(expr)
self._write_persist_kw(expr_name, out_expr, **kwargs)
else:
expr_name = self._gen_table_name(src_expr.node_name) if out_port.type == PortType.DATA \
else self._gen_model_name(src_expr.node_name)
shared_kw['required_outputs'][out_port.name] = expr_name
src_expr.shared_kw = shared_kw
kw = kwargs.copy()
ui = kw.pop('ui')
progress_proportion = kw.pop('progress_proportion', 1)
ui_group = kw.pop('group', None)
        # fall back to an empty dict, consistent with _do_execute above
        engine_kw = getattr(src_expr, '_engine_kw', None) or {}
if kw.get('lifecycle'):
engine_kw['lifecycle'] = kw['lifecycle']
elif options.lifecycle:
engine_kw['lifecycle'] = options.lifecycle
if hasattr(src_expr, '_cases'):
kw['_cases'] = src_expr._cases
if not options.ml.dry_run:
self._build_output_tables(src_expr)
sql_callbacks = []
expr.wait_execution()
if not src_expr.executed:
for out_expr in six.itervalues(output_exprs):
callback = self._handle_expr_persist(out_expr)
if callback is not None:
sql_callbacks.append(callback)
gen_params = expr.convert_params(src_expr)
if not src_expr.executed:
prog_ratio = 1
sub_ratio = 0
if sql_callbacks:
prog_ratio = 0.8
sub_ratio = (1 - prog_ratio) * progress_proportion / len(sql_callbacks)
try:
self._run(src_expr._algo, gen_params, src_expr.algo_meta, engine_kw, ui,
progress_proportion=prog_ratio * progress_proportion, group=ui_group, **kw)
for cb in sql_callbacks:
sql = cb()
runner = SQLNodeRunner(self, 'SQL', dict(sql=sql), dict(), dict(), ui,
progress_proportion=sub_ratio, group=ui_group)
runner.execute()
finally:
src_expr.executed = True
if getattr(src_expr, 'is_extra_expr', False):
t = src_expr._table_callback(self._odps, src_expr)
context.cache(src_expr, t)
if options.ml.dry_run:
df = CollectionExpr(_source_data=t, _schema=src_expr._schema)
else:
df = DataFrame(t)
df._ml_fields = src_expr._ml_fields
return df
ret = None
for out_name, out_expr in six.iteritems(output_exprs):
r = self._cache_expr_result(out_expr)
if out_name == src_expr._output_name:
ret = r
return ret
def _cache_expr_result(self, src_expr):
if isinstance(src_expr, ODPSModelExpr):
name = src_expr.persist_kw['_model']
model_expr = self._build_model(src_expr, name)
context.cache(src_expr, model_expr._source_data)
if not model_expr._is_offline_model:
params_str = utils.escape_odps_string(json.dumps(model_expr._source_data.params))
for k, v in six.iteritems(model_expr._model_collections):
if not options.ml.dry_run:
self._odps.run_sql("alter table %s set comment '%s'" % (v._source_data.name, params_str))
context.cache(src_expr._model_collections[k], v._source_data)
return model_expr
else:
name = src_expr.persist_kw['_table']
project = src_expr.persist_kw.get('_project')
t = self._odps.get_table(name, project=project)
context.cache(src_expr, t)
if options.ml.dry_run:
df = CollectionExpr(_source_data=t, _schema=src_expr._schema)
else:
df = DataFrame(t)
df._ml_fields = src_expr._ml_fields
return df
|
import csv
import os
import shutil
import sys
from datetime import date, time, datetime
from django.core.management import BaseCommand
from urllib.request import urlopen
from zipfile import ZipFile
from django.db import transaction
from gtfs.models import Agency, Stop, Route, Transfer, Calendar, CalendarDate, Trip, StopTime
ZIP = 'gtfs/gtfs.zip'
EXTRACTED = 'gtfs/gtfs'
class Command(BaseCommand):
def handle(self, *args, **options):
if not options['no_download']:
self.download()
self.clear_data()
with transaction.atomic():
self.import_stops()
self.import_agencies()
self.import_routes()
self.import_transfers()
self.import_calendar()
self.import_calendar_dates()
self.import_trips()
self.import_stop_times()
def add_arguments(self, parser):
parser.add_argument(
'--no-download',
action='store_true',
help='Only import previously downloaded data',
)
def download(self):
print('Download data ... ', end='')
sys.stdout.flush()
if os.path.isdir(EXTRACTED):
shutil.rmtree(EXTRACTED)
data = urlopen('https://opentransportdata.swiss/dataset/timetable-2019-gtfs/permalink')
with open(ZIP, 'wb') as output:
output.write(data.read())
zip_ref = ZipFile(ZIP, 'r')
zip_ref.extractall(EXTRACTED)
zip_ref.close()
print('done')
def clear_data(self):
print('Clear old data ... ', end='')
sys.stdout.flush()
StopTime.objects.all().delete()
Trip.objects.all().delete()
CalendarDate.objects.all().delete()
Calendar.objects.all().delete()
Transfer.objects.all().delete()
Route.objects.all().delete()
Stop.objects.all().delete()
Agency.objects.all().delete()
print('done')
def import_agencies(self):
print('Import agencies ... ', end='')
sys.stdout.flush()
with open(EXTRACTED + '/agency.txt', newline='', encoding='utf-8') as csvfile:
csv_reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
for row in csv_reader:
Agency.objects.create(**row)
print('done')
def import_stops(self):
print('Import stops ... ', end='')
sys.stdout.flush()
with open(EXTRACTED + '/stops.txt', newline='', encoding='utf-8-sig') as csvfile:
csv_reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
for row in csv_reader:
# Default the int fields
if row['parent_station'] == '':
row['parent_station'] = None
if row['location_type'] == '':
row['location_type'] = 0
# Rename parent_station so we insert the id rather than an object
row['parent_station_id'] = row.pop('parent_station')
# Get and process platform information which is in the id (in the case of SBB)
split_id = row['stop_id'].split(':')
if row['location_type'] == 0 and len(split_id) >= 3:
row['platform_code'] = split_id[2]
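# Example (hypothetical SBB-style id): '8507000:0:12'.split(':') yields
# ['8507000', '0', '12'], so the platform_code stored here would be '12'.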
Stop.objects.create(**row)
print('done')
def import_routes(self):
print('Import routes ... ', end='')
sys.stdout.flush()
with open(EXTRACTED + '/routes.txt', newline='', encoding='utf-8-sig') as csvfile:
csv_reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
for row in csv_reader:
Route.objects.create(**row)
print('done')
def import_transfers(self):
print('Import transfers ... ', end='')
sys.stdout.flush()
with open(EXTRACTED + '/transfers.txt', newline='', encoding='utf-8-sig') as csvfile:
csv_reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
for row in csv_reader:
Transfer.objects.create(**row)
print('done')
def import_calendar(self):
print('Import calendar ... ', end='')
sys.stdout.flush()
with open(EXTRACTED + '/calendar.txt', newline='', encoding='utf-8-sig') as csvfile:
csv_reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
for row in csv_reader:
row['start_date'] = datetime.strptime(row['start_date'], '%Y%m%d')
row['end_date'] = datetime.strptime(row['end_date'], '%Y%m%d')
Calendar.objects.create(**row)
print('done')
def import_calendar_dates(self):
print('Import calendar_dates ... ', end='')
sys.stdout.flush()
count = 0
with open(EXTRACTED + '/calendar_dates.txt', newline='', encoding='utf-8-sig') as csvfile:
csv_reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
calendar_dates = list()
for row in csv_reader:
row['date'] = datetime.strptime(row['date'], '%Y%m%d')
calendar_dates.append(CalendarDate(**row))
count += 1
if count % 1000 == 0:
print('\rImport calendar_dates ... ' + 'read ' + str(count) + ' records', end='')
sys.stdout.flush()
print('\rImport calendar_dates ... ' + 'inserting ' + str(count) + ' records into db', end='')
sys.stdout.flush()
CalendarDate.objects.bulk_create(calendar_dates)
print('\rImport calendar_dates ... done ')
def import_trips(self):
print('Import trips ... ', end='')
sys.stdout.flush()
count = 0
with open(EXTRACTED + '/trips.txt', newline='', encoding='utf-8-sig') as csvfile:
csv_reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
trips = list()
for row in csv_reader:
trips.append(Trip(**row))
count += 1
if count % 1000 == 0:
print('\rImport trips ... ' + 'read ' + str(count) + ' records', end='')
sys.stdout.flush()
print('\rImport trips ... ' + 'inserting ' + str(count) + ' records into db', end='')
sys.stdout.flush()
Trip.objects.bulk_create(trips)
print('\rImport trips ... done ')
def import_stop_times(self):
print('Import stop_times ... ', end='')
sys.stdout.flush()
count = 0
with open(EXTRACTED + '/stop_times.txt', newline='', encoding='utf-8-sig') as csvfile:
csv_reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
stop_times = list()
for row in csv_reader:
stop_times.append(StopTime(**row))
count += 1
if count % 1000 == 0:
print('\rImport stop_times ... ' + str(count) + ' records', end='')
sys.stdout.flush()
if count % 100000 == 0:
StopTime.objects.bulk_create(stop_times)
stop_times.clear()
sys.stdout.flush()
StopTime.objects.bulk_create(stop_times)
print('\rImport stop_times ... done ') |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: <NAME>
import httplib
import json
import logging
import socket
from quantum.plugins.nec.common import exceptions as nexc
LOG = logging.getLogger(__name__)
class OFCClient(object):
"""A HTTP/HTTPS client for OFC Drivers"""
def __init__(self, host="127.0.0.1", port=8888, use_ssl=False,
key_file=None, cert_file=None):
"""Creates a new client to some OFC.
:param host: The host where service resides
:param port: The port where service resides
:param use_ssl: True to use SSL, False to use HTTP
:param key_file: The SSL key file to use if use_ssl is true
:param cert_file: The SSL cert file to use if use_ssl is true
"""
self.host = host
self.port = port
self.use_ssl = use_ssl
self.key_file = key_file
self.cert_file = cert_file
self.connection = None
def get_connection_type(self):
"""Returns the proper connection type"""
if self.use_ssl:
return httplib.HTTPSConnection
else:
return httplib.HTTPConnection
def do_request(self, method, action, body=None):
LOG.debug("Client request: %s %s [%s]" % (method, action, str(body)))
if type(body) is dict:
body = json.dumps(body)
try:
connection_type = self.get_connection_type()
headers = {"Content-Type": "application/json"}
# Open connection and send request, handling SSL certs
certs = {'key_file': self.key_file, 'cert_file': self.cert_file}
certs = dict((x, certs[x]) for x in certs if certs[x] is not None)
if self.use_ssl and len(certs):
conn = connection_type(self.host, self.port, **certs)
else:
conn = connection_type(self.host, self.port)
conn.request(method, action, body, headers)
res = conn.getresponse()
data = res.read()
LOG.debug("OFC returns [%s:%s]" % (str(res.status), data))
if res.status in (httplib.OK,
httplib.CREATED,
httplib.ACCEPTED,
httplib.NO_CONTENT):
if data and len(data) > 1:
return json.loads(data)
else:
reason = _("An operation on OFC is failed.")
raise nexc.OFCException(reason=reason)
except (socket.error, IOError), e:
reason = _("Failed to connect OFC : %s" % str(e))
LOG.error(reason)
raise nexc.OFCException(reason=reason)
def get(self, action):
return self.do_request("GET", action)
def post(self, action, body=None):
return self.do_request("POST", action, body=body)
def put(self, action, body=None):
return self.do_request("PUT", action, body=body)
def delete(self, action):
return self.do_request("DELETE", action)
|
<filename>azurelinuxagent/distro/default/extension.py
# Microsoft Azure Linux Agent
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#
import os
import zipfile
import time
import json
import subprocess
import shutil
import azurelinuxagent.conf as conf
import azurelinuxagent.logger as logger
from azurelinuxagent.event import add_event, WALAEventOperation
from azurelinuxagent.exception import ExtensionError, ProtocolError, HttpError
from azurelinuxagent.future import ustr
from azurelinuxagent.metadata import AGENT_VERSION
from azurelinuxagent.protocol.restapi import ExtHandlerStatus, ExtensionStatus, \
ExtensionSubStatus, Extension, \
VMStatus, ExtHandler, \
get_properties, set_properties
import azurelinuxagent.utils.fileutil as fileutil
import azurelinuxagent.utils.restutil as restutil
import azurelinuxagent.utils.shellutil as shellutil
from azurelinuxagent.utils.textutil import Version
#HandlerEnvironment.json schema version
HANDLER_ENVIRONMENT_VERSION = 1.0
VALID_EXTENSION_STATUS = ['transitioning', 'error', 'success', 'warning']
VALID_HANDLER_STATUS = ['Ready', 'NotReady', "Installing", "Unresponsive"]
def validate_has_key(obj, key, fullname):
if key not in obj:
raise ExtensionError("Missing: {0}".format(fullname))
def validate_in_range(val, valid_range, name):
if val not in valid_range:
raise ExtensionError("Invalid {0}: {1}".format(name, val))
def parse_formatted_message(formatted_message):
if formatted_message is None:
return None
validate_has_key(formatted_message, 'lang', 'formattedMessage/lang')
validate_has_key(formatted_message, 'message', 'formattedMessage/message')
return formatted_message.get('message')
def parse_ext_substatus(substatus):
#Check extension sub status format
validate_has_key(substatus, 'status', 'substatus/status')
validate_in_range(substatus['status'], VALID_EXTENSION_STATUS,
'substatus/status')
status = ExtensionSubStatus()
status.name = substatus.get('name')
status.status = substatus.get('status')
status.code = substatus.get('code', 0)
formatted_message = substatus.get('formattedMessage')
status.message = parse_formatted_message(formatted_message)
return status
def parse_ext_status(ext_status, data):
if data is None or len(data) == 0:
return
#Currently, only the first status will be reported
data = data[0]
#Check extension status format
validate_has_key(data, 'status', 'status')
status_data = data['status']
validate_has_key(status_data, 'status', 'status/status')
validate_in_range(status_data['status'], VALID_EXTENSION_STATUS,
'status/status')
applied_time = status_data.get('configurationAppliedTime')
ext_status.configurationAppliedTime = applied_time
ext_status.operation = status_data.get('operation')
ext_status.status = status_data.get('status')
ext_status.code = status_data.get('code', 0)
formatted_message = status_data.get('formattedMessage')
ext_status.message = parse_formatted_message(formatted_message)
substatus_list = status_data.get('substatus')
if substatus_list is None:
return
for substatus in substatus_list:
ext_status.substatusList.append(parse_ext_substatus(substatus))
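# For reference, the `data` argument above is the parsed <seq_no>.status file,
# which is expected to look roughly like the following (values illustrative only):
#
#   [{
#       "status": {
#           "operation": "Enable",
#           "status": "success",
#           "code": 0,
#           "configurationAppliedTime": "2015-01-01T00:00:00Z",
#           "formattedMessage": {"lang": "en-US", "message": "Enable succeeded"},
#           "substatus": []
#       }
#   }]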
class ExtHandlerState(object):
NotInstalled = "NotInstalled"
Installed = "Installed"
Enabled = "Enabled"
class ExtHandlersHandler(object):
def __init__(self, distro):
self.distro = distro
self.ext_handlers = None
self.last_etag = None
self.log_report = False
def run(self):
ext_handlers, etag = None, None
try:
self.protocol = self.distro.protocol_util.get_protocol()
ext_handlers, etag = self.protocol.get_ext_handlers()
except ProtocolError as e:
add_event(name="WALA", is_success=False, message=ustr(e))
return
if self.last_etag is not None and self.last_etag == etag:
logger.verb("No change to ext handler config:{0}, skip", etag)
self.log_report = False
else:
logger.info("Handle new ext handler config")
self.log_report = True #Log status report success on new config
self.handle_ext_handlers(ext_handlers)
self.last_etag = etag
self.report_ext_handlers_status(ext_handlers)
def handle_ext_handlers(self, ext_handlers):
if ext_handlers.extHandlers is None or \
len(ext_handlers.extHandlers) == 0:
logger.info("No ext handler config found")
return
for ext_handler in ext_handlers.extHandlers:
#TODO handle install in sequence, enable in parallel
self.handle_ext_handler(ext_handler)
def handle_ext_handler(self, ext_handler):
ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol)
try:
state = ext_handler.properties.state
ext_handler_i.logger.info("Expected handler state: {0}", state)
if state == "enabled":
self.handle_enable(ext_handler_i)
elif state == u"disabled":
self.handle_disable(ext_handler_i)
elif state == u"uninstall":
self.handle_uninstall(ext_handler_i)
else:
message = u"Unknown ext handler state:{0}".format(state)
raise ExtensionError(message)
except ExtensionError as e:
ext_handler_i.set_handler_status(message=ustr(e), code=-1)
ext_handler_i.report_event(message=ustr(e), is_success=False)
def handle_enable(self, ext_handler_i):
ext_handler_i.decide_version()
old_ext_handler_i = ext_handler_i.get_installed_ext_handler()
if old_ext_handler_i is not None and \
old_ext_handler_i.version_gt(ext_handler_i):
raise ExtensionError(u"Downgrade not allowed")
handler_state = ext_handler_i.get_handler_state()
ext_handler_i.logger.info("Current handler state is: {0}", handler_state)
if handler_state == ExtHandlerState.NotInstalled:
ext_handler_i.set_handler_state(ExtHandlerState.NotInstalled)
ext_handler_i.download()
ext_handler_i.update_settings()
if old_ext_handler_i is None:
ext_handler_i.install()
elif ext_handler_i.version_gt(old_ext_handler_i):
old_ext_handler_i.disable()
ext_handler_i.copy_status_files(old_ext_handler_i)
ext_handler_i.update()
old_ext_handler_i.uninstall()
old_ext_handler_i.rm_ext_handler_dir()
ext_handler_i.update_with_install()
else:
ext_handler_i.update_settings()
ext_handler_i.enable()
def handle_disable(self, ext_handler_i):
handler_state = ext_handler_i.get_handler_state()
ext_handler_i.logger.info("Current handler state is: {0}", handler_state)
if handler_state == ExtHandlerState.Enabled:
ext_handler_i.disable()
def handle_uninstall(self, ext_handler_i):
handler_state = ext_handler_i.get_handler_state()
ext_handler_i.logger.info("Current handler state is: {0}", handler_state)
if handler_state != ExtHandlerState.NotInstalled:
if handler_state == ExtHandlerState.Enabled:
ext_handler_i.disable()
ext_handler_i.uninstall()
ext_handler_i.rm_ext_handler_dir()
def report_ext_handlers_status(self, ext_handlers):
"""Go thru handler_state dir, collect and report status"""
vm_status = VMStatus()
vm_status.vmAgent.version = AGENT_VERSION
vm_status.vmAgent.status = "Ready"
vm_status.vmAgent.message = "Guest Agent is running"
if ext_handlers is not None:
for ext_handler in ext_handlers.extHandlers:
try:
self.report_ext_handler_status(vm_status, ext_handler)
except ExtensionError as e:
add_event(name="WALA", is_success=False, message=ustr(e))
logger.verb("Report vm agent status")
try:
self.protocol.report_vm_status(vm_status)
except ProtocolError as e:
message = "Failed to report vm agent status: {0}".format(e)
add_event(name="WALA", is_success=False, message=message)
if self.log_report:
logger.info("Successfully reported vm agent status")
def report_ext_handler_status(self, vm_status, ext_handler):
ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol)
handler_status = ext_handler_i.get_handler_status()
if handler_status is None:
return
handler_state = ext_handler_i.get_handler_state()
if handler_state != ExtHandlerState.NotInstalled:
try:
active_exts = ext_handler_i.report_ext_status()
handler_status.extensions.extend(active_exts)
except ExtensionError as e:
ext_handler_i.set_handler_status(message=ustr(e), code=-1)
try:
heartbeat = ext_handler_i.collect_heartbeat()
if heartbeat is not None:
handler_status.status = heartbeat.get('status')
except ExtensionError as e:
ext_handler_i.set_handler_status(message=ustr(e), code=-1)
vm_status.vmAgent.extensionHandlers.append(handler_status)
class ExtHandlerInstance(object):
def __init__(self, ext_handler, protocol):
self.ext_handler = ext_handler
self.protocol = protocol
self.operation = None
self.pkg = None
prefix = "[{0}]".format(self.get_full_name())
self.logger = logger.Logger(logger.DEFAULT_LOGGER, prefix)
try:
fileutil.mkdir(self.get_log_dir(), mode=0o744)
except IOError as e:
self.logger.error(u"Failed to create extension log dir: {0}", e)
log_file = os.path.join(self.get_log_dir(), "CommandExecution.log")
self.logger.add_appender(logger.AppenderType.FILE,
logger.LogLevel.INFO, log_file)
def decide_version(self):
"""
If auto-upgrade is enabled, pick the largest public extension version
within the major version family of the currently installed plugin version.
Otherwise, pick the highest hot-fix of the requested version.
"""
self.logger.info("Decide which version to use")
try:
pkg_list = self.protocol.get_ext_handler_pkgs(self.ext_handler)
except ProtocolError as e:
raise ExtensionError("Failed to get ext handler pkgs", e)
version = self.ext_handler.properties.version
update_policy = self.ext_handler.properties.upgradePolicy
version_frag = version.split('.')
if len(version_frag) < 2:
raise ExtensionError("Wrong version format: {0}".format(version))
version_prefix = None
if update_policy is not None and update_policy == 'auto':
version_prefix = "{0}.".format(version_frag[0])
else:
version_prefix = "{0}.{1}.".format(version_frag[0], version_frag[1])
packages = [x for x in pkg_list.versions \
if x.version.startswith(version_prefix) or \
x.version == version]
packages = sorted(packages, key=lambda x: Version(x.version),
reverse=True)
if len(packages) <= 0:
raise ExtensionError("Failed to find and valid extension package")
self.pkg = packages[0]
self.ext_handler.properties.version = packages[0].version
self.logger.info("Use version: {0}", self.pkg.version)
def version_gt(self, other):
self_version = self.ext_handler.properties.version
other_version = other.ext_handler.properties.version
return Version(self_version) > Version(other_version)
def get_installed_ext_handler(self):
latest_version = None
ext_handler_name = self.ext_handler.name
for dir_name in os.listdir(conf.get_lib_dir()):
path = os.path.join(conf.get_lib_dir(), dir_name)
if os.path.isdir(path) and dir_name.startswith(ext_handler_name):
separator = dir_name.rfind('-')
if separator < 0:
continue
installed_name = dir_name[0: separator]
installed_version = dir_name[separator + 1:]
if installed_name != ext_handler_name:
continue
if latest_version is None or \
Version(latest_version) < Version(installed_version):
latest_version = installed_version
if latest_version is None:
return None
data = get_properties(self.ext_handler)
old_ext_handler = ExtHandler()
set_properties("ExtHandler", old_ext_handler, data)
old_ext_handler.properties.version = latest_version
return ExtHandlerInstance(old_ext_handler, self.protocol)
def copy_status_files(self, old_ext_handler_i):
self.logger.info("Copy status files from old plugin to new")
old_ext_dir = old_ext_handler_i.get_base_dir()
new_ext_dir = self.get_base_dir()
old_ext_mrseq_file = os.path.join(old_ext_dir, "mrseq")
if os.path.isfile(old_ext_mrseq_file):
shutil.copy2(old_ext_mrseq_file, new_ext_dir)
old_ext_status_dir = old_ext_handler_i.get_status_dir()
new_ext_status_dir = self.get_status_dir()
if os.path.isdir(old_ext_status_dir):
for status_file in os.listdir(old_ext_status_dir):
status_file = os.path.join(old_ext_status_dir, status_file)
if os.path.isfile(status_file):
shutil.copy2(status_file, new_ext_status_dir)
def set_operation(self, op):
self.operation = op
def report_event(self, message="", is_success=True):
version = self.ext_handler.properties.version
add_event(name=self.ext_handler.name, version=version, message=message,
op=self.operation, is_success=is_success)
def download(self):
self.logger.info("Download extension package")
self.set_operation(WALAEventOperation.Download)
if self.pkg is None:
raise ExtensionError("No package uri found")
package = None
for uri in self.pkg.uris:
try:
package = self.protocol.download_ext_handler_pkg(uri.uri)
except ProtocolError as e:
logger.warn("Failed download extension: {0}", e)
if package is None:
raise ExtensionError("Failed to download extension")
self.logger.info("Unpack extension package")
pkg_file = os.path.join(conf.get_lib_dir(),
os.path.basename(uri.uri) + ".zip")
try:
fileutil.write_file(pkg_file, bytearray(package), asbin=True)
zipfile.ZipFile(pkg_file).extractall(self.get_base_dir())
except IOError as e:
raise ExtensionError(u"Failed to write and unzip plugin", e)
chmod = "find {0} -type f | xargs chmod u+x".format(self.get_base_dir())
shellutil.run(chmod)
self.report_event(message="Download succeeded")
self.logger.info("Initialize extension directory")
#Save HandlerManifest.json
man_file = fileutil.search_file(self.get_base_dir(),
'HandlerManifest.json')
if man_file is None:
raise ExtensionError("HandlerManifest.json not found")
try:
man = fileutil.read_file(man_file, remove_bom=True)
fileutil.write_file(self.get_manifest_file(), man)
except IOError as e:
raise ExtensionError(u"Failed to save HandlerManifest.json", e)
#Create status and config dir
try:
status_dir = self.get_status_dir()
fileutil.mkdir(status_dir, mode=0o700)
conf_dir = self.get_conf_dir()
fileutil.mkdir(conf_dir, mode=0o700)
except IOError as e:
raise ExtensionError(u"Failed to create status or config dir", e)
#Save HandlerEnvironment.json
self.create_handler_env()
def enable(self):
self.logger.info("Enable extension.")
self.set_operation(WALAEventOperation.Enable)
man = self.load_manifest()
self.launch_command(man.get_enable_command())
self.set_handler_state(ExtHandlerState.Enabled)
self.set_handler_status(status="Ready", message="Plugin enabled")
def disable(self):
self.logger.info("Disable extension.")
self.set_operation(WALAEventOperation.Disable)
man = self.load_manifest()
self.launch_command(man.get_disable_command(), timeout=900)
self.set_handler_state(ExtHandlerState.Installed)
self.set_handler_status(status="NotReady", message="Plugin disabled")
def install(self):
self.logger.info("Install extension.")
self.set_operation(WALAEventOperation.Install)
man = self.load_manifest()
self.launch_command(man.get_install_command(), timeout=900)
self.set_handler_state(ExtHandlerState.Installed)
def uninstall(self):
self.logger.info("Uninstall extension.")
self.set_operation(WALAEventOperation.UnInstall)
try:
man = self.load_manifest()
self.launch_command(man.get_uninstall_command())
except ExtensionError as e:
self.report_event(message=ustr(e), is_success=False)
def rm_ext_handler_dir(self):
try:
handler_state_dir = self.get_handler_state_dir()
if os.path.isdir(handler_state_dir):
self.logger.info("Remove ext handler dir: {0}", handler_state_dir)
shutil.rmtree(handler_state_dir)
base_dir = self.get_base_dir()
if os.path.isdir(base_dir):
self.logger.info("Remove ext handler dir: {0}", base_dir)
shutil.rmtree(base_dir)
except IOError as e:
message = "Failed to rm ext handler dir: {0}".format(e)
self.report_event(message=message, is_success=False)
def update(self):
self.logger.info("Update extension.")
self.set_operation(WALAEventOperation.Update)
man = self.load_manifest()
self.launch_command(man.get_update_command(), timeout=900)
def update_with_install(self):
man = self.load_manifest()
if man.is_update_with_install():
self.install()
else:
self.logger.info("UpdateWithInstall not set. "
"Skip install during upgrade.")
self.set_handler_state(ExtHandlerState.Installed)
def get_largest_seq_no(self):
seq_no = -1
conf_dir = self.get_conf_dir()
for item in os.listdir(conf_dir):
item_path = os.path.join(conf_dir, item)
if os.path.isfile(item_path):
try:
seperator = item.rfind(".")
if seperator > 0 and item[seperator + 1:] == 'settings':
curr_seq_no = int(item.split('.')[0])
if curr_seq_no > seq_no:
seq_no = curr_seq_no
except Exception as e:
self.logger.verb("Failed to parse file name: {0}", item)
continue
return seq_no
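# Example (hypothetical file names): a config dir containing '0.settings',
# '3.settings' and '5.settings' yields 5; files without a '.settings' suffix,
# or with a non-numeric prefix, are skipped.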
def collect_ext_status(self, ext):
self.logger.verb("Collect extension status")
seq_no = self.get_largest_seq_no()
if seq_no == -1:
return None
status_dir = self.get_status_dir()
ext_status_file = "{0}.status".format(seq_no)
ext_status_file = os.path.join(status_dir, ext_status_file)
ext_status = ExtensionStatus(seq_no=seq_no)
try:
data_str = fileutil.read_file(ext_status_file)
data = json.loads(data_str)
parse_ext_status(ext_status, data)
except IOError as e:
ext_status.message = u"Failed to get status file {0}".format(e)
ext_status.code = -1
ext_status.status = "error"
except ValueError as e:
ext_status.message = u"Malformed status file {0}".format(e)
ext_status.code = -1
ext_status.status = "error"
return ext_status
def report_ext_status(self):
active_exts = []
for ext in self.ext_handler.properties.extensions:
ext_status = self.collect_ext_status(ext)
if ext_status is None:
continue
try:
self.protocol.report_ext_status(self.ext_handler.name, ext.name,
ext_status)
active_exts.append(ext.name)
except ProtocolError as e:
self.logger.error(u"Failed to report extension status: {0}", e)
return active_exts
def collect_heartbeat(self):
man = self.load_manifest()
if not man.is_report_heartbeat():
return
heartbeat_file = os.path.join(conf.get_lib_dir(),
self.get_heartbeat_file())
self.logger.info("Collect heart beat")
if not os.path.isfile(heartbeat_file):
raise ExtensionError("Failed to get heart beat file")
if not self.is_responsive(heartbeat_file):
return {
"status": "Unresponsive",
"code": -1,
"message": "Extension heartbeat is not responsive"
}
try:
heartbeat_json = fileutil.read_file(heartbeat_file)
heartbeat = json.loads(heartbeat_json)[0]['heartbeat']
except IOError as e:
raise ExtensionError("Failed to get heartbeat file:{0}".format(e))
except ValueError as e:
raise ExtensionError("Malformed heartbeat file: {0}".format(e))
return heartbeat
def is_responsive(self, heartbeat_file):
last_update=int(time.time() - os.stat(heartbeat_file).st_mtime)
return last_update <= 600 # responsive only if updated within the last 10 min
def launch_command(self, cmd, timeout=300):
self.logger.info("Launch command:{0}", cmd)
base_dir = self.get_base_dir()
try:
devnull = open(os.devnull, 'w')
child = subprocess.Popen(base_dir + "/" + cmd, shell=True,
cwd=base_dir, stdout=devnull)
except Exception as e:
#TODO do not catch all exception
raise ExtensionError("Failed to launch: {0}, {1}".format(cmd, e))
retry = timeout / 5
while retry > 0 and child.poll() is None:
time.sleep(5)
retry -= 1
if retry == 0:
os.kill(child.pid, 9)
raise ExtensionError("Timeout({0}): {1}".format(timeout, cmd))
ret = child.wait()
if ret is None or ret != 0:
raise ExtensionError("Non-zero exit code: {0}, {1}".format(ret, cmd))
self.report_event(message="Launch command succeeded: {0}".format(cmd))
def load_manifest(self):
man_file = self.get_manifest_file()
try:
data = json.loads(fileutil.read_file(man_file))
except IOError as e:
raise ExtensionError('Failed to load manifest file.')
except ValueError as e:
raise ExtensionError('Malformed manifest file.')
return HandlerManifest(data[0])
def update_settings_file(self, settings_file, settings):
settings_file = os.path.join(self.get_conf_dir(), settings_file)
try:
fileutil.write_file(settings_file, settings)
except IOError as e:
raise ExtensionError(u"Failed to update settings file", e)
def update_settings(self):
if self.ext_handler.properties.extensions is None or \
len(self.ext_handler.properties.extensions) == 0:
#This is the behavior of waagent 2.0.x
#The new agent has to be consistent with the old one.
self.logger.info("Extension has no settings, write empty 0.settings")
self.update_settings_file("0.settings", "")
return
for ext in self.ext_handler.properties.extensions:
settings = {
'publicSettings': ext.publicSettings,
'protectedSettings': ext.protectedSettings,
'protectedSettingsCertThumbprint': ext.certificateThumbprint
}
ext_settings = {
"runtimeSettings":[{
"handlerSettings": settings
}]
}
settings_file = "{0}.settings".format(ext.sequenceNumber)
self.logger.info("Update settings file: {0}", settings_file)
self.update_settings_file(settings_file, json.dumps(ext_settings))
def create_handler_env(self):
env = [{
"name": self.ext_handler.name,
"version" : HANDLER_ENVIRONMENT_VERSION,
"handlerEnvironment" : {
"logFolder" : self.get_log_dir(),
"configFolder" : self.get_conf_dir(),
"statusFolder" : self.get_status_dir(),
"heartbeatFile" : self.get_heartbeat_file()
}
}]
try:
fileutil.write_file(self.get_env_file(), json.dumps(env))
except IOError as e:
raise ExtensionError(u"Failed to save handler environment", e)
def get_handler_state_dir(self):
return os.path.join(conf.get_lib_dir(), "handler_state",
self.get_full_name())
def set_handler_state(self, handler_state):
state_dir = self.get_handler_state_dir()
if not os.path.exists(state_dir):
try:
fileutil.mkdir(state_dir, 0o700)
except IOError as e:
self.logger.error("Failed to create state dir: {0}", e)
try:
state_file = os.path.join(state_dir, "state")
fileutil.write_file(state_file, handler_state)
except IOError as e:
self.logger.error("Failed to set state: {0}", e)
def get_handler_state(self):
state_dir = self.get_handler_state_dir()
state_file = os.path.join(state_dir, "state")
if not os.path.isfile(state_file):
return ExtHandlerState.NotInstalled
try:
return fileutil.read_file(state_file)
except IOError as e:
self.logger.error("Failed to get state: {0}", e)
return ExtHandlerState.NotInstalled
def set_handler_status(self, status="NotReady", message="",
code=0):
state_dir = self.get_handler_state_dir()
if not os.path.exists(state_dir):
try:
fileutil.mkdir(state_dir, 0o700)
except IOError as e:
self.logger.error("Failed to create state dir: {0}", e)
handler_status = ExtHandlerStatus()
handler_status.name = self.ext_handler.name
handler_status.version = self.ext_handler.properties.version
handler_status.message = message
handler_status.code = code
handler_status.status = status
status_file = os.path.join(state_dir, "status")
try:
fileutil.write_file(status_file,
json.dumps(get_properties(handler_status)))
except (IOError, ValueError, ProtocolError) as e:
self.logger.error("Failed to save handler status: {0}", e)
def get_handler_status(self):
state_dir = self.get_handler_state_dir()
status_file = os.path.join(state_dir, "status")
if not os.path.isfile(status_file):
return None
try:
data = json.loads(fileutil.read_file(status_file))
handler_status = ExtHandlerStatus()
set_properties("ExtHandlerStatus", handler_status, data)
return handler_status
except (IOError, ValueError) as e:
self.logger.error("Failed to get handler status: {0}", e)
def get_full_name(self):
return "{0}-{1}".format(self.ext_handler.name,
self.ext_handler.properties.version)
def get_base_dir(self):
return os.path.join(conf.get_lib_dir(), self.get_full_name())
def get_status_dir(self):
return os.path.join(self.get_base_dir(), "status")
def get_conf_dir(self):
return os.path.join(self.get_base_dir(), 'config')
def get_heartbeat_file(self):
return os.path.join(self.get_base_dir(), 'heartbeat.log')
def get_manifest_file(self):
return os.path.join(self.get_base_dir(), 'HandlerManifest.json')
def get_env_file(self):
return os.path.join(self.get_base_dir(), 'HandlerEnvironment.json')
def get_log_dir(self):
return os.path.join(conf.get_ext_log_dir(), self.ext_handler.name,
self.ext_handler.properties.version)
class HandlerEnvironment(object):
def __init__(self, data):
self.data = data
def get_version(self):
return self.data["version"]
def get_log_dir(self):
return self.data["handlerEnvironment"]["logFolder"]
def get_conf_dir(self):
return self.data["handlerEnvironment"]["configFolder"]
def get_status_dir(self):
return self.data["handlerEnvironment"]["statusFolder"]
def get_heartbeat_file(self):
return self.data["handlerEnvironment"]["heartbeatFile"]
class HandlerManifest(object):
def __init__(self, data):
if data is None or data['handlerManifest'] is None:
raise ExtensionError('Malformed manifest file.')
self.data = data
def get_name(self):
return self.data["name"]
def get_version(self):
return self.data["version"]
def get_install_command(self):
return self.data['handlerManifest']["installCommand"]
def get_uninstall_command(self):
return self.data['handlerManifest']["uninstallCommand"]
def get_update_command(self):
return self.data['handlerManifest']["updateCommand"]
def get_enable_command(self):
return self.data['handlerManifest']["enableCommand"]
def get_disable_command(self):
return self.data['handlerManifest']["disableCommand"]
def is_reboot_after_install(self):
"""
Deprecated
"""
return False
def is_report_heartbeat(self):
return self.data['handlerManifest'].get('reportHeartbeat', False)
def is_update_with_install(self):
update_mode = self.data['handlerManifest'].get('updateMode')
if update_mode is None:
return True
return update_mode.lower() == "updatewithinstall"
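# For reference, a minimal HandlerManifest.json consumed by this class looks
# roughly like the following (command paths and values are illustrative only):
#
#   [{
#       "name": "ExampleExtension",
#       "version": 1.0,
#       "handlerManifest": {
#           "installCommand": "bin/install.sh",
#           "uninstallCommand": "bin/uninstall.sh",
#           "updateCommand": "bin/update.sh",
#           "enableCommand": "bin/enable.sh",
#           "disableCommand": "bin/disable.sh",
#           "reportHeartbeat": false,
#           "updateMode": "UpdateWithInstall"
#       }
#   }]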
|
#!/usr/bin/python3
# Scenario based on test : [2.5]-Vote-test
import os
import sys
import time
import datetime
currentdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(os.path.dirname(currentdir)))
from beos_test_utils.beos_utils_pack import init, ActionResult, ResourceResult, VotersResult
from beos_test_utils.utility_misc import *
if __name__ == "__main__":
try:
node, summary, args, log = init(__file__)
accounts_small_set = []
accounts_big_set = []
nr_system_producers = 10
nr_accounts_small_set = 10
nr_accounts_big_set = 40
producers = node.create_producers( nr_system_producers, "1.0000 PXBTS" )
node.run_node()
#Changeparams
newparams = {
"beos" : {
"starting_block" : 5,
"next_block" : 0,
"ending_block" : 200000,
"block_interval" : 5,
"trustee_reward" : 0
},
"ram" : {
"starting_block" : 5,
"next_block" : 0,
"ending_block" : 200000,
"block_interval" : 5,
"trustee_reward" : 0
},
"proxy_assets" : [ "0.0000 PXBTS"],
"ram_leftover" : 3000000,
"starting_block_for_initial_witness_election":10
}
node.changeparams( newparams )
node.wait_till_block( 6 )
#Actions
threads = 10
is_creation = True
is_reg_producer = False
is_vote_producer = False
is_unreg_producer = False
info = Info( is_creation, accounts_small_set, nr_accounts_small_set, is_reg_producer, is_vote_producer )
execute( node, summary, worker_creator, threads, info, log )
node.wait_n_blocks( 2 )
info = Info( is_creation, accounts_big_set, nr_accounts_big_set, is_reg_producer, is_vote_producer )
execute( node, summary, worker_creator, threads, info, log )
node.wait_n_blocks( 2 )
is_creation = False
is_reg_producer = True
info = Info( is_creation, accounts_small_set, nr_accounts_small_set, is_reg_producer, is_vote_producer )
execute( node, summary, worker_operation, threads, info, log )
node.wait_n_blocks( 2 )
info2 = Info( is_creation, producers, nr_system_producers, is_reg_producer, is_vote_producer )
execute( node, summary, worker_operation, threads, info2, log )
node.wait_n_blocks( 2 )
is_reg_producer = False
is_unreg_producer = True
info = Info( is_creation, accounts_small_set, nr_accounts_small_set, is_reg_producer, is_vote_producer, is_unreg_producer )
execute( node, summary, worker_operation, threads, info, log )
node.wait_n_blocks( 2 )
info2 = Info( is_creation, producers, nr_system_producers, is_reg_producer, is_vote_producer, is_unreg_producer )
execute( node, summary, worker_operation, threads, info2, log )
node.wait_n_blocks( 2 )
is_vote_producer = True
is_unreg_producer = False
is_failed = True
fail_message = "producer is not currently registered"
info = Info( is_creation, producers, nr_system_producers, is_reg_producer, is_vote_producer, is_unreg_producer, is_failed, fail_message )
execute( node, summary, worker_operation, threads, info, log )
node.wait_n_blocks( 10 )
rpc_nr_producers, rpc_nr_voted_producers = get_producers_stats( node, log )
what = "Incorrect number of producents: rpc: %s created: %s \n" % ( rpc_nr_producers, nr_accounts_small_set + nr_system_producers + 1 )
summary.equal( rpc_nr_producers, nr_system_producers + nr_accounts_small_set + 1, what )
what = "Incorrect number of voted producents: rpc: %s voted: %s \n" % ( rpc_nr_voted_producers, nr_system_producers )
summary.equal( rpc_nr_voted_producers, 0, what )
except Exception as _ex:
log.exception("Exception `{0}` occures while executing `{1}` tests.".format(str(_ex), __file__))
finally:
summary_status = summary.summarize()
node.stop_node()
exit(summary_status) |
<reponame>CharlesDDNoble/broncode
from codeclient import CodeClient
import json
class Trial():
def from_json(self,json_string):
return json.loads(json_string)
def to_json(self):
return json.dumps(self.to_dict())
def to_dict(self):
return {"is_success" : self.is_success,
"test_time" : self.test_time,
"out" : self.out,
"exp" : self.exp,
"handler" : self.handler.to_dict(),
"failure_reason" : self.failure_reason}
def __init__(self,test_time=0,out='',exp='',timeout_response='',handler=None):
self.is_success = (out == exp)
self.test_time = test_time
self.out = out
self.exp = exp
self.handler = handler
if self.is_success:
self.failure_reason = "None"
elif self.out == timeout_response:
self.failure_reason = "Timeout"
else:
self.failure_reason = "Wrong"
def print(self):
print("is_success: "+str(self.is_success))
print("test_time: "+str(self.test_time))
print("out: "+self.out.replace("\n","\\n"))
print("exp: "+self.exp.replace("\n","\\n"))
print("handler: "+str(self.handler.to_dict() if self.handler else str(None)))
# def get_data_string(self):
# return str(self.is_success) + "," \
# + str(self.test_time) + "," \
# + str("\'"+self.out.replace("\n","\\n")+"\'") + "," \
# + str("\'"+self.exp.replace("\n","\\n")+"\'") + "," \
# + str(self.failure_reason) + "," \
# + str(self.handler.run_time) + "," \
# + str(self.handler.send_time) + "," \
# + str(self.handler.recv_time) + "," \
# + str(self.handler.conn_attempt)
class Test():
def log_test(self,file_name,should_append=True):
mode = "a" if should_append else "w"
with open(file_name,mode) as f:
# print(test_name+"\n")
f.write((self.to_json())+"\n")
def from_json(self,json_string):
return json.loads(json_string)
def to_json(self):
return json.dumps(self.to_dict())
def from_dict(self,test_dict):
return Test(test_dict["name"],
test_dict["test_type"],
test_dict["count"],
test_dict["interval"],
test_dict["time_limit"],
test_dict["trials"])
def to_dict(self):
# trials_dicts = [trial.to_dict() for trial in test.trials]
#convert all trials into a {index:trial} dictionary
trials_dict = { i : self.trials[i].to_dict() for i in range(0, len(self.trials) ) }
return {"name" : self.name,
"test_type" : self.test_type,
"count" : self.count,
"interval" : self.interval,
"time_limit" : self.time_limit,
"trials" : trials_dict}
def __init__(self,name,test_type,count,interval,time_limit,trials=None):
self.name = name
self.test_type = test_type
self.count = count
self.interval = interval
self.time_limit = time_limit
self.trials = trials if trials is not None else []
def get_success_rate(self):
total = len(self.trials)
success = 0
for trial in self.trials:
if trial.is_success:
success += 1
return success/total
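# Minimal usage sketch (values are illustrative; log_test()/to_dict() also
# require a handler object exposing to_dict(), which is omitted here):
#
#   trial = Trial(test_time=0.42, out="hello\n", exp="hello\n")
#   test = Test("smoke", "echo", count=1, interval=0, time_limit=5,
#               trials=[trial])
#   test.get_success_rate()   # -> 1.0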
# def main():
# if __name__ == "__main__":
# main()
|
<filename>tests/test_accounts.py
import pykazoo.accounts
import pykazoo.restrequest
from unittest import TestCase
from unittest.mock import create_autospec
mock_rest_request = create_autospec(pykazoo.restrequest.RestRequest)
class TestAccounts(TestCase):
def setUp(self):
self.mock_rest_request = mock_rest_request
self.accounts = pykazoo.accounts.Accounts(
self.mock_rest_request)
self.account_id = '<KEY>'
self.data = {'test': 'data'}
self.params = {'test': 'params'}
def test_get_account_request_call(self):
self.accounts.get_account(self.account_id, self.params)
self.mock_rest_request.get.assert_called_with('accounts/' +
self.account_id,
self.params)
def test_get_account_returns_dict(self):
self.mock_rest_request.get.return_value = self.data
return_data = self.accounts.get_account(self.account_id, self.params)
assert return_data is self.data
def test_get_account_children_request_call(self):
self.accounts.get_account_children(self.account_id, self.params)
self.mock_rest_request.get.assert_called_with('accounts/' +
self.account_id +
'/children',
self.params)
def test_get_account_children_returns_dict(self):
self.mock_rest_request.get.return_value = self.data
return_data = self.accounts.get_account_children(self.account_id,
self.params)
assert return_data is self.data
def test_get_account_descendants_request_call(self):
self.accounts.get_account_descendants(self.account_id, self.params)
self.mock_rest_request.get.assert_called_with('accounts/' +
self.account_id +
'/descendants',
self.params)
def test_get_account_descendants_returns_dict(self):
self.mock_rest_request.get.return_value = self.data
return_data = self.accounts.get_account_descendants(self.account_id,
self.params)
assert return_data is self.data
def test_get_account_siblings_request_call(self):
self.accounts.get_account_siblings(self.account_id, self.params)
self.mock_rest_request.get.assert_called_with('accounts/' +
self.account_id +
'/siblings',
self.params)
def test_get_account_siblings_returns_dict(self):
self.mock_rest_request.get.return_value = self.data
return_data = self.accounts.get_account_siblings(self.account_id,
self.params)
assert return_data is self.data
def test_create_sub_account_request_call(self):
self.accounts.create_sub_account(self.account_id, self.data)
self.mock_rest_request.put.assert_called_with('accounts/' +
self.account_id,
self.data)
def test_create_sub_account_returns_dict(self):
self.mock_rest_request.put.return_value = self.data
return_data = self.accounts.create_sub_account(self.account_id,
self.data)
assert return_data is self.data
def test_update_account_request_call(self):
self.accounts.update_account(self.account_id, self.data)
self.mock_rest_request.post.assert_called_with('accounts/' +
self.account_id,
self.data)
def test_update_account_returns_dict(self):
self.mock_rest_request.post.return_value = self.data
return_data = self.accounts.update_account(self.account_id,
self.data)
assert return_data is self.data
def test_delete_account_request_call(self):
self.accounts.delete_account(self.account_id)
self.mock_rest_request.delete.assert_called_with('accounts/' +
self.account_id)
def test_delete_account_returns_dict(self):
self.mock_rest_request.delete.return_value = self.data
return_data = self.accounts.delete_account(self.account_id)
assert return_data is self.data
|
# coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.contrib.kythe.tasks.indexable_java_targets import IndexableJavaTargets
class IndexJava(NailgunTask):
_KYTHE_INDEXER_MAIN = 'com.google.devtools.kythe.analyzers.java.JavaIndexer'
cache_target_dirs = True
@classmethod
def implementation_version(cls):
# Bump this version to invalidate all past artifacts generated by this task.
return super(IndexJava, cls).implementation_version() + [('IndexJava', 6), ]
@classmethod
def product_types(cls):
return ['kythe_entries_files']
@classmethod
def prepare(cls, options, round_manager):
super(IndexJava, cls).prepare(options, round_manager)
round_manager.require_data('kindex_files')
@classmethod
def register_options(cls, register):
super(IndexJava, cls).register_options(register)
register('--force', type=bool, fingerprint=True,
help='Re-index all targets, even if they are valid.',
removal_version='1.6.0.dev0', removal_hint='Use --cache-ignore instead.')
cls.register_jvm_tool(register,
'kythe-indexer',
main=cls._KYTHE_INDEXER_MAIN)
def execute(self):
def entries_file(_vt):
return os.path.join(_vt.results_dir, 'index.entries')
indexable_targets = IndexableJavaTargets.get(self.context)
with self.invalidated(indexable_targets, invalidate_dependents=True) as invalidation_check:
kindex_files = self.context.products.get_data('kindex_files')
# TODO(<NAME>): `vts_to_index` should be inlined to `invalidation_check.invalid_vts`
# when the deprecation cycle for `--force` is completed.
vts_to_index = (invalidation_check.all_vts if self.get_options().force
else invalidation_check.invalid_vts)
indexer_cp = self.tool_classpath('kythe-indexer')
# Kythe jars embed a copy of Java 9's com.sun.tools.javac and javax.tools, for use on JDK8.
# We must put these jars on the bootclasspath, ahead of any others, to ensure that we load
# the Java 9 versions, and not the runtime's versions.
jvm_options = ['-Xbootclasspath/p:{}'.format(':'.join(indexer_cp))]
jvm_options.extend(self.get_options().jvm_options)
for vt in vts_to_index:
self.context.log.info('Kythe indexing {}'.format(vt.target.address.spec))
kindex_file = kindex_files.get(vt.target)
if not kindex_file:
raise TaskError('No .kindex file found for {}'.format(vt.target.address.spec))
args = [kindex_file, '--out', entries_file(vt)]
result = self.runjava(classpath=indexer_cp, main=self._KYTHE_INDEXER_MAIN,
jvm_options=jvm_options,
args=args, workunit_name='kythe-index',
workunit_labels=[WorkUnitLabel.COMPILER])
if result != 0:
raise TaskError('java {main} ... exited non-zero ({result})'.format(
main=self._KYTHE_INDEXER_MAIN, result=result))
for vt in invalidation_check.all_vts:
self.context.products.get_data('kythe_entries_files', dict)[vt.target] = entries_file(vt)
|
<filename>Codes/Inference_NetworkConfiguration.py
# Copyright 2019 DIVERSIS Software. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
def get_scope_variable(scope, var, shape=None,initializer=None):
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
v = tf.get_variable(var, initializer=initializer(shape),trainable=True)
return v
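# Example (illustrative shapes): because of tf.AUTO_REUSE, asking for the same
# scope/name twice returns the same shared variable:
#
#   w1 = get_scope_variable('Layer-0', 'Weight', shape=[3, 3, 1, 16],
#                           initializer=tf.contrib.layers.xavier_initializer())
#   w2 = get_scope_variable('Layer-0', 'Weight', shape=[3, 3, 1, 16],
#                           initializer=tf.contrib.layers.xavier_initializer())
#   assert w1 is w2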
def ConstructInferenceNetwork(InputSize,batchSize,layerOutDimSize,kernelSize,strideSize,poolKernelSize,poolSize,IMAGE_SIZE):
# Generate Placeholders
inputFramePlaceHolder = tf.placeholder(tf.float32, shape=[batchSize, InputSize[0], InputSize[1], InputSize[2]],name='inputFramePlaceHolder')
labelPlaceHolder = tf.placeholder(tf.float32, shape=[batchSize, layerOutDimSize[-1]],name='labelPlaceHolder')
dropoutInputPlaceHolder = tf.placeholder(tf.float32, shape=[],name='dropoutInputPlaceHolder')
dropoutPoolPlaceHolder = tf.placeholder(tf.float32, shape=[len(layerOutDimSize)],name='dropoutPoolPlaceHolder')
inputDistortionPlaceholder = tf.placeholder(tf.bool, shape=[],name='inputDistortionPlaceholder')
# Construct distorted input if desired
inputFramePlaceHolderResized = [tf.reshape(distorted_inputs(inputFramePlaceHolder[i,:,:,:],IMAGE_SIZE,inputDistortionPlaceholder),[1,IMAGE_SIZE,IMAGE_SIZE,InputSize[2]]) for i in range(inputFramePlaceHolder.shape[0])]
inputFramePlaceHolderResized = tf.concat(inputFramePlaceHolderResized,axis=0)
# Construct inference network structure
layerSize = len(layerOutDimSize)
# Apply input dropout
inputFrame = tf.nn.dropout(inputFramePlaceHolderResized,dropoutInputPlaceHolder,name="InputDropout")
latentOut = tf.multiply(inputFrame,1.0)
for lIndx in range(layerSize):
print("**************** Layer-%d ****************"%lIndx)
print("Input Tensor: ")
print(latentOut)
inputDim = latentOut.get_shape().as_list()
outputDim = layerOutDimSize[lIndx]
# if kernel is convolution
if len(kernelSize[lIndx]) > 1:
shapeW = [kernelSize[lIndx][0],kernelSize[lIndx][1],inputDim[-1],outputDim]
else:
# kernel is FC
if len(inputDim) == 4:
shapeW = [inputDim[1]*inputDim[2]*inputDim[3],outputDim]
else:
shapeW = [inputDim[-1],outputDim]
weight = get_scope_variable('Layer-%d'%lIndx, 'Weight', shape=shapeW,initializer=tf.contrib.layers.xavier_initializer())
bias = get_scope_variable('Layer-%d'%lIndx, 'Bias', shape=[outputDim],initializer=tf.zeros_initializer())
print("Weight: ")
print(weight)
print("Bias: ")
print(bias)
# Construct layer
lastLayer = (lIndx == (layerSize-1))
latentOut = ConstructLayer(latentOut,weight,bias,strideSize[lIndx],'Layer-%d-OP'%lIndx,dropoutPoolPlaceHolder[lIndx],poolKernelSize[lIndx],poolSize[lIndx],lastLayer)
print("Output Tensor: ")
print(latentOut)
print("******************************************")
# Compute prediction metric
softMaxOut = tf.nn.softmax(logits=latentOut,name="softMaxOut")
correct_prediction = tf.equal(tf.argmax(softMaxOut,1,name="Argmax_softMaxOut"), tf.argmax(labelPlaceHolder,1,name="Argmax_Label"),name="CorrectPrediction")
# Compute accuracy metric
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32,name="Cast_Accuracy"),name="Accuracy")
placeHolders = {'inputFramePlaceHolder':inputFramePlaceHolder, 'labelPlaceHolder':labelPlaceHolder, 'dropoutInputPlaceHolder':dropoutInputPlaceHolder, 'dropoutPoolPlaceHolder':dropoutPoolPlaceHolder, 'inputDistortionPlaceholder':inputDistortionPlaceholder}
return latentOut,accuracy,placeHolders
def ConstructLayer(layerInput,weight,bias,strideSize,nameScope,dropoutPoolPlaceHolder,poolKernelSize,poolSize,lastLayer):
convSize = weight.get_shape().as_list()
with tf.name_scope(nameScope):
if len(convSize) == 4:
outOp = tf.nn.conv2d(layerInput, weight, strides=[1,strideSize,strideSize,1], padding='SAME',name="ConvOP")
else:
layerInputFC = tf.reshape(layerInput,(layerInput.shape[0],-1))
outOp = tf.matmul(layerInputFC, weight,name="MatMul")
if poolSize > 1:
outOp = tf.nn.max_pool(outOp, ksize=[1, poolKernelSize[0], poolKernelSize[1], 1], strides=[1, poolSize, poolSize, 1],padding='SAME', name='pool')
layerOutput = tf.add(outOp,bias,name="BiasAdd")
if lastLayer == False:
layerOutput = tf.nn.dropout(layerOutput,dropoutPoolPlaceHolder)
layerOutput = tf.nn.relu(layerOutput)
return layerOutput
def ConstructOptimizer(output,labelPlaceHolder,momentum,weightDecay=None):
learningRatePlaceHolder = tf.placeholder(tf.float32, shape=[],name='learningRatePlaceHolder')
# Compute l2Loss
if weightDecay is not None:
l2LossList = [tf.nn.l2_loss(var) for var in tf.trainable_variables()]
l2Loss = tf.multiply(tf.add_n(l2LossList),weightDecay)
else:
l2Loss = tf.zeros(shape=[])
# Compute cross entropy loss
crossEntropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=labelPlaceHolder, logits=output),name="CrossEntropy")
# Compute totLoss used for training and accuracy metric
totLoss = tf.add_n([crossEntropy,l2Loss])
# Generate optimizer operation
with tf.variable_scope('Momentum-0', reuse=tf.AUTO_REUSE):
train_step = tf.train.MomentumOptimizer(learning_rate=learningRatePlaceHolder, momentum=momentum).minimize(totLoss)
return train_step,totLoss,l2Loss,learningRatePlaceHolder
def GetTestAccuracy(sess,accuracyOp,data,labels,testBatchSize,frameBufferForTest,inputFramePlaceHolder,inputDistortionPlaceholder,labelPlaceHolder,dropoutInputPlaceHolder,dropoutPoolPlaceHolder):
dataLen = data.shape[3]
iternum = int(dataLen / testBatchSize)
batchLabels = np.zeros((testBatchSize,labels.shape[0]))
accuracy = 0
for i in range(iternum):
for j in range(testBatchSize):
frameBufferForTest[j,:,:,:] = data[:,:,:,i*testBatchSize+j]
batchLabels[j,:] = labels[:,i*testBatchSize+j]
# Determine feed_dict for testing accuracy
feed_latent = {inputFramePlaceHolder:frameBufferForTest, inputDistortionPlaceholder:False, labelPlaceHolder:batchLabels, dropoutInputPlaceHolder:1.0, dropoutPoolPlaceHolder:np.ones((dropoutPoolPlaceHolder.shape[0]))}
classiferAccuracyVal = sess.run(accuracyOp,feed_dict = feed_latent)
accuracy += classiferAccuracyVal / iternum
return accuracy
# We changed the files "distorted_inputs" in "https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10_input.py"
# to generate "distorted_inputs" used in this file.
def distorted_inputs(image,imSize,inputDistortionPlaceholder):
with tf.name_scope('data_augmentation'):
height = imSize
width = imSize
if inputDistortionPlaceholder == True:
# Image processing for training the network. Note the many random
# distortions applied to the image.
# Randomly crop a [height, width] section of the image.
distorted_image = tf.random_crop(image, [height, width, 3])
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Because these operations are not commutative, consider randomizing
# the order of their operations.
# NOTE: since per_image_standardization zeros the mean and makes
# the stddev unit, this likely has no effect; see tensorflow#1458.
distorted_image = tf.image.random_brightness(distorted_image,max_delta=63)
distorted_image = tf.image.random_contrast(distorted_image,lower=0.2, upper=1.8)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_standardization(distorted_image)
# Set the shapes of tensors.
float_image.set_shape([height, width, image.shape[-1]])
else:
float_image = inputs_test(image,imSize)
# Generate a batch of images and labels by building up a queue of examples.
return float_image
# We changed the files "inputs" in "https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10_input.py"
# to generate "inputs_test" used in this file.
def inputs_test(image,imSize):
height = imSize
width = imSize
# Image processing for evaluation.
# Crop the central [height, width] of the image.
resized_image = tf.image.resize_image_with_crop_or_pad(image,height, width)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_standardization(resized_image)
# Set the shapes of tensors.
float_image.set_shape([height, width, image.shape[-1]])
# Generate a batch of images and labels by building up a queue of examples.
return float_image
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module tests the `cros branch` command."""
from __future__ import print_function
import os
import sys
import mock
from chromite.cbuildbot.manifest_version import VersionInfo
from chromite.cli import command_unittest
from chromite.cli.cros.cros_branch import Branch
from chromite.cli.cros.cros_branch import BranchCommand
from chromite.cli.cros.cros_branch import BranchError
from chromite.cli.cros.cros_branch import CanBranchProject
from chromite.cli.cros.cros_branch import CanPinProject
from chromite.cli.cros.cros_branch import CrosCheckout
from chromite.cli.cros.cros_branch import FactoryBranch
from chromite.cli.cros.cros_branch import FirmwareBranch
from chromite.cli.cros.cros_branch import ManifestRepository
from chromite.cli.cros.cros_branch import ProjectBranch
from chromite.cli.cros.cros_branch import ReleaseBranch
from chromite.cli.cros.cros_branch import StabilizeBranch
from chromite.lib import config_lib
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import git
from chromite.lib import osutils
from chromite.lib import partial_mock
from chromite.lib import repo_manifest
from chromite.lib import repo_util
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
def FileUrl(*args):
"""Map path components to a qualified local URL."""
return 'file://%s' % os.path.join(*args)
def ManifestXml(*args):
"""Joins arbitrary XML and wraps it in a <manifest> element."""
xml = '\n'.join(args)
return '<?xml version="1.0" encoding="UTF-8"?><manifest>%s</manifest>' % xml
def AsAttrDict(*args):
"""Create AttrDict from string values, indexed by CAPS_CASE value."""
return config_lib.AttrDict({v.upper().replace('-', '_'): v for v in args})
# A "project" in this dictionary is actually a project ID, which
# is used by helper functions to generate project name/path/revision/etc.
# If you add a project to this list, remember to update the categories below
# as well as PROJECTS_EXTERNAL_XML and its internal equivalent.
PROJECTS = AsAttrDict('manifest', 'manifest-internal', 'chromiumos-overlay',
'multicheckout-a', 'multicheckout-b', 'implicit-pinned',
'explicit-tot', 'explicit-branch', 'explicit-pinned',
'non-default-group')
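# For example, given AsAttrDict above, PROJECTS.CHROMIUMOS_OVERLAY holds the
# project ID string 'chromiumos-overlay'.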
# Categorize the projects above for use in testing.
PINNED_PROJECTS = (PROJECTS.EXPLICIT_PINNED, PROJECTS.IMPLICIT_PINNED)
TOT_PROJECTS = (PROJECTS.EXPLICIT_TOT,)
MULTI_CHECKOUT_PROJECTS = (PROJECTS.MULTICHECKOUT_A, PROJECTS.MULTICHECKOUT_B)
SINGLE_CHECKOUT_PROJECTS = (PROJECTS.CHROMIUMOS_OVERLAY,
PROJECTS.EXPLICIT_BRANCH, PROJECTS.MANIFEST,
PROJECTS.MANIFEST_INTERNAL,
PROJECTS.NON_DEFAULT_GROUP)
BRANCHED_PROJECTS = SINGLE_CHECKOUT_PROJECTS + MULTI_CHECKOUT_PROJECTS
NON_BRANCHED_PROJECTS = PINNED_PROJECTS + TOT_PROJECTS
MANIFEST_PROJECTS = (PROJECTS.MANIFEST, PROJECTS.MANIFEST_INTERNAL)
EXTERNAL_PROJECTS = (PROJECTS.MANIFEST, PROJECTS.CHROMIUMOS_OVERLAY,
PROJECTS.IMPLICIT_PINNED, PROJECTS.MULTICHECKOUT_A,
PROJECTS.MULTICHECKOUT_B)
INTERNAL_PROJECTS = (PROJECTS.MANIFEST_INTERNAL, PROJECTS.EXPLICIT_TOT,
PROJECTS.EXPLICIT_BRANCH, PROJECTS.EXPLICIT_PINNED)
# Define remotes. There is a public and an internal remote.
REMOTES = AsAttrDict('cros', 'cros-internal')
# Store commonly used values for convenience.
EXTERNAL_FILE_NAME = 'external.xml'
INTERNAL_FILE_NAME = 'internal.xml'
REMOTES_FILE_NAME = '_remotes.xml'
# Create the raw XML based on the above data. Note that by convention,
# the leaf directory of the project path MUST end with the project ID.
DEFAULT_XML = """
<default revision="refs/heads/master" remote="cros"/>
"""
REMOTE_EXTERNAL_XML = """
<remote name="cros" fetch="ext-fetch" revision="refs/heads/master"/>
"""
REMOTE_INTERNAL_XML = """
<remote name="cros-internal" fetch="int-fetch" revision="refs/heads/master"/>
"""
PROJECTS_EXTERNAL_XML = """
<project name="chromiumos/manifest" path="manifest"/>
<project name="chromiumos/overlays/chromiumos-overlay"
path="src/third_party/chromiumos-overlay"/>
<project name="external/implicit-pinned"
path="src/third_party/implicit-pinned"
revision="refs/heads/implicit-pinned"/>
<project name="chromiumos/multicheckout"
path="src/third_party/multicheckout-a"
revision="refs/heads/multicheckout-a"/>
<project name="chromiumos/multicheckout"
path="src/third_party/multicheckout-b"
revision="refs/heads/multicheckout-b"/>
<project name="chromiumos/non-default-group"
path="src/third_party/non-default-group"
revision="refs/heads/master"
groups="notdefault,special-group"/>
"""
PROJECTS_INTERNAL_XML = """
<project name="chromeos/manifest-internal"
path="manifest-internal"
remote="cros-internal"/>
<project name="chromeos/explicit-pinned"
path="src/explicit-pinned"
revision="refs/heads/explicit-pinned"
remote="cros-internal">
<annotation name="branch-mode" value="pin"/>
</project>
<project name="chromeos/explicit-branch"
path="src/explicit-branch"
remote="cros-internal">
<annotation name="branch-mode" value="create"/>
</project>
<project name="chromeos/explicit-tot"
path="src/explicit-tot"
remote="cros-internal">
<annotation name="branch-mode" value="tot"/>
</project>
"""
INCLUDE_REMOTES_XML = """
<include name="_remotes.xml"/>
"""
INCLUDE_EXTERNAL_XML = """
<include name="external.xml"/>
"""
INCLUDE_INTERNAL_XML = """
<include name="internal.xml"/>
"""
# Combine the XML chunks above into meaningful files. Create files for
# both manifest and manifest-internal projects, once for TOT and once
# for a branch named new-branch.
MANIFEST_FILES = {
REMOTES_FILE_NAME:
ManifestXml(REMOTE_EXTERNAL_XML),
EXTERNAL_FILE_NAME:
ManifestXml(DEFAULT_XML, INCLUDE_REMOTES_XML, PROJECTS_EXTERNAL_XML),
constants.DEFAULT_MANIFEST:
ManifestXml(INCLUDE_EXTERNAL_XML),
}
MANIFEST_INTERNAL_FILES = {
REMOTES_FILE_NAME:
ManifestXml(REMOTE_EXTERNAL_XML, REMOTE_INTERNAL_XML),
EXTERNAL_FILE_NAME:
MANIFEST_FILES[EXTERNAL_FILE_NAME],
INTERNAL_FILE_NAME:
ManifestXml(DEFAULT_XML, INCLUDE_REMOTES_XML, PROJECTS_INTERNAL_XML),
constants.OFFICIAL_MANIFEST:
ManifestXml(INCLUDE_INTERNAL_XML, INCLUDE_EXTERNAL_XML),
constants.DEFAULT_MANIFEST:
ManifestXml(INCLUDE_INTERNAL_XML, INCLUDE_EXTERNAL_XML),
}
# Store the full, parsed manifest XML for TOT.
FULL_TOT_XML = ManifestXml(DEFAULT_XML, REMOTE_EXTERNAL_XML,
REMOTE_INTERNAL_XML, PROJECTS_EXTERNAL_XML,
PROJECTS_INTERNAL_XML)
# Now create a branched version of the above XML.
DEFAULT_BRANCHED_XML = """
<default remote="cros"/>
"""
PROJECTS_EXTERNAL_BRANCHED_XML = """
<project name="chromiumos/manifest"
path="manifest"
revision="refs/heads/old-branch"/>
<project name="chromiumos/overlays/chromiumos-overlay"
path="src/third_party/chromiumos-overlay"
revision="refs/heads/old-branch"/>
<project name="external/implicit-pinned"
path="src/third_party/implicit-pinned"
revision="refs/heads/implicit-pinned"/>
<project name="chromiumos/multicheckout"
path="src/third_party/multicheckout-a"
revision="refs/heads/old-branch-multicheckout-a"/>
<project name="chromiumos/multicheckout"
path="src/third_party/multicheckout-b"
revision="refs/heads/old-branch-multicheckout-b"/>
<project name="chromiumos/non-default-group"
path="src/third_party/non-default-group"
revision="refs/heads/old-branch"
groups="notdefault,special-group"/>
"""
PROJECTS_INTERNAL_BRANCHED_XML = """
<project name="chromeos/manifest-internal"
path="manifest-internal"
remote="cros-internal"
revision="refs/heads/old-branch"/>
<project name="chromeos/explicit-pinned"
path="src/explicit-pinned"
revision="refs/heads/explicit-pinned"
remote="cros-internal">
<annotation name="branch-mode" value="pin"/>
</project>
<project name="chromeos/explicit-branch"
path="src/explicit-branch"
remote="cros-internal"
revision="refs/heads/old-branch">
<annotation name="branch-mode" value="create"/>
</project>
<project name="chromeos/explicit-tot"
path="src/explicit-tot"
remote="cros-internal"
revision="refs/heads/master">
<annotation name="branch-mode" value="tot"/>
</project>
"""
MANIFEST_BRANCHED_FILES = {
REMOTES_FILE_NAME:
ManifestXml(REMOTE_EXTERNAL_XML),
EXTERNAL_FILE_NAME:
ManifestXml(DEFAULT_BRANCHED_XML, INCLUDE_REMOTES_XML,
PROJECTS_EXTERNAL_BRANCHED_XML),
constants.DEFAULT_MANIFEST:
ManifestXml(INCLUDE_EXTERNAL_XML),
}
MANIFEST_INTERNAL_BRANCHED_FILES = {
REMOTES_FILE_NAME:
ManifestXml(REMOTE_EXTERNAL_XML, REMOTE_INTERNAL_XML),
EXTERNAL_FILE_NAME:
MANIFEST_BRANCHED_FILES[EXTERNAL_FILE_NAME],
INTERNAL_FILE_NAME:
ManifestXml(DEFAULT_BRANCHED_XML, INCLUDE_REMOTES_XML,
PROJECTS_INTERNAL_BRANCHED_XML),
constants.OFFICIAL_MANIFEST:
ManifestXml(INCLUDE_INTERNAL_XML, INCLUDE_EXTERNAL_XML),
constants.DEFAULT_MANIFEST:
ManifestXml(INCLUDE_INTERNAL_XML, INCLUDE_EXTERNAL_XML),
}
FULL_BRANCHED_XML = ManifestXml(
DEFAULT_BRANCHED_XML, REMOTE_INTERNAL_XML, REMOTE_EXTERNAL_XML,
PROJECTS_INTERNAL_BRANCHED_XML, PROJECTS_EXTERNAL_BRANCHED_XML)
class ManifestTestCase(cros_test_lib.TestCase):
"""Test case providing valid manifest test data.
This class generates a diverse collection of manifest XML strings, and
provides convenience methods for reading from those manifests.
"""
def NameFor(self, pid, manifest=None):
"""Return the test project's name.
Args:
pid: The test project ID (e.g. 'chromiumos-overlay').
manifest: The repo_manifest.Manifest to read from.
Uses full_manifest if None.
Returns:
Name of the project, e.g. 'chromeos/manifest-internal'.
"""
return self.ProjectFor(pid, manifest).name
def PathFor(self, pid, manifest=None):
"""Return the test project's path.
Args:
pid: The test project ID (e.g. 'chromiumos-overlay').
manifest: The repo_manifest.Manifest to read from.
Uses full_manifest if None.
Returns:
Path to the project, always of the form '<test path>/<project ID>'.
"""
return self.ProjectFor(pid, manifest).Path()
def PathListRegexFor(self, pid, manifest=None):
"""Return the test project's path as a ListRegex.
Args:
pid: The test project ID (e.g. 'chromiumos-overlay').
manifest: The repo_manifest.Manifest to read from.
Uses full_manifest if None.
Returns:
partial_mock.ListRegex for project path.
"""
return partial_mock.ListRegex('.*/%s' % self.PathFor(pid, manifest))
def RevisionFor(self, pid, manifest=None):
"""Return the test project's revision.
Args:
pid: The test project ID (e.g. 'chromiumos-overlay')
manifest: The repo_manifest.Manifest to read from.
Uses full_manifest if None.
Returns:
      Revision for the project, always of form 'refs/heads/<project ID>'.
"""
return self.ProjectFor(pid, manifest).Revision()
def RemoteFor(self, pid, manifest=None):
"""Return the test project's remote name.
Args:
pid: The test project ID (e.g. 'chromiumos-overlay')
manifest: The repo_manifest.Manifest to read from.
Uses full_manifest if None.
Returns:
Remote name for the project, e.g. 'cros'.
"""
return self.ProjectFor(pid, manifest).Remote().GitName()
def ProjectFor(self, pid, manifest=None):
"""Return the test project's repo_manifest.Project.
Args:
pid: The test project ID (e.g. 'chromiumos-overlay')
manifest: The repo_manifest.Manifest to read from.
Uses full_manifest if None.
Returns:
Corresponding repo_manifest.Project.
"""
manifest = manifest or self.full_manifest
# Project paths always end with the project ID, so use that as key.
match = [p for p in manifest.Projects() if p.Path().endswith(pid)]
assert len(match) == 1
return match[0]
def PidFor(self, project):
"""Return the project's ID.
Args:
project: The repo_manifest.Project object.
Returns:
The project ID, always stored as the last component of its path.
"""
return os.path.basename(project.Path())
def setUp(self):
# Parse and cache the full TOT manifest to take advantage of the
# utility functions in repo_manifest.
self.full_manifest = repo_manifest.Manifest.FromString(FULL_TOT_XML)
# Ditto for the branched manifest.
self.full_branched_manifest = repo_manifest.Manifest.FromString(
FULL_BRANCHED_XML)
class UtilitiesTest(ManifestTestCase, cros_test_lib.MockTestCase):
"""Tests for all top-level utility functions."""
def testCanBranchProjectAcceptsBranchableProjects(self):
"""Test CanBranchProject returns true when project is branchable."""
for project in BRANCHED_PROJECTS:
self.assertTrue(CanBranchProject(self.ProjectFor(project)))
def testCanBranchProjectRejectsNonBranchableProjects(self):
"""Test CanBranchProject returns false when project is not branchable."""
for project in NON_BRANCHED_PROJECTS:
self.assertFalse(CanBranchProject(self.ProjectFor(project)))
def testCanPinProjectAcceptsPinnedProjects(self):
"""Test CanPinProject returns true when project is pinned."""
for project in PINNED_PROJECTS:
self.assertTrue(CanPinProject(self.ProjectFor(project)))
def testCanPinProjectRejectsNonPinnedProjects(self):
"""Test CanPinProject returns false when project is not pinned."""
for project in BRANCHED_PROJECTS + TOT_PROJECTS:
self.assertFalse(CanPinProject(self.ProjectFor(project)))
def testTotMutualExclusivity(self):
"""Test CanBranch/PinProject both return false only when project is TOT."""
for pid in PROJECTS.values():
project = self.ProjectFor(pid)
if not CanBranchProject(project) and not CanPinProject(project):
self.assertIn(pid, TOT_PROJECTS)
class ManifestRepositoryTest(ManifestTestCase, cros_test_lib.MockTestCase):
"""Tests for ManifestRepository functions."""
def GitRevisionMock(self, project):
"""Mock git.GetGitRepoRevision returning fake revision for given project.
Args:
project: Project to get the revision for.
Returns:
The repo HEAD as a string.
"""
return project.Revision()
def FromFileMock(self, path, allow_unsupported_features=False):
"""Forward repo_manifest.FromFile to repo_manifest.FromString.
Args:
path: File path for internal manifest. Used to look up XML in a table.
allow_unsupported_features: See repo_manifest.Manifest.
Returns:
repo_manifest.Manifest created from test data.
"""
return repo_manifest.Manifest.FromString(
MANIFEST_INTERNAL_FILES[os.path.basename(path)],
allow_unsupported_features=allow_unsupported_features)
def PathExistsMock(self, path):
"""Returns true if the fake manifest file exists.
Args:
path: Path to the manifest.
Returns:
True if we have a fake manifest under the given name.
"""
return os.path.basename(path) in MANIFEST_INTERNAL_FILES
def setUp(self):
self.PatchObject(CrosCheckout, 'GitRevision', self.GitRevisionMock)
self.PatchObject(CrosCheckout, 'EnsureProject')
self.PatchObject(repo_manifest.Manifest, 'FromFile', self.FromFileMock)
self.PatchObject(os.path, 'exists', self.PathExistsMock)
self.root = '/root'
self.checkout = CrosCheckout(self.root, manifest=self.full_manifest)
self.project = self.ProjectFor(PROJECTS.MANIFEST_INTERNAL)
self.manifest_repo = ManifestRepository(self.checkout, self.project)
def testRepairManifestDeletesDefaultRevision(self):
"""Test RepairManifest deletes revision attr on <default> and <remote>."""
branches = {
self.PathFor(PROJECTS.MANIFEST_INTERNAL): 'beep',
self.PathFor(PROJECTS.EXPLICIT_BRANCH): 'boop',
}
actual = self.manifest_repo.RepairManifest(INTERNAL_FILE_NAME, branches)
self.assertIsNone(actual.Default().revision)
def testRepairManifestDeletesRemoteRevision(self):
"""Test RepairManifest deletes revision attr on <default> and <remote>."""
branches = {
self.PathFor(PROJECTS.MANIFEST_INTERNAL): 'beep',
self.PathFor(PROJECTS.EXPLICIT_BRANCH): 'boop',
}
actual = self.manifest_repo.RepairManifest(REMOTES_FILE_NAME, branches)
self.assertIsNone(actual.GetRemote(REMOTES.CROS_INTERNAL).revision)
def testRepairManifestUpdatesBranchedProjectRevisions(self):
"""Test RepairManifest updates revision=branch on branched projects."""
branches = {
self.PathFor(PROJECTS.MANIFEST_INTERNAL): 'branch-a',
self.PathFor(PROJECTS.EXPLICIT_BRANCH): 'branch-b'
}
actual = self.manifest_repo.RepairManifest(INTERNAL_FILE_NAME, branches)
manifest_internal = actual.GetUniqueProject(
self.NameFor(PROJECTS.MANIFEST_INTERNAL))
self.assertEqual(manifest_internal.revision, 'refs/heads/branch-a')
explicit_branch = actual.GetUniqueProject(
self.NameFor(PROJECTS.EXPLICIT_BRANCH))
self.assertEqual(explicit_branch.revision, 'refs/heads/branch-b')
def testRepairManifestUpdatesPinnedProjectRevisions(self):
"""Test RepairManifest retains revision attr on pinned projects."""
branches = {
self.PathFor(PROJECTS.MANIFEST_INTERNAL): 'irrelevant',
self.PathFor(PROJECTS.EXPLICIT_BRANCH): 'should-not-matter'
}
actual = self.manifest_repo.RepairManifest(INTERNAL_FILE_NAME, branches)
proj = actual.GetUniqueProject(self.NameFor(PROJECTS.EXPLICIT_PINNED))
self.assertEqual(proj.revision, self.RevisionFor(PROJECTS.EXPLICIT_PINNED))
def testRepairManifestUpdatesTotProjectRevisions(self):
"""Test RepairManifest sets revision=refs/heads/master on TOT projects."""
branches = {
self.PathFor(PROJECTS.MANIFEST_INTERNAL): 'irrelevant',
self.PathFor(PROJECTS.EXPLICIT_BRANCH): 'should-not-matter'
}
actual = self.manifest_repo.RepairManifest(INTERNAL_FILE_NAME, branches)
proj = actual.GetUniqueProject(self.NameFor(PROJECTS.EXPLICIT_TOT))
self.assertEqual(proj.revision, 'refs/heads/master')
def testRepairManifestsOnDisk(self):
"""Test RepairManifestsOnDisk writes all manifests."""
repair = self.PatchObject(
ManifestRepository, 'RepairManifest', return_value=self.full_manifest)
write = self.PatchObject(repo_manifest.Manifest, 'Write')
branches = [
ProjectBranch(self.ProjectFor(PROJECTS.MANIFEST_INTERNAL), 'branch-a'),
ProjectBranch(self.ProjectFor(PROJECTS.EXPLICIT_BRANCH), 'branch-b'),
]
branches_by_path = {
self.PathFor(PROJECTS.MANIFEST_INTERNAL): 'branch-a',
self.PathFor(PROJECTS.EXPLICIT_BRANCH): 'branch-b',
}
self.manifest_repo.RepairManifestsOnDisk(branches)
self.assertCountEqual(repair.call_args_list, [
mock.call('/root/manifest-internal/default.xml', branches_by_path),
mock.call('/root/manifest-internal/official.xml', branches_by_path),
mock.call('/root/manifest-internal/internal.xml', branches_by_path),
mock.call('/root/manifest-internal/external.xml', branches_by_path),
mock.call('/root/manifest-internal/_remotes.xml', branches_by_path),
])
self.assertCountEqual(write.call_args_list, [
mock.call('/root/manifest-internal/default.xml'),
mock.call('/root/manifest-internal/official.xml'),
mock.call('/root/manifest-internal/internal.xml'),
mock.call('/root/manifest-internal/external.xml'),
mock.call('/root/manifest-internal/_remotes.xml'),
])
class CrosCheckoutTest(ManifestTestCase, cros_test_lib.MockTestCase):
"""Tests for nontrivial methods in CrosCheckout."""
def setUp(self):
self.rc_mock = cros_test_lib.RunCommandMock()
self.rc_mock.SetDefaultCmdResult()
self.StartPatcher(self.rc_mock)
self.PatchObject(repo_util.Repository, '__init__', return_value=None)
self.PatchObject(
repo_util.Repository, 'Manifest', return_value=self.full_manifest)
self.PatchObject(
config_lib,
'GetSiteParams',
return_value=config_lib.AttrDict(
EXTERNAL_MANIFEST_VERSIONS_PATH='manifest-versions',
INTERNAL_MANIFEST_VERSIONS_PATH='manifest-versions-internal',
))
self.make_dirs = self.PatchObject(osutils, 'SafeMakedirs')
self.initialize = self.PatchObject(repo_util.Repository, 'Initialize')
self.match_branch_name = self.PatchObject(
git, 'MatchBranchName', return_value=['branch'])
self.get_current_branch = self.PatchObject(
git, 'GetCurrentBranch', return_value='local-branch')
self.get_git_repo_revision = self.PatchObject(
git, 'GetGitRepoRevision', return_value='abcdef')
self.commit_exists = self.PatchObject(
git, 'DoesCommitExistInRepo', return_value=False)
self.from_repo = self.PatchObject(
VersionInfo, 'from_repo', return_value=VersionInfo('1.2.3'))
self.increment_version = self.PatchObject(VersionInfo, 'IncrementVersion')
self.update_version = self.PatchObject(VersionInfo, 'UpdateVersionFile')
self.PatchObject(constants, 'CHROMITE_DIR', new='/run-root/chromite')
def testInitialize(self):
"""Test Initialize calls the correct functions with the correct data."""
self.PatchObject(git, 'FindRepoCheckoutRoot', return_value=None)
checkout = CrosCheckout.Initialize(
'/root',
'manifest.com',
repo_url='repo',
repo_branch='default',
groups='all')
self.assertEqual(checkout.root, '/root')
self.assertEqual(checkout.manifest_url, 'manifest.com')
self.assertEqual(checkout.repo_url, 'repo')
self.assertEqual(checkout.groups, 'all')
self.assertEqual(self.make_dirs.call_count, 1)
self.assertEqual(self.initialize.call_args_list, [
mock.call(
'/root',
'manifest.com',
repo_url='repo',
repo_branch='default',
groups='all')
])
def testInitializeNoRepoInit(self):
"""Test Initialize does not call repo init when already initialized."""
self.PatchObject(git, 'FindRepoCheckoutRoot', return_value='/root')
checkout = CrosCheckout.Initialize(
'/root', 'manifest.com', repo_url='repo', repo_branch='default')
self.assertEqual(checkout.root, '/root')
self.assertEqual(checkout.manifest_url, 'manifest.com')
self.assertEqual(checkout.repo_url, 'repo')
self.assertFalse(self.initialize.call_count)
def testSyncVersionMinimal(self):
"""Test SyncVersion passes minimal args to repo_sync_manifest."""
checkout = CrosCheckout('/root')
checkout.SyncVersion('1.2.3')
self.rc_mock.assertCommandContains([
'/run-root/chromite/scripts/repo_sync_manifest', '--repo-root', '/root',
'--manifest-versions-int', '/root/manifest-versions-internal',
'--manifest-versions-ext', '/root/manifest-versions', '--version',
'1.2.3'
])
def testSyncVersionAllOptions(self):
"""Test SyncVersion passes all args to repo_sync_manifest."""
checkout = CrosCheckout(
'/root', repo_url='repo.com', manifest_url='manifest.com')
checkout.SyncVersion('1.2.3')
self.rc_mock.assertCommandContains([
'/run-root/chromite/scripts/repo_sync_manifest', '--repo-root', '/root',
'--manifest-versions-int', '/root/manifest-versions-internal',
'--manifest-versions-ext', '/root/manifest-versions', '--version',
'1.2.3', '--repo-url', 'repo.com', '--manifest-url', 'manifest.com'
])
def testSyncBranchMinimal(self):
"""Test SyncBranch passes minimal args to repo_sync_manifest."""
checkout = CrosCheckout('/root')
checkout.SyncBranch('branch')
self.rc_mock.assertCommandContains([
'/run-root/chromite/scripts/repo_sync_manifest', '--repo-root', '/root',
'--branch', 'branch'
])
def testSyncBranchAllOptions(self):
"""Test SyncBranch passes all args to repo_sync_manifest."""
checkout = CrosCheckout(
'/root', repo_url='repo.com', manifest_url='manifest.com')
checkout.SyncBranch('branch')
self.rc_mock.assertCommandContains([
'/run-root/chromite/scripts/repo_sync_manifest', '--repo-root', '/root',
'--branch', 'branch', '--repo-url', 'repo.com', '--manifest-url',
'manifest.com'
])
def testSyncFileMinimal(self):
"""Test SyncFile passes correct args to repo_sync_manifest."""
checkout = CrosCheckout('/root')
checkout.SyncFile('manifest.xml')
manifest_path = os.path.abspath('manifest.xml')
self.rc_mock.assertCommandContains(
['repo', 'sync', '--manifest-name', manifest_path], cwd='/root')
def testSyncFileAllOptions(self):
"""Test SyncFile passes all args to repo_sync_manifest."""
checkout = CrosCheckout(
'/root', repo_url='repo.com', manifest_url='manifest.com')
checkout.SyncFile('manifest.xml')
manifest_path = os.path.abspath('manifest.xml')
self.rc_mock.assertCommandContains(
['repo', 'sync', '--manifest-name', manifest_path], cwd='/root')
def testAbsolutePath(self):
"""Test AbsolutePath joins root to given path."""
checkout = CrosCheckout('/foo')
self.assertEqual(checkout.AbsolutePath('bar'), '/foo/bar')
def testAbsoluteProjectPath(self):
"""Test AbsoluteProjectPath joins root and project path."""
checkout = CrosCheckout('/foo')
project = self.ProjectFor(PROJECTS.MANIFEST)
actual = checkout.AbsoluteProjectPath(project, 'bar')
self.assertEqual(actual, '/foo/manifest/bar')
def testEnsureProjectBadProject(self):
"""Test EnsurePath raises error if project does not exist."""
self.PatchObject(os.path, 'exists', return_value=False)
checkout = CrosCheckout('/foo')
project = self.ProjectFor(PROJECTS.MANIFEST)
with self.assertRaises(BranchError):
checkout.EnsureProject(project)
def testEnsureProjectGoodProject(self):
"""Test EnsurePath raises error if project does not exist."""
self.PatchObject(os.path, 'exists', return_value=True)
checkout = CrosCheckout('/foo')
project = self.ProjectFor(PROJECTS.MANIFEST)
checkout.EnsureProject(project)
def testReadVersion(self):
"""Test ReadVersion does not modify VersionInfo."""
checkout = CrosCheckout('/root')
vinfo = checkout.ReadVersion()
self.assertEqual(vinfo.build_number, '1')
self.assertEqual(vinfo.branch_build_number, '2')
self.assertEqual(vinfo.patch_number, '3')
def testBumpVersionMinimal(self):
"""Test BumpVersion with minimal arguments."""
checkout = CrosCheckout('/root')
checkout.BumpVersion('patch', 'my-branch', 'My message.')
self.assertEqual(self.from_repo.call_args_list,
[mock.call('/root', incr_type='patch')])
self.assertEqual(self.increment_version.call_count, 1)
self.assertEqual(self.update_version.call_args_list, [
mock.call(
'My message.',
dry_run=True,
push_to=git.RemoteRef('cros', 'refs/heads/my-branch'))
])
def testBumpVersionAllOptions(self):
"""Test BumpVersion properly defers to manifest_version functions."""
checkout = CrosCheckout('/root')
checkout.BumpVersion(
'patch', 'my-branch', 'My message.', fetch=True, dry_run=False)
self.rc_mock.assertCommandContains(
['git', 'fetch', 'cros', 'refs/heads/my-branch'],
cwd='/root/src/third_party/chromiumos-overlay')
self.rc_mock.assertCommandContains(
['git', 'checkout', '-B', 'my-branch', 'FETCH_HEAD'],
cwd='/root/src/third_party/chromiumos-overlay')
self.assertEqual(self.from_repo.call_args_list,
[mock.call('/root', incr_type='patch')])
self.assertEqual(self.increment_version.call_count, 1)
self.assertEqual(self.update_version.call_args_list, [
mock.call(
'My message.',
dry_run=False,
push_to=git.RemoteRef('cros', 'refs/heads/my-branch'))
])
def testRunGit(self):
"""Test RunGit runs git command in project directory."""
checkout = CrosCheckout('/root')
project = self.ProjectFor(PROJECTS.MANIFEST)
checkout.RunGit(project, ['branch', '-m', 'foo'])
self.rc_mock.assertCommandContains(['git', 'branch', '-m', 'foo'],
cwd='/root/manifest',
print_cmd=True)
def testGitRevision(self):
"""Test GitRevision properly forwards project path."""
checkout = CrosCheckout('/root')
project = self.ProjectFor(PROJECTS.MANIFEST)
actual = checkout.GitRevision(project)
self.assertEqual(self.get_git_repo_revision.call_args_list,
[mock.call('/root/manifest')])
self.assertEqual(actual, 'abcdef')
def testGitBranch(self):
"""Test GitBranch properly forwards project path."""
checkout = CrosCheckout('/root')
project = self.ProjectFor(PROJECTS.MANIFEST)
actual = checkout.GitBranch(project)
self.assertEqual(self.get_current_branch.call_args_list,
[mock.call('/root/manifest')])
self.assertEqual(actual, 'local-branch')
def testBranchExists(self):
checkout = CrosCheckout('/root')
project = self.ProjectFor(PROJECTS.MANIFEST_INTERNAL)
actual = checkout.BranchExists(project, 'my-branch')
self.assertTrue(actual)
self.assertEqual(self.match_branch_name.call_args_list,
[mock.call('/root/manifest-internal', 'my-branch')])
class BranchTest(ManifestTestCase, cros_test_lib.MockTestCase):
"""Tests core functionality of Branch class."""
def SetVersion(self, version):
"""Mock VersionInfo.from_repo to always return the given version.
Args:
version: The version string to return.
"""
self.PatchObject(
CrosCheckout, 'ReadVersion', return_value=VersionInfo(version))
def AssertBranchPushed(self, project, branch):
"""Assert given branch pushed to remote for given project.
Args:
project: Project ID.
branch: Expected name for the branch.
"""
self.rc_mock.assertCommandContains(
['git', 'push',
self.RemoteFor(project),
'HEAD:refs/heads/%s' % branch],
cwd=self.PathListRegexFor(project))
def AssertRemoteBranchDeleted(self, project, branch):
"""Assert given branch deleted on remote for given project.
Args:
project: Project ID.
branch: Expected name for the branch.
"""
self.rc_mock.assertCommandContains([
'git', 'push',
self.RemoteFor(project), '--delete',
'refs/heads/%s' % branch
],
cwd=self.PathListRegexFor(project))
def AssertNoPush(self, project):
"""Assert no push operation run inside the given project.
Args:
project: Project ID.
"""
self.rc_mock.assertCommandContains(['git', 'push'],
cwd=self.PathListRegexFor(project),
expected=False)
def AssertManifestRepairsCommitted(self):
"""Assert commits made to all manifest repositories."""
for manifest_project in MANIFEST_PROJECTS:
self.rc_mock.assertCommandContains(['git', 'commit', '-a'],
cwd=partial_mock.ListRegex(
'.*/%s' % manifest_project))
def setUp(self):
self.rc_mock = cros_test_lib.RunCommandMock()
self.rc_mock.SetDefaultCmdResult()
self.StartPatcher(self.rc_mock)
# ManifestRepository and CrosCheckout tested separately, so mock them.
self.PatchObject(ManifestRepository, 'RepairManifestsOnDisk')
self.PatchObject(
CrosCheckout, 'ReadVersion', return_value=VersionInfo('1.2.0'))
self.bump_version = self.PatchObject(CrosCheckout, 'BumpVersion')
# Fake checkouts for each test.
self.checkout = CrosCheckout('/', manifest=self.full_manifest)
self.branched_checkout = CrosCheckout(
'/', manifest=self.full_branched_manifest)
def testCreateRepairsManifests(self):
"""Test Create commits repairs to manifest repositories."""
Branch(self.checkout, 'new-branch').Create()
self.AssertManifestRepairsCommitted()
def testCreateBumpsBranchNumber(self):
"""Test WhichVersionShouldBump bumps branch number on X.0.0 version."""
self.SetVersion('1.0.0')
Branch(self.checkout, 'new-branch').Create()
self.assertEqual(self.bump_version.call_args_list, [
mock.call('branch', 'new-branch', mock.ANY, dry_run=True),
mock.call('build', 'master', mock.ANY, dry_run=True)
])
def testCreateBumpsPatchNumber(self):
"""Test WhichVersionShouldBump bumps patch number on X.X.0 version."""
self.SetVersion('1.2.0')
Branch(self.checkout, 'new-branch').Create()
self.assertEqual(self.bump_version.call_args_list, [
mock.call('patch', 'new-branch', mock.ANY, dry_run=True),
mock.call('branch', 'master', mock.ANY, dry_run=True)
])
def testCreateDiesOnNonzeroPatchNumber(self):
"""Test WhichVersionShouldBump dies on X.X.X version."""
self.SetVersion('1.2.3')
with self.assertRaises(AssertionError):
Branch(self.checkout, 'new-branch').Create()
def testCreatePushesToRemote(self):
"""Test Create pushes new branch to remote."""
Branch(self.checkout, 'new-branch').Create(push=True)
for project in SINGLE_CHECKOUT_PROJECTS:
self.AssertBranchPushed(project, 'new-branch')
for project in MULTI_CHECKOUT_PROJECTS:
self.AssertBranchPushed(project, 'new-branch-' + project)
for project in NON_BRANCHED_PROJECTS:
self.AssertNoPush(project)
def testRenameRepairsManifests(self):
"""Test Rename commits repairs to manifest repositories."""
Branch(self.branched_checkout, 'new-branch').Rename('old-branch')
self.AssertManifestRepairsCommitted()
def testRenamePushesNewBranch(self):
"""Test Rename pushes the new branch to remote."""
Branch(self.branched_checkout, 'new-branch').Rename('old-branch', push=True)
for project in SINGLE_CHECKOUT_PROJECTS:
self.AssertBranchPushed(project, 'new-branch')
for project in MULTI_CHECKOUT_PROJECTS:
self.AssertBranchPushed(project, 'new-branch-' + project)
for project in NON_BRANCHED_PROJECTS:
self.AssertNoPush(project)
def testRenamePushesDeletionOfOldBranch(self):
"""Test rename deletes old branch on remote."""
Branch(self.branched_checkout, 'new-branch').Rename('old-branch', push=True)
for project in SINGLE_CHECKOUT_PROJECTS:
self.AssertRemoteBranchDeleted(project, 'old-branch')
for project in MULTI_CHECKOUT_PROJECTS:
self.AssertRemoteBranchDeleted(project, 'old-branch-' + project)
for project in NON_BRANCHED_PROJECTS:
self.AssertNoPush(project)
def testDeleteRequiresForceForRemotePush(self):
"""Verify Delete does nothing when push is True but force is False."""
with self.assertRaises(BranchError):
Branch(self.branched_checkout, 'old-branch').Delete(push=True)
for project in PROJECTS.values():
self.AssertNoPush(project)
def testDeletePushesDeletions(self):
"""Verify delete deletes remote branches when push=force=True."""
Branch(self.branched_checkout, 'old-branch').Delete(push=True, force=True)
for project in SINGLE_CHECKOUT_PROJECTS:
self.AssertRemoteBranchDeleted(project, 'old-branch')
for project in MULTI_CHECKOUT_PROJECTS:
self.AssertRemoteBranchDeleted(project, 'old-branch-' + project)
for project in NON_BRANCHED_PROJECTS:
self.AssertNoPush(project)
class StandardBranchTest(ManifestTestCase, cros_test_lib.MockTestCase):
"""Tests branch logic specific to the standard branches."""
def SetVersion(self, milestone, version):
"""Mock VersionInfo to always return the given versions.
Args:
milestone: The Chrome branch number, e.g. '47'
version: The manifest version string, e.g. '1.2.0'
"""
self.PatchObject(
CrosCheckout,
'ReadVersion',
return_value=VersionInfo(version, milestone))
def setUp(self):
self.checkout = CrosCheckout('/', manifest=self.full_manifest)
def testGenerateNameWithoutBranchVersion(self):
"""Test name generation on a X.0.0 version."""
self.SetVersion('12', '3.0.0')
branch_names = {
'release-R12-3.B': ReleaseBranch,
'factory-3.B': FactoryBranch,
'firmware-3.B': FirmwareBranch,
'stabilize-3.B': StabilizeBranch,
}
for branch_name, branch_type in branch_names.items():
self.assertEqual(branch_type(self.checkout).name, branch_name)
def testGenerateNameWithBranchVersion(self):
"""Test name generation on a X.X.0 version."""
self.SetVersion('12', '3.4.0')
branch_names = {
'release-R12-3.4.B': ReleaseBranch,
'factory-3.4.B': FactoryBranch,
'firmware-3.4.B': FirmwareBranch,
'stabilize-3.4.B': StabilizeBranch,
}
for branch_name, cls in branch_names.items():
self.assertEqual(cls(self.checkout).name, branch_name)
def testGenerateNameWithDescriptor(self):
"""Test name generation with a descriptor."""
self.SetVersion('12', '3.4.0')
branch_names = {
'release-board-R12-3.4.B': ReleaseBranch,
'factory-board-3.4.B': FactoryBranch,
'firmware-board-3.4.B': FirmwareBranch,
'stabilize-board-3.4.B': StabilizeBranch,
}
for branch_name, cls in branch_names.items():
self.assertEqual(cls(self.checkout, 'board').name, branch_name)
class MockBranchCommand(command_unittest.MockCommand):
"""Mock out the `cros branch` command."""
TARGET = 'chromite.cli.cros.cros_branch.BranchCommand'
TARGET_CLASS = BranchCommand
COMMAND = 'branch'
class BranchCommandTest(ManifestTestCase, cros_test_lib.MockTestCase):
"""Tests for BranchCommand functions."""
def RunCommandMock(self, args):
"""Patch the mock command and run it.
Args:
args: List of arguments for the command.
"""
self.cmd = MockBranchCommand(args)
self.StartPatcher(self.cmd)
self.cmd.inst.Run()
def AssertSynced(self, args):
"""Assert repo_sync_manifest was run with at least the given args.
Args:
args: Expected args for repo_sync_manifest.
"""
self.cmd.rc_mock.assertCommandContains(
[partial_mock.ListRegex('.*/repo_sync_manifest')] + args)
def AssertSyncedUsingRepo(self, args):
"""Assert repo sync was run with at least the given args.
Args:
args: Expected args for repo sync.
"""
self.cmd.rc_mock.assertCommandContains(['repo', 'sync'] + args)
def AssertNoDangerousOptions(self):
"""Assert that force and push were not set."""
self.assertFalse(self.cmd.inst.options.force)
self.assertFalse(self.cmd.inst.options.push)
def setUp(self):
self.cmd = None
self.create = self.PatchObject(Branch, 'Create')
self.PatchObject(ReleaseBranch, 'Create')
self.PatchObject(Branch, 'Rename')
self.PatchObject(Branch, 'Delete')
self.PatchObject(repo_util, 'Repository')
self.PatchObject(
repo_util.Repository, 'Manifest', return_value=self.full_manifest)
self.PatchObject(
CrosCheckout,
'Initialize',
return_value=CrosCheckout('', manifest=self.full_manifest))
self.PatchObject(
CrosCheckout, 'ReadVersion', return_value=VersionInfo('1.2.0'))
self.PatchObject(CrosCheckout, 'BranchExists', return_value=False)
self.get_input = self.PatchObject(
cros_build_lib, 'GetInput', return_value='yes')
td_context = self.PatchObject(CrosCheckout, 'TempRoot')
td_context.return_value.__enter__.return_value = '/td'
def testCreateDiesWhenNonzeroPatchNumber(self):
"""Test create validates zero patch number."""
with self.assertRaises(BranchError):
self.RunCommandMock(['create', '--version', '1.2.3', '--release'])
def testCreateDiesWhenVersionAlreadyBranched(self):
"""Test create validates version has no existing branches."""
branch_exists = self.PatchObject(
CrosCheckout, 'BranchExists', return_value=True)
with self.assertRaises(BranchError):
self.RunCommandMock(['create', '--version', '1.2.0', '--release'])
self.assertEqual(branch_exists.call_args_list,
[mock.call(mock.ANY, '.*-1\\.2\\.B$')])
def testCreateWithForceDoesNotCheckVersion(self):
"""Test create validates version has no existing branches."""
branch_exists = self.PatchObject(
CrosCheckout, 'BranchExists', return_value=True)
self.RunCommandMock(
['--force', 'create', '--version', '1.2.0', '--release'])
self.assertEqual(branch_exists.call_args_list,
[mock.call(mock.ANY, '.*-1\\.2\\.B$')])
def testCreateConfirmsGeneratedBranchNameNoAnswer(self):
"""Test create confirms generated branch names with users."""
self.get_input = self.PatchObject(
cros_build_lib, 'GetInput', return_value='no')
self.RunCommandMock(['create', '--version', '1.2.0', '--factory'])
self.assertEqual(self.get_input.call_args_list, [
mock.call(
'\nNew branch will be named factory-1.2.B. Continue? (yes/No)? ')
])
self.assertFalse(self.create.call_count)
def testCreateDoesNotConfirmGeneratedBranchNameWithYesFlag(self):
"""Tests --yes flag (which skips the name confirmation prompt)."""
self.get_input = self.PatchObject(cros_build_lib, 'GetInput')
self.RunCommandMock(['create', '--version', '1.2.0', '--factory', '--yes'])
self.assertEqual(self.get_input.call_args_list, [])
self.assertEqual(self.create.call_count, 1)
def testCreateReleaseCommandParses(self):
"""Test `cros branch create` parses with '--release' flag."""
self.RunCommandMock(['create', '--version', '1.2.0', '--release'])
self.assertIs(self.cmd.inst.options.cls, ReleaseBranch)
self.AssertNoDangerousOptions()
def testCreateFactoryCommandParses(self):
"""Test `cros branch create` parses with '--factory' flag."""
self.RunCommandMock(['create', '--version', '1.2.0', '--factory'])
self.assertIs(self.cmd.inst.options.cls, FactoryBranch)
self.AssertNoDangerousOptions()
def testCreateFirmwareCommandParses(self):
"""Test `cros branch create` parses with '--firmware' flag."""
self.RunCommandMock(['create', '--version', '1.2.0', '--firmware'])
self.assertIs(self.cmd.inst.options.cls, FirmwareBranch)
self.AssertNoDangerousOptions()
def testCreateStabilizeCommandParses(self):
"""Test `cros branch create` parses with '--stabilize' flag."""
self.RunCommandMock(['create', '--version', '1.2.0', '--stabilize'])
self.assertIs(self.cmd.inst.options.cls, StabilizeBranch)
self.AssertNoDangerousOptions()
def testCreateCustomCommandParses(self):
"""Test `cros branch create` parses with '--custom' flag."""
self.RunCommandMock(['create', '--version', '1.2.0', '--custom', 'branch'])
self.assertEqual(self.cmd.inst.options.name, 'branch')
self.assertIsNone(self.cmd.inst.options.cls)
self.AssertNoDangerousOptions()
def testCreateCustomCannotBeUsedWithDescriptor(self):
"""Test `cros branch create` does not allow --descriptor with --custom."""
with self.assertRaises(BranchError):
self.RunCommandMock([
'create', '--version', '1.2.0', '--custom', 'branch', '--descriptor',
'blah'
])
def testCreateSyncsToFile(self):
"""Test `cros branch create` calls repo_sync_manifest to sync to file."""
self.RunCommandMock(['create', '--file', 'manifest.xml', '--stabilize'])
manifest_path = os.path.abspath('manifest.xml')
self.AssertSyncedUsingRepo(['--manifest-name', manifest_path])
def testCreateSyncsToVersion(self):
"""Test `cros branch create` calls repo_sync_manifest to sync to version."""
self.RunCommandMock(['create', '--version', '1.2.0', '--stabilize'])
self.AssertSynced(['--version', '1.2.0'])
def testRenameSyncsToBranch(self):
"""Test `cros branch rename` calls repo_sync_manifest to sync to branch."""
self.RunCommandMock(['rename', 'branch', 'new-branch'])
self.AssertSynced(['--branch', 'branch'])
def testDeleteSyncsToBranch(self):
"""Test `cros branch delete` calls repo_sync_manifest to sync to branch."""
self.RunCommandMock(['delete', 'branch'])
self.AssertSynced(['--branch', 'branch'])
|
<filename>src/app/beer_garden/api/http/base_handler.py<gh_stars>0
# -*- coding: utf-8 -*-
import asyncio
import datetime
import json
import re
import socket
from typing import Union
from brewtils.errors import (
AuthorizationRequired,
ConflictError,
ModelError,
ModelValidationError,
NotFoundError,
RequestForbidden,
RequestPublishException,
WaitExceededError,
)
from marshmallow.exceptions import ValidationError as MarshmallowValidationError
from mongoengine.errors import DoesNotExist, NotUniqueError
from mongoengine.errors import ValidationError as MongoValidationError
from pymongo.errors import DocumentTooLarge
from tornado.web import HTTPError, RequestHandler
import beer_garden.api.http
import beer_garden.config as config
import beer_garden.db.mongo.models
from beer_garden.api.http.exceptions import BaseHTTPError
from beer_garden.api.http.metrics import http_api_latency_total
from beer_garden.errors import (
EndpointRemovedException,
NotFoundException,
NotUniqueException,
RoutingException,
RoutingRequestException,
)
async def event_wait(evt, timeout):
"""Helper method to add a timeout to an asyncio wait"""
try:
await asyncio.wait_for(evt.wait(), timeout)
except asyncio.TimeoutError:
pass
return evt.is_set()
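# A minimal usage sketch (names are illustrative only): wait up to five seconds
# for an asyncio.Event to be set, then branch on the returned flag.
#
#   completed = await event_wait(some_event, timeout=5)
#   if not completed:
#       ...handle the timeout...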
class BaseHandler(RequestHandler):
"""Base handler from which all handlers inherit"""
MONGO_ID_PATTERN = r".*/([0-9a-f]{24}).*"
charset_re = re.compile(r"charset=(.*)$")
error_map = {
MarshmallowValidationError: {"status_code": 400},
MongoValidationError: {"status_code": 400},
ModelError: {"status_code": 400},
RoutingRequestException: {"status_code": 400},
ModelValidationError: {"status_code": 400},
ValueError: {"status_code": 400},
AuthorizationRequired: {"status_code": 401},
RequestForbidden: {"status_code": 403},
DoesNotExist: {"status_code": 404, "message": "Resource does not exist"},
NotFoundError: {"status_code": 404},
NotFoundException: {"status_code": 404},
WaitExceededError: {"status_code": 408, "message": "Max wait time exceeded"},
ConflictError: {"status_code": 409},
NotUniqueException: {"status_code": 409},
NotUniqueError: {"status_code": 409, "message": "Resource already exists"},
EndpointRemovedException: {"status_code": 410, "message": "Endpoint removed"},
DocumentTooLarge: {"status_code": 413, "message": "Resource too large"},
RequestPublishException: {"status_code": 502},
RoutingException: {"status_code": 500},
socket.timeout: {"status_code": 504, "message": "Backend request timed out"},
}
def set_default_headers(self):
"""Headers set here will be applied to all responses"""
self.set_header("BG-Version", beer_garden.__version__)
if config.get("ui.cors_enabled"):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "Content-Type")
self.set_header(
"Access-Control-Allow-Methods", "GET, POST, PATCH, DELETE, OPTIONS"
)
@property
def prometheus_endpoint(self):
"""Removes Mongo ID from endpoint."""
to_return = self.request.path.rstrip("/")
for mongo_id in re.findall(self.MONGO_ID_PATTERN, self.request.path):
to_return = to_return.replace(mongo_id, "<ID>")
return to_return
@property
def client(self):
return self.settings["client"]
def prepare(self):
"""Called before each verb handler"""
# Used for calculating request handling duration
self.request.created_time = datetime.datetime.utcnow()
content_type = self.request.headers.get("content-type", "")
if self.request.method.upper() in ["POST", "PATCH"] and content_type:
content_type = content_type.split(";")
self.request.mime_type = content_type[0]
if self.request.mime_type not in [
"application/json",
"application/x-www-form-urlencoded",
]:
raise ModelValidationError("Unsupported or missing content-type header")
# Attempt to parse out the charset and decode the body, default to utf-8
charset = "utf-8"
if len(content_type) > 1:
search_result = self.charset_re.search(content_type[1])
if search_result:
charset = search_result.group(1)
self.request.charset = charset
self.request.decoded_body = self.request.body.decode(charset)
def on_finish(self):
"""Called after a handler completes processing"""
# Latency measurement for blocking request creation just muddies the waters
if not getattr(self.request, "ignore_latency", False):
timedelta = datetime.datetime.utcnow() - self.request.created_time
http_api_latency_total.labels(
method=self.request.method.upper(),
route=self.prometheus_endpoint,
status=self.get_status(),
).observe(timedelta.total_seconds())
def options(self, *args, **kwargs):
if config.get("ui.cors_enabled"):
self.set_status(204)
else:
raise HTTPError(403, reason="CORS is disabled")
def write_error(self, status_code, **kwargs):
"""Transform an exception into a response.
This protects controllers from having to write a lot of the same code over and
over and over. Controllers can, of course, overwrite error handlers and return
their own responses if necessary, but generally, this is where error handling
should occur.
When an exception is handled this function makes two passes through error_map.
The first pass is to see if the exception type can be matched exactly. If there
is no exact type match the second pass will attempt to match using isinstance.
If a message is provided in the error_map it takes precedence over the
exception message.
***NOTE*** Nontrivial inheritance trees will almost definitely break. This is a
BEST EFFORT using a simple isinstance check on an unordered data structure. So
if an exception class has both a parent and a grandparent in the error_map
there is no guarantee about which message / status code will be chosen. The
same applies to exceptions that use multiple inheritance.
***LOGGING***
An exception raised in a controller method will generate logging to the
tornado.application logger that includes a stacktrace. That logging occurs
before this method is invoked. The result of this method will generate logging
to the tornado.access logger as usual. So there is no need to do additional
logging here as the 'real' exception will already have been logged.
:param status_code: a status_code that will be used if no match is found in the
error map
:return: None
"""
code = 0
message = ""
if "exc_info" in kwargs:
typ3 = kwargs["exc_info"][0]
e = kwargs["exc_info"][1]
error_dict = None
if typ3 in self.error_map.keys():
error_dict = self.error_map[typ3]
else:
for error_type in self.error_map.keys():
if isinstance(e, error_type):
error_dict = self.error_map[error_type]
break
if error_dict:
# Thrift exceptions should have a message attribute
message = error_dict.get("message", getattr(e, "message", str(e)))
code = error_dict.get("status_code", 500)
elif issubclass(typ3, BaseHTTPError):
message = typ3.reason
code = typ3.status_code
elif config.get("ui.debug_mode"):
message = str(e)
code = code or status_code or 500
message = message or (
"Encountered unknown exception. Please check "
"with your System Administrator."
)
self.set_header("Content-Type", "application/json; charset=UTF-8")
self.set_status(code)
self.finish({"message": message})
@property
def request_body(self) -> Union[dict, None]:
"""A convenience helper that handles transforming the request.decoded_body into
a proper dict
Returns:
dict: if request has a decoded_body
Raises:
HTTPError: request has no decoded_body
"""
if hasattr(self.request, "decoded_body"):
return json.loads(self.request.decoded_body)
else:
raise HTTPError(
400,
reason="A body was expected with the request, but none was provided.",
)
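

# A minimal, hypothetical subclass sketch showing how a concrete handler might
# build on BaseHandler; the endpoint and resource names here are illustrative
# only and not part of beer_garden's actual API surface.
#
#   class ExampleAPI(BaseHandler):
#       def post(self):
#           payload = self.request_body  # dict parsed from request.decoded_body
#           ...create the resource from payload...
#           self.set_status(201)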
|
<gh_stars>100-1000
import logging
import base64
import datetime
import requests
from Crypto.Cipher import DES3
from zeep import Transport, Client
from azbankgateways.banks import BaseBank
from azbankgateways.exceptions import SettingDoesNotExist, BankGatewayConnectionError
from azbankgateways.exceptions.exceptions import BankGatewayRejectPayment
from azbankgateways.models import CurrencyEnum, BankType, PaymentStatus
from azbankgateways.utils import get_json
class SEP(BaseBank):
_merchant_code = None
_terminal_code = None
def __init__(self, **kwargs):
super(SEP, self).__init__(**kwargs)
self.set_gateway_currency(CurrencyEnum.IRR)
self._token_api_url = 'https://sep.shaparak.ir/MobilePG/MobilePayment'
self._payment_url = 'https://sep.shaparak.ir/OnlinePG/OnlinePG'
self._verify_api_url = 'https://verify.sep.ir/Payments/ReferencePayment.asmx?WSDL'
def get_bank_type(self):
return BankType.SEP
def set_default_settings(self):
for item in ['MERCHANT_CODE', 'TERMINAL_CODE']:
if item not in self.default_setting_kwargs:
raise SettingDoesNotExist()
setattr(self, f'_{item.lower()}', self.default_setting_kwargs[item])
def get_pay_data(self):
data = {
'Action': 'Token',
'Amount': self.get_gateway_amount(),
'Wage': 0,
'TerminalId': self._merchant_code,
'ResNum': self.get_tracking_code(),
'RedirectURL': self._get_gateway_callback_url(),
'CellNumber': self.get_mobile_number(),
}
return data
def prepare_pay(self):
super(SEP, self).prepare_pay()
def pay(self):
super(SEP, self).pay()
data = self.get_pay_data()
response_json = self._send_data(self._token_api_url, data)
if str(response_json['status']) == '1':
token = response_json['token']
self._set_reference_number(token)
else:
            logging.critical("SEP gateway rejected payment")
raise BankGatewayRejectPayment(self.get_transaction_status_text())
"""
    gateway
"""
def _get_gateway_payment_url_parameter(self):
return self._payment_url
def _get_gateway_payment_method_parameter(self):
return 'POST'
def _get_gateway_payment_parameter(self):
params = {
'Token': self.get_reference_number(),
'GetMethod': 'true',
}
return params
"""
verify from gateway
"""
def prepare_verify_from_gateway(self):
super(SEP, self).prepare_verify_from_gateway()
request = self.get_request()
tracking_code = request.GET.get('ResNum', None)
token = request.GET.get('Token', None)
self._set_tracking_code(tracking_code)
self._set_bank_record()
ref_num = request.GET.get('RefNum', None)
if request.GET.get('State', 'NOK') == 'OK' and ref_num:
self._set_reference_number(ref_num)
self._bank.reference_number = ref_num
extra_information = f"TRACENO={request.GET.get('TRACENO', None)}, RefNum={ref_num}, Token={token}"
self._bank.extra_information = extra_information
self._bank.save()
def verify_from_gateway(self, request):
super(SEP, self).verify_from_gateway(request)
"""
verify
"""
def get_verify_data(self):
super(SEP, self).get_verify_data()
data = self.get_reference_number(), self._merchant_code
return data
def prepare_verify(self, tracking_code):
super(SEP, self).prepare_verify(tracking_code)
def verify(self, transaction_code):
super(SEP, self).verify(transaction_code)
data = self.get_verify_data()
client = self._get_client(self._verify_api_url)
result = client.service.verifyTransaction(
*data
)
if result == self.get_gateway_amount():
self._set_payment_status(PaymentStatus.COMPLETE)
else:
self._set_payment_status(PaymentStatus.CANCEL_BY_USER)
logging.debug("SEP gateway unapprove payment")
def _send_data(self, api, data):
try:
response = requests.post(api, json=data, timeout=5)
except requests.Timeout:
logging.exception("SEP time out gateway {}".format(data))
raise BankGatewayConnectionError()
except requests.ConnectionError:
logging.exception("SEP time out gateway {}".format(data))
raise BankGatewayConnectionError()
response_json = get_json(response)
self._set_transaction_status_text(response_json.get('errorDesc'))
return response_json
@staticmethod
def _get_client(url):
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0',
}
transport = Transport(timeout=5, operation_timeout=5)
transport.session.headers = headers
client = Client(url, transport=transport)
return client
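
# A rough sketch of the flow implemented above (no new API is introduced here):
# pay() posts get_pay_data() to the token endpoint and stores the returned token
# as the reference number; the user is then redirected to _payment_url with that
# token; on callback, prepare_verify_from_gateway() records RefNum and the extra
# information; verify() finally calls the SOAP verifyTransaction service and
# marks the payment COMPLETE only if the returned amount matches the gateway
# amount.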
|
import pandas as pd
import numpy as np
import geopandas as gpd
import matplotlib.pyplot as plt
from collections import OrderedDict
from collections.abc import Iterable
import itertools
from IPython.core.display import display, HTML
import seaborn as sns
import sys
sys.path.append('../')
# sns.set_theme(palette="pastel")
from src.d02_intermediate.classifier_data_api import ClassifierDataApi
from src.d04_modeling.knapsack_classifier import KnapsackClassifier
from src.d04_modeling.naive_classifier import NaiveClassifier
from src.d04_modeling.ctip_classifier import CtipClassifier
from src.d04_modeling.propositional_classifier import PropositionalClassifier, andClassifier, orClassifier
from src.d04_modeling.abstract_block_classifier import AbstractBlockClassifier
class SampleEvaluation:
"""
Generate main report results
"""
__classifier_data_api = ClassifierDataApi()
def __init__(self, frl_key, propositional_model: PropositionalClassifier, propositional_params):
"""
:param frl_key: string that identifies which FRL data should be loaded ('tk5' or 'tk12')
:param propositional_params: list of parameters for the propositional classifier
:param propositional_model: PropositionalClassifier
"""
self.__frl_key = frl_key
self.__model_dict = self.initialize_models_dict(frl_key, propositional_model, propositional_params)
@staticmethod
def initialize_models_dict(frl_key, propositional_model, propositional_params):
"""
Initialize models for report
:param frl_key: string that identifies which FRL data should be loaded ('tk5' or 'tk12')
:param propositional_params: list of parameters for the propositional classifier
:param propositional_model: PropositionalClassifier
:return:
"""
AbstractBlockClassifier().refresh()
positive_group = propositional_model.positive_group
model_dict = OrderedDict()
model_dict['CTIP1'] = {'model': CtipClassifier(positive_group=positive_group, frl_key=frl_key),
'params': None,
'fname': 'ctip'}
model_dict['Benchmark'] = {'model': NaiveClassifier(positive_group=positive_group, proportion=True,
frl_key=frl_key),
'params': None,
'fname': 'naivep'}
model_dict['DSSG ET'] = {'model': propositional_model,
'params': propositional_params,
'fname': 'pc'}
print("Propositional Statement:\n%s" % model_dict['DSSG ET']['model'].statement)
print("Focal group: %s" % positive_group)
return model_dict
def heat_map1(self, column, frl_key=None, pct_frl=True, title=None, legend=False):
"""
Generate heat map of SF for the corresponding column
:param column: column with value used for the heat map
:param frl_key: string that identifies which FRL data should be loaded ('tk5' or 'tk12')
:param pct_frl: load block data with percentage columns for FRL data
:param title: title of the plot
:param legend: show map legend
:return:
"""
if frl_key is None:
self.__classifier_data_api.get_block_data(pct_frl=pct_frl)
else:
self.__classifier_data_api.get_block_data(frl_key=frl_key, pct_frl=pct_frl)
self.__classifier_data_api.get_map_data()
map_df_data = self.__classifier_data_api.get_map_df_data(cols=[column])
if title is not None:
display(HTML("<h3>%s</h3>" % title))
self.__classifier_data_api.plot_map_column(map_df_data, column, cmap="YlOrRd",
save=True, legend=legend, title="",
show=True)
def classifier_evalutaion_roc(self, x=None):
"""
Plot ROC curve for all the models
:return:
"""
model_dict = self.__model_dict.copy()
results_dict = OrderedDict()
for model_name, model in model_dict.items():
params = model['params']
if params is None:
results_dict[model_name] = model['model'].get_roc()
else:
results_dict[model_name] = model['model'].get_roc(params)
plt.rcParams['font.size'] = '10'
fig, ax = plt.subplots(figsize=(4.8,4.8))
lw = 2
palette = itertools.cycle(sns.color_palette())
for model_name, results in results_dict.items():
markers = False
if len(results) < 20:
markers = True
sns.scatterplot(ax=ax, data=results, x='fpr', y='tpr', label=model_name, color=next(palette))
else:
sns.lineplot(ax=ax, data=results, x='fpr', y='tpr', label=model_name, linewidth=lw,
markers=markers, markersize=12, color=next(palette))
ax.set_ylabel('Proportion of focal students\n receiving priority (TPR)')
ax.set_xlabel('Proportion of non-Focal students\n receiving priority (FPR)')
if x is not None:
ax.axvline(x=x, ymin=0., ymax=1., color='k', linestyle='--')
ax.legend()
plt.tight_layout()
plt.savefig('outputs/roc_results_%s.png' % self.__frl_key)
plt.show()
def classifier_evalutaion_precision_recall(self):
"""
Plot precision/recall curve for all the models
:return:
"""
model_dict = self.__model_dict.copy()
results_dict = OrderedDict()
for model_name, model in model_dict.items():
params = model['params']
if params is None:
results_dict[model_name] = model['model'].get_precision_recall()
else:
results_dict[model_name] = model['model'].get_precision_recall(params)
plt.rcParams['font.size'] = '10'
fig, ax = plt.subplots(figsize=(4.8,4.8))
lw = 4
for model_name, results in results_dict.items():
marker = None
if len(results) < 20:
marker = '.'
ax.plot(results['recall'], results['precision'], label=model_name, linewidth=lw,
marker=marker, markersize=12)
ax.set_xlabel('Proportion of focal students\n receiving priority (Recall)')
ax.set_ylabel('Proportion of prioritized students\n who are focal (Precision)')
ax.legend()
plt.tight_layout()
plt.savefig('outputs/precision_recall_results_%s.png' % self.__frl_key)
plt.show()
def classification_map(self, fpr, params):
"""
Plot SF map with the solution/assignment for each model. Propositional classifier is not implemented.
        :param fpr: Maximum FPR of the solution
        :param params: parameters for the propositional classifier
:return:
"""
model_dict = self.__model_dict.copy()
for model_name, model in model_dict.items():
display(HTML("<h1>%s</h1>" % model_name))
use_fpr = model['params'] is None
            if use_fpr:
model['model'].plot_map(params=fpr, save=True, col=model['fname'])
else:
model['model'].plot_map(params=params, save=True, col=model['fname'])
def get_ctip1_fpr(self):
"""
Query the false positive rate of the CTIP1 model
:return:
"""
model = self.__model_dict['CTIP1']['model']
fpr = model.get_results().iloc[0]['fp'] / model.data[model.negative_group].sum()
print("CTIP1 FPR: %.4f" % fpr)
return fpr
def get_dssg_et_params(self, fpr):
"""
        Query the parameters for the propositional model at a given FPR.
:param fpr: model fpr
:return:
"""
model = self.__model_dict['DSSG ET']['model']
params = self.__model_dict['DSSG ET']['params']
roc_df = model.get_roc(params).sort_values('fpr')
mask = roc_df['fpr'] <= fpr
i = roc_df.index[mask][-1]
params_fpr = params[i]
print("Parameters DSSG ET @ %.4f:" % fpr)
print(params_fpr)
return params_fpr |
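    # Illustrative workflow sketch (assumption, not part of the original class):
    # the typical sequence suggested by the methods above would be
    #
    #   fpr = report.get_ctip1_fpr()              # FPR of the CTIP1 baseline
    #   params = report.get_dssg_et_params(fpr)   # propositional params at that FPR
    #   report.classifier_evaluation_roc(x=fpr)
    #   report.classification_map(fpr, params)
    #
    # where `report` is an instance of this class built from an frl_key,
    # a PropositionalClassifier and its parameter list.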
#!/usr/bin/env python
# Copyright 2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
ddCOSMO TDA, TDHF, TDDFT gradients
The implementations are based on the modules
pyscf.grad.tdrhf
pyscf.grad.tdrks
pyscf.grad.tduhf
pyscf.grad.tduks
'''
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf import gto
from pyscf import scf
from pyscf import dft
from pyscf import df
from pyscf.dft import numint
from pyscf.solvent import ddcosmo
from pyscf.solvent import ddcosmo_grad
from pyscf.solvent._attach_solvent import _Solvation
from pyscf.grad import rks as rks_grad
from pyscf.grad import tdrks as tdrks_grad
from pyscf.grad import tduks as tduks_grad
from pyscf.scf import cphf, ucphf
def make_grad_object(grad_method):
'''For grad_method in vacuum, add nuclear gradients of solvent pcmobj'''
# Zeroth order method object must be a solvation-enabled method
assert isinstance(grad_method.base, _Solvation)
if grad_method.base.with_solvent.frozen:
        raise RuntimeError('Frozen solvent model is not available for energy gradients')
grad_method_class = grad_method.__class__
class WithSolventGrad(grad_method_class):
def __init__(self, grad_method):
self.__dict__.update(grad_method.__dict__)
self.de_solvent = None
self.de_solute = None
self._keys = self._keys.union(['de_solvent', 'de_solute'])
def grad_elec(self, xy, singlet, atmlst=None):
if isinstance(self.base._scf, dft.uks.UKS):
return tduks_grad_elec(self, xy, atmlst, self.max_memory, self.verbose)
elif isinstance(self.base._scf, dft.rks.RKS):
return tdrks_grad_elec(self, xy, singlet, atmlst, self.max_memory, self.verbose)
elif isinstance(self.base._scf, scf.uhf.UHF):
return tduhf_grad_elec(self, xy, atmlst, self.max_memory, self.verbose)
elif isinstance(self.base._scf, scf.hf.RHF):
return tdrhf_grad_elec(self, xy, singlet, atmlst, self.max_memory, self.verbose)
# TODO: if moving to python3, change signature to
# def kernel(self, *args, dm=None, atmlst=None, **kwargs):
def kernel(self, *args, **kwargs):
dm = kwargs.pop('dm', None)
if dm is None:
dm = self.base._scf.make_rdm1(ao_repr=True)
self.de_solvent = ddcosmo_grad.kernel(self.base.with_solvent, dm)
self.de_solute = grad_method_class.kernel(self, *args, **kwargs)
self.de = self.de_solute + self.de_solvent
if self.verbose >= logger.NOTE:
logger.note(self, '--------------- %s (+%s) gradients ---------------',
self.base.__class__.__name__,
self.base.with_solvent.__class__.__name__)
self._write(self.mol, self.de, self.atmlst)
logger.note(self, '----------------------------------------------')
return self.de
def _finalize(self):
# disable _finalize. It is called in grad_method.kernel method
# where self.de was not yet initialized.
pass
return WithSolventGrad(grad_method)
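# Illustrative sketch (not part of the original module): in practice this wrapper
# is reached through the usual PySCF gradient entry point, as in the __main__
# example at the bottom of this file, e.g.
#
#   mf = mol.RHF().ddCOSMO().run()
#   td = mf.TDA().ddCOSMO().run(equilibrium_solvation=True)
#   de = td.nuc_grad_method().kernel()   # de = de_solute + de_solvent
#
# kernel() evaluates the ddCOSMO term from the ground-state SCF density and adds
# it to the solute gradient produced by the parent gradient class.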
def tdrhf_grad_elec(td_grad, x_y, singlet=True, atmlst=None,
max_memory=2000, verbose=logger.INFO):
'''
See also function pyscf.grad.tdrhf.grad_elec
'''
log = logger.new_logger(td_grad, verbose)
time0 = logger.process_clock(), logger.perf_counter()
mol = td_grad.mol
mf = td_grad.base._scf
mo_coeff = mf.mo_coeff
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
nao, nmo = mo_coeff.shape
nocc = (mo_occ>0).sum()
nvir = nmo - nocc
x, y = x_y
xpy = (x+y).reshape(nocc,nvir).T
xmy = (x-y).reshape(nocc,nvir).T
orbv = mo_coeff[:,nocc:]
orbo = mo_coeff[:,:nocc]
with_solvent = getattr(td_grad.base, 'with_solvent', mf.with_solvent)
dvv = numpy.einsum('ai,bi->ab', xpy, xpy) + numpy.einsum('ai,bi->ab', xmy, xmy)
doo =-numpy.einsum('ai,aj->ij', xpy, xpy) - numpy.einsum('ai,aj->ij', xmy, xmy)
dmxpy = reduce(numpy.dot, (orbv, xpy, orbo.T))
dmxmy = reduce(numpy.dot, (orbv, xmy, orbo.T))
dmzoo = reduce(numpy.dot, (orbo, doo, orbo.T))
dmzoo+= reduce(numpy.dot, (orbv, dvv, orbv.T))
vj, vk = mf.get_jk(mol, (dmzoo, dmxpy+dmxpy.T, dmxmy-dmxmy.T), hermi=0)
if with_solvent.equilibrium_solvation:
vj[:2] += mf.with_solvent._B_dot_x((dmzoo, dmxpy+dmxpy.T))
else:
vj[0] += mf.with_solvent._B_dot_x(dmzoo)
veff0doo = vj[0] * 2 - vk[0]
wvo = reduce(numpy.dot, (orbv.T, veff0doo, orbo)) * 2
if singlet:
veff = vj[1] * 2 - vk[1]
else:
veff = -vk[1]
veff0mop = reduce(numpy.dot, (mo_coeff.T, veff, mo_coeff))
wvo -= numpy.einsum('ki,ai->ak', veff0mop[:nocc,:nocc], xpy) * 2
wvo += numpy.einsum('ac,ai->ci', veff0mop[nocc:,nocc:], xpy) * 2
veff = -vk[2]
veff0mom = reduce(numpy.dot, (mo_coeff.T, veff, mo_coeff))
wvo -= numpy.einsum('ki,ai->ak', veff0mom[:nocc,:nocc], xmy) * 2
wvo += numpy.einsum('ac,ai->ci', veff0mom[nocc:,nocc:], xmy) * 2
with lib.temporary_env(mf.with_solvent, equilibrium_solvation=True):
# set singlet=None, generate function for CPHF type response kernel
vresp = mf.gen_response(singlet=None, hermi=1)
def fvind(x): # For singlet, closed shell ground state
dm = reduce(numpy.dot, (orbv, x.reshape(nvir,nocc)*2, orbo.T))
v1ao = vresp(dm+dm.T)
return reduce(numpy.dot, (orbv.T, v1ao, orbo)).ravel()
z1 = cphf.solve(fvind, mo_energy, mo_occ, wvo,
max_cycle=td_grad.cphf_max_cycle,
tol=td_grad.cphf_conv_tol)[0]
z1 = z1.reshape(nvir,nocc)
time1 = log.timer('Z-vector using CPHF solver', *time0)
z1ao = reduce(numpy.dot, (orbv, z1, orbo.T))
veff = vresp(z1ao+z1ao.T)
im0 = numpy.zeros((nmo,nmo))
im0[:nocc,:nocc] = reduce(numpy.dot, (orbo.T, veff0doo+veff, orbo))
im0[:nocc,:nocc]+= numpy.einsum('ak,ai->ki', veff0mop[nocc:,:nocc], xpy)
im0[:nocc,:nocc]+= numpy.einsum('ak,ai->ki', veff0mom[nocc:,:nocc], xmy)
im0[nocc:,nocc:] = numpy.einsum('ci,ai->ac', veff0mop[nocc:,:nocc], xpy)
im0[nocc:,nocc:]+= numpy.einsum('ci,ai->ac', veff0mom[nocc:,:nocc], xmy)
im0[nocc:,:nocc] = numpy.einsum('ki,ai->ak', veff0mop[:nocc,:nocc], xpy)*2
im0[nocc:,:nocc]+= numpy.einsum('ki,ai->ak', veff0mom[:nocc,:nocc], xmy)*2
zeta = lib.direct_sum('i+j->ij', mo_energy, mo_energy) * .5
zeta[nocc:,:nocc] = mo_energy[:nocc]
zeta[:nocc,nocc:] = mo_energy[nocc:]
dm1 = numpy.zeros((nmo,nmo))
dm1[:nocc,:nocc] = doo
dm1[nocc:,nocc:] = dvv
dm1[nocc:,:nocc] = z1
dm1[:nocc,:nocc] += numpy.eye(nocc)*2 # for ground state
im0 = reduce(numpy.dot, (mo_coeff, im0+zeta*dm1, mo_coeff.T))
# Initialize hcore_deriv with the underlying SCF object because some
# extensions (e.g. QM/MM, solvent) modifies the SCF object only.
mf_grad = td_grad.base._scf.nuc_grad_method()
hcore_deriv = mf_grad.hcore_generator(mol)
s1 = mf_grad.get_ovlp(mol)
dmz1doo = z1ao + dmzoo
oo0 = reduce(numpy.dot, (orbo, orbo.T))
vj, vk = td_grad.get_jk(mol, (oo0, dmz1doo+dmz1doo.T, dmxpy+dmxpy.T,
dmxmy-dmxmy.T))
vj = vj.reshape(-1,3,nao,nao)
vk = vk.reshape(-1,3,nao,nao)
if singlet:
vhf1 = vj * 2 - vk
else:
vhf1 = numpy.vstack((vj[:2]*2-vk[:2], -vk[2:]))
time1 = log.timer('2e AO integral derivatives', *time1)
if atmlst is None:
atmlst = range(mol.natm)
offsetdic = mol.offset_nr_by_atom()
de = numpy.zeros((len(atmlst),3))
for k, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = offsetdic[ia]
# Ground state gradients
h1ao = hcore_deriv(ia)
h1ao[:,p0:p1] += vhf1[0,:,p0:p1]
h1ao[:,:,p0:p1] += vhf1[0,:,p0:p1].transpose(0,2,1)
# oo0*2 for doubly occupied orbitals
de[k] = numpy.einsum('xpq,pq->x', h1ao, oo0) * 2
de[k] += numpy.einsum('xpq,pq->x', h1ao, dmz1doo)
de[k] -= numpy.einsum('xpq,pq->x', s1[:,p0:p1], im0[p0:p1])
de[k] -= numpy.einsum('xqp,pq->x', s1[:,p0:p1], im0[:,p0:p1])
de[k] += numpy.einsum('xij,ij->x', vhf1[1,:,p0:p1], oo0[p0:p1])
de[k] += numpy.einsum('xij,ij->x', vhf1[2,:,p0:p1], dmxpy[p0:p1,:]) * 2
de[k] += numpy.einsum('xij,ij->x', vhf1[3,:,p0:p1], dmxmy[p0:p1,:]) * 2
de[k] += numpy.einsum('xji,ij->x', vhf1[2,:,p0:p1], dmxpy[:,p0:p1]) * 2
de[k] -= numpy.einsum('xji,ij->x', vhf1[3,:,p0:p1], dmxmy[:,p0:p1]) * 2
de += _grad_solvent(with_solvent, oo0*2, dmz1doo, dmxpy*2, singlet)
log.timer('TDHF nuclear gradients', *time0)
return de
def tdrks_grad_elec(td_grad, x_y, singlet=True, atmlst=None,
max_memory=2000, verbose=logger.INFO):
'''
See also function pyscf.grad.tdrks.grad_elec
'''
log = logger.new_logger(td_grad, verbose)
time0 = logger.process_clock(), logger.perf_counter()
mol = td_grad.mol
mf = td_grad.base._scf
mo_coeff = mf.mo_coeff
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
nao, nmo = mo_coeff.shape
nocc = (mo_occ>0).sum()
nvir = nmo - nocc
with_solvent = getattr(td_grad.base, 'with_solvent', mf.with_solvent)
x, y = x_y
xpy = (x+y).reshape(nocc,nvir).T
xmy = (x-y).reshape(nocc,nvir).T
orbv = mo_coeff[:,nocc:]
orbo = mo_coeff[:,:nocc]
dvv = numpy.einsum('ai,bi->ab', xpy, xpy) + numpy.einsum('ai,bi->ab', xmy, xmy)
doo =-numpy.einsum('ai,aj->ij', xpy, xpy) - numpy.einsum('ai,aj->ij', xmy, xmy)
dmxpy = reduce(numpy.dot, (orbv, xpy, orbo.T))
dmxmy = reduce(numpy.dot, (orbv, xmy, orbo.T))
dmzoo = reduce(numpy.dot, (orbo, doo, orbo.T))
dmzoo+= reduce(numpy.dot, (orbv, dvv, orbv.T))
mem_now = lib.current_memory()[0]
max_memory = max(2000, td_grad.max_memory*.9-mem_now)
ni = mf._numint
ni.libxc.test_deriv_order(mf.xc, 3, raise_error=True)
omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, mol.spin)
# dm0 = mf.make_rdm1(mo_coeff, mo_occ), but it is not used when computing
# fxc since rho0 is passed to fxc function.
rho0, vxc, fxc = ni.cache_xc_kernel(mf.mol, mf.grids, mf.xc,
[mo_coeff]*2, [mo_occ*.5]*2, spin=1)
f1vo, f1oo, vxc1, k1ao = \
tdrks_grad._contract_xc_kernel(td_grad, mf.xc, dmxpy,
dmzoo, True, True, singlet, max_memory)
if abs(hyb) > 1e-10:
dm = (dmzoo, dmxpy+dmxpy.T, dmxmy-dmxmy.T)
vj, vk = mf.get_jk(mol, dm, hermi=0)
if with_solvent.equilibrium_solvation:
vj[:2] += mf.with_solvent._B_dot_x((dmzoo, dmxpy+dmxpy.T))
else:
vj[0] += mf.with_solvent._B_dot_x(dmzoo)
vk *= hyb
if abs(omega) > 1e-10:
vk += mf.get_k(mol, dm, hermi=0, omega=omega) * (alpha-hyb)
veff0doo = vj[0] * 2 - vk[0] + f1oo[0] + k1ao[0] * 2
wvo = reduce(numpy.dot, (orbv.T, veff0doo, orbo)) * 2
if singlet:
veff = vj[1] * 2 - vk[1] + f1vo[0] * 2
else:
veff = -vk[1] + f1vo[0] * 2
veff0mop = reduce(numpy.dot, (mo_coeff.T, veff, mo_coeff))
wvo -= numpy.einsum('ki,ai->ak', veff0mop[:nocc,:nocc], xpy) * 2
wvo += numpy.einsum('ac,ai->ci', veff0mop[nocc:,nocc:], xpy) * 2
veff = -vk[2]
veff0mom = reduce(numpy.dot, (mo_coeff.T, veff, mo_coeff))
wvo -= numpy.einsum('ki,ai->ak', veff0mom[:nocc,:nocc], xmy) * 2
wvo += numpy.einsum('ac,ai->ci', veff0mom[nocc:,nocc:], xmy) * 2
else:
vj = mf.get_j(mol, (dmzoo, dmxpy+dmxpy.T), hermi=1)
if with_solvent.equilibrium_solvation:
vj[:2] += mf.with_solvent._B_dot_x((dmzoo, dmxpy+dmxpy.T))
else:
vj[0] += mf.with_solvent._B_dot_x(dmzoo)
veff0doo = vj[0] * 2 + f1oo[0] + k1ao[0] * 2
wvo = reduce(numpy.dot, (orbv.T, veff0doo, orbo)) * 2
if singlet:
veff = vj[1] * 2 + f1vo[0] * 2
else:
veff = f1vo[0] * 2
veff0mop = reduce(numpy.dot, (mo_coeff.T, veff, mo_coeff))
wvo -= numpy.einsum('ki,ai->ak', veff0mop[:nocc,:nocc], xpy) * 2
wvo += numpy.einsum('ac,ai->ci', veff0mop[nocc:,nocc:], xpy) * 2
veff0mom = numpy.zeros((nmo,nmo))
with lib.temporary_env(mf.with_solvent, equilibrium_solvation=True):
# set singlet=None, generate function for CPHF type response kernel
vresp = mf.gen_response(singlet=None, hermi=1)
def fvind(x):
dm = reduce(numpy.dot, (orbv, x.reshape(nvir,nocc)*2, orbo.T))
v1ao = vresp(dm+dm.T)
return reduce(numpy.dot, (orbv.T, v1ao, orbo)).ravel()
z1 = cphf.solve(fvind, mo_energy, mo_occ, wvo,
max_cycle=td_grad.cphf_max_cycle,
tol=td_grad.cphf_conv_tol)[0]
z1 = z1.reshape(nvir,nocc)
time1 = log.timer('Z-vector using CPHF solver', *time0)
z1ao = reduce(numpy.dot, (orbv, z1, orbo.T))
veff = vresp(z1ao+z1ao.T)
im0 = numpy.zeros((nmo,nmo))
im0[:nocc,:nocc] = reduce(numpy.dot, (orbo.T, veff0doo+veff, orbo))
im0[:nocc,:nocc]+= numpy.einsum('ak,ai->ki', veff0mop[nocc:,:nocc], xpy)
im0[:nocc,:nocc]+= numpy.einsum('ak,ai->ki', veff0mom[nocc:,:nocc], xmy)
im0[nocc:,nocc:] = numpy.einsum('ci,ai->ac', veff0mop[nocc:,:nocc], xpy)
im0[nocc:,nocc:]+= numpy.einsum('ci,ai->ac', veff0mom[nocc:,:nocc], xmy)
im0[nocc:,:nocc] = numpy.einsum('ki,ai->ak', veff0mop[:nocc,:nocc], xpy)*2
im0[nocc:,:nocc]+= numpy.einsum('ki,ai->ak', veff0mom[:nocc,:nocc], xmy)*2
zeta = lib.direct_sum('i+j->ij', mo_energy, mo_energy) * .5
zeta[nocc:,:nocc] = mo_energy[:nocc]
zeta[:nocc,nocc:] = mo_energy[nocc:]
dm1 = numpy.zeros((nmo,nmo))
dm1[:nocc,:nocc] = doo
dm1[nocc:,nocc:] = dvv
dm1[nocc:,:nocc] = z1
dm1[:nocc,:nocc] += numpy.eye(nocc)*2 # for ground state
im0 = reduce(numpy.dot, (mo_coeff, im0+zeta*dm1, mo_coeff.T))
# Initialize hcore_deriv with the underlying SCF object because some
# extensions (e.g. QM/MM, solvent) modifies the SCF object only.
mf_grad = td_grad.base._scf.nuc_grad_method()
hcore_deriv = mf_grad.hcore_generator(mol)
s1 = mf_grad.get_ovlp(mol)
dmz1doo = z1ao + dmzoo
oo0 = reduce(numpy.dot, (orbo, orbo.T))
if abs(hyb) > 1e-10:
dm = (oo0, dmz1doo+dmz1doo.T, dmxpy+dmxpy.T, dmxmy-dmxmy.T)
vj, vk = td_grad.get_jk(mol, dm)
vk *= hyb
if abs(omega) > 1e-10:
with mol.with_range_coulomb(omega):
vk += td_grad.get_k(mol, dm) * (alpha-hyb)
vj = vj.reshape(-1,3,nao,nao)
vk = vk.reshape(-1,3,nao,nao)
if singlet:
veff1 = vj * 2 - vk
else:
veff1 = numpy.vstack((vj[:2]*2-vk[:2], -vk[2:]))
else:
vj = td_grad.get_j(mol, (oo0, dmz1doo+dmz1doo.T, dmxpy+dmxpy.T))
vj = vj.reshape(-1,3,nao,nao)
veff1 = numpy.zeros((4,3,nao,nao))
if singlet:
veff1[:3] = vj * 2
else:
veff1[:2] = vj[:2] * 2
fxcz1 = tdrks_grad._contract_xc_kernel(td_grad, mf.xc, z1ao, None,
False, False, True, max_memory)[0]
veff1[0] += vxc1[1:]
veff1[1] +=(f1oo[1:] + fxcz1[1:] + k1ao[1:]*2)*2 # *2 for dmz1doo+dmz1oo.T
veff1[2] += f1vo[1:] * 2
time1 = log.timer('2e AO integral derivatives', *time1)
if atmlst is None:
atmlst = range(mol.natm)
offsetdic = mol.offset_nr_by_atom()
de = numpy.zeros((len(atmlst),3))
for k, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = offsetdic[ia]
# Ground state gradients
h1ao = hcore_deriv(ia)
h1ao[:,p0:p1] += veff1[0,:,p0:p1]
h1ao[:,:,p0:p1] += veff1[0,:,p0:p1].transpose(0,2,1)
# oo0*2 for doubly occupied orbitals
e1 = numpy.einsum('xpq,pq->x', h1ao, oo0) * 2
e1 += numpy.einsum('xpq,pq->x', h1ao, dmz1doo)
e1 -= numpy.einsum('xpq,pq->x', s1[:,p0:p1], im0[p0:p1])
e1 -= numpy.einsum('xqp,pq->x', s1[:,p0:p1], im0[:,p0:p1])
e1 += numpy.einsum('xij,ij->x', veff1[1,:,p0:p1], oo0[p0:p1])
e1 += numpy.einsum('xij,ij->x', veff1[2,:,p0:p1], dmxpy[p0:p1,:]) * 2
e1 += numpy.einsum('xij,ij->x', veff1[3,:,p0:p1], dmxmy[p0:p1,:]) * 2
e1 += numpy.einsum('xji,ij->x', veff1[2,:,p0:p1], dmxpy[:,p0:p1]) * 2
e1 -= numpy.einsum('xji,ij->x', veff1[3,:,p0:p1], dmxmy[:,p0:p1]) * 2
de[k] = e1
de += _grad_solvent(with_solvent, oo0*2, dmz1doo, dmxpy*2, singlet)
log.timer('TDDFT nuclear gradients', *time0)
return de
def tduhf_grad_elec(td_grad, x_y, atmlst=None, max_memory=2000, verbose=logger.INFO):
'''
See also function pyscf.grad.tduhf.grad_elec
'''
log = logger.new_logger(td_grad, verbose)
time0 = logger.process_clock(), logger.perf_counter()
mol = td_grad.mol
mf = td_grad.base._scf
mo_coeff = mf.mo_coeff
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
with_solvent = getattr(td_grad.base, 'with_solvent', mf.with_solvent)
occidxa = numpy.where(mo_occ[0]>0)[0]
occidxb = numpy.where(mo_occ[1]>0)[0]
viridxa = numpy.where(mo_occ[0]==0)[0]
viridxb = numpy.where(mo_occ[1]==0)[0]
nocca = len(occidxa)
noccb = len(occidxb)
nvira = len(viridxa)
nvirb = len(viridxb)
orboa = mo_coeff[0][:,occidxa]
orbob = mo_coeff[1][:,occidxb]
orbva = mo_coeff[0][:,viridxa]
orbvb = mo_coeff[1][:,viridxb]
nao = mo_coeff[0].shape[0]
nmoa = nocca + nvira
nmob = noccb + nvirb
(xa, xb), (ya, yb) = x_y
xpya = (xa+ya).reshape(nocca,nvira).T
xpyb = (xb+yb).reshape(noccb,nvirb).T
xmya = (xa-ya).reshape(nocca,nvira).T
xmyb = (xb-yb).reshape(noccb,nvirb).T
dvva = numpy.einsum('ai,bi->ab', xpya, xpya) + numpy.einsum('ai,bi->ab', xmya, xmya)
dvvb = numpy.einsum('ai,bi->ab', xpyb, xpyb) + numpy.einsum('ai,bi->ab', xmyb, xmyb)
dooa =-numpy.einsum('ai,aj->ij', xpya, xpya) - numpy.einsum('ai,aj->ij', xmya, xmya)
doob =-numpy.einsum('ai,aj->ij', xpyb, xpyb) - numpy.einsum('ai,aj->ij', xmyb, xmyb)
dmxpya = reduce(numpy.dot, (orbva, xpya, orboa.T))
dmxpyb = reduce(numpy.dot, (orbvb, xpyb, orbob.T))
dmxmya = reduce(numpy.dot, (orbva, xmya, orboa.T))
dmxmyb = reduce(numpy.dot, (orbvb, xmyb, orbob.T))
dmzooa = reduce(numpy.dot, (orboa, dooa, orboa.T))
dmzoob = reduce(numpy.dot, (orbob, doob, orbob.T))
dmzooa+= reduce(numpy.dot, (orbva, dvva, orbva.T))
dmzoob+= reduce(numpy.dot, (orbvb, dvvb, orbvb.T))
vj, vk = mf.get_jk(mol, (dmzooa, dmxpya+dmxpya.T, dmxmya-dmxmya.T,
dmzoob, dmxpyb+dmxpyb.T, dmxmyb-dmxmyb.T), hermi=0)
vj = vj.reshape(2,3,nao,nao)
vk = vk.reshape(2,3,nao,nao)
if with_solvent.equilibrium_solvation:
dmxpy = dmxpya + dmxpyb
vj[0,:2] += mf.with_solvent._B_dot_x((dmzooa+dmzoob, dmxpy+dmxpy.T))
else:
vj[0,0] += mf.with_solvent._B_dot_x(dmzooa+dmzoob)
veff0doo = vj[0,0]+vj[1,0] - vk[:,0]
wvoa = reduce(numpy.dot, (orbva.T, veff0doo[0], orboa)) * 2
wvob = reduce(numpy.dot, (orbvb.T, veff0doo[1], orbob)) * 2
veff = vj[0,1]+vj[1,1] - vk[:,1]
veff0mopa = reduce(numpy.dot, (mo_coeff[0].T, veff[0], mo_coeff[0]))
veff0mopb = reduce(numpy.dot, (mo_coeff[1].T, veff[1], mo_coeff[1]))
wvoa -= numpy.einsum('ki,ai->ak', veff0mopa[:nocca,:nocca], xpya) * 2
wvob -= numpy.einsum('ki,ai->ak', veff0mopb[:noccb,:noccb], xpyb) * 2
wvoa += numpy.einsum('ac,ai->ci', veff0mopa[nocca:,nocca:], xpya) * 2
wvob += numpy.einsum('ac,ai->ci', veff0mopb[noccb:,noccb:], xpyb) * 2
veff = -vk[:,2]
veff0moma = reduce(numpy.dot, (mo_coeff[0].T, veff[0], mo_coeff[0]))
veff0momb = reduce(numpy.dot, (mo_coeff[1].T, veff[1], mo_coeff[1]))
wvoa -= numpy.einsum('ki,ai->ak', veff0moma[:nocca,:nocca], xmya) * 2
wvob -= numpy.einsum('ki,ai->ak', veff0momb[:noccb,:noccb], xmyb) * 2
wvoa += numpy.einsum('ac,ai->ci', veff0moma[nocca:,nocca:], xmya) * 2
wvob += numpy.einsum('ac,ai->ci', veff0momb[noccb:,noccb:], xmyb) * 2
with lib.temporary_env(mf.with_solvent, equilibrium_solvation=True):
vresp = mf.gen_response(hermi=1)
def fvind(x):
dm1 = numpy.empty((2,nao,nao))
xa = x[0,:nvira*nocca].reshape(nvira,nocca)
xb = x[0,nvira*nocca:].reshape(nvirb,noccb)
dma = reduce(numpy.dot, (orbva, xa, orboa.T))
dmb = reduce(numpy.dot, (orbvb, xb, orbob.T))
dm1[0] = dma + dma.T
dm1[1] = dmb + dmb.T
v1 = vresp(dm1)
v1a = reduce(numpy.dot, (orbva.T, v1[0], orboa))
v1b = reduce(numpy.dot, (orbvb.T, v1[1], orbob))
return numpy.hstack((v1a.ravel(), v1b.ravel()))
z1a, z1b = ucphf.solve(fvind, mo_energy, mo_occ, (wvoa,wvob),
max_cycle=td_grad.cphf_max_cycle,
tol=td_grad.cphf_conv_tol)[0]
time1 = log.timer('Z-vector using UCPHF solver', *time0)
z1ao = numpy.empty((2,nao,nao))
z1ao[0] = reduce(numpy.dot, (orbva, z1a, orboa.T))
z1ao[1] = reduce(numpy.dot, (orbvb, z1b, orbob.T))
veff = vresp((z1ao+z1ao.transpose(0,2,1)) * .5)
im0a = numpy.zeros((nmoa,nmoa))
im0b = numpy.zeros((nmob,nmob))
im0a[:nocca,:nocca] = reduce(numpy.dot, (orboa.T, veff0doo[0]+veff[0], orboa)) * .5
im0b[:noccb,:noccb] = reduce(numpy.dot, (orbob.T, veff0doo[1]+veff[1], orbob)) * .5
im0a[:nocca,:nocca]+= numpy.einsum('ak,ai->ki', veff0mopa[nocca:,:nocca], xpya) * .5
im0b[:noccb,:noccb]+= numpy.einsum('ak,ai->ki', veff0mopb[noccb:,:noccb], xpyb) * .5
im0a[:nocca,:nocca]+= numpy.einsum('ak,ai->ki', veff0moma[nocca:,:nocca], xmya) * .5
im0b[:noccb,:noccb]+= numpy.einsum('ak,ai->ki', veff0momb[noccb:,:noccb], xmyb) * .5
im0a[nocca:,nocca:] = numpy.einsum('ci,ai->ac', veff0mopa[nocca:,:nocca], xpya) * .5
im0b[noccb:,noccb:] = numpy.einsum('ci,ai->ac', veff0mopb[noccb:,:noccb], xpyb) * .5
im0a[nocca:,nocca:]+= numpy.einsum('ci,ai->ac', veff0moma[nocca:,:nocca], xmya) * .5
im0b[noccb:,noccb:]+= numpy.einsum('ci,ai->ac', veff0momb[noccb:,:noccb], xmyb) * .5
im0a[nocca:,:nocca] = numpy.einsum('ki,ai->ak', veff0mopa[:nocca,:nocca], xpya)
im0b[noccb:,:noccb] = numpy.einsum('ki,ai->ak', veff0mopb[:noccb,:noccb], xpyb)
im0a[nocca:,:nocca]+= numpy.einsum('ki,ai->ak', veff0moma[:nocca,:nocca], xmya)
im0b[noccb:,:noccb]+= numpy.einsum('ki,ai->ak', veff0momb[:noccb,:noccb], xmyb)
zeta_a = (mo_energy[0][:,None] + mo_energy[0]) * .5
zeta_b = (mo_energy[1][:,None] + mo_energy[1]) * .5
zeta_a[nocca:,:nocca] = mo_energy[0][:nocca]
zeta_b[noccb:,:noccb] = mo_energy[1][:noccb]
zeta_a[:nocca,nocca:] = mo_energy[0][nocca:]
zeta_b[:noccb,noccb:] = mo_energy[1][noccb:]
dm1a = numpy.zeros((nmoa,nmoa))
dm1b = numpy.zeros((nmob,nmob))
dm1a[:nocca,:nocca] = dooa * .5
dm1b[:noccb,:noccb] = doob * .5
dm1a[nocca:,nocca:] = dvva * .5
dm1b[noccb:,noccb:] = dvvb * .5
dm1a[nocca:,:nocca] = z1a * .5
dm1b[noccb:,:noccb] = z1b * .5
dm1a[:nocca,:nocca] += numpy.eye(nocca) # for ground state
dm1b[:noccb,:noccb] += numpy.eye(noccb)
im0a = reduce(numpy.dot, (mo_coeff[0], im0a+zeta_a*dm1a, mo_coeff[0].T))
im0b = reduce(numpy.dot, (mo_coeff[1], im0b+zeta_b*dm1b, mo_coeff[1].T))
im0 = im0a + im0b
# Initialize hcore_deriv with the underlying SCF object because some
# extensions (e.g. QM/MM, solvent) modifies the SCF object only.
mf_grad = td_grad.base._scf.nuc_grad_method()
hcore_deriv = mf_grad.hcore_generator(mol)
s1 = mf_grad.get_ovlp(mol)
dmz1dooa = z1ao[0] + dmzooa
dmz1doob = z1ao[1] + dmzoob
oo0a = reduce(numpy.dot, (orboa, orboa.T))
oo0b = reduce(numpy.dot, (orbob, orbob.T))
as_dm1 = oo0a + oo0b + (dmz1dooa + dmz1doob) * .5
vj, vk = td_grad.get_jk(mol, (oo0a, dmz1dooa+dmz1dooa.T, dmxpya+dmxpya.T, dmxmya-dmxmya.T,
oo0b, dmz1doob+dmz1doob.T, dmxpyb+dmxpyb.T, dmxmyb-dmxmyb.T))
vj = vj.reshape(2,4,3,nao,nao)
vk = vk.reshape(2,4,3,nao,nao)
vhf1a, vhf1b = vj[0] + vj[1] - vk
time1 = log.timer('2e AO integral derivatives', *time1)
if atmlst is None:
atmlst = range(mol.natm)
offsetdic = mol.offset_nr_by_atom()
de = numpy.zeros((len(atmlst),3))
for k, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = offsetdic[ia]
# Ground state gradients
h1ao = hcore_deriv(ia)
de[k] = numpy.einsum('xpq,pq->x', h1ao, as_dm1)
de[k] += numpy.einsum('xpq,pq->x', vhf1a[0,:,p0:p1], oo0a[p0:p1])
de[k] += numpy.einsum('xpq,pq->x', vhf1b[0,:,p0:p1], oo0b[p0:p1])
de[k] += numpy.einsum('xpq,qp->x', vhf1a[0,:,p0:p1], oo0a[:,p0:p1])
de[k] += numpy.einsum('xpq,qp->x', vhf1b[0,:,p0:p1], oo0b[:,p0:p1])
de[k] += numpy.einsum('xpq,pq->x', vhf1a[0,:,p0:p1], dmz1dooa[p0:p1]) * .5
de[k] += numpy.einsum('xpq,pq->x', vhf1b[0,:,p0:p1], dmz1doob[p0:p1]) * .5
de[k] += numpy.einsum('xpq,qp->x', vhf1a[0,:,p0:p1], dmz1dooa[:,p0:p1]) * .5
de[k] += numpy.einsum('xpq,qp->x', vhf1b[0,:,p0:p1], dmz1doob[:,p0:p1]) * .5
de[k] -= numpy.einsum('xpq,pq->x', s1[:,p0:p1], im0[p0:p1])
de[k] -= numpy.einsum('xqp,pq->x', s1[:,p0:p1], im0[:,p0:p1])
de[k] += numpy.einsum('xij,ij->x', vhf1a[1,:,p0:p1], oo0a[p0:p1]) * .5
de[k] += numpy.einsum('xij,ij->x', vhf1b[1,:,p0:p1], oo0b[p0:p1]) * .5
de[k] += numpy.einsum('xij,ij->x', vhf1a[2,:,p0:p1], dmxpya[p0:p1,:])
de[k] += numpy.einsum('xij,ij->x', vhf1b[2,:,p0:p1], dmxpyb[p0:p1,:])
de[k] += numpy.einsum('xij,ij->x', vhf1a[3,:,p0:p1], dmxmya[p0:p1,:])
de[k] += numpy.einsum('xij,ij->x', vhf1b[3,:,p0:p1], dmxmyb[p0:p1,:])
de[k] += numpy.einsum('xji,ij->x', vhf1a[2,:,p0:p1], dmxpya[:,p0:p1])
de[k] += numpy.einsum('xji,ij->x', vhf1b[2,:,p0:p1], dmxpyb[:,p0:p1])
de[k] -= numpy.einsum('xji,ij->x', vhf1a[3,:,p0:p1], dmxmya[:,p0:p1])
de[k] -= numpy.einsum('xji,ij->x', vhf1b[3,:,p0:p1], dmxmyb[:,p0:p1])
dm0 = oo0a + oo0b
dmz1doo = (dmz1dooa + dmz1doob) * .5
dmxpy = dmxpya + dmxpyb
de += _grad_solvent(with_solvent, dm0, dmz1doo, dmxpy)
log.timer('TDUHF nuclear gradients', *time0)
return de
def tduks_grad_elec(td_grad, x_y, atmlst=None, max_memory=2000, verbose=logger.INFO):
'''
See also function pyscf.grad.tduks.grad_elec
'''
log = logger.new_logger(td_grad, verbose)
time0 = logger.process_clock(), logger.perf_counter()
mol = td_grad.mol
mf = td_grad.base._scf
mo_coeff = mf.mo_coeff
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
with_solvent = getattr(td_grad.base, 'with_solvent', mf.with_solvent)
occidxa = numpy.where(mo_occ[0]>0)[0]
occidxb = numpy.where(mo_occ[1]>0)[0]
viridxa = numpy.where(mo_occ[0]==0)[0]
viridxb = numpy.where(mo_occ[1]==0)[0]
nocca = len(occidxa)
noccb = len(occidxb)
nvira = len(viridxa)
nvirb = len(viridxb)
orboa = mo_coeff[0][:,occidxa]
orbob = mo_coeff[1][:,occidxb]
orbva = mo_coeff[0][:,viridxa]
orbvb = mo_coeff[1][:,viridxb]
nao = mo_coeff[0].shape[0]
nmoa = nocca + nvira
nmob = noccb + nvirb
(xa, xb), (ya, yb) = x_y
xpya = (xa+ya).reshape(nocca,nvira).T
xpyb = (xb+yb).reshape(noccb,nvirb).T
xmya = (xa-ya).reshape(nocca,nvira).T
xmyb = (xb-yb).reshape(noccb,nvirb).T
dvva = numpy.einsum('ai,bi->ab', xpya, xpya) + numpy.einsum('ai,bi->ab', xmya, xmya)
dvvb = numpy.einsum('ai,bi->ab', xpyb, xpyb) + numpy.einsum('ai,bi->ab', xmyb, xmyb)
dooa =-numpy.einsum('ai,aj->ij', xpya, xpya) - numpy.einsum('ai,aj->ij', xmya, xmya)
doob =-numpy.einsum('ai,aj->ij', xpyb, xpyb) - numpy.einsum('ai,aj->ij', xmyb, xmyb)
dmxpya = reduce(numpy.dot, (orbva, xpya, orboa.T))
dmxpyb = reduce(numpy.dot, (orbvb, xpyb, orbob.T))
dmxmya = reduce(numpy.dot, (orbva, xmya, orboa.T))
dmxmyb = reduce(numpy.dot, (orbvb, xmyb, orbob.T))
dmzooa = reduce(numpy.dot, (orboa, dooa, orboa.T))
dmzoob = reduce(numpy.dot, (orbob, doob, orbob.T))
dmzooa+= reduce(numpy.dot, (orbva, dvva, orbva.T))
dmzoob+= reduce(numpy.dot, (orbvb, dvvb, orbvb.T))
ni = mf._numint
ni.libxc.test_deriv_order(mf.xc, 3, raise_error=True)
omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, mol.spin)
# dm0 = mf.make_rdm1(mo_coeff, mo_occ), but it is not used when computing
# fxc since rho0 is passed to fxc function.
dm0 = None
rho0, vxc, fxc = ni.cache_xc_kernel(mf.mol, mf.grids, mf.xc,
mo_coeff, mo_occ, spin=1)
f1vo, f1oo, vxc1, k1ao = \
tduks_grad._contract_xc_kernel(td_grad, mf.xc, (dmxpya,dmxpyb),
(dmzooa,dmzoob), True, True, max_memory)
if abs(hyb) > 1e-10:
dm = (dmzooa, dmxpya+dmxpya.T, dmxmya-dmxmya.T,
dmzoob, dmxpyb+dmxpyb.T, dmxmyb-dmxmyb.T)
vj, vk = mf.get_jk(mol, dm, hermi=0)
vk *= hyb
if abs(omega) > 1e-10:
vk += mf.get_k(mol, dm, hermi=0, omega=omega) * (alpha-hyb)
vj = vj.reshape(2,3,nao,nao)
vk = vk.reshape(2,3,nao,nao)
if with_solvent.equilibrium_solvation:
dmxpy = dmxpya + dmxpyb
vj[0,:2] += mf.with_solvent._B_dot_x((dmzooa+dmzoob, dmxpy+dmxpy.T))
else:
vj[0,0] += mf.with_solvent._B_dot_x(dmzooa+dmzoob)
veff0doo = vj[0,0]+vj[1,0] - vk[:,0] + f1oo[:,0] + k1ao[:,0] * 2
wvoa = reduce(numpy.dot, (orbva.T, veff0doo[0], orboa)) * 2
wvob = reduce(numpy.dot, (orbvb.T, veff0doo[1], orbob)) * 2
veff = vj[0,1]+vj[1,1] - vk[:,1] + f1vo[:,0] * 2
veff0mopa = reduce(numpy.dot, (mo_coeff[0].T, veff[0], mo_coeff[0]))
veff0mopb = reduce(numpy.dot, (mo_coeff[1].T, veff[1], mo_coeff[1]))
wvoa -= numpy.einsum('ki,ai->ak', veff0mopa[:nocca,:nocca], xpya) * 2
wvob -= numpy.einsum('ki,ai->ak', veff0mopb[:noccb,:noccb], xpyb) * 2
wvoa += numpy.einsum('ac,ai->ci', veff0mopa[nocca:,nocca:], xpya) * 2
wvob += numpy.einsum('ac,ai->ci', veff0mopb[noccb:,noccb:], xpyb) * 2
veff = -vk[:,2]
veff0moma = reduce(numpy.dot, (mo_coeff[0].T, veff[0], mo_coeff[0]))
veff0momb = reduce(numpy.dot, (mo_coeff[1].T, veff[1], mo_coeff[1]))
wvoa -= numpy.einsum('ki,ai->ak', veff0moma[:nocca,:nocca], xmya) * 2
wvob -= numpy.einsum('ki,ai->ak', veff0momb[:noccb,:noccb], xmyb) * 2
wvoa += numpy.einsum('ac,ai->ci', veff0moma[nocca:,nocca:], xmya) * 2
wvob += numpy.einsum('ac,ai->ci', veff0momb[noccb:,noccb:], xmyb) * 2
else:
dm = (dmzooa, dmxpya+dmxpya.T,
dmzoob, dmxpyb+dmxpyb.T)
vj = mf.get_j(mol, dm, hermi=1).reshape(2,2,nao,nao)
if with_solvent.equilibrium_solvation:
dmxpy = dmxpya + dmxpyb
vj[0,:2] += mf.with_solvent._B_dot_x((dmzooa+dmzoob, dmxpy+dmxpy.T))
else:
vj[0,0] += mf.with_solvent._B_dot_x(dmzooa+dmzoob)
veff0doo = vj[0,0]+vj[1,0] + f1oo[:,0] + k1ao[:,0] * 2
wvoa = reduce(numpy.dot, (orbva.T, veff0doo[0], orboa)) * 2
wvob = reduce(numpy.dot, (orbvb.T, veff0doo[1], orbob)) * 2
veff = vj[0,1]+vj[1,1] + f1vo[:,0] * 2
veff0mopa = reduce(numpy.dot, (mo_coeff[0].T, veff[0], mo_coeff[0]))
veff0mopb = reduce(numpy.dot, (mo_coeff[1].T, veff[1], mo_coeff[1]))
wvoa -= numpy.einsum('ki,ai->ak', veff0mopa[:nocca,:nocca], xpya) * 2
wvob -= numpy.einsum('ki,ai->ak', veff0mopb[:noccb,:noccb], xpyb) * 2
wvoa += numpy.einsum('ac,ai->ci', veff0mopa[nocca:,nocca:], xpya) * 2
wvob += numpy.einsum('ac,ai->ci', veff0mopb[noccb:,noccb:], xpyb) * 2
veff0moma = numpy.zeros((nmoa,nmoa))
veff0momb = numpy.zeros((nmob,nmob))
with lib.temporary_env(mf.with_solvent, equilibrium_solvation=True):
vresp = mf.gen_response(hermi=1)
def fvind(x):
dm1 = numpy.empty((2,nao,nao))
xa = x[0,:nvira*nocca].reshape(nvira,nocca)
xb = x[0,nvira*nocca:].reshape(nvirb,noccb)
dma = reduce(numpy.dot, (orbva, xa, orboa.T))
dmb = reduce(numpy.dot, (orbvb, xb, orbob.T))
dm1[0] = dma + dma.T
dm1[1] = dmb + dmb.T
v1 = vresp(dm1)
v1a = reduce(numpy.dot, (orbva.T, v1[0], orboa))
v1b = reduce(numpy.dot, (orbvb.T, v1[1], orbob))
return numpy.hstack((v1a.ravel(), v1b.ravel()))
z1a, z1b = ucphf.solve(fvind, mo_energy, mo_occ, (wvoa,wvob),
max_cycle=td_grad.cphf_max_cycle,
tol=td_grad.cphf_conv_tol)[0]
time1 = log.timer('Z-vector using UCPHF solver', *time0)
z1ao = numpy.empty((2,nao,nao))
z1ao[0] = reduce(numpy.dot, (orbva, z1a, orboa.T))
z1ao[1] = reduce(numpy.dot, (orbvb, z1b, orbob.T))
veff = vresp((z1ao+z1ao.transpose(0,2,1)) * .5)
im0a = numpy.zeros((nmoa,nmoa))
im0b = numpy.zeros((nmob,nmob))
im0a[:nocca,:nocca] = reduce(numpy.dot, (orboa.T, veff0doo[0]+veff[0], orboa)) * .5
im0b[:noccb,:noccb] = reduce(numpy.dot, (orbob.T, veff0doo[1]+veff[1], orbob)) * .5
im0a[:nocca,:nocca]+= numpy.einsum('ak,ai->ki', veff0mopa[nocca:,:nocca], xpya) * .5
im0b[:noccb,:noccb]+= numpy.einsum('ak,ai->ki', veff0mopb[noccb:,:noccb], xpyb) * .5
im0a[:nocca,:nocca]+= numpy.einsum('ak,ai->ki', veff0moma[nocca:,:nocca], xmya) * .5
im0b[:noccb,:noccb]+= numpy.einsum('ak,ai->ki', veff0momb[noccb:,:noccb], xmyb) * .5
im0a[nocca:,nocca:] = numpy.einsum('ci,ai->ac', veff0mopa[nocca:,:nocca], xpya) * .5
im0b[noccb:,noccb:] = numpy.einsum('ci,ai->ac', veff0mopb[noccb:,:noccb], xpyb) * .5
im0a[nocca:,nocca:]+= numpy.einsum('ci,ai->ac', veff0moma[nocca:,:nocca], xmya) * .5
im0b[noccb:,noccb:]+= numpy.einsum('ci,ai->ac', veff0momb[noccb:,:noccb], xmyb) * .5
im0a[nocca:,:nocca] = numpy.einsum('ki,ai->ak', veff0mopa[:nocca,:nocca], xpya)
im0b[noccb:,:noccb] = numpy.einsum('ki,ai->ak', veff0mopb[:noccb,:noccb], xpyb)
im0a[nocca:,:nocca]+= numpy.einsum('ki,ai->ak', veff0moma[:nocca,:nocca], xmya)
im0b[noccb:,:noccb]+= numpy.einsum('ki,ai->ak', veff0momb[:noccb,:noccb], xmyb)
zeta_a = (mo_energy[0][:,None] + mo_energy[0]) * .5
zeta_b = (mo_energy[1][:,None] + mo_energy[1]) * .5
zeta_a[nocca:,:nocca] = mo_energy[0][:nocca]
zeta_b[noccb:,:noccb] = mo_energy[1][:noccb]
zeta_a[:nocca,nocca:] = mo_energy[0][nocca:]
zeta_b[:noccb,noccb:] = mo_energy[1][noccb:]
dm1a = numpy.zeros((nmoa,nmoa))
dm1b = numpy.zeros((nmob,nmob))
dm1a[:nocca,:nocca] = dooa * .5
dm1b[:noccb,:noccb] = doob * .5
dm1a[nocca:,nocca:] = dvva * .5
dm1b[noccb:,noccb:] = dvvb * .5
dm1a[nocca:,:nocca] = z1a * .5
dm1b[noccb:,:noccb] = z1b * .5
dm1a[:nocca,:nocca] += numpy.eye(nocca) # for ground state
dm1b[:noccb,:noccb] += numpy.eye(noccb)
im0a = reduce(numpy.dot, (mo_coeff[0], im0a+zeta_a*dm1a, mo_coeff[0].T))
im0b = reduce(numpy.dot, (mo_coeff[1], im0b+zeta_b*dm1b, mo_coeff[1].T))
im0 = im0a + im0b
# Initialize hcore_deriv with the underlying SCF object because some
# extensions (e.g. QM/MM, solvent) modifies the SCF object only.
mf_grad = td_grad.base._scf.nuc_grad_method()
hcore_deriv = mf_grad.hcore_generator(mol)
s1 = mf_grad.get_ovlp(mol)
dmz1dooa = z1ao[0] + dmzooa
dmz1doob = z1ao[1] + dmzoob
oo0a = reduce(numpy.dot, (orboa, orboa.T))
oo0b = reduce(numpy.dot, (orbob, orbob.T))
as_dm1 = oo0a + oo0b + (dmz1dooa + dmz1doob) * .5
if abs(hyb) > 1e-10:
dm = (oo0a, dmz1dooa+dmz1dooa.T, dmxpya+dmxpya.T, dmxmya-dmxmya.T,
oo0b, dmz1doob+dmz1doob.T, dmxpyb+dmxpyb.T, dmxmyb-dmxmyb.T)
vj, vk = td_grad.get_jk(mol, dm)
vj = vj.reshape(2,4,3,nao,nao)
vk = vk.reshape(2,4,3,nao,nao) * hyb
if abs(omega) > 1e-10:
with mol.with_range_coulomb(omega):
vk += td_grad.get_k(mol, dm).reshape(2,4,3,nao,nao) * (alpha-hyb)
veff1 = vj[0] + vj[1] - vk
else:
dm = (oo0a, dmz1dooa+dmz1dooa.T, dmxpya+dmxpya.T,
oo0b, dmz1doob+dmz1doob.T, dmxpyb+dmxpyb.T)
vj = td_grad.get_j(mol, dm).reshape(2,3,3,nao,nao)
veff1 = numpy.zeros((2,4,3,nao,nao))
veff1[:,:3] = vj[0] + vj[1]
fxcz1 = tduks_grad._contract_xc_kernel(td_grad, mf.xc, z1ao, None,
False, False, max_memory)[0]
veff1[:,0] += vxc1[:,1:]
veff1[:,1] +=(f1oo[:,1:] + fxcz1[:,1:] + k1ao[:,1:]*2)*2 # *2 for dmz1doo+dmz1oo.T
veff1[:,2] += f1vo[:,1:] * 2
veff1a, veff1b = veff1
time1 = log.timer('2e AO integral derivatives', *time1)
if atmlst is None:
atmlst = range(mol.natm)
offsetdic = mol.offset_nr_by_atom()
de = numpy.zeros((len(atmlst),3))
for k, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = offsetdic[ia]
# Ground state gradients
h1ao = hcore_deriv(ia)
de[k] = numpy.einsum('xpq,pq->x', h1ao, as_dm1)
de[k] += numpy.einsum('xpq,pq->x', veff1a[0,:,p0:p1], oo0a[p0:p1])
de[k] += numpy.einsum('xpq,pq->x', veff1b[0,:,p0:p1], oo0b[p0:p1])
de[k] += numpy.einsum('xpq,qp->x', veff1a[0,:,p0:p1], oo0a[:,p0:p1])
de[k] += numpy.einsum('xpq,qp->x', veff1b[0,:,p0:p1], oo0b[:,p0:p1])
de[k] += numpy.einsum('xpq,pq->x', veff1a[0,:,p0:p1], dmz1dooa[p0:p1]) * .5
de[k] += numpy.einsum('xpq,pq->x', veff1b[0,:,p0:p1], dmz1doob[p0:p1]) * .5
de[k] += numpy.einsum('xpq,qp->x', veff1a[0,:,p0:p1], dmz1dooa[:,p0:p1]) * .5
de[k] += numpy.einsum('xpq,qp->x', veff1b[0,:,p0:p1], dmz1doob[:,p0:p1]) * .5
de[k] -= numpy.einsum('xpq,pq->x', s1[:,p0:p1], im0[p0:p1])
de[k] -= numpy.einsum('xqp,pq->x', s1[:,p0:p1], im0[:,p0:p1])
de[k] += numpy.einsum('xij,ij->x', veff1a[1,:,p0:p1], oo0a[p0:p1]) * .5
de[k] += numpy.einsum('xij,ij->x', veff1b[1,:,p0:p1], oo0b[p0:p1]) * .5
de[k] += numpy.einsum('xij,ij->x', veff1a[2,:,p0:p1], dmxpya[p0:p1,:])
de[k] += numpy.einsum('xij,ij->x', veff1b[2,:,p0:p1], dmxpyb[p0:p1,:])
de[k] += numpy.einsum('xij,ij->x', veff1a[3,:,p0:p1], dmxmya[p0:p1,:])
de[k] += numpy.einsum('xij,ij->x', veff1b[3,:,p0:p1], dmxmyb[p0:p1,:])
de[k] += numpy.einsum('xji,ij->x', veff1a[2,:,p0:p1], dmxpya[:,p0:p1])
de[k] += numpy.einsum('xji,ij->x', veff1b[2,:,p0:p1], dmxpyb[:,p0:p1])
de[k] -= numpy.einsum('xji,ij->x', veff1a[3,:,p0:p1], dmxmya[:,p0:p1])
de[k] -= numpy.einsum('xji,ij->x', veff1b[3,:,p0:p1], dmxmyb[:,p0:p1])
dm0 = oo0a + oo0b
dmz1doo = (dmz1dooa + dmz1doob) * .5
dmxpy = dmxpya + dmxpyb
de += _grad_solvent(with_solvent, dm0, dmz1doo, dmxpy)
    log.timer('TDUKS nuclear gradients', *time0)
return de
def _grad_solvent(pcmobj, dm0, dmz1doo, dmxpy, singlet=True):
    '''Energy derivatives associated with derivatives of the B tensor'''
dielectric = pcmobj.eps
if dielectric > 0:
f_epsilon = (dielectric-1.)/dielectric
else:
f_epsilon = 1
r_vdw = pcmobj._intermediates['r_vdw' ]
ylm_1sph = pcmobj._intermediates['ylm_1sph' ]
ui = pcmobj._intermediates['ui' ]
Lmat = pcmobj._intermediates['Lmat' ]
cached_pol = pcmobj._intermediates['cached_pol']
# First order nuclei-solvent-electron contribution
tmp = _grad_ne(pcmobj, dmz1doo,
r_vdw, ui, ylm_1sph, cached_pol, Lmat)
de = .5 * f_epsilon * tmp
# First order electron-solvent-electron contribution
tmp = _grad_ee(pcmobj, (dm0, dmxpy), (dmz1doo, dmxpy),
r_vdw, ui, ylm_1sph, cached_pol, Lmat)
de += .5 * f_epsilon * tmp[0] # (dm0 * dmz1doo)
if singlet and pcmobj.equilibrium_solvation:
de += .5 * f_epsilon * tmp[1] # (dmxpy * dmxpy)
return de
def _grad_nn(pcmobj, r_vdw, ui, ylm_1sph, cached_pol, L):
'''nuclei-solvent-nuclei term'''
mol = pcmobj.mol
natm = mol.natm
coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcmobj.lebedev_order)
ngrid_1sph = coords_1sph.shape[0]
lmax = pcmobj.lmax
nlm = (lmax+1)**2
atom_coords = mol.atom_coords()
atom_charges = mol.atom_charges()
fi0 = ddcosmo.make_fi(pcmobj, r_vdw)
fi1 = ddcosmo_grad.make_fi1(pcmobj, pcmobj.get_atomic_radii())
fi1[:,:,ui==0] = 0
ui1 = -fi1
de = numpy.zeros((natm,3))
cav_coords = (atom_coords.reshape(natm,1,3)
+ numpy.einsum('r,gx->rgx', r_vdw, coords_1sph))
v_phi = numpy.zeros((natm, ngrid_1sph))
for ia in range(natm):
# Note (-) sign is not applied to atom_charges, because (-) is explicitly
# included in rhs and L matrix
d_rs = atom_coords.reshape(-1,1,3) - cav_coords[ia]
v_phi[ia] = numpy.einsum('z,zp->p', atom_charges, 1./lib.norm(d_rs,axis=2))
phi0 = -numpy.einsum('n,xn,jn,jn->jx', weights_1sph, ylm_1sph, ui, v_phi)
psi0 = numpy.zeros((natm, nlm))
for ia in range(natm):
psi0[ia,0] += numpy.sqrt(4*numpy.pi)/r_vdw[ia] * mol.atom_charge(ia)
v_phi0 = numpy.empty((natm,ngrid_1sph))
for ia in range(natm):
cav_coords = atom_coords[ia] + r_vdw[ia] * coords_1sph
d_rs = atom_coords.reshape(-1,1,3) - cav_coords
v_phi0[ia] = numpy.einsum('z,zp->p', atom_charges, 1./lib.norm(d_rs,axis=2))
phi1 = -numpy.einsum('n,ln,azjn,jn->azjl', weights_1sph, ylm_1sph, ui1, v_phi0)
for ia in range(natm):
cav_coords = atom_coords[ia] + r_vdw[ia] * coords_1sph
for ja in range(natm):
rs = atom_coords[ja] - cav_coords
d_rs = lib.norm(rs, axis=1)
v_phi = atom_charges[ja] * numpy.einsum('px,p->px', rs, 1./d_rs**3)
tmp = numpy.einsum('n,ln,n,nx->xl', weights_1sph, ylm_1sph, ui[ia], v_phi)
phi1[ja,:,ia] += tmp # response of the other atoms
phi1[ia,:,ia] -= tmp # response of cavity grids
L1 = ddcosmo_grad.make_L1(pcmobj, r_vdw, ylm_1sph, fi0)
Xvec0 = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi0.ravel())
Xvec0 = Xvec0.reshape(natm,nlm)
phi1 -= numpy.einsum('aziljm,jm->azil', L1, Xvec0)
LS0 = numpy.linalg.solve(L.reshape(natm*nlm,-1).T, psi0.ravel())
LS0 = LS0.reshape(natm,nlm)
de += numpy.einsum('il,azil->az', LS0, phi1)
return de
def _grad_ne(pcmobj, dm, r_vdw, ui, ylm_1sph, cached_pol, L):
'''nuclear charge-electron density cross term'''
mol = pcmobj.mol
natm = mol.natm
coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcmobj.lebedev_order)
ngrid_1sph = coords_1sph.shape[0]
lmax = pcmobj.lmax
nlm = (lmax+1)**2
nao = mol.nao
atom_coords = mol.atom_coords()
atom_charges = mol.atom_charges()
grids = pcmobj.grids
aoslices = mol.aoslice_by_atom()
#extern_point_idx = ui > 0
fi0 = ddcosmo.make_fi(pcmobj, r_vdw)
fi1 = ddcosmo_grad.make_fi1(pcmobj, pcmobj.get_atomic_radii())
fi1[:,:,ui==0] = 0
ui1 = -fi1
dms = numpy.asarray(dm)
is_single_dm = dms.ndim == 2
dms = dms.reshape(-1,nao,nao)
n_dm = dms.shape[0]
de = numpy.zeros((n_dm,natm,3))
cav_coords = (atom_coords.reshape(natm,1,3)
+ numpy.einsum('r,gx->rgx', r_vdw, coords_1sph))
v_phi = numpy.zeros((natm, ngrid_1sph))
for ia in range(natm):
# Note (-) sign is not applied to atom_charges, because (-) is explicitly
# included in rhs and L matrix
d_rs = atom_coords.reshape(-1,1,3) - cav_coords[ia]
v_phi[ia] = numpy.einsum('z,zp->p', atom_charges, 1./lib.norm(d_rs,axis=2))
phi0 = -numpy.einsum('n,xn,jn,jn->jx', weights_1sph, ylm_1sph, ui, v_phi)
Xvec0 = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi0.ravel())
Xvec0 = Xvec0.reshape(natm,nlm)
v_phi0 = numpy.empty((natm,ngrid_1sph))
for ia in range(natm):
cav_coords = atom_coords[ia] + r_vdw[ia] * coords_1sph
d_rs = atom_coords.reshape(-1,1,3) - cav_coords
v_phi0[ia] = numpy.einsum('z,zp->p', atom_charges, 1./lib.norm(d_rs,axis=2))
phi1 = -numpy.einsum('n,ln,azjn,jn->azjl', weights_1sph, ylm_1sph, ui1, v_phi0)
for ia in range(natm):
cav_coords = atom_coords[ia] + r_vdw[ia] * coords_1sph
for ja in range(natm):
rs = atom_coords[ja] - cav_coords
d_rs = lib.norm(rs, axis=1)
v_phi = atom_charges[ja] * numpy.einsum('px,p->px', rs, 1./d_rs**3)
tmp = numpy.einsum('n,ln,n,nx->xl', weights_1sph, ylm_1sph, ui[ia], v_phi)
phi1[ja,:,ia] += tmp # response of the other atoms
phi1[ia,:,ia] -= tmp # response of cavity grids
L1 = ddcosmo_grad.make_L1(pcmobj, r_vdw, ylm_1sph, fi0)
phi1 -= numpy.einsum('aziljm,jm->azil', L1, Xvec0)
Xvec1 = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi1.reshape(-1,natm*nlm).T)
Xvec1 = Xvec1.T.reshape(natm,3,natm,nlm)
for ia, (coords, weight, weight1) in enumerate(rks_grad.grids_response_cc(grids)):
ao = mol.eval_gto('GTOval_sph_deriv1', coords)
fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)]
fac_pol = ddcosmo._vstack_factor_fak_pol(fak_pol, lmax)
scaled_weights = numpy.einsum('azm,mn->azn', Xvec1[:,:,ia], fac_pol)
scaled_weights *= weight
aow = numpy.einsum('gi,azg->azgi', ao[0], scaled_weights)
de -= numpy.einsum('nij,gi,azgj->naz', dms, ao[0], aow)
aow0 = numpy.einsum('gi,g->gi', ao[0], weight)
aow1 = numpy.einsum('gi,zxg->zxgi', ao[0], weight1)
den0 = numpy.einsum('nij,gi,zxgj->nzxg', dms, ao[0], aow1)
de -= numpy.einsum('m,mg,nzxg->nzx', Xvec0[ia], fac_pol, den0)
eta_nj = numpy.einsum('m,mg->g', Xvec0[ia], fac_pol)
dm_ao = lib.einsum('nij,gj->ngi', dms, aow0)
dm_ao += lib.einsum('nji,gj->ngi', dms, aow0)
for ja in range(natm):
shl0, shl1, p0, p1 = aoslices[ja]
den1 = numpy.einsum('ngi,xgi->nxg', dm_ao[:,:,p0:p1], ao[1:,:,p0:p1])
detmp = numpy.einsum('g,nxg->nx', eta_nj, den1)
de[:,ja] += detmp
de[:,ia] -= detmp
psi0 = numpy.zeros((natm, nlm))
for ia in range(natm):
psi0[ia,0] += numpy.sqrt(4*numpy.pi)/r_vdw[ia] * mol.atom_charge(ia)
LS0 = numpy.linalg.solve(L.reshape(natm*nlm,-1).T, psi0.ravel())
LS0 = LS0.reshape(natm,nlm)
LS1 = numpy.einsum('il,aziljm->azjm', LS0, L1)
LS1 = numpy.linalg.solve(L.reshape(natm*nlm,-1).T, LS1.reshape(-1,natm*nlm).T)
LS1 = LS1.T.reshape(natm,3,natm,nlm)
int3c2e = mol._add_suffix('int3c2e')
int3c2e_ip1 = mol._add_suffix('int3c2e_ip1')
cintopt = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, int3c2e)
cintopt_ip1 = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, int3c2e_ip1)
for ia in range(natm):
cav_coords = atom_coords[ia] + r_vdw[ia] * coords_1sph
#fakemol = gto.fakemol_for_charges(cav_coords[ui[ia]>0])
fakemol = gto.fakemol_for_charges(cav_coords)
wtmp = numpy.einsum('l,n,ln->ln', LS0[ia], weights_1sph, ylm_1sph)
v_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e, aosym='s1', cintopt=cintopt)
jaux = numpy.einsum('ijg,nij->ng', v_nj, dms)
de -= numpy.einsum('azl,g,lg,g,ng->naz', LS1[:,:,ia], weights_1sph, ylm_1sph, ui[ia], jaux)
de += numpy.einsum('lg,azg,ng->naz', wtmp, ui1[:,:,ia], jaux)
v_e1_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e_ip1, comp=3,
aosym='s1', cintopt=cintopt_ip1)
for ja in range(natm):
shl0, shl1, p0, p1 = aoslices[ja]
jaux1 = numpy.einsum('xijg,nij->nxg', v_e1_nj[:,p0:p1], dms[:,p0:p1])
jaux1 += numpy.einsum('xijg,nji->nxg', v_e1_nj[:,p0:p1], dms[:,:,p0:p1])
detmp = numpy.einsum('lg,g,nxg->nx', wtmp, ui[ia], jaux1)
de[:,ja] -= detmp
de[:,ia] += detmp
if is_single_dm:
de = de[0]
return de
def _grad_ee(pcmobj, dm1, dm2, r_vdw, ui, ylm_1sph, cached_pol, L):
    '''electron density-electron density term'''
    mol = pcmobj.mol
natm = mol.natm
nao = mol.nao
lmax = pcmobj.lmax
nlm = (lmax+1)**2
atom_coords = mol.atom_coords()
aoslices = mol.aoslice_by_atom()
grids = pcmobj.grids
coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcmobj.lebedev_order)
#extern_point_idx = ui > 0
fi0 = ddcosmo.make_fi(pcmobj, r_vdw)
fi1 = ddcosmo_grad.make_fi1(pcmobj, pcmobj.get_atomic_radii())
fi1[:,:,ui==0] = 0
ui1 = -fi1
dm1s = numpy.asarray(dm1)
dm2s = numpy.asarray(dm2)
is_single_dm = dm1s.ndim == 2
dm1s = dm1s.reshape(-1,nao,nao)
dm2s = dm2s.reshape(-1,nao,nao)
n_dm = dm1s.shape[0]
assert dm2s.shape[0] == n_dm
de = numpy.zeros((n_dm,natm,3))
ni = numint.NumInt()
make_rho, nset, nao = ni._gen_rho_evaluator(mol, dm1s, hermi=0)
den = numpy.empty((n_dm,grids.weights.size))
p1 = 0
for ao, mask, weight, coords in ni.block_loop(mol, grids, nao, 0):
p0, p1 = p1, p1 + weight.size
for i in range(n_dm):
den[i,p0:p1] = make_rho(i, ao, mask, 'LDA') * weight
psi0_dm1 = numpy.zeros((n_dm, natm, nlm))
i1 = 0
for ia in range(natm):
fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)]
fac_pol = ddcosmo._vstack_factor_fak_pol(fak_pol, lmax)
i0, i1 = i1, i1 + fac_pol.shape[1]
psi0_dm1[:,ia] = -numpy.einsum('mg,ng->nm', fac_pol, den[:,i0:i1])
LS0 = numpy.linalg.solve(L.reshape(natm*nlm,-1).T, psi0_dm1.reshape(n_dm,-1).T)
LS0 = LS0.T.reshape(n_dm,natm,nlm)
phi0_dm1 = numpy.zeros((n_dm,natm,nlm))
phi0_dm2 = numpy.zeros((n_dm,natm,nlm))
phi1_dm1 = numpy.zeros((n_dm,natm,3,natm,nlm))
int3c2e = mol._add_suffix('int3c2e')
int3c2e_ip1 = mol._add_suffix('int3c2e_ip1')
cintopt = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, int3c2e)
cintopt_ip1 = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, int3c2e_ip1)
for ia in range(natm):
cav_coords = atom_coords[ia] + r_vdw[ia] * coords_1sph
#fakemol = gto.fakemol_for_charges(cav_coords[ui[ia]>0])
fakemol = gto.fakemol_for_charges(cav_coords)
v_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e, aosym='s1', cintopt=cintopt)
v_phi = numpy.einsum('ijg,nij->ng', v_nj, dm1s)
phi0_dm1[:,ia] = numpy.einsum('g,lg,g,ng->nl', weights_1sph, ylm_1sph, ui[ia], v_phi)
phi1_dm1[:,:,:,ia] += numpy.einsum('g,lg,azg,ng->nazl', weights_1sph, ylm_1sph, ui1[:,:,ia], v_phi)
jaux = numpy.einsum('ijg,nij->ng', v_nj, dm2s)
de += numpy.einsum('nl,g,lg,azg,ng->naz', LS0[:,ia], weights_1sph, ylm_1sph, ui1[:,:,ia], jaux)
v_e1_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e_ip1, comp=3,
aosym='s1', cintopt=cintopt_ip1)
wtmp = numpy.einsum('g,lg,g->lg', weights_1sph, ylm_1sph, ui[ia])
phi0_dm2[:,ia] = numpy.einsum('lg,ng->nl', wtmp, jaux)
for ja in range(natm):
shl0, shl1, p0, p1 = aoslices[ja]
jaux1 = numpy.einsum('xijg,nij->nxg', v_e1_nj[:,p0:p1], dm2s[:,p0:p1])
jaux1 += numpy.einsum('xijg,nji->nxg', v_e1_nj[:,p0:p1], dm2s[:,:,p0:p1])
detmp = numpy.einsum('nl,lg,nxg->nx', LS0[:,ia], wtmp, jaux1)
de[:,ja] -= detmp
de[:,ia] += detmp
tmp = numpy.einsum('xijg,nij->nxg', v_e1_nj[:,p0:p1], dm1s[:,p0:p1])
tmp += numpy.einsum('xijg,nji->nxg', v_e1_nj[:,p0:p1], dm1s[:,:,p0:p1])
phitmp = numpy.einsum('lg,nxg->nxl', wtmp, tmp)
phi1_dm1[:,ja,:,ia] -= phitmp
phi1_dm1[:,ia,:,ia] += phitmp
Xvec0 = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi0_dm1.reshape(n_dm,-1).T)
Xvec0 = Xvec0.T.reshape(n_dm,natm,nlm)
L1 = ddcosmo_grad.make_L1(pcmobj, r_vdw, ylm_1sph, fi0)
phi1_dm1 -= numpy.einsum('aziljm,njm->nazil', L1, Xvec0)
Xvec1 = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi1_dm1.reshape(-1,natm*nlm).T)
Xvec1 = Xvec1.T.reshape(n_dm,natm,3,natm,nlm)
psi1_dm1 = numpy.zeros((n_dm,natm,3,natm,nlm))
for ia, (coords, weight, weight1) in enumerate(rks_grad.grids_response_cc(grids)):
ao = mol.eval_gto('GTOval_sph_deriv1', coords)
fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)]
fac_pol = ddcosmo._vstack_factor_fak_pol(fak_pol, lmax)
scaled_weights = numpy.einsum('nazm,mg->nazg', Xvec1[:,:,:,ia], fac_pol)
scaled_weights *= weight
aow = numpy.einsum('gi,nazg->nazgi', ao[0], scaled_weights)
de -= numpy.einsum('nij,gi,nazgj->naz', dm2s, ao[0], aow)
aow0 = numpy.einsum('gi,g->gi', ao[0], weight)
aow1 = numpy.einsum('gi,zxg->zxgi', ao[0], weight1)
den0 = numpy.einsum('nij,gi,zxgj->nzxg', dm1s, ao[0], aow1)
psi1_dm1[:,:,:,ia] -= numpy.einsum('mg,nzxg->nzxm', fac_pol, den0)
dm_ao = lib.einsum('nij,gj->ngi', dm1s, aow0)
dm_ao += lib.einsum('nji,gj->ngi', dm1s, aow0)
for ja in range(natm):
shl0, shl1, p0, p1 = aoslices[ja]
den1 = numpy.einsum('ngi,xgi->nxg', dm_ao[:,:,p0:p1], ao[1:,:,p0:p1])
psitmp = numpy.einsum('mg,nxg->nxm', fac_pol, den1)
psi1_dm1[:,ja,:,ia] += psitmp
psi1_dm1[:,ia,:,ia] -= psitmp
eta_nj = numpy.einsum('nm,mg->ng', Xvec0[:,ia], fac_pol)
den0 = numpy.einsum('nij,gi,zxgj->nzxg', dm2s, ao[0], aow1)
de -= numpy.einsum('ng,nzxg->nzx', eta_nj, den0)
dm_ao = lib.einsum('nij,gj->ngi', dm2s, aow0)
dm_ao += lib.einsum('nji,gj->ngi', dm2s, aow0)
for ja in range(natm):
shl0, shl1, p0, p1 = aoslices[ja]
den1 = lib.einsum('ngi,xgi->nxg', dm_ao[:,:,p0:p1], ao[1:,:,p0:p1])
detmp = numpy.einsum('ng,nxg->nx', eta_nj, den1)
de[:,ja] += detmp
de[:,ia] -= detmp
psi1_dm1 -= numpy.einsum('nil,aziljm->nazjm', LS0, L1)
LS1 = numpy.linalg.solve(L.reshape(natm*nlm,-1).T, psi1_dm1.reshape(-1,natm*nlm).T)
LS1 = LS1.T.reshape(n_dm,natm,3,natm,nlm)
de += numpy.einsum('nazjx,njx->naz', LS1, phi0_dm2)
if is_single_dm:
de = de[0]
return de
if __name__ == '__main__':
mol0 = gto.M(atom='H 0. 0. 1.804; F 0. 0. 0.', verbose=0, unit='B')
mol1 = gto.M(atom='H 0. 0. 1.803; F 0. 0. 0.', verbose=0, unit='B')
mol2 = gto.M(atom='H 0. 0. 1.805; F 0. 0. 0.', verbose=0, unit='B')
# TDA with equilibrium_solvation
mf = mol0.RHF().ddCOSMO().run()
td = mf.TDA().ddCOSMO().run(equilibrium_solvation=True)
g1 = td.nuc_grad_method().kernel() # 0 0 -0.5116214042
mf = mol1.RHF().ddCOSMO().run()
td1 = mf.TDA().ddCOSMO().run(equilibrium_solvation=True)
mf = mol2.RHF().ddCOSMO().run()
td2 = mf.TDA().ddCOSMO().run(equilibrium_solvation=True)
print((td2.e_tot[0]-td1.e_tot[0])/0.002, g1[0,2])
print((td2.e_tot[0]-td1.e_tot[0])/0.002 - g1[0,2])
# TDA without equilibrium_solvation
mf = mol0.RHF().ddCOSMO().run()
td = mf.TDA().ddCOSMO().run()
g1 = td.nuc_grad_method().kernel()
mf = mol1.RHF().ddCOSMO().run()
td1 = mf.TDA().ddCOSMO().run()
mf = mol2.RHF().ddCOSMO().run()
td2 = mf.TDA().ddCOSMO().run()
print((td2.e_tot[0]-td1.e_tot[0])/0.002, g1[0,2])
print((td2.e_tot[0]-td1.e_tot[0])/0.002 - g1[0,2])
# TDA lda with equilibrium_solvation
mf = mol0.RKS().ddCOSMO().run(xc='svwn')
td = mf.TDA().ddCOSMO().run(equilibrium_solvation=True)
g1 = td.nuc_grad_method().kernel()
mf = mol1.RKS().ddCOSMO().run(xc='svwn')
td1 = mf.TDA().ddCOSMO().run(equilibrium_solvation=True)
mf = mol2.RKS().ddCOSMO().run(xc='svwn')
td2 = mf.TDA().ddCOSMO().run(equilibrium_solvation=True)
print((td2.e_tot[0]-td1.e_tot[0])/0.002, g1[0,2])
print((td2.e_tot[0]-td1.e_tot[0])/0.002 - g1[0,2])
|
<reponame>SemanticsOS/smc.bibencodings
# -*- coding: utf-8 -*-
#=============================================================================
# Copyright : (c)2010-2012 semantics GmbH
# Rep./File : $URL$
# Date : $Date$
# Author : <NAME>
# License : BSD LICENSE
# Worker : $Author$
# Revision : $Rev$
# Purpose : ISO-5426 encoding
#=============================================================================
"""ISO-5426 / MAB2 codec
"""
from __future__ import unicode_literals, print_function
import codecs
from smc.bibencodings.utils import DecodeIterator
def encode(input, errors='strict'):
"""Encode unicode as ISO-5426
"""
if errors not in set(['strict', 'replace', 'ignore']):
raise ValueError("Invalid errors argument %s" % errors)
result = []
rappend = result.append
uget = unicodemap.get
for u in input:
s = uget(u)
if s is None:
if errors == 'strict':
raise UnicodeError(repr(u))
elif errors == "replace":
s = b'?'
elif errors == "ignore":
s = b''
else: # pragma: no cover
# should never be reached
raise ValueError("Invalid errors argument %s" % errors)
# special case combining char, move it in front of the last char
if len(s) == 1 and 0xc0 <= ord(s) <= 0xdf:
result.insert(-1, s)
else:
rappend(s)
return b"".join(result), len(input)
def decode(input, errors='strict', special=None):
"""Decode unicode from ISO-5426
"""
if errors not in set(['strict', 'replace', 'ignore', 'repr']):
raise ValueError("Invalid errors argument %s" % errors)
result = []
di = DecodeIterator(input)
# optimizations
rappend = result.append
cget = charmap.get
for c in di:
o = ord(c)
# ASCII chars
if c < b'\x7f':
rappend(chr(o))
#i += 1
continue
c1, c2 = di.peek(2)
ccc2 = None
# 0xc0 to 0xdf signals a combined char
if 0xc0 <= o <= 0xdf and c1 is not None:
            # special case 0xc9: both 0xc8 and 0xc9 are combining diaeresis
# use 0xc8 in favor of 0xc9
if c == b'\xc9':
c = b'\xc8'
if c1 == b'\xc9':
c1 = b'\xc8'
# double combined char
if 0xc0 <= ord(c1) <= 0xdf and c2 is not None:
ccc2 = c + c1 + c2
r = cget(ccc2)
if r is not None:
# double combined found in table
rappend(r)
di.evolve(2)
continue
# build combining unicode
dc1 = cget(c)
dc2 = cget(c1 + c2)
if dc1 is not None and dc2 is not None: # pragma: no branch
# reverse order, in unicode, the combining char comes after the char
rappend(dc2 + dc1)
di.evolve(2)
continue
else:
cc1 = c + c1
r = cget(cc1)
if r is not None:
rappend(r)
di.evolve(1)
continue
# denormalized unicode: char + combining
r = cget(c)
rn = cget(c1)
if r is not None and rn is not None: # pragma: no branch
rappend(rn + r)
di.evolve(1)
continue
# just the combining
#r = cget(c)
#if r is not None:
# result.append(r)
# continue
# other chars, 0x80 <= o <= 0xbf or o >= 0xe0 or last combining
if special is not None:
r = special.get(c)
if r is not None:
rappend(r)
continue
r = cget(c)
if r is not None:
rappend(r)
continue
# only reached when no result was found
if errors == "strict":
p = di.position
raise UnicodeError("Can't decode byte%s %r at position %i (context %r)" %
("" if ccc2 is None else "s",
c if ccc2 is None else ccc2,
p, input[p - 3:p + 3]))
elif errors == "replace":
rappend('\ufffd')
elif errors == "ignore":
pass
elif errors == "repr":
rappend('\\x%x' % o)
else: # pragma: no cover
# should never be reached
raise ValueError("Invalid errors argument %s" % errors)
return "".join(result), di.position
### Codec APIs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return encode(input, errors)
def decode(self, input, errors='strict'):
return decode(input, errors)
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
### encodings module API
codecInfo = codecs.CodecInfo(
name='iso-5426',
encode=Codec().encode,
decode=Codec().decode,
streamreader=StreamReader,
streamwriter=StreamWriter)
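# Sketch (assumption, not part of the original module): the CodecInfo above can be
# hooked into the standard library via codecs.register(); the helper name below is
# hypothetical and only illustrates the intended use of `codecInfo`.
def _register_iso_5426():
    """Register the 'iso-5426' codec so bytes.decode('iso-5426') works."""
    codecs.register(lambda name: codecInfo if name == 'iso-5426' else None)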
### Codec APIs
class SpecialXE0Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return encode(input, errors)
def decode(self, input, errors='strict'):
return decode(input, errors, special_xe0_map)
class SpecialXE0StreamWriter(SpecialXE0Codec, codecs.StreamWriter):
pass
class SpecialXE0StreamReader(SpecialXE0Codec, codecs.StreamReader):
pass
### encodings module API
specialXE0CodecInfo = codecs.CodecInfo(
name='iso-5426-xe0',
encode=SpecialXE0Codec().encode,
decode=SpecialXE0Codec().decode,
streamreader=SpecialXE0StreamReader,
streamwriter=SpecialXE0StreamWriter)
# special identity mapping for 0xa4, 0xe0-0xff
special_xe0_map = {
b'\xa4': '\xa4',
b'\xe0': '\xe0',
b'\xe1': '\xe1',
b'\xe2': '\xe2',
b'\xe3': '\xe3',
b'\xe4': '\xe4',
b'\xe5': '\xe5',
b'\xe6': '\xe6',
b'\xe7': '\xe7',
b'\xe8': '\xe8',
b'\xe9': '\xe9',
b'\xea': '\xea',
b'\xeb': '\xeb',
b'\xec': '\xec',
b'\xed': '\xed',
b'\xee': '\xee',
b'\xef': '\xef',
b'\xf0': '\xf0',
b'\xf1': '\xf1',
b'\xf2': '\xf2',
b'\xf3': '\xf3',
b'\xf4': '\xf4',
b'\xf5': '\xf5',
b'\xf6': '\xf6',
b'\xf7': '\xf7',
b'\xf8': '\xf8',
b'\xf9': '\xf9',
b'\xfa': '\xfa',
b'\xfb': '\xfb',
b'\xfc': '\xfc',
b'\xfd': '\xfd',
b'\xfe': '\xfe',
b'\xff': '\xff'}
unicodemap = {
'\u001d': b'\x1d', # <control>
'\u001e': b'\x1e', # <control>
'\u001f': b'\x1f', # <control>
'\u0020': b' ', # SPACE
'\u0021': b'!', # EXCLAMATION MARK
'\u0022': b'"', # QUOTATION MARK
'\u0023': b'#', # NUMBER SIGN
'\u0024': b'\xa4', # DOLLAR SIGN
'\u0025': b'%', # PERCENT SIGN
'\u0026': b'&', # AMPERSAND
'\u0027': b"'", # APOSTROPHE
'\u0028': b'(', # LEFT PARENTHESIS
'\u0029': b')', # RIGHT PARENTHESIS
'\u002a': b'*', # ASTERISK
'\u002b': b'+', # PLUS SIGN
'\u002c': b',', # COMMA
'\u002d': b'-', # HYPHEN-MINUS
'\u002e': b'.', # FULL STOP
'\u002f': b'/', # SOLIDUS
'\u0030': b'0', # DIGIT ZERO
'\u0031': b'1', # DIGIT ONE
'\u0032': b'2', # DIGIT TWO
'\u0033': b'3', # DIGIT THREE
'\u0034': b'4', # DIGIT FOUR
'\u0035': b'5', # DIGIT FIVE
'\u0036': b'6', # DIGIT SIX
'\u0037': b'7', # DIGIT SEVEN
'\u0038': b'8', # DIGIT EIGHT
'\u0039': b'9', # DIGIT NINE
'\u003a': b':', # COLON
'\u003b': b';', # SEMICOLON
'\u003c': b'<', # LESS-THAN SIGN
'\u003d': b'=', # EQUALS SIGN
'\u003e': b'>', # GREATER-THAN SIGN
'\u003f': b'?', # QUESTION MARK
'\u0040': b'@', # COMMERCIAL AT
'\u0041': b'A', # LATIN CAPITAL LETTER A
'\u0042': b'B', # LATIN CAPITAL LETTER B
'\u0043': b'C', # LATIN CAPITAL LETTER C
'\u0044': b'D', # LATIN CAPITAL LETTER D
'\u0045': b'E', # LATIN CAPITAL LETTER E
'\u0046': b'F', # LATIN CAPITAL LETTER F
'\u0047': b'G', # LATIN CAPITAL LETTER G
'\u0048': b'H', # LATIN CAPITAL LETTER H
'\u0049': b'I', # LATIN CAPITAL LETTER I
'\u004a': b'J', # LATIN CAPITAL LETTER J
'\u004b': b'K', # LATIN CAPITAL LETTER K
'\u004c': b'L', # LATIN CAPITAL LETTER L
'\u004d': b'M', # LATIN CAPITAL LETTER M
'\u004e': b'N', # LATIN CAPITAL LETTER N
'\u004f': b'O', # LATIN CAPITAL LETTER O
'\u0050': b'P', # LATIN CAPITAL LETTER P
'\u0051': b'Q', # LATIN CAPITAL LETTER Q
'\u0052': b'R', # LATIN CAPITAL LETTER R
'\u0053': b'S', # LATIN CAPITAL LETTER S
'\u0054': b'T', # LATIN CAPITAL LETTER T
'\u0055': b'U', # LATIN CAPITAL LETTER U
'\u0056': b'V', # LATIN CAPITAL LETTER V
'\u0057': b'W', # LATIN CAPITAL LETTER W
'\u0058': b'X', # LATIN CAPITAL LETTER X
'\u0059': b'Y', # LATIN CAPITAL LETTER Y
'\u005a': b'Z', # LATIN CAPITAL LETTER Z
'\u005b': b'[', # LEFT SQUARE BRACKET
'\u005c': b'\\', # REVERSE SOLIDUS
'\u005d': b']', # RIGHT SQUARE BRACKET
'\u005e': b'^', # CIRCUMFLEX ACCENT
'\u005f': b'_', # LOW LINE
'\u0060': b'`', # GRAVE ACCENT
'\u0061': b'a', # LATIN SMALL LETTER A
'\u0062': b'b', # LATIN SMALL LETTER B
'\u0063': b'c', # LATIN SMALL LETTER C
'\u0064': b'd', # LATIN SMALL LETTER D
'\u0065': b'e', # LATIN SMALL LETTER E
'\u0066': b'f', # LATIN SMALL LETTER F
'\u0067': b'g', # LATIN SMALL LETTER G
'\u0068': b'h', # LATIN SMALL LETTER H
'\u0069': b'i', # LATIN SMALL LETTER I
'\u006a': b'j', # LATIN SMALL LETTER J
'\u006b': b'k', # LATIN SMALL LETTER K
'\u006c': b'l', # LATIN SMALL LETTER L
'\u006d': b'm', # LATIN SMALL LETTER M
'\u006e': b'n', # LATIN SMALL LETTER N
'\u006f': b'o', # LATIN SMALL LETTER O
'\u0070': b'p', # LATIN SMALL LETTER P
'\u0071': b'q', # LATIN SMALL LETTER Q
'\u0072': b'r', # LATIN SMALL LETTER R
'\u0073': b's', # LATIN SMALL LETTER S
'\u0074': b't', # LATIN SMALL LETTER T
'\u0075': b'u', # LATIN SMALL LETTER U
'\u0076': b'v', # LATIN SMALL LETTER V
'\u0077': b'w', # LATIN SMALL LETTER W
'\u0078': b'x', # LATIN SMALL LETTER X
'\u0079': b'y', # LATIN SMALL LETTER Y
'\u007a': b'z', # LATIN SMALL LETTER Z
'\u007b': b'{', # LEFT CURLY BRACKET
'\u007c': b'|', # VERTICAL LINE
'\u007d': b'}', # RIGHT CURLY BRACKET
'\u007e': b'~', # TILDE
'\u0088': b'\x88', # <control>
'\u0089': b'\x89', # <control>
# XXX not part of the standard but MARC equivalent of \x88, \x89
#'\u0098': b'\x98', # <control>
#'\u009c': b'\x9c', # <control>
'\u00a1': b'\xa1', # INVERTED EXCLAMATION MARK
'\u00a3': b'\xa3', # POUND SIGN
'\u00a5': b'\xa5', # YEN SIGN
'\u00a7': b'\xa7', # SECTION SIGN
'\u00a9': b'\xad', # COPYRIGHT SIGN
'\u00ab': b'\xab', # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u00ae': b'\xaf', # REGISTERED SIGN
'\u00b7': b'\xb7', # MIDDLE DOT
'\u00bb': b'\xbb', # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u00bf': b'\xbf', # INVERTED QUESTION MARK
'\u00c0': b'\xc1A', # LATIN CAPITAL LETTER A WITH GRAVE
'\u00c1': b'\xc2A', # LATIN CAPITAL LETTER A WITH ACUTE
'\u00c2': b'\xc3A', # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\u00c3': b'\xc4A', # LATIN CAPITAL LETTER A WITH TILDE
'\u00c4': b'\xc8A', # LATIN CAPITAL LETTER A WITH DIAERESIS
'\u00c5': b'\xcaA', # LATIN CAPITAL LETTER A WITH RING ABOVE
'\u00c6': b'\xe1', # LATIN CAPITAL LETTER AE
'\u00c7': b'\xd0C', # LATIN CAPITAL LETTER C WITH CEDILLA
'\u00c8': b'\xc1E', # LATIN CAPITAL LETTER E WITH GRAVE
'\u00c9': b'\xc2E', # LATIN CAPITAL LETTER E WITH ACUTE
'\u00ca': b'\xc3E', # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\u00cb': b'\xc8E', # LATIN CAPITAL LETTER E WITH DIAERESIS
'\u00cc': b'\xc1I', # LATIN CAPITAL LETTER I WITH GRAVE
'\u00cd': b'\xc2I', # LATIN CAPITAL LETTER I WITH ACUTE
'\u00ce': b'\xc3I', # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\u00cf': b'\xc8I', # LATIN CAPITAL LETTER I WITH DIAERESIS
'\u00d1': b'\xc4N', # LATIN CAPITAL LETTER N WITH TILDE
'\u00d2': b'\xc1O', # LATIN CAPITAL LETTER O WITH GRAVE
'\u00d3': b'\xc2O', # LATIN CAPITAL LETTER O WITH ACUTE
'\u00d4': b'\xc3O', # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u00d5': b'\xc4O', # LATIN CAPITAL LETTER O WITH TILDE
'\u00d6': b'\xc8O', # LATIN CAPITAL LETTER O WITH DIAERESIS
'\u00d8': b'\xe9', # LATIN CAPITAL LETTER O WITH STROKE
'\u00d9': b'\xc1U', # LATIN CAPITAL LETTER U WITH GRAVE
'\u00da': b'\xc2U', # LATIN CAPITAL LETTER U WITH ACUTE
'\u00db': b'\xc3U', # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\u00dc': b'\xc8U', # LATIN CAPITAL LETTER U WITH DIAERESIS
'\u00dd': b'\xc2Y', # LATIN CAPITAL LETTER Y WITH ACUTE
'\u00de': b'\xec', # LATIN CAPITAL LETTER THORN
'\u00df': b'\xfb', # LATIN SMALL LETTER SHARP S
'\u00e0': b'\xc1a', # LATIN SMALL LETTER A WITH GRAVE
'\u00e1': b'\xc2a', # LATIN SMALL LETTER A WITH ACUTE
'\u00e2': b'\xc3a', # LATIN SMALL LETTER A WITH CIRCUMFLEX
'\u00e3': b'\xc4a', # LATIN SMALL LETTER A WITH TILDE
'\u00e4': b'\xc8a', # LATIN SMALL LETTER A WITH DIAERESIS
'\u00e5': b'\xcaa', # LATIN SMALL LETTER A WITH RING ABOVE
'\u00e6': b'\xf1', # LATIN SMALL LETTER AE
'\u00e7': b'\xd0c', # LATIN SMALL LETTER C WITH CEDILLA
'\u00e8': b'\xc1e', # LATIN SMALL LETTER E WITH GRAVE
'\u00e9': b'\xc2e', # LATIN SMALL LETTER E WITH ACUTE
'\u00ea': b'\xc3e', # LATIN SMALL LETTER E WITH CIRCUMFLEX
'\u00eb': b'\xc8e', # LATIN SMALL LETTER E WITH DIAERESIS
'\u00ec': b'\xc1i', # LATIN SMALL LETTER I WITH GRAVE
'\u00ed': b'\xc2i', # LATIN SMALL LETTER I WITH ACUTE
'\u00ee': b'\xc3i', # LATIN SMALL LETTER I WITH CIRCUMFLEX
'\u00ef': b'\xc8i', # LATIN SMALL LETTER I WITH DIAERESIS
'\u00f0': b'\xf3', # LATIN SMALL LETTER ETH
'\u00f1': b'\xc4n', # LATIN SMALL LETTER N WITH TILDE
'\u00f2': b'\xc1o', # LATIN SMALL LETTER O WITH GRAVE
'\u00f3': b'\xc2o', # LATIN SMALL LETTER O WITH ACUTE
'\u00f4': b'\xc3o', # LATIN SMALL LETTER O WITH CIRCUMFLEX
'\u00f5': b'\xc4o', # LATIN SMALL LETTER O WITH TILDE
'\u00f6': b'\xc8o', # LATIN SMALL LETTER O WITH DIAERESIS
'\u00f8': b'\xf9', # LATIN SMALL LETTER O WITH STROKE
'\u00f9': b'\xc1u', # LATIN SMALL LETTER U WITH GRAVE
'\u00fa': b'\xc2u', # LATIN SMALL LETTER U WITH ACUTE
'\u00fb': b'\xc3u', # LATIN SMALL LETTER U WITH CIRCUMFLEX
'\u00fc': b'\xc8u', # LATIN SMALL LETTER U WITH DIAERESIS
'\u00fd': b'\xc2y', # LATIN SMALL LETTER Y WITH ACUTE
'\u00fe': b'\xfc', # LATIN SMALL LETTER THORN
'\u00ff': b'\xc8y', # LATIN SMALL LETTER Y WITH DIAERESIS
'\u0100': b'\xc5A', # LATIN CAPITAL LETTER A WITH MACRON
'\u0101': b'\xc5a', # LATIN SMALL LETTER A WITH MACRON
'\u0102': b'\xc6A', # LATIN CAPITAL LETTER A WITH BREVE
'\u0103': b'\xc6a', # LATIN SMALL LETTER A WITH BREVE
'\u0104': b'\xd3A', # LATIN CAPITAL LETTER A WITH OGONEK
'\u0105': b'\xd3a', # LATIN SMALL LETTER A WITH OGONEK
'\u0106': b'\xc2C', # LATIN CAPITAL LETTER C WITH ACUTE
'\u0107': b'\xc2c', # LATIN SMALL LETTER C WITH ACUTE
'\u0108': b'\xc3C', # LATIN CAPITAL LETTER C WITH CIRCUMFLEX
'\u0109': b'\xc3c', # LATIN SMALL LETTER C WITH CIRCUMFLEX
'\u010a': b'\xc7C', # LATIN CAPITAL LETTER C WITH DOT ABOVE
'\u010b': b'\xc7c', # LATIN SMALL LETTER C WITH DOT ABOVE
'\u010c': b'\xcfC', # LATIN CAPITAL LETTER C WITH CARON
'\u010d': b'\xcfc', # LATIN SMALL LETTER C WITH CARON
'\u010e': b'\xcfD', # LATIN CAPITAL LETTER D WITH CARON
'\u010f': b'\xcfd', # LATIN SMALL LETTER D WITH CARON
'\u0110': b'\xe2', # LATIN CAPITAL LETTER D WITH STROKE
'\u0111': b'\xf2', # LATIN SMALL LETTER D WITH STROKE
'\u0112': b'\xc5E', # LATIN CAPITAL LETTER E WITH MACRON
'\u0113': b'\xc5e', # LATIN SMALL LETTER E WITH MACRON
'\u0114': b'\xc6E', # LATIN CAPITAL LETTER E WITH BREVE
'\u0115': b'\xc6e', # LATIN SMALL LETTER E WITH BREVE
'\u0116': b'\xc7E', # LATIN CAPITAL LETTER E WITH DOT ABOVE
'\u0117': b'\xc7e', # LATIN SMALL LETTER E WITH DOT ABOVE
'\u0118': b'\xd3E', # LATIN CAPITAL LETTER E WITH OGONEK
'\u0119': b'\xd3e', # LATIN SMALL LETTER E WITH OGONEK
'\u011a': b'\xcfE', # LATIN CAPITAL LETTER E WITH CARON
'\u011b': b'\xcfe', # LATIN SMALL LETTER E WITH CARON
'\u011c': b'\xc3G', # LATIN CAPITAL LETTER G WITH CIRCUMFLEX
'\u011d': b'\xc3g', # LATIN SMALL LETTER G WITH CIRCUMFLEX
'\u011e': b'\xc6G', # LATIN CAPITAL LETTER G WITH BREVE
'\u011f': b'\xc6g', # LATIN SMALL LETTER G WITH BREVE
'\u0120': b'\xc7G', # LATIN CAPITAL LETTER G WITH DOT ABOVE
'\u0121': b'\xc7g', # LATIN SMALL LETTER G WITH DOT ABOVE
'\u0122': b'\xd0G', # LATIN CAPITAL LETTER G WITH CEDILLA
'\u0123': b'\xd0g', # LATIN SMALL LETTER G WITH CEDILLA
'\u0124': b'\xc3H', # LATIN CAPITAL LETTER H WITH CIRCUMFLEX
'\u0125': b'\xc3h', # LATIN SMALL LETTER H WITH CIRCUMFLEX
'\u0128': b'\xc4I', # LATIN CAPITAL LETTER I WITH TILDE
'\u0129': b'\xc4i', # LATIN SMALL LETTER I WITH TILDE
'\u012a': b'\xc5I', # LATIN CAPITAL LETTER I WITH MACRON
'\u012b': b'\xc5i', # LATIN SMALL LETTER I WITH MACRON
'\u012c': b'\xc6I', # LATIN CAPITAL LETTER I WITH BREVE
'\u012d': b'\xc6i', # LATIN SMALL LETTER I WITH BREVE
'\u012e': b'\xd3I', # LATIN CAPITAL LETTER I WITH OGONEK
'\u012f': b'\xd3i', # LATIN SMALL LETTER I WITH OGONEK
'\u0130': b'\xc7I', # LATIN CAPITAL LETTER I WITH DOT ABOVE
'\u0131': b'\xf5', # LATIN SMALL LETTER DOTLESS I
'\u0132': b'\xe6', # LATIN CAPITAL LIGATURE IJ
'\u0133': b'\xf6', # LATIN SMALL LIGATURE IJ
'\u0134': b'\xc3J', # LATIN CAPITAL LETTER J WITH CIRCUMFLEX
'\u0135': b'\xc3j', # LATIN SMALL LETTER J WITH CIRCUMFLEX
'\u0136': b'\xd0K', # LATIN CAPITAL LETTER K WITH CEDILLA
'\u0137': b'\xd0k', # LATIN SMALL LETTER K WITH CEDILLA
'\u0139': b'\xc2L', # LATIN CAPITAL LETTER L WITH ACUTE
'\u013a': b'\xc2l', # LATIN SMALL LETTER L WITH ACUTE
'\u013b': b'\xd0L', # LATIN CAPITAL LETTER L WITH CEDILLA
'\u013c': b'\xd0l', # LATIN SMALL LETTER L WITH CEDILLA
'\u013d': b'\xcfL', # LATIN CAPITAL LETTER L WITH CARON
'\u013e': b'\xcfl', # LATIN SMALL LETTER L WITH CARON
'\u0141': b'\xe8', # LATIN CAPITAL LETTER L WITH STROKE
'\u0142': b'\xf8', # LATIN SMALL LETTER L WITH STROKE
'\u0143': b'\xc2N', # LATIN CAPITAL LETTER N WITH ACUTE
'\u0144': b'\xc2n', # LATIN SMALL LETTER N WITH ACUTE
'\u0145': b'\xd0N', # LATIN CAPITAL LETTER N WITH CEDILLA
'\u0146': b'\xd0n', # LATIN SMALL LETTER N WITH CEDILLA
'\u0147': b'\xcfN', # LATIN CAPITAL LETTER N WITH CARON
'\u0148': b'\xcfn', # LATIN SMALL LETTER N WITH CARON
'\u014c': b'\xc5O', # LATIN CAPITAL LETTER O WITH MACRON
'\u014d': b'\xc5o', # LATIN SMALL LETTER O WITH MACRON
'\u014e': b'\xc6O', # LATIN CAPITAL LETTER O WITH BREVE
'\u014f': b'\xc6o', # LATIN SMALL LETTER O WITH BREVE
'\u0150': b'\xcdO', # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
'\u0151': b'\xcdo', # LATIN SMALL LETTER O WITH DOUBLE ACUTE
'\u0152': b'\xea', # LATIN CAPITAL LIGATURE OE
'\u0153': b'\xfa', # LATIN SMALL LIGATURE OE
'\u0154': b'\xc2R', # LATIN CAPITAL LETTER R WITH ACUTE
'\u0155': b'\xc2r', # LATIN SMALL LETTER R WITH ACUTE
'\u0156': b'\xd0R', # LATIN CAPITAL LETTER R WITH CEDILLA
'\u0157': b'\xd0r', # LATIN SMALL LETTER R WITH CEDILLA
'\u0158': b'\xcfR', # LATIN CAPITAL LETTER R WITH CARON
'\u0159': b'\xcfr', # LATIN SMALL LETTER R WITH CARON
'\u015a': b'\xc2S', # LATIN CAPITAL LETTER S WITH ACUTE
'\u015b': b'\xc2s', # LATIN SMALL LETTER S WITH ACUTE
'\u015c': b'\xc3S', # LATIN CAPITAL LETTER S WITH CIRCUMFLEX
'\u015d': b'\xc3s', # LATIN SMALL LETTER S WITH CIRCUMFLEX
'\u015e': b'\xd0S', # LATIN CAPITAL LETTER S WITH CEDILLA
'\u015f': b'\xd0s', # LATIN SMALL LETTER S WITH CEDILLA
'\u0160': b'\xcfS', # LATIN CAPITAL LETTER S WITH CARON
'\u0161': b'\xcfs', # LATIN SMALL LETTER S WITH CARON
'\u0162': b'\xd0T', # LATIN CAPITAL LETTER T WITH CEDILLA
'\u0163': b'\xd0t', # LATIN SMALL LETTER T WITH CEDILLA
'\u0164': b'\xcfT', # LATIN CAPITAL LETTER T WITH CARON
'\u0165': b'\xcft', # LATIN SMALL LETTER T WITH CARON
'\u0168': b'\xc4U', # LATIN CAPITAL LETTER U WITH TILDE
'\u0169': b'\xc4u', # LATIN SMALL LETTER U WITH TILDE
'\u016a': b'\xc5U', # LATIN CAPITAL LETTER U WITH MACRON
'\u016b': b'\xc5u', # LATIN SMALL LETTER U WITH MACRON
'\u016c': b'\xc6U', # LATIN CAPITAL LETTER U WITH BREVE
'\u016d': b'\xc6u', # LATIN SMALL LETTER U WITH BREVE
'\u016e': b'\xcaU', # LATIN CAPITAL LETTER U WITH RING ABOVE
'\u016f': b'\xcau', # LATIN SMALL LETTER U WITH RING ABOVE
'\u0170': b'\xcdU', # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\u0171': b'\xcdu', # LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\u0172': b'\xd3U', # LATIN CAPITAL LETTER U WITH OGONEK
'\u0173': b'\xd3u', # LATIN SMALL LETTER U WITH OGONEK
'\u0174': b'\xc3W', # LATIN CAPITAL LETTER W WITH CIRCUMFLEX
'\u0175': b'\xc3w', # LATIN SMALL LETTER W WITH CIRCUMFLEX
'\u0176': b'\xc3Y', # LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
'\u0177': b'\xc3y', # LATIN SMALL LETTER Y WITH CIRCUMFLEX
'\u0178': b'\xc8Y', # LATIN CAPITAL LETTER Y WITH DIAERESIS
'\u0179': b'\xc2Z', # LATIN CAPITAL LETTER Z WITH ACUTE
'\u017a': b'\xc2z', # LATIN SMALL LETTER Z WITH ACUTE
'\u017b': b'\xc7Z', # LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017c': b'\xc7z', # LATIN SMALL LETTER Z WITH DOT ABOVE
'\u017d': b'\xcfZ', # LATIN CAPITAL LETTER Z WITH CARON
'\u017e': b'\xcfz', # LATIN SMALL LETTER Z WITH CARON
'\u01a0': b'\xceO', # LATIN CAPITAL LETTER O WITH HORN
'\u01a1': b'\xceo', # LATIN SMALL LETTER O WITH HORN
'\u01af': b'\xceU', # LATIN CAPITAL LETTER U WITH HORN
'\u01b0': b'\xceu', # LATIN SMALL LETTER U WITH HORN
'\u01cd': b'\xcfA', # LATIN CAPITAL LETTER A WITH CARON
'\u01ce': b'\xcfa', # LATIN SMALL LETTER A WITH CARON
'\u01cf': b'\xcfI', # LATIN CAPITAL LETTER I WITH CARON
'\u01d0': b'\xcfi', # LATIN SMALL LETTER I WITH CARON
'\u01d1': b'\xcfO', # LATIN CAPITAL LETTER O WITH CARON
'\u01d2': b'\xcfo', # LATIN SMALL LETTER O WITH CARON
'\u01d3': b'\xcfU', # LATIN CAPITAL LETTER U WITH CARON
'\u01d4': b'\xcfu', # LATIN SMALL LETTER U WITH CARON
'\u01d5': b'\xc5\xc8U', # LATIN CAPITAL LETTER U WITH DIAERESIS AND MACRON
'\u01d6': b'\xc5\xc8u', # LATIN SMALL LETTER U WITH DIAERESIS AND MACRON
'\u01d7': b'\xc2\xc8U', # LATIN CAPITAL LETTER U WITH DIAERESIS AND ACUTE
'\u01d8': b'\xc2\xc8u', # LATIN SMALL LETTER U WITH DIAERESIS AND ACUTE
'\u01d9': b'\xcf\xc8U', # LATIN CAPITAL LETTER U WITH DIAERESIS AND CARON
'\u01da': b'\xcf\xc8u', # LATIN SMALL LETTER U WITH DIAERESIS AND CARON
'\u01db': b'\xc1\xc8U', # LATIN CAPITAL LETTER U WITH DIAERESIS AND GRAVE
'\u01dc': b'\xc1\xc8u', # LATIN SMALL LETTER U WITH DIAERESIS AND GRAVE
'\u01de': b'\xc5\xc8A', # LATIN CAPITAL LETTER A WITH DIAERESIS AND MACRON
'\u01df': b'\xc5\xc8a', # LATIN SMALL LETTER A WITH DIAERESIS AND MACRON
'\u01e0': b'\xc5\xc7A', # LATIN CAPITAL LETTER A WITH DOT ABOVE AND MACRON
'\u01e1': b'\xc5\xc7a', # LATIN SMALL LETTER A WITH DOT ABOVE AND MACRON
'\u01e2': b'\xc5\xe1', # LATIN CAPITAL LETTER AE WITH MACRON
'\u01e3': b'\xc5\xf1', # LATIN SMALL LETTER AE WITH MACRON
'\u01e6': b'\xcfG', # LATIN CAPITAL LETTER G WITH CARON
'\u01e7': b'\xcfg', # LATIN SMALL LETTER G WITH CARON
'\u01e8': b'\xcfK', # LATIN CAPITAL LETTER K WITH CARON
'\u01e9': b'\xcfk', # LATIN SMALL LETTER K WITH CARON
'\u01ea': b'\xd3O', # LATIN CAPITAL LETTER O WITH OGONEK
'\u01eb': b'\xd3o', # LATIN SMALL LETTER O WITH OGONEK
'\u01ec': b'\xc5\xd3O', # LATIN CAPITAL LETTER O WITH OGONEK AND MACRON
'\u01ed': b'\xc5\xd3o', # LATIN SMALL LETTER O WITH OGONEK AND MACRON
'\u01f0': b'\xcfj', # LATIN SMALL LETTER J WITH CARON
'\u01f4': b'\xc2G', # LATIN CAPITAL LETTER G WITH ACUTE
'\u01f5': b'\xc2g', # LATIN SMALL LETTER G WITH ACUTE
'\u01f8': b'\xc1N', # LATIN CAPITAL LETTER N WITH GRAVE
'\u01f9': b'\xc1n', # LATIN SMALL LETTER N WITH GRAVE
'\u01fa': b'\xc2\xcaA', # LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE
'\u01fb': b'\xc2\xcaa', # LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE
'\u01fc': b'\xc2\xe1', # LATIN CAPITAL LETTER AE WITH ACUTE
'\u01fd': b'\xc2\xf1', # LATIN SMALL LETTER AE WITH ACUTE
'\u01fe': b'\xc2\xe9', # LATIN CAPITAL LETTER O WITH STROKE AND ACUTE
'\u01ff': b'\xc2\xf9', # LATIN SMALL LETTER O WITH STROKE AND ACUTE
'\u0218': b'\xd2S', # LATIN CAPITAL LETTER S WITH COMMA BELOW
'\u0219': b'\xd2s', # LATIN SMALL LETTER S WITH COMMA BELOW
'\u021a': b'\xd2T', # LATIN CAPITAL LETTER T WITH COMMA BELOW
'\u021b': b'\xd2t', # LATIN SMALL LETTER T WITH COMMA BELOW
'\u021e': b'\xcfH', # LATIN CAPITAL LETTER H WITH CARON
'\u021f': b'\xcfh', # LATIN SMALL LETTER H WITH CARON
'\u0226': b'\xc7A', # LATIN CAPITAL LETTER A WITH DOT ABOVE
'\u0227': b'\xc7a', # LATIN SMALL LETTER A WITH DOT ABOVE
'\u0228': b'\xd0E', # LATIN CAPITAL LETTER E WITH CEDILLA
'\u0229': b'\xd0e', # LATIN SMALL LETTER E WITH CEDILLA
'\u022a': b'\xc5\xc8O', # LATIN CAPITAL LETTER O WITH DIAERESIS AND MACRON
'\u022b': b'\xc5\xc8o', # LATIN SMALL LETTER O WITH DIAERESIS AND MACRON
'\u022c': b'\xc5\xc4O', # LATIN CAPITAL LETTER O WITH TILDE AND MACRON
'\u022d': b'\xc5\xc4o', # LATIN SMALL LETTER O WITH TILDE AND MACRON
'\u022e': b'\xc7O', # LATIN CAPITAL LETTER O WITH DOT ABOVE
'\u022f': b'\xc7o', # LATIN SMALL LETTER O WITH DOT ABOVE
'\u0230': b'\xc5\xc7O', # LATIN CAPITAL LETTER O WITH DOT ABOVE AND MACRON
'\u0231': b'\xc5\xc7o', # LATIN SMALL LETTER O WITH DOT ABOVE AND MACRON
'\u0232': b'\xc5Y', # LATIN CAPITAL LETTER Y WITH MACRON
'\u0233': b'\xc5y', # LATIN SMALL LETTER Y WITH MACRON
'\u02b9': b'\xbd', # MODIFIER LETTER PRIME
'\u02ba': b'\xbe', # MODIFIER LETTER DOUBLE PRIME
'\u02bb': b'\xb0', # MODIFIER LETTER TURNED COMMA
'\u02bc': b'\xb1', # MODIFIER LETTER APOSTROPHE
'\u0300': b'\xc1', # COMBINING GRAVE ACCENT
'\u0301': b'\xc2', # COMBINING ACUTE ACCENT
'\u0302': b'\xc3', # COMBINING CIRCUMFLEX ACCENT
'\u0303': b'\xc4', # COMBINING TILDE
'\u0304': b'\xc5', # COMBINING MACRON
'\u0306': b'\xc6', # COMBINING BREVE
'\u0307': b'\xc7', # COMBINING DOT ABOVE
'\u0308': b'\xc8', # COMBINING DIAERESIS
'\u0309': b'\xc0', # COMBINING HOOK ABOVE
'\u030a': b'\xca', # COMBINING RING ABOVE
'\u030b': b'\xcd', # COMBINING DOUBLE ACUTE ACCENT
'\u030c': b'\xcf', # COMBINING CARON
'\u0312': b'\xcc', # COMBINING TURNED COMMA ABOVE
'\u0315': b'\xcb', # COMBINING COMMA ABOVE RIGHT
'\u031b': b'\xce', # COMBINING HORN
'\u031c': b'\xd1', # COMBINING LEFT HALF RING BELOW
'\u0323': b'\xd6', # COMBINING DOT BELOW
'\u0324': b'\xd7', # COMBINING DIAERESIS BELOW
'\u0325': b'\xd4', # COMBINING RING BELOW
'\u0326': b'\xd2', # COMBINING COMMA BELOW
'\u0327': b'\xd0', # COMBINING CEDILLA
'\u0328': b'\xd3', # COMBINING OGONEK
'\u0329': b'\xda', # COMBINING VERTICAL LINE BELOW
'\u032d': b'\xdb', # COMBINING CIRCUMFLEX ACCENT BELOW
'\u032e': b'\xd5', # COMBINING BREVE BELOW
'\u0332': b'\xd8', # COMBINING LOW LINE
'\u0333': b'\xd9', # COMBINING DOUBLE LOW LINE
'\u0340': b'\xc1', # COMBINING GRAVE TONE MARK
'\u0341': b'\xc2', # COMBINING ACUTE TONE MARK
'\u0344': b'\xc2\xc8', # COMBINING GREEK DIALYTIKA TONOS
'\u0374': b'\xbd', # GREEK NUMERAL SIGN
'\u037e': b';', # GREEK QUESTION MARK
'\u0387': b'\xb7', # GREEK ANO TELEIA
'\u1e00': b'\xd4A', # LATIN CAPITAL LETTER A WITH RING BELOW
'\u1e01': b'\xd4a', # LATIN SMALL LETTER A WITH RING BELOW
'\u1e02': b'\xc7B', # LATIN CAPITAL LETTER B WITH DOT ABOVE
'\u1e03': b'\xc7b', # LATIN SMALL LETTER B WITH DOT ABOVE
'\u1e04': b'\xd6B', # LATIN CAPITAL LETTER B WITH DOT BELOW
'\u1e05': b'\xd6b', # LATIN SMALL LETTER B WITH DOT BELOW
'\u1e08': b'\xc2\xd0C', # LATIN CAPITAL LETTER C WITH CEDILLA AND ACUTE
'\u1e09': b'\xc2\xd0c', # LATIN SMALL LETTER C WITH CEDILLA AND ACUTE
'\u1e0a': b'\xc7D', # LATIN CAPITAL LETTER D WITH DOT ABOVE
'\u1e0b': b'\xc7d', # LATIN SMALL LETTER D WITH DOT ABOVE
'\u1e0c': b'\xd6D', # LATIN CAPITAL LETTER D WITH DOT BELOW
'\u1e0d': b'\xd6d', # LATIN SMALL LETTER D WITH DOT BELOW
'\u1e10': b'\xd0D', # LATIN CAPITAL LETTER D WITH CEDILLA
'\u1e11': b'\xd0d', # LATIN SMALL LETTER D WITH CEDILLA
'\u1e12': b'\xdbD', # LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW
'\u1e13': b'\xdbd', # LATIN SMALL LETTER D WITH CIRCUMFLEX BELOW
'\u1e14': b'\xc1\xc5E', # LATIN CAPITAL LETTER E WITH MACRON AND GRAVE
'\u1e15': b'\xc1\xc5e', # LATIN SMALL LETTER E WITH MACRON AND GRAVE
'\u1e16': b'\xc2\xc5E', # LATIN CAPITAL LETTER E WITH MACRON AND ACUTE
'\u1e17': b'\xc2\xc5e', # LATIN SMALL LETTER E WITH MACRON AND ACUTE
'\u1e18': b'\xdbE', # LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW
'\u1e19': b'\xdbe', # LATIN SMALL LETTER E WITH CIRCUMFLEX BELOW
'\u1e1c': b'\xc6\xd0E', # LATIN CAPITAL LETTER E WITH CEDILLA AND BREVE
'\u1e1d': b'\xc6\xd0e', # LATIN SMALL LETTER E WITH CEDILLA AND BREVE
'\u1e1e': b'\xc7F', # LATIN CAPITAL LETTER F WITH DOT ABOVE
'\u1e1f': b'\xc7f', # LATIN SMALL LETTER F WITH DOT ABOVE
'\u1e20': b'\xc5G', # LATIN CAPITAL LETTER G WITH MACRON
'\u1e21': b'\xc5g', # LATIN SMALL LETTER G WITH MACRON
'\u1e22': b'\xc7H', # LATIN CAPITAL LETTER H WITH DOT ABOVE
'\u1e23': b'\xc7h', # LATIN SMALL LETTER H WITH DOT ABOVE
'\u1e24': b'\xd6H', # LATIN CAPITAL LETTER H WITH DOT BELOW
'\u1e25': b'\xd6h', # LATIN SMALL LETTER H WITH DOT BELOW
'\u1e26': b'\xc8H', # LATIN CAPITAL LETTER H WITH DIAERESIS
'\u1e27': b'\xc8h', # LATIN SMALL LETTER H WITH DIAERESIS
'\u1e28': b'\xd0H', # LATIN CAPITAL LETTER H WITH CEDILLA
'\u1e29': b'\xd0h', # LATIN SMALL LETTER H WITH CEDILLA
'\u1e2a': b'\xd5H', # LATIN CAPITAL LETTER H WITH BREVE BELOW
'\u1e2b': b'\xd5h', # LATIN SMALL LETTER H WITH BREVE BELOW
'\u1e2e': b'\xc2\xc8I', # LATIN CAPITAL LETTER I WITH DIAERESIS AND ACUTE
'\u1e2f': b'\xc2\xc8i', # LATIN SMALL LETTER I WITH DIAERESIS AND ACUTE
'\u1e30': b'\xc2K', # LATIN CAPITAL LETTER K WITH ACUTE
'\u1e31': b'\xc2k', # LATIN SMALL LETTER K WITH ACUTE
'\u1e32': b'\xd6K', # LATIN CAPITAL LETTER K WITH DOT BELOW
'\u1e33': b'\xd6k', # LATIN SMALL LETTER K WITH DOT BELOW
'\u1e36': b'\xd6L', # LATIN CAPITAL LETTER L WITH DOT BELOW
'\u1e37': b'\xd6l', # LATIN SMALL LETTER L WITH DOT BELOW
'\u1e38': b'\xc5\xd6L', # LATIN CAPITAL LETTER L WITH DOT BELOW AND MACRON
'\u1e39': b'\xc5\xd6l', # LATIN SMALL LETTER L WITH DOT BELOW AND MACRON
'\u1e3c': b'\xdbL', # LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW
'\u1e3d': b'\xdbl', # LATIN SMALL LETTER L WITH CIRCUMFLEX BELOW
'\u1e3e': b'\xc2M', # LATIN CAPITAL LETTER M WITH ACUTE
'\u1e3f': b'\xc2m', # LATIN SMALL LETTER M WITH ACUTE
'\u1e40': b'\xc7M', # LATIN CAPITAL LETTER M WITH DOT ABOVE
'\u1e41': b'\xc7m', # LATIN SMALL LETTER M WITH DOT ABOVE
'\u1e42': b'\xd6M', # LATIN CAPITAL LETTER M WITH DOT BELOW
'\u1e43': b'\xd6m', # LATIN SMALL LETTER M WITH DOT BELOW
'\u1e44': b'\xc7N', # LATIN CAPITAL LETTER N WITH DOT ABOVE
'\u1e45': b'\xc7n', # LATIN SMALL LETTER N WITH DOT ABOVE
'\u1e46': b'\xd6N', # LATIN CAPITAL LETTER N WITH DOT BELOW
'\u1e47': b'\xd6n', # LATIN SMALL LETTER N WITH DOT BELOW
'\u1e4a': b'\xdbN', # LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW
'\u1e4b': b'\xdbn', # LATIN SMALL LETTER N WITH CIRCUMFLEX BELOW
'\u1e4c': b'\xc2\xc4O', # LATIN CAPITAL LETTER O WITH TILDE AND ACUTE
'\u1e4d': b'\xc2\xc4o', # LATIN SMALL LETTER O WITH TILDE AND ACUTE
'\u1e4e': b'\xc8\xc4O', # LATIN CAPITAL LETTER O WITH TILDE AND DIAERESIS
'\u1e4f': b'\xc8\xc4o', # LATIN SMALL LETTER O WITH TILDE AND DIAERESIS
'\u1e50': b'\xc1\xc5O', # LATIN CAPITAL LETTER O WITH MACRON AND GRAVE
'\u1e51': b'\xc1\xc5o', # LATIN SMALL LETTER O WITH MACRON AND GRAVE
'\u1e52': b'\xc2\xc5O', # LATIN CAPITAL LETTER O WITH MACRON AND ACUTE
'\u1e53': b'\xc2\xc5o', # LATIN SMALL LETTER O WITH MACRON AND ACUTE
'\u1e54': b'\xc2P', # LATIN CAPITAL LETTER P WITH ACUTE
'\u1e55': b'\xc2p', # LATIN SMALL LETTER P WITH ACUTE
'\u1e56': b'\xc7P', # LATIN CAPITAL LETTER P WITH DOT ABOVE
'\u1e57': b'\xc7p', # LATIN SMALL LETTER P WITH DOT ABOVE
'\u1e58': b'\xc7R', # LATIN CAPITAL LETTER R WITH DOT ABOVE
'\u1e59': b'\xc7r', # LATIN SMALL LETTER R WITH DOT ABOVE
'\u1e5a': b'\xd6R', # LATIN CAPITAL LETTER R WITH DOT BELOW
'\u1e5b': b'\xd6r', # LATIN SMALL LETTER R WITH DOT BELOW
'\u1e5c': b'\xc5\xd6R', # LATIN CAPITAL LETTER R WITH DOT BELOW AND MACRON
'\u1e5d': b'\xc5\xd6r', # LATIN SMALL LETTER R WITH DOT BELOW AND MACRON
'\u1e60': b'\xc7S', # LATIN CAPITAL LETTER S WITH DOT ABOVE
'\u1e61': b'\xc7s', # LATIN SMALL LETTER S WITH DOT ABOVE
'\u1e62': b'\xd6S', # LATIN CAPITAL LETTER S WITH DOT BELOW
'\u1e63': b'\xd6s', # LATIN SMALL LETTER S WITH DOT BELOW
'\u1e64': b'\xc7\xc2S', # LATIN CAPITAL LETTER S WITH ACUTE AND DOT ABOVE
'\u1e65': b'\xc7\xc2s', # LATIN SMALL LETTER S WITH ACUTE AND DOT ABOVE
'\u1e66': b'\xc7\xcfS', # LATIN CAPITAL LETTER S WITH CARON AND DOT ABOVE
'\u1e67': b'\xc7\xcfs', # LATIN SMALL LETTER S WITH CARON AND DOT ABOVE
'\u1e68': b'\xc7\xd6S', # LATIN CAPITAL LETTER S WITH DOT BELOW AND DOT ABOVE
'\u1e69': b'\xc7\xd6s', # LATIN SMALL LETTER S WITH DOT BELOW AND DOT ABOVE
'\u1e6a': b'\xc7T', # LATIN CAPITAL LETTER T WITH DOT ABOVE
'\u1e6b': b'\xc7t', # LATIN SMALL LETTER T WITH DOT ABOVE
'\u1e6c': b'\xd6T', # LATIN CAPITAL LETTER T WITH DOT BELOW
'\u1e6d': b'\xd6t', # LATIN SMALL LETTER T WITH DOT BELOW
'\u1e70': b'\xdbT', # LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW
'\u1e71': b'\xdbt', # LATIN SMALL LETTER T WITH CIRCUMFLEX BELOW
'\u1e72': b'\xd7U', # LATIN CAPITAL LETTER U WITH DIAERESIS BELOW
'\u1e73': b'\xd7u', # LATIN SMALL LETTER U WITH DIAERESIS BELOW
'\u1e76': b'\xdbU', # LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW
'\u1e77': b'\xdbu', # LATIN SMALL LETTER U WITH CIRCUMFLEX BELOW
'\u1e78': b'\xc2\xc4U', # LATIN CAPITAL LETTER U WITH TILDE AND ACUTE
'\u1e79': b'\xc2\xc4u', # LATIN SMALL LETTER U WITH TILDE AND ACUTE
'\u1e7a': b'\xc8\xc5U', # LATIN CAPITAL LETTER U WITH MACRON AND DIAERESIS
'\u1e7b': b'\xc8\xc5u', # LATIN SMALL LETTER U WITH MACRON AND DIAERESIS
'\u1e7c': b'\xc4V', # LATIN CAPITAL LETTER V WITH TILDE
'\u1e7d': b'\xc4v', # LATIN SMALL LETTER V WITH TILDE
'\u1e7e': b'\xd6V', # LATIN CAPITAL LETTER V WITH DOT BELOW
'\u1e7f': b'\xd6v', # LATIN SMALL LETTER V WITH DOT BELOW
'\u1e80': b'\xc1W', # LATIN CAPITAL LETTER W WITH GRAVE
'\u1e81': b'\xc1w', # LATIN SMALL LETTER W WITH GRAVE
'\u1e82': b'\xc2W', # LATIN CAPITAL LETTER W WITH ACUTE
'\u1e83': b'\xc2w', # LATIN SMALL LETTER W WITH ACUTE
'\u1e84': b'\xc8W', # LATIN CAPITAL LETTER W WITH DIAERESIS
'\u1e85': b'\xc8w', # LATIN SMALL LETTER W WITH DIAERESIS
'\u1e86': b'\xc7W', # LATIN CAPITAL LETTER W WITH DOT ABOVE
'\u1e87': b'\xc7w', # LATIN SMALL LETTER W WITH DOT ABOVE
'\u1e88': b'\xd6W', # LATIN CAPITAL LETTER W WITH DOT BELOW
'\u1e89': b'\xd6w', # LATIN SMALL LETTER W WITH DOT BELOW
'\u1e8a': b'\xc7X', # LATIN CAPITAL LETTER X WITH DOT ABOVE
'\u1e8b': b'\xc7x', # LATIN SMALL LETTER X WITH DOT ABOVE
'\u1e8c': b'\xc8X', # LATIN CAPITAL LETTER X WITH DIAERESIS
'\u1e8d': b'\xc8x', # LATIN SMALL LETTER X WITH DIAERESIS
'\u1e8e': b'\xc7Y', # LATIN CAPITAL LETTER Y WITH DOT ABOVE
'\u1e8f': b'\xc7y', # LATIN SMALL LETTER Y WITH DOT ABOVE
'\u1e90': b'\xc3Z', # LATIN CAPITAL LETTER Z WITH CIRCUMFLEX
'\u1e91': b'\xc3z', # LATIN SMALL LETTER Z WITH CIRCUMFLEX
'\u1e92': b'\xd6Z', # LATIN CAPITAL LETTER Z WITH DOT BELOW
'\u1e93': b'\xd6z', # LATIN SMALL LETTER Z WITH DOT BELOW
'\u1e97': b'\xc8t', # LATIN SMALL LETTER T WITH DIAERESIS
'\u1e98': b'\xcaw', # LATIN SMALL LETTER W WITH RING ABOVE
'\u1e99': b'\xcay', # LATIN SMALL LETTER Y WITH RING ABOVE
'\u1ea0': b'\xd6A', # LATIN CAPITAL LETTER A WITH DOT BELOW
'\u1ea1': b'\xd6a', # LATIN SMALL LETTER A WITH DOT BELOW
'\u1ea2': b'\xc0A', # LATIN CAPITAL LETTER A WITH HOOK ABOVE
'\u1ea3': b'\xc0a', # LATIN SMALL LETTER A WITH HOOK ABOVE
'\u1ea4': b'\xc2\xc3A', # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND ACUTE
'\u1ea5': b'\xc2\xc3a', # LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACUTE
'\u1ea6': b'\xc1\xc3A', # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND GRAVE
'\u1ea7': b'\xc1\xc3a', # LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRAVE
'\u1ea8': b'\xc0\xc3A', # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE
'\u1ea9': b'\xc0\xc3a', # LATIN SMALL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE
'\u1eaa': b'\xc4\xc3A', # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND TILDE
'\u1eab': b'\xc4\xc3a', # LATIN SMALL LETTER A WITH CIRCUMFLEX AND TILDE
'\u1eac': b'\xc3\xd6A', # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND DOT BELOW
'\u1ead': b'\xc3\xd6a', # LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT BELOW
'\u1eae': b'\xc2\xc6A', # LATIN CAPITAL LETTER A WITH BREVE AND ACUTE
'\u1eaf': b'\xc2\xc6a', # LATIN SMALL LETTER A WITH BREVE AND ACUTE
'\u1eb0': b'\xc1\xc6A', # LATIN CAPITAL LETTER A WITH BREVE AND GRAVE
'\u1eb1': b'\xc1\xc6a', # LATIN SMALL LETTER A WITH BREVE AND GRAVE
'\u1eb2': b'\xc0\xc6A', # LATIN CAPITAL LETTER A WITH BREVE AND HOOK ABOVE
'\u1eb3': b'\xc0\xc6a', # LATIN SMALL LETTER A WITH BREVE AND HOOK ABOVE
'\u1eb4': b'\xc4\xc6A', # LATIN CAPITAL LETTER A WITH BREVE AND TILDE
'\u1eb5': b'\xc4\xc6a', # LATIN SMALL LETTER A WITH BREVE AND TILDE
'\u1eb6': b'\xc6\xd6A', # LATIN CAPITAL LETTER A WITH BREVE AND DOT BELOW
'\u1eb7': b'\xc6\xd6a', # LATIN SMALL LETTER A WITH BREVE AND DOT BELOW
'\u1eb8': b'\xd6E', # LATIN CAPITAL LETTER E WITH DOT BELOW
'\u1eb9': b'\xd6e', # LATIN SMALL LETTER E WITH DOT BELOW
'\u1eba': b'\xc0E', # LATIN CAPITAL LETTER E WITH HOOK ABOVE
'\u1ebb': b'\xc0e', # LATIN SMALL LETTER E WITH HOOK ABOVE
'\u1ebc': b'\xc4E', # LATIN CAPITAL LETTER E WITH TILDE
'\u1ebd': b'\xc4e', # LATIN SMALL LETTER E WITH TILDE
'\u1ebe': b'\xc2\xc3E', # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND ACUTE
'\u1ebf': b'\xc2\xc3e', # LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACUTE
'\u1ec0': b'\xc1\xc3E', # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND GRAVE
'\u1ec1': b'\xc1\xc3e', # LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRAVE
'\u1ec2': b'\xc0\xc3E', # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE
'\u1ec3': b'\xc0\xc3e', # LATIN SMALL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE
'\u1ec4': b'\xc4\xc3E', # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND TILDE
'\u1ec5': b'\xc4\xc3e', # LATIN SMALL LETTER E WITH CIRCUMFLEX AND TILDE
'\u1ec6': b'\xc3\xd6E', # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND DOT BELOW
'\u1ec7': b'\xc3\xd6e', # LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT BELOW
'\u1ec8': b'\xc0I', # LATIN CAPITAL LETTER I WITH HOOK ABOVE
'\u1ec9': b'\xc0i', # LATIN SMALL LETTER I WITH HOOK ABOVE
'\u1eca': b'\xd6I', # LATIN CAPITAL LETTER I WITH DOT BELOW
'\u1ecb': b'\xd6i', # LATIN SMALL LETTER I WITH DOT BELOW
'\u1ecc': b'\xd6O', # LATIN CAPITAL LETTER O WITH DOT BELOW
'\u1ecd': b'\xd6o', # LATIN SMALL LETTER O WITH DOT BELOW
'\u1ece': b'\xc0O', # LATIN CAPITAL LETTER O WITH HOOK ABOVE
'\u1ecf': b'\xc0o', # LATIN SMALL LETTER O WITH HOOK ABOVE
'\u1ed0': b'\xc2\xc3O', # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND ACUTE
'\u1ed1': b'\xc2\xc3o', # LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACUTE
'\u1ed2': b'\xc1\xc3O', # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND GRAVE
'\u1ed3': b'\xc1\xc3o', # LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRAVE
'\u1ed4': b'\xc0\xc3O', # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE
'\u1ed5': b'\xc0\xc3o', # LATIN SMALL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE
'\u1ed6': b'\xc4\xc3O', # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND TILDE
'\u1ed7': b'\xc4\xc3o', # LATIN SMALL LETTER O WITH CIRCUMFLEX AND TILDE
'\u1ed8': b'\xc3\xd6O', # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND DOT BELOW
'\u1ed9': b'\xc3\xd6o', # LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT BELOW
'\u1eda': b'\xc2\xceO', # LATIN CAPITAL LETTER O WITH HORN AND ACUTE
'\u1edb': b'\xc2\xceo', # LATIN SMALL LETTER O WITH HORN AND ACUTE
'\u1edc': b'\xc1\xceO', # LATIN CAPITAL LETTER O WITH HORN AND GRAVE
'\u1edd': b'\xc1\xceo', # LATIN SMALL LETTER O WITH HORN AND GRAVE
'\u1ede': b'\xc0\xceO', # LATIN CAPITAL LETTER O WITH HORN AND HOOK ABOVE
'\u1edf': b'\xc0\xceo', # LATIN SMALL LETTER O WITH HORN AND HOOK ABOVE
'\u1ee0': b'\xc4\xceO', # LATIN CAPITAL LETTER O WITH HORN AND TILDE
'\u1ee1': b'\xc4\xceo', # LATIN SMALL LETTER O WITH HORN AND TILDE
'\u1ee2': b'\xd6\xceO', # LATIN CAPITAL LETTER O WITH HORN AND DOT BELOW
'\u1ee3': b'\xd6\xceo', # LATIN SMALL LETTER O WITH HORN AND DOT BELOW
'\u1ee4': b'\xd6U', # LATIN CAPITAL LETTER U WITH DOT BELOW
'\u1ee5': b'\xd6u', # LATIN SMALL LETTER U WITH DOT BELOW
'\u1ee6': b'\xc0U', # LATIN CAPITAL LETTER U WITH HOOK ABOVE
'\u1ee7': b'\xc0u', # LATIN SMALL LETTER U WITH HOOK ABOVE
'\u1ee8': b'\xc2\xceU', # LATIN CAPITAL LETTER U WITH HORN AND ACUTE
'\u1ee9': b'\xc2\xceu', # LATIN SMALL LETTER U WITH HORN AND ACUTE
'\u1eea': b'\xc1\xceU', # LATIN CAPITAL LETTER U WITH HORN AND GRAVE
'\u1eeb': b'\xc1\xceu', # LATIN SMALL LETTER U WITH HORN AND GRAVE
'\u1eec': b'\xc0\xceU', # LATIN CAPITAL LETTER U WITH HORN AND HOOK ABOVE
'\u1eed': b'\xc0\xceu', # LATIN SMALL LETTER U WITH HORN AND HOOK ABOVE
'\u1eee': b'\xc4\xceU', # LATIN CAPITAL LETTER U WITH HORN AND TILDE
'\u1eef': b'\xc4\xceu', # LATIN SMALL LETTER U WITH HORN AND TILDE
'\u1ef0': b'\xd6\xceU', # LATIN CAPITAL LETTER U WITH HORN AND DOT BELOW
'\u1ef1': b'\xd6\xceu', # LATIN SMALL LETTER U WITH HORN AND DOT BELOW
'\u1ef2': b'\xc1Y', # LATIN CAPITAL LETTER Y WITH GRAVE
'\u1ef3': b'\xc1y', # LATIN SMALL LETTER Y WITH GRAVE
'\u1ef4': b'\xd6Y', # LATIN CAPITAL LETTER Y WITH DOT BELOW
'\u1ef5': b'\xd6y', # LATIN SMALL LETTER Y WITH DOT BELOW
'\u1ef6': b'\xc0Y', # LATIN CAPITAL LETTER Y WITH HOOK ABOVE
'\u1ef7': b'\xc0y', # LATIN SMALL LETTER Y WITH HOOK ABOVE
'\u1ef8': b'\xc4Y', # LATIN CAPITAL LETTER Y WITH TILDE
'\u1ef9': b'\xc4y', # LATIN SMALL LETTER Y WITH TILDE
'\u1fef': b'`', # GREEK VARIA
'\u2018': b'\xa9', # LEFT SINGLE QUOTATION MARK
'\u2019': b'\xb9', # RIGHT SINGLE QUOTATION MARK
'\u201a': b'\xb2', # SINGLE LOW-9 QUOTATION MARK
'\u201c': b'\xaa', # LEFT DOUBLE QUOTATION MARK
'\u201d': b'\xba', # RIGHT DOUBLE QUOTATION MARK
'\u201e': b'\xa2', # DOUBLE LOW-9 QUOTATION MARK
'\u2020': b'\xa6', # DAGGER
'\u2021': b'\xb6', # DOUBLE DAGGER
'\u2032': b'\xa8', # PRIME
'\u2033': b'\xb8', # DOUBLE PRIME
'\u2117': b'\xae', # SOUND RECORDING COPYRIGHT
#'\u212a': b'K', # KELVIN SIGN
'\u212b': b'\xcaA', # ANGSTROM SIGN
'\u266d': b'\xac', # MUSIC FLAT SIGN
'\u266f': b'\xbc', # MUSIC SHARP SIGN
'\ufe20': b'\xdd', # COMBINING LIGATURE LEFT HALF
'\ufe21': b'\xde', # COMBINING LIGATURE RIGHT HALF
'\ufe23': b'\xdf', # COMBINING DOUBLE TILDE RIGHT HALF
}
# Reverse (byte sequence -> unicode) mapping built from unicodemap; when several
# unicode characters map to the same byte sequence, the first entry wins.
charmap = {}
# getattr() picks dict.iteritems on Python 2 and falls back to dict.items on Python 3.
for uni, char in getattr(unicodemap, "iteritems", unicodemap.items)():
    if char in charmap:
        continue
    charmap[char] = uni
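# Minimal usage sketch (illustrative addition, not part of the original module):
# registering the CodecInfo objects defined above with a codecs search function
# exposes the encodings to the normal str.encode() / bytes.decode() machinery.
# The function names below are assumptions made only for this example.
def _register_iso5426():
    def _search(name):
        # codecs normalises lookup names, so accept both spellings
        if name in ('iso-5426', 'iso_5426'):
            return codecInfo
        if name in ('iso-5426-xe0', 'iso_5426_xe0'):
            return specialXE0CodecInfo
        return None
    codecs.register(_search)

# Example (kept commented out so importing the module stays side-effect free):
# _register_iso5426()
# b'caf\xc2e'.decode('iso-5426')   # combining acute + 'e' -> 'café'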
|
<filename>msl/qt/prompt.py<gh_stars>0
"""
Convenience functions to prompt the user.
The following functions create a dialog window to either notify the user of an
event that happened or to request information from the user.
"""
import traceback
from . import QtWidgets, QtCore, application
def critical(message, title=None):
"""Display the critical `message` in a dialog window.
Parameters
----------
message : :class:`str` or :class:`Exception`
The message to display.
title : :class:`str`, optional
The text to display in the title bar of the dialog window.
If :obj:`None` then uses the text in the title bar of the active window.
"""
app, title = _get_app_and_title(title)
if isinstance(message, Exception):
message = traceback.format_exc()
QtWidgets.QMessageBox.critical(app.activeWindow(), title, str(message))
def double(message, default=0, minimum=-2147483647, maximum=2147483647, precision=1, title=None):
"""Request a floating-point value.
Parameters
----------
message : :class:`str`
The message that is shown to the user to describe what the value represents.
default : :class:`float`, optional
The default floating-point value.
minimum : :class:`float`, optional
The minimum value that the user can enter.
maximum : :class:`float`, optional
The maximum value that the user can enter.
precision : :class:`int`, optional
The number of digits that are displayed after the decimal point.
title : :class:`str`, optional
The text to display in the title bar of the dialog window.
If :obj:`None` then uses the text in the title bar of the active window.
Returns
-------
:class:`float` or :obj:`None`
The floating-point value or :obj:`None` if the user cancelled
the request to enter a floating-point number.
"""
app, title = _get_app_and_title(title)
value, ok = QtWidgets.QInputDialog.getDouble(app.activeWindow(), title, message,
default, minimum, maximum, precision,
flags=QtCore.Qt.WindowCloseButtonHint)
return value if ok else None
def filename(initial=None, filters=None, multiple=False, title='Select File'):
"""Request to select the file(s) to open.
Parameters
----------
initial : :class:`str`, optional
The initial directory to start in.
filters : :class:`str`, :class:`list` of :class:`str` or :class:`dict`, optional
Only filenames that match the specified `filters` are shown.
Examples::
'Images (*.png *.xpm *.jpg)'
'Images (*.png *.xpm *.jpg);;Text files (*.txt);;XML files (*.xml)'
['Images (*.png *.xpm *.jpg)', 'Text files (*.txt)', 'XML files (*.xml)']
{'Images': ('*.png', '*.xpm', '*.jpg'), 'Text files': '*.txt'}
multiple : :class:`bool`, optional
Whether multiple files can be selected.
title : :class:`str`, optional
The text to display in the title bar of the dialog window.
Returns
-------
:class:`str` or :class:`list` of :class:`str`
The name(s) of the file(s) to open or :obj:`None` if the user cancelled
the request to select a file.
"""
app, title = _get_app_and_title(title)
filters = _get_file_filters(filters)
if multiple:
if title == 'Select File':
title += 's'
name, _ = QtWidgets.QFileDialog.getOpenFileNames(app.activeWindow(), title, initial, filters)
else:
name, _ = QtWidgets.QFileDialog.getOpenFileName(app.activeWindow(), title, initial, filters)
return name if len(name) > 0 else None
def folder(initial=None, title='Select Folder'):
"""Request to select an existing folder or to create a new folder.
Parameters
----------
initial : :class:`str`, optional
The initial directory to start in.
title : :class:`str`, optional
The text to display in the title bar of the dialog window.
Returns
-------
:class:`str`
The name of the selected folder or :obj:`None` if the user cancelled
the request to select a folder.
"""
app, title = _get_app_and_title(title)
name = QtWidgets.QFileDialog.getExistingDirectory(app.activeWindow(), title, initial)
return name if len(name) > 0 else None
def information(message, title=None):
"""Display the information `message` in a dialog window.
Parameters
----------
message : :class:`str` or :class:`Exception`
The message to display.
title : :class:`str`, optional
The text to display in the title bar of the dialog window.
If :obj:`None` then uses the text in the title bar of the active window.
"""
app, title = _get_app_and_title(title)
if isinstance(message, Exception):
message = traceback.format_exc()
QtWidgets.QMessageBox.information(app.activeWindow(), title, str(message))
def integer(message, default=0, minimum=-2147483647, maximum=2147483647, step=1, title=None):
"""Request an integer value.
Parameters
----------
message : :class:`str`
The message that is shown to the user to describe what the value represents.
default : :class:`int`, optional
The default integer value.
minimum : :class:`int`, optional
The minimum value that the user can enter.
maximum : :class:`int`, optional
The maximum value that the user can enter.
step : :class:`int`, optional
The amount by which the values change as the user presses the arrow
buttons to increment or decrement the value.
title : :class:`str`, optional
The text to display in the title bar of the dialog window.
If :obj:`None` then uses the text in the title bar of the active window.
Returns
-------
:class:`int` or :obj:`None`
The integer value or :obj:`None` if the user cancelled the request to
enter a number.
"""
app, title = _get_app_and_title(title)
value, ok = QtWidgets.QInputDialog.getInt(app.activeWindow(), title, message,
default, minimum, maximum, step,
flags=QtCore.Qt.WindowCloseButtonHint)
return value if ok else None
def item(message, items, index=0, title=None):
"""Request an item from a list of items.
Parameters
----------
message : :class:`str`
        The message that is shown to the user to describe what the list of items represents.
items : :class:`list` of :class:`object`
The list of items to choose from. The items can be of any data type.
index : :class:`int`, optional
The index of the default item that is selected.
title : :class:`str`, optional
The text to display in the title bar of the dialog window.
If :obj:`None` then uses the text in the title bar of the active window.
Returns
-------
:class:`object`
The selected item or :obj:`None` if the user cancelled the request to
select an item.
.. note::
The data type of the selected item is preserved. For example, if
`items` = ``[1, 2.0, 2+3j, 'hello', b'world', True, QtWidgets.QPushButton]``
and the user selects the ``True`` item then :obj:`True` is returned.
"""
app, title = _get_app_and_title(title)
items_ = [str(i) for i in items]
value, ok = QtWidgets.QInputDialog.getItem(app.activeWindow(), title, message, items_, index,
editable=False,
flags=QtCore.Qt.WindowCloseButtonHint,
inputMethodHints=QtCore.Qt.ImhNone)
return items[items_.index(value)] if ok else None
def notes(json_path=None, title=None, even_row_color='#FFFFFF', odd_row_color='#EAF2F8'):
"""Ask the user to enter notes.
Opens a :class:`QtWidgets.QDialog` to allow for a user to enter a detailed
description of a task that they are performing. The :class:`QtWidgets.QDialog`
provides a table of all the previous notes that have been used. Notes that are
in the table can be deleted by selecting the desired row(s) and pressing the
``delete`` key or the note in a row can be copied to the note editor by
double-clicking on a row.
This function is useful when acquiring data and you want to include notes
about how the data was acquired. Using a prompt to enter notes forces you
to manually enter the notes every time you acquire data rather than having
the notes typed directly onto the graphical user interface, which you might
forget to update before acquiring the next data set.
.. _JSON: https://www.json.org/
Parameters
----------
json_path : :class:`str`, optional
The path to a JSON_ file that contains the history of the notes that have
been used. If :obj:`None` then the default file is used. The file will
automatically be created if it does not exist.
title : :class:`str`, optional
The text to display in the title bar of the dialog window.
even_row_color : :class:`QtGui.QColor`, optional
The background color of the even-numbered rows in the history table.
Can be any data type and value that the constructor of a
:class:`QtGui.QColor` accepts.
odd_row_color : :class:`QtGui.QColor`, optional
The background color of the odd-numbered rows in the history table.
Can be any data type and value that the constructor of a
:class:`QtGui.QColor` accepts.
Returns
-------
:class:`str`
The note that was entered.
"""
# import here since there are circular import errors if you import at the module level
from .notes_history import NotesHistory
app, title = _get_app_and_title(title)
nh = NotesHistory(app.activeWindow(), json_path, title, even_row_color, odd_row_color)
nh.exec_()
return nh.text()
def save(initial=None, filters=None, title='Save As', options=None):
"""Request to select the name of a file to save.
Parameters
----------
initial : :class:`str`, optional
The initial directory to start in.
filters : :class:`str`, :class:`list` of :class:`str` or :class:`dict`, optional
Only filenames that match the specified `filters` are shown.
Examples::
'Images (*.png *.xpm *.jpg)'
'Images (*.png *.xpm *.jpg);;Text files (*.txt);;XML files (*.xml)'
['Images (*.png *.xpm *.jpg)', 'Text files (*.txt)', 'XML files (*.xml)']
{'Images': ('*.png', '*.xpm', '*.jpg'), 'Text files': '*.txt'}
title : :class:`str`, optional
The text to display in the title bar of the dialog window.
options : `QtWidgets.QFileDialog.Option <http://doc.qt.io/qt-5/qfiledialog.html#Option-enum>`_, optional
Specify additional options on how to run the dialog.
Returns
-------
:class:`str`
The name of the file to save or :obj:`None` if the user cancelled the
request to select a file.
"""
app, title = _get_app_and_title(title)
filters = _get_file_filters(filters)
if options is None:
name, _ = QtWidgets.QFileDialog.getSaveFileName(app.activeWindow(), title, initial, filters)
else:
name, _ = QtWidgets.QFileDialog.getSaveFileName(app.activeWindow(), title, initial, filters, options=options)
return name if len(name) > 0 else None
def text(message, default='', multi_line=False, title=None):
"""Request text.
Parameters
----------
message : :class:`str`
The message that is shown to the user to describe what the text represents.
default : :class:`str`, optional
The default text.
multi_line : :class:`bool`, optional
Whether the entered text can span multiple lines.
title : :class:`str`, optional
The text to display in the title bar of the dialog window.
If :obj:`None` then uses the text in the title bar of the active window.
Returns
-------
:class:`str`
The text that the user entered or :obj:`None` if the user cancelled the
request to enter text.
"""
app, title = _get_app_and_title(title)
if multi_line:
value, ok = QtWidgets.QInputDialog.getMultiLineText(app.activeWindow(), title, message, default,
flags=QtCore.Qt.WindowCloseButtonHint,
inputMethodHints=QtCore.Qt.ImhNone)
else:
value, ok = QtWidgets.QInputDialog.getText(app.activeWindow(), title, message, QtWidgets.QLineEdit.Normal,
default, flags=QtCore.Qt.WindowCloseButtonHint,
inputMethodHints=QtCore.Qt.ImhNone)
return value.strip() if ok else None
def warning(message, title=None):
"""Display the warning `message` in a dialog window.
Parameters
----------
message : :class:`str` or :class:`Exception`
The message to display.
title : :class:`str`, optional
The text to display in the title bar of the dialog window.
If :obj:`None` then uses the text in the title bar of the active window.
"""
app, title = _get_app_and_title(title)
if isinstance(message, Exception):
message = traceback.format_exc()
QtWidgets.QMessageBox.warning(app.activeWindow(), title, str(message))
def ok_cancel(message, default=True, title=None):
"""Ask for a response to a `message` where the logical options are ``Ok`` and ``Cancel``.
Parameters
----------
message : :class:`str`
The message to ask the user.
default : :class:`bool`, optional
The answer to be selected by default. If :obj:`True` then ``Ok`` is
the default answer, otherwise ``Cancel`` is the default answer.
title : :class:`str`, optional
The text to display in the title bar of the dialog window.
If :obj:`None` then uses the text in the title bar of the active window.
Returns
-------
:class:`bool`
:obj:`True` if the user answered ``Ok``, :obj:`None` if the user answered ``Cancel``.
"""
app, title = _get_app_and_title(title)
d = QtWidgets.QMessageBox.Ok if default else QtWidgets.QMessageBox.Cancel
response = QtWidgets.QMessageBox.question(app.activeWindow(), title, message,
QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel, defaultButton=d)
return True if response == QtWidgets.QMessageBox.Ok else None
def yes_no(message, default=True, title=None):
"""Ask a question to receive a ``Yes`` or ``No`` answer.
Parameters
----------
message : :class:`str`
The question to ask the user.
default : :class:`bool`, optional
The answer to be selected by default. If :obj:`True` then ``Yes`` is
the default answer, if :obj:`False` then ``No`` is the default answer.
title : :class:`str`, optional
The text to display in the title bar of the dialog window.
If :obj:`None` then uses the text in the title bar of the active window.
Returns
-------
:class:`bool`
:obj:`True` if the user answered ``Yes``, :obj:`False` otherwise.
"""
app, title = _get_app_and_title(title)
d = QtWidgets.QMessageBox.Yes if default else QtWidgets.QMessageBox.No
answer = QtWidgets.QMessageBox.question(app.activeWindow(), title, message, defaultButton=d)
return answer == QtWidgets.QMessageBox.Yes
def yes_no_cancel(message, default=True, title=None):
"""Ask a question to receive a ``Yes``, ``No``, or ``Cancel`` answer.
Parameters
----------
message : :class:`str`
The question to ask the user.
default : :class:`bool`, optional
The answer to be selected by default. If :obj:`True` then ``Yes`` is
the default answer, if :obj:`False` then ``No`` is the default answer,
else if :obj:`None` then ``Cancel`` is the default answer.
title : :class:`str`, optional
The text to display in the title bar of the dialog window.
If :obj:`None` then uses the text in the title bar of the active window.
Returns
-------
:class:`bool`
:obj:`True` if the user answered ``Yes``, :obj:`False` if the user answered ``No``,
or :obj:`None` if the user answered ``Cancel``.
"""
app, title = _get_app_and_title(title)
if default is None:
d = QtWidgets.QMessageBox.Cancel
elif default:
d = QtWidgets.QMessageBox.Yes
else:
d = QtWidgets.QMessageBox.No
result = QtWidgets.QMessageBox.question(
app.activeWindow(), title, message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel, defaultButton=d)
if result == QtWidgets.QMessageBox.Yes:
return True
elif result == QtWidgets.QMessageBox.No:
return False
return None
def _get_app_and_title(title):
"""Returns a tuple of the QApplication instance and the title bar text of the active window."""
app = application()
if title is None:
w = app.activeWindow()
title = 'MSL' if w is None else w.windowTitle()
return app, title
def _get_file_filters(filters):
"""Make the `filters` value be in the appropriate syntax."""
def _check_extn(ex):
"""Check the format of the file extension."""
if ex is None:
return all_files
if '*' in ex:
return ex
if ex.startswith('.'):
return '*' + ex
return '*.' + ex
all_files = 'All Files (*)'
if filters is None:
return all_files
if isinstance(filters, dict):
f = ''
for name, extn in filters.items():
if isinstance(extn, (list, tuple)):
f += '{} ({});;'.format(name, ' '.join(_check_extn(e) for e in extn))
else:
f += '{} ({});;'.format(name, _check_extn(extn))
return f[:-2]
if isinstance(filters, (list, tuple)):
return ';;'.join(f if f is not None else all_files for f in filters)
if filters.endswith(';;'):
return filters[:-2]
return filters
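# Minimal usage sketch (illustrative addition, not part of the original module):
# exercises a few of the prompt functions defined above. Assumes a supported Qt
# binding and a display are available; the prompt text and filters are examples only.
if __name__ == '__main__':
    if yes_no('Open a data file?'):
        path = filename(filters={'Data files': ('*.csv', '*.txt')})
        if path is not None:
            repeats = integer('Number of repeats', default=3, minimum=1, maximum=100)
            information('Selected {!r} with {} repeat(s)'.format(path, repeats))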
|
"""This module contains the general information for StorageVirtualDrive ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class StorageVirtualDriveConsts:
ACCESS_POLICY_BLOCKED = "blocked"
ACCESS_POLICY_HIDDEN = "hidden"
ACCESS_POLICY_READ_ONLY = "read-only"
ACCESS_POLICY_READ_WRITE = "read-write"
ACCESS_POLICY_TRANSPORT_READY = "transport-ready"
ACCESS_POLICY_UNKNOWN = "unknown"
ACTUAL_WRITE_CACHE_POLICY_UNKNOWN = "unknown"
ACTUAL_WRITE_CACHE_POLICY_WRITE_BACK = "write-back"
ACTUAL_WRITE_CACHE_POLICY_WRITE_THROUGH = "write-through"
ADMIN_ACTION_TRIGGER_CANCELED = "canceled"
ADMIN_ACTION_TRIGGER_IDLE = "idle"
ADMIN_ACTION_TRIGGER_TRIGGERED = "triggered"
ADMIN_STATE_CLEAR_TRANSPORT_READY = "clear-transport-ready"
ADMIN_STATE_DEGRADED = "degraded"
ADMIN_STATE_DELETE = "delete"
ADMIN_STATE_HIDE = "hide"
ADMIN_STATE_OFFLINE = "offline"
ADMIN_STATE_ONLINE = "online"
ADMIN_STATE_RESTORE = "restore"
ADMIN_STATE_SECURE_DRIVE_GROUP = "secure-drive-group"
ADMIN_STATE_TRANSPORT_READY = "transport-ready"
ADMIN_STATE_UNDEFINED = "undefined"
ADMIN_STATE_UNHIDE = "unhide"
AVAILABLE_SIZE_UNKNOWN = "unknown"
BLOCK_SIZE_512 = "512"
BLOCK_SIZE_UNKNOWN = "unknown"
BOOTABLE_FALSE = "false"
BOOTABLE_TRUE = "true"
BOOTABLE_UNKNOWN = "unknown"
CONFIG_STATE_N_A = "N/A"
CONFIG_STATE_APPLIED = "applied"
CONFIG_STATE_APPLY_FAILED = "apply-failed"
CONFIG_STATE_APPLYING = "applying"
CONFIG_STATE_NOT_APPLIED = "not-applied"
CONFIG_STATE_NOT_IN_USE = "not-in-use"
CONFIG_STATE_ORPHANED = "orphaned"
CONFIG_STATE_UNKNOWN = "unknown"
CONFIGURED_WRITE_CACHE_POLICY_ALWAYS_WRITE_BACK = "always-write-back"
CONFIGURED_WRITE_CACHE_POLICY_UNKNOWN = "unknown"
CONFIGURED_WRITE_CACHE_POLICY_WRITE_BACK_GOOD_BBU = "write-back-good-bbu"
CONFIGURED_WRITE_CACHE_POLICY_WRITE_THROUGH = "write-through"
CONNECTION_PROTOCOL_NVME = "NVME"
CONNECTION_PROTOCOL_SAS = "SAS"
CONNECTION_PROTOCOL_SATA = "SATA"
CONNECTION_PROTOCOL_UNSPECIFIED = "unspecified"
DEPLOY_ACTION_ABORT_REPLICATION = "abort-replication"
DEPLOY_ACTION_CREATE = "create"
DEPLOY_ACTION_DELETE = "delete"
DEPLOY_ACTION_MODIFY = "modify"
DEPLOY_ACTION_NO_ACTION = "no-action"
DEPLOY_ACTION_REPLACE = "replace"
DEPLOY_ACTION_RESTORE = "restore"
DEPLOY_ACTION_SET_OFFLINE = "set-offline"
DEPLOY_ACTION_SET_ONLINE = "set-online"
DRIVE_CACHE_DISABLE = "disable"
DRIVE_CACHE_ENABLE = "enable"
DRIVE_CACHE_NO_CHANGE = "no-change"
DRIVE_CACHE_UNKNOWN = "unknown"
DRIVE_SECURITY_FALSE = "false"
DRIVE_SECURITY_NO = "no"
DRIVE_SECURITY_TRUE = "true"
DRIVE_SECURITY_YES = "yes"
DRIVE_STATE_CACHE_DEGRADED = "cache-degraded"
DRIVE_STATE_DEGRADED = "degraded"
DRIVE_STATE_OFFLINE = "offline"
DRIVE_STATE_OPTIMAL = "optimal"
DRIVE_STATE_PARTIALLY_DEGRADED = "partially-degraded"
DRIVE_STATE_REBUILDING = "rebuilding"
DRIVE_STATE_UNKNOWN = "unknown"
ID_UNSPECIFIED = "unspecified"
IO_POLICY_CACHED = "cached"
IO_POLICY_DIRECT = "direct"
IO_POLICY_UNKNOWN = "unknown"
LC_ALLOCATED = "allocated"
LC_AVAILABLE = "available"
LC_DEALLOCATED = "deallocated"
LC_REPURPOSED = "repurposed"
NUMBER_OF_BLOCKS_UNKNOWN = "unknown"
OPER_DEVICE_ID_UNSPECIFIED = "unspecified"
OPER_STATE_COMPUTE_DEGRADED = "compute-degraded"
OPER_STATE_COMPUTE_INOPERABLE = "compute-inoperable"
OPER_STATE_OFFLINE = "offline"
OPER_STATE_ONLINE = "online"
OPER_STATE_UNDEFINED = "undefined"
OPERABILITY_ACCESSIBILITY_PROBLEM = "accessibility-problem"
OPERABILITY_AUTO_UPGRADE = "auto-upgrade"
OPERABILITY_BACKPLANE_PORT_PROBLEM = "backplane-port-problem"
OPERABILITY_BIOS_POST_TIMEOUT = "bios-post-timeout"
OPERABILITY_CHASSIS_INTRUSION = "chassis-intrusion"
OPERABILITY_CHASSIS_LIMIT_EXCEEDED = "chassis-limit-exceeded"
OPERABILITY_CONFIG = "config"
OPERABILITY_DECOMISSIONING = "decomissioning"
OPERABILITY_DEGRADED = "degraded"
OPERABILITY_DISABLED = "disabled"
OPERABILITY_DISCOVERY = "discovery"
OPERABILITY_DISCOVERY_FAILED = "discovery-failed"
OPERABILITY_EQUIPMENT_PROBLEM = "equipment-problem"
OPERABILITY_FABRIC_CONN_PROBLEM = "fabric-conn-problem"
OPERABILITY_FABRIC_UNSUPPORTED_CONN = "fabric-unsupported-conn"
OPERABILITY_IDENTIFY = "identify"
OPERABILITY_IDENTITY_UNESTABLISHABLE = "identity-unestablishable"
OPERABILITY_INOPERABLE = "inoperable"
OPERABILITY_LINK_ACTIVATE_BLOCKED = "link-activate-blocked"
OPERABILITY_MALFORMED_FRU = "malformed-fru"
OPERABILITY_NON_OPTIMAL = "non-optimal"
OPERABILITY_NOT_SUPPORTED = "not-supported"
OPERABILITY_OPERABLE = "operable"
OPERABILITY_PEER_COMM_PROBLEM = "peer-comm-problem"
OPERABILITY_PERFORMANCE_PROBLEM = "performance-problem"
OPERABILITY_POST_FAILURE = "post-failure"
OPERABILITY_POWER_PROBLEM = "power-problem"
OPERABILITY_POWERED_OFF = "powered-off"
OPERABILITY_REMOVED = "removed"
OPERABILITY_THERMAL_PROBLEM = "thermal-problem"
OPERABILITY_UNKNOWN = "unknown"
OPERABILITY_UNSUPPORTED_CONFIG = "unsupported-config"
OPERABILITY_UPGRADE_PROBLEM = "upgrade-problem"
OPERABILITY_VOLTAGE_PROBLEM = "voltage-problem"
PHYSICAL_BLOCK_SIZE_512 = "512"
PHYSICAL_BLOCK_SIZE_UNKNOWN = "unknown"
PRESENCE_EMPTY = "empty"
PRESENCE_EQUIPPED = "equipped"
PRESENCE_EQUIPPED_DEPRECATED = "equipped-deprecated"
PRESENCE_EQUIPPED_DISC_ERROR = "equipped-disc-error"
PRESENCE_EQUIPPED_DISC_IN_PROGRESS = "equipped-disc-in-progress"
PRESENCE_EQUIPPED_DISC_NOT_STARTED = "equipped-disc-not-started"
PRESENCE_EQUIPPED_DISC_UNKNOWN = "equipped-disc-unknown"
PRESENCE_EQUIPPED_IDENTITY_UNESTABLISHABLE = "equipped-identity-unestablishable"
PRESENCE_EQUIPPED_NOT_PRIMARY = "equipped-not-primary"
PRESENCE_EQUIPPED_SLAVE = "equipped-slave"
PRESENCE_EQUIPPED_UNSUPPORTED = "equipped-unsupported"
PRESENCE_EQUIPPED_WITH_MALFORMED_FRU = "equipped-with-malformed-fru"
PRESENCE_INACCESSIBLE = "inaccessible"
PRESENCE_MISMATCH = "mismatch"
PRESENCE_MISMATCH_IDENTITY_UNESTABLISHABLE = "mismatch-identity-unestablishable"
PRESENCE_MISMATCH_SLAVE = "mismatch-slave"
PRESENCE_MISSING = "missing"
PRESENCE_MISSING_SLAVE = "missing-slave"
PRESENCE_NOT_SUPPORTED = "not-supported"
PRESENCE_UNAUTHORIZED = "unauthorized"
PRESENCE_UNKNOWN = "unknown"
READ_POLICY_NORMAL = "normal"
READ_POLICY_READ_AHEAD = "read-ahead"
READ_POLICY_UNKNOWN = "unknown"
SIZE_NOT_APPLICABLE = "not-applicable"
STRIP_SIZE_UNKNOWN = "unknown"
TYPE_MIRROR = "mirror"
TYPE_MIRROR_STRIPE = "mirror-stripe"
TYPE_RAID = "raid"
TYPE_SIMPLE = "simple"
TYPE_STRIPE = "stripe"
TYPE_STRIPE_DUAL_PARITY = "stripe-dual-parity"
TYPE_STRIPE_DUAL_PARITY_STRIPE = "stripe-dual-parity-stripe"
TYPE_STRIPE_PARITY = "stripe-parity"
TYPE_STRIPE_PARITY_STRIPE = "stripe-parity-stripe"
TYPE_UNSPECIFIED = "unspecified"
class StorageVirtualDrive(ManagedObject):
"""This is StorageVirtualDrive class."""
consts = StorageVirtualDriveConsts()
naming_props = set([u'id'])
mo_meta = MoMeta("StorageVirtualDrive", "storageVirtualDrive", "vd-[id]", VersionMeta.Version211a, "InputOutput", 0x3ff, [], ["admin", "ls-compute", "ls-config", "ls-server", "ls-storage"], [u'storageController', u'storageVirtualDriveContainer'], [u'faultInst', u'storageControllerEp', u'storageLunDisk', u'storageOperation', u'storageScsiLunRef', u'storageVDMemberEp'], [None])
prop_meta = {
"access_policy": MoPropertyMeta("access_policy", "accessPolicy", "string", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["blocked", "hidden", "read-only", "read-write", "transport-ready", "unknown"], []),
"actual_write_cache_policy": MoPropertyMeta("actual_write_cache_policy", "actualWriteCachePolicy", "string", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["unknown", "write-back", "write-through"], []),
"admin_action_trigger": MoPropertyMeta("admin_action_trigger", "adminActionTrigger", "string", VersionMeta.Version224b, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["canceled", "idle", "triggered"], []),
"admin_name": MoPropertyMeta("admin_name", "adminName", "string", VersionMeta.Version224b, MoPropertyMeta.READ_WRITE, 0x4, None, None, r"""[\-\.:_a-zA-Z0-9]{0,15}""", [], []),
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version224b, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["clear-transport-ready", "degraded", "delete", "hide", "offline", "online", "restore", "secure-drive-group", "transport-ready", "undefined", "unhide"], []),
"available_size": MoPropertyMeta("available_size", "availableSize", "string", VersionMeta.Version224b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["unknown"], ["0-18446744073709551615"]),
"block_size": MoPropertyMeta("block_size", "blockSize", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["512", "unknown"], ["0-4294967295"]),
"bootable": MoPropertyMeta("bootable", "bootable", "string", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "true", "unknown"], []),
"change_id": MoPropertyMeta("change_id", "changeId", "ulong", VersionMeta.Version227b, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["0-18446744073709551615"]),
"change_qualifier": MoPropertyMeta("change_qualifier", "changeQualifier", "string", VersionMeta.Version224b, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|no-change|policy-change|global-hotspare-change|ded-hotspare-change|name-change|size-change|boot-drive-change|scrub-change),){0,8}(defaultValue|no-change|policy-change|global-hotspare-change|ded-hotspare-change|name-change|size-change|boot-drive-change|scrub-change){0,1}""", [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, 0x10, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"config_qualifier_reason": MoPropertyMeta("config_qualifier_reason", "configQualifierReason", "string", VersionMeta.Version224b, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"config_state": MoPropertyMeta("config_state", "configState", "string", VersionMeta.Version224b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["N/A", "applied", "apply-failed", "applying", "not-applied", "not-in-use", "orphaned", "unknown"], []),
"configured_write_cache_policy": MoPropertyMeta("configured_write_cache_policy", "configuredWriteCachePolicy", "string", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["always-write-back", "unknown", "write-back-good-bbu", "write-through"], []),
"connection_protocol": MoPropertyMeta("connection_protocol", "connectionProtocol", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["NVME", "SAS", "SATA", "unspecified"], []),
"deploy_action": MoPropertyMeta("deploy_action", "deployAction", "string", VersionMeta.Version224b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["abort-replication", "create", "delete", "modify", "no-action", "replace", "restore", "set-offline", "set-online"], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version251a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []),
"drive_cache": MoPropertyMeta("drive_cache", "driveCache", "string", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["disable", "enable", "no-change", "unknown"], []),
"drive_security": MoPropertyMeta("drive_security", "driveSecurity", "string", VersionMeta.Version321d, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["false", "no", "true", "yes"], []),
"drive_state": MoPropertyMeta("drive_state", "driveState", "string", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["cache-degraded", "degraded", "offline", "optimal", "partially-degraded", "rebuilding", "unknown"], []),
"id": MoPropertyMeta("id", "id", "string", VersionMeta.Version211a, MoPropertyMeta.NAMING, 0x80, None, None, None, ["unspecified"], ["0-4294967295"]),
"io_policy": MoPropertyMeta("io_policy", "ioPolicy", "string", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["cached", "direct", "unknown"], []),
"lc": MoPropertyMeta("lc", "lc", "string", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["allocated", "available", "deallocated", "repurposed"], []),
"locale": MoPropertyMeta("locale", "locale", "string", VersionMeta.Version224b, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"model": MoPropertyMeta("model", "model", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version224b, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"number_of_blocks": MoPropertyMeta("number_of_blocks", "numberOfBlocks", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["unknown"], ["0-18446744073709551615"]),
"oper_device_id": MoPropertyMeta("oper_device_id", "operDeviceId", "string", VersionMeta.Version224b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["unspecified"], ["0-4294967295"]),
"oper_qualifier_reason": MoPropertyMeta("oper_qualifier_reason", "operQualifierReason", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version224b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["compute-degraded", "compute-inoperable", "offline", "online", "undefined"], []),
"operability": MoPropertyMeta("operability", "operability", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["accessibility-problem", "auto-upgrade", "backplane-port-problem", "bios-post-timeout", "chassis-intrusion", "chassis-limit-exceeded", "config", "decomissioning", "degraded", "disabled", "discovery", "discovery-failed", "equipment-problem", "fabric-conn-problem", "fabric-unsupported-conn", "identify", "identity-unestablishable", "inoperable", "link-activate-blocked", "malformed-fru", "non-optimal", "not-supported", "operable", "peer-comm-problem", "performance-problem", "post-failure", "power-problem", "powered-off", "removed", "thermal-problem", "unknown", "unsupported-config", "upgrade-problem", "voltage-problem"], []),
"physical_block_size": MoPropertyMeta("physical_block_size", "physicalBlockSize", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["512", "unknown"], ["0-4294967295"]),
"pn_dn": MoPropertyMeta("pn_dn", "pnDn", "string", VersionMeta.Version251a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"presence": MoPropertyMeta("presence", "presence", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["empty", "equipped", "equipped-deprecated", "equipped-disc-error", "equipped-disc-in-progress", "equipped-disc-not-started", "equipped-disc-unknown", "equipped-identity-unestablishable", "equipped-not-primary", "equipped-slave", "equipped-unsupported", "equipped-with-malformed-fru", "inaccessible", "mismatch", "mismatch-identity-unestablishable", "mismatch-slave", "missing", "missing-slave", "not-supported", "unauthorized", "unknown"], []),
"read_policy": MoPropertyMeta("read_policy", "readPolicy", "string", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["normal", "read-ahead", "unknown"], []),
"ref_dn": MoPropertyMeta("ref_dn", "refDn", "string", VersionMeta.Version251a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"revision": MoPropertyMeta("revision", "revision", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x100, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"security_flags": MoPropertyMeta("security_flags", "securityFlags", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|none|driveSecurityCapable|driveSecurityEnable),){0,3}(defaultValue|none|driveSecurityCapable|driveSecurityEnable){0,1}""", [], []),
"serial": MoPropertyMeta("serial", "serial", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"size": MoPropertyMeta("size", "size", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["not-applicable"], ["0-18446744073709551615"]),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x200, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"strip_size": MoPropertyMeta("strip_size", "stripSize", "string", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["unknown"], ["0-18446744073709551615"]),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["mirror", "mirror-stripe", "raid", "simple", "stripe", "stripe-dual-parity", "stripe-dual-parity-stripe", "stripe-parity", "stripe-parity-stripe", "unspecified"], []),
"uuid": MoPropertyMeta("uuid", "uuid", "string", VersionMeta.Version224b, MoPropertyMeta.READ_ONLY, None, None, None, r"""(([0-9a-fA-F]){8}\-([0-9a-fA-F]){4}\-([0-9a-fA-F]){4}\-([0-9a-fA-F]){4}\-([0-9a-fA-F]){12})|0""", [], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"vendor_uuid": MoPropertyMeta("vendor_uuid", "vendorUuid", "string", VersionMeta.Version224b, MoPropertyMeta.READ_ONLY, None, None, None, r"""(([0-9a-fA-F]){8}\-([0-9a-fA-F]){4}\-([0-9a-fA-F]){4}\-([0-9a-fA-F]){4}\-([0-9a-fA-F]){12})|0""", [], []),
}
prop_map = {
"accessPolicy": "access_policy",
"actualWriteCachePolicy": "actual_write_cache_policy",
"adminActionTrigger": "admin_action_trigger",
"adminName": "admin_name",
"adminState": "admin_state",
"availableSize": "available_size",
"blockSize": "block_size",
"bootable": "bootable",
"changeId": "change_id",
"changeQualifier": "change_qualifier",
"childAction": "child_action",
"configQualifierReason": "config_qualifier_reason",
"configState": "config_state",
"configuredWriteCachePolicy": "configured_write_cache_policy",
"connectionProtocol": "connection_protocol",
"deployAction": "deploy_action",
"descr": "descr",
"dn": "dn",
"driveCache": "drive_cache",
"driveSecurity": "drive_security",
"driveState": "drive_state",
"id": "id",
"ioPolicy": "io_policy",
"lc": "lc",
"locale": "locale",
"model": "model",
"name": "name",
"numberOfBlocks": "number_of_blocks",
"operDeviceId": "oper_device_id",
"operQualifierReason": "oper_qualifier_reason",
"operState": "oper_state",
"operability": "operability",
"physicalBlockSize": "physical_block_size",
"pnDn": "pn_dn",
"presence": "presence",
"readPolicy": "read_policy",
"refDn": "ref_dn",
"revision": "revision",
"rn": "rn",
"sacl": "sacl",
"securityFlags": "security_flags",
"serial": "serial",
"size": "size",
"status": "status",
"stripSize": "strip_size",
"type": "type",
"uuid": "uuid",
"vendor": "vendor",
"vendorUuid": "vendor_uuid",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.access_policy = None
self.actual_write_cache_policy = None
self.admin_action_trigger = None
self.admin_name = None
self.admin_state = None
self.available_size = None
self.block_size = None
self.bootable = None
self.change_id = None
self.change_qualifier = None
self.child_action = None
self.config_qualifier_reason = None
self.config_state = None
self.configured_write_cache_policy = None
self.connection_protocol = None
self.deploy_action = None
self.descr = None
self.drive_cache = None
self.drive_security = None
self.drive_state = None
self.io_policy = None
self.lc = None
self.locale = None
self.model = None
self.name = None
self.number_of_blocks = None
self.oper_device_id = None
self.oper_qualifier_reason = None
self.oper_state = None
self.operability = None
self.physical_block_size = None
self.pn_dn = None
self.presence = None
self.read_policy = None
self.ref_dn = None
self.revision = None
self.sacl = None
self.security_flags = None
self.serial = None
self.size = None
self.status = None
self.strip_size = None
self.type = None
self.uuid = None
self.vendor = None
self.vendor_uuid = None
ManagedObject.__init__(self, "StorageVirtualDrive", parent_mo_or_dn, **kwargs)
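# Illustrative usage sketch (the parent DN below is hypothetical, not taken from
# this file): the class is normally instantiated with a parent managed object or
# DN string plus the naming property "id", like other generated MOs in this SDK.
if __name__ == "__main__":
    mo = StorageVirtualDrive(
        parent_mo_or_dn="sys/chassis-1/blade-1/board/storage-SAS-1",
        id="1000",
    )
    print(mo.dn)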
<filename>python.py
# _*_ coding=UTF-8 _*_
# 1.type and object
# Every type is a subclass of object, except object itself (it has no base class)
# Every type is an instance of type, including type itself
print(object)            # object is itself a class
print(type(object))      # object is an instance of type
print(object.__class__)  # object is an instance of type
print(object.__bases__)  # object has no base class
print(int)               # int is itself a class
print(type(int))         # int is an instance of type
print(int.__class__)     # int is an instance of type
print(int.__bases__)     # int's base class is object
print(type)              # type is itself a class
print(type(type))        # type is an instance of type
print(type.__class__)    # type is an instance of type
print(type.__bases__)    # type's base class is object
print(type(1))           # 1 is an instance of int
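# The same relationships, checked with isinstance/issubclass instead of printing
# __class__ and __bases__ (a small illustrative addition).
print(isinstance(object, type))   # True: object is an instance of type
print(isinstance(type, type))     # True: type is an instance of itself
print(issubclass(type, object))   # True: every class ultimately derives from object
print(issubclass(int, object))    # True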
# 2.decorator
# Class-based decorator
class ClassDecorator():
def __init__(self):
pass
def __call__(self, func):
def wrapper(*args, **kwargs):
print("class decorator")
return func(*args, **kwargs)
return wrapper
@ClassDecorator()
def class_print():
print("class print")
class_print()
# Function-based decorator that takes an argument
def func_decorator(para):
def func_wrapper(func):
def wrapper(*args, **kwargs):
print(para)
return func(*args, **kwargs)
return wrapper
return func_wrapper
@func_decorator("hello world")
def func_print():
print("func print")
func_print()
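# Illustrative extra: the decorators above replace the wrapped function's
# metadata; functools.wraps preserves __name__ and __doc__.
import functools
def traced(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print("calling %s" % func.__name__)
        return func(*args, **kwargs)
    return wrapper
@traced
def traced_print():
    print("traced print")
traced_print()
print(traced_print.__name__)  # 'traced_print', not 'wrapper'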
# 3.classmethod staticmethod instancemethod
class PythonMethod(object):
class_attr = "class attr"
def __init__(self, idr, name, age):
self._idr = idr
self._name = name
self.__age = age
@property
def idr(self):
return self._idr
@idr.setter
def idr(self, idr):
self._idr = idr
@classmethod
def class_method(cls):
print("class_method:%s." % cls.class_attr)
@staticmethod
def static_method():
print("static_method")
    def normal_method():  # note: no self parameter, so it can only be called via the class, not an instance
        print("normal_method")
def __str__(self):
return "PythonMethod__str__" # print(pm)调用
def __repr__(self):
return "PythonMethod__repr__" # >>>pm调用
    def __getattr__(self, key):
        # only called when normal attribute lookup fails; return a default instead of raising KeyError
        return self.__dict__.get(key)
def __setattr__(self, key, value):
self.__dict__[key] = value
pm = PythonMethod(1,"bao",12)
print(pm.idr)
pm.idr = 2
print(pm.idr)
pm.static_method()
pm.class_method()
PythonMethod.normal_method()
PythonMethod.static_method()
print(pm)
print(str(pm))
print(repr(pm))
setattr(pm,"name","cheng")
print(getattr(pm,"name"))
# 4.iterator generator closure
class Iterator(object):
def __init__(self, count):
self.i = 0
self.count = count
def __iter__(self):
return self
def __next__(self):
if self.i < self.count:
ret = self.i
self.i += 1
return ret
else:
            raise StopIteration()  # the for loop catches this and ends the iteration
for i in Iterator(3):
print(i)
def odd():
n = 1
while True:
yield n
n += 2
odd_num = odd()
count = 0
for o in odd_num:
if count>=5:
break
print(o)
count += 1
class Odd():
def __init__(self):
self.start = -1
def __iter__(self):
return self
def __next__(self):
self.start += 2
return self.start
O = Odd()
for i in range(5):
print(next(O))
def closure():
x = 1
def inner():
nonlocal x
x += 1
return x
return inner
func = closure()
print(func()) # 2
print(func()) # 3
print(func.__closure__)
func_list = []
for i in range(2):
def func(x):
return i*x
func_list.append(func)
for f in func_list:
    print(f(2))  # prints 2 twice: both closures see the final value of i (late binding)
func_list.clear()
for i in range(2):
def make_func(i):
def func(x):
return i*x
return func
func_list.append(make_func(i))
for f in func_list:
    print(f(2))  # prints 0 then 2: make_func binds each i when it is called
func = lambda x: x*x
for i in range(3):
print(func(i))
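# Illustrative extra: the odd-number stream written as a generator expression
# over itertools.count, instead of a hand-written iterator class.
import itertools
odd_gen = (n for n in itertools.count(1) if n % 2 == 1)
print([next(odd_gen) for _ in range(5)])  # [1, 3, 5, 7, 9]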
# 5. Registering callback functions through decorators
class FuncRegister():
def __init__(self):
self.callback = None
    def register(self, func):
        self.callback = func
        return func  # return the function so the decorated name still refers to it
def call(self):
self.callback()
fr = FuncRegister()
@fr.register
def print_helloworld():
print("hello world")
fr.call() # hello world
class UrlHandlerReg():
def __init__(self):
self.handler_dict = {}
    def get(self, func):
        self.handler_dict["get"] = func
        return func  # keep the decorated name bound to the original function
    def post(self, method):
        def wrapper(func):
            self.handler_dict[method] = func
            return func  # keep the decorated name bound to the original function
        return wrapper
def urlhandler_print(self):
for k,v in self.handler_dict.items():
print("%s:%s." %(k, v))
uhr = UrlHandlerReg()
@uhr.get
def get_handler():
pass
@uhr.post("post")
def post_handler():
pass
uhr.urlhandler_print()
# 6. subprocess asyncio threading multiprocessing
import subprocess
ret = subprocess.call('dir',shell=True)
print(ret)
import os
import threading
import asyncio
async def hello():
    print("asyncio thread:%s" % (threading.current_thread()))
    r = await asyncio.sleep(1)
    print("asyncio thread:%s" % (threading.current_thread()))
loop = asyncio.new_event_loop()
# wrap the coroutines in Tasks: bare coroutines passed to asyncio.wait() are
# rejected on newer Python versions
tasks = [loop.create_task(hello()), loop.create_task(hello())]
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
tlock = threading.Lock()
def worker(sign, lock):
lock.acquire()
print("%s:%s" % (sign, os.getpid()))
lock.release()
thread_list = []
for i in range(3):
thread = threading.Thread(target=worker, args=('thread',tlock))
thread_list.append(thread)
thread.start()
for t in thread_list:
t.join()
import platform
if 'Linux' in platform.platform():
import multiprocessing
plock = multiprocessing.Lock()
process_list = []
for i in range(3):
process = multiprocessing.Process(target=worker, args=('process',plock))
process_list.append(process)
process.start()
for p in process_list:
p.join()
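# Illustrative extra: the same worker fanned out through the standard-library
# thread pool, which handles the start/join bookkeeping for us.
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(max_workers=3) as pool:
    for _ in range(3):
        pool.submit(worker, 'pool-thread', tlock)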
# 7. Global variables, local variables, and default function arguments
start = 100
def tester(start):
def nested(label):
nonlocal start
print(label, start)
start += 3
return nested
func = tester(100)
func(100) # 100 100
print(start) # 100
def tester_global(start):
def nested(label):
global start
print(label, start)
start += 3
return nested
func = tester_global(200)
func(100) # 100 100
print(start) # 103
def function(para=[]):
para.append(1)
print(para)
function() # [1]
function() # [1,1]
function() # [1,1,1]
def function(para=None):
    if para is None:
para = []
para.append(1)
print(para)
function() # [1]
function() # [1]
function() # [1]
def function(count=0):
count += 1
print(count)
function() # 1
function() # 1
function(2) # 3
function() # 1
# 8.map reduce filter
maps = [1,2,3]
new_maps = list(map(lambda x:x+1, maps))
print(new_maps) #[2,3,4]
import functools
new_reduces = functools.reduce(lambda x,y:x+y, maps)
print(new_reduces) # 6
new_filter = list(filter(lambda x: x>1 , maps))
print(new_filter) # [2,3]
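# Illustrative extra: comprehension/builtin equivalents of the map, reduce and
# filter calls above.
print([x + 1 for x in maps])        # [2, 3, 4], same as the map example
print(sum(maps))                    # 6, same as the reduce example
print([x for x in maps if x > 1])   # [2, 3], same as the filter example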
# 9.logging
import logging
import sys
formatter = logging.Formatter('%(asctime)s %(levelname)-8s: %(name)s:%(module)s:%(filename)s:%(funcName)s:%(lineno)d:%(message)s')
console_handler = logging.StreamHandler()
console_handler.formatter = formatter
file_handler = logging.FileHandler('python.log')
file_handler.formatter = formatter
logger = logging.getLogger("python")
logger.addHandler(console_handler)
logger.addHandler(file_handler)
logger.setLevel(logging.DEBUG)
logger.debug("hello world debug")
logger.info("hello world info")
logger.warning("hello world warning")
logger.error("hello world error")
logger.fatal("hello world fatal")
logging.basicConfig(format='%(asctime)s %(levelname)-8s:%(message)s', level=logging.DEBUG)
logging.debug('This message should go to the console')
import encodings
help(encodings)
print(sys.platform)
print(sys.getdefaultencoding())
print(sys.getsizeof(object))
# 10.opencv
import cv2
im = cv2.imread('./Koala.jpg')
print(im.shape)
h,w = im.shape[:2]
print(h,w)
cv2.imwrite('./Koala_new.png', im)
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
print(gray.shape)
<filename>Code/tg_plot_subg_hilobias.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 30 11:32:36 2019
@author: ott
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from tg_suboptimal_goal_choice import tg_suboptimal_goal_choice
def tg_plot_subg_hilobias(dat,params,save = False):
params['bias_2_median'].min()
highest = params['bias_2_median'].idxmax()
highest_val = params.loc[highest,['bias_2_median']]
lowest = params['bias_2_median'].idxmin()
lowest_val = params.loc[lowest,['bias_2_median']]
_,_,_,_,_,subg1,_ = tg_suboptimal_goal_choice(dat,subjects=[lowest],plotting=False)
_,_,_,_,_,subg2,_ = tg_suboptimal_goal_choice(dat,subjects=[highest],plotting=False)
#%% Plotting
barx1 = [0,1,3,4,6,7,9,10]
bary1 = subg1
barerr1 = np.zeros(len(subg1))
barx2 = [0,1,3,4,6,7,9,10]
bary2 = subg2
barerr2 = np.zeros(len(subg2))
ylabel = ['Suboptimal g-choice','Suboptimal g-choice','Suboptimal g2-choice','Suboptimal g1-choice']
subplot_labels =['A','B','C','D']
xticks2 = barx1
xticklabels2 = ['g2','g1','g2','g1','g2','g1','g2','g1']
tick_length = 2
tick_width = 1
red_patch = mpatches.Patch(color='red', label='easy')
green_patch = mpatches.Patch(color='green', label='medium')
blue_patch = mpatches.Patch(color='blue', label='hard')
grey_patch = mpatches.Patch(color='grey', label='all')
colors = ['red','green','blue','grey']
titles =['Low Strategy \n Preference','High Strategy \n Preference' ]
fig, ax = plt.subplots(nrows=1, ncols=2,figsize=(3.3,1.4))
plt.tight_layout()
for i in range(8):
ax[0].bar(barx1[i], bary1[i], width=0.8, color = colors[(np.floor(i/2).astype(int))], yerr=barerr1[i] ,error_kw=dict(elinewidth=1 ),alpha=0.5)
ax[1].bar(barx2[i], bary2[i], width=0.8, color = colors[(np.floor(i/2).astype(int))], yerr=barerr2[i] ,error_kw=dict(elinewidth=1 ),alpha=0.5)
for i ,axes in enumerate(ax.flat):
axes.text(-0.15,1.2,subplot_labels[i],transform=axes.transAxes,horizontalalignment='center',verticalalignment='center',fontsize=10,fontweight='bold')
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.xaxis.set_tick_params(top='off', direction='out', width=1)
axes.yaxis.set_tick_params(right='off', direction='out', width=1)
axes.set_ylim((0,0.7))
axes.tick_params(length=tick_length, width=tick_width)
axes.set_xticks(xticks2)
axes.set_xticklabels(xticklabels2)
axes.tick_params(axis='x',labelsize=6)
axes.set_title(titles[i])
if i == 0:
axes.legend(handles=[red_patch,green_patch,blue_patch,grey_patch],loc='upper center', bbox_to_anchor=(1.1, 1.7), ncol=4,frameon=False)
axes.set_ylabel(ylabel[i],fontsize=7)
if save == True:
fig.savefig('subg_hilobias.png', dpi=300, bbox_inches='tight', transparent=True)
    return highest_val, lowest_val
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class Integers(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.magic1 = self._io.read_bytes(6)
if not self.magic1 == b"\x50\x41\x43\x4B\x2D\x31":
raise kaitaistruct.ValidationNotEqualError(b"\x50\x41\x43\x4B\x2D\x31", self.magic1, self._io, u"/seq/0")
self.uint8 = self._io.read_u1()
self.sint8 = self._io.read_s1()
self.magic_uint = self._io.read_bytes(10)
if not self.magic_uint == b"\x50\x41\x43\x4B\x2D\x55\x2D\x44\x45\x46":
raise kaitaistruct.ValidationNotEqualError(b"\x50\x41\x43\x4B\x2D\x55\x2D\x44\x45\x46", self.magic_uint, self._io, u"/seq/3")
self.uint16 = self._io.read_u2le()
self.uint32 = self._io.read_u4le()
self.uint64 = self._io.read_u8le()
self.magic_sint = self._io.read_bytes(10)
if not self.magic_sint == b"\x50\x41\x43\x4B\x2D\x53\x2D\x44\x45\x46":
raise kaitaistruct.ValidationNotEqualError(b"\x50\x41\x43\x4B\x2D\x53\x2D\x44\x45\x46", self.magic_sint, self._io, u"/seq/7")
self.sint16 = self._io.read_s2le()
self.sint32 = self._io.read_s4le()
self.sint64 = self._io.read_s8le()
self.magic_uint_le = self._io.read_bytes(9)
if not self.magic_uint_le == b"\x50\x41\x43\x4B\x2D\x55\x2D\x4C\x45":
raise kaitaistruct.ValidationNotEqualError(b"\x50\x41\x43\x4B\x2D\x55\x2D\x4C\x45", self.magic_uint_le, self._io, u"/seq/11")
self.uint16le = self._io.read_u2le()
self.uint32le = self._io.read_u4le()
self.uint64le = self._io.read_u8le()
self.magic_sint_le = self._io.read_bytes(9)
if not self.magic_sint_le == b"\x50\x41\x43\x4B\x2D\x53\x2D\x4C\x45":
raise kaitaistruct.ValidationNotEqualError(b"\x50\x41\x43\x4B\x2D\x53\x2D\x4C\x45", self.magic_sint_le, self._io, u"/seq/15")
self.sint16le = self._io.read_s2le()
self.sint32le = self._io.read_s4le()
self.sint64le = self._io.read_s8le()
self.magic_uint_be = self._io.read_bytes(9)
if not self.magic_uint_be == b"\x50\x41\x43\x4B\x2D\x55\x2D\x42\x45":
raise kaitaistruct.ValidationNotEqualError(b"\x50\x41\x43\x4B\x2D\x55\x2D\x42\x45", self.magic_uint_be, self._io, u"/seq/19")
self.uint16be = self._io.read_u2be()
self.uint32be = self._io.read_u4be()
self.uint64be = self._io.read_u8be()
self.magic_sint_be = self._io.read_bytes(9)
if not self.magic_sint_be == b"\x50\x41\x43\x4B\x2D\x53\x2D\x42\x45":
raise kaitaistruct.ValidationNotEqualError(b"\x50\x41\x43\x4B\x2D\x53\x2D\x42\x45", self.magic_sint_be, self._io, u"/seq/23")
self.sint16be = self._io.read_s2be()
self.sint32be = self._io.read_s4be()
self.sint64be = self._io.read_s8be()
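# Illustrative usage sketch (the file name is hypothetical; not part of the
# generated code): KaitaiStruct subclasses expose from_file/from_bytes helpers.
if __name__ == "__main__":
    parsed = Integers.from_file("fixed_struct.bin")
    print(parsed.uint16le, parsed.sint64be)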
# Copyright (c) 2020-2021, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""Abstract Syntax Tree for statements."""
from scripting.ast.abc import BaseAST
class AssignmentStatement(BaseAST):
"""Assignment statement, to create or change a variable value."""
def __init__(self, name, exp):
self.name = name
self.exp = exp
def __repr__(self):
return f"AssignStatement({self.name} = {self.exp})"
async def check_types(self, checker):
"""
Check the types of this AST element.
Check that this AST uses the right types. Variable types
are inferred. Returns the expected type, or types
(in a tuple) of this AST, if appropriate.
Args:
checker (Typechecker): the type checker.
Returns:
type (type, tuple or None): the relevant types of this AST.
"""
exp_type = await self.exp.check_types(checker)
checker.set_variable_type(self.name, exp_type)
async def compute(self, script):
"""
Add the assembly expressions necessary to compute this AST element.
Assembly expressions are simple and specialized pieces of
a script. They perform one thing and cannot be divided
into other, smaller expressions. A script is composed
of a list of assembly expressions, called an assembly chain.
It is common for an AST element to create several assembly
expressions in this chain to represent what to do to evaluate
this AST element.
Args:
script (script.Script): the script object.
"""
await self.exp.compute(script)
script.add_expression("STORE", self.name)
class CompoundStatement(BaseAST):
"""Compound statements, composed of two statements."""
def __init__(self, first, second):
self.first = first
self.second = second
def __repr__(self):
return f"CompoundStatement({self.first}, {self.second})"
async def check_types(self, checker):
"""
Check the types of this AST element.
Check that this AST uses the right types. Variable types
are inferred. Returns the expected type, or types
(in a tuple) of this AST, if appropriate.
Args:
checker (Typechecker): the type checker.
Returns:
type (type, tuple or None): the relevant types of this AST.
"""
await self.first.check_types(checker)
await self.second.check_types(checker)
async def compute(self, script):
"""
Add the assembly expressions necessary to compute this AST element.
Assembly expressions are simple and specialized pieces of
a script. They perform one thing and cannot be divided
into other, smaller expressions. A script is composed
of a list of assembly expressions, called an assembly chain.
It is common for an AST element to create several assembly
expressions in this chain to represent what to do to evaluate
this AST element.
Args:
script (script.Script): the script object.
"""
await self.first.compute(script)
await self.second.compute(script)
class IfStatement(BaseAST):
"""If statement."""
def __init__(self, condition, true_stmt, false_stmt):
self.condition = condition
self.true_stmt = true_stmt
self.false_stmt = false_stmt
def __repr__(self):
return f"IfStatement({self.condition}, {self.true_stmt}, {self.false_stmt})"
async def check_types(self, checker):
"""
Check the types of this AST element.
Check that this AST uses the right types. Variable types
are inferred. Returns the expected type, or types
(in a tuple) of this AST, if appropriate.
Args:
checker (Typechecker): the type checker.
Returns:
type (type, tuple or None): the relevant types of this AST.
"""
await self.condition.check_types(checker)
await self.true_stmt.check_types(checker)
if self.false_stmt is not None:
await self.false_stmt.check_types(checker)
async def compute(self, script):
"""
Add the assembly expressions necessary to compute this AST element.
Assembly expressions are simple and specialized pieces of
a script. They perform one thing and cannot be divided
into other, smaller expressions. A script is composed
of a list of assembly expressions, called an assembly chain.
It is common for an AST element to create several assembly
expressions in this chain to represent what to do to evaluate
this AST element.
Args:
script (script.Script): the script object.
"""
await self.condition.compute(script)
after_condition = script.add_expression("IFFALSE", None)
await self.true_stmt.compute(script)
if self.false_stmt:
end_true = script.add_expression("GOTO", None)
false_line = script.next_line
script.update_expression(after_condition, "IFFALSE", false_line)
await self.false_stmt.compute(script)
script.update_expression(end_true, "GOTO", script.next_line)
else:
script.update_expression(after_condition, "IFFALSE", script.next_line)
class WhileStatement(BaseAST):
"""While statement."""
def __init__(self, condition, body):
self.condition = condition
self.body = body
def __repr__(self):
return f"WhileStatement({self.condition}, {self.body})"
async def check_types(self, checker):
"""
Check the types of this AST element.
Check that this AST uses the right types. Variable types
are inferred. Returns the expected type, or types
(in a tuple) of this AST, if appropriate.
Args:
checker (Typechecker): the type checker.
Returns:
type (type, tuple or None): the relevant types of this AST.
"""
await self.condition.check_types(checker)
await self.body.check_types(checker)
async def compute(self, script):
"""
Add the assembly expressions necessary to compute this AST element.
Assembly expressions are simple and specialized pieces of
a script. They perform one thing and cannot be divided
into other, smaller expressions. A script is composed
of a list of assembly expressions, called an assembly chain.
It is common for an AST element to create several assembly
expressions in this chain to represent what to do to evaluate
this AST element.
Args:
script (script.Script): the script object.
"""
before = script.next_line
await self.condition.compute(script)
line = script.add_expression("IFFALSE", None)
await self.body.compute(script)
script.add_expression("GOTO", before)
new_line = script.next_line
script.update_expression(line, "IFFALSE", new_line)
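# Minimal illustrative sketch (not part of this module): a stand-in Script and
# leaf ASTs used to show the IFFALSE/GOTO backpatching performed by
# IfStatement.compute.  The real Script class lives elsewhere in the scripting
# package; this stand-in only mimics the three members compute() relies on
# (add_expression, update_expression, next_line), whose exact semantics here
# are assumptions.
if __name__ == "__main__":
    import asyncio

    class _FakeScript:
        def __init__(self):
            self.chain = []

        @property
        def next_line(self):
            return len(self.chain)

        def add_expression(self, name, argument):
            self.chain.append([name, argument])
            return len(self.chain) - 1

        def update_expression(self, index, name, argument):
            self.chain[index] = [name, argument]

    class _Leaf:
        def __init__(self, label):
            self.label = label

        async def compute(self, script):
            script.add_expression("NOOP", self.label)

    demo_script = _FakeScript()
    statement = IfStatement(_Leaf("cond"), _Leaf("then"), _Leaf("else"))
    asyncio.run(statement.compute(demo_script))
    for line, expression in enumerate(demo_script.chain):
        print(line, expression)  # shows the IFFALSE/GOTO targets filled in after the fact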
<gh_stars>0
from ThesisAnalysis import get_data, ThesisHDF5Reader, get_plot
from ThesisAnalysis.plotting.setup import ThesisPlotter
from ThesisAnalysis.files import spe_files, CHECM
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from CHECLabPy.core.io import DL1Reader
from CHECLabPy.spectrum_fitters.gentile import GentileFitter
from IPython import embed
class SPEPlotter(ThesisPlotter):
def plot(self, n_illuminations, df_array, df_coeff, df_errors):
coeff = df_coeff.iloc[0].to_dict()
errors = df_errors.iloc[0].to_dict()
edges = df_array.iloc[0]['edges']
between = df_array.iloc[0]['between']
fitx = df_array.iloc[0]['fitx']
for i in range(n_illuminations):
color = next(self.ax._get_lines.prop_cycler)['color']
lambda_ = coeff['lambda_{}'.format(i)]
lambda_err = errors['lambda_{}'.format(i)]
hist = df_array.iloc[0]['hist{}'.format(i)]
fit = df_array.iloc[0]['fit{}'.format(i)]
self.ax.hist(between, bins=edges, weights=hist, histtype='step',
color=color)
self.ax.plot(fitx, fit, color=color,
label="{:.3f} ± {:.3f} p.e.".format(lambda_, lambda_err))
self.add_legend()
self.ax.set_xlabel("Charge (mV ns)")
self.ax.set_ylabel("N")
class SPEPlotterTable(SPEPlotter):
def __init__(self):
super().__init__()
self.fig = plt.figure(figsize=self.get_figsize())
self.ax = plt.subplot2grid((3, 2), (0, 0), rowspan=3)
self.ax_t = plt.subplot2grid((3, 2), (0, 1), rowspan=3)
def plot(self, n_illuminations, df_array, df_coeff, df_errors):
super().plot(n_illuminations, df_array, df_coeff, df_errors)
coeff = df_coeff.iloc[0].to_dict()
errors = df_errors.iloc[0].to_dict()
self.ax_t.axis('off')
columns = ['Fit Coeff', 'Fit Errors']
rows = list(coeff.keys())
cells = [['%.3g' % coeff[i], '%.3g' % errors[i]] for i in rows]
table = self.ax_t.table(cellText=cells, rowLabels=rows,
colLabels=columns, loc='center')
table.set_fontsize(10)
def process(file):
name = file.__class__.__name__
input_path = get_data("spe/{}_spe_spectrum.h5".format(name))
with ThesisHDF5Reader(input_path) as reader:
df_array = reader.read("array")
df_coeff = reader.read("coeff")
df_errors = reader.read("errors")
mapping = reader.read_mapping()
metadata = reader.read_metadata()
n_illuminations = metadata['n_illuminations']
base_name = os.path.splitext(os.path.basename(input_path))[0]
output_dir = get_plot("spe_spectrum")
p_spe = SPEPlotter()
p_spe.plot(n_illuminations, df_array, df_coeff, df_errors)
p_spe.save(os.path.join(output_dir, base_name+".pdf"))
p_spe_table = SPEPlotterTable()
p_spe_table.plot(n_illuminations, df_array, df_coeff, df_errors)
p_spe_table.save(os.path.join(output_dir, base_name+"_table.pdf"))
def main():
[process(f) for f in spe_files]
process(CHECM())
if __name__ == '__main__':
    main()
<gh_stars>1-10
#!/usr/bin/python3
# Copyright (c) 2014, whoever
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from struct import unpack
import sys
import vsdsp
from eeprom import Eeprom
patch_bin = (
# finish previous CALL
(0xb8, 0x8c, 0x40, 0xc0, ),
(0x36, 0xf1, 0xd8, 0x02, ),
(0x36, 0xf2, 0x18, 0x17, ),
(0x36, 0xf4, 0xd8, 0x03, ),
# init C0
(0xd4, 0x48, 0x00, 0x24, ), # XOR C0, C0, C0
# get byte from memory
(0x36, 0x13, 0x00, 0x24, ), # LDX (I6)+1, NULL
(0xf4, 0x00, 0x38, 0x14, ), # STX I4, (I6)
(0xf4, 0x00, 0x41, 0x14, ), # MV C0, I4
#(0x34, 0x14, 0x80, 0x24, ), # LDX (I4)+1, I2
(0x34, 0x09, 0x10, 0x52, ), # LDY (I4)+1, I2
#(0xf4, 0x00, 0x55, 0x06, ), # LDI (I4)+1, C // instruction not implemented
#(0xf4, 0x00, 0x41, 0x12, ), # MV C0, I2
(0xf4, 0x00, 0x45, 0x04, ), # MV I4, C0
(0x36, 0xf5, 0x00, 0x24, ), # LDX (I6)-1, I4
(0x00, 0x00, 0x00, 0x24, ), # NOP
(0x36, 0x13, 0x00, 0x24, ),
(0x3e, 0x14, 0xf8, 0x03, ),
(0x3e, 0x12, 0x38, 0x17, ),
(0x68, 0x9d, 0xe7, 0xe2, ),
#(0xf4, 0x00, 0x41, 0x12, ), # MV C0, I2
#(0x00, 0x3f, 0xe9, 0x12, ), # I2 = 0xffa4
(0x29, 0x08, 0x3e, 0x40, ), # CALL
(0x00, 0x00, 0x00, 0x24, ), # NOP
#(0xf4, 0x00, 0x42, 0x12, ), # MV LR0, I2
(0xb8, 0x8c, 0x40, 0xc0, ),
(0x36, 0xf1, 0xd8, 0x02, ),
(0x36, 0xf2, 0x18, 0x17, ),
(0x36, 0xf4, 0xd8, 0x03, ),
(0xc4, 0x88, 0x00, 0x24, ), # OR C0, NULL, C0
# infinite loop if zero
(0x00, 0x00, 0x00, 0x24, ),# NOP
(0x28, 0x07, 0xe3, 0xc5, ), # JZS 0x1f8f
#(0x28, 0x07, 0xe4, 0x05, ), # JZS 0x1f90 - for LDI
(0x00, 0x00, 0x00, 0x24, ),# NOP
# jump back and get another word
(0x28, 0x07, 0xdf, 0x40, ), # J 0x1f7d
(0x00, 0x00, 0x00, 0x24, ),# NOP
)
"""for (int = 0; ; ++i) { X[i] = X[i]; Y[i] = Y[i]; }"""
def big2little(data):
"""Convert 4B ints from big to little endian (and vice versa)."""
d = []
for d_ in data:
d.append(d_[3])
d.append(d_[2])
d.append(d_[1])
d.append(d_[0])
return bytes(d)
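# Illustrative self-check: a single 4-byte word comes back with its bytes reversed.
assert big2little(((0x01, 0x02, 0x03, 0x04),)) == b"\x04\x03\x02\x01"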
if __name__ == '__main__':
data = big2little(patch_bin)
asms = vsdsp.disassemble(data)
print("")
print(".org 0x%x" % 0)
print(vsdsp.asm2text(asms))
if len(sys.argv) == 2:
eeprom = open(sys.argv[1], 'rb').read()
eeprom = Eeprom.decode(eeprom)
for blob in eeprom[0]:
for idx in range(0, len(blob.data)-3, 4):
if blob.data[idx+3] == 0x29:
if len(blob.data) > len(data) + 8:
blob.data = blob.data[:idx+8] + data + blob.data[idx+8+len(data):]
open(sys.argv[1] + '.patch_0000.bin', 'wb').write(eeprom.blob())
sys.exit(0)
raise RuntimeError('szz')
<filename>scalyr_agent/third_party_tls/tlslite/handshakesettings.py
# Authors:
# <NAME>
# <NAME> (Arcode Corporation) - cleanup handling of constants
# <NAME> (ported by <NAME>) - TLS 1.2
#
# See the LICENSE file for legal information regarding use of this file.
"""Class for setting handshake parameters."""
from .constants import CertificateType
from .utils import cryptomath
from .utils import cipherfactory
from .utils.compat import ecdsaAllCurves, int_types
CIPHER_NAMES = ["chacha20-poly1305",
"aes256gcm", "aes128gcm",
"aes256", "aes128",
"3des"]
ALL_CIPHER_NAMES = CIPHER_NAMES + ["chacha20-poly1305_draft00",
"rc4", "null"]
MAC_NAMES = ["sha", "sha256", "sha384", "aead"] # Don't allow "md5" by default.
ALL_MAC_NAMES = MAC_NAMES + ["md5"]
KEY_EXCHANGE_NAMES = ["rsa", "dhe_rsa", "ecdhe_rsa", "srp_sha", "srp_sha_rsa",
"ecdh_anon", "dh_anon"]
CIPHER_IMPLEMENTATIONS = ["openssl", "pycrypto", "python"]
CERTIFICATE_TYPES = ["x509"]
RSA_SIGNATURE_HASHES = ["sha512", "sha384", "sha256", "sha224", "sha1"]
ALL_RSA_SIGNATURE_HASHES = RSA_SIGNATURE_HASHES + ["md5"]
RSA_SCHEMES = ["pss", "pkcs1"]
# while secp521r1 is the most secure, it's also much slower than the others
# so place it as the last one
CURVE_NAMES = ["x25519", "x448", "secp384r1", "secp256r1",
"secp521r1"]
ALL_CURVE_NAMES = CURVE_NAMES + ["secp256k1"]
if ecdsaAllCurves:
ALL_CURVE_NAMES += ["secp224r1", "secp192r1"]
ALL_DH_GROUP_NAMES = ["ffdhe2048", "ffdhe3072", "ffdhe4096", "ffdhe6144",
"ffdhe8192"]
class HandshakeSettings(object):
"""
This class encapsulates various parameters that can be used with
a TLS handshake.
:vartype minKeySize: int
:ivar minKeySize: The minimum bit length for asymmetric keys.
If the other party tries to use SRP, RSA, or Diffie-Hellman
parameters smaller than this length, an alert will be
signalled. The default is 1023.
:vartype maxKeySize: int
:ivar maxKeySize: The maximum bit length for asymmetric keys.
If the other party tries to use SRP, RSA, or Diffie-Hellman
parameters larger than this length, an alert will be signalled.
The default is 8193.
:vartype cipherNames: list
:ivar cipherNames: The allowed ciphers.
The allowed values in this list are 'chacha20-poly1305', 'aes256gcm',
'aes128gcm', 'aes256', 'aes128', '3des', 'chacha20-poly1305_draft00',
'null' and
'rc4'. If these settings are used with a client handshake, they
determine the order of the ciphersuites offered in the ClientHello
message.
If these settings are used with a server handshake, the server will
choose whichever ciphersuite matches the earliest entry in this
list.
.. note:: If '3des' is used in this list, but TLS Lite can't find an
add-on library that supports 3DES, then '3des' will be silently
removed.
The default value is list that excludes 'rc4', 'null' and
'chacha20-poly1305_draft00'.
:vartype macNames: list
:ivar macNames: The allowed MAC algorithms.
The allowed values in this list are 'sha384', 'sha256', 'aead', 'sha'
and 'md5'.
The default value is list that excludes 'md5'.
:vartype certificateTypes: list
    :ivar certificateTypes: The allowed certificate types.
        The only allowed certificate type is 'x509'. This list is only used
        with a client handshake. The client will advertise to the server
        which certificate types are supported, and will check that the
        server uses one of the appropriate types.
    :vartype minVersion: tuple
    :ivar minVersion: The minimum allowed SSL/TLS version.
        This variable can be set to (3, 0) for SSL 3.0, (3, 1) for TLS 1.0,
        (3, 2) for TLS 1.1, or (3, 3) for TLS 1.2. If the other party
        wishes to use a lower version, a protocol_version alert will be
        signalled. The default is (3, 1).
    :vartype maxVersion: tuple
    :ivar maxVersion: The maximum allowed SSL/TLS version.
        This variable can be set to (3, 0) for SSL 3.0, (3, 1) for TLS 1.0,
        (3, 2) for TLS 1.1, or (3, 3) for TLS 1.2. If the other party
        wishes to use a higher version, a protocol_version alert will be
        signalled. The default is (3, 3).
    .. warning:: Some servers may (improperly) reject clients which offer
        support for TLS 1.1 or higher. In this case, try lowering
        maxVersion to (3, 1).
:vartype useExperimentalTackExtension: bool
:ivar useExperimentalTackExtension: Whether to enabled TACK support.
Note that TACK support is not standardized by IETF and uses a temporary
TLS Extension number, so should NOT be used in production software.
:vartype sendFallbackSCSV: bool
:ivar sendFallbackSCSV: Whether to, as a client, send FALLBACK_SCSV.
:vartype rsaSigHashes: list
:ivar rsaSigHashes: List of hashes supported (and advertised as such) for
TLS 1.2 signatures over Server Key Exchange or Certificate Verify with
RSA signature algorithm.
The list is sorted from most wanted to least wanted algorithm.
The allowed hashes are: "md5", "sha1", "sha224", "sha256",
"sha384" and "sha512". The default list does not include md5.
:vartype eccCurves: list
:ivar eccCurves: List of named curves that are to be supported
:vartype useEncryptThenMAC: bool
:ivar useEncryptThenMAC: whether to support the encrypt then MAC extension
from RFC 7366. True by default.
:vartype useExtendedMasterSecret: bool
:ivar useExtendedMasterSecret: whether to support the extended master
secret calculation from RFC 7627. True by default.
:vartype requireExtendedMasterSecret: bool
:ivar requireExtendedMasterSecret: whether to require negotiation of
extended master secret calculation for successful connection. Requires
useExtendedMasterSecret to be set to true. False by default.
:vartype defaultCurve: str
:ivar defaultCurve: curve that will be used by server in case the client
did not advertise support for any curves. It does not have to be the
first curve for eccCurves and may be distinct from curves from that
list.
"""
def __init__(self):
self.minKeySize = 1023
self.maxKeySize = 8193
self.cipherNames = list(CIPHER_NAMES)
self.macNames = list(MAC_NAMES)
self.keyExchangeNames = list(KEY_EXCHANGE_NAMES)
self.cipherImplementations = list(CIPHER_IMPLEMENTATIONS)
self.certificateTypes = list(CERTIFICATE_TYPES)
self.minVersion = (3, 1)
self.maxVersion = (3, 3)
self.useExperimentalTackExtension = False
self.sendFallbackSCSV = False
self.useEncryptThenMAC = True
self.rsaSigHashes = list(RSA_SIGNATURE_HASHES)
self.rsaSchemes = list(RSA_SCHEMES)
self.eccCurves = list(CURVE_NAMES)
self.usePaddingExtension = True
self.useExtendedMasterSecret = True
self.requireExtendedMasterSecret = False
self.dhParams = None
self.dhGroups = list(ALL_DH_GROUP_NAMES)
self.defaultCurve = "secp256r1"
@staticmethod
def _sanityCheckKeySizes(other):
"""Check if key size limits are sane"""
if other.minKeySize < 512:
raise ValueError("minKeySize too small")
if other.minKeySize > 16384:
raise ValueError("minKeySize too large")
if other.maxKeySize < 512:
raise ValueError("maxKeySize too small")
if other.maxKeySize > 16384:
raise ValueError("maxKeySize too large")
if other.maxKeySize < other.minKeySize:
raise ValueError("maxKeySize smaller than minKeySize")
@staticmethod
def _sanityCheckPrimitivesNames(other):
"""Check if specified cryptographic primitive names are known"""
unknownCiphers = [val for val in other.cipherNames \
if val not in ALL_CIPHER_NAMES]
if unknownCiphers:
raise ValueError("Unknown cipher name: %s" % unknownCiphers)
unknownMacs = [val for val in other.macNames \
if val not in ALL_MAC_NAMES]
if unknownMacs:
raise ValueError("Unknown MAC name: %s" % unknownMacs)
unknownKex = [val for val in other.keyExchangeNames \
if val not in KEY_EXCHANGE_NAMES]
if unknownKex:
raise ValueError("Unknown key exchange name: %s" % unknownKex)
unknownImpl = [val for val in other.cipherImplementations \
if val not in CIPHER_IMPLEMENTATIONS]
if unknownImpl:
raise ValueError("Unknown cipher implementation: %s" % \
unknownImpl)
unknownType = [val for val in other.certificateTypes \
if val not in CERTIFICATE_TYPES]
if unknownType:
raise ValueError("Unknown certificate type: %s" % unknownType)
unknownCurve = [val for val in other.eccCurves \
if val not in ALL_CURVE_NAMES]
if unknownCurve:
raise ValueError("Unknown ECC Curve name: {0}".format(unknownCurve))
if other.defaultCurve not in ALL_CURVE_NAMES:
raise ValueError("Unknown default ECC Curve name: {0}"
.format(other.defaultCurve))
unknownSigHash = [val for val in other.rsaSigHashes \
if val not in ALL_RSA_SIGNATURE_HASHES]
if unknownSigHash:
raise ValueError("Unknown RSA signature hash: '{0}'".\
format(unknownSigHash))
unknownRSAPad = [val for val in other.rsaSchemes
if val not in RSA_SCHEMES]
if unknownRSAPad:
raise ValueError("Unknown RSA padding mode: '{0}'".\
format(unknownRSAPad))
unknownDHGroup = [val for val in other.dhGroups
if val not in ALL_DH_GROUP_NAMES]
if unknownDHGroup:
raise ValueError("Unknown FFDHE group name: '{0}'"
.format(unknownDHGroup))
@staticmethod
def _sanityCheckProtocolVersions(other):
"""Check if set protocol version are sane"""
if other.minVersion > other.maxVersion:
raise ValueError("Versions set incorrectly")
if other.minVersion not in ((3, 0), (3, 1), (3, 2), (3, 3)):
raise ValueError("minVersion set incorrectly")
if other.maxVersion not in ((3, 0), (3, 1), (3, 2), (3, 3)):
raise ValueError("maxVersion set incorrectly")
@staticmethod
def _sanityCheckExtensions(other):
"""Check if set extension settings are sane"""
if other.useEncryptThenMAC not in (True, False):
raise ValueError("useEncryptThenMAC can only be True or False")
if other.useExtendedMasterSecret not in (True, False):
raise ValueError("useExtendedMasterSecret must be True or False")
if other.requireExtendedMasterSecret not in (True, False):
raise ValueError("requireExtendedMasterSecret must be True "
"or False")
if other.requireExtendedMasterSecret and \
not other.useExtendedMasterSecret:
raise ValueError("requireExtendedMasterSecret requires "
"useExtendedMasterSecret")
if other.usePaddingExtension not in (True, False):
raise ValueError("usePaddingExtension must be True or False")
def validate(self):
"""
Validate the settings, filter out unsupported ciphersuites and return
a copy of object. Does not modify the original object.
:rtype: HandshakeSettings
:returns: a self-consistent copy of settings
:raises ValueError: when settings are invalid, insecure or unsupported.
"""
other = HandshakeSettings()
other.minKeySize = self.minKeySize
other.maxKeySize = self.maxKeySize
other.cipherNames = self.cipherNames
other.macNames = self.macNames
other.keyExchangeNames = self.keyExchangeNames
other.cipherImplementations = self.cipherImplementations
other.certificateTypes = self.certificateTypes
other.minVersion = self.minVersion
other.maxVersion = self.maxVersion
other.sendFallbackSCSV = self.sendFallbackSCSV
other.useEncryptThenMAC = self.useEncryptThenMAC
other.usePaddingExtension = self.usePaddingExtension
other.rsaSigHashes = self.rsaSigHashes
other.rsaSchemes = self.rsaSchemes
other.eccCurves = self.eccCurves
other.useExtendedMasterSecret = self.useExtendedMasterSecret
other.requireExtendedMasterSecret = self.requireExtendedMasterSecret
other.dhParams = self.dhParams
other.dhGroups = self.dhGroups
other.defaultCurve = self.defaultCurve
if not cipherfactory.tripleDESPresent:
other.cipherNames = [i for i in self.cipherNames if i != "3des"]
if len(other.cipherNames) == 0:
raise ValueError("No supported ciphers")
if len(other.certificateTypes) == 0:
raise ValueError("No supported certificate types")
if not cryptomath.m2cryptoLoaded:
other.cipherImplementations = \
[e for e in other.cipherImplementations if e != "openssl"]
if not cryptomath.pycryptoLoaded:
other.cipherImplementations = \
[e for e in other.cipherImplementations if e != "pycrypto"]
if len(other.cipherImplementations) == 0:
raise ValueError("No supported cipher implementations")
self._sanityCheckKeySizes(other)
self._sanityCheckPrimitivesNames(other)
self._sanityCheckProtocolVersions(other)
self._sanityCheckExtensions(other)
if other.maxVersion < (3,3):
# No sha-2 and AEAD pre TLS 1.2
other.macNames = [e for e in self.macNames if \
e == "sha" or e == "md5"]
if len(other.rsaSigHashes) == 0 and other.maxVersion >= (3, 3):
raise ValueError("TLS 1.2 requires signature algorithms to be set")
if other.dhParams and (len(other.dhParams) != 2 or
not isinstance(other.dhParams[0], int_types) or
not isinstance(other.dhParams[1], int_types)):
raise ValueError("DH parameters need to be a tuple of integers")
return other
def getCertificateTypes(self):
"""Get list of certificate types as IDs"""
ret = []
for ct in self.certificateTypes:
if ct == "x509":
ret.append(CertificateType.x509)
else:
raise AssertionError()
return ret
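# Illustrative usage sketch (not part of tlslite itself): build settings,
# narrow the cipher list, and validate before handing the object to a
# connection.  The cipher and version values come from the lists above.
if __name__ == "__main__":
    _settings = HandshakeSettings()
    _settings.cipherNames = ["aes256gcm", "aes128gcm"]
    _settings.minVersion = (3, 3)
    _validated = _settings.validate()
    print(_validated.cipherNames, _validated.minVersion, _validated.maxVersion)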
<reponame>caltechlibrary/bun
'''
cli.py: command-line interface class for Bun
Authors
-------
<NAME> <<EMAIL>> -- Caltech Library
Copyright
---------
Copyright (c) 2020-2021 by the California Institute of Technology. This code
is open-source software released under a 3-clause BSD license. Please see the
file "LICENSE" for more information.
'''
from commonpy.string_utils import antiformat
import getpass
from queue import Queue
from rich import box
from rich.box import HEAVY, DOUBLE_EDGE, ASCII
from rich.console import Console
from rich.panel import Panel
from rich.style import Style
from rich.theme import Theme
import shutil
import sys
if __debug__:
from sidetrack import log
from .base import UIBase
# Constants.
# .............................................................................
# I haven't found a reasonable way to switch colors based on whether the
# user's terminal background color is dark or light -- there seems to be no
# universal way to get that information for every terminal emulator, due to
# how they are implemented. So, the following is an attempt to pick a single
# set of colors that will work on both dark and bright color backgrounds.
# The switch on Windows-versus-other is because when testing on Windows, I
# get noticeably different color shades and brightness if I use cmd.exe vs
# Cmder, and those are different *again* from my iTerm2 defaults on macOS.
# So here I'm trying to find some compromise that will work in most cases.
if sys.platform.startswith('win'):
# Note: Microsoft's Terminal (and I guess some others on Windows) can't show
# bold (2021-06-29). C.f. https://github.com/microsoft/terminal/issues/109
# The following style still uses bold in case that changes in the future.
_CLI_THEME = Theme({
'info' : 'green3',
'warn' : 'orange1',
'warning' : 'orange1',
'alert' : 'red',
'alert_fatal' : 'bold red',
'fatal' : 'bold red',
'standout' : 'bold dark_sea_green2',
'banner' : 'green3',
})
else:
_CLI_THEME = Theme({
'info' : 'dark_sea_green4',
'warn' : 'orange1',
'warning' : 'orange1',
'alert' : 'red',
'alert_fatal' : 'bold red',
'fatal' : 'bold red',
'standout' : 'bold chartreuse3',
'banner' : 'dark_sea_green4',
})
# Exported classes.
# .............................................................................
class CLI(UIBase):
'''Command-line interface.'''
def __init__(self, name, subtitle, show_banner, use_gui, use_color, be_quiet):
super().__init__(name, subtitle, show_banner, use_gui, use_color, be_quiet)
if __debug__: log('initializing CLI')
self._started = False
# If another thread was eager to send messages before we finished
# initialization, messages will get queued up on this internal queue.
self._queue = Queue()
# Initialize output configuration.
self._console = Console(theme = _CLI_THEME,
color_system = "auto" if use_color else None)
if show_banner and not be_quiet:
# We need the plain_text version in any case, to calculate length.
subtitle_part = f': {subtitle}' if subtitle else ''
plain_text = f'Welcome to {name}{subtitle_part}'
fancy_text = f'Welcome to [standout]{name}[/]{subtitle_part}'
text = fancy_text if use_color else plain_text
terminal_width = shutil.get_terminal_size().columns or 80
odd_adjustment = 0 if (terminal_width % 2 == 0) else 2
padding = (terminal_width - len(plain_text) - 2 - odd_adjustment) // 2
# Queueing up this message now will make it the 1st thing printed.
box_style = DOUBLE_EDGE if use_color else ASCII
self._print_or_queue(Panel(text, style = 'banner', box = box_style,
padding = (0, padding)), style = 'info')
def start(self):
'''Start the user interface.'''
if __debug__: log('starting CLI')
while not self._queue.empty():
(text, style) = self._queue.get()
self._console.print(text, style = style, highlight = False)
sys.stdout.flush()
self._started = True
def stop(self):
'''Stop the user interface.'''
pass
def _print_or_queue(self, text, style):
if self._started:
if __debug__: log(antiformat(text))
self._console.print(text, style = style, highlight = False)
else:
if __debug__: log(f'queueing message "{antiformat(text)}"')
self._queue.put((text, style))
def inform(self, text, *args, **kwargs):
'''Print an informational message.
By default, the message will not be printed if the UI has been given
the "quiet" flag. However, if this method is passed the keyword
argument "force" with a value of True, then the "quiet" setting will
be overridden and the message printed anyway.'''
if ('force' in kwargs and kwargs['force']) or not self._be_quiet:
self._print_or_queue(text.format(*args), 'info')
else:
if __debug__: log(text, *args)
def warn(self, text, *args):
'''Print a nonfatal, noncritical warning message.'''
self._print_or_queue(text.format(*args), style = 'warn')
def alert(self, text, *args):
'''Print a message reporting an error.'''
self._print_or_queue(text.format(*args), style = 'alert')
def alert_fatal(self, text, *args, **kwargs):
'''Print a message reporting a fatal error.
This method returns after execution and does not force an exit of
the application. In that sense it mirrors the behavior of the GUI
version of alert_fatal(...), which also returns, but unlike the GUI
version, this method does not stop the user interface (because in the
CLI case, there is nothing equivalent to a GUI to shut down).
'''
text += '\n' + kwargs['details'] if 'details' in kwargs else ''
self._print_or_queue(text.format(*args), style = 'fatal')
def confirm(self, question):
'''Ask a yes/no question of the user, on the command line.'''
return input(f'{question} (y/n) ').startswith(('y', 'Y'))
def file_selection(self, operation_type, question, pattern):
'''Ask the user to type in a file path.'''
return input(operation_type.capitalize() + ' ' + question + ': ')
def login_details(self, prompt, user = None, pswd = None):
'''Return a tuple of user, password, and a Boolean indicating
whether the user cancelled the dialog. If 'user' is provided, then
this method offers that as a default for the user. If both 'user'
and 'pswd' are provided, both the user and password are offered as
defaults but the password is not shown to the user. If the user
responds with empty strings, the values returned are '' and not None.
'''
try:
text = (prompt + ' [default: ' + user + ']: ') if user else (prompt + ': ')
input_user = input(text)
if len(input_user) == 0:
input_user = user
hidden = ' [default: ' + '*'*len(pswd) + ']' if pswd else ''
text = 'Password' + (' for "' + user + '"' if user else '') + hidden + ': '
input_pswd = _password(text)
if len(input_pswd) == 0:
input_pswd = pswd
final_user = '' if input_user is None else input_user
final_pswd = '' if input_pswd is None else input_pswd
return final_user, final_pswd, False
except KeyboardInterrupt:
return user, pswd, True
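    # Usage sketch (illustrative; `ui` stands for a CLI instance created elsewhere):
    #   user, pswd, cancelled = ui.login_details('User name', user = 'alice')
    #   if cancelled:
    #       ...handle the Ctrl-C case...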
def validated_input(self, message, default_value, is_valid):
'''Get validated input from the user, optionally with a default value.'''
while True:
if __debug__: log(f'asking user: "{message} [{default_value}]"')
default = (' [' + default_value + ']') if default_value else ''
value = input(message + default + ': ')
if default_value and value == '':
if __debug__: log(f'user chose default value "{default_value}"')
return default_value
elif is_valid(value):
if __debug__: log(f'got "{value}" from user')
return value
            else:
                self.alert(f'"{value}" does not appear valid for {message}')
                # Fall through to the top of the loop and ask again.
# Miscellaneous utilities
# .............................................................................
def _password(prompt):
# If it's a tty, use the version that doesn't echo the password.
if sys.stdin.isatty():
return getpass.getpass(prompt)
else:
sys.stdout.write(prompt)
sys.stdout.flush()
return sys.stdin.readline().rstrip()
|
"""
About the Callhome Egyptian Arabic Corpus
The CALLHOME Egyptian Arabic corpus of telephone speech consists of 120 unscripted
telephone conversations between native speakers of Egyptian Colloquial Arabic (ECA),
the spoken variety of Arabic found in Egypt. The dialect of ECA that this
dictionary represents is Cairene Arabic.
This recipe uses the speech and transcripts available through LDC. In addition,
an Egyptian Arabic phonetic lexicon (available via LDC) is used to get
word-to-phoneme mappings for the vocabulary. These datasets are:
Speech : LDC97S45
Transcripts : LDC97T19
Lexicon : LDC99L22
"""
from decimal import Decimal
from pathlib import Path
from typing import Dict, Optional, Union
from tqdm.auto import tqdm
from lhotse import Recording, RecordingSet, SupervisionSegment, SupervisionSet
from lhotse.qa import fix_manifests, validate_recordings_and_supervisions
from lhotse.utils import Pathlike, check_and_rglob
def prepare_callhome_egyptian(
audio_dir: Pathlike,
transcript_dir: Pathlike,
output_dir: Optional[Pathlike] = None,
absolute_paths: bool = False,
) -> Dict[str, Union[RecordingSet, SupervisionSet]]:
"""
    Prepare manifests for the CALLHOME Egyptian Arabic corpus.
    We create two manifests: one with recordings, and the other one with text supervisions.
    :param audio_dir: Path to the speech data (``LDC97S45``).
    :param transcript_dir: Path to the transcripts (``LDC97T19``), i.e. the directory
        containing ``callhome_arabic_trans_970711``.
:param output_dir: Directory where the manifests should be written. Can be omitted to avoid writing.
:param absolute_paths: Whether to return absolute or relative (to the corpus dir) paths for recordings.
:return: A dict with manifests. The keys are: ``{'recordings', 'supervisions'}``.
"""
audio_dir = Path(audio_dir)
transcript_dir = Path(transcript_dir)
manifests = {}
for split in ["train", "devtest", "evaltest"]:
audio_paths = check_and_rglob(
# The LDC distribution has a typo.
audio_dir / "callhome/arabic" / split.replace("evaltest", "evltest"),
"*.sph",
)
recordings = RecordingSet.from_recordings(
Recording.from_file(p, relative_path_depth=None if absolute_paths else 4)
for p in tqdm(audio_paths)
)
transcript_paths = check_and_rglob(
transcript_dir / f"callhome_arabic_trans_970711/transcrp/{split}/roman",
"*.txt",
)
# TODO: Add text normalization like in Kaldi recipe.
# Not doing this right now as it's not needed for VAD/diarization...
supervisions = []
for p in transcript_paths:
idx = 0
for line in p.read_text().splitlines():
line = line.strip()
if not line:
continue
recording_id = p.stem
# example line:
# 19.33 21.18 B: %ah Tayyib
start, end, spk, text = line.split(maxsplit=3)
spk = spk.replace(":", "")
duration = float(Decimal(end) - Decimal(start))
if duration <= 0:
continue
start = float(start)
supervisions.append(
SupervisionSegment(
id=f"{recording_id}_{idx}",
recording_id=recording_id,
start=start,
duration=duration,
speaker=f"{recording_id}_{spk}",
text=text,
)
)
idx += 1
supervisions = SupervisionSet.from_segments(supervisions)
recordings, supervisions = fix_manifests(recordings, supervisions)
validate_recordings_and_supervisions(recordings, supervisions)
if output_dir is not None:
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
recordings.to_json(output_dir / f"recordings_{split}.json")
supervisions.to_json(output_dir / f"supervisions_{split}.json")
manifests[split] = {"recordings": recordings, "supervisions": supervisions}
return manifests
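if __name__ == "__main__":
    # Minimal usage sketch (illustrative; the paths below are placeholders for local
    # copies of LDC97S45 / LDC97T19, not files shipped with this recipe):
    manifests = prepare_callhome_egyptian(
        audio_dir="/data/LDC97S45",
        transcript_dir="/data/LDC97T19",
        output_dir="data/manifests",
    )
    for split, m in manifests.items():
        print(split, len(m["recordings"]), "recordings,", len(m["supervisions"]), "supervisions")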
|
<reponame>leits/openprocurement.tender.openeu
from uuid import uuid4
from datetime import timedelta
from iso8601 import parse_date
from pyramid.security import Allow
from zope.interface import implementer
from schematics.types import StringType, MD5Type, BooleanType
from schematics.types.compound import ModelType
from schematics.types.serializable import serializable
from schematics.transforms import blacklist, whitelist
from schematics.exceptions import ValidationError
from openprocurement.api.models import (
ITender, TZ, Model, Address, Period, IsoDateTimeType, ListType,
Tender as BaseTender, Identifier as BaseIdentifier, Bid as BaseBid,
Contract as BaseContract, Cancellation as BaseCancellation, Lot as BaseLot,
Document as BaseDocument, ContactPoint as BaseContactPoint,
LotValue as BaseLotValue, ComplaintModelType as BaseComplaintModelType,
plain_role, create_role, edit_role, view_role, listing_role, draft_role,
auction_view_role, auction_post_role, auction_patch_role, enquiries_role,
auction_role, chronograph_role, chronograph_view_role, view_bid_role,
Administrator_bid_role, Administrator_role, schematics_default_role,
schematics_embedded_role, get_now, embedded_lot_role, default_lot_role,
calc_auction_end_time, get_tender, validate_lots_uniq,
validate_cpv_group, validate_items_uniq, rounding_shouldStartAfter,
)
from openprocurement.tender.openua.utils import (
calculate_business_date, BLOCK_COMPLAINT_STATUS, PENDING_COMPLAINT_STATUS,
)
from openprocurement.tender.openua.models import (
Complaint as BaseComplaint, Award as BaseAward, Item as BaseItem,
PeriodStartEndRequired, SifterListType, COMPLAINT_SUBMIT_TIME,
EnquiryPeriod, ENQUIRY_STAND_STILL_TIME, AUCTION_PERIOD_TIME,
calculate_normalized_date,
)
eu_role = blacklist('enquiryPeriod', 'qualifications')
edit_role_eu = edit_role + eu_role
create_role_eu = create_role + eu_role
pre_qualifications_role = (blacklist('owner_token', '_attachments', 'revisions') + schematics_embedded_role)
eu_auction_role = auction_role
TENDERING_DAYS = 30
TENDERING_DURATION = timedelta(days=TENDERING_DAYS)
TENDERING_AUCTION = timedelta(days=35)
QUESTIONS_STAND_STILL = timedelta(days=10)
PREQUALIFICATION_COMPLAINT_STAND_STILL = timedelta(days=5)
COMPLAINT_STAND_STILL = timedelta(days=10)
def bids_validation_wrapper(validation_func):
def validator(klass, data, value):
if data['status'] in ('deleted', 'invalid', 'draft'):
# skip not valid bids
return
tender = data['__parent__']
request = tender.__parent__.request
if request.method == "PATCH" and isinstance(tender, Tender) and request.authenticated_role == "tender_owner":
# disable bids validation on tender PATCH requests as tender bids will be invalidated
return
return validation_func(klass, data, value)
return validator
class ComplaintModelType(BaseComplaintModelType):
view_claim_statuses = ['active.tendering', 'active.pre-qualification', 'active.pre-qualification.stand-still', 'active.auction']
class Item(BaseItem):
"""A good, service, or work to be contracted."""
description_en = StringType(required=True, min_length=1)
class Identifier(BaseIdentifier):
legalName_en = StringType(required=True, min_length=1)
class ContactPoint(BaseContactPoint):
name_en = StringType(required=True, min_length=1)
availableLanguage = StringType(required=True, choices=['uk', 'en', 'ru'], default='uk')
class Organization(Model):
"""An organization."""
class Options:
roles = {
'embedded': schematics_embedded_role,
'view': schematics_default_role,
}
name = StringType(required=True)
name_en = StringType(required=True, min_length=1)
name_ru = StringType()
identifier = ModelType(Identifier, required=True)
additionalIdentifiers = ListType(ModelType(Identifier))
address = ModelType(Address, required=True)
contactPoint = ModelType(ContactPoint, required=True)
additionalContactPoints = ListType(ModelType(ContactPoint, required=True),
required=False)
class ProcuringEntity(Organization):
"""An organization."""
class Options:
roles = {
'embedded': schematics_embedded_role,
'view': schematics_default_role,
'edit_active.tendering': schematics_default_role + blacklist("kind"),
}
kind = StringType(choices=['general', 'special', 'defense', 'other'])
class Document(BaseDocument):
language = StringType(required=True, choices=['uk', 'en', 'ru'], default='uk')
class ConfidentialDocument(Document):
""" Confidential Document """
class Options:
roles = {
'edit': blacklist('id', 'url', 'datePublished', 'dateModified', ''),
'embedded': schematics_embedded_role,
'view': (blacklist('revisions') + schematics_default_role),
'restricted_view': (blacklist('revisions', 'url') + schematics_default_role),
'revisions': whitelist('url', 'dateModified'),
}
confidentiality = StringType(choices=['public', 'buyerOnly'], default='public')
confidentialityRationale = StringType()
def validate_confidentialityRationale(self, data, val):
if data['confidentiality'] != 'public':
if not val:
raise ValidationError(u"confidentialityRationale is required")
elif len(val) < 30:
raise ValidationError(u"confidentialityRationale should contain at least 30 characters")
class Contract(BaseContract):
documents = ListType(ModelType(Document), default=list())
items = ListType(ModelType(Item))
class Complaint(BaseComplaint):
class Options:
roles = {
'active.pre-qualification': view_bid_role,
'active.pre-qualification.stand-still': view_bid_role,
}
documents = ListType(ModelType(Document), default=list())
def serialize(self, role=None, context=None):
if role == 'view' and self.type == 'claim' and get_tender(self).status in ['active.tendering', 'active.pre-qualification', 'active.pre-qualification.stand-still', 'active.auction']:
role = 'view_claim'
return super(Complaint, self).serialize(role=role, context=context)
class Cancellation(BaseCancellation):
class Options:
roles = {
'create': whitelist('reason', 'status', 'reasonType', 'cancellationOf', 'relatedLot'),
'edit': whitelist('status', 'reasonType'),
'embedded': schematics_embedded_role,
'view': schematics_default_role,
}
documents = ListType(ModelType(Document), default=list())
reasonType = StringType(choices=['cancelled', 'unsuccessful'], default='cancelled')
class TenderAuctionPeriod(Period):
"""The auction period."""
@serializable(serialize_when_none=False)
def shouldStartAfter(self):
if self.endDate:
return
tender = self.__parent__
if tender.lots or tender.status not in ['active.tendering', 'active.pre-qualification.stand-still', 'active.auction']:
return
start_after = None
if tender.status == 'active.tendering' and tender.tenderPeriod.endDate:
start_after = calculate_business_date(tender.tenderPeriod.endDate, TENDERING_AUCTION, tender)
elif self.startDate and get_now() > calc_auction_end_time(tender.numberOfBids, self.startDate):
start_after = calc_auction_end_time(tender.numberOfBids, self.startDate)
elif tender.qualificationPeriod and tender.qualificationPeriod.endDate:
start_after = tender.qualificationPeriod.endDate
if start_after:
return rounding_shouldStartAfter(start_after, tender).isoformat()
class LotAuctionPeriod(Period):
"""The auction period."""
@serializable(serialize_when_none=False)
def shouldStartAfter(self):
if self.endDate:
return
tender = get_tender(self)
lot = self.__parent__
if tender.status not in ['active.tendering', 'active.pre-qualification.stand-still', 'active.auction'] or lot.status != 'active':
return
start_after = None
if tender.status == 'active.tendering' and tender.tenderPeriod.endDate:
start_after = calculate_business_date(tender.tenderPeriod.endDate, TENDERING_AUCTION, tender)
elif self.startDate and get_now() > calc_auction_end_time(lot.numberOfBids, self.startDate):
start_after = calc_auction_end_time(lot.numberOfBids, self.startDate)
elif tender.qualificationPeriod and tender.qualificationPeriod.endDate:
start_after = tender.qualificationPeriod.endDate
if start_after:
return rounding_shouldStartAfter(start_after, tender).isoformat()
class Lot(BaseLot):
class Options:
roles = {
'create': whitelist('id', 'title', 'title_en', 'title_ru', 'description', 'description_en', 'description_ru', 'value', 'guarantee', 'minimalStep'),
'edit': whitelist('title', 'title_en', 'title_ru', 'description', 'description_en', 'description_ru', 'value', 'guarantee', 'minimalStep'),
'embedded': embedded_lot_role,
'view': default_lot_role,
'default': default_lot_role,
'auction_view': default_lot_role,
'auction_patch': whitelist('id', 'auctionUrl'),
'chronograph': whitelist('id', 'auctionPeriod'),
'chronograph_view': whitelist('id', 'auctionPeriod', 'numberOfBids', 'status'),
}
auctionPeriod = ModelType(LotAuctionPeriod, default={})
@serializable
def numberOfBids(self):
"""A property that is serialized by schematics exports."""
bids = [
bid
for bid in self.__parent__.bids
if self.id in [i.relatedLot for i in bid.lotValues if i.status in ["active", "pending"]] and bid.status in ["active", "pending"]
]
return len(bids)
class LotValue(BaseLotValue):
class Options:
roles = {
'create': whitelist('value', 'relatedLot', 'subcontractingDetails'),
'edit': whitelist('value', 'relatedLot', 'subcontractingDetails'),
'auction_view': whitelist('value', 'date', 'relatedLot', 'participationUrl', 'status',),
}
subcontractingDetails = StringType()
status = StringType(choices=['pending', 'active', 'unsuccessful'],
default='pending')
def validate_value(self, data, value):
if value and isinstance(data['__parent__'], Model) and (data['__parent__'].status not in ('invalid', 'deleted')) and data['relatedLot']:
lots = [i for i in get_tender(data['__parent__']).lots if i.id == data['relatedLot']]
if not lots:
return
lot = lots[0]
if lot.value.amount < value.amount:
raise ValidationError(u"value of bid should be less than value of lot")
if lot.get('value').currency != value.currency:
raise ValidationError(u"currency of bid should be identical to currency of value of lot")
if lot.get('value').valueAddedTaxIncluded != value.valueAddedTaxIncluded:
raise ValidationError(u"valueAddedTaxIncluded of bid should be identical to valueAddedTaxIncluded of value of lot")
def validate_relatedLot(self, data, relatedLot):
if isinstance(data['__parent__'], Model) and (data['__parent__'].status not in ('invalid', 'deleted')) and relatedLot not in [i.id for i in get_tender(data['__parent__']).lots]:
raise ValidationError(u"relatedLot should be one of lots")
class Bid(BaseBid):
class Options:
roles = {
'Administrator': Administrator_bid_role,
'embedded': view_bid_role,
'view': view_bid_role,
'create': whitelist('value', 'tenderers', 'parameters', 'lotValues', 'status', 'selfQualified', 'selfEligible', 'subcontractingDetails'),
'edit': whitelist('value', 'tenderers', 'parameters', 'lotValues', 'status', 'subcontractingDetails'),
'auction_view': whitelist('value', 'lotValues', 'id', 'date', 'parameters', 'participationUrl', 'status'),
'auction_post': whitelist('value', 'lotValues', 'id', 'date'),
'auction_patch': whitelist('id', 'lotValues', 'participationUrl'),
'active.enquiries': whitelist(),
'active.tendering': whitelist(),
'active.pre-qualification': whitelist('id', 'status', 'documents', 'eligibilityDocuments', 'tenderers'),
'active.pre-qualification.stand-still': whitelist('id', 'status', 'documents', 'eligibilityDocuments', 'tenderers'),
'active.auction': whitelist('id', 'status', 'documents', 'eligibilityDocuments', 'tenderers'),
'active.qualification': view_bid_role,
'active.awarded': view_bid_role,
'complete': view_bid_role,
'unsuccessful': view_bid_role,
'bid.unsuccessful': whitelist('id', 'status', 'tenderers', 'documents', 'eligibilityDocuments', 'parameters', 'selfQualified', 'selfEligible', 'subcontractingDetails'),
'cancelled': view_bid_role,
'invalid': whitelist('id', 'status'),
'invalid.pre-qualification': whitelist('id', 'status', 'documents', 'eligibilityDocuments', 'tenderers'),
'deleted': whitelist('id', 'status'),
}
documents = ListType(ModelType(ConfidentialDocument), default=list())
financialDocuments = ListType(ModelType(ConfidentialDocument), default=list())
eligibilityDocuments = ListType(ModelType(ConfidentialDocument), default=list())
qualificationDocuments = ListType(ModelType(ConfidentialDocument), default=list())
lotValues = ListType(ModelType(LotValue), default=list())
selfQualified = BooleanType(required=True, choices=[True])
selfEligible = BooleanType(required=True, choices=[True])
subcontractingDetails = StringType()
status = StringType(choices=['draft','pending', 'active', 'invalid', 'invalid.pre-qualification', 'unsuccessful', 'deleted'],
default='pending')
def serialize(self, role=None):
if role and role != 'create' and self.status in ['invalid', 'invalid.pre-qualification', 'deleted']:
role = self.status
elif role and role != 'create' and self.status == 'unsuccessful':
role = 'bid.unsuccessful'
return super(Bid, self).serialize(role)
@serializable(serialized_name="status")
def serialize_status(self):
if self.status in ['draft', 'invalid', 'deleted'] or self.__parent__.status in ['active.tendering', 'cancelled']:
return self.status
if self.__parent__.lots:
active_lots = [lot.id for lot in self.__parent__.lots if lot.status in ('active', 'complete',)]
if not self.lotValues:
return 'invalid'
elif [i.relatedLot for i in self.lotValues if i.status == 'pending' and i.relatedLot in active_lots]:
return 'pending'
elif [i.relatedLot for i in self.lotValues if i.status == 'active' and i.relatedLot in active_lots]:
return 'active'
else:
return 'unsuccessful'
return self.status
@bids_validation_wrapper
def validate_value(self, data, value):
BaseBid._validator_functions['value'](self, data, value)
@bids_validation_wrapper
def validate_lotValues(self, data, lotValues):
BaseBid._validator_functions['lotValues'](self, data, lotValues)
@bids_validation_wrapper
def validate_participationUrl(self, data, participationUrl):
BaseBid._validator_functions['participationUrl'](self, data, participationUrl)
@bids_validation_wrapper
def validate_parameters(self, data, parameters):
BaseBid._validator_functions['parameters'](self, data, parameters)
class Award(BaseAward):
""" An award for the given procurement. There may be more than one award
per contracting process e.g. because the contract is split amongst
different providers, or because it is a standing offer.
"""
complaints = ListType(ModelType(Complaint), default=list())
items = ListType(ModelType(Item))
documents = ListType(ModelType(Document), default=list())
qualified = BooleanType()
eligible = BooleanType()
def validate_qualified(self, data, qualified):
pass
def validate_eligible(self, data, eligible):
pass
class Qualification(Model):
""" Pre-Qualification """
class Options:
roles = {
'create': blacklist('id', 'status', 'documents', 'date'),
'edit': whitelist('status', 'qualified', 'eligible', 'title', 'title_en', 'title_ru',
'description', 'description_en', 'description_ru'),
'embedded': schematics_embedded_role,
'view': schematics_default_role,
}
title = StringType()
title_en = StringType()
title_ru = StringType()
description = StringType()
description_en = StringType()
description_ru = StringType()
id = MD5Type(required=True, default=lambda: uuid4().hex)
bidID = StringType(required=True)
lotID = MD5Type()
status = StringType(choices=['pending', 'active', 'unsuccessful', 'cancelled'], default='pending')
date = IsoDateTimeType()
documents = ListType(ModelType(Document), default=list())
complaints = ListType(ModelType(Complaint), default=list())
qualified = BooleanType(default=False)
eligible = BooleanType(default=False)
def validate_qualified(self, data, qualified):
if data['status'] == 'active' and not qualified:
raise ValidationError(u'This field is required.')
def validate_eligible(self, data, eligible):
if data['status'] == 'active' and not eligible:
raise ValidationError(u'This field is required.')
def validate_lotID(self, data, lotID):
if isinstance(data['__parent__'], Model):
if not lotID and data['__parent__'].lots:
raise ValidationError(u'This field is required.')
if lotID and lotID not in [i.id for i in data['__parent__'].lots]:
raise ValidationError(u"lotID should be one of lots")
@implementer(ITender)
class Tender(BaseTender):
""" OpenEU tender model """
class Options:
roles = {
'plain': plain_role,
'create': create_role_eu,
'edit': edit_role_eu,
'edit_draft': edit_role_eu,
'edit_active.tendering': edit_role_eu,
'edit_active.pre-qualification': whitelist('status'),
'edit_active.pre-qualification.stand-still': whitelist(),
'edit_active.auction': whitelist(),
'edit_active.qualification': whitelist(),
'edit_active.awarded': whitelist(),
'edit_complete': whitelist(),
'edit_unsuccessful': whitelist(),
'edit_cancelled': whitelist(),
'view': view_role,
'listing': listing_role,
'auction_view': auction_view_role,
'auction_post': auction_post_role,
'auction_patch': auction_patch_role,
'draft': enquiries_role,
'active.tendering': enquiries_role,
'active.pre-qualification': pre_qualifications_role,
'active.pre-qualification.stand-still': pre_qualifications_role,
'active.auction': pre_qualifications_role,
'active.qualification': view_role,
'active.awarded': view_role,
'complete': view_role,
'unsuccessful': view_role,
'cancelled': view_role,
'chronograph': chronograph_role,
'chronograph_view': chronograph_view_role,
'Administrator': Administrator_role,
'default': schematics_default_role,
'contracting': whitelist('doc_id', 'owner'),
}
procurementMethodType = StringType(default="aboveThresholdEU")
title_en = StringType(required=True, min_length=1)
enquiryPeriod = ModelType(EnquiryPeriod, required=False)
tenderPeriod = ModelType(PeriodStartEndRequired, required=True)
auctionPeriod = ModelType(TenderAuctionPeriod, default={})
documents = ListType(ModelType(Document), default=list()) # All documents and attachments related to the tender.
items = ListType(ModelType(Item), required=True, min_size=1, validators=[validate_cpv_group, validate_items_uniq]) # The goods and services to be purchased, broken into line items wherever possible. Items should not be duplicated, but a quantity of 2 specified instead.
complaints = ListType(ComplaintModelType(Complaint), default=list())
contracts = ListType(ModelType(Contract), default=list())
cancellations = ListType(ModelType(Cancellation), default=list())
awards = ListType(ModelType(Award), default=list())
procuringEntity = ModelType(ProcuringEntity, required=True) # The entity managing the procurement, which may be different from the buyer who is paying / using the items being procured.
bids = SifterListType(ModelType(Bid), default=list(), filter_by='status', filter_in_values=['invalid', 'invalid.pre-qualification', 'deleted']) # A list of all the companies who entered submissions for the tender.
qualifications = ListType(ModelType(Qualification), default=list())
qualificationPeriod = ModelType(Period)
lots = ListType(ModelType(Lot), default=list(), validators=[validate_lots_uniq])
status = StringType(choices=['draft', 'active.tendering', 'active.pre-qualification', 'active.pre-qualification.stand-still', 'active.auction',
'active.qualification', 'active.awarded', 'complete', 'cancelled', 'unsuccessful'], default='active.tendering')
create_accreditation = 3
edit_accreditation = 4
procuring_entity_kinds = ['general', 'special', 'defense']
def __acl__(self):
acl = [
(Allow, '{}_{}'.format(i.owner, i.owner_token), 'create_qualification_complaint')
for i in self.bids
if i.status in ['active', 'unsuccessful']
]
acl.extend([
(Allow, '{}_{}'.format(i.owner, i.owner_token), 'create_award_complaint')
for i in self.bids
if i.status == 'active'
])
acl.extend([
(Allow, '{}_{}'.format(self.owner, self.owner_token), 'edit_tender'),
(Allow, '{}_{}'.format(self.owner, self.owner_token), 'upload_tender_documents'),
(Allow, '{}_{}'.format(self.owner, self.owner_token), 'edit_complaint'),
])
return acl
def initialize(self):
endDate = calculate_business_date(self.tenderPeriod.endDate, -QUESTIONS_STAND_STILL, self)
self.enquiryPeriod = EnquiryPeriod(dict(startDate=self.tenderPeriod.startDate,
endDate=endDate,
invalidationDate=self.enquiryPeriod and self.enquiryPeriod.invalidationDate,
clarificationsUntil=calculate_business_date(endDate, ENQUIRY_STAND_STILL_TIME, self, True)))
now = get_now()
self.date = now
if self.lots:
for lot in self.lots:
lot.date = now
@serializable(serialized_name="enquiryPeriod", type=ModelType(EnquiryPeriod))
def tender_enquiryPeriod(self):
endDate = calculate_business_date(self.tenderPeriod.endDate, -QUESTIONS_STAND_STILL, self)
return EnquiryPeriod(dict(startDate=self.tenderPeriod.startDate,
endDate=endDate,
invalidationDate=self.enquiryPeriod and self.enquiryPeriod.invalidationDate,
clarificationsUntil=calculate_business_date(endDate, ENQUIRY_STAND_STILL_TIME, self, True)))
@serializable(type=ModelType(Period))
def complaintPeriod(self):
normalized_end = calculate_normalized_date(self.tenderPeriod.endDate, self)
return Period(dict(startDate=self.tenderPeriod.startDate, endDate=calculate_business_date(normalized_end, -COMPLAINT_SUBMIT_TIME, self)))
@serializable(serialize_when_none=False)
def next_check(self):
now = get_now()
checks = []
if self.status == 'active.tendering' and self.tenderPeriod.endDate and \
not any([i.status in BLOCK_COMPLAINT_STATUS for i in self.complaints]) and \
not any([i.id for i in self.questions if not i.answer]):
checks.append(self.tenderPeriod.endDate.astimezone(TZ))
elif self.status == 'active.pre-qualification.stand-still' and self.qualificationPeriod and self.qualificationPeriod.endDate and not any([
i.status in PENDING_COMPLAINT_STATUS
for q in self.qualifications
for i in q.complaints
]):
checks.append(self.qualificationPeriod.endDate.astimezone(TZ))
elif not self.lots and self.status == 'active.auction' and self.auctionPeriod and self.auctionPeriod.startDate and not self.auctionPeriod.endDate:
if now < self.auctionPeriod.startDate:
checks.append(self.auctionPeriod.startDate.astimezone(TZ))
elif now < calc_auction_end_time(self.numberOfBids, self.auctionPeriod.startDate).astimezone(TZ):
checks.append(calc_auction_end_time(self.numberOfBids, self.auctionPeriod.startDate).astimezone(TZ))
elif self.lots and self.status == 'active.auction':
for lot in self.lots:
if lot.status != 'active' or not lot.auctionPeriod or not lot.auctionPeriod.startDate or lot.auctionPeriod.endDate:
continue
if now < lot.auctionPeriod.startDate:
checks.append(lot.auctionPeriod.startDate.astimezone(TZ))
elif now < calc_auction_end_time(lot.numberOfBids, lot.auctionPeriod.startDate).astimezone(TZ):
checks.append(calc_auction_end_time(lot.numberOfBids, lot.auctionPeriod.startDate).astimezone(TZ))
elif not self.lots and self.status == 'active.awarded':
standStillEnds = [
a.complaintPeriod.endDate.astimezone(TZ)
for a in self.awards
if a.complaintPeriod.endDate
]
if standStillEnds:
standStillEnd = max(standStillEnds)
if standStillEnd > now:
checks.append(standStillEnd)
elif self.lots and self.status in ['active.qualification', 'active.awarded']:
lots_ends = []
for lot in self.lots:
if lot['status'] != 'active':
continue
lot_awards = [i for i in self.awards if i.lotID == lot.id]
standStillEnds = [
a.complaintPeriod.endDate.astimezone(TZ)
for a in lot_awards
if a.complaintPeriod.endDate
]
if not standStillEnds:
continue
standStillEnd = max(standStillEnds)
if standStillEnd > now:
lots_ends.append(standStillEnd)
if lots_ends:
checks.append(min(lots_ends))
return min(checks).isoformat() if checks else None
def validate_tenderPeriod(self, data, period):
# if data['_rev'] is None when tender was created just now
if not data['_rev'] and calculate_business_date(get_now(), -timedelta(minutes=10)) >= period.startDate:
raise ValidationError(u"tenderPeriod.startDate should be in greater than current date")
if period and calculate_business_date(period.startDate, TENDERING_DURATION, data) > period.endDate:
raise ValidationError(u"tenderPeriod should be greater than {} days".format(TENDERING_DAYS))
@serializable
def numberOfBids(self):
"""A property that is serialized by schematics exports."""
return len([bid for bid in self.bids if bid.status in ("active", "pending",)])
def check_auction_time(self):
if self.auctionPeriod and self.auctionPeriod.startDate and self.auctionPeriod.shouldStartAfter \
and self.auctionPeriod.startDate > calculate_business_date(parse_date(self.auctionPeriod.shouldStartAfter), AUCTION_PERIOD_TIME, self, True):
self.auctionPeriod.startDate = None
for lot in self.lots:
if lot.auctionPeriod and lot.auctionPeriod.startDate and lot.auctionPeriod.shouldStartAfter \
and lot.auctionPeriod.startDate > calculate_business_date(parse_date(lot.auctionPeriod.shouldStartAfter), AUCTION_PERIOD_TIME, self, True):
lot.auctionPeriod.startDate = None
def invalidate_bids_data(self):
self.check_auction_time()
self.enquiryPeriod.invalidationDate = get_now()
for bid in self.bids:
if bid.status not in ["deleted", "draft"]:
bid.status = "invalid"
|
<reponame>deejay1/selena<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from django.db import connection
CLASSIFICATION_ALL = 1
CLASSIFICATION_ONLY_CORE = 2
CLASSIFICATION_NOT_CORE = 3
def _get_index_data_sql(classification, errors_only, services):
classification_sql = ''
if classification == CLASSIFICATION_ONLY_CORE:
classification_sql = 'AND S.is_core_service=1 '
elif classification == CLASSIFICATION_NOT_CORE:
classification_sql = 'AND S.is_core_service=0 '
having_condition = """HAVING SUM(
CASE WHEN SH.tick_failed=1 AND SH.main_probe=0 THEN 1 ELSE 0 END
) > 0""" if errors_only else ''
services_condition = ''
if services:
services_condition = 'AND S.id IN (%s)' % ','.join(
[str(service) for service in services],
)
sql = """
SELECT
S.id,
S.name,
S.url,
S.is_core_service,
SUM(
CASE WHEN SH.response_state>1 THEN 1 ELSE 0 END
) AS have_problems,
ROUND(MIN(SH.response_time), 2) AS min_response_time,
ROUND(MAX(SH.response_time), 2) AS max_response_time,
ROUND(AVG(SH.response_time), 2) AS avg_response_time,
C.sla7days,
C.sla1month,
C.sla3months
FROM services_servicehistory AS SH
JOIN services_service AS S ON S.id=SH.service_id
JOIN services_slacache AS C ON C.service_id=SH.service_id
WHERE SH.created>=%s AND SH.created<=%s AND S.is_active=1 {} {}
GROUP BY S.name
{}
ORDER BY S.order ASC, S.name ASC
""".format(classification_sql, services_condition, having_condition)
return sql
def get_index_data(start_date, end_date, classification=CLASSIFICATION_ALL,
errors_only=False, services=[]):
cursor = connection.cursor()
cursor.execute(
_get_index_data_sql(classification, errors_only, services),
[
start_date.strftime("%Y-%m-%d %H:%M"),
end_date.strftime("%Y-%m-%d %H:%M"),
],
)
desc = cursor.description
for row in cursor.fetchall():
yield dict(zip([col[0] for col in desc], row))
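def _example_core_errors_report(start_date, end_date):
    """Illustrative sketch (not referenced by any view): consume the generator above
    to list core services that registered failed ticks in the given period."""
    rows = get_index_data(
        start_date,
        end_date,
        classification=CLASSIFICATION_ONLY_CORE,
        errors_only=True,
    )
    return [row['name'] for row in rows]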
def _get_probes_for_bar_chart_sql():
return """
SELECT
service_id,
CASE main_probe WHEN 0 THEN id ELSE main_probe END AS probe,
SUM(
CASE WHEN response_state > 1 THEN 1 ELSE 0 END
) AS problems_count,
COUNT(id) AS probes_count,
created
FROM
services_servicehistory
WHERE
service_id = %s AND created >= %s AND created <= %s
GROUP BY probe
ORDER BY created ASC
"""
def _get_probe_color(problems_count, probes_count):
count = problems_count / 2
if problems_count < (probes_count / 2):
green = hex(255 - int(255 * count / probes_count))
green = str(green)[2:]
color = 'ff{0:>02}00'.format(green)
else:
red = hex(255 - int(255 * count / probes_count))
red = str(red)[2:]
color = '{0:>02}0000'.format(red)
return color
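def _demo_probe_color():
    # Illustrative sketch (not used by any view): a mostly-healthy probe set maps into
    # the 'ffXX00' (yellow/orange) range, a mostly-failing one into the 'XX0000' (red) range.
    mostly_ok = _get_probe_color(problems_count=2, probes_count=100)    # -> 'fffd00'
    mostly_bad = _get_probe_color(problems_count=90, probes_count=100)  # -> '8d0000'
    return mostly_ok, mostly_bad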
def get_probes_for_bar_chart(service_id, start_date, end_date):
cursor = connection.cursor()
cursor.execute(
_get_probes_for_bar_chart_sql(),
[service_id, start_date, end_date],
)
desc = cursor.description
for row in cursor.fetchall():
data = dict(zip([col[0] for col in desc], row))
if data['problems_count'] > 0:
data.update({
'color': _get_probe_color(
data['problems_count'],
data['probes_count'],
),
})
yield data
def get_history_items(start_date, stop_date, service_id=None):
cursor = connection.cursor()
params = [
start_date.strftime("%Y-%m-%d 00:00:00"),
stop_date.strftime("%Y-%m-%d 23:59:59"),
]
sql = """
SELECT
SH.service_id,
S.url,
SH.response_state,
SH.response_code,
SH.response_time,
SH.namelookup_time,
SH.connect_time,
SH.pretransfer_time,
SH.starttransfer_time,
SH.redirect_time,
SH.size_download,
SH.speed_download,
SH.redirect_count,
SH.num_connects,
SH.agent_id,
A.name,
SH.created
FROM services_servicehistory AS SH
LEFT JOIN services_service AS S ON S.id = SH.service_id
LEFT JOIN services_agent AS A ON A.id = SH.agent_id
WHERE
SH.response_state > 1 AND SH.created >= %s AND SH.created <= %s
"""
if service_id:
sql = sql + " AND S.id=%s"
params.append(service_id)
cursor.execute(sql, params)
for row in cursor.fetchall():
yield {
'service_id': row[0],
'url': row[1],
'response_state': row[2],
'response_code': row[3],
'response_time': row[4],
'namelookup_time': row[5],
'connect_time': row[6],
'pretransfer_time': row[7],
'starttransfer_time': row[8],
'redirect_time': row[9],
'size_download': row[10],
'speed_download': row[11],
'redirect_count': row[12],
'num_connects': row[13],
'agent_id': row[14],
'agent_name': row[15],
'created': row[16]
}
|
import pickle
import copy
import pathlib
import dash
import math
import datetime as dt
import pandas as pd
import pydriller
pydriller.Commit
# Multi-dropdown options
from controls import COUNTIES, WELL_STATUSES, WELL_TYPES, WELL_COLORS
# Create controls
county_options = [
{"label": str(COUNTIES[county]), "value": str(county)} for county in COUNTIES
]
well_status_options = [
{"label": str(WELL_STATUSES[well_status]), "value": str(well_status)}
for well_status in WELL_STATUSES
]
well_type_options = [
{"label": str(WELL_TYPES[well_type]), "value": str(well_type)}
for well_type in WELL_TYPES
]
# Create global chart template
mapbox_access_token = "<KEY>"
layout = dict(
autosize=True,
automargin=True,
margin=dict(l=30, r=30, b=20, t=40),
hovermode="closest",
plot_bgcolor="#F9F9F9",
paper_bgcolor="#F9F9F9",
legend=dict(font=dict(size=10), orientation="h"),
title="Satellite Overview",
mapbox=dict(
accesstoken=mapbox_access_token,
style="light",
center=dict(lon=-78.05, lat=42.54),
zoom=7,
),
)
# Helper functions
def human_format(num):
if num == 0:
return "0"
magnitude = int(math.log(num, 1000))
mantissa = str(int(num / (1000 ** magnitude)))
return mantissa + ["", "K", "M", "G", "T", "P"][magnitude]
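def _demo_human_format():
    # Illustrative spot checks of the helper above (not wired into the app).
    assert human_format(0) == "0"
    assert human_format(1500) == "1K"       # truncates rather than rounds
    assert human_format(2000000) == "2M"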
def filter_dataframe(df, points, well_statuses, well_types, year_slider):
dff = df[
df["Well_Status"].isin(well_statuses)
& df["Well_Type"].isin(well_types)
& (df["Date_Well_Completed"] > dt.datetime(year_slider[0], 1, 1))
& (df["Date_Well_Completed"] < dt.datetime(year_slider[1], 1, 1))
]
return dff
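def _demo_filter_dataframe():
    # Illustrative sketch: the column names follow the usage above; the status/type
    # codes are made-up examples, not necessarily real codes from the dataset.
    df = pd.DataFrame({
        "Well_Status": ["AC", "PA"],
        "Well_Type": ["GD", "OD"],
        "Date_Well_Completed": [dt.datetime(2005, 6, 1), dt.datetime(1990, 1, 1)],
    })
    # Only the first row survives: matching status, matching type, completed in range.
    return filter_dataframe(df, None, ["AC"], ["GD"], [2000, 2010])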
def produce_individual(api_well_num, points):
try:
points[api_well_num]
except:
return None, None, None, None
index = list(
range(min(points[api_well_num].keys()), max(points[api_well_num].keys()) + 1)
)
gas = []
oil = []
water = []
for year in index:
try:
gas.append(points[api_well_num][year]["Gas Produced, MCF"])
except:
gas.append(0)
try:
oil.append(points[api_well_num][year]["Oil Produced, bbl"])
except:
oil.append(0)
try:
water.append(points[api_well_num][year]["Water Produced, bbl"])
except:
water.append(0)
return index, gas, oil, water
def produce_aggregate(selected, year_slider):
    # Note: relies on a module-level `points` mapping (loaded elsewhere in the app),
    # the same structure that produce_individual() receives explicitly.
    index = list(range(max(year_slider[0], 1985), 2016))
gas = []
oil = []
water = []
for year in index:
count_gas = 0
count_oil = 0
count_water = 0
for api_well_num in selected:
try:
count_gas += points[api_well_num][year]["Gas Produced, MCF"]
except:
pass
try:
count_oil += points[api_well_num][year]["Oil Produced, bbl"]
except:
pass
try:
count_water += points[api_well_num][year]["Water Produced, bbl"]
except:
pass
gas.append(count_gas)
oil.append(count_oil)
water.append(count_water)
return index, gas, oil, water |
#!/usr/bin/python3
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import data
import time
import word2vec
import tensorflow as tf
import numpy as np
import random
from datetime import datetime
from Logger import Logger
tf.flags.DEFINE_integer("BATCH_SIZE", 50, "Training batch size")
tf.flags.DEFINE_integer("NUM_EPOCHS", 500, "Number of training epochs")
tf.flags.DEFINE_string("DATASET", "TREC", "Dataset to perform training and testing on")
tf.flags.DEFINE_string("REGION_SIZES", "3,4,5", "Region sizes for convolutional layer")
tf.flags.DEFINE_integer("NUM_FILTERS", 100, "Number of filters per region size")
tf.flags.DEFINE_boolean("STATIC_EMBEDDINGS", True, "Word2Vec embeddings will not be fine-tuned during the training")
tf.flags.DEFINE_float("MAX_L2_NORM", 3, "Maximum L2 norm for convolutional layer weights")
tf.flags.DEFINE_float("REG_LAMBDA", 0, "Lambda regularization parameter for fully-connected layer")
tf.flags.DEFINE_float("DROPOUT_PROB", 0.5, "Neuron dropout probability")
tf.flags.DEFINE_float("LEARNING_RATE", 3e-4, "Initial learning rate value")
tf.flags.DEFINE_float("LEARNING_DECAY_RATE", 0.95, "Rate at which learning rate will exponentially decay during the training")
tf.flags.DEFINE_string("MODEL", "CNN_YoonKim", "Neural network model to use")
tf.flags.DEFINE_integer("EVAL_CHECKPOINT", 50, "Evaluate the model every this number of epochs")
tf.flags.DEFINE_boolean("GPU_ALLOW_GROWTH", True, "Only grow memory usage as is needed by the process")
tf.flags.DEFINE_boolean("SAVE", False, "Model will be saved")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
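# Example invocation (illustrative; the script filename is an assumption, flag names
# match the tf.flags definitions above):
#   python3 train.py --DATASET=TREC --MODEL=CNN_YoonKim --BATCH_SIZE=64 --NUM_EPOCHS=200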
today = datetime.today()
id_string = "{}_{}_{:02}-{:02}-{:02}_{:02}-{:02}-{:02}".format(
FLAGS.DATASET,
"_".join(FLAGS.MODEL.split("_")[1:]),
today.day,
today.month,
int(str(today.year)[-2:]),
today.hour,
today.minute,
today.second
)
logger = Logger(
id_string+".txt",
print_to_stdout=True
)
logger.log("ID: "+id_string)
logger.log("")
logger.log("Hyperparameters:")
for param, value in sorted(FLAGS.__flags.items()):
logger.log(param + ": " + str(value))
logger.log("")
train, test, num_classes, class_dict, max_sentence_length = data.load_dataset(FLAGS.DATASET)
logger.log("Train set size: " + str(len(train)))
logger.log("Test set size: " + str(len(test)))
logger.log("Classes: " + str(num_classes))
logger.log("Max sentence length: " + str(max_sentence_length))
logger.log()
# train data prepare
for i in range(len(train)):
sentence, label = train[i]
word_indices = data.index_and_align(sentence, max_sentence_length, generate_new_vector=True)
train[i]=(word_indices,label)
# test data prepare
for i in range(len(test)):
sentence, label = test[i]
word_indices = data.index_and_align(sentence, max_sentence_length, generate_new_vector=False)
test[i]=(word_indices,label)
config = tf.ConfigProto()
config.gpu_options.allow_growth=FLAGS.GPU_ALLOW_GROWTH
with tf.Session(config=config) as sess, logger:
model_class = data.get_model_class(FLAGS.MODEL)
word2vec.load_embeddings()
neural_network = model_class(
model_name=id_string,
session=sess,
learning_rate=FLAGS.LEARNING_RATE,
learning_decay_rate=FLAGS.LEARNING_DECAY_RATE,
optimizer=tf.train.AdamOptimizer,
filter_sizes=[int(region_size) for region_size in FLAGS.REGION_SIZES.split(",")],
num_filters=FLAGS.NUM_FILTERS,
embeddings=word2vec.embeddings,
new_embeddings=word2vec.new_embeddings,
vocabulary_size=word2vec.vocabulary_size,
static=FLAGS.STATIC_EMBEDDINGS,
max_sentence_length=max_sentence_length,
num_classes=num_classes,
embedding_dim=word2vec.vector_dimension,
max_l2_norm=FLAGS.MAX_L2_NORM,
regularization_lambda=FLAGS.REG_LAMBDA,
dropout_keep_prob=1-FLAGS.DROPOUT_PROB
)
def evaluate():
logger.log("Evaluating...", end=" ")
correct=0
for i in range(len(test)):
indices, label = test[i]
output, predictions = neural_network.feed([indices])
accuracy=label[predictions[0]]
correct+=accuracy
logger.log("Test set accuracy: " + str(correct/len(test)*100) + " %")
start_time = time.time()
batch_indices = data.generate_partitions(len(train), FLAGS.BATCH_SIZE)
try: # allow user to end training using Ctrl+C
for epoch in range(1, FLAGS.NUM_EPOCHS+1):
random.shuffle(train)
avg_loss=0
for start, end in batch_indices:
indices, labels = zip(*train[start:end])
loss = neural_network.train_step(indices, labels)
avg_loss+=loss
avg_loss/=len(batch_indices)
logger.log("Epoch " + str(epoch) + " loss: " + str(avg_loss))
if epoch%FLAGS.EVAL_CHECKPOINT==0:
evaluate()
except KeyboardInterrupt:
pass
end_time=time.time()
training_minutes=int((end_time-start_time)//60)
training_seconds=int((end_time-start_time)-training_minutes*60)
logger.log("Training DONE ({} m {} s).".format(training_minutes, training_seconds))
evaluate()
if FLAGS.SAVE:
data.save_model(neural_network)
|
'''
Bi-Isame-Allah
This script has following networks
0. Seed Net version 1 & 101
1. Seg_net & Seg_net_original (in these n_filters=32)
2. U_net
3. U_net WC
4. ES_net
5. FCN_8s
6. PSP_net (op = 1/8 x ip)
7. Deeplab_v3
8. GCN (ip = 512x512)
9. DAN (op = 1/8 x ip)
'''
import tensorflow as tf
from conv_blocks_1 import SE_ResNet, conv2d_block, SE_ResNet0
from conv_blocks_1 import avg_img_pyramid, max_img_pyramid, dense_skip, dense_skip0, global_dil, PDC, ResNet_block_op, DAM
from conv_blocks_2 import ASPP_v2, ASPP_v3, PSP_module, Red_net_ip, WC, ES, PSP_module, ASPP_v3, ASPP_v2, GCN, BR, PAM, CAM, BAM, CBAM
from layers import MaxPoolingWithArgmax2D, MaxUnpooling2D, MinPooling2D
if tf.__version__ == '2.0.0' or tf.__version__ == '2.2.0' or tf.__version__ == '2.2.0-rc2':
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2, l1
from tensorflow.keras.layers import Input, BatchNormalization, Activation, SpatialDropout2D, PReLU, Lambda, add
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, UpSampling2D
from tensorflow.keras.layers import MaxPooling2D, concatenate
from tensorflow.keras.optimizers import Adam, Nadam, SGD
import tensorflow.keras.backend as K
if tf.__version__ == '1.15.0' or tf.__version__ == '1.13.1':
from keras.models import Model
from keras.regularizers import l2, l1
from keras.layers import Input, BatchNormalization, Activation, SpatialDropout2D, PReLU, Lambda, add
from keras.layers import Conv2D, Conv2DTranspose, UpSampling2D
from keras.layers import MaxPooling2D, concatenate
from keras.optimizers import Adam, Nadam, SGD
import keras.backend as K
def use_customdropout():
    use_mydropout = True # 1 for True
return use_mydropout # 0 for False
use_mydropout = use_customdropout()
if use_mydropout == True:
from layers import Dropout
elif use_mydropout == False:
if tf.__version__ == '1.15.0' or tf.__version__ == '1.13.1':
from keras.layers import Dropout
if tf.__version__ == '2.2.0' or tf.__version__ == '2.0.0':
from tensorflow.keras.layers import Dropout
'''
For binary segmentation set num_class=2
'''
def num_of_classes():
num_class = 15
return num_class
num_class = num_of_classes()
if num_class == 2:
output_ch = 1
else:
output_ch = num_class
starting_ch = 16 # For networks other than Seed Net
#%%**********************************************SEED_Netv1**********************************************
def SEED_Netv1(input_img, n_filters, dropout, batchnorm = True, activation = 'relu'):
"""Function to define the UNET Model"""
#Making image pyramids for concatinaing at later stages will simply resize the images
avg_pyramid1, avg_pyramid2, avg_pyramid3, avg_pyramid4 = avg_img_pyramid(input_img) #via average pooling
max_pyramid1, max_pyramid2, max_pyramid3, max_pyramid4 = max_img_pyramid(input_img) #via max pooling
# Contracting Path
c1 = SE_ResNet(input_img, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
c1 = SE_ResNet0(c1, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
p1 = MaxPooling2D((2, 2))(c1)
p1 = Dropout(dropout)(p1)
c2 = SE_ResNet(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
c2 = SE_ResNet0(c2, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
p2 = MaxPooling2D((2, 2))(c2)
p2 = Dropout(dropout)(p2)
c3 = SE_ResNet(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
c3 = SE_ResNet0(c3, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
p3 = MaxPooling2D((2, 2))(c3)
p3 = Dropout(dropout)(p3)
c4 = SE_ResNet(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
c4 = SE_ResNet0(c4, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
p4 = MaxPooling2D((2, 2))(c4)
p4 = Dropout(dropout)(p4)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
#connecting encoder decoder
#c5 = parallel_dil(c5, n_filters * 16, kernel_size = 3) #with parallet dil
c5 = dense_skip(c5, avg_pyramid4, max_pyramid4, n_filters * 16, kernel_size = 7)#with dense skip
#c5 = PDC(c5, n_filters * 16)
# Expanding Path
u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides = (2, 2), padding = 'same')(c5)
ds6 = dense_skip(c4, avg_pyramid3, max_pyramid3, n_filters * 8, kernel_size = 9)
u6 = concatenate([u6, ds6])
u6 = Dropout(dropout)(u6)
c6 = SE_ResNet(u6, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides = (2, 2), padding = 'same')(c6)
ds7 = dense_skip(c3, avg_pyramid2, max_pyramid2, n_filters * 4, kernel_size = 11)
u7 = concatenate([u7, ds7])
u7 = Dropout(dropout)(u7)
c7 = SE_ResNet(u7, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides = (2, 2), padding = 'same')(c7)
ds8 = dense_skip(c2, avg_pyramid1, max_pyramid1, n_filters * 2, kernel_size = 13)
u8 = concatenate([u8, ds8])
u8 = Dropout(dropout)(u8)
c8 = SE_ResNet(u8, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
u9 = Conv2DTranspose(n_filters * 1, (3, 3), strides = (2, 2), padding = 'same')(c8)
ds9 = dense_skip0(c1, input_img, n_filters * 1, kernel_size = 15)
u9 = concatenate([u9, ds9])
u9 = Dropout(dropout)(u9)
c9 = SE_ResNet(u9, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
c_out = ResNet_block_op(c9, output_ch, kernel_size = 3, batchnorm = batchnorm)
outputs = Conv2D(output_ch, (1, 1))(c_out)#, activation='softmax'
model = Model(inputs=[input_img], outputs=[outputs])
return model
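#Usage sketch (illustrative; the 256x256x3 input shape is an assumption chosen so the
#four 2x2 poolings divide evenly):
#   inp = Input(shape = (256, 256, 3))
#   model = SEED_Netv1(inp, n_filters = 16, dropout = 0.2)
#   model.summary()   # final layer yields `output_ch` logits per pixel (no softmax applied)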
#%%**********************************************SEED_Netv101**********************************************
def SEED_Netv101(input_img, n_filters, dropout, weight_decay=False, batchnorm = True, activation = 'relu'):
    """Function to define the SEED-Net v101 model"""
if weight_decay == True:
weight_decay = l2(5e-4)
else:
weight_decay = None
c0 = Conv2D(16, kernel_size = (7, 7), strides=(2, 2), kernel_initializer = 'he_normal', padding = 'same')(input_img)
c0 = BatchNormalization()(c0)
c0 = Activation(activation)(c0)
# Contracting Path
c1 = SE_ResNet(c0, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
c1 = SE_ResNet0(c1, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
p1 = MaxPooling2D((2, 2))(c1)
p1 = Dropout(dropout)(p1)
c2 = SE_ResNet(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
c2 = SE_ResNet0(c2, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
p2 = MaxPooling2D((2, 2))(c2)
p2 = Dropout(dropout)(p2)
c3 = SE_ResNet(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2, activation = activation)
c3 = SE_ResNet0(c3, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2, activation = activation)
p3 = MaxPooling2D((2, 2))(c3)
p3 = Dropout(dropout)(p3)
c4 = SE_ResNet(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4, activation = activation)
c4 = SE_ResNet0(c4, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4, activation = activation)
#p4 = MaxPooling2D((2, 2))(c4)
p4 = Dropout(dropout)(c4)
c5 = SE_ResNet(p4, n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8, activation = activation)
c5 = SE_ResNet0(c5, n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8, activation = activation)
#p4 = MaxPooling2D((2, 2))(c4)
p5 = Dropout(dropout)(c5)
c6 = SE_ResNet(p5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
c6 = SE_ResNet0(c6, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1, activation = activation)
#connecting encoder decoder
c6 = PDC(c6, n_filters * 16)
#c6 = PSP_module(c6, n_filters * 16)
#c6 = ASPP_v3(c6, n_filters*16, input_img, downsample_by = 16)
#c6 = ASPP_v2(c6, n_filters*16)
# Expanding Path
dam6 = DAM(c3, c6, n_filters*16, 15, input_img, 8)
u6 = UpSampling2D(interpolation='bilinear')(dam6)
dam7 = DAM(c2, u6, n_filters*8, 15, input_img, 4)
u7 = UpSampling2D(interpolation='bilinear')(dam7)
dam8 = DAM(c1, u7, n_filters*4, 15, input_img, 2)
u8 = UpSampling2D(size=(4,4), interpolation='bilinear')(dam8)
outputs = Conv2D(output_ch, (1, 1), kernel_initializer = 'he_normal')(u8)#, activation='softmax'
model = Model(inputs=[input_img], outputs=[outputs])
return model
#%%
def FCN_8s(input_img, n_filters, dropout, kernel=3, batchnorm = True):
# Block 1
c0 = Conv2D(starting_ch, kernel_size = (7, 7), kernel_initializer = 'he_normal', padding = 'same')(input_img)
c0 = BatchNormalization()(c0)
c0 = Activation('relu')(c0)
# Contracting Path
c1 = SE_ResNet(c0, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c1 = SE_ResNet0(c1, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p1 = MaxPooling2D((2, 2))(c1)
p1 = Dropout(dropout)(p1)
c2 = SE_ResNet(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c2 = SE_ResNet0(c2, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p2 = MaxPooling2D((2, 2))(c2)
p2 = Dropout(dropout)(p2)
c3 = SE_ResNet(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
c3 = SE_ResNet0(c3, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
p3 = MaxPooling2D((2, 2))(c3)
p3 = Dropout(dropout)(p3)
c4 = SE_ResNet(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
c4 = SE_ResNet0(c4, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
p4 = MaxPooling2D((2, 2))(c4)
p4 = Dropout(dropout)(p4)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = Dropout(dropout)(c5)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p5 = MaxPooling2D((2, 2))(c5)
p5 = Dropout(dropout)(p5)
# up convolutions and sum
p5u = UpSampling2D(size = (2,2),interpolation='bilinear')(p5)
p5u = Conv2D(n_filters * 8, kernel_size = (1, 1), kernel_initializer = 'he_normal', padding = 'same')(p5u)
sum45 = add([p5u, p4])
sum45 = UpSampling2D(size = (2,2),interpolation='bilinear')(sum45)
sum45 = Conv2D(n_filters * 4, kernel_size = (1, 1), kernel_initializer = 'he_normal', padding = 'same')(sum45)
sum453 = add([sum45, p3])
sum453 = UpSampling2D(size = (2,2),interpolation='bilinear')(sum453)
sum453 = Conv2D(n_filters * 2, kernel_size = (1, 1), kernel_initializer = 'he_normal', padding = 'same')(sum453)
sum4532 = add([sum453, p2])
sum4532 = UpSampling2D(size = (2,2),interpolation='bilinear')(sum4532)
sum4532 = Conv2D(n_filters * 1, kernel_size = (1, 1), kernel_initializer = 'he_normal', padding = 'same')(sum4532)
sum45321 = add([sum4532, p1])
sum45321 = UpSampling2D(size = (2,2),interpolation='bilinear')(sum45321)
outputs = Conv2D(output_ch, 1)(sum45321)#, activation = 'softmax'
model = Model(inputs=[input_img], outputs=[outputs])
return model
#%%
def Seg_net(input_img, n_filters, dropout, kernel=3, batchnorm = True):
c0 = Conv2D(starting_ch, kernel_size = (7, 7), kernel_initializer = 'he_normal', padding = 'same')(input_img)
c0 = BatchNormalization()(c0)
c0 = Activation('relu')(c0)
# encoder
conv_1 = SE_ResNet(c0, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
conv_2 = SE_ResNet0(conv_1, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
pool_1, mask_1 = MaxPoolingWithArgmax2D((2,2))(conv_2)
conv_3 = SE_ResNet(pool_1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
conv_4 = SE_ResNet0(conv_3, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
pool_2, mask_2 = MaxPoolingWithArgmax2D((2,2))(conv_4)
conv_5 = SE_ResNet(pool_2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
conv_6 = SE_ResNet0(conv_5, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
conv_7 = SE_ResNet0(conv_6, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
pool_3, mask_3 = MaxPoolingWithArgmax2D((2,2))(conv_7)
conv_8 = SE_ResNet(pool_3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
conv_9 = SE_ResNet0(conv_8, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
conv_10 = SE_ResNet0(conv_9, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
pool_4, mask_4 = MaxPoolingWithArgmax2D((2,2))(conv_10)
conv_11 = SE_ResNet(pool_4, n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)#8
conv_12 = SE_ResNet0(conv_11, n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)#8
conv_13 = SE_ResNet0(conv_12, n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)#8
pool_5, mask_5 = MaxPoolingWithArgmax2D((2,2))(conv_13)
# decoder
unpool_1 = MaxUnpooling2D((2,2))([pool_5, mask_5])
conv_14 = SE_ResNet(unpool_1, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
conv_15 = SE_ResNet0(conv_14, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
conv_16 = SE_ResNet0(conv_15, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
unpool_2 = MaxUnpooling2D((2,2))([conv_16, mask_4])
conv_17 = SE_ResNet(unpool_2, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
conv_18 = SE_ResNet0(conv_17, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
conv_19 = SE_ResNet(conv_18, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
unpool_3 = MaxUnpooling2D((2,2))([conv_19, mask_3])
conv_20 = SE_ResNet(unpool_3, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
conv_21 = SE_ResNet0(conv_20, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
conv_22 = SE_ResNet(conv_21, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
unpool_4 = MaxUnpooling2D((2,2))([conv_22, mask_2])
conv_23 = SE_ResNet(unpool_4, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
conv_24 = SE_ResNet(conv_23, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
unpool_5 = MaxUnpooling2D((2,2))([conv_24, mask_1])
conv_25 = SE_ResNet(unpool_5, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
conv_26 = Conv2D(output_ch, (1, 1), padding="valid")(conv_25)
outputs = BatchNormalization()(conv_26)
#outputs = Activation('softmax')(outputs)
model = Model(inputs=[input_img], outputs=[outputs])
return model
#%%
def U_net(input_img, n_filters, dropout, batchnorm = True):
c0 = Conv2D(starting_ch, kernel_size = (7, 7), kernel_initializer = 'he_normal', padding = 'same')(input_img)
c0 = BatchNormalization()(c0)
c0 = Activation('relu')(c0)
# Contracting Path
c1 = SE_ResNet(c0, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c1 = SE_ResNet0(c1, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p1 = MaxPooling2D((2, 2))(c1)
p1 = Dropout(dropout)(p1)
c2 = SE_ResNet(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c2 = SE_ResNet0(c2, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p2 = MaxPooling2D((2, 2))(c2)
p2 = Dropout(dropout)(p2)
c3 = SE_ResNet(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
c3 = SE_ResNet0(c3, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
p3 = MaxPooling2D((2, 2))(c3)
p3 = Dropout(dropout)(p3)
c4 = SE_ResNet(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
c4 = SE_ResNet0(c4, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
p4 = MaxPooling2D((2, 2))(c4)
p4 = Dropout(dropout)(p4)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = Dropout(dropout)(c5)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
# Expanding Path
u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides = (2, 2), padding = 'same')(c5)
u6 = concatenate([u6, c4])
u6 = Dropout(dropout)(u6)
c6 = SE_ResNet(u6, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides = (2, 2), padding = 'same')(c6)
u7 = concatenate([u7, c3])
u7 = Dropout(dropout)(u7)
c7 = SE_ResNet(u7, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides = (2, 2), padding = 'same')(c7)
u8 = concatenate([u8, c2])
u8 = Dropout(dropout)(u8)
c8 = SE_ResNet(u8, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u9 = Conv2DTranspose(n_filters * 1, (3, 3), strides = (2, 2), padding = 'same')(c8)
u9 = concatenate([u9, c1])
u9 = Dropout(dropout)(u9)
c9 = SE_ResNet(u9, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
outputs = Conv2D(output_ch, (1, 1))(c9)#, activation='softmax'
model = Model(inputs=[input_img], outputs=[outputs])
return model
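#%%
# A minimal usage sketch (not part of the original script): it shows how one of the model
# builders above is typically wired to an Input layer and compiled. The 512x512x3 input
# shape, the tensorflow.keras import path and the optimizer/loss choice are assumptions;
# `starting_ch` and `output_ch` are module-level globals defined earlier in this file.
def _example_build_unet(img_height=512, img_width=512, img_channels=3, n_filters=16, dropout=0.1):
    from tensorflow.keras.layers import Input
    from tensorflow.keras.losses import SparseCategoricalCrossentropy
    inp = Input((img_height, img_width, img_channels))
    model = U_net(inp, n_filters=n_filters, dropout=dropout, batchnorm=True)
    # The 1x1 output Conv2D has no activation, so train against logits.
    model.compile(optimizer='adam', loss=SparseCategoricalCrossentropy(from_logits=True))
    return model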
#%%
def Unet_WC(input_img, n_filters, dropout, batchnorm = True):
c0 = Conv2D(starting_ch, kernel_size = (7, 7), kernel_initializer = 'he_normal', padding = 'same')(input_img)
c0 = BatchNormalization()(c0)
c0 = Activation('relu')(c0)
# Contracting Path
c1 = SE_ResNet(c0, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c1 = SE_ResNet0(c1, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p1 = MaxPooling2D((2, 2))(c1)
p1 = Dropout(dropout)(p1)
c2 = SE_ResNet(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c2 = SE_ResNet0(c2, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p2 = MaxPooling2D((2, 2))(c2)
p2 = Dropout(dropout)(p2)
c3 = SE_ResNet(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
c3 = SE_ResNet0(c3, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
p3 = MaxPooling2D((2, 2))(c3)
p3 = Dropout(dropout)(p3)
c4 = SE_ResNet(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
c4 = SE_ResNet0(c4, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
p4 = MaxPooling2D((2, 2))(c4)
p4 = Dropout(dropout)(p4)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = Dropout(dropout)(c5)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
#Transition
ctr = WC(c5, n_filters*16, kernel_size=13)
# Expanding Path
u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides = (2, 2), padding = 'same')(ctr)
u6 = concatenate([u6, c4])
u6 = Dropout(dropout)(u6)
c6 = SE_ResNet(u6, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides = (2, 2), padding = 'same')(c6)
u7 = concatenate([u7, c3])
u7 = Dropout(dropout)(u7)
c7 = SE_ResNet(u7, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides = (2, 2), padding = 'same')(c7)
u8 = concatenate([u8, c2])
u8 = Dropout(dropout)(u8)
c8 = SE_ResNet(u8, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u9 = Conv2DTranspose(n_filters * 1, (3, 3), strides = (2, 2), padding = 'same')(c8)
u9 = concatenate([u9, c1])
u9 = Dropout(dropout)(u9)
c9 = SE_ResNet(u9, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
outputs = Conv2D(output_ch, (1, 1))(c9)#, activation='softmax'
model = Model(inputs=[input_img], outputs=[outputs])
return model
#%%
def ES_net(input_img, n_filters, dropout, batchnorm = True):
c0 = Conv2D(starting_ch, kernel_size = (7, 7), kernel_initializer = 'he_normal', padding = 'same')(input_img)
c0 = BatchNormalization()(c0)
c0 = Activation('relu')(c0)
# Contracting Path
c1 = SE_ResNet(c0, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c1 = SE_ResNet0(c1, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p1 = MaxPooling2D((2, 2))(c1)
p1 = Dropout(dropout)(p1)
c2 = SE_ResNet(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c2 = SE_ResNet0(c2, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p2 = MaxPooling2D((2, 2))(c2)
p2 = Dropout(dropout)(p2)
c3 = SE_ResNet(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
c3 = SE_ResNet0(c3, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
p3 = MaxPooling2D((2, 2))(c3)
p3 = Dropout(dropout)(p3)
c4 = SE_ResNet(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
c4 = SE_ResNet0(c4, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
p4 = MaxPooling2D((2, 2))(c4)
p4 = Dropout(dropout)(p4)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = Dropout(dropout)(c5)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
#Transition
ctr = WC(c5, n_filters*16, kernel_size=13)
# Expanding Path
u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides = (2, 2), padding = 'same')(ctr)
c4_es = ES(c4, n_filters * 8)
u6 = concatenate([u6, c4_es])
u6 = Dropout(dropout)(u6)
c6 = SE_ResNet(u6, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides = (2, 2), padding = 'same')(c6)
c3_es = ES(c3, n_filters * 4)
u7 = concatenate([u7, c3_es])
u7 = Dropout(dropout)(u7)
c7 = SE_ResNet(u7, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides = (2, 2), padding = 'same')(c7)
c2_es = ES(c2, n_filters * 2)
u8 = concatenate([u8, c2_es])
u8 = Dropout(dropout)(u8)
c8 = SE_ResNet(u8, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u9 = Conv2DTranspose(n_filters * 1, (3, 3), strides = (2, 2), padding = 'same')(c8)
c1_es = ES(c1, n_filters * 1)
u9 = concatenate([u9, c1_es])
u9 = Dropout(dropout)(u9)
c9 = SE_ResNet(u9, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
outputs = Conv2D(output_ch, (1, 1))(c9)#, activation='softmax'
model = Model(inputs=[input_img], outputs=[outputs])
return model
#%%
def Pyramid_Unet(input_img, n_filters, dropout, batchnorm = True):
"""Function to define the UNET Model"""
#Making image pyramids for concatinaing at later stages will simply resize the images
avg_pyramid1, avg_pyramid2, avg_pyramid3, avg_pyramid4 = avg_img_pyramid(input_img) #via average pooling
max_pyramid1, max_pyramid2, max_pyramid3, max_pyramid4 = max_img_pyramid(input_img) #via max pooling
# Contracting Path
c1 = SE_ResNet(input_img, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c1 = SE_ResNet0(c1, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p1 = MaxPooling2D((2, 2))(c1)
p1 = Dropout(dropout)(p1)
c2 = SE_ResNet(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c2 = SE_ResNet0(c2, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p2 = MaxPooling2D((2, 2))(c2)
p2 = Dropout(dropout)(p2)
c3 = SE_ResNet(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c3 = SE_ResNet0(c3, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p3 = MaxPooling2D((2, 2))(c3)
p3 = Dropout(dropout)(p3)
c4 = SE_ResNet(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c4 = SE_ResNet0(c4, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p4 = MaxPooling2D((2, 2))(c4)
p4 = Dropout(dropout)(p4)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
#connecting encoder decoder
#c5 = parallel_dil(c5, n_filters * 16, kernel_size = 3) #with parallel dilation
c5 = dense_skip(c5, avg_pyramid4, max_pyramid4, n_filters * 16, kernel_size = 7)#with dense skip
# Expanding Path
u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides = (2, 2), padding = 'same')(c5)
ds6 = dense_skip(c4, avg_pyramid3, max_pyramid3, n_filters * 8, kernel_size = 9)
u6 = concatenate([u6, ds6])
u6 = Dropout(dropout)(u6)
c6 = SE_ResNet(u6, n_filters * 8, kernel_size = 1, batchnorm = batchnorm, dil_rate = 1)
u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides = (2, 2), padding = 'same')(c6)
ds7 = dense_skip(c3, avg_pyramid2, max_pyramid2, n_filters * 4, kernel_size = 11)
u7 = concatenate([u7, ds7])
u7 = Dropout(dropout)(u7)
c7 = SE_ResNet(u7, n_filters * 4, kernel_size = 1, batchnorm = batchnorm, dil_rate = 1)
u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides = (2, 2), padding = 'same')(c7)
ds8 = dense_skip(c2, avg_pyramid1, max_pyramid1, n_filters * 2, kernel_size = 13)
u8 = concatenate([u8, ds8])
u8 = Dropout(dropout)(u8)
c8 = SE_ResNet(u8, n_filters * 2, kernel_size = 1, batchnorm = batchnorm, dil_rate = 1)
u9 = Conv2DTranspose(n_filters * 1, (3, 3), strides = (2, 2), padding = 'same')(c8)
ds9 = dense_skip0(c1, input_img, n_filters * 1, kernel_size = 15)
u9 = concatenate([u9, ds9])
u9 = Dropout(dropout)(u9)
c9 = SE_ResNet(u9, n_filters * 1, kernel_size = 1, batchnorm = batchnorm, dil_rate = 1)
outputs = Conv2D(output_ch, 1)(c9)#, activation='softmax'
model = Model(inputs=[input_img], outputs=[outputs])
return model
#%%
def PSP_net(input_img, n_filters, dropout, batchnorm = True):
'''
For this model, the output is 1/8 of the input size.
'''
c0 = Conv2D(starting_ch, kernel_size = (7, 7), kernel_initializer = 'he_normal', padding = 'same')(input_img)
c0 = BatchNormalization()(c0)
c0 = Activation('relu')(c0)
# Contracting Path
c1 = SE_ResNet(c0, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c1 = SE_ResNet0(c1, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p1 = MaxPooling2D((2, 2))(c1)
p1 = Dropout(dropout)(p1)
c2 = SE_ResNet(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c2 = SE_ResNet0(c2, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p2 = MaxPooling2D((2, 2))(c2)
p2 = Dropout(dropout)(p2)
c3 = SE_ResNet(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
c3 = SE_ResNet0(c3, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
p3 = MaxPooling2D((2, 2))(c3)
p3 = Dropout(dropout)(p3)
c4 = SE_ResNet(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
c4 = SE_ResNet0(c4, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
#c4 = MaxPooling2D((2, 2))(c4)
p4 = Dropout(dropout)(c4)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = Dropout(dropout)(c5)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
#Transition
ctr = PSP_module(c5, n_filters*16)
outputs = Conv2D(output_ch, (1, 1))(ctr)#, activation='softmax'
model = Model(inputs=[input_img], outputs=[outputs])
return model
#%%
def Deeplab_v2(input_img, n_filters, dropout, batchnorm = True):
c0 = Conv2D(starting_ch, kernel_size = (7, 7), kernel_initializer = 'he_normal', padding = 'same')(input_img)
c0 = BatchNormalization()(c0)
c0 = Activation('relu')(c0)
# Contracting Path
c1 = SE_ResNet(c0, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c1 = SE_ResNet0(c1, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p1 = MaxPooling2D((2, 2))(c1)
p1 = Dropout(dropout)(p1)
c2 = SE_ResNet(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c2 = SE_ResNet0(c2, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p2 = MaxPooling2D((2, 2))(c2)
p2 = Dropout(dropout)(p2)
c3 = SE_ResNet(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
c3 = SE_ResNet0(c3, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
p3 = MaxPooling2D((2, 2))(c3)
p3 = Dropout(dropout)(p3)
c4 = SE_ResNet(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
c4 = SE_ResNet0(c4, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
#p4 = MaxPooling2D((2, 2))(c4)
p4 = Dropout(dropout)(c4)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = Dropout(dropout)(c5)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
#Transition
ctr = ASPP_v2(c5, n_filters*16)
up = Conv2D(output_ch, (1, 1), kernel_initializer = 'he_normal', activation='relu')(ctr)
up = UpSampling2D(size=((8,8)), interpolation='bilinear')(up)#x8 times upsample directly
outputs = Conv2D(output_ch, (1, 1))(up)#, activation='softmax'
model = Model(inputs=[input_img], outputs=[outputs])
return model
#%%
def Deeplab_v3(input_img, n_filters, dropout, batchnorm = True):
c0 = Conv2D(starting_ch, kernel_size = (7, 7), kernel_initializer = 'he_normal', padding = 'same')(input_img)
c0 = BatchNormalization()(c0)
c0 = Activation('relu')(c0)
# Contracting Path
c1 = SE_ResNet(c0, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c1 = SE_ResNet0(c1, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p1 = MaxPooling2D((2, 2))(c1)
p1 = Dropout(dropout)(p1)
c2 = SE_ResNet(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c2 = SE_ResNet0(c2, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p2 = MaxPooling2D((2, 2))(c2)
p2 = Dropout(dropout)(p2)
c3 = SE_ResNet(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
c3 = SE_ResNet0(c3, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
p3 = MaxPooling2D((2, 2))(c3)
p3 = Dropout(dropout)(p3)
c4 = SE_ResNet(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
c4 = SE_ResNet0(c4, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
p4 = MaxPooling2D((2, 2))(c4)
p4 = Dropout(dropout)(p4)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = Dropout(dropout)(c5)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
#Transition
ctr = ASPP_v3(c5, n_filters*16, input_img, downsample_by = 16)
# Upsampling
up = Conv2D(n_filters*8, (1, 1), kernel_initializer = 'he_normal', activation='relu')(ctr)
up = UpSampling2D(size=((4,4)), interpolation='bilinear')(up)#x4 times upsample
up1 = Conv2D(n_filters, kernel_size = (1, 1), kernel_initializer = 'he_normal', padding = 'same')(c3)
upc = concatenate([up1, up])
up2 = Conv2D(n_filters*4, kernel_size = (3, 3), kernel_initializer = 'he_normal', padding = 'same')(upc)
up2 = UpSampling2D(size=((4,4)), interpolation='bilinear')(up2)#x4 times upsample
outputs = Conv2D(output_ch, (1, 1))(up2)#, activation='softmax'
model = Model(inputs=[input_img], outputs=[outputs])
return model
#%%
def GCN_net(input_img, n_filters, dropout, batchnorm = True):
'''
This network is designed for a 512x512 input.
'''
c0 = Conv2D(starting_ch, kernel_size = (7, 7), kernel_initializer = 'he_normal', padding = 'same')(input_img)
c0 = BatchNormalization()(c0)
c0 = Activation('relu')(c0)
# Contracting Path
c1 = SE_ResNet(c0, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c1 = SE_ResNet0(c1, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p1 = MaxPooling2D((2, 2))(c1)
p1 = Dropout(dropout)(p1)
c2 = SE_ResNet(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c2 = SE_ResNet0(c2, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p2 = MaxPooling2D((2, 2))(c2)
p2 = Dropout(dropout)(p2)
c3 = SE_ResNet(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
c3 = SE_ResNet0(c3, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
p3 = MaxPooling2D((2, 2))(c3)
p3 = Dropout(dropout)(p3)
c4 = SE_ResNet(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
c4 = SE_ResNet0(c4, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
p4 = MaxPooling2D((2, 2))(c4)
p4 = Dropout(dropout)(p4)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = Dropout(dropout)(c5)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p5 = MaxPooling2D((2, 2))(c5)
p5 = Dropout(dropout)(p5)
# Expanding Path
u5 = GCN(p5, output_ch, kernel_size = 15)
u5 = BR(u5, output_ch)
c6 = Conv2DTranspose(output_ch, (3, 3), strides = (2, 2), padding = 'same')(u5)
c6 = Dropout(dropout)(c6)
u4 = GCN(p4, output_ch, kernel_size = 15)
u4 = BR(u4, output_ch)
u4 = add([u4, c6])
u4 = BR(u4, output_ch)
c7 = Conv2DTranspose(output_ch, (3, 3), strides = (2, 2), padding = 'same')(u4)
u3 = GCN(p3, output_ch, kernel_size = 15)
u3 = BR(u3, output_ch)
u3 = add([u3, c7])
u3 = BR(u3, output_ch)
c8 = Conv2DTranspose(output_ch, (3, 3), strides = (2, 2), padding = 'same')(u3)
u2 = GCN(p2, output_ch, kernel_size = 15)
u2 = BR(u2, output_ch)
u2 = add([u2, c8])
u2 = BR(u2, output_ch)
c9 = Conv2DTranspose(output_ch, (3, 3), strides = (2, 2), padding = 'same')(u2)
c10 = BR(c9, output_ch)
c11 = Conv2DTranspose(output_ch, (3, 3), strides = (2, 2), padding = 'same')(c10)
c11 = BR(c11, output_ch)
outputs = Conv2D(output_ch, (1, 1))(c11)#, activation='softmax'
model = Model(inputs=[input_img], outputs=[outputs])
return model
#%%
def DAN_net(input_img, n_filters, dropout, batchnorm = True):
'''
For this model, the output is 1/8 of the input size.
'''
c0 = Conv2D(starting_ch, kernel_size = (7, 7), kernel_initializer = 'he_normal', padding = 'same')(input_img)
c0 = BatchNormalization()(c0)
c0 = Activation('relu')(c0)
# Contracting Path
c1 = SE_ResNet(c0, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c1 = SE_ResNet0(c1, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p1 = MaxPooling2D((2, 2))(c1)
p1 = Dropout(dropout)(p1)
c2 = SE_ResNet(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c2 = SE_ResNet0(c2, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p2 = MaxPooling2D((2, 2))(c2)
p2 = Dropout(dropout)(p2)
c3 = SE_ResNet(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
c3 = SE_ResNet0(c3, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
p3 = MaxPooling2D((2, 2))(c3)
p3 = Dropout(dropout)(p3)
c4 = SE_ResNet(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
c4 = SE_ResNet0(c4, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
#c4 = MaxPooling2D((2, 2))(c4)
p4 = Dropout(dropout)(c4)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = Dropout(dropout)(c5)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
#Transition
'''
**([c5, n_filters])** passes more than one tensor as input to a Lambda layer.
The list is forwarded to the function inside the Lambda layer, and its values can be
retrieved in order by normal indexing, e.g.
def custom_layer(tensor):
    tensor1 = tensor[0]
    tensor2 = tensor[1]
    return tensor1 + tensor2
'''
ctrp = Lambda(PAM, name="lambda_PAM")(c5)
ctrc = Lambda(CAM, name="lambda_CAM")(c5)
ctr = add([ctrp, ctrc])
outputs = Conv2D(output_ch, (1, 1))(ctr)#, activation='softmax'
model = Model(inputs=[input_img], outputs=[outputs])
return model
#%%
def BAM_net(input_img, n_filters, dropout, batchnorm = True):
c0 = Conv2D(starting_ch, kernel_size = (7, 7), kernel_initializer = 'he_normal', padding = 'same')(input_img)
c0 = BatchNormalization()(c0)
c0 = Activation('relu')(c0)
# Contracting Path
c1 = SE_ResNet(c0, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c1 = SE_ResNet0(c1, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p1 = MaxPooling2D((2, 2))(c1)
p1 = Dropout(dropout)(p1)
c2 = SE_ResNet(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c2 = SE_ResNet0(c2, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p2 = MaxPooling2D((2, 2))(c2)
p2 = Dropout(dropout)(p2)
c3 = SE_ResNet(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
c3 = SE_ResNet0(c3, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
p3 = MaxPooling2D((2, 2))(c3)
p3 = Dropout(dropout)(p3)
c4 = SE_ResNet(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
c4 = SE_ResNet0(c4, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
p4 = MaxPooling2D((2, 2))(c4)
p4 = Dropout(dropout)(p4)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = Dropout(dropout)(c5)
c5 = SE_ResNet(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
# Expanding Path
u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides = (2, 2), padding = 'same')(c5)
c4_es = BAM(c4, n_filters * 8, 3, dil_rate = 4)
u6 = concatenate([u6, c4_es])
u6 = Dropout(dropout)(u6)
c6 = SE_ResNet(u6, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides = (2, 2), padding = 'same')(c6)
c3_es = BAM(c3, n_filters * 4, 3, dil_rate = 4)
u7 = concatenate([u7, c3_es])
u7 = Dropout(dropout)(u7)
c7 = SE_ResNet(u7, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides = (2, 2), padding = 'same')(c7)
c2_es = BAM(c2, n_filters * 2, 3, dil_rate = 4)
u8 = concatenate([u8, c2_es])
u8 = Dropout(dropout)(u8)
c8 = SE_ResNet(u8, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u9 = Conv2DTranspose(n_filters * 1, (3, 3), strides = (2, 2), padding = 'same')(c8)
c1_es = BAM(c1, n_filters * 1, 3, dil_rate = 4)
u9 = concatenate([u9, c1_es])
u9 = Dropout(dropout)(u9)
c9 = SE_ResNet(u9, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
outputs = Conv2D(output_ch, (1, 1))(c9)#, activation='softmax'
model = Model(inputs=[input_img], outputs=[outputs])
return model
#%%
def CBAM_net(input_img, n_filters, dropout, batchnorm = True):
c0 = Conv2D(starting_ch, kernel_size = (7, 7), kernel_initializer = 'he_normal', padding = 'same')(input_img)
c0 = BatchNormalization()(c0)
c0 = Activation('relu')(c0)
# Contracting Path
c1 = SE_ResNet(c0, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c1 = SE_ResNet0(c1, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p1 = MaxPooling2D((2, 2))(c1)
p1 = Dropout(dropout)(p1)
c2 = SE_ResNet(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c2 = SE_ResNet0(c2, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p2 = MaxPooling2D((2, 2))(c2)
p2 = Dropout(dropout)(p2)
c3 = SE_ResNet(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
c3 = SE_ResNet0(c3, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
p3 = MaxPooling2D((2, 2))(c3)
p3 = Dropout(dropout)(p3)
c4 = SE_ResNet(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
c4 = SE_ResNet0(c4, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
p4 = MaxPooling2D((2, 2))(c4)
p4 = Dropout(dropout)(p4)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = Dropout(dropout)(c5)
c5 = SE_ResNet(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
# Expanding Path
u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides = (2, 2), padding = 'same')(c5)
c4_es = CBAM(c4, n_filters * 8)
u6 = concatenate([u6, c4_es])
u6 = Dropout(dropout)(u6)
c6 = SE_ResNet(u6, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides = (2, 2), padding = 'same')(c6)
c3_es = CBAM(c3, n_filters * 4)
u7 = concatenate([u7, c3_es])
u7 = Dropout(dropout)(u7)
c7 = SE_ResNet(u7, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides = (2, 2), padding = 'same')(c7)
c2_es = CBAM(c2, n_filters * 2)
u8 = concatenate([u8, c2_es])
u8 = Dropout(dropout)(u8)
c8 = SE_ResNet(u8, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u9 = Conv2DTranspose(n_filters * 1, (3, 3), strides = (2, 2), padding = 'same')(c8)
c1_es = CBAM(c1, n_filters * 1)
u9 = concatenate([u9, c1_es])
u9 = Dropout(dropout)(u9)
c9 = SE_ResNet(u9, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
outputs = Conv2D(output_ch, (1, 1))(c9)#, activation='softmax'
model = Model(inputs=[input_img], outputs=[outputs])
return model
#%%
def RED_net(input_img, n_filters, dropout, L = 1, batchnorm = True):
loop = L
red_ip = Lambda(Red_net_ip, name="gray_concat")(input_img)
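# Iterative refinement: each pass re-runs the encoder-decoder on red_ip, and red_ip is
# rebuilt at the end of the loop from the input image concatenated with the current prediction.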
for i in range(loop):
c0 = Conv2D(starting_ch, kernel_size = (7, 7), kernel_initializer = 'he_normal', padding = 'same')(red_ip)
c0 = BatchNormalization()(c0)
c0 = Activation('relu')(c0)
# Contracting Path
c1 = SE_ResNet(c0, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c1 = SE_ResNet0(c1, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p1 = MaxPooling2D((2, 2))(c1)
p1 = Dropout(dropout)(p1)
c2 = SE_ResNet(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c2 = SE_ResNet0(c2, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
p2 = MaxPooling2D((2, 2))(c2)
p2 = Dropout(dropout)(p2)
c3 = SE_ResNet(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
c3 = SE_ResNet0(c3, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 2)
p3 = MaxPooling2D((2, 2))(c3)
p3 = Dropout(dropout)(p3)
c4 = SE_ResNet(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
c4 = SE_ResNet0(c4, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 4)
p4 = MaxPooling2D((2, 2))(c4)
p4 = Dropout(dropout)(p4)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 8)
c5 = Dropout(dropout)(c5)
c5 = SE_ResNet(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
c5 = SE_ResNet0(c5, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
# Expanding Path
u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides = (2, 2), padding = 'same')(c5)
u6 = concatenate([u6, c4])
u6 = Dropout(dropout)(u6)
c6 = SE_ResNet(u6, n_filters * 8, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides = (2, 2), padding = 'same')(c6)
u7 = concatenate([u7, c3])
u7 = Dropout(dropout)(u7)
c7 = SE_ResNet(u7, n_filters * 4, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides = (2, 2), padding = 'same')(c7)
u8 = concatenate([u8, c2])
u8 = Dropout(dropout)(u8)
c8 = SE_ResNet(u8, n_filters * 2, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
u9 = Conv2DTranspose(n_filters * 1, (3, 3), strides = (2, 2), padding = 'same')(c8)
u9 = concatenate([u9, c1])
u9 = Dropout(dropout)(u9)
c9 = SE_ResNet(u9, n_filters * 1, kernel_size = 3, batchnorm = batchnorm, dil_rate = 1)
outputs = Conv2D(output_ch, (1, 1))(c9)#, activation='sigmoid'
red_ip = concatenate([input_img, outputs])
model = Model(inputs=[input_img], outputs=[outputs])
return model
#%%
def Seg_net_original(input_img, n_filters, dropout, kernel=3, batchnorm = True):
# encoder
conv_1 = Conv2D(n_filters, (kernel, kernel), padding="same")(input_img)
conv_1 = BatchNormalization()(conv_1)
conv_1 = Activation("relu")(conv_1)
conv_2 = Conv2D(n_filters, (kernel, kernel), padding="same")(conv_1)
conv_2 = BatchNormalization()(conv_2)
conv_2 = Activation("relu")(conv_2)
pool_1, mask_1 = MaxPoolingWithArgmax2D((2,2))(conv_2)
conv_3 = Conv2D(n_filters*2, (kernel, kernel), padding="same")(pool_1)
conv_3 = BatchNormalization()(conv_3)
conv_3 = Activation("relu")(conv_3)
conv_4 = Conv2D(n_filters*2, (kernel, kernel), padding="same")(conv_3)
conv_4 = BatchNormalization()(conv_4)
conv_4 = Activation("relu")(conv_4)
pool_2, mask_2 = MaxPoolingWithArgmax2D((2,2))(conv_4)
conv_5 = Conv2D(n_filters*4, (kernel, kernel), padding="same")(pool_2)
conv_5 = BatchNormalization()(conv_5)
conv_5 = Activation("relu")(conv_5)
conv_6 = Conv2D(n_filters*4, (kernel, kernel), padding="same")(conv_5)
conv_6 = BatchNormalization()(conv_6)
conv_6 = Activation("relu")(conv_6)
conv_7 = Conv2D(n_filters*4, (kernel, kernel), padding="same")(conv_6)
conv_7 = BatchNormalization()(conv_7)
conv_7 = Activation("relu")(conv_7)
pool_3, mask_3 = MaxPoolingWithArgmax2D((2,2))(conv_7)
conv_8 = Conv2D(n_filters*8, (kernel, kernel), padding="same")(pool_3)
conv_8 = BatchNormalization()(conv_8)
conv_8 = Activation("relu")(conv_8)
conv_9 = Conv2D(n_filters*8, (kernel, kernel), padding="same")(conv_8)
conv_9 = BatchNormalization()(conv_9)
conv_9 = Activation("relu")(conv_9)
conv_10 = Conv2D(n_filters*8, (kernel, kernel), padding="same")(conv_9)
conv_10 = BatchNormalization()(conv_10)
conv_10 = Activation("relu")(conv_10)
pool_4, mask_4 = MaxPoolingWithArgmax2D((2,2))(conv_10)
conv_11 = Conv2D(n_filters*8, (kernel, kernel), padding="same")(pool_4)
conv_11 = BatchNormalization()(conv_11)
conv_11 = Activation("relu")(conv_11)
conv_12 = Conv2D(n_filters*8, (kernel, kernel), padding="same")(conv_11)
conv_12 = BatchNormalization()(conv_12)
conv_12 = Activation("relu")(conv_12)
conv_13 = Conv2D(n_filters*8, (kernel, kernel), padding="same")(conv_12)
conv_13 = BatchNormalization()(conv_13)
conv_13 = Activation("relu")(conv_13)
pool_5, mask_5 = MaxPoolingWithArgmax2D((2,2))(conv_13)
# decoder
unpool_1 = MaxUnpooling2D((2,2))([pool_5, mask_5])
conv_14 = Conv2D(n_filters*8, (kernel, kernel), padding="same")(unpool_1)
conv_14 = BatchNormalization()(conv_14)
conv_14 = Activation("relu")(conv_14)
conv_15 = Conv2D(n_filters*8, (kernel, kernel), padding="same")(conv_14)
conv_15 = BatchNormalization()(conv_15)
conv_15 = Activation("relu")(conv_15)
conv_16 = Conv2D(n_filters*8, (kernel, kernel), padding="same")(conv_15)
conv_16 = BatchNormalization()(conv_16)
conv_16 = Activation("relu")(conv_16)
unpool_2 = MaxUnpooling2D((2,2))([conv_16, mask_4])
conv_17 = Conv2D(n_filters*8, (kernel, kernel), padding="same")(unpool_2)
conv_17 = BatchNormalization()(conv_17)
conv_17 = Activation("relu")(conv_17)
conv_18 = Conv2D(n_filters*8, (kernel, kernel), padding="same")(conv_17)
conv_18 = BatchNormalization()(conv_18)
conv_18 = Activation("relu")(conv_18)
conv_19 = Conv2D(n_filters*4, (kernel, kernel), padding="same")(conv_18)
conv_19 = BatchNormalization()(conv_19)
conv_19 = Activation("relu")(conv_19)
unpool_3 = MaxUnpooling2D((2,2))([conv_19, mask_3])
conv_20 = Conv2D(n_filters*4, (kernel, kernel), padding="same")(unpool_3)
conv_20 = BatchNormalization()(conv_20)
conv_20 = Activation("relu")(conv_20)
conv_21 = Conv2D(n_filters*4, (kernel, kernel), padding="same")(conv_20)
conv_21 = BatchNormalization()(conv_21)
conv_21 = Activation("relu")(conv_21)
conv_22 = Conv2D(n_filters*2, (kernel, kernel), padding="same")(conv_21)
conv_22 = BatchNormalization()(conv_22)
conv_22 = Activation("relu")(conv_22)
unpool_4 = MaxUnpooling2D((2,2))([conv_22, mask_2])
conv_23 = Conv2D(n_filters*2, (kernel, kernel), padding="same")(unpool_4)
conv_23 = BatchNormalization()(conv_23)
conv_23 = Activation("relu")(conv_23)
conv_24 = Conv2D(n_filters, (kernel, kernel), padding="same")(conv_23)
conv_24 = BatchNormalization()(conv_24)
conv_24 = Activation("relu")(conv_24)
unpool_5 = MaxUnpooling2D((2,2))([conv_24, mask_1])
conv_25 = Conv2D(n_filters, (kernel, kernel), padding="same")(unpool_5)
conv_25 = BatchNormalization()(conv_25)
conv_25 = Activation("relu")(conv_25)
conv_26 = Conv2D(output_ch, (1, 1), padding="valid")(conv_25)
outputs = BatchNormalization()(conv_26)
#outputs = Activation('softmax')(conv_26)
model = Model(inputs=[input_img], outputs=[outputs])
return model
|
<filename>App.py
from PyQt5 import QtWidgets, uic
from PyQt5.QtGui import QImage, QPixmap, QPalette, qRgb, qGray
import sys
import numpy as np
from typing import Callable
from numbers import Number
def process_image(
input_image: np.array,
kernel_size: int,
kernel_fn: Callable[[np.array], float]) -> np.array:
padding_width: int = kernel_size // 2
padding_height: int = kernel_size // 2
padding = ((padding_height, padding_height), (padding_width, padding_width))
input_image_padding: np.array = np.pad(
array=input_image,
pad_width=padding,
mode='edge')
result_image: np.array = np.zeros(input_image.shape, dtype='float')
image_height, image_width = result_image.shape
for image_x in range(image_width):
for image_y in range(image_height):
x_pos_begin = image_x
x_pos_end = image_x + kernel_size
y_pos_begin = image_y
y_pos_end = image_y + kernel_size
image_segment: np.array = input_image_padding[y_pos_begin:y_pos_end, x_pos_begin:x_pos_end]
result_image[image_y][image_x] = kernel_fn(image_segment)
return result_image
def mean_fn(
image_segment: np.array) -> float:
return float(np.mean(image_segment))
def std_fn(
image_segment: np.array) -> float:
return float(np.std(image_segment))
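# A small, illustrative sketch (not part of the original app): it runs the sliding-window
# filter above with both kernel functions on a tiny ramp image. The helper name and the toy
# values are made up for the demo.
def _demo_process_image():
    demo = np.arange(16, dtype='float').reshape(4, 4)
    smoothed = process_image(input_image=demo, kernel_size=3, kernel_fn=mean_fn)
    local_contrast = process_image(input_image=demo, kernel_size=3, kernel_fn=std_fn)
    return smoothed, local_contrast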
def convert_to_binary(
input_image: np.array,
threshold: int = 127) -> np.array:
max_val: int = 255
min_val: int = 0
initial_conv: np.array = np.where((input_image <= threshold), input_image, max_val)
final_conv: np.array = np.where((initial_conv > threshold), initial_conv, min_val)
return final_conv
def normalize_image(
input_image: np.array) -> np.array:
result_image: np.array = np.zeros(input_image.shape)
input_max = input_image.max()
input_min = input_image.min()
input_range = input_max - input_min
height, width = input_image.shape
for y in range(height):
for x in range(width):
input_value = input_image[y][x]
scaled_input_value = (input_value - input_min) / input_range if input_range != 0 else 0
result_image[y][x] = scaled_input_value * 255.0
return result_image
def fill_image(
input_image: np.array,
value: Number,
replace_value: Number):
height, width = input_image.shape
for y in range(height):
for x in range(width):
if input_image[y, x] == value:
input_image[y, x] = replace_value
def mark_objects(
input_image: np.array) -> np.array:
result_image: np.array = np.copy(input_image)
current_object_id = 1
height, width = input_image.shape
for y in range(height):
for x in range(width):
if y == 0:
c = 0
else:
c = result_image[y - 1, x]
if x == 0:
b = 0
else:
b = result_image[y, x - 1]
a = result_image[y, x]
if a == 0:
pass
elif b == 0 and c == 0:
current_object_id += 1
result_image[y, x] = current_object_id
elif b != 0 and c == 0:
result_image[y, x] = b
elif b == 0 and c != 0:
result_image[y, x] = c
elif b != 0 and c != 0:
if b == c:
result_image[y, x] = b
else:
result_image[y, x] = b
fill_image(
input_image=result_image,
value=c,
replace_value=b)
return result_image
def delete_objects(
input_image: np.array,
object_size: int):
unique_mask, hist = np.unique(input_image, return_counts=True)
for i in range(1, len(unique_mask)):
if hist[i] < object_size:
for (y, x), _ in np.ndenumerate(input_image):
if input_image[y, x] == unique_mask[i]:
input_image[y, x] = 0
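# A small, illustrative sketch (not part of the original app): label connected components in
# a tiny binary mask and drop blobs smaller than 2 pixels. The helper name and the toy mask
# are made up for the demo.
def _demo_labeling():
    mask = np.array([
        [255, 255, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 255],
    ], dtype='float')
    labeled = mark_objects(input_image=mask)  # each blob gets its own integer label
    delete_objects(input_image=labeled, object_size=2)  # removes the single-pixel blob
    return labeled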
class Ui(QtWidgets.QMainWindow):
def __init__(self):
super(Ui, self).__init__()
uic.loadUi('Main.ui', self)
self.action_open = self.findChild(QtWidgets.QAction, 'actionOpen')
self.action_open.triggered.connect(self.action_open_triggered)
self.action_exit = self.findChild(QtWidgets.QAction, 'actionExit')
self.action_exit.triggered.connect(self.action_exit_triggered)
self.bt_apply = self.findChild(QtWidgets.QPushButton, 'btApply')
self.bt_apply.clicked.connect(self.bt_apply_pressed)
self.input_image_canvas = QtWidgets.QLabel()
self.input_image_canvas.setBackgroundRole(QPalette.Base)
self.input_image_canvas.setSizePolicy(
QtWidgets.QSizePolicy.Ignored,
QtWidgets.QSizePolicy.Ignored)
self.input_image_canvas.setScaledContents(True)
self.sa_input_image = self.findChild(QtWidgets.QScrollArea, 'saInputImage')
self.sa_input_image.setWidget(self.input_image_canvas)
self.sa_input_image.setWidgetResizable(False)
self.processed_image_canvas = QtWidgets.QLabel()
self.processed_image_canvas.setBackgroundRole(QPalette.Base)
self.processed_image_canvas.setSizePolicy(
QtWidgets.QSizePolicy.Ignored,
QtWidgets.QSizePolicy.Ignored)
self.processed_image_canvas.setScaledContents(True)
self.sa_processed_image = self.findChild(QtWidgets.QScrollArea, 'saProcessedImage')
self.sa_processed_image.setWidget(self.processed_image_canvas)
self.sa_processed_image.setWidgetResizable(False)
self.mask_image_canvas = QtWidgets.QLabel()
self.mask_image_canvas.setBackgroundRole(QPalette.Base)
self.mask_image_canvas.setSizePolicy(
QtWidgets.QSizePolicy.Ignored,
QtWidgets.QSizePolicy.Ignored)
self.mask_image_canvas.setScaledContents(True)
self.sa_mask_image = self.findChild(QtWidgets.QScrollArea, 'saMask')
self.sa_mask_image.setWidget(self.mask_image_canvas)
self.sa_mask_image.setWidgetResizable(False)
self.segmented_image_canvas = QtWidgets.QLabel()
self.segmented_image_canvas.setBackgroundRole(QPalette.Base)
self.segmented_image_canvas.setSizePolicy(
QtWidgets.QSizePolicy.Ignored,
QtWidgets.QSizePolicy.Ignored)
self.segmented_image_canvas.setScaledContents(True)
self.sa_segmented_image = self.findChild(QtWidgets.QScrollArea, 'saSegmentedImage')
self.sa_segmented_image.setWidget(self.segmented_image_canvas)
self.sa_segmented_image.setWidgetResizable(False)
self.cb_method = self.findChild(QtWidgets.QComboBox, 'cbMethod')
self.cb_method.addItems(['Mean', 'Std'])
self.le_kernel_size = self.findChild(QtWidgets.QLineEdit, 'leKernelSize')
self.le_threshold = self.findChild(QtWidgets.QLineEdit, 'leThreshold')
self.le_delete_objects = self.findChild(QtWidgets.QLineEdit, 'leDeleteObjects')
self.show()
def action_open_triggered(self):
options = QtWidgets.QFileDialog.Options()
file_name, _ = QtWidgets.QFileDialog.\
getOpenFileName(self,
'QFileDialog.getOpenFileName()',
'',
'Images (*.png *.jpeg *.jpg *.bmp *.gif)',
options=options)
if file_name:
image = QImage(file_name).convertToFormat(QImage.Format_Grayscale8)
if image.isNull():
QtWidgets.QMessageBox.\
information(self,
"Texture segmentation",
"Cannot load %s." % file_name)
return
self.input_image_canvas.setPixmap(QPixmap.fromImage(image))
self.input_image_canvas.adjustSize()
def action_exit_triggered(self):
self.close()
def bt_apply_pressed(self):
method = self.cb_method.currentIndex()
kernel_size = int(self.le_kernel_size.text())
threshold = int(self.le_threshold.text())
object_size = int(self.le_delete_objects.text())
input_q_image = self.input_image_canvas.pixmap().toImage().convertToFormat(QImage.Format_Grayscale8)
input_image = np.zeros((input_q_image.height(), input_q_image.width()), dtype='float')
for (y, x), _ in np.ndenumerate(input_image):
input_image[y, x] = qGray(input_q_image.pixel(x, y))
if method == 0:
kernel_fn = mean_fn
elif method == 1:
kernel_fn = std_fn
else:
return
processed_image: np.array = process_image(
input_image=input_image,
kernel_size=kernel_size,
kernel_fn=kernel_fn)
normalized_image: np.array = normalize_image(input_image=processed_image)
binarized_image: np.array = convert_to_binary(input_image=normalized_image, threshold=threshold)
marked_image = mark_objects(input_image=binarized_image)
delete_objects(
input_image=marked_image,
object_size=object_size)
segmented_image = np.copy(input_image)
for (y, x), _ in np.ndenumerate(segmented_image):
if marked_image[y, x] == 0:
segmented_image[y, x] = 0
self.set_image(
input_image=normalized_image,
canvas=self.processed_image_canvas)
self.set_image(
input_image=normalize_image(
input_image=marked_image),
canvas=self.mask_image_canvas)
self.set_image(
input_image=segmented_image,
canvas=self.segmented_image_canvas)
@staticmethod
def set_image(input_image: np.ndarray, canvas: QtWidgets.QLabel):
height, width = input_image.shape
q_image = QImage(width, height, QImage.Format_RGB32)
for y in range(height):
for x in range(width):
pixel = int(input_image[y, x])
q_image.setPixel(x, y, qRgb(pixel, pixel, pixel))
canvas.setPixmap(QPixmap.fromImage(q_image))
canvas.adjustSize()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = Ui()
sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
"""
awsecommerceservice
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class ItemSearchRequest(object):
"""Implementation of the 'ItemSearchRequest' model.
TODO: type model description here.
Attributes:
actor (string): TODO: type description here.
artist (string): TODO: type description here.
availability (AvailabilityEnum): TODO: type description here.
audience_rating (list of AudienceRatingEnum): TODO: type description
here.
author (string): TODO: type description here.
brand (string): TODO: type description here.
browse_node (string): TODO: type description here.
composer (string): TODO: type description here.
condition (ConditionEnum): TODO: type description here.
conductor (string): TODO: type description here.
director (string): TODO: type description here.
item_page (int): TODO: type description here.
keywords (string): TODO: type description here.
manufacturer (string): TODO: type description here.
maximum_price (int): TODO: type description here.
merchant_id (string): TODO: type description here.
minimum_price (int): TODO: type description here.
min_percentage_off (int): TODO: type description here.
music_label (string): TODO: type description here.
orchestra (string): TODO: type description here.
power (string): TODO: type description here.
publisher (string): TODO: type description here.
related_item_page (object): TODO: type description here.
relationship_type (list of string): TODO: type description here.
response_group (list of string): TODO: type description here.
search_index (string): TODO: type description here.
sort (string): TODO: type description here.
title (string): TODO: type description here.
release_date (string): TODO: type description here.
include_reviews_summary (string): TODO: type description here.
truncate_reviews_at (int): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"actor":'Actor',
"artist":'Artist',
"availability":'Availability',
"audience_rating":'AudienceRating',
"author":'Author',
"brand":'Brand',
"browse_node":'BrowseNode',
"composer":'Composer',
"condition":'Condition',
"conductor":'Conductor',
"director":'Director',
"item_page":'ItemPage',
"keywords":'Keywords',
"manufacturer":'Manufacturer',
"maximum_price":'MaximumPrice',
"merchant_id":'MerchantId',
"minimum_price":'MinimumPrice',
"min_percentage_off":'MinPercentageOff',
"music_label":'MusicLabel',
"orchestra":'Orchestra',
"power":'Power',
"publisher":'Publisher',
"related_item_page":'RelatedItemPage',
"relationship_type":'RelationshipType',
"response_group":'ResponseGroup',
"search_index":'SearchIndex',
"sort":'Sort',
"title":'Title',
"release_date":'ReleaseDate',
"include_reviews_summary":'IncludeReviewsSummary',
"truncate_reviews_at":'TruncateReviewsAt'
}
def __init__(self,
actor=None,
artist=None,
availability=None,
audience_rating=None,
author=None,
brand=None,
browse_node=None,
composer=None,
condition=None,
conductor=None,
director=None,
item_page=None,
keywords=None,
manufacturer=None,
maximum_price=None,
merchant_id=None,
minimum_price=None,
min_percentage_off=None,
music_label=None,
orchestra=None,
power=None,
publisher=None,
related_item_page=None,
relationship_type=None,
response_group=None,
search_index=None,
sort=None,
title=None,
release_date=None,
include_reviews_summary=None,
truncate_reviews_at=None):
"""Constructor for the ItemSearchRequest class"""
# Initialize members of the class
self.actor = actor
self.artist = artist
self.availability = availability
self.audience_rating = audience_rating
self.author = author
self.brand = brand
self.browse_node = browse_node
self.composer = composer
self.condition = condition
self.conductor = conductor
self.director = director
self.item_page = item_page
self.keywords = keywords
self.manufacturer = manufacturer
self.maximum_price = maximum_price
self.merchant_id = merchant_id
self.minimum_price = minimum_price
self.min_percentage_off = min_percentage_off
self.music_label = music_label
self.orchestra = orchestra
self.power = power
self.publisher = publisher
self.related_item_page = related_item_page
self.relationship_type = relationship_type
self.response_group = response_group
self.search_index = search_index
self.sort = sort
self.title = title
self.release_date = release_date
self.include_reviews_summary = include_reviews_summary
self.truncate_reviews_at = truncate_reviews_at
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
actor = dictionary.get('Actor')
artist = dictionary.get('Artist')
availability = dictionary.get('Availability')
audience_rating = dictionary.get('AudienceRating')
author = dictionary.get('Author')
brand = dictionary.get('Brand')
browse_node = dictionary.get('BrowseNode')
composer = dictionary.get('Composer')
condition = dictionary.get('Condition')
conductor = dictionary.get('Conductor')
director = dictionary.get('Director')
item_page = dictionary.get('ItemPage')
keywords = dictionary.get('Keywords')
manufacturer = dictionary.get('Manufacturer')
maximum_price = dictionary.get('MaximumPrice')
merchant_id = dictionary.get('MerchantId')
minimum_price = dictionary.get('MinimumPrice')
min_percentage_off = dictionary.get('MinPercentageOff')
music_label = dictionary.get('MusicLabel')
orchestra = dictionary.get('Orchestra')
power = dictionary.get('Power')
publisher = dictionary.get('Publisher')
related_item_page = dictionary.get('RelatedItemPage')
relationship_type = dictionary.get('RelationshipType')
response_group = dictionary.get('ResponseGroup')
search_index = dictionary.get('SearchIndex')
sort = dictionary.get('Sort')
title = dictionary.get('Title')
release_date = dictionary.get('ReleaseDate')
include_reviews_summary = dictionary.get('IncludeReviewsSummary')
truncate_reviews_at = dictionary.get('TruncateReviewsAt')
# Return an object of this model
return cls(actor,
artist,
availability,
audience_rating,
author,
brand,
browse_node,
composer,
condition,
conductor,
director,
item_page,
keywords,
manufacturer,
maximum_price,
merchant_id,
minimum_price,
min_percentage_off,
music_label,
orchestra,
power,
publisher,
related_item_page,
relationship_type,
response_group,
search_index,
sort,
title,
release_date,
include_reviews_summary,
truncate_reviews_at)
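# A minimal usage sketch (not part of the generated file): from_dictionary expects the
# API-side key names listed in `_names` (e.g. 'Keywords', 'SearchIndex'); the values here
# are made up.
def _example_item_search_request():
    request = ItemSearchRequest.from_dictionary({
        'Keywords': 'harry potter',
        'SearchIndex': 'Books',
        'ItemPage': 1,
    })
    return request.keywords, request.search_index, request.item_page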
|
from student import *
"""
需求:
1.能够存储数据
数据文件(student.data),数据格式:list
2. 功能系统
增删改查,显示全部,保存数据
"""
class ManagerSystem(object):
"""
功能系统循环使用,用户输入不同序号执行不同功能。
"""
# 初始化
def __init__(self):
# 存储数据的列表
self.student_list = []
# 1. Entry function, executed once the program starts
def run(self):
"""
Load the data
Show the feature menu
Read a feature number from the user
Run the feature matching that number
"""
# Load the student records
self.load_students()
while True:
# Show the feature menu
self.show_menu()
# Read a feature number from the user
menu_num = int(input("Enter a feature number: "))
# Run the feature matching that number
if menu_num == 1:
self.add_student()
elif menu_num == 2:
self.del_student()
elif menu_num == 3:
self.modify_student()
elif menu_num == 4:
self.search_student()
elif menu_num == 5:
self.show_student()
elif menu_num == 6:
self.save_student()
elif menu_num == 7:
break
# 2. Feature functions
# 2.1 Show the feature menu -- only prints the number/feature mapping and uses no object data,
# so each object does not need its own copy -- static to save resources
@staticmethod
def show_menu():
print("-" * 20)
print("Please choose one of the following:")
print("1: add a student")
print("2: delete a record")
print("3: modify a record")
print("4: search records")
print("5: show all records")
print("6: save records")
print("7: quit")
print("-" * 20)
# 2.2 Add a student
def add_student(self):
# 1. Read name, gender and phone from the user
name = input("Enter a name: ")
gender = input("Gender: ")
tel = input("Phone: ")
# 2. Create a student object
student = Student(name, gender, tel)
# 3. Append the object to the student list
self.student_list.append(student)
print(self.student_list)
print(student)
# 2.3 Delete a student
def del_student(self):
# 1. Read the name of the student to delete
del_name = input('Enter the name of the student to delete: ')
# 2. If the name exists, delete the record; otherwise report that there is no such student
for i in self.student_list:
if del_name == i.name:
self.student_list.remove(i)
break
# The loop finished normally, so nothing was deleted
else:
print("No such student")
# Print the student list to verify the deletion
print(self.student_list)
# 2.4 Modify a student record
def modify_student(self):
# 1. Read the student name from the user
modify_name = input('Enter the name of the student to modify: ')
# 2. Walk the list; if the student exists, update the name, gender and phone, otherwise report it
for i in self.student_list:
if i.name == modify_name:
i.name = input('name:')
i.gender = input('gender:')
i.tel = input('tel:')
print('Record updated. Name: {}, gender: {}, phone: {}'.format(i.name, i.gender, i.tel))
break
else:
print('Student does not exist')
# 2.5 Search for a record
def search_student(self):
# 1. Read the student name from the user
search_name = input('Enter the name of the student to search for: ')
# 2. Walk the student list; if the student exists, print the record, otherwise report it
for i in self.student_list:
if search_name == i.name:
print('name:{}, gender:{}, tel:{}'.format(i.name, i.gender, i.tel))
break
else:
print('Student does not exist')
# 2.6 Show all student records
def show_student(self):
# 1. Print the table header
print('Name\tGender\tPhone')
# 2. Walk the list and print every record
for i in self.student_list:
print('{}\t{}\t{}'.format(i.name, i.gender, i.tel))
# 2.7 Save the records
def save_student(self):
# 1. Open the file
data_file = open('student.data', 'w')
# 2. Write the student records
# 2.1 Convert the student objects into dictionaries
new_list = [i.__dict__ for i in self.student_list]
# 2.2 Only strings can be written to the file
data_file.write(str(new_list))
# 3. Close the file
data_file.close()
# 2.8 Load the student records
def load_students(self):
# 1. Open the file: try to open the record file in 'r' mode; if that fails, create it with 'w'
try:
f = open('student.data', 'r')
except FileNotFoundError:
f = open('student.data', 'w')
# 2. Read the file and convert the string to dictionaries, then the dictionaries to objects
else:
data = f.read()
new_list = eval(data)
self.student_list = [Student(i['name'], i['gender'], i['tel']) for i in new_list]
# 3. Close the file
finally:
f.close()
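# Entry-point sketch (assumed, not part of the original file): the Student class comes from
# the `student` module imported above.
if __name__ == '__main__':
    ManagerSystem().run()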
|
# from itertools import chain, islice
import abc
from kipoiseq.extractors import CDSFetcher, UTRFetcher
from kipoiseq.dataclasses import Interval, Variant
from kipoiseq.transforms.functional import translate
from kipoiseq.extractors.multi_interval import (
GenericMultiIntervalSeqExtractor,
BaseMultiIntervalVCFSeqExtractor,
SingleVariantExtractorMixin,
SingleSeqExtractorMixin,
)
from kipoiseq.extractors.fasta import FastaStringExtractor
from kipoiseq.extractors.vcf import MultiSampleVCF
from kipoiseq.extractors.vcf_matching import SingleVariantMatcher
from typing import List, Union
import logging
log = logging.getLogger(__name__)
__all__ = [
"cut_transcript_seq",
"UTRSeqExtractor",
"TranscriptSeqExtractor",
"ProteinSeqExtractor",
"TranscriptVCFSeqExtractor",
"ProteinVCFSeqExtractor",
"SingleSeqProteinVCFSeqExtractor",
"SingleVariantProteinVCFSeqExtractor",
]
class UTRSeqExtractor(GenericMultiIntervalSeqExtractor):
def __init__(
self,
gtf_file,
fasta_file,
feature_type="5UTR",
infer_from_cds=False,
on_error_warn=True,
):
"""
Reference sequence extractor for UTR's
:param fasta_file: fasta file for reference sequence input
:param gtf_file: path to the GTF file
:param feature_type: type of the feature that will be filtered for. In general '5UTR' or '3UTR'.
:param infer_from_cds: Substract the CDS from the exon regions to infer the UTR regions.
Will use 'feature_type' to decide whether '5UTR' or '3UTR' should be returned.
:param on_error_warn: print warning instead of throwing an error
"""
self.fasta_file = str(fasta_file)
self.gtf_file = str(gtf_file)
utr_fetcher = UTRFetcher(self.gtf_file, feature_type=feature_type, infer_from_cds=infer_from_cds,
on_error_warn=on_error_warn)
extractor = FastaStringExtractor(self.fasta_file, use_strand=False)
super().__init__(
extractor=extractor,
interval_fetcher=utr_fetcher
)
@property
def df(self):
return self.extractor.df
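# Usage sketch (file paths and the transcript id below are illustrative placeholders,
# assuming the get_seq() API inherited from GenericMultiIntervalSeqExtractor):
#   utr5 = UTRSeqExtractor("annotation.gtf", "genome.fa", feature_type="5UTR")
#   seq = utr5.get_seq("ENST00000000001")  # reference 5'UTR sequence for one transcript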
def cut_transcript_seq(seq: str, tag: str):
"""
Some sequences have length % 3 != 0 because they have an ambiguous
start and/or end. In that case they are trimmed until length % 3 == 0.
Sequences that have both an ambiguous start and end have no solution yet.
:param seq: DNA sequence of the current protein
:param tag: tags containing information about an ambiguous start and/or end
:return: corrected DNA sequence with length % 3 == 0;
if the start and end are both ambiguous, or no tags are provided but length % 3 != 0,
seq = 'NNN'
"""
# if not tag:
# return seq
if "cds_end_NF" in tag and "cds_start_NF" not in tag:
# remove suffix
seq_modulo = len(seq) % 3
if seq_modulo != 0:
seq = seq[:-seq_modulo]
if seq[-3:] in ["TAA", "TAG", "TGA"]:
seq = seq[:-3]
elif "cds_end_NF" not in tag and "cds_start_NF" in tag and len(seq) % 3 != 0:
# remove prefix
seq_modulo = len(seq) % 3
if seq_modulo != 0:
seq = seq[seq_modulo:]
seq = "XXX" + seq
elif "cds_end_NF" in tag and "cds_start_NF" in tag:
log.warning("Ambiguous start and end! Skip seq!")
seq = "NNN" # NNN will be translated as empty string
elif "cds_end_NF" not in tag and "cds_start_NF" not in tag and len(seq) % 3 != 0:
log.warning("No tags for ambiguous start and end, but len % 3 != 0. Skip seq!")
seq = "NNN" # NNN will be translated as empty string
return seq
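# Illustrative behaviour of cut_transcript_seq (made-up sequences, not from the test suite):
#   cut_transcript_seq("ATGGCCTGAA", "cds_end_NF")            -> "ATGGCC"    (trailing base and stop codon removed)
#   cut_transcript_seq("TGGCCAA", "cds_start_NF")             -> "XXXGGCCAA" (leading base removed, 'XXX' prefix added)
#   cut_transcript_seq("ATGGCCA", "cds_start_NF,cds_end_NF")  -> "NNN"       (both ends ambiguous, sequence skipped)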
# TODO: documentation
class TranscriptSeqExtractor(GenericMultiIntervalSeqExtractor):
def __init__(self, gtf_file, fasta_file):
self.fasta_file = str(fasta_file)
self.gtf_file = str(gtf_file)
cds_fetcher = CDSFetcher(self.gtf_file)
extractor = FastaStringExtractor(self.fasta_file, use_strand=False)
super().__init__(
extractor=extractor,
interval_fetcher=cds_fetcher
)
@property
def df(self):
return self.extractor.df
@property
def cds(self):
return self.extractor.df
@classmethod
def _prepare_seq(
cls,
seqs: List[str],
intervals: List[Interval],
reverse_complement: Union[str, bool],
# **kwargs
) -> str:
"""
Prepare the final DNA sequence, which is subsequently
translated into an amino acid sequence
:param seqs: current dna sequence
:param intervals: the list of intervals corresponding to the sequence snippets
:param reverse_complement: should the dna be reverse-complemented?
:return: prepared dna sequence ready for translation into amino acid sequence
"""
seq = super()._prepare_seq(
seqs=seqs,
intervals=intervals,
reverse_complement=reverse_complement,
)
tag = intervals[0].attrs["tag"]
seq = cut_transcript_seq(seq, tag)
return seq
def get_protein_seq(self, transcript_id: str):
"""
Extract amino acid sequence for given transcript_id
:param transcript_id:
:return: amino acid sequence
"""
return translate(self.get_seq(transcript_id), hg38=True)
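# Usage sketch (paths and transcript id are placeholders, not taken from this repo):
#   ts = TranscriptSeqExtractor("annotation.gtf", "genome.fa")
#   dna = ts.get_seq("ENST00000000001")               # spliced CDS, trimmed by cut_transcript_seq
#   protein = ts.get_protein_seq("ENST00000000001")   # the same sequence translated to amino acids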
class ProteinSeqExtractor(TranscriptSeqExtractor):
@classmethod
def _prepare_seq(cls, *args, **kwargs):
"""
Prepare the dna sequence and translate it into amino acid sequence
:param seqs: current dna sequence
:param intervals: the list of intervals corresponding to the sequence snippets
:param reverse_complement: should the dna be reverse-complemented?
:return: amino acid sequence
"""
return translate(super()._prepare_seq(*args, **kwargs), hg38=True)
class TranscriptVCFSeqExtractor(BaseMultiIntervalVCFSeqExtractor, metaclass=abc.ABCMeta):
@classmethod
def _prepare_seq(
cls,
seqs: List[str],
intervals: List[Interval],
reverse_complement: Union[str, bool],
# **kwargs
) -> str:
"""
Prepare the DNA sequence of the final variant, which is subsequently
translated into an amino acid sequence
:param seqs: current dna sequence
:param intervals: the list of intervals corresponding to the sequence snippets
:param reverse_complement: should the dna be reverse-complemented?
:return: prepared dna sequence ready for translation into amino acid sequence
"""
seq = super()._prepare_seq(
seqs=seqs,
intervals=intervals,
reverse_complement=reverse_complement,
)
tag = intervals[0].attrs["tag"]
seq = cut_transcript_seq(seq, tag)
return seq
class ProteinVCFSeqExtractor(TranscriptVCFSeqExtractor, metaclass=abc.ABCMeta):
def __init__(self, gtf_file, fasta_file, vcf_file):
self.gtf_file = str(gtf_file)
self.fasta_file = str(fasta_file)
self.vcf_file = str(vcf_file)
cds_fetcher = CDSFetcher(self.gtf_file)
self.cds = cds_fetcher.df
reference_seq = FastaStringExtractor(self.fasta_file)
multi_sample_VCF = MultiSampleVCF(self.vcf_file)
# dataframe to pyranges
import pyranges
# match variant with transcript_id
variant_matcher = SingleVariantMatcher(
self.vcf_file,
pranges=pyranges.PyRanges(cds_fetcher.df.reset_index())
)
super().__init__(
interval_fetcher=cds_fetcher,
reference_seq_extractor=reference_seq,
variant_matcher=variant_matcher,
multi_sample_VCF=multi_sample_VCF,
)
@classmethod
def _prepare_seq(cls, *args, **kwargs):
"""
Prepare the dna sequence and translate it into amino acid sequence
:param seqs: current dna sequence
:param intervals: the list of intervals corresponding to the sequence snippets
:param reverse_complement: should the dna be reverse-complemented?
:return: amino acid sequence
"""
return translate(super()._prepare_seq(*args, **kwargs), hg38=True)
def _filter_snv(self, variants):
for variant in variants:
if len(variant.ref) == len(variant.alt) == 1:  # only SNVs supported
yield variant
elif len(variant.ref) == len(variant.alt) > 1:
log.warning('Current version of extractor works only for len(variant.ref)'
' == len(variant.alt) == 1, but the len was: ' + str(len(variant.alt)))
else:
log.warning('Current version of extractor ignores indel'
' to avoid shift in frame')
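# Illustrative behaviour (hypothetical variants): a substitution with
# len(ref) == len(alt) == 1, e.g. A>G, is yielded unchanged; a multi-base
# substitution such as AT>GC is dropped with a length warning; and an indel
# such as A>AT is ignored to avoid introducing a frameshift.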
class SingleSeqProteinVCFSeqExtractor(SingleSeqExtractorMixin, ProteinVCFSeqExtractor):
pass
class SingleVariantProteinVCFSeqExtractor(SingleVariantExtractorMixin, ProteinVCFSeqExtractor):
pass
|
import scapy
import scapy.fields
import scapy.volatile
from scapy import config as scapy_conf
from scapy import arch as scapy_arch
from scapy.layers import l2, inet, dhcp
from scapy import sendrecv
import codecs
import concurrent.futures
import asyncio
import logging
import threading
from utils import waiter
# This conf is needed to make dhcp requests, so that responses
# will not be checked against our real IP address
scapy_conf.conf.checkIPaddr = False
class DHCPRequestor(object):
''' The purpose of this class is to work around sysadmins who will not
give us access to the DHCP server. We need to guess the VM's IP in advance to return
to the client what the IP address of the VM will be after it is created,
so we take the VM's MAC address and its name and send a DHCP request.. nasty stuff
'''
def __init__(self, net_iface, loop, verbose=False, dhcp_timeout_sec=10):
self._loop = loop
self._net_iface = net_iface
self._real_mac = scapy_arch.get_if_hwaddr(self._net_iface)
self._thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=10)
self._dhcp_timeout_sec = dhcp_timeout_sec
self._verbose = verbose
@staticmethod
def _dhcp_reply_info(dhcp_reply):
bootp = dhcp_reply.getlayer('BOOTP')
options_list = bootp.payload.getfieldval('options')
result = {"ip" : bootp.yiaddr}
for option in options_list:
if type(option) is tuple:
result[option[0]] = option[1]
return result
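# Illustrative result (addresses made up): for a DHCP ACK this typically looks like
#   {'ip': '192.168.122.50', 'message-type': 5, 'server_id': '192.168.122.1',
#    'lease_time': 3600, 'subnet_mask': '255.255.255.0'}
# i.e. the offered address from BOOTP.yiaddr plus every tuple-valued DHCP option.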
def _dhcp_request(self, mac_raw, requested_ip, xid_cookie=0, server_id="0.0.0.0", timeout_sec=10):
logging.debug(f"Sending dhcp request for {requested_ip} cookie {xid_cookie} server id {server_id} net {self._net_iface}")
broadcast_flag = scapy.fields.FlagValue(0b1000000000000000, "???????????????B")
dhcp_options = [("message-type", "request")]
if server_id is not None:
dhcp_options.append(("server_id", server_id))
dhcp_options.extend([("requested_addr", requested_ip), ("param_req_list", 0), "end"])
dhcp_request = l2.Ether(src=self._real_mac, dst="ff:ff:ff:ff:ff:ff") / \
inet.IP(src="0.0.0.0", dst="255.255.255.255") / \
inet.UDP(sport=68, dport=67) / \
dhcp.BOOTP(chaddr=mac_raw, xid=xid_cookie, flags=broadcast_flag) / \
dhcp.DHCP(options=dhcp_options)
# send request, wait for ack
dhcp_reply = sendrecv.srp1(dhcp_request, iface=self._net_iface, verbose=self._verbose, timeout=timeout_sec)
if dhcp_reply is None:
raise TimeoutError(f"DHCP request timeout on net {self._net_iface}")
reply = DHCPRequestor._dhcp_reply_info(dhcp_reply)
if dhcp.DHCPTypes[reply['message-type']] != 'ack':
raise Exception("Failed to get ack %s" % reply)
return reply
@staticmethod
def _server_id_from_offer(dhcp_reply):
# Last option is the "end" option it is not a tuple
reply_options = dhcp_reply.payload.fields['options'][:-1]
options_dict = {option[0] : option[1] for option in reply_options}
return options_dict['server_id']
def _do_request_lease(self, mac_address, ip=None, timeout_sec=10):
logging.debug(f"Requesting lease for mac {mac_address} ip {ip} iface {self._net_iface}")
mac_raw = codecs.decode(mac_address.replace(':', ''), 'hex')
if ip is None:
broadcast_flag = scapy.fields.FlagValue(0b1000000000000000, "???????????????B")
dhcp_discover = l2.Ether(src=self._real_mac, dst='ff:ff:ff:ff:ff:ff') / \
inet.IP(src='0.0.0.0', dst='255.255.255.255') / \
inet.UDP(dport=67, sport=68) / \
dhcp.BOOTP(chaddr=mac_raw, xid=scapy.volatile.RandInt(), flags=broadcast_flag) / dhcp.DHCP(options=[('message-type', 'discover'), 'end'])
dhcp_offer = sendrecv.srp1(dhcp_discover, iface=self._net_iface, verbose=self._verbose, timeout=timeout_sec)
if dhcp_offer is None:
raise TimeoutError(f"Timeout. failed to get offer for mac {mac_address} iface: {self._net_iface}")
ip = dhcp_offer[dhcp.BOOTP].yiaddr
server_id = DHCPRequestor._server_id_from_offer(dhcp_offer[dhcp.BOOTP])
xid_cookie = dhcp_offer[dhcp.BOOTP].xid
else:
server_id = None
xid_cookie = 0
return self._dhcp_request(mac_raw, ip, xid_cookie, server_id, timeout_sec=timeout_sec)
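# The flow above is the standard DORA exchange: when no ip is supplied we first
# broadcast a DISCOVER and take the OFFERed address, server id and xid cookie,
# then confirm them with a REQUEST and expect an ACK; when an ip is supplied we
# skip straight to the REQUEST.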
def _request_lease(self, mac_address, ip=None):
dhcp_operation_timeout_sec = 3
return waiter.wait_for_predicate_nothrow(lambda: self._do_request_lease(mac_address, ip=ip, timeout_sec=dhcp_operation_timeout_sec),
timeout=self._dhcp_timeout_sec, exception_cls=TimeoutError)
async def request_lease(self, mac, ip=None):
lease_info = await self._loop.run_in_executor(self._thread_pool, lambda: self._request_lease(mac, ip))
return lease_info['ip']
async def release_lease(self, mac):
pass
class LibvirtDHCPAllocator(object):
def __init__(self, loop, libvirt_wrapper, network_name):
self._libvirt = libvirt_wrapper
self._net_name = network_name
self._thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=10)
self._loop = loop
self._ip_allocation_lock = threading.Lock()
def _allocate_ip_sync(self, mac, ip=None):
with self._ip_allocation_lock:
dhcp_info = self._libvirt.get_network_dhcp_info(self._net_name)
# Check if there are no free ips .. raise
if len(dhcp_info['hosts']) == 0:
raise Exception(f'IP range for network {self._net_name} is empty')
# If an ip was requested, check that it is in the range
if ip is not None:
if ip not in dhcp_info['hosts']:
raise Exception(f"Requested ip {ip} not in dhcp range {dhcp_info['hosts']}")
ip_candidate = ip
else:
ip_candidate = dhcp_info['hosts'].pop()
logging.debug(f"Requesting least mac {mac} ip {ip_candidate}")
self._libvirt.add_dhcp_entry(self._net_name, ip_candidate, mac)
return str(ip_candidate)
async def request_lease(self, mac, ip=None):
lease_info = await self._loop.run_in_executor(self._thread_pool, lambda: self._allocate_ip_sync(mac, ip))
return lease_info
async def release_lease(self, mac):
await self._loop.run_in_executor(self._thread_pool, lambda: self._libvirt.remove_dhcp_entry(self._net_name, mac))
class DHCPManager(object):
def __init__(self, handlers):
self._handlers = handlers
async def allocate_ip(self, net_info):
net_type = net_info['mode']
logging.debug(f"Allocating ip for net {net_info}")
mac = net_info['macaddress']
ip = net_info.get('ip', None)
return await self._handlers[net_type].request_lease(mac, ip)
async def deallocate_ip(self, net_info):
net_type = net_info['mode']
logging.debug(f"Releasig lease for net {net_info}")
mac = net_info['macaddress']
return await self._handlers[net_type].release_lease(mac)
async def reallocate_ip(self, net_info):
logging.debug(f"Reallocate ip net {net_info}")
try:
await self.deallocate_ip(net_info)
except Exception:
# We don't care if deallocating the ip failed; we only do it to make sure the lease is released
pass
await self.allocate_ip(net_info)
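# Expected net_info shape (illustrative values; keys inferred from the accessors above,
# and the 'mode' string must match a key of the handlers dict passed to DHCPManager):
#   {'mode': 'bridge', 'macaddress': '52:54:00:12:34:56', 'ip': '192.168.122.10'}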
if __name__ == '__main__':
import argparse
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
parser = argparse.ArgumentParser()
parser.add_argument("--iface", help="Name of the interface")
parser.add_argument("--ip", help="IP to ask", required=False, default=None)
parser.add_argument("mac", help="Mac address to request")
args = parser.parse_args()
loop = asyncio.get_event_loop()
client = DHCPRequestor(args.iface, loop)
print(loop.run_until_complete(client.request_lease(args.mac, args.ip)))
|
<filename>ironic_inspector/pxe_filter/iptables.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import re
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
from ironic_inspector.common import ironic as ir_utils
from ironic_inspector import node_cache
from ironic_inspector.pxe_filter import base as pxe_filter
CONF = cfg.CONF
LOG = log.getLogger(__name__)
_EMAC_REGEX = 'EMAC=([0-9a-f]{2}(:[0-9a-f]{2}){5}) IMAC=.*'
def _should_enable_dhcp():
"""Check whether we should enable DHCP at all.
We won't even open up our DHCP port if no nodes are on introspection and
node_not_found_hook is not set.
"""
return (node_cache.introspection_active() or
CONF.processing.node_not_found_hook is not None)
class IptablesFilter(pxe_filter.BaseFilter):
"""A PXE boot filtering interface implementation."""
def __init__(self):
super(IptablesFilter, self).__init__()
self.blacklist_cache = None
self.enabled = True
self.interface = CONF.iptables.dnsmasq_interface
self.chain = CONF.iptables.firewall_chain
self.new_chain = self.chain + '_temp'
# Determine arguments used for pxe filtering, we only support 4 and 6
# at this time.
if CONF.iptables.ip_version == '4':
self._cmd_iptables = 'iptables'
self._dhcp_port = '67'
else:
self._cmd_iptables = 'ip6tables'
self._dhcp_port = '547'
self.base_command = ('sudo', 'ironic-inspector-rootwrap',
CONF.rootwrap_config, self._cmd_iptables)
def reset(self):
self.enabled = True
self.blacklist_cache = None
for chain in (self.chain, self.new_chain):
try:
self._clean_up(chain)
except Exception as e:
LOG.exception('Encountered exception resetting filter: %s', e)
super(IptablesFilter, self).reset()
@pxe_filter.locked_driver_event(pxe_filter.Events.initialize)
def init_filter(self):
# -w flag makes iptables wait for xtables lock, but it's not supported
# everywhere yet
try:
cmd = self.base_command + ('-w', '-h')
processutils.execute(*cmd)
except processutils.ProcessExecutionError:
LOG.warning('iptables does not support -w flag, please update '
'it to at least version 1.4.21')
else:
self.base_command += ('-w',)
self._clean_up(self.chain)
# Not really needed, but helps to validate that we have access to
# iptables
self._iptables('-N', self.chain)
LOG.debug('The iptables filter was initialized')
@pxe_filter.locked_driver_event(pxe_filter.Events.sync)
def sync(self, ironic):
"""Sync firewall filter rules for introspection.
Gives access to the PXE boot port for any machine, except for those whose
MAC is registered in Ironic and is not on introspection right now.
This function is called both from the introspection initialization code and
from a periodic task. It is supposed to be resistant to
unexpected iptables state.
``init()`` function must be called once before any call to this
function. This function is using ``eventlet`` semaphore to serialize
access from different green threads.
:param ironic: an ironic client instance.
:returns: nothing.
"""
if not _should_enable_dhcp():
self._disable_dhcp()
return
to_blacklist = _get_blacklist(ironic)
if to_blacklist == self.blacklist_cache:
LOG.debug('Not updating iptables - no changes in MAC list %s',
to_blacklist)
return
LOG.debug('Blacklisting active MACs %s', to_blacklist)
with self._temporary_chain(self.new_chain, self.chain):
# Force update on the next iteration if this attempt fails
self.blacklist_cache = None
# - Blacklist active macs, so that nova can boot them
for mac in to_blacklist:
self._iptables('-A', self.new_chain, '-m', 'mac',
'--mac-source', mac, '-j', 'DROP')
# - Whitelist everything else
self._iptables('-A', self.new_chain, '-j', 'ACCEPT')
# Cache result of successful iptables update
self.enabled = True
self.blacklist_cache = to_blacklist
LOG.debug('The iptables filter was synchronized')
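# For illustration (MAC made up, and assuming the firewall chain option is left at a
# value like 'ironic-inspector'), a sync() with to_blacklist == ['52:54:00:aa:bb:cc']
# issues roughly:
#   iptables -w -N ironic-inspector_temp
#   iptables -w -A ironic-inspector_temp -m mac --mac-source 52:54:00:aa:bb:cc -j DROP
#   iptables -w -A ironic-inspector_temp -j ACCEPT
# followed by the chain swap performed in _temporary_chain().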
@contextlib.contextmanager
def _temporary_chain(self, chain, main_chain):
"""Context manager to operate on a temporary chain."""
# Clean up a bit to account for possible troubles on previous run
self._clean_up(chain)
self._iptables('-N', chain)
yield
# Swap chains
self._iptables('-I', 'INPUT', '-i', self.interface, '-p', 'udp',
'--dport', self._dhcp_port, '-j', chain)
self._iptables('-D', 'INPUT', '-i', self.interface, '-p', 'udp',
'--dport', self._dhcp_port, '-j', main_chain,
ignore=True)
self._iptables('-F', main_chain, ignore=True)
self._iptables('-X', main_chain, ignore=True)
self._iptables('-E', chain, main_chain)
def _iptables(self, *args, **kwargs):
# NOTE(dtantsur): -w flag makes it wait for xtables lock
cmd = self.base_command + args
ignore = kwargs.pop('ignore', False)
LOG.debug('Running iptables %s', args)
try:
processutils.execute(*cmd)
except processutils.ProcessExecutionError as exc:
if ignore:
LOG.debug('Ignoring failed iptables %(args)s: %(error)s',
{'args': args, 'error': exc})
else:
LOG.error('iptables %(iptables)s failed: %(error)s',
{'iptables': args, 'error': exc})
raise
def _clean_up(self, chain):
self._iptables('-D', 'INPUT', '-i', self.interface, '-p', 'udp',
'--dport', self._dhcp_port, '-j', chain,
ignore=True)
self._iptables('-F', chain, ignore=True)
self._iptables('-X', chain, ignore=True)
def _disable_dhcp(self):
"""Disable DHCP completely."""
if not self.enabled:
LOG.debug('DHCP is already disabled, not updating')
return
LOG.debug('No nodes on introspection and node_not_found_hook is '
'not set - disabling DHCP')
self.blacklist_cache = None
with self._temporary_chain(self.new_chain, self.chain):
# Blacklist everything
self._iptables('-A', self.new_chain, '-j', 'REJECT')
self.enabled = False
def _ib_mac_to_rmac_mapping(ports):
"""Update port InfiniBand MAC address to EthernetOverInfiniBand MAC
On InfiniBand deployment we need to map between the baremetal host
InfiniBand MAC to the EoIB MAC. The EoIB MAC addresses are learned
automatically by the EoIB interfaces and those MACs are recorded to the
/sys/class/net/<ethoib_interface>/eth/neighs file. The InfiniBand GUID is
taken from the ironic port client-id extra attribute. The InfiniBand GUID
is the last 8 bytes of the client-id. The file format allows mapping the
GUID to the EoIB MAC. The filter rules based on those MACs get applied during a
driver.update() call.
:param ports: list of ironic ports
:returns: Nothing.
"""
ethoib_interfaces = CONF.iptables.ethoib_interfaces
for interface in ethoib_interfaces:
neighs_file = (
os.path.join('/sys/class/net', interface, 'eth/neighs'))
try:
with open(neighs_file, 'r') as fd:
data = fd.read()
except IOError:
LOG.error('Interface %s is not Ethernet Over InfiniBand; '
'Skipping ...', interface)
continue
for port in ports:
client_id = port.extra.get('client-id')
if client_id:
# Note(moshele): The last 8 bytes in the client-id is
# the baremetal node InfiniBand GUID
guid = client_id[-23:]
p = re.compile(_EMAC_REGEX + guid)
match = p.search(data)
if match:
port.address = match.group(1)
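# Worked example (client-id is illustrative): for an ironic port with
#   extra = {'client-id': 'ff:00:00:00:00:00:02:00:00:02:c9:00:fa:ca:de:01:23:45:67:89'}
# the GUID is client_id[-23:] == 'fa:ca:de:01:23:45:67:89' (the last 8 colon-separated
# bytes), and the compiled pattern _EMAC_REGEX + guid picks the matching EoIB MAC out
# of the neighs file, which then replaces port.address.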
def _get_blacklist(ironic):
ports = [port for port in
ir_utils.call_with_retries(ironic.port.list, limit=0,
fields=['address', 'extra'])
if port.address not in node_cache.active_macs()]
# Rewrite InfiniBand port addresses to their EoIB MACs before collecting the addresses
_ib_mac_to_rmac_mapping(ports)
return [port.address for port in ports]
|
<gh_stars>1-10
########################################################
# Autogenerated by tutorial/utils/process_floorplan.py #
########################################################
from siliconcompiler.core import Chip
from siliconcompiler.floorplan import Floorplan
import math
GPIO = 'sky130_ef_io__gpiov2_pad_wrapped'
VDD = 'sky130_ef_io__vccd_hvc_pad'
VDDIO = 'sky130_ef_io__vddio_hvc_pad'
VSS = 'sky130_ef_io__vssd_hvc_pad'
VSSIO = 'sky130_ef_io__vssio_hvc_pad'
CORNER = 'sky130_ef_io__corner_pad'
FILL_CELLS = ['sky130_ef_io__com_bus_slice_1um',
'sky130_ef_io__com_bus_slice_5um',
'sky130_ef_io__com_bus_slice_10um',
'sky130_ef_io__com_bus_slice_20um']
RAM = 'sky130_sram_2kbyte_1rw1r_32x512_8'
def configure_chip(design):
chip = Chip()
chip.target('skywater130')
chip.set('design', design)
libname = 'ram'
chip.add('library', libname, 'nldm', 'typical', 'lib', 'asic/sky130/ram/sky130_sram_2kbyte_1rw1r_32x512_8_TT_1p8V_25C.lib')
chip.add('library', libname, 'lef', 'asic/sky130/ram/sky130_sram_2kbyte_1rw1r_32x512_8.lef')
chip.add('library', libname, 'gds', 'asic/sky130/ram/sky130_sram_2kbyte_1rw1r_32x512_8.gds')
chip.add('asic', 'macrolib', libname)
chip.set('library', libname, 'type', 'component')
libname = 'io'
chip.add('library', libname, 'nldm', 'typical', 'lib', 'asic/sky130/io/sky130_dummy_io.lib')
chip.set('library', libname, 'lef', 'asic/sky130/io/sky130_ef_io.lef')
# Need both GDS files: "ef" relies on "fd"
chip.add('library', libname, 'gds', 'asic/sky130/io/sky130_ef_io.gds')
chip.add('library', libname, 'gds', 'asic/sky130/io/sky130_fd_io.gds')
chip.add('asic', 'macrolib', libname)
chip.set('library', libname, 'type', 'component')
chip.set('showtool', 'def', 'klayout')
chip.set('showtool', 'gds', 'klayout')
return chip
def define_dimensions(fp):
place_w = 4860 * fp.stdcell_width
place_h = 648 * fp.stdcell_height
margin_left = 60 * fp.stdcell_width
margin_bottom = 10 * fp.stdcell_height
core_w = place_w + 2 * margin_left
core_h = place_h + 2 * margin_bottom
# Use math.ceil to ensure that top-level's dimensions are a whole number of
# microns. This implicitly stretches out the top/right margins around the
# placement area a bit.
gpio_h = fp.available_cells[GPIO].height
top_w = math.ceil(core_w + 2 * gpio_h)
top_h = math.ceil(core_h + 2 * gpio_h)
core_w = top_w - 2 * gpio_h
core_h = top_h - 2 * gpio_h
return (top_w, top_h), (core_w, core_h), (place_w, place_h), (margin_left, margin_bottom)
def calculate_even_spacing(fp, pads, distance, start):
n = len(pads)
pads_width = sum(fp.available_cells[pad].width for pad in pads)
spacing = (distance - pads_width) // (n + 1)
pos = start + spacing
io_pos = []
for pad in pads:
io_pos.append((pad, pos))
pos += fp.available_cells[pad].width + spacing
return io_pos
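# Worked example (numbers are illustrative, not from the real padring): three pads of
# width 75um spread over a 1000um edge starting at 0 gives
#   spacing = (1000 - 3*75) // 4 = 193
# so the pads land at x = 193, 461 and 729, leaving roughly equal gaps on both ends.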
def define_io_placement(fp):
we_io = [GPIO] * 5 + [VDD, VSS, VDDIO, VSSIO] + [GPIO] * 4
no_io = [GPIO] * 9 + [VDDIO, VSSIO, VDD, VSS]
ea_io = [GPIO] * 9 + [VDDIO, VSS, VDD, VSSIO]
so_io = [GPIO] * 5 + [VDD, VSS, VDDIO, VSSIO] + [GPIO] * 4
(top_w, top_h), _, _, _ = define_dimensions(fp)
corner_w = fp.available_cells[CORNER].width
corner_h = fp.available_cells[CORNER].height
we_io_pos = calculate_even_spacing(fp, we_io, top_h - corner_h - corner_w, corner_h)
so_io_pos = calculate_even_spacing(fp, so_io, top_w - corner_h - corner_w, corner_w)
# For east and north, we crowd GPIO on the first half of the side to make
# sure we don't run into routing congestion issues due to the RAM in the
# top-right corner.
mid_w = (top_w - corner_h - corner_w) // 2
no_io_pos = (calculate_even_spacing(fp, no_io[:9], mid_w, corner_h) +
calculate_even_spacing(fp, no_io[9:], mid_w, mid_w + corner_h))
mid_h = (top_h - corner_h - corner_w) // 2
ea_io_pos = (calculate_even_spacing(fp, ea_io[:9], mid_h, corner_w) +
calculate_even_spacing(fp, ea_io[9:], mid_h, mid_h + corner_w))
return we_io_pos, no_io_pos, ea_io_pos, so_io_pos
def core_floorplan(fp):
## Set up die area ##
dims = define_dimensions(fp)
_, (core_w, core_h), (place_w, place_h), (margin_left, margin_bottom) = dims
diearea = [(0, 0), (core_w, core_h)]
corearea = [(margin_left, margin_bottom), (place_w + margin_left, place_h + margin_bottom)]
fp.create_diearea(diearea, corearea=corearea)
## Place RAM macro ##
ram_w = fp.available_cells[RAM].width
ram_h = fp.available_cells[RAM].height
ram_x = place_w + margin_left - ram_w
ram_y = place_h + margin_bottom - ram_h - 15 * fp.stdcell_height
instance_name = 'soc.ram.u_mem.gen_sky130.u_impl_sky130.gen32x512.mem'
fp.place_macros([(instance_name, RAM)], ram_x, ram_y, 0, 0, 'N', snap=True)
ram_margin_x = 120 * fp.stdcell_width
ram_margin_y = 20 * fp.stdcell_height
blockage_x = ram_x - ram_margin_x
blockage_y = ram_y - ram_margin_y
blockage_w = ram_w + ram_margin_x
blockage_h = ram_h + ram_margin_y
fp.place_blockage(blockage_x, blockage_y, blockage_w, blockage_h)
## Place pins ##
pins = [
# (name, offset from cell edge, # bit in vector, width of vector)
('din', 75.085, 0, 1), # in
('dout', 19.885, 0, 1), # out
('ie', 41.505, 0, 1), # inp_dis
('oen', 4.245, 0, 1), # oe_n
('tech_cfg', 31.845, 0, 18), # hld_h_n
('tech_cfg', 35.065, 1, 18), # enable_h
('tech_cfg', 38.285, 2, 18), # enable_inp_h
('tech_cfg', 13.445, 3, 18), # enable_vdda_h
('tech_cfg', 16.665, 4, 18), # enable_vswitch_h
('tech_cfg', 69.105, 5, 18), # enable_vddio
('tech_cfg', 7.465, 6, 18), # ib_mode_sel
('tech_cfg', 10.685, 7, 18), # vtrip_sel
('tech_cfg', 65.885, 8, 18), # slow
('tech_cfg', 22.645, 9, 18), # hld_ovr
('tech_cfg', 50.705, 10, 18), # analog_en
('tech_cfg', 29.085, 11, 18), # analog_sel
('tech_cfg', 44.265, 12, 18), # analog_pol
('tech_cfg', 47.485, 13, 18), # dm[0]
('tech_cfg', 56.685, 14, 18), # dm[1]
('tech_cfg', 25.865, 15, 18), # dm[2]
('tech_cfg', 78.305, 16, 18), # tie_lo_esd
('tech_cfg', 71.865, 17, 18), # tie_hi_esd
]
pin_width = 0.28
pin_depth = 1
pin_layer = 'm2'
we_pads, no_pads, ea_pads, so_pads = define_io_placement(fp)
gpio_w = fp.available_cells[GPIO].width
gpio_h = fp.available_cells[GPIO].height
# Filter out GPIO pins
we_gpio_pos = [pos for pad, pos in we_pads if pad == GPIO]
# Constant x position for west side
pin_x = 0
for i, pad_y in enumerate(we_gpio_pos):
pad_y -= gpio_h # account for padring height
for pin, offset, bit, width in pins:
# Construct name based on side, pin name, and bit # in vector
name = f'we_{pin}[{i * width + bit}]'
# Calculate pin position based on cell and offset
pin_y = pad_y + offset
# Place pin!
fp.place_pins([name], pin_x, pin_y, 0, 0, pin_depth, pin_width, pin_layer)
# Repeat the same logic for each of the other 3 sides, with positions/axes
# adjusted accordingly...
no_gpio_pos = [pos for pad, pos in no_pads if pad == GPIO]
pin_y = core_h - pin_depth
for i, pad_x in enumerate(no_gpio_pos):
pad_x -= gpio_h
for pin, offset, bit, width in pins:
name = f'no_{pin}[{i * width + bit}]'
pin_x = pad_x + offset
fp.place_pins([name], pin_x, pin_y, 0, 0, pin_width, pin_depth, pin_layer)
ea_gpio_pos = [pos for pad, pos in ea_pads if pad == GPIO]
pin_x = core_w - pin_depth
for i, pad_y in enumerate(ea_gpio_pos):
pad_y -= gpio_h
for pin, offset, bit, width in pins:
name = f'ea_{pin}[{i * width + bit}]'
pin_y = pad_y + gpio_w - offset - pin_width
fp.place_pins([name], pin_x, pin_y, 0, 0, pin_depth, pin_width, pin_layer)
so_gpio_pos = [pos for pad, pos in so_pads if pad == GPIO]
pin_y = 0
for i, pad_x in enumerate(so_gpio_pos):
pad_x -= gpio_h
for pin, offset, bit, width in pins:
name = f'so_{pin}[{i * width + bit}]'
pin_x = pad_x + gpio_w - offset - pin_width
fp.place_pins([name], pin_x, pin_y, 0, 0, pin_width, pin_depth, pin_layer)
## Place PDN ##
place_pdn(fp, ram_x, ram_y, ram_margin_x)
def place_pdn(fp, ram_x, ram_y, ram_margin):
dims = define_dimensions(fp)
_, (core_w, core_h), (place_w, place_h), (margin_left, margin_bottom) = dims
we_pads, no_pads, ea_pads, so_pads = define_io_placement(fp)
n_vert = 8 # how many vertical straps to place
vwidth = 5 # width of vertical straps in microns
n_hori = 10 # how many horizontal straps to place
hwidth = 5 # width of horizontal straps
vlayer = 'm4' # metal layer for vertical straps
hlayer = 'm5' # metal layer for horizontal straps
# Calculate even spacing for straps
vpitch = ((ram_x - ram_margin - margin_left) - n_vert * vwidth) / (n_vert + 1)
hpitch = (core_h - n_hori * hwidth) / (n_hori + 1)
fp.add_net('_vdd', ['VPWR', 'vccd1'], 'power')
fp.add_net('_vss', ['VGND', 'vssd1'], 'ground')
vss_ring_left = margin_left - 4 * vwidth
vss_ring_bottom = margin_bottom - 4 * hwidth
vss_ring_width = place_w + 9 * vwidth
vss_ring_height = place_h + 9 * hwidth
vss_ring_right = vss_ring_left + vss_ring_width
vss_ring_top = vss_ring_bottom + vss_ring_height
vdd_ring_left = vss_ring_left + 2 * vwidth
vdd_ring_bottom = vss_ring_bottom + 2 * hwidth
vdd_ring_width = vss_ring_width - 4 * vwidth
vdd_ring_height = vss_ring_height - 4 * hwidth
vdd_ring_right = vdd_ring_left + vdd_ring_width
vdd_ring_top = vdd_ring_bottom + vdd_ring_height
fp.place_ring('_vdd', vdd_ring_left, vdd_ring_bottom, vdd_ring_width,
vdd_ring_height, hwidth, vwidth, hlayer, vlayer)
fp.place_ring('_vss', vss_ring_left, vss_ring_bottom, vss_ring_width,
vss_ring_height, hwidth, vwidth, hlayer, vlayer)
# Horizontal stripes
spacing = 2 * (hpitch + hwidth)
bottom = margin_bottom + hpitch
fp.place_wires(['_vdd'] * (n_hori // 2), vdd_ring_left, bottom, 0, spacing,
vdd_ring_width, hwidth, hlayer, shape='stripe')
bottom = margin_bottom + hpitch + (hpitch + hwidth)
fp.place_wires(['_vss'] * (n_hori // 2), vss_ring_left, bottom, 0, spacing,
vss_ring_width, hwidth, hlayer, shape='stripe')
# Vertical stripes
spacing = 2 * (vpitch + vwidth)
left = margin_left + vpitch
fp.place_wires(['_vdd'] * (n_vert // 2), left, vdd_ring_bottom, spacing, 0,
vwidth, vdd_ring_height, vlayer, shape='stripe')
left = margin_left + vpitch + (vpitch + vwidth)
fp.place_wires(['_vss'] * (n_vert // 2), left, vss_ring_bottom, spacing, 0,
vwidth, vss_ring_height, vlayer, shape='stripe')
gpio_h = fp.available_cells[GPIO].height
pow_h = fp.available_cells[VDD].height
# account for GPIO padcells being larger than power padcells
pow_gap = gpio_h - pow_h
pin_width = 23.9
pin_offsets = (0.495, 50.39)
# Place wires/pins connecting power pads to the power ring
for pad_type, y in we_pads:
y -= gpio_h
for offset in pin_offsets:
if pad_type == VDD:
fp.place_wires(['_vdd'], -pow_gap, y + offset, 0, 0,
vdd_ring_left + vwidth + pow_gap, pin_width, 'm3')
fp.place_pins (['_vdd'], 0, y + offset, 0, 0,
vdd_ring_left + vwidth, pin_width, 'm3', use='power')
elif pad_type == VDDIO:
fp.place_pins (['_vddio'], 0, y + offset, 0, 0,
margin_left, pin_width, 'm3')
elif pad_type == VSS:
fp.place_wires(['_vss'], -pow_gap, y + offset, 0, 0,
vss_ring_left + vwidth + pow_gap, pin_width, 'm3')
fp.place_pins(['_vss'], 0, y + offset, 0, 0,
vss_ring_left + vwidth, pin_width, 'm3', use='power')
for pad_type, x in no_pads:
x -= gpio_h
for offset in pin_offsets:
if pad_type == VDD:
fp.place_wires(['_vdd'], x + offset, vdd_ring_top - hwidth, 0, 0,
pin_width, core_h - vdd_ring_top + hwidth + pow_gap, 'm3')
fp.place_pins(['_vdd'], x + offset, vdd_ring_top - hwidth, 0, 0,
pin_width, core_h - vdd_ring_top + hwidth, 'm3', use='power')
elif pad_type == VDDIO:
fp.place_pins(['_vddio'], x + offset, margin_bottom + place_h, 0, 0,
pin_width, core_h - (margin_bottom + place_h), 'm3')
elif pad_type == VSS:
fp.place_wires(['_vss'], x + offset, vss_ring_top - hwidth, 0, 0,
pin_width, core_h - vss_ring_top + hwidth + pow_gap, 'm3')
fp.place_pins(['_vss'], x + offset, vss_ring_top - hwidth, 0, 0,
pin_width, core_h - vss_ring_top + hwidth, 'm3', use='power')
for pad_type, y in ea_pads:
y -= gpio_h
pad_w = fp.available_cells[pad_type].width
for offset in pin_offsets:
if pad_type == VDD:
fp.place_wires(['_vdd'], vdd_ring_right - vwidth, y + pad_w - offset - pin_width, 0, 0,
core_w - vdd_ring_right + vwidth + pow_gap, pin_width, 'm3')
fp.place_pins(['_vdd'], vdd_ring_right - vwidth, y + pad_w - offset - pin_width, 0, 0,
core_w - vdd_ring_right + vwidth, pin_width, 'm3', use='power')
elif pad_type == VDDIO:
fp.place_pins(['_vddio'], margin_left + place_w, y + pad_w - offset - pin_width, 0, 0,
core_w - (margin_left + place_w), pin_width, 'm3')
elif pad_type == VSS:
fp.place_wires(['_vss'], vss_ring_right - vwidth, y + pad_w - offset - pin_width, 0, 0,
core_w - vss_ring_right + vwidth + pow_gap, pin_width, 'm3')
fp.place_pins(['_vss'], vss_ring_right - vwidth, y + pad_w - offset - pin_width, 0, 0,
core_w - vss_ring_right + vwidth, pin_width, 'm3', use='power')
for pad_type, x in so_pads:
x -= gpio_h
pad_w = fp.available_cells[pad_type].width
for offset in pin_offsets:
if pad_type == VDD:
fp.place_wires(['_vdd'], x + pad_w - offset - pin_width, -pow_gap, 0, 0,
pin_width, vdd_ring_bottom + hwidth + pow_gap, 'm3')
fp.place_pins(['_vdd'], x + pad_w - offset - pin_width, 0, 0, 0,
pin_width, vdd_ring_bottom + hwidth, 'm3', use='power')
elif pad_type == VDDIO:
fp.place_pins(['_vddio'], x + pad_w - offset - pin_width, 0, 0, 0,
pin_width, margin_bottom, 'm3')
elif pad_type == VSS:
fp.place_wires(['_vss'], x + pad_w - offset - pin_width, -pow_gap, 0, 0,
pin_width, vss_ring_bottom + hwidth + pow_gap, 'm3')
fp.place_pins(['_vss'], x + pad_w - offset - pin_width, 0, 0, 0,
pin_width, vss_ring_bottom + hwidth, 'm3', use='power')
rows_below_ram = (ram_y - margin_bottom) // fp.stdcell_height
rows_above_ram = len(fp.rows) - rows_below_ram
npwr_below = 1 + math.floor(rows_below_ram / 2)
ngnd_below = math.ceil(rows_below_ram / 2)
npwr_above = 1 + math.floor(rows_above_ram / 2)
ngnd_above = math.ceil(rows_above_ram / 2)
stripe_w = 0.48
spacing = 2 * fp.stdcell_height
bottom = margin_bottom - stripe_w/2
fp.place_wires(['_vdd'] * npwr_below, margin_left, bottom, 0, spacing,
place_w, stripe_w, 'm1', 'followpin')
bottom = margin_bottom - stripe_w/2 + fp.stdcell_height
fp.place_wires(['_vss'] * ngnd_below, margin_left, bottom, 0, spacing,
place_w, stripe_w, 'm1', 'followpin')
bottom = margin_bottom - stripe_w/2 + npwr_below * 2 * fp.stdcell_height
fp.place_wires(['_vdd'] * npwr_above, margin_left, bottom, 0, spacing,
ram_x - 2 * margin_left, stripe_w, 'm1', 'followpin')
bottom = margin_bottom - stripe_w/2 + fp.stdcell_height + ngnd_below * 2 * fp.stdcell_height
fp.place_wires(['_vss'] * ngnd_above, margin_left, bottom, 0, spacing,
ram_x - 2 * margin_left, stripe_w, 'm1', 'followpin')
ram_x = fp.snap(ram_x, fp.stdcell_width)
ram_y = fp.snap(ram_y, fp.stdcell_height)
ram_vdd_pin_bottom = 4.76
ram_vdd_pins_left = (4.76, 676.6)
ram_vdd_pins_width = 6.5 - 4.76
ram_vdd_pins_height = 411.78 - 4.76
for x_offset in ram_vdd_pins_left:
fp.place_wires(['_vdd'], ram_x + x_offset, ram_y + ram_vdd_pin_bottom,
0, 0, ram_vdd_pins_width, ram_vdd_pins_height, 'm4')
ram_vss_pin_bottom = 1.36
ram_vss_pins_left = (1.36, 680)
ram_vss_pins_width = 3.1 - 1.36
ram_vss_pins_height = 415.18 - 1.36
for x_offset in ram_vss_pins_left:
fp.place_wires(['_vss'], ram_x + x_offset, ram_y + ram_vss_pin_bottom,
0, 0, ram_vss_pins_width, ram_vss_pins_height, 'm4')
fp.insert_vias(layers=[('m1', 'm4'), ('m3', 'm4'), ('m3', 'm5'), ('m4', 'm5')])
def top_floorplan(fp):
## Create die area ##
(top_w, top_h), (core_w, core_h), (place_w, place_h), (margin_left, margin_bottom) = define_dimensions(fp)
fp.create_diearea([(0, 0), (top_w, top_h)])
## Place pads ##
we_pads, no_pads, ea_pads, so_pads = define_io_placement(fp)
indices = {}
indices[GPIO] = 0
indices[VDD] = 0
indices[VSS] = 0
indices[VDDIO] = 0
indices[VSSIO] = 0
gpio_h = fp.available_cells[GPIO].height
pow_h = fp.available_cells[VDD].height
corner_w = fp.available_cells[CORNER].width
corner_h = fp.available_cells[CORNER].height
fill_cell_h = fp.available_cells[FILL_CELLS[0]].height
pin_dim = 10
# Calculate where to place pin based on hardcoded GPIO pad pin location
pin_offset_width = (11.2 + 73.8) / 2 - pin_dim / 2
pin_offset_depth = gpio_h - ((102.525 + 184.975) / 2 - pin_dim / 2)
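# For example, with pin_dim = 10: pin_offset_width = (11.2 + 73.8)/2 - 10/2 = 37.5um along
# the pad edge, and pin_offset_depth = gpio_h - ((102.525 + 184.975)/2 - 10/2) = gpio_h - 138.75um,
# i.e. the top-level pin is centred on the GPIO pad's internal pin location and pulled inside the die.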
for pad_type, y in we_pads:
i = indices[pad_type]
indices[pad_type] += 1
if pad_type == GPIO:
pad_name = f'padring.we_pads\\[0\\].i0.padio\\[{i}\\].i0.gpio'
pin_name = f'we_pad[{i}]'
else:
if pad_type == VDD:
pin_name = 'vdd'
elif pad_type == VSS:
pin_name = 'vss'
elif pad_type == VDDIO:
pin_name = 'vddio'
elif pad_type == VSSIO:
pin_name = 'vssio'
pad_name = f'{pin_name}{i}'
fp.place_macros([(pad_name, pad_type)], 0, y, 0, 0, 'W')
fp.place_pins([pin_name], pin_offset_depth, y + pin_offset_width,
0, 0, pin_dim, pin_dim, 'm5')
indices[GPIO] = 0
for pad_type, x in no_pads:
i = indices[pad_type]
indices[pad_type] += 1
if pad_type == GPIO:
pad_name = f'padring.no_pads\\[0\\].i0.padio\\[{i}\\].i0.gpio'
pin_name = f'no_pad[{i}]'
else:
if pad_type == VDD:
pin_name = 'vdd'
elif pad_type == VSS:
pin_name = 'vss'
elif pad_type == VDDIO:
pin_name = 'vddio'
elif pad_type == VSSIO:
pin_name = 'vssio'
pad_name = f'{pin_name}{i}'
pad_h = fp.available_cells[pad_type].height
fp.place_macros([(pad_name, pad_type)], x, top_h - pad_h, 0, 0, 'N')
fp.place_pins([pin_name], x + pin_offset_width, top_h - pin_offset_depth,
0, 0, pin_dim, pin_dim, 'm5')
indices[GPIO] = 0
for pad_type, y in ea_pads:
i = indices[pad_type]
indices[pad_type] += 1
if pad_type == GPIO:
pad_name = f'padring.ea_pads\\[0\\].i0.padio\\[{i}\\].i0.gpio'
pin_name = f'ea_pad[{i}]'
else:
if pad_type == VDD:
pin_name = 'vdd'
elif pad_type == VSS:
pin_name = 'vss'
elif pad_type == VDDIO:
pin_name = 'vddio'
elif pad_type == VSSIO:
pin_name = 'vssio'
pad_name = f'{pin_name}{i}'
pad_h = fp.available_cells[pad_type].height
fp.place_macros([(pad_name, pad_type)], top_w - pad_h, y, 0, 0, 'E')
fp.place_pins([pin_name], top_w - pin_offset_depth, y + pin_offset_width,
0, 0, pin_dim, pin_dim, 'm5')
indices[GPIO] = 0
for pad_type, x in so_pads:
i = indices[pad_type]
indices[pad_type] += 1
if pad_type == GPIO:
pad_name = f'padring.so_pads\\[0\\].i0.padio\\[{i}\\].i0.gpio'
pin_name = f'so_pad[{i}]'
else:
if pad_type == VDD:
pin_name = 'vdd'
elif pad_type == VSS:
pin_name = 'vss'
elif pad_type == VDDIO:
pin_name = 'vddio'
elif pad_type == VSSIO:
pin_name = 'vssio'
pad_name = f'{pin_name}{i}'
fp.place_macros([(pad_name, pad_type)], x, 0, 0, 0, 'S')
fp.place_pins([pin_name], x + pin_offset_width, pin_offset_depth,
0, 0, pin_dim, pin_dim, 'm5')
## Connections to vddio pins ##
pin_width = 23.9
pin_offsets = (0.495, 50.39)
pad_h = fp.available_cells[VDDIO].height
pow_gap = fp.available_cells[GPIO].height - pad_h
# Place wires/pins connecting power pads to the power ring
fp.add_net('_vddio', [], 'power')
for pad_type, y in we_pads:
if pad_type == VDDIO:
for offset in pin_offsets:
fp.place_wires (['_vddio'], pad_h, y + offset, 0, 0,
margin_left + pow_gap, pin_width, 'm3')
margin_top = core_h - (margin_bottom + place_h)
for pad_type, x in no_pads:
if pad_type == VDDIO:
for offset in pin_offsets:
fp.place_wires (['_vddio'], x + offset, top_h - pad_h - (margin_top + pow_gap), 0, 0,
pin_width, margin_top + pow_gap, 'm3')
margin_right = core_w - (margin_left + place_w)
for pad_type, y in ea_pads:
if pad_type == VDDIO:
for offset in pin_offsets:
fp.place_wires (['_vddio'], top_w - pad_h - (margin_right + pow_gap), y + offset, 0, 0,
margin_right + pow_gap, pin_width, 'm3')
for pad_type, x in so_pads:
if pad_type == VDDIO:
for offset in pin_offsets:
fp.place_wires (['_vddio'], x + offset, pad_h, 0, 0,
pin_width, margin_bottom + pow_gap, 'm3')
## Place corner cells ##
fp.place_macros([('corner_sw', CORNER)], 0, 0, 0, 0, 'S')
fp.place_macros([('corner_nw', CORNER)], 0, top_h - corner_w, 0, 0, 'W')
fp.place_macros([('corner_se', CORNER)], top_w - corner_h, 0, 0, 0, 'E')
fp.place_macros([('corner_ne', CORNER)], top_w - corner_w, top_h - corner_h, 0, 0, 'N')
## Fill I/O ring ##
fp.fill_io_region([(0, 0), (fill_cell_h, top_h)], FILL_CELLS, 'W', 'v')
fp.fill_io_region([(0, top_h - fill_cell_h), (top_w, top_h)], FILL_CELLS, 'N', 'h')
fp.fill_io_region([(top_w - fill_cell_h, 0), (top_w, top_h)], FILL_CELLS, 'E', 'v')
fp.fill_io_region([(0, 0), (top_w, fill_cell_h)], FILL_CELLS, 'S', 'h')
## Place core ##
fp.place_macros([('core', 'asic_core')], gpio_h, gpio_h, 0, 0, 'N')
def generate_core_floorplan(chip):
fp = Floorplan(chip)
core_floorplan(fp)
fp.write_def('asic_core.def')
fp.write_lef('asic_core.lef')
def generate_top_floorplan(chip):
fp = Floorplan(chip)
top_floorplan(fp)
fp.write_def('asic_top.def')
def main():
core_chip = configure_chip('asic_core')
core_chip.write_manifest('sc_manifest.json')
core_fp = Floorplan(core_chip)
core_floorplan(core_fp)
core_fp.write_def('asic_core.def')
core_fp.write_lef('asic_core.lef')
chip = configure_chip('asic_top')
# Add asic_core as library
libname = 'asic_core'
chip.add('asic', 'macrolib', libname)
chip.set('library', libname, 'type', 'component')
chip.set('library', libname, 'lef', 'asic_core.lef')
fp = Floorplan(chip)
top_floorplan(fp)
fp.write_def('asic_top.def')
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras import backend as K
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
mm = MinMaxScaler()
ss = StandardScaler()
rs = RobustScaler()
toyota_df = pd.read_csv('./toyota_new.csv')
X = toyota_df.drop(['Reach12+','Reach15+', 'new_reg', 'after_1m', 'year',
'after_2m', 'after_3m', 'after_4m', 'after_5m', 'after_6m','log_0', 'log_1', 'log_2', 'log_3', 'log_4',
'log_5', 'log_6'], axis=1)
y = toyota_df['log_6']
i = 6
from tensorflow.keras.models import load_model
model = load_model(f'./elu_SGD_final/{i}_best_model.h5')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=13, shuffle=True)
X_train2, X_test2, y_train2, y_test2 = train_test_split(X, y, test_size=0.3, random_state=23, shuffle=True)
X_train3, X_test3, y_train3, y_test3 = train_test_split(X, y, test_size=0.3, random_state=63, shuffle=True)
X_train4, X_test4, y_train4, y_test4 = train_test_split(X, y, test_size=0.3, random_state=43, shuffle=True)
X_train5, X_test5, y_train5, y_test5 = train_test_split(X, y, test_size=0.3, random_state=53, shuffle=True)
ss_f = ss.fit(X_train)
X_train = ss_f.transform(X_train)
X_test = ss_f.transform(X_test)
# Cross-validation
X_test2 = ss_f.transform(X_test2)
X_test3 = ss_f.transform(X_test3)
X_test4 = ss_f.transform(X_test4)
X_test5 = ss_f.transform(X_test5)
result = str(model.evaluate(X_test, y_test)[0])
result2 = str(model.evaluate(X_test2, y_test2)[0])
result3 = str(model.evaluate(X_test3, y_test3)[0])
result4 = str(model.evaluate(X_test4, y_test4)[0])
result5 = str(model.evaluate(X_test5, y_test5)[0])
print('test_mse, test_mae: ', result)
print('test_mse2, test_mae2: ', result2)
print('test_mse3, test_mae3: ', result3)
print('test_mse4, test_mae4: ', result4)
print('test_mse5, test_mae5: ', result5)
f = open(f'{i}_model_kfold.txt', mode='wt', encoding='utf-8')
f.write('X_test : ')
f.write(result)
f.write('\n')
f.write('X_test2 : ')
f.write(result2)
f.write('\n')
f.write('X_test3 : ')
f.write(result3)
f.write('\n')
f.write('X_test4 : ')
f.write(result4)
f.write('\n')
f.write('X_test5 : ')
f.write(result5)
f.write('\n')
f.close()
|
"""
Django settings for aquila project.
Generated by 'django-admin startproject' using Django 2.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from . import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config.DJANGO_SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config.DJANGO_DEBUG
ALLOWED_HOSTS = config.DJANGO_ALLOWED_HOSTS
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'assign_rights',
'django_auth_adfs',
'crispy_forms',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_auth_adfs.middleware.LoginRequiredMiddleware',
]
ROOT_URLCONF = 'aquila.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'aquila.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": config.SQL_ENGINE,
"NAME": config.SQL_DATABASE,
"USER": config.SQL_USER,
"PASSWORD": config.SQL_PASSWORD,
"HOST": config.SQL_HOST,
"PORT": config.SQL_PORT,
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'assign_rights.User'
SUPERUSER_USERNAME = config.SUPERUSER_USERNAME
SUPERUSER_EMAIL = config.SUPERUSER_EMAIL
AUTHENTICATION_BACKENDS = [
'django_auth_adfs.backend.AdfsAuthCodeBackend',
'django.contrib.auth.backends.ModelBackend',
]
AUTH_ADFS = {
"SERVER": "adfs.rockarch.org",
"CLIENT_ID": config.SSO_CLIENT_ID,
"RELYING_PARTY_ID": config.SSO_RELYING_PARTY_ID,
"AUDIENCE": config.SSO_AUDIENCE,
"CLAIM_MAPPING": {"first_name": "given_name",
"last_name": "family_name",
"email": "email"},
'LOGIN_EXEMPT_URLS': ['^api'],
'GROUPS_CLAIM': None,
}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': []
}
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
LOGIN_URL = config.LOGIN_URL
LOGIN_REDIRECT_URL = "home"
# django-crispy-forms template pack
CRISPY_TEMPLATE_PACK = 'bootstrap4'
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
|
<gh_stars>0
# Code generated by font_to_py.py.
# Font: NewYork.ttf
# Cmd: ../../../micropython-font-to-py/font_to_py.py -x /System/Library/Fonts/NewYork.ttf 30 newyork30.py
version = '0.33'
def height():
return 30
def baseline():
return 23
def max_width():
return 29
def hmap():
return True
def reverse():
return False
def monospaced():
return False
def min_ch():
return 32
def max_ch():
return 126
_font =\
b'\x09\x00\x00\x00\x00\x00\x38\x00\x7c\x00\x7e\x00\x07\x00\x03\x00'\
b'\x01\x00\x01\x00\x01\x00\x03\x00\x06\x00\x1e\x00\x1c\x00\x18\x00'\
b'\x20\x00\x10\x00\x00\x00\x00\x00\x00\x00\x38\x00\x38\x00\x38\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00'\
b'\x00\x00\x38\x38\x38\x38\x38\x38\x38\x38\x10\x10\x10\x10\x10\x10'\
b'\x10\x00\x00\x00\x38\x38\x38\x00\x00\x00\x00\x00\x00\x00\x0c\x00'\
b'\x00\x00\x00\x00\x39\xc0\x39\xc0\x39\xc0\x39\xc0\x39\xc0\x10\x80'\
b'\x10\x80\x10\x80\x10\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00'\
b'\x00\x00\x04\x10\x04\x10\x04\x10\x04\x10\x04\x10\x04\x10\x04\x10'\
b'\x7f\xfe\x04\x10\x08\x10\x08\x10\x08\x20\x08\x20\x08\x20\x7f\xfe'\
b'\x08\x20\x08\x20\x08\x20\x08\x20\x08\x20\x08\x20\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x02\x00\x02\x00'\
b'\x02\x00\x0f\xc0\x3a\x70\x32\x30\x72\x10\x72\x10\x72\x00\x7a\x00'\
b'\x3e\x00\x3f\x00\x1f\xc0\x07\xe0\x03\xf0\x02\x78\x02\x38\x02\x38'\
b'\x42\x38\x42\x38\x62\x70\x72\xe0\x1f\x80\x02\x00\x02\x00\x02\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00'\
b'\x1e\x00\x80\x33\x00\x80\x21\x01\x00\x61\x81\x00\x61\x82\x00\x61'\
b'\x82\x00\x61\x84\x00\x61\x84\x00\x21\x08\x00\x33\x10\x00\x1e\x10'\
b'\xf0\x00\x21\x98\x00\x21\x08\x00\x43\x0c\x00\x43\x0c\x00\x83\x0c'\
b'\x00\x83\x0c\x01\x03\x0c\x01\x01\x08\x02\x01\x98\x02\x00\xf0\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00\x03\xe0\x00\x0e'\
b'\x38\x00\x0c\x1c\x00\x1c\x1c\x00\x1c\x1c\x00\x1c\x1c\x00\x1e\x18'\
b'\x00\x0f\x30\x00\x0f\xe0\x00\x07\xc0\xf0\x07\xe0\x40\x19\xf0\x40'\
b'\x38\xf8\x80\x30\x7d\x00\x70\x3e\x00\x70\x1f\x00\x70\x0f\x80\x78'\
b'\x07\xc0\x38\x1b\xe0\x1e\x20\xf0\x0f\xc0\x78\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x07\x00\x00\x00\x38\x38\x38\x38\x38\x10\x10\x10\x10\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x09\x00\x00\x80\x01\x00\x06\x00\x04\x00\x0c\x00\x18\x00\x18\x00'\
b'\x38\x00\x38\x00\x30\x00\x70\x00\x70\x00\x70\x00\x70\x00\x70\x00'\
b'\x70\x00\x70\x00\x70\x00\x30\x00\x38\x00\x38\x00\x18\x00\x0c\x00'\
b'\x0c\x00\x06\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x09\x00'\
b'\x80\x00\x40\x00\x20\x00\x10\x00\x18\x00\x1c\x00\x0c\x00\x0e\x00'\
b'\x0e\x00\x06\x00\x07\x00\x07\x00\x07\x00\x07\x00\x07\x00\x07\x00'\
b'\x07\x00\x07\x00\x06\x00\x0e\x00\x0e\x00\x0c\x00\x18\x00\x18\x00'\
b'\x30\x00\x20\x00\x40\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00'\
b'\x00\x00\x0c\x00\x0c\x00\x69\x80\x7b\x80\x0c\x00\x37\x00\x6b\x80'\
b'\x4d\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00'\
b'\x01\x00\x01\x00\x01\x00\x01\x00\x7f\xfc\x01\x00\x01\x00\x01\x00'\
b'\x01\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x70'\
b'\x30\x30\x10\x20\x40\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x70\x70\x70\x00'\
b'\x00\x00\x00\x00\x00\x00\x09\x00\x01\x00\x01\x00\x01\x00\x01\x00'\
b'\x02\x00\x02\x00\x02\x00\x02\x00\x04\x00\x04\x00\x04\x00\x04\x00'\
b'\x08\x00\x08\x00\x08\x00\x18\x00\x10\x00\x10\x00\x10\x00\x20\x00'\
b'\x20\x00\x20\x00\x20\x00\x40\x00\x40\x00\x40\x00\x40\x00\x00\x00'\
b'\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x03\xc0\x0c\x70\x18\x38'\
b'\x38\x18\x38\x1c\x30\x1c\x70\x0e\x70\x0e\x70\x0e\x70\x0e\x70\x0e'\
b'\x70\x0e\x70\x0e\x70\x0e\x70\x0e\x30\x0c\x38\x1c\x18\x1c\x1c\x18'\
b'\x0e\x30\x03\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0b\x00\x00\x00\x00\x00\x06\x00\x1e\x00\x2e\x00\x0e\x00'\
b'\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00'\
b'\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00'\
b'\x7f\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x0d\x00\x00\x00\x00\x00\x0f\x80\x1f\xe0\x21\xe0\x40\xf0\x00\x70'\
b'\x00\x70\x00\x70\x00\x70\x00\x60\x00\xe0\x00\xc0\x01\x80\x01\x80'\
b'\x03\x00\x06\x00\x0c\x00\x18\x00\x10\x00\x20\x00\x7f\xf0\x7f\xf0'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00'\
b'\x00\x00\x00\x00\x07\xc0\x1f\xe0\x30\xf0\x40\x70\x00\x70\x00\x70'\
b'\x00\x70\x00\xe0\x01\x80\x0f\x80\x01\xe0\x00\xf0\x00\x70\x00\x38'\
b'\x00\x38\x70\x38\x70\x38\x70\x30\x20\x70\x30\xe0\x0f\x80\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00'\
b'\x00\x00\x00\x18\x00\x38\x00\x78\x00\xb8\x00\xb8\x01\x38\x02\x38'\
b'\x02\x38\x04\x38\x04\x38\x08\x38\x10\x38\x10\x38\x20\x38\x7f\xff'\
b'\x7f\xff\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00'\
b'\x0f\xf0\x0f\xf0\x10\x00\x10\x00\x10\x00\x10\x00\x20\x00\x20\x00'\
b'\x27\xc0\x38\xe0\x20\x70\x00\x78\x00\x38\x00\x38\x00\x38\x70\x38'\
b'\x70\x38\x70\x70\x60\x70\x30\xe0\x0f\x80\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x78'\
b'\x01\xc0\x03\x00\x0e\x00\x0c\x00\x1c\x00\x38\x00\x38\x00\x38\x00'\
b'\x73\xe0\x74\x70\x78\x38\x70\x1c\x70\x1c\x70\x1c\x70\x1c\x30\x1c'\
b'\x38\x18\x18\x38\x0c\x70\x07\xc0\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x7f\xf0\x7f\xf0'\
b'\x00\x10\x00\x20\x00\x20\x00\x60\x00\x40\x00\xc0\x00\x80\x00\x80'\
b'\x01\x80\x01\x00\x03\x00\x03\x00\x06\x00\x06\x00\x0e\x00\x0e\x00'\
b'\x0c\x00\x1c\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x0f\xe0\x18\x78\x30\x38'\
b'\x70\x1c\x70\x1c\x70\x1c\x78\x18\x3c\x38\x1f\x60\x0f\xc0\x03\xf0'\
b'\x1c\xf8\x38\x3c\x30\x1e\x70\x0e\x70\x0e\x70\x0e\x70\x0e\x38\x1c'\
b'\x1c\x38\x07\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0f\x00\x00\x00\x00\x00\x07\xc0\x1c\x60\x38\x30\x30\x38'\
b'\x70\x18\x70\x1c\x70\x1c\x70\x1c\x70\x1c\x38\x3c\x3c\x5c\x0f\x9c'\
b'\x00\x38\x00\x38\x00\x38\x00\x70\x00\x60\x00\xe0\x01\x80\x07\x00'\
b'\x3c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x38\x38\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x38\x38\x38\x00\x00\x00\x00\x00\x00\x00'\
b'\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x70\x70\x70\x00\x00'\
b'\x00\x00\x00\x00\x00\x70\x70\x30\x10\x20\x20\x40\x00\x00\x00\x00'\
b'\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x04\x00\x18\x00\x60\x01\x80\x06\x00\x18\x00\x60\x00'\
b'\x30\x00\x0c\x00\x03\x00\x00\x80\x00\x60\x00\x18\x00\x04\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x3f\xfc\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x3f\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x40\x00'\
b'\x30\x00\x0c\x00\x03\x00\x00\xc0\x00\x30\x00\x0c\x00\x18\x00\x20'\
b'\x00\xc0\x03\x00\x0c\x00\x30\x00\x40\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00'\
b'\x38\x00\x7c\x00\x7e\x00\x07\x00\x03\x00\x01\x00\x01\x00\x01\x00'\
b'\x03\x00\x06\x00\x1e\x00\x1c\x00\x18\x00\x20\x00\x10\x00\x00\x00'\
b'\x00\x00\x00\x00\x38\x00\x38\x00\x38\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x1f\xc0\x00\x00\xe0\x70\x00\x01\x00'\
b'\x08\x00\x06\x00\x04\x00\x0c\x00\x02\x00\x08\x00\x01\x00\x10\x1e'\
b'\x21\x00\x30\x39\x60\x80\x20\x70\xc0\x80\x20\xe0\xc0\x80\x40\xe0'\
b'\xc0\x80\x40\xe0\xc0\x80\x41\xc1\xc0\x80\x41\xc1\x80\x80\x41\xc1'\
b'\x81\x00\x41\xc1\x81\x00\x41\xc3\x82\x00\x41\xc3\x86\x00\x20\xed'\
b'\x8c\x00\x20\x70\xf0\x00\x30\x00\x00\x00\x10\x00\x00\x00\x08\x00'\
b'\x00\x00\x06\x00\x00\x00\x03\x81\x80\x00\x00\xfe\x00\x00\x00\x00'\
b'\x00\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x00\x00\xc0\x00'\
b'\x00\xe0\x00\x01\xe0\x00\x01\xe0\x00\x02\x70\x00\x02\x70\x00\x02'\
b'\x70\x00\x04\x38\x00\x04\x38\x00\x04\x38\x00\x08\x1c\x00\x08\x1c'\
b'\x00\x0f\xfc\x00\x10\x0e\x00\x10\x0e\x00\x10\x0e\x00\x20\x07\x00'\
b'\x20\x07\x00\x60\x07\x80\xf8\x1f\xc0\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12\x00'\
b'\x00\x00\x00\x00\x00\x00\x7f\xf0\x00\x1c\x1c\x00\x1c\x0e\x00\x1c'\
b'\x07\x00\x1c\x07\x00\x1c\x07\x00\x1c\x07\x00\x1c\x06\x00\x1c\x0e'\
b'\x00\x1c\x18\x00\x1f\xf8\x00\x1c\x1e\x00\x1c\x07\x00\x1c\x03\x80'\
b'\x1c\x03\x80\x1c\x03\x80\x1c\x03\x80\x1c\x03\x00\x1c\x07\x00\x1c'\
b'\x1c\x00\x7f\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x13\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\xfe\x00\x07\x03\x80\x0e\x01\xc0\x1c\x00\xc0\x38\x00'\
b'\x40\x38\x00\x40\x30\x00\x00\x70\x00\x00\x70\x00\x00\x70\x00\x00'\
b'\x70\x00\x00\x70\x00\x00\x70\x00\x00\x70\x00\x00\x38\x00\x00\x38'\
b'\x00\x40\x1c\x00\x40\x1c\x00\xc0\x0e\x01\xc0\x03\x83\x80\x00\xfe'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x15\x00\x00\x00\x00\x00\x00\x00\x7f\xf0'\
b'\x00\x1c\x1e\x00\x1c\x07\x00\x1c\x03\xc0\x1c\x01\xc0\x1c\x00\xe0'\
b'\x1c\x00\xe0\x1c\x00\x70\x1c\x00\x70\x1c\x00\x70\x1c\x00\x70\x1c'\
b'\x00\x70\x1c\x00\x70\x1c\x00\x70\x1c\x00\xe0\x1c\x00\xe0\x1c\x01'\
b'\xc0\x1c\x03\x80\x1c\x07\x00\x1c\x1c\x00\x7f\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x7f\xfe\x00\x1c\x0e\x00'\
b'\x1c\x02\x00\x1c\x02\x00\x1c\x02\x00\x1c\x00\x00\x1c\x00\x00\x1c'\
b'\x08\x00\x1c\x08\x00\x1c\x18\x00\x1f\xf8\x00\x1c\x18\x00\x1c\x08'\
b'\x00\x1c\x00\x00\x1c\x00\x00\x1c\x01\x00\x1c\x01\x00\x1c\x01\x00'\
b'\x1c\x03\x00\x1c\x07\x00\x7f\xff\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00'\
b'\x00\x00\x00\x00\x7f\xfe\x1c\x0e\x1c\x06\x1c\x02\x1c\x02\x1c\x00'\
b'\x1c\x00\x1c\x10\x1c\x10\x1c\x10\x1f\xf0\x1c\x10\x1c\x10\x1c\x00'\
b'\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x7f\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x14\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xfe\x00\x07\x07\x80\x0e\x01\x80\x1c\x00\x80'\
b'\x38\x00\x80\x38\x00\x80\x30\x00\x00\x70\x00\x00\x70\x00\x00\x70'\
b'\x00\x00\x70\x00\x00\x70\x07\xf0\x70\x01\xc0\x70\x01\xc0\x38\x01'\
b'\xc0\x38\x01\xc0\x18\x01\xc0\x1c\x01\xc0\x0e\x01\xc0\x03\x83\x80'\
b'\x00\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00'\
b'\x7f\x03\xf8\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x1c'\
b'\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x1f\xff'\
b'\xe0\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0'\
b'\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x7f\x03\xf8\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x7f\x00\x1c\x00\x1c\x00'\
b'\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00'\
b'\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00'\
b'\x1c\x00\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0a\x00\x00\x00\x00\x00\x3f\x80\x0e\x00\x0e\x00\x0e\x00'\
b'\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00'\
b'\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00'\
b'\x0c\x00\x1c\x00\x18\x00\x18\x00\x20\x00\x40\x00\x00\x00\x00\x00'\
b'\x12\x00\x00\x00\x00\x00\x00\x00\x7f\x07\xc0\x1c\x03\x00\x1c\x02'\
b'\x00\x1c\x04\x00\x1c\x08\x00\x1c\x10\x00\x1c\x10\x00\x1c\x20\x00'\
b'\x1c\x40\x00\x1c\xe0\x00\x1f\xe0\x00\x1c\xf0\x00\x1c\x78\x00\x1c'\
b'\x38\x00\x1c\x3c\x00\x1c\x1c\x00\x1c\x1e\x00\x1c\x0f\x00\x1c\x07'\
b'\x00\x1c\x07\x80\x7f\x03\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00'\
b'\x00\x00\x7f\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00'\
b'\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00'\
b'\x1c\x02\x1c\x02\x1c\x06\x1c\x06\x1c\x0e\x7f\xfe\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x19\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x7e\x00\x3f\x00\x1e\x00\x3c\x00\x1e\x00\x3c\x00'\
b'\x17\x00\x5c\x00\x17\x00\x5c\x00\x17\x00\x5c\x00\x13\x80\x9c\x00'\
b'\x13\x80\x9c\x00\x13\x80\x9c\x00\x11\xc1\x1c\x00\x11\xc1\x1c\x00'\
b'\x10\xe2\x1c\x00\x10\xe2\x1c\x00\x10\xe2\x1c\x00\x10\x74\x1c\x00'\
b'\x10\x74\x1c\x00\x10\x74\x1c\x00\x10\x38\x1c\x00\x10\x38\x1c\x00'\
b'\x10\x38\x1c\x00\x7c\x10\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x13\x00\x00\x00\x00\x00\x00\x00\xf8\x03\xe0\x3c'\
b'\x00\x80\x3e\x00\x80\x2e\x00\x80\x2f\x00\x80\x27\x80\x80\x23\x80'\
b'\x80\x23\xc0\x80\x21\xe0\x80\x20\xe0\x80\x20\xf0\x80\x20\x70\x80'\
b'\x20\x78\x80\x20\x3c\x80\x20\x1c\x80\x20\x1e\x80\x20\x0f\x80\x20'\
b'\x07\x80\x20\x07\x80\x20\x03\x80\xf8\x01\x80\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x16\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x00\x03\x07\x00\x0e\x01'\
b'\xc0\x1c\x00\xe0\x18\x00\x60\x38\x00\x70\x30\x00\x70\x70\x00\x38'\
b'\x70\x00\x38\x70\x00\x38\x70\x00\x38\x70\x00\x38\x70\x00\x38\x70'\
b'\x00\x38\x38\x00\x30\x38\x00\x70\x18\x00\x60\x1c\x00\xe0\x0e\x01'\
b'\xc0\x03\x83\x00\x00\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00'\
b'\x00\x00\x00\x00\x7f\xf0\x00\x1c\x3c\x00\x1c\x0e\x00\x1c\x0e\x00'\
b'\x1c\x07\x00\x1c\x07\x00\x1c\x07\x00\x1c\x07\x00\x1c\x07\x00\x1c'\
b'\x0e\x00\x1c\x0e\x00\x1c\x38\x00\x1f\xe0\x00\x1c\x00\x00\x1c\x00'\
b'\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00'\
b'\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\xfc\x00\x03\x07\x00\x0e\x01\xc0\x1c\x00\xe0\x18\x00\x60\x38'\
b'\x00\x70\x30\x00\x70\x70\x00\x38\x70\x00\x38\x70\x00\x38\x70\x00'\
b'\x38\x70\x00\x38\x70\x00\x38\x70\x00\x38\x38\x00\x30\x38\x00\x70'\
b'\x18\x00\x60\x1c\x00\xe0\x0e\x01\xc0\x03\x83\x00\x00\xfc\x00\x00'\
b'\x1e\x00\x00\x0f\x00\x00\x07\xc0\x00\x01\xf8\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x12\x00\x00\x00\x00\x00\x00\x00\x7f\xf0\x00\x1c'\
b'\x3c\x00\x1c\x0e\x00\x1c\x0f\x00\x1c\x07\x00\x1c\x07\x00\x1c\x07'\
b'\x00\x1c\x07\x00\x1c\x06\x00\x1c\x0e\x00\x1c\x38\x00\x1f\xf0\x00'\
b'\x1c\x78\x00\x1c\x38\x00\x1c\x3c\x00\x1c\x1c\x00\x1c\x0e\x00\x1c'\
b'\x0e\x00\x1c\x07\x00\x1c\x07\x00\x7f\x03\xc0\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x0f\x00\x00\x00\x00\x00\x07\xe0\x1c\x38\x38\x18\x70\x08\x70\x08'\
b'\x70\x08\x78\x00\x7c\x00\x3e\x00\x1f\xc0\x0f\xf0\x01\xf8\x00\x78'\
b'\x00\x3c\x00\x1c\x00\x1c\x40\x1c\x40\x18\x60\x38\x78\x70\x0f\xc0'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00'\
b'\x00\x00\x00\x00\x00\x00\xff\xff\x80\xe1\xc3\x80\xc1\xc1\x80\x81'\
b'\xc0\x80\x81\xc0\x80\x01\xc0\x00\x01\xc0\x00\x01\xc0\x00\x01\xc0'\
b'\x00\x01\xc0\x00\x01\xc0\x00\x01\xc0\x00\x01\xc0\x00\x01\xc0\x00'\
b'\x01\xc0\x00\x01\xc0\x00\x01\xc0\x00\x01\xc0\x00\x01\xc0\x00\x01'\
b'\xc0\x00\x07\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x13\x00\x00\x00\x00\x00'\
b'\x00\x00\xfe\x03\xe0\x38\x00\x80\x38\x00\x80\x38\x00\x80\x38\x00'\
b'\x80\x38\x00\x80\x38\x00\x80\x38\x00\x80\x38\x00\x80\x38\x00\x80'\
b'\x38\x00\x80\x38\x00\x80\x38\x00\x80\x38\x00\x80\x38\x00\x80\x38'\
b'\x00\x80\x1c\x01\x00\x1c\x01\x00\x0e\x02\x00\x07\x04\x00\x01\xf8'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x12\x00\x00\x00\x00\x00\x00\x00\xfe\x07'\
b'\xc0\x78\x01\x80\x38\x01\x00\x38\x01\x00\x1c\x02\x00\x1c\x02\x00'\
b'\x1c\x02\x00\x0e\x04\x00\x0e\x04\x00\x0e\x04\x00\x07\x08\x00\x07'\
b'\x08\x00\x07\x08\x00\x03\x90\x00\x03\x90\x00\x03\x90\x00\x01\xe0'\
b'\x00\x01\xe0\x00\x01\xc0\x00\x00\xc0\x00\x00\xc0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x0f\xe0\xf8'\
b'\x3c\x03\x80\x60\x3c\x03\x80\x60\x1c\x03\x80\x40\x1c\x05\xc0\x40'\
b'\x1c\x05\xc0\x40\x0e\x05\xc0\x80\x0e\x08\xc0\x80\x0e\x08\xe0\x80'\
b'\x07\x08\xe1\x00\x07\x08\x61\x00\x07\x10\x71\x00\x07\x10\x72\x00'\
b'\x03\x90\x72\x00\x03\xa0\x32\x00\x03\xa0\x3a\x00\x01\xe0\x3c\x00'\
b'\x01\xc0\x3c\x00\x01\xc0\x1c\x00\x00\xc0\x18\x00\x00\xc0\x18\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12\x00\x00\x00'\
b'\x00\x00\x00\x00\xfe\x07\xc0\x3c\x03\x00\x1c\x02\x00\x1e\x02\x00'\
b'\x0e\x04\x00\x0f\x08\x00\x07\x08\x00\x03\x90\x00\x03\xe0\x00\x01'\
b'\xe0\x00\x00\xe0\x00\x01\xe0\x00\x02\x70\x00\x02\x78\x00\x04\x38'\
b'\x00\x04\x1c\x00\x08\x1c\x00\x10\x0e\x00\x10\x0f\x00\x30\x07\x00'\
b'\xf8\x1f\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x13\x00\x00\x00\x00\x00\x00\x00'\
b'\xff\x07\xe0\x3c\x01\x80\x1c\x01\x00\x0e\x02\x00\x0e\x02\x00\x07'\
b'\x04\x00\x07\x04\x00\x03\x88\x00\x03\xd0\x00\x01\xd0\x00\x00\xe0'\
b'\x00\x00\xe0\x00\x00\xe0\x00\x00\xe0\x00\x00\xe0\x00\x00\xe0\x00'\
b'\x00\xe0\x00\x00\xe0\x00\x00\xe0\x00\x00\xe0\x00\x03\xf8\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x3f\xff\x00\x38'\
b'\x0f\x00\x20\x0e\x00\x20\x1c\x00\x00\x3c\x00\x00\x38\x00\x00\x78'\
b'\x00\x00\x70\x00\x00\xe0\x00\x01\xe0\x00\x01\xc0\x00\x03\xc0\x00'\
b'\x03\x80\x00\x07\x80\x00\x0f\x00\x00\x0e\x00\x00\x1e\x01\x00\x1c'\
b'\x01\x00\x3c\x03\x00\x78\x07\x00\x7f\xff\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x0a\x00\x1f\x80\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00'\
b'\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00'\
b'\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00'\
b'\x1c\x00\x1c\x00\x1c\x00\x1f\x80\x00\x00\x00\x00\x00\x00\x09\x00'\
b'\x40\x00\x40\x00\x40\x00\x40\x00\x20\x00\x20\x00\x20\x00\x20\x00'\
b'\x10\x00\x10\x00\x10\x00\x10\x00\x08\x00\x08\x00\x08\x00\x08\x00'\
b'\x04\x00\x04\x00\x04\x00\x04\x00\x02\x00\x02\x00\x02\x00\x02\x00'\
b'\x01\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x7e\x00'\
b'\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00'\
b'\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00'\
b'\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00'\
b'\x0e\x00\x7e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00'\
b'\x01\x00\x02\x80\x02\x40\x04\x40\x08\x20\x08\x10\x10\x10\x20\x08'\
b'\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\xff\xfc\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0e\x00'\
b'\x06\x00\x06\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x80\x30\xe0\x70\x70\x70\x70'\
b'\x00\x70\x00\x70\x07\xf0\x18\x70\x30\x70\x70\x70\x70\x70\x70\x70'\
b'\x38\xf0\x1f\x3c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x10\x00\x00\x00\x00\x00\xf8\x00\x38\x00\x38\x00\x38\x00'\
b'\x38\x00\x38\x00\x38\x00\x39\xf0\x3e\x38\x38\x1c\x38\x1c\x38\x0e'\
b'\x38\x0e\x38\x0e\x38\x0e\x38\x0e\x38\x0e\x38\x1c\x38\x1c\x34\x38'\
b'\x23\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x07\xc0\x1c\x70\x38\x70\x38\x70\x70\x00\x70\x00'\
b'\x70\x00\x70\x00\x70\x00\x70\x00\x38\x00\x3c\x00\x1e\x10\x07\xe0'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00'\
b'\x00\x00\x00\x00\x00\x7c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c'\
b'\x00\x1c\x07\xdc\x1c\x3c\x38\x1c\x38\x1c\x70\x1c\x70\x1c\x70\x1c'\
b'\x70\x1c\x70\x1c\x70\x1c\x38\x1c\x38\x1c\x1c\x7c\x0f\x9f\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x07\xc0\x1c\x70\x38\x30\x30\x38\x70\x38\x7f\xf8\x70\x00\x70\x00'\
b'\x70\x00\x78\x00\x38\x00\x3c\x00\x1e\x10\x07\xe0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00'\
b'\x0f\x00\x1f\x80\x3b\x80\x38\x00\x38\x00\x38\x00\x38\x00\x7f\x00'\
b'\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00'\
b'\x38\x00\x38\x00\x38\x00\x38\x00\xfc\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\xfe\x1c\x60'\
b'\x38\x30\x38\x38\x38\x38\x38\x38\x18\x30\x0c\x70\x03\xc0\x1c\x00'\
b'\x30\x00\x30\x00\x3f\xf0\x1f\xf8\x18\x1c\x30\x0c\x70\x0c\x70\x0c'\
b'\x70\x18\x3c\x30\x0f\xc0\x10\x00\x00\x00\x00\x00\xf8\x00\x38\x00'\
b'\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x39\xf0\x3e\x38\x38\x1c'\
b'\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c'\
b'\x38\x1c\x38\x1c\xfe\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x07\x00\x00\x00\x00\x38\x38\x38\x00\x00\x00\x78'\
b'\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x7e\x00\x00\x00'\
b'\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x38\x38\x38\x00\x00\x00'\
b'\x78\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x30'\
b'\x30\x60\x40\x80\x0f\x00\x00\x00\x00\x00\xf8\x00\x38\x00\x38\x00'\
b'\x38\x00\x38\x00\x38\x00\x38\x00\x38\x7c\x38\x30\x38\x20\x38\x40'\
b'\x38\x80\x39\x80\x3f\x80\x39\xc0\x38\xe0\x38\xe0\x38\x70\x38\x70'\
b'\x38\x38\xfc\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x07\x00\x00\x00\xf8\x38\x38\x38\x38\x38\x38\x38\x38\x38'\
b'\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\xfe\x00\x00\x00\x00\x00'\
b'\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf9'\
b'\xe1\xe0\x3e\x76\x70\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38'\
b'\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38'\
b'\x38\x38\x38\x38\x38\x38\xfc\x7c\x7e\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xf9\xf0\x3e\x38\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c'\
b'\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c\xfe\x7f\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x07\xe0\x0c\x30\x18\x1c\x38\x1c\x70\x0e\x70\x0e\x70\x0e\x70\x0e'\
b'\x70\x0e\x70\x0e\x38\x1c\x38\x18\x1c\x30\x07\xe0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf9\xf0'\
b'\x3e\x38\x38\x1c\x38\x1c\x38\x0e\x38\x0e\x38\x0e\x38\x0e\x38\x0e'\
b'\x38\x0e\x38\x1c\x38\x1c\x3c\x38\x3b\xe0\x38\x00\x38\x00\x38\x00'\
b'\x38\x00\x38\x00\x38\x00\xfe\x00\x10\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\xc4\x1c\x2c'\
b'\x38\x1c\x38\x1c\x70\x1c\x70\x1c\x70\x1c\x70\x1c\x70\x1c\x70\x1c'\
b'\x38\x1c\x38\x1c\x1c\x7c\x0f\x9c\x00\x1c\x00\x1c\x00\x1c\x00\x1c'\
b'\x00\x1c\x00\x1c\x00\x7f\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf9\xc0\x3b\xc0\x3c\x00'\
b'\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00'\
b'\x38\x00\x38\x00\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x80\x30\xc0\x60\x40\x60\x40'\
b'\x70\x00\x78\x00\x3f\x00\x07\xc0\x01\xe0\x00\x60\x40\x60\x60\x60'\
b'\x70\xc0\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x08\x00\x18\x00\x38\x00\x7f\x80\x38\x00\x38\x00\x38\x00\x38\x00'\
b'\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x1c\x00'\
b'\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x38\x1c\x78\x3c\x38\x1c\x38\x1c\x38\x1c\x38\x1c'\
b'\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x1c\x7c\x0f\x9f'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xfe\x7c\x38\x10\x38\x10\x18\x20\x1c\x20\x1c\x20\x0e\x40'\
b'\x0e\x40\x0e\x40\x07\x80\x07\x80\x03\x00\x03\x00\x03\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x7e\x78\x38\x38\x10\x38'\
b'\x38\x20\x38\x5c\x20\x18\x5c\x20\x1c\x4c\x40\x1c\x8e\x40\x0c\x8e'\
b'\x40\x0e\x86\x80\x0f\x07\x80\x07\x07\x80\x07\x03\x00\x06\x03\x00'\
b'\x02\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x78\x38\x30'\
b'\x38\x20\x1c\x40\x0e\x80\x0e\x80\x07\x00\x07\x80\x0b\x80\x09\xc0'\
b'\x10\xe0\x10\xe0\x20\x70\xf1\xfc\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x7c\x78\x10\x38\x10'\
b'\x38\x20\x1c\x20\x1c\x40\x1c\x40\x0e\x40\x0e\x80\x06\x80\x07\x80'\
b'\x07\x00\x03\x00\x02\x00\x02\x00\x02\x00\x04\x00\x04\x00\x0c\x00'\
b'\x08\x00\x18\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x7f\xe0\x60\xe0\x41\xc0\x41\x80'\
b'\x03\x80\x07\x00\x07\x00\x0e\x00\x0c\x00\x1c\x00\x18\x20\x38\x20'\
b'\x70\x60\x7f\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x09\x00\x07\x00\x0c\x00\x18\x00\x18\x00\x18\x00\x18\x00'\
b'\x18\x00\x18\x00\x18\x00\x18\x00\x18\x00\x18\x00\x30\x00\xc0\x00'\
b'\x20\x00\x10\x00\x18\x00\x18\x00\x18\x00\x18\x00\x18\x00\x18\x00'\
b'\x18\x00\x18\x00\x18\x00\x0c\x00\x07\x00\x00\x00\x00\x00\x00\x00'\
b'\x09\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00'\
b'\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00'\
b'\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00'\
b'\x08\x00\x08\x00\x08\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00'\
b'\x70\x00\x18\x00\x0c\x00\x0c\x00\x0c\x00\x0c\x00\x0c\x00\x0c\x00'\
b'\x0c\x00\x0c\x00\x0c\x00\x0c\x00\x06\x00\x01\x80\x02\x00\x04\x00'\
b'\x0c\x00\x0c\x00\x0c\x00\x0c\x00\x0c\x00\x0c\x00\x0c\x00\x0c\x00'\
b'\x0c\x00\x18\x00\x70\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x1c\x04\x22\x04\x41\x04\x40\x88\x40\x70'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
_index =\
b'\x00\x00\x3e\x00\x5e\x00\x7e\x00\xbc\x00\xfa\x00\x38\x01\x94\x01'\
b'\xf0\x01\x10\x02\x4e\x02\x8c\x02\xca\x02\x08\x03\x28\x03\x66\x03'\
b'\x86\x03\xc4\x03\x02\x04\x40\x04\x7e\x04\xbc\x04\xfa\x04\x38\x05'\
b'\x76\x05\xb4\x05\xf2\x05\x30\x06\x50\x06\x70\x06\xae\x06\xec\x06'\
b'\x2a\x07\x68\x07\xe2\x07\x3e\x08\x9a\x08\xf6\x08\x52\x09\xae\x09'\
b'\xec\x09\x48\x0a\xa4\x0a\xe2\x0a\x20\x0b\x7c\x0b\xba\x0b\x34\x0c'\
b'\x90\x0c\xec\x0c\x48\x0d\xa4\x0d\x00\x0e\x3e\x0e\x9a\x0e\xf6\x0e'\
b'\x52\x0f\xcc\x0f\x28\x10\x84\x10\xe0\x10\x1e\x11\x5c\x11\x9a\x11'\
b'\xd8\x11\x16\x12\x54\x12\x92\x12\xd0\x12\x0e\x13\x4c\x13\x8a\x13'\
b'\xc8\x13\x06\x14\x44\x14\x64\x14\x84\x14\xc2\x14\xe2\x14\x3e\x15'\
b'\x7c\x15\xba\x15\xf8\x15\x36\x16\x74\x16\xb2\x16\xf0\x16\x2e\x17'\
b'\x6c\x17\xc8\x17\x06\x18\x44\x18\x82\x18\xc0\x18\xfe\x18\x3c\x19'\
b'\x7a\x19'
_mvfont = memoryview(_font)
_mvi = memoryview(_index)
ifb = lambda l : l[0] | (l[1] << 8)
def get_ch(ch):
oc = ord(ch)
ioff = 2 * (oc - 32 + 1) if oc >= 32 and oc <= 126 else 0
doff = ifb(_mvi[ioff : ])
width = ifb(_mvfont[doff : ])
next_offs = doff + 2 + ((width - 1)//8 + 1) * 30
return _mvfont[doff + 2:next_offs], 30, width
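# --- Illustrative usage sketch (not part of the generated font data) ---
# get_ch() returns (glyph_buffer, height, width); each glyph row occupies
# ((width - 1)//8 + 1) bytes with the leftmost pixel in the most significant bit.
# A host-side ASCII rendering of one character, assuming the 30-pixel height above:
if __name__ == '__main__':
    glyph, height, width = get_ch('A')
    bytes_per_row = (width - 1) // 8 + 1
    for row in range(height):
        row_bytes = bytes(glyph[row * bytes_per_row:(row + 1) * bytes_per_row])
        bits = int.from_bytes(row_bytes, 'big')
        top_bit = bytes_per_row * 8 - 1
        print(''.join('#' if bits & (1 << (top_bit - col)) else ' ' for col in range(width)))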
|
##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 <NAME>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
gpipe/exons2map.py -
======================================================
:Author: <NAME>
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python gpipe/exons2map.py --help
Type::
python gpipe/exons2map.py --help
for command line help.
Documentation
-------------
Code
----
'''
import sys
import string
import getopt
import CGAT.Experiment as E
import CGAT.Exons as Exons
import alignlib_lite
USAGE = """python %s [OPTIONS] < psl > predictions
Convert exon list to a map of prediction to genome.
Note: This file takes in forward strand coordinates, but
returns forward/backward coordinates.
Version: $Id: gpipe/exons2map.py 1799 2008-03-28 11:44:19Z andreas $
Options:
-h, --help print this message.
-v, --verbose= loglevel.
-c, --contigs-tsv-file= filename with contig lengths
""" % sys.argv[0]
param_long_options = ["verbose=", "help", "contigs-tsv-file=", "version"]
param_short_options = "v:hc:"
param_trans = None
param_filename_contigs = None
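# Example invocation (file names are purely illustrative): read an exon table on
# stdin and supply contig lengths so that minus-strand coordinates can be inverted:
#
#   python gpipe/exons2map.py --contigs-tsv-file=contigs.tsv < exons.tsv > prediction2genome.map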
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
    global param_filename_contigs
    if argv is None:
argv = sys.argv
try:
optlist, args = getopt.getopt(
sys.argv[1:], param_short_options, param_long_options)
except getopt.error, msg:
print USAGE, msg
sys.exit(2)
for o, a in optlist:
if o in ("-v", "--verbose"):
param_loglevel = int(a)
elif o in ("--version", ):
print "version="
sys.exit(0)
elif o in ("-h", "--help"):
print USAGE
sys.exit(0)
elif o in ("-c", "--contigs-tsv-file"):
param_filename_contigs = a
print E.GetHeader()
print E.GetParams()
last_exon = Exons.Exon()
contig_sizes = {}
if param_filename_contigs:
infile = open(param_filename_contigs, "r")
for line in infile:
if line[0] == "#":
continue
sbjct_token, size = line[:-1].split("\t")[:2]
contig_sizes[sbjct_token] = int(size)
map_prediction2genome = alignlib_lite.makeAlignmentSet()
nexons, npairs = 0, 0
for line in sys.stdin:
if line[0] == "#":
continue
this_exon = Exons.Exon()
this_exon.Read(line)
if this_exon.mSbjctStrand == "-":
this_exon.InvertGenomicCoordinates(
contig_sizes[this_exon.mSbjctToken])
nexons += 1
if last_exon.mQueryToken != this_exon.mQueryToken:
if last_exon.mQueryToken:
f = alignlib_lite.AlignmentFormatEmissions(
map_prediction2genome)
print string.join(map(str, (last_exon.mQueryToken,
last_exon.mSbjctToken,
last_exon.mSbjctStrand,
f)), "\t")
npairs += 1
map_prediction2genome.clear()
alignlib_lite.addDiagonal2Alignment(map_prediction2genome,
this_exon.mPeptideFrom + 1,
this_exon.mPeptideTo + 1,
this_exon.mGenomeFrom - this_exon.mPeptideFrom)
last_exon = this_exon
f = alignlib_lite.AlignmentFormatEmissions(map_prediction2genome)
print string.join(map(str, (last_exon.mQueryToken,
last_exon.mSbjctToken,
last_exon.mSbjctStrand,
f)), "\t")
npairs += 1
print "# nexons=%i, npairs=%i" % (nexons, npairs)
print E.GetFooter()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
<filename>plotly_visualization/vis.py
# python version 3.5x
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
G_FILENAME_ARR=[]
class VisFunctions :
def vistypeDetection(self, _htmlFileName, _d,_m, _vis, _dimIdxArr, _meaIdxArr, _arrColumn, _arrData, _pdDataset):
self.funcName = "_"+str(_d)+"d"+str(_m)+"m_"+str(_vis)
self.case = getattr(self, "callVisFunctions", lambda:"default")
#print(self.funcName)
return self.case( _htmlFileName, self.funcName, _d,_m, _dimIdxArr, _meaIdxArr, _vis, _arrColumn, _arrData, _pdDataset)
def callVisFunctions(self, _htmlFileName, _funcName, _d,_m, _dimIdxArr, _meaIdxArr, _vis, _arrColumn, _arrData, _pdDataset) :
if _d == 0 : #d==0, m>=1 ##only for pandas format #print(G_FILENAME_ARR, _d,_m, _dimIdxArr, _meaIdxArr, _vis)
tempArrForFuncCall = []
tempArrForFuncCall2 = []
for j in range(len(_meaIdxArr)):
tempArrForFuncCall.append(_arrColumn[_meaIdxArr[j]])
tempArrForFuncCall2.append(_arrData[_meaIdxArr[j]])
t = getattr(self, _funcName, lambda:"default")
if _m==2 :
if _vis=="scatter" :
t(_htmlFileName, tempArrForFuncCall2[0], tempArrForFuncCall2[1])
else :
t(_htmlFileName, _pdDataset, tempArrForFuncCall[0], tempArrForFuncCall[1])
elif _m==3 :
t(_htmlFileName, _pdDataset, tempArrForFuncCall[0], tempArrForFuncCall[1], tempArrForFuncCall[2])
elif _m == 0 : #d=>1, m==0 ##only for pandas format ##disabled for this version #print(G_FILENAME_ARR, _d,_m, _dimIdxArr, _meaIdxArr, _vis)
for i in range(len(_dimIdxArr)):
print(_dimIdxArr[i])
elif _d==1 : #d==1 m>=1
for i in range(len(_dimIdxArr)):
tempArrForFuncCall = []
#print(G_FILENAME_ARR, _d,_m, _dimIdxArr, _meaIdxArr, _vis)
tempArrForFuncCall.append(_arrData[_dimIdxArr[i]])
for j in range(len(_meaIdxArr)):
tempArrForFuncCall.append(_arrColumn[_meaIdxArr[j]])
tempArrForFuncCall.append(_arrData[_meaIdxArr[j]])
t = getattr(self, _funcName, lambda:"default")
if _m==1 :
t(_htmlFileName, tempArrForFuncCall[0], tempArrForFuncCall[1], tempArrForFuncCall[2])
elif _m==2 :
t(_htmlFileName, tempArrForFuncCall[0], tempArrForFuncCall[1], tempArrForFuncCall[2], tempArrForFuncCall[3], tempArrForFuncCall[4])
elif _m==3 :
t(_htmlFileName, tempArrForFuncCall[0], tempArrForFuncCall[1], tempArrForFuncCall[2], tempArrForFuncCall[3], tempArrForFuncCall[4], tempArrForFuncCall[5], tempArrForFuncCall[6])
else : #d>1 m>=1
print("!!!!!!!!")
def _1d1m_bar(self, _FileName, _dim, _name, _data, _color='indianred', _xaxis=None) :
fig = go.Figure()
fig.add_trace(go.Bar(
x=_dim,
y=_data,
name=_name,
marker_color=_color
))
if _xaxis==None :
fig.update_layout(barmode='group', xaxis_tickangle=-45)
else :
fig.update_layout(barmode='group', xaxis_tickangle=-45, xaxis=_xaxis)
flag=True
try:
fig.write_html(_FileName+".html", auto_open=False)
fig.write_image(_FileName+".png")
except OSError:
flag=False
print_str = "[2][bar_1d1m]["+("success" if flag else "fail")+"]["+_FileName+".html]"
print(print_str)
#fig.show()
def _1d2m_bar(self, _FileName, _dim, _name1, _data1, _name2, _data2, _type='group', _color=['indianred', 'lightsalmon'], _xaxis=None) :
fig = go.Figure()
fig.add_trace(go.Bar(
x=_dim,
y=_data1,
name=_name1,
marker_color=_color[0]
))
fig.add_trace(go.Bar(
x=_dim,
y=_data2,
name=_name2,
marker_color=_color[1]
))
if _xaxis==None :
fig.update_layout(barmode=_type, xaxis_tickangle=-45)
else :
fig.update_layout(barmode=_type, xaxis_tickangle=-45, xaxis=_xaxis)
flag=True
try:
fig.write_html(_FileName+".html", auto_open=False)
fig.write_image(_FileName+".png")
except OSError:
flag=False
print_str = "[2][bar_1d2m]["+("success" if flag else "fail")+"]["+_FileName+".html]"
print(print_str)
#fig.show()
def _1d3m_bar_line(self, _FileName, _dim, _name1, _data1, _name2,_data2, _name3, _data3, _lineMode='lines+markers', _type='group', _color=['mediumturquoise','indianred', 'lightsalmon'], _xaxis=None) :
fig = go.Figure()
fig.add_trace(go.Scatter(
x=_dim,
y=_data1,
name=_name1,
marker_color=_color[0],
mode=_lineMode
))
fig.add_trace(go.Bar(
x=_dim,
y=_data2,
name=_name2,
marker_color=_color[1]
))
fig.add_trace(go.Bar(
x=_dim,
y=_data3,
name=_name3,
marker_color=_color[2]
))
if _xaxis==None :
fig.update_layout(barmode=_type, xaxis_tickangle=-45)
else :
fig.update_layout(barmode=_type, xaxis_tickangle=-45, xaxis=_xaxis)
flag=True
try:
fig.write_html(_FileName+".html", auto_open=False)
fig.write_image(_FileName+".png")
except OSError:
flag=False
print_str = "[2][bar_line_1d3m]["+("success" if flag else "fail")+"]["+_FileName+".html]"
print(print_str)
#fig.show()
def _1d1m_pie(self, _FileName, _dim, _name1, _data) :
_color=['rgb(141,211,199)', 'rgb(255,255,179)', 'rgb(190,186,218)', 'rgb(251,128,114)', 'rgb(128,177,211)', 'rgb(253,180,98)', 'rgb(179,222,105)', 'rgb(252,205,229)', 'rgb(217,217,217)', 'rgb(188,128,189)', 'rgb(204,235,197)', 'rgb(255,237,111)']
fig = go.Figure()
fig.add_trace(go.Pie(
labels=_dim,
values=_data,
text = _data,
textposition='auto'
))
fig.update_traces(hoverinfo='label+percent+name', textinfo='none')
flag=True
try:
fig.write_html(_FileName+".html", auto_open=False)
fig.write_image(_FileName+".png")
except OSError:
flag=False
print_str = "[2][pie_1d1m]["+("success" if flag else "fail")+"]["+_FileName+".html]"
print(print_str)
#fig.show()
def _0d2m_scatter(self, _FileName, _data1, _data2) :
fig = go.Figure(data=go.Scatter(x=_data1, y=_data2, mode='markers'))
flag=True
try:
fig.write_html(_FileName+".html", auto_open=False)
fig.write_image(_FileName+".png")
except OSError:
flag=False
print_str = "[2][scatter_0d2m]["+("success" if flag else "fail")+"]["+_FileName+".html]"
print(print_str)
#fig.show()
def _0d2m_pandas_scatter(self, _FileName, _pandasDataset, _xaxis, _yaxis) :
fig = px.scatter(_pandasDataset, x=_xaxis, y=_yaxis)
flag=True
try:
fig.write_html(_FileName+".html", auto_open=False)
fig.write_image(_FileName+".png")
except OSError:
flag=False
print_str = "[2][scatter_0d2m]["+("success" if flag else "fail")+"]["+_FileName+".html]"
print(print_str)
#fig.show()
def _2d0m_pandas_scatter(self, _FileName, _pandasDataset, _xaxis, _yaxis) :
fig = px.scatter(_pandasDataset, x=_xaxis, y=_yaxis)
flag=True
try:
fig.write_html(_FileName+".html", auto_open=False)
fig.write_image(_FileName+".png")
except OSError:
flag=False
print_str = "[2][scatter_2d0m]["+("success" if flag else "fail")+"]["+_FileName+".html]"
print(print_str)
#fig.show()
def _2d1m_pandas_scatter(self, _FileName, _pandasDataset, _xaxis, _yaxis, _coloraxis) :
fig = px.scatter(_pandasDataset, x=_xaxis, y=_yaxis, color=_coloraxis, marginal_y="histogram", marginal_x="histogram")
flag=True
try:
fig.write_html(_FileName+".html", auto_open=False)
fig.write_image(_FileName+".png")
except OSError:
flag=False
print_str = "[2][scatter_2d1m]["+("success" if flag else "fail")+"]["+_FileName+".html]"
print(print_str)
#fig.show()
def _all_pandas_scatter(self, _FileName, _pandasDataset) :
fig = px.scatter_matrix(_pandasDataset)
flag=True
try:
fig.write_html(_FileName+".html", auto_open=False)
fig.write_image(_FileName+".png")
except OSError:
flag=False
print_str = "[2][scatter_all]["+("success" if flag else "fail")+"]["+_FileName+".html]"
print(print_str)
#fig.show()
def _0d2m_pandas_heatmap(self, _FileName, _pandasDataset, _xaxis, _yaxis) :
fig = px.density_heatmap(_pandasDataset, x=_xaxis, y=_yaxis, marginal_y="histogram", marginal_x="histogram")
flag=True
try:
fig.write_html(_FileName+".html", auto_open=False)
fig.write_image(_FileName+".png")
except OSError:
flag=False
print_str = "[2][heatmap_0d2m]["+("success" if flag else "fail")+"]["+_FileName+".html]"
print(print_str)
#fig.show()
def _2d0m_pandas_heatmap(self, _FileName, _pandasDataset, _xaxis, _yaxis) :
fig = px.density_heatmap(_pandasDataset, x=_xaxis, y=_yaxis, marginal_y="histogram", marginal_x="histogram")
flag=True
try:
fig.write_html(_FileName+".html", auto_open=False)
fig.write_image(_FileName+".png")
except OSError:
flag=False
print_str = "[2][heatmap_2d0m]["+("success" if flag else "fail")+"]["+_FileName+".html]"
print(print_str)
#fig.show()
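# --- Illustrative usage sketch (column names and indices are hypothetical) ---
# Dispatch follows the "_<d>d<m>m_<vis>" naming scheme, so one dimension plus one
# measure with vis="bar" resolves to _1d1m_bar and writes <name>.html / <name>.png
# (writing the .png additionally needs the kaleido package to be installed).
if __name__ == '__main__':
    columns = ['city', 'population']
    data = [['Seoul', 'Busan', 'Incheon'], [9700000, 3400000, 2900000]]
    df = pd.DataFrame(dict(zip(columns, data)))
    vf = VisFunctions()
    # dimension index 0 ('city'), measure index 1 ('population')
    vf.vistypeDetection('population_bar', 1, 1, 'bar', [0], [1], columns, data, df)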
|
<filename>Marquee/python/nyan_cat.py
#!/usr/bin/env python
"""A demo client for Open Pixel Control
http://github.com/zestyping/openpixelcontrol
Every few seconds, a sparkly rainbow washes across the LEDS.
To run:
First start the gl simulator using, for example, the included "wall" layout
make
bin/gl_server layouts/wall.json
Then run this script in another shell to send colors to the simulator
python_clients/nyan_cat.py --layout layouts/wall.json
"""
from __future__ import division
import time
import sys
import optparse
import random
try:
import json
except ImportError:
import simplejson as json
import opc
import color_utils
#-------------------------------------------------------------------------------
# command line
parser = optparse.OptionParser()
parser.add_option('-l', '--layout', dest='layout',
action='store', type='string',
help='layout file')
parser.add_option('-s', '--server', dest='server', default='127.0.0.1:7890',
action='store', type='string',
help='ip and port of server')
parser.add_option('-f', '--fps', dest='fps', default=20,
action='store', type='int',
help='frames per second')
options, args = parser.parse_args()
if not options.layout:
parser.print_help()
print
print 'ERROR: you must specify a layout file using --layout'
print
sys.exit(1)
#-------------------------------------------------------------------------------
# parse layout file
print
print ' parsing layout file'
print
coordinates = []
for item in json.load(open(options.layout)):
if 'point' in item:
coordinates.append(tuple(item['point']))
#-------------------------------------------------------------------------------
# connect to server
client = opc.Client(options.server)
if client.can_connect():
print ' connected to %s' % options.server
else:
# can't connect, but keep running in case the server appears later
print ' WARNING: could not connect to %s' % options.server
print
#-------------------------------------------------------------------------------
# color function
def pixel_color(t, coord, ii, n_pixels, random_values):
"""Compute the color of a given pixel.
t: time in seconds since the program started.
ii: which pixel this is, starting at 0
coord: the (x, y, z) position of the pixel as a tuple
n_pixels: the total number of pixels
random_values: a list containing a constant random value for each pixel
Returns an (r, g, b) tuple in the range 0-255
"""
# make moving stripes for x, y, and z
x, y, z = coord
y += color_utils.cos(x + 0.2*z, offset=0, period=1, minn=0, maxx=0.6)
z += color_utils.cos(x, offset=0, period=1, minn=0, maxx=0.3)
x += color_utils.cos(y + z, offset=0, period=1.5, minn=0, maxx=0.2)
# rotate
x, y, z = y, z, x
# shift some of the pixels to a new xyz location
if ii % 7 == 0:
x += ((ii*123)%5) / n_pixels * 32.12
y += ((ii*137)%5) / n_pixels * 22.23
z += ((ii*147)%7) / n_pixels * 44.34
# make x, y, z -> r, g, b sine waves
r = color_utils.cos(x, offset=t / 4, period=2, minn=0, maxx=1)
g = color_utils.cos(y, offset=t / 4, period=2, minn=0, maxx=1)
b = color_utils.cos(z, offset=t / 4, period=2, minn=0, maxx=1)
r, g, b = color_utils.contrast((r, g, b), 0.5, 1.5)
# a moving wave across the pixels, usually dark.
# lines up with the wave of twinkles
fade = color_utils.cos(t - ii/n_pixels, offset=0, period=7, minn=0, maxx=1) ** 20
r *= fade
g *= fade
b *= fade
# # stretched vertical smears
# v = color_utils.cos(ii / n_pixels, offset=t*0.1, period = 0.07, minn=0, maxx=1) ** 5 * 0.3
# r += v
# g += v
# b += v
# twinkle occasional LEDs
twinkle_speed = 0.07
twinkle_density = 0.1
twinkle = (random_values[ii]*7 + time.time()*twinkle_speed) % 1
twinkle = abs(twinkle*2 - 1)
twinkle = color_utils.remap(twinkle, 0, 1, -1/twinkle_density, 1.1)
twinkle = color_utils.clamp(twinkle, -0.5, 1.1)
twinkle **= 5
twinkle *= color_utils.cos(t - ii/n_pixels, offset=0, period=7, minn=0, maxx=1) ** 20
twinkle = color_utils.clamp(twinkle, -0.3, 1)
r += twinkle
g += twinkle
b += twinkle
# apply gamma curve
# only do this on live leds, not in the simulator
#r, g, b = color_utils.gamma((r, g, b), 2.2)
return (r*256, g*256, b*256)
#-------------------------------------------------------------------------------
# send pixels
print ' sending pixels forever (control-c to exit)...'
print
n_pixels = len(coordinates)
random_values = [random.random() for ii in range(n_pixels)]
start_time = time.time()
while True:
t = time.time() - start_time
pixels = [pixel_color(t*0.6, coord, ii, n_pixels, random_values) for ii, coord in enumerate(coordinates)]
client.put_pixels(pixels, channel=0)
time.sleep(1 / options.fps)
|
<gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import os
import cv2
import glob
import time
import pickle
import numpy as np
from .box import Box
#from .fit import predict
from .connected_componentes import *
from .pre_processing import *
from .commonfunctions import *
import skimage.io as io
from wand.image import Image
from .segmenter import Segmenter
from wand.display import display
from pathlib import Path
from imutils import resize as im_resize
from scipy.ndimage import binary_fill_holes
from skimage.morphology import skeletonize, thin
from skimage.filters import threshold_otsu, gaussian, median, threshold_yen
from .staff import calculate_thickness_spacing, remove_staff_lines, coordinator
def Slice(cv_img):
start_time = time.time()
img_buffer=None
imgf=None
imgmat=None
segmented_staves=[]
print("===============================BINARIZATION==============================")
with Image.from_array(cv_img) as im:
img_buffer = np.asarray(bytearray(im.make_blob("JPEG")), dtype=np.uint8)
ret, mat = binarize_image(img_buffer)
with Image(blob=mat) as timg:
imgf = mat
#timg.save(filename="otsu.jpg")
timg.deskew(0.4*im.quantum_range)
#timg.save(filename="otsu2.jpg")
imgf = np.array(timg)
img_buffer = np.asarray(bytearray(timg.make_blob("JPEG")), dtype=np.uint8)
imgmat = cv2.imdecode(img_buffer, cv2.IMREAD_UNCHANGED)
print("==================================SLICE==================================")
imgmat = get_thresholded(imgmat, 245)
segmenter = Segmenter(imgmat)
imgs_with_staff = segmenter.regions_with_staff
show_images([imgs_with_staff[0]])
mypath = Path().absolute()
file_path = str(mypath) + '\\segmenter\\output\\'
zip_path = str(mypath) + '\\data\\melody\\'
delete_path = str(mypath) + '\\segmenter\\output'
absolute_path = Path(file_path)
print("Output of slices: " + file_path)
remove_dir = os.listdir(delete_path)
for item in remove_dir:
if item.endswith(".png"):
os.remove(os.path.join(delete_path, item))
print("==================================CROP===================================")
for i, img in enumerate(imgs_with_staff):
plt.rcParams["figure.figsize"] = (20,15)
plt.gca().set_axis_off()
plt.gca().set_title("")
fig=plt.imshow(imgs_with_staff[i],interpolation='nearest')
output_path = file_path+'slice'+str(i)+'.png'
plt.savefig(output_path,
bbox_inches='tight', pad_inches=0, format='png', dpi=600)
zipped_path = zip_path+'slice'+str(i)+'.png'
plt.savefig(zipped_path,
bbox_inches='tight', pad_inches=0, format='png', dpi=600)
print(" ++Image generated in " + str(time.time() - start_time))
crop(output_path)
segmented_staves.append(Path(output_path))
print("PROCESS COMPLETED in: " + str(time.time() - start_time))
return segmented_staves
if __name__ == '__main__':
Slice(r"C:\Users\aroue\Downloads\Documents\@ML\Sheet Music\goodsheet\pgws.png")
|
<filename>tests/test_cast.py
import os
import tempfile
from clicast.cast import Cast, CastReader
CAST_URL = 'https://raw.githubusercontent.com/maxzheng/clicast/master/test/example.cast'
CAST_FILE = os.path.join(os.path.dirname(__file__), 'example.cast')
class TestCast(object):
def test_from_file(self):
cast = Cast.from_content(CAST_FILE)
assert cast.alert == 'We found a big bad bug. Please try not to step on it!! Icky...\nNo worries. It will be fixed soon! :)'
assert cast.alert_exit
assert [m.message for m in cast.messages] == [
'Version 0.1 has been released! If you upgrade, you will get:\n'
'1) Cool feature 1\n'
'2) Cool feature 2\n'
'So what are you waiting for? :)',
'Version 0.2 has been released! Upgrade today to get cool features.',
'There is a small bug over there, so watch out!',
'[-f\\b] A bug that affects the -f option. (applies only if `clicast.filters.match_cli_args` filter is used)'
]
def test_save(self):
from_content = open(CAST_FILE).read()
cast = Cast.from_content(CAST_FILE)
to_cast_file = os.path.join(tempfile.gettempdir(), 'clicast.to_file_test.cast')
try:
cast.save(to_cast_file)
to_content = open(to_cast_file).read()
assert from_content == to_content
finally:
if os.path.exists(to_cast_file):
os.unlink(to_cast_file)
def test_from_url(self):
cast = Cast.from_content(CAST_URL)
assert cast.messages
from remoteconfig import RemoteConfig
c = RemoteConfig(CAST_URL)
assert str(c)
def test_add_msg(self):
cast = Cast()
cast.add_msg('Message 1')
cast.add_msg('Message Alert', alert=True)
cast.add_msg('Message 2')
assert cast.alert == 'Message Alert'
assert not cast.alert_exit
assert [(m.key, m.message) for m in cast.messages] == [
('1', 'Message 1'),
('2', 'Message 2')
]
cast.add_msg('Message Alert Exit', alert_exit=True)
assert cast.alert == 'Message Alert Exit'
assert cast.alert_exit
def test_del_msg(self):
cast = Cast()
cast.add_msg('Message 1')
cast.add_msg('Message 2')
cast.add_msg('Message Alert', alert_exit=True)
cast.del_msg()
assert cast.alert == 'Message Alert'
assert cast.alert_exit
assert [(m.key, m.message) for m in cast.messages] == [('2', 'Message 2')]
del_count = cast.del_msg(100)
assert del_count == 1
cast.add_msg('Message 3')
cast.add_msg('Message 4')
cast.add_msg('Message 5')
cast.del_msg(2)
cast.del_msg(alert=True)
assert not cast.alert
assert not cast.alert_exit
assert [(m.key, m.message) for m in cast.messages] == [('5', 'Message 5')]
cast_file = os.path.join(tempfile.gettempdir(), 'clicast.to_file_test.cast')
try:
cast.save(cast_file)
cast = Cast.from_content(cast_file)
cast.del_msg(100)
cast.save(cast_file)
cast = Cast.from_content(cast_file)
cast.add_msg('Message 6')
assert str(cast) == '[Messages]\n6: Message 6'
finally:
if os.path.exists(cast_file):
os.unlink(cast_file)
def test_set_msg_limit(self):
cast = Cast()
cast.set_msg_limit(2)
for x in range(10):
cast.add_msg('Message %d' % x)
assert str(cast) == '[Messages]\n9: Message 8\n10: Message 9\n_limit: 2'
def test_filter(self):
def msg_filter(msg, alert=False):
if 'small bug' in msg:
return msg
cast = Cast.from_content(CAST_FILE, msg_filter)
assert str(cast) == '[Messages]\n3: There is a small bug over there, so watch out!\n_limit: 5'
class TestCastReader(object):
def setup_class(cls):
CastReader.READ_MSG_FILE = '/tmp/clicast.test.read'
if os.path.exists(CastReader.READ_MSG_FILE):
os.unlink(CastReader.READ_MSG_FILE)
def test_new_messages(self):
cast = Cast.from_content(CAST_FILE)
reader = CastReader(cast)
assert reader.new_messages() == [
'We found a big bad bug. Please try not to step on it!! Icky...\nNo worries. It will be fixed soon! :)',
'Version 0.1 has been released! If you upgrade, you will get:\n'
'1) Cool feature 1\n'
'2) Cool feature 2\n'
'So what are you waiting for? :)',
'Version 0.2 has been released! Upgrade today to get cool features.',
'There is a small bug over there, so watch out!',
'[-f\\b] A bug that affects the -f option. (applies only if `clicast.filters.match_cli_args` filter is used)'
]
|
<filename>source/ETB/util/MCU_AVR.py
#####
# @brief AVR MCU utilities
#
# Module containing AVR MCU utility functions.
#
# @file /etb/util/MCU_AVR.py
# @author $Author: <NAME> $
# @version $Revision: 1.0 $
# @date $Date: 2021/04/08 $
#
# @see https://docs.python.org/3/library/subprocess.html#module-subprocess
#
# @example MCU_flash("path_to/binary.hex") # Flash the given binary on the MCU
# @example MCU_erase() # Erase the MCU
# @example MCU_reset() # Reset the MCU (via RST GPIO pin)
# @example MCU_set_clksrc(MCU_CLK_EXT, False, False) # Select the clock source
#####
##### LIBRARIES #####
# Subprocesss to call AVRDUDE from Python
import subprocess
# File/directory functionality
import os
# time (for sleep method)
import time
# GPIO functionality
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
##### GLOBAL VARIABLES #####
# MCU target specifics
MCU_TOOL = 'avrdude'
MCU_DEFAULT = 'atmega1284p'
PORT_DEFAULT = '/dev/ttyACM0'
# CLK selection
MCU_CLK_INT = 0
MCU_CLK_EXT = 1
### Fuse bytes ###
MCU_FUSE_TAG = {
'extended': 'efuse',
'high': 'hfuse',
'low': 'lfuse'
}
# Fuse extended byte
MCU_FE_BODLEVEL_OFFSET = 0
MCU_FE_BODLEVEL_MASK = 0xF7
# Fuse high byte
MCU_FH_OCDEN_OFFSET = 7
MCU_FH_JTAGEN_OFFSET = 6
MCU_FH_SPIEN_OFFSET = 5
MCU_FH_WDTON_OFFSET = 4
MCU_FH_EESAVE_OFFSET = 3
MCU_FH_BOOTSZ_OFFSET = 1
MCU_FH_BOOTSZ_MASK = 0xF9
MCU_FH_BOOTRST_OFFSET = 0
# Fuse low byte
MCU_FL_CKDIV8_OFFSET = 7
MCU_FL_CKOUT_OFFSET = 6
MCU_FL_SUT_OFFSET = 4
MCU_FL_SUT_MASK = 0xCF
MCU_FL_CKSEL_OFFSET = 0
MCU_FL_CKSEL_MASK = 0xF0
###
# Flash a given binary to the MCU.
#
# @param[in] binary Path to the binary (.hex).
# @param[in] port Serial port to be used.
# @param[in] mcu MCU model for AVRDUDE.
# @return True in case of success; otherwise False.
def MCU_flash(binary, port=PORT_DEFAULT, mcu=MCU_DEFAULT):
# Check if the given binary exists
if not os.path.isfile(binary):
return False
# Check if the serial interface exists
if not os.path.exists(port):
return False
# Prepare program string
cmd = "%s -p %s -c avrispv2 -P %s -v -U flash:w:%s" % (MCU_TOOL, mcu, port, binary)
# Try to flash the binary
ret = subprocess.run(cmd, shell=True, capture_output=True)
# Check if flashing was successful
if ret.returncode==0:
return True
else:
return False
###
# Read the specified fuses of the MCU.
#
# @param[in] fuse Fuse byte to be read.
# @param[in] port Serial port to be used.
# @param[in] mcu MCU model for AVRDUDE.
# @return Fuse byte value in case of success; otherwise False.
def _MCU_get_fuse(fuse, port=PORT_DEFAULT, mcu=MCU_DEFAULT):
# Check if the serial interface exists
if not os.path.exists(port):
return False
# Check if a valid fuse byte was chosen
if fuse not in MCU_FUSE_TAG:
raise ValueError('Valid fuse bytes are: \'extended\', \'high\', and \'low\'')
# Prepare program string
cmd = "%s -p %s -c avrispv2 -P %s -v -U %s:r:-:d" % (MCU_TOOL, mcu, port, MCU_FUSE_TAG[fuse])
# Try to program the fuses
ret = subprocess.run(cmd, shell=True, capture_output=True)
# Check if programming was successful
if ret.returncode==0:
return int(ret.stdout.decode("utf-8"))
else:
return False
###
# Read the extended fuses of the MCU.
#
# @param[in] port Serial port to be used.
# @param[in] mcu MCU model for AVRDUDE.
# @return Fuse byte value in case of success; otherwise False.
def MCU_get_efuse(port=PORT_DEFAULT, mcu=MCU_DEFAULT):
return _MCU_get_fuse('extended', port, mcu)
###
# Read the high fuses of the MCU.
#
# @param[in] port Serial port to be used.
# @param[in] mcu MCU model for AVRDUDE.
# @return Fuse byte value in case of success; otherwise False.
def MCU_get_hfuse(port=PORT_DEFAULT, mcu=MCU_DEFAULT):
return _MCU_get_fuse('high', port, mcu)
###
# Read the low fuses of the MCU.
#
# @param[in] port Serial port to be used.
# @param[in] mcu MCU model for AVRDUDE.
# @return Fuse byte value in case of success; otherwise False.
def MCU_get_lfuse(port=PORT_DEFAULT, mcu=MCU_DEFAULT):
return _MCU_get_fuse('low', port, mcu)
###
# Program the specified fuses of the MCU.
#
# @param[in] fuse Fuse byte to be programmed.
# @param[in] byte Fuse byte value.
# @param[in] port Serial port to be used.
# @param[in] mcu MCU model for AVRDUDE.
# @return True in case of success; otherwise False.
def _MCU_set_fuse(fuse, byte, port=PORT_DEFAULT, mcu=MCU_DEFAULT):
# Check if the serial interface exists
if not os.path.exists(port):
return False
# Check if a valid fuse byte was chosen
if fuse not in MCU_FUSE_TAG:
raise ValueError('Valid fuse bytes are: \'extended\', \'high\', and \'low\'')
# Prepare program string
cmd = "%s -p %s -c avrispv2 -P %s -v -U %s:w:0x%02X:m" % (MCU_TOOL, mcu, port, MCU_FUSE_TAG[fuse], byte)
# Try to program the fuses
ret = subprocess.run(cmd, shell=True, capture_output=True)
# Check if programming was successful
if ret.returncode==0:
return True
else:
return False
###
# Program the extended fuses of the MCU.
#
# @param[in] byte Fuse byte value.
# @param[in] port Serial port to be used.
# @param[in] mcu MCU model for AVRDUDE.
# @return True in case of success; otherwise False.
def MCU_set_efuse(byte, port=PORT_DEFAULT, mcu=MCU_DEFAULT):
    return _MCU_set_fuse('extended', byte, port, mcu)
###
# Program the high fuses of the MCU.
#
# @param[in] byte Fuse byte value.
# @param[in] port Serial port to be used.
# @param[in] mcu MCU model for AVRDUDE.
# @return True in case of success; otherwise False.
def MCU_set_hfuse(byte, port=PORT_DEFAULT, mcu=MCU_DEFAULT):
    return _MCU_set_fuse('high', byte, port, mcu)
###
# Program the low fuses of the MCU.
#
# @param[in] byte Fuse byte value.
# @param[in] port Serial port to be used.
# @param[in] mcu MCU model for AVRDUDE.
# @return True in case of success; otherwise False.
def MCU_set_lfuse(byte, port=PORT_DEFAULT, mcu=MCU_DEFAULT):
# Try to program the fuses
    return _MCU_set_fuse('low', byte, port, mcu)
###
# Program the CLK source of the MCU.
#
# @param[in] src Clock source (INT/EXT).
# @param[in] div8_en Enable clock division by 8.
# @param[in] ckout_en Enable clock output (CKOUT).
# @param[in] port Serial port to be used.
# @param[in] mcu MCU model for AVRDUDE.
# @return True in case of success; otherwise False.
def MCU_set_clksrc(src, div8_en=False, ckout_en=False, port=PORT_DEFAULT, mcu=MCU_DEFAULT):
# Check if the serial interface exists
if not os.path.exists(port):
return False
# Check if clock source is valid
if (src<MCU_CLK_INT) or (src>MCU_CLK_EXT):
raise ValueError('Valid clock sources are: MCU_CLK_INT (0) and MCU_CLK_EXT (1)')
# Start with 0x00
byte = 0x00
# Internal oscillator
if src==MCU_CLK_INT:
# Set CKSEL to 0010
byte |= (0b0010<<MCU_FL_CKSEL_OFFSET)
# Set SUT to 10
byte |= (0b10<<MCU_FL_SUT_OFFSET)
# External oscillator
else:
# Set CKSEL to 1111
byte |= (0b1111<<MCU_FL_CKSEL_OFFSET)
# Set SUT to 11
byte |= (0b11<<MCU_FL_SUT_OFFSET)
    # CKDIV8 fuse is active-low: leave the bit set (unprogrammed) when divide-by-8 is disabled
if div8_en is False:
byte |= 0x80
    # CKOUT fuse is active-low: leave the bit set (unprogrammed) when the clock output is disabled
if ckout_en is False:
byte |= 0x40
# Write the low fuses
return MCU_set_lfuse(byte, port, mcu)
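# (MCU_set_clksrc) Worked example, following the bit layout above: calling
# MCU_set_clksrc(MCU_CLK_INT) with div8_en=False and ckout_en=False builds
# CKSEL=0b0010 plus SUT=0b10<<4 -> 0x22, then sets bit 7 (CKDIV8 unprogrammed) and
# bit 6 (CKOUT unprogrammed), so 0xE2 is written to the low fuse byte, i.e. the
# internal RC oscillator at full speed with no clock output on CKOUT.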
###
# Erase the MCU.
#
# @param[in] port Serial port to be used.
# @param[in] mcu MCU model for AVRDUDE.
# @return True in case of success; otherwise False.
def MCU_erase(port=PORT_DEFAULT, mcu=MCU_DEFAULT):
# Check if the serial interface exists
if not os.path.exists(port):
return False
# Prepare erase string
cmd = "%s -p %s -c avrispv2 -P %s -v -e" % (MCU_TOOL, mcu, port)
# Try to erase the MCU
ret = subprocess.run(cmd, shell=True, capture_output=True)
# Check if erasing was successful
if ret.returncode==0:
return True
else:
return False
###
# Reset the MCU (via GPIO).
#
# @param[in] rst_pin GPIO pin for RST signal (BCM; default: 23).
def MCU_reset(rst_pin=23):
# Set RST to output
GPIO.setup(rst_pin, GPIO.OUT)
# Pull the RST line down
GPIO.output(rst_pin, 0)
# Wait for 500ms
time.sleep(0.5)
# Set RST line back to "1"
GPIO.output(rst_pin, 1)
# Release the RST line
GPIO.setup(rst_pin, GPIO.IN)
|
<reponame>Fanto94/aiopika
# Copyright
# (c) 2009-2019, <NAME>, <NAME>, Pivotal Software, Inc and others.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the Pika project nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The credentials classes are used to encapsulate all authentication
information for the :class:`~pika.connection.ConnectionParameters` class.
The :class:`~pika.credentials.PlainCredentials` class returns the properly
formatted username and password to the :class:`~pika.connection.Connection`.
To authenticate with Pika, create a :class:`~pika.credentials.PlainCredentials`
object passing in the username and password and pass it as the credentials
argument value to the :class:`~pika.connection.ConnectionParameters` object.
If you are using :class:`~pika.connection.URLParameters` you do not need a
credentials object, one will automatically be created for you.
If you are looking to implement SSL certificate style authentication, you would
extend the :class:`~pika.credentials.ExternalCredentials` class implementing
the required behavior.
"""
import logging
__all__ = ['PlainCredentials', 'ExternalCredentials', 'VALID_TYPES']
LOGGER = logging.getLogger(__name__)
def _as_bytes(value):
if not isinstance(value, bytes):
return value.encode('UTF-8')
return value
class PlainCredentials(object):
"""A credentials object for the default authentication methodology with
RabbitMQ.
If you do not pass in credentials to the ConnectionParameters object, it
    will create credentials for 'guest' with the password of 'guest'.
If you pass True to erase_on_connect the credentials will not be stored
in memory after the Connection attempt has been made.
:param str username: The username to authenticate with
    :param str password: The password to authenticate with
:param bool erase_on_connect: erase credentials on connect.
"""
TYPE = 'PLAIN'
def __init__(self, username, password, erase_on_connect=False):
"""Create a new instance of PlainCredentials
:param str username: The username to authenticate with
:param str password: The password to authenticate with
:param bool erase_on_connect: erase credentials on connect.
"""
self.username = username
self.password = password
self.erase_on_connect = erase_on_connect
def __eq__(self, other):
if isinstance(other, PlainCredentials):
return (self.username == other.username and
self.password == other.password and
self.erase_on_connect == other.erase_on_connect)
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
return not result
return NotImplemented
def response_for(self, start):
"""Validate that this type of authentication is supported
:param spec.Connection.Start start: Connection.Start method
:rtype: tuple(str|None, str|None)
"""
if _as_bytes(PlainCredentials.TYPE) not in\
_as_bytes(start.mechanisms).split():
return None, None
return (
PlainCredentials.TYPE,
b'\0' + _as_bytes(self.username) + b'\0' + _as_bytes(self.password))
def erase_credentials(self):
"""Called by Connection when it no longer needs the credentials"""
if self.erase_on_connect:
LOGGER.info("Erasing stored credential values")
self.username = None
self.password = None
class ExternalCredentials(object):
"""The ExternalCredentials class allows the connection to use EXTERNAL
authentication, generally with a client SSL certificate.
"""
TYPE = 'EXTERNAL'
def __init__(self):
"""Create a new instance of ExternalCredentials"""
self.erase_on_connect = False
def __eq__(self, other):
if isinstance(other, ExternalCredentials):
return self.erase_on_connect == other.erase_on_connect
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
return not result
return NotImplemented
def response_for(self, start): # pylint: disable=R0201
"""Validate that this type of authentication is supported
:param spec.Connection.Start start: Connection.Start method
:rtype: tuple(str or None, str or None)
"""
if _as_bytes(ExternalCredentials.TYPE) not in\
_as_bytes(start.mechanisms).split():
return None, None
return ExternalCredentials.TYPE, b''
def erase_credentials(self): # pylint: disable=R0201
"""Called by Connection when it no longer needs the credentials"""
LOGGER.debug('Not supported by this Credentials type')
# Append custom credential types to this list for validation support
VALID_TYPES = [PlainCredentials, ExternalCredentials]
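# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal example of
# constructing PlainCredentials and exercising erase_credentials(). The
# username/password values below are illustrative placeholders only.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    creds = PlainCredentials('guest', 'guest', erase_on_connect=True)
    # Equality compares username, password and erase_on_connect
    print(creds == PlainCredentials('guest', 'guest', erase_on_connect=True))
    # With erase_on_connect=True the stored values can be cleared on request
    creds.erase_credentials()
    print(creds.username, creds.password)  # -> None None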
|
<filename>data2df.py
"""
This contains functions which take a pandas DataFrame, which must have a column
of HELCATS CME names called 'helcats_name', add data from various sources
corresponding to those CMEs, and return the df with extra data.
"""
from __future__ import division
import os
import sys
import numpy as np
import pandas as pd
import astropy.units as u
from datetime import datetime, timedelta
import popfss_image_processing as ip
#sys.path.insert(0, r'N:\\Documents\\Code\\useful_code')
sys.path.insert(0, r'C:\\Users\\shann\\OneDrive\\Documents\\Research\\Workspace\\Code\\useful_code')
import misc
from data_helcats import HELCATS
from data_heliomas import HelioMAS
from data_corset import CORSET
from data_seeds import SEEDS
from data_cactus import CACTus
from data_yutian_insitu import Yutian_insitu
data_loc = r'C:\\Users\\shann\\OneDrive\\Documents\\Research\\Workspace\\Data'
# Functions to add new columns to the df
def add_col_to_df(df, col_1, col_2, operation, name, abs_col=False):
"""Adds an extra column to df: col_1 operation col_2
e.g. operation = "multiply" new col = col_1 * col_2
:param: df: pandas data frame
:param: col_1: string, first column to use
:param: col_2: string, second column to use
:param: operation: string, 'multiply', 'divide', 'add', 'subtract'
:param: name: string, name of new column to add to df
:param: abs_col: bool, take absolute value of calculated values
"""
new_vals = []
for i in range(len(df)):
try:
if operation == 'add':
result = df[col_1][i] + df[col_2][i]
elif operation == 'subtract':
result = df[col_1][i] - df[col_2][i]
elif operation == 'multiply':
result = df[col_1][i] * df[col_2][i]
elif operation == 'divide':
result = df[col_1][i] / df[col_2][i]
            if abs_col:
                new_vals.append(abs(result))
            else:
                new_vals.append(result)
        except Exception:
            new_vals.append(np.NaN)
# Add new values to df
df[name] = pd.Series(new_vals, index=df.index)
return df
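# Hedged usage sketch for add_col_to_df (not in the original source); the
# column names and values below are illustrative only:
#     df = pd.DataFrame({'pa_n': [100.0, 80.0], 'pa_s': [60.0, 95.0]})
#     df = add_col_to_df(df, 'pa_n', 'pa_s', 'subtract', 'width', abs_col=True)
#     df['width']  # -> 40.0, 15.0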
def add_helcats_to_df(df, col, target=None, name=None, astype=None):
"""Finds property h_name from a HELCATS catalogue for CMEs in df.
:param: df: pandas dataframe
:param: h_name: string, helcats name of property to add
:param: cat: string, helcats catalogue to find property h_name from
options: 'hicat', 'higeocat', 'hijoincat', 'kincat', 'linkcat'
:param: name: string, label for new column in dataframe
:param: date: bool True or False, if True format values to datetime objects
"""
hc = HELCATS(data_loc)
col_data = hc.get_col_data(col, list(df.helcats_name.values), target=target)
    if name is None:
name = col
df[name] = pd.Series(col_data, index=df.index)
    if astype is not None:
if astype == datetime:
df[name] = pd.to_datetime(df[name])
else:
df[name] = df[name].astype(astype)
return df
def add_craft_and_time_to_df(df):
"""Adds columns; craft = 'sta' or 'stb'; and time = datetime - appearance
of CME in HI1.
"""
hc = HELCATS(data_loc)
craft_list, time_list = hc.get_cme_details_list(df.helcats_name)
df['craft'] = pd.Series(craft_list, index=df.index)
df['time'] = pd.Series(time_list, index=df.index)
return df
def add_geo_indicator_to_df(df):
"""Adds a column 'geo' which is 1 if the CME was geoeffective, and 0
otherwise.
"""
hc = HELCATS(data_loc)
ind_data = hc.get_geo_indicator(df.helcats_name.values)
df['geo'] = pd.Series(ind_data, index=df.index)
return df
def add_matches_to_df(df):
"""Adds a column 'match' containing the helcats name of the same CME
observed by the other spacecraft, if it was.
"""
hc = HELCATS(data_loc)
match_data = hc.get_matches(df.helcats_name.values)
df['match'] = pd.Series(match_data, index=df.index)
return df
def add_width_to_df(df):
"""Adds an extra column "width" to df which is the angular width of the CME
in degrees.
"""
df = add_helcats_to_df(df, 'PA-N [deg]')
df = add_helcats_to_df(df, 'PA-S [deg]')
df = add_col_to_df(df, 'PA-N [deg]', 'PA-S [deg]', 'subtract', 'width', abs_col=True)
return df
def add_te_track_times_to_df(df):
"""
Adds 'start_time', 'mid_time' and 'end_time' of the CME in HI FOV.
These values are found from the HELCATS time-elongation profiles.
"""
helcats = HELCATS(data_loc)
start, mid, end, mid_el = helcats.get_te_track_times_list(df.helcats_name)
df['start_time'] = pd.Series(start, index=df.index)
df['mid_time'] = pd.Series(mid, index=df.index)
df['end_time'] = pd.Series(end, index=df.index)
df['mid_el'] = pd.Series(mid_el, index=df.index)
return df
def add_corset_to_df(df):
corset = CORSET(misc.get_project_dirs()['data'])
corset = corset.load()
    helcats = HELCATS(data_loc)
window = [timedelta(hours=48), timedelta(hours=-12)]
helcats_names = helcats.match_to_helcats(corset.time, corset.craft,
window=window)
corset['helcats_name'] = pd.Series(helcats_names, index=corset.index)
df = pd.merge(df, corset, on=['helcats_name'])
return df
def add_seeds_to_df(df):
seeds = SEEDS(data_loc)
seeds = seeds.load()
helcats = HELCATS(data_loc)
window = [timedelta(hours=48), timedelta(hours=-12)]
helcats_names = helcats.match_to_helcats(seeds.time, seeds.craft,
window=window)
seeds['helcats_name'] = pd.Series(helcats_names, index=seeds.index)
df = pd.merge(df, seeds, on=['helcats_name'])
return df
def add_cactus_to_df(df):
cactus = CACTus(data_loc)
cactus = cactus.load()
helcats = HELCATS(data_loc)
window = [timedelta(hours=48), timedelta(hours=-12)]
helcats_names = helcats.match_to_helcats(cactus.time, cactus.craft,
window=window)
cactus['helcats_name'] = pd.Series(helcats_names, index=cactus.index)
df = pd.merge(df, cactus, on=['helcats_name'])
return df
def add_yutian_insitu_to_df(df):
y = Yutian_insitu(data_loc)
ydf = y.load()
helcats = HELCATS(data_loc)
window = [timedelta(hours=12), timedelta(hours=12)]
helcats_names = helcats.match_to_helcats(ydf.start_time,
insitu_craft='Wind',
window=window)
ydf['helcats_name'] = pd.Series(helcats_names, index=ydf.index)
df = pd.merge(df, ydf, on=['helcats_name'])
return df
def add_spacecraft_separation_to_df(df):
"""Adds column "sc_sep" which is distance of spacecraft from CME in km.
"""
df = add_helcats_to_df(df, 'SSE Phi [deg]', 'higeocat', 'SSEphi')
scs = []
    for i in df.index:
        try:
            distance = 695700 * 1000  # 695,700 km converted to metres
            el = 14  # elongation in degrees
            phi = df.SSEphi[i]  # SSE Phi in degrees
            # np.sin expects radians, so convert the angles from degrees first
            scs_i = (distance * np.sin(np.radians(phi))
                     / np.sin(np.radians(180 - el - phi)))
            scs.append(scs_i / 1000)
        except Exception:
            scs.append(np.NaN)
# Add spacecraft separation from CME to df
df['sc_sep'] = pd.Series(scs, index=df.index)
return df
def add_image_stats_to_df(df, tag=""):
"""reads in image stats from .csv file created and adds a new column for
each image stat.
"""
df_stats = ip.load_image_stats()
    # sort both data frames and make sure they are the same length
if len(df) != len(df_stats):
raise ValueError("Data Frames are not the same length")
df = df.sort_values(by='complexity')
df_stats = df_stats.sort_values(by='complexity')
# remove duplicate columns
df_stats = df_stats.drop(['Unnamed: 0', 'complexity', 'craft',
'helcats_name', 'ranking_place', 'ssw_name',
'time'], axis=1)
merged_df = pd.concat([df, df_stats], axis=1)
return merged_df
def add_heliomas_to_df(df):
df = add_helcats_to_df(df, 'PA-fit [deg]', 'higeocat', name='CPA')
df = add_helcats_to_df(df, 'PA-N [deg]', 'hicat', name='PA-N')
df = add_helcats_to_df(df, 'PA-S [deg]', 'hicat', name='PA-S')
lon_mean = []
lon_std = []
lat_mean = []
lat_std = []
r_mean = []
r_std = []
sw = []
lat_diff = []
lon_diff = []
for i in df.index:
mas = HelioMAS(df.time[i])
if df.craft[i] == 'stb':
CPA = 360 - df['CPA'][i]
PAN = 360 - df['PA-S'][i]
PAS = 360 - df['PA-N'][i]
else:
CPA = df['CPA'][i]
PAN = df['PA-N'][i]
PAS = df['PA-S'][i]
try:
lon_vals = mas.extract('speed', lon=None, lat=CPA*u.deg, r=30*u.R_sun)
lat_vals = mas.extract('speed', lon=df.phi[i]*u.deg, lat=None, r=30*u.R_sun)
r_vals = mas.extract('speed', lon=df.phi[i]*u.deg, lat=CPA*u.deg, r=None)
lat_diff.append(abs(mas.extract('speed', lon=df.phi[i]*u.deg, lat=PAN*u.deg, r=30*u.R_sun) -
mas.extract('speed', lon=df.phi[i]*u.deg, lat=PAS*u.deg, r=30*u.R_sun)).value)
lon_diff.append(abs(mas.extract('speed', lon=df.phi[i]*u.deg, lat=CPA*u.deg, r=15*u.R_sun) -
mas.extract('speed', lon=df.phi[i]*u.deg, lat=CPA*u.deg, r=40*u.R_sun)).value)
lon_mean.append(np.mean(lon_vals.value))
lat_mean.append(np.mean(lat_vals.value))
r_mean.append(np.mean(r_vals.value))
lon_std.append(np.std(lon_vals.value))
lat_std.append(np.std(lat_vals.value))
r_std.append(np.std(r_vals.value))
            sw.append(mas.extract('speed', lon=df.phi[i]*u.deg, lat=CPA*u.deg, r=30*u.R_sun))
        except Exception:
lon_mean.append(np.NaN)
lat_mean.append(np.NaN)
r_mean.append(np.NaN)
lon_std.append(np.NaN)
lat_std.append(np.NaN)
r_std.append(np.NaN)
sw.append(np.NaN)
lat_diff.append(np.NaN)
lon_diff.append(np.NaN)
df['lon_mean'] = pd.Series(lon_mean, index=df.index)
df['lon_std'] = pd.Series(lon_std, index=df.index)
df['lat_mean'] = pd.Series(lat_mean, index=df.index)
df['lat_std'] = pd.Series(lat_std, index=df.index)
df['r_mean'] = pd.Series(r_mean, index=df.index)
df['r_std'] = pd.Series(r_std, index=df.index)
df['sw'] = pd.Series(sw, index=df.index)
df['lat_diff'] = pd.Series(lat_diff, index=df.index)
df['lon_diff'] = pd.Series(lon_diff, index=df.index)
return df
|
<filename>homework_working/hw7-backprop/code/test_utils.py
"""Computation graph test utilities
Below are functions that can assist in testing the implementation of backward
for individual nodes, as well as the gradient computation in a
ComputationGraphFunction. The approach is to use the secant approximation to
compute the gradient numerically, and this is then compared to the gradient
computed by the node or ComputationGraphFunction. Note that this is really only
verifying that the gradient corresponds to the function value that is being
computed. So this is only useful if you also check that the forward() is
correct in nodes, and that the get_objective() is correct in the
ComputationGraphFunction.
This computation graph framework was designed and implemented by
<NAME>, <NAME>, and <NAME>.
License: Creative Commons Attribution 4.0 International License
"""
import logging
import graph
import numpy as np
logging.basicConfig(format='%(levelname)s: %(message)s',level=logging.DEBUG)
def relative_error(a,b):
if a==0.0 and b==0.0:
return 0.0
# approach described here http://cs231n.github.io/neural-networks-3/
rel_err = np.abs(a-b) / max(np.abs(a), np.abs(b))
return rel_err
def test_node_backward(node, init_vals, delta=1e-7):
for parent_node in node.get_predecessors():
parent_node.out = init_vals[parent_node.node_name]
out = graph.forward_graph(node)
d_out = np.random.standard_normal(out.shape) # simulate a random derivative w.r.t. out
node.d_out = d_out
node.backward() # sets partials in parent.d_node for each parent
# Numerically estimate partial derivative of J w.r.t. each entry of each parent_node.out,
# Assuming partial of J w.r.t. node output is given by node.d_out.
overall_max_rel_err = -1
for parent in node.get_predecessors():
parent_out = np.copy(parent.out) # save original parent.out
it = np.nditer(parent_out, flags=['multi_index'])
max_rel_err = -1
while not it.finished:
parent.out[it.multi_index] = parent_out[it.multi_index] + delta
out_plus_delta = node.forward()
parent.out[it.multi_index] = parent_out[it.multi_index] - delta
out_minus_delta = node.forward()
            parent.out[it.multi_index] = parent_out[it.multi_index]  # set it back to how it was
local_partial_est = (out_plus_delta - out_minus_delta) / (2.0 * delta)
partial_est = np.sum(d_out * local_partial_est) # this is the chain rule
partial_backward = parent.d_out[it.multi_index] # partial as computed by backward
#pdb.set_trace()
rel_err = relative_error(partial_est, partial_backward)
max_rel_err = max(max_rel_err, rel_err)
it.iternext()
logging.debug("(Node %s) Max rel error for partial deriv w.r.t. %s is %s." % (node.node_name, parent.node_name, max_rel_err))
overall_max_rel_err = max(max_rel_err, overall_max_rel_err)
return overall_max_rel_err
def test_ComputationGraphFunction(graph, input_vals, outcome_vals, parameter_vals, delta=1e-7):
graph.set_parameters(parameter_vals)
_, gradients = graph.get_gradients(input_vals, outcome_vals)
overall_max_rel_err = -1
for param in parameter_vals:
val = parameter_vals[param]
it = np.nditer(val, flags=['multi_index'])
max_rel_err = -1
while not it.finished:
step = np.zeros(val.shape)
step[it.multi_index] = delta
graph.increment_parameters({param:step})
obj_plus_delta = graph.get_objective(input_vals, outcome_vals)
step[it.multi_index] = -2 * delta #so a step of -delta from original value
graph.increment_parameters({param:step})
obj_minus_delta = graph.get_objective(input_vals, outcome_vals)
step[it.multi_index] = delta #bring it back to original value
graph.increment_parameters({param:step})
partial_est = (obj_plus_delta - obj_minus_delta) / (2.0 * delta)
partial_backprop = gradients[param][it.multi_index] # partial from ComputationGraphFunction
rel_err = relative_error(partial_est, partial_backprop)
max_rel_err = max(max_rel_err, rel_err)
it.iternext()
logging.debug("(Parameter %s) Max rel error for partial deriv %s." % (param, max_rel_err))
overall_max_rel_err = max(overall_max_rel_err, max_rel_err)
return overall_max_rel_err
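# Hedged standalone sketch (not part of the original tests): the central
# difference ("secant") estimate that test_node_backward relies on, checked
# against the analytic derivative of a toy function f(x) = x**2 at x = 3.
if __name__ == '__main__':
    delta = 1e-7
    x = 3.0
    numeric = ((x + delta) ** 2 - (x - delta) ** 2) / (2.0 * delta)
    analytic = 2.0 * x
    logging.debug("toy gradient check, relative error: %s",
                  relative_error(numeric, analytic))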
|
"""
TLIO Stochastic Cloning Extended Kalman Filter
Input: IMU data
Measurement: window displacement estimates from networks
Filter states: position, velocity, rotation, IMU biases
"""
import argparse
import datetime
import json
import os
# silence NumbaPerformanceWarning
import warnings
from pprint import pformat
import numpy as np
from numba.core.errors import NumbaPerformanceWarning
from tracker.imu_tracker_runner import ImuTrackerRunner
from utils.argparse_utils import add_bool_arg
from utils.git_version import git_version
from utils.logging import logging
from utils.profile import profile
warnings.filterwarnings("ignore", category=NumbaPerformanceWarning)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# ----------------------- io params -----------------------
io_groups = parser.add_argument_group("io")
io_groups.add_argument(
"--root_dir", type=str, default=None, help="Path to data directory"
)
io_groups.add_argument("--data_list", type=str, default=None)
io_groups.add_argument("--dataset_number", type=int, default=None)
io_groups.add_argument("--model_path", type=str, default=None)
io_groups.add_argument("--model_param_path", type=str, default=None, required=True)
io_groups.add_argument("--out_dir", type=str, default=".")
io_groups.add_argument("--out_filename", type=str, default="not_vio_state.txt")
io_groups.add_argument("--save_as_npy", action="store_true")
io_groups.add_argument("--sim_data_path", type=str, default="imu-sim.txt")
io_groups.add_argument(
"--start_from_ts", type=int, default=None
) # dataloader loading data from timestamp (us)
add_bool_arg(io_groups, "erase_old_log", default=False)
# ----------------------- network params -----------------------
net_groups = parser.add_argument_group("network")
net_groups.add_argument("--cpu", action="store_true")
# ----------------------- filter params -----------------------
filter_group = parser.add_argument_group("filter tuning:")
filter_group.add_argument("--update_freq", type=float, default=20.0) # (Hz)
filter_group.add_argument(
"--sigma_na", type=float, default=np.sqrt(1e-3)
) # accel noise m/s^2
filter_group.add_argument(
"--sigma_ng", type=float, default=np.sqrt(1e-4)
) # gyro noise rad/s
filter_group.add_argument(
"--ita_ba", type=float, default=1e-4
) # accel bias noise m/s^2/sqrt(s)
filter_group.add_argument(
"--ita_bg", type=float, default=1e-6
) # gyro bias noise rad/s/sqrt(s)
filter_group.add_argument(
"--init_attitude_sigma", type=float, default=10.0 / 180.0 * np.pi
) # rad
filter_group.add_argument(
"--init_yaw_sigma", type=float, default=0.1 / 180.0 * np.pi
) # rad
filter_group.add_argument("--init_vel_sigma", type=float, default=1.0) # m/s
filter_group.add_argument("--init_pos_sigma", type=float, default=0.001) # m
filter_group.add_argument(
"--init_bg_sigma", type=float, default=0.0001
) # rad/s 0.001
filter_group.add_argument("--init_ba_sigma", type=float, default=0.2) # m/s^2 0.02
filter_group.add_argument("--g_norm", type=float, default=9.81)
filter_group.add_argument("--meascov_scale", type=float, default=1.0)
add_bool_arg(
filter_group, "initialize_with_vio", default=True
) # initialize state with gt state
add_bool_arg(
filter_group, "initialize_with_offline_calib", default=False
) # initialize bias state with offline calib or 0
filter_group.add_argument(
"--mahalanobis_fail_scale", type=float, default=0
    )  # if nonzero, the Mahalanobis gating test scales the measurement covariance by this factor when the test fails
# ----------------------- debug params -----------------------
debug_groups = parser.add_argument_group("debug")
# covariance alternatives (note: if use_vio_meas is true, meas constant with default value 1e-4)
add_bool_arg(debug_groups, "use_const_cov", default=False)
debug_groups.add_argument(
"--const_cov_val_x", type=float, default=np.power(0.1, 2.0)
)
debug_groups.add_argument(
"--const_cov_val_y", type=float, default=np.power(0.1, 2.0)
)
debug_groups.add_argument(
"--const_cov_val_z", type=float, default=np.power(0.1, 2.0)
)
# measurement alternatives (note: if use_vio_meas is false, add_sim_meas_noise must be false)
add_bool_arg(
debug_groups,
"use_vio_meas",
default=False,
        help='If using "vio" measurement for the filter update instead of the network output',
)
add_bool_arg(debug_groups, "debug_using_vio_ba", default=False)
add_bool_arg(
debug_groups, "add_sim_meas_noise", default=False
) # adding noise on displacement measurement when using vio measurement
debug_groups.add_argument(
"--sim_meas_cov_val", type=float, default=np.power(0.01, 2.0)
)
debug_groups.add_argument(
"--sim_meas_cov_val_z", type=float, default=np.power(0.01, 2.0)
)
add_bool_arg(debug_groups, "do_profile", default=False, help="Run the profiler")
args = parser.parse_args()
np.set_printoptions(linewidth=2000)
logging.info("Program options:")
    logging.info(pformat(vars(args)))
# run filter
with open(args.data_list) as f:
data_names = [
            s.strip().replace(",", " ").split(" ")[0]  # first token, comma- or space-separated
for s in f.readlines()
if len(s) > 0 and s[0] != "#"
]
if not os.path.exists(args.out_dir):
os.mkdir(args.out_dir)
param_dict = vars(args)
param_dict["git_version"] = git_version()
param_dict["date"] = str(datetime.datetime.now())
with open(args.out_dir + "/parameters.json", "w") as parameters_file:
parameters_file.write(json.dumps(param_dict, indent=4, sort_keys=True))
# load offline calibration for IMU
with profile(filename="./profile.prof", enabled=args.do_profile):
if args.dataset_number is not None:
logging.info("Running in one-shot mode")
logging.info("Using dataset {}".format(data_names[args.dataset_number]))
trackerRunner = ImuTrackerRunner(args, data_names[args.dataset_number])
trackerRunner.run_tracker(args)
else:
logging.info("Running in batch mode")
# add metadata for logging
n_data = len(data_names)
for i, name in enumerate(data_names):
logging.info(f"Processing {i} / {n_data} dataset {name}")
try:
trackerRunner = ImuTrackerRunner(args, name)
trackerRunner.run_tracker(args)
except FileExistsError as e:
print(e)
continue
except OSError as e:
print(e)
continue
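# Hedged invocation sketch (not part of the original script); the script and
# path names below are placeholders, not real files:
#   python3 this_script.py \
#       --data_list ./data/test_list.txt \
#       --model_path ./models/tlio.pt \
#       --model_param_path ./models/tlio_params.json \
#       --out_dir ./filter_output --save_as_npy
# Only --model_param_path is marked required by argparse, but --data_list must
# also point to a readable list file because it is opened unconditionally above.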
|
# Copyright 2014 VMWare.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import adapter
class Client(object):
"""Client for the Congress v1 API.
Example::
from keystoneauth1.identity import v2
from keystoneauth1 import session
from congressclient.v1 import client
auth = v2.Password(auth_url=AUTH_URL, username=USERNAME,
password=PASSWORD, tenant_name=TENANT_NAME)
sess = session.Session(auth=auth)
congress = client.Client(session=sess,
auth=None,
interface='publicURL',
service_type='policy',
region_name='RegionOne')
congress.create_policy_rule(..)
"""
policy = '/v1/policies'
policy_path = '/v1/policies/%s'
policy_rules = '/v1/policies/%s/rules'
policy_rules_path = '/v1/policies/%s/rules/%s'
policy_tables = '/v1/policies/%s/tables'
policy_table_path = '/v1/policies/%s/tables/%s'
policy_rows = '/v1/policies/%s/tables/%s/rows'
policy_rows_trace = '/v1/policies/%s/tables/%s/rows?trace=True'
policies = '/v1/policies'
policy_action = '/v1/policies/%s?%s'
datasources = '/v1/data-sources'
datasource_path = '/v1/data-sources/%s'
datasource_tables = '/v1/data-sources/%s/tables'
datasource_table_path = '/v1/data-sources/%s/tables/%s'
datasource_status = '/v1/data-sources/%s/status'
datasource_actions = '/v1/data-sources/%s/actions'
datasource_schema = '/v1/data-sources/%s/schema'
datasource_table_schema = '/v1/data-sources/%s/tables/%s/spec'
datasource_rows = '/v1/data-sources/%s/tables/%s/rows'
driver = '/v1/system/drivers'
driver_path = '/v1/system/drivers/%s'
policy_api_versions = '/'
def __init__(self, **kwargs):
super(Client, self).__init__()
kwargs.setdefault('user_agent', 'python-congressclient')
self.httpclient = adapter.LegacyJsonAdapter(**kwargs)
def create_policy(self, body):
resp, body = self.httpclient.post(
self.policy, body=body)
return body
def delete_policy(self, policy):
resp, body = self.httpclient.delete(
self.policy_path % policy)
return body
def show_policy(self, policy):
resp, body = self.httpclient.get(
self.policy_path % policy)
return body
def create_policy_rule(self, policy_name, body=None):
resp, body = self.httpclient.post(
self.policy_rules % policy_name, body=body)
return body
def delete_policy_rule(self, policy_name, rule_id):
resp, body = self.httpclient.delete(
self.policy_rules_path % (policy_name, rule_id))
return body
def show_policy_rule(self, policy_name, rule_id):
resp, body = self.httpclient.get(
self.policy_rules_path % (policy_name, rule_id))
return body
def list_policy_rows(self, policy_name, table, trace=None):
if trace:
query = self.policy_rows_trace
else:
query = self.policy_rows
resp, body = self.httpclient.get(query % (policy_name, table))
return body
def list_policy_rules(self, policy_name):
resp, body = self.httpclient.get(self.policy_rules % (policy_name))
return body
def list_policy(self):
resp, body = self.httpclient.get(self.policies)
return body
def list_policy_tables(self, policy_name):
resp, body = self.httpclient.get(self.policy_tables % (policy_name))
return body
def execute_policy_action(self, policy_name, action, trace, delta, body):
uri = "?action=%s&trace=%s&delta=%s" % (action, trace, delta)
resp, body = self.httpclient.post(
(self.policy_path % policy_name) + str(uri), body=body)
return body
def show_policy_table(self, policy_name, table_id):
resp, body = self.httpclient.get(self.policy_table_path %
(policy_name, table_id))
return body
def list_datasources(self):
resp, body = self.httpclient.get(self.datasources)
return body
def show_datasource(self, datasource_name):
"""Get a single datasource
Intended for use by Horizon. Not available in CLI
"""
resp, body = self.httpclient.get(self.datasource_path %
(datasource_name))
return body
def list_datasource_tables(self, datasource_name):
resp, body = self.httpclient.get(self.datasource_tables %
(datasource_name))
return body
def list_datasource_rows(self, datasource_name, table_name):
resp, body = self.httpclient.get(self.datasource_rows %
(datasource_name, table_name))
return body
def update_datasource_rows(self, datasource_name, table_name, body=None):
"""Update rows in a table of a datasource.
Args:
datasource_name: Name or id of the datasource
table_name: Table name for updating
body: Rows for update.
"""
resp, body = self.httpclient.put(self.datasource_rows %
(datasource_name, table_name),
body=body)
return body
def list_datasource_status(self, datasource_name):
resp, body = self.httpclient.get(self.datasource_status %
datasource_name)
return body
def list_datasource_actions(self, datasource_name):
resp, body = self.httpclient.get(self.datasource_actions %
datasource_name)
return body
def show_datasource_schema(self, datasource_name):
resp, body = self.httpclient.get(self.datasource_schema %
datasource_name)
return body
def show_datasource_table_schema(self, datasource_name, table_name):
resp, body = self.httpclient.get(self.datasource_table_schema %
(datasource_name, table_name))
return body
def show_datasource_table(self, datasource_name, table_id):
resp, body = self.httpclient.get(self.datasource_table_path %
(datasource_name, table_id))
return body
def create_datasource(self, body=None):
resp, body = self.httpclient.post(
self.datasources, body=body)
return body
def delete_datasource(self, datasource):
resp, body = self.httpclient.delete(
self.datasource_path % datasource)
return body
def execute_datasource_action(self, service_name, action, body):
uri = "?action=%s" % (action)
resp, body = self.httpclient.post(
(self.datasource_path % service_name) + str(uri), body=body)
return body
def list_drivers(self):
resp, body = self.httpclient.get(self.driver)
return body
def show_driver(self, driver):
resp, body = self.httpclient.get(self.driver_path %
(driver))
return body
def request_refresh(self, driver, body=None):
resp, body = self.httpclient.post(self.datasource_path %
(driver) + "?action=request-refresh",
body=body)
return body
def list_api_versions(self):
resp, body = self.httpclient.get(self.policy_api_versions)
return body
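# Hedged usage sketch (not part of the original client); the policy name and
# rule body below are illustrative only and assume an authenticated session
# built as in the class docstring:
#   congress = Client(session=sess, service_type='policy')
#   congress.create_policy_rule('classification',
#                               body={'rule': 'error(x) :- warning(x)'})
#   congress.list_policy_rules('classification')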
|
<filename>transliterate.py
# -*- coding: utf-8 -*-
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# Author: <EMAIL>
#
# A command line script to transliterate text
#
# ml transliterate aztranslate [<text>]
#
# https://github.com/MicrosoftTranslator/Text-Translation-API-V3-Python
#
# ----------------------------------------------------------------------
# Import the required libraries.
# ----------------------------------------------------------------------
import sys
import os
import argparse
import requests
import uuid
import json
from mlhub.pkg import azkey
# ----------------------------------------------------------------------
# Parse command line arguments: text to transliterate.
# ----------------------------------------------------------------------
option_parser = argparse.ArgumentParser(add_help=False)
option_parser.add_argument(
'text',
nargs="*",
help='text to translate')
option_parser.add_argument(
'--header',
action='store_true')
option_parser.add_argument(
'-l', '--lang',
help='language')
option_parser.add_argument(
'-f', '--source', '--from',
help='source script')
option_parser.add_argument(
'-t', '--to',
help='target script')
option_parser.add_argument(
'-k', '--keep',
action="store_true",
help='keep original text in output')
args = option_parser.parse_args()
lang = args.lang
fr = args.source
to = args.to if args.to is not None else 'latn'
# ----------------------------------------------------------------------
# Request subscription key and endpoint from user.
# ----------------------------------------------------------------------
SERVICE = "Text Translator"
KEY_FILE = os.path.join(os.getcwd(), "private.txt")
key, endpoint = azkey(KEY_FILE, SERVICE, verbose=False, baseurl=True)
# ----------------------------------------------------------------------
# Build the REST API URLs.
# ----------------------------------------------------------------------
path = '/transliterate?api-version=3.0'
url = endpoint + path
headers = {
'Ocp-Apim-Subscription-Key': key,
'Content-type': 'application/json',
'X-ClientTraceId': str(uuid.uuid4())
}
# ------------------------------------------------------------------------
# Helper function.
# ------------------------------------------------------------------------
def helper(txt, lang, fr, to):
smpl = [{'text': txt}]
# Auto determine LANG and then use first script in the SUPPORTED
# output as default for FR
if lang is None:
durl = endpoint + '/detect?api-version=3.0'
result = requests.post(durl, headers=headers, json=smpl).json()
lang = result[0]['language']
# print(lang)
if fr is None:
turl = endpoint + '/languages?api-version=3.0'
response = requests.get(turl, headers=headers).json()['transliteration']
fr = response[lang]['scripts'][0]['code']
# print(fr)
params = f"&language={lang}&fromScript={fr}&toScript={to}"
# print(params, txt)
request = requests.post(url + params, headers=headers, json=smpl)
result = request.json()
# print(result)
if request.ok:
sys.stdout.write(f"{lang.lower()},{fr.lower()},{to.lower()},{result[0]['text']}")
else:
sys.stdout.write(f"Error code {result['error']['code']}: {result['error']['message']}")
# ------------------------------------------------------------------------
# Translate text obtained from command line, pipe, or interactively.
# ------------------------------------------------------------------------
txt = " ".join(args.text)
if txt != "":
if args.header: print("language,from,to,transliteration")
helper(txt, lang, fr, to)
print()
elif not sys.stdin.isatty():
if args.header: print("language,from,to,transliteration")
for txt in sys.stdin.readlines():
helper(txt, lang, fr, to)
else:
    print("Enter text to be analysed. Quit with an empty line or Ctrl-d.\n")
prompt = '> '
try:
txt = input(prompt)
except EOFError:
print()
sys.exit(0)
while txt != '':
if args.header: print("language,from,to,transliteration")
helper(txt, lang, fr, to)
try:
print()
txt = input(prompt)
except EOFError:
print()
sys.exit(0)
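# Hedged invocation sketch (not part of the original script); assumes a valid
# Text Translator key and endpoint in private.txt, as requested by azkey():
#   $ echo "some text" | python3 transliterate.py --header -l ja -t latn
#   language,from,to,transliteration
#   ...
# The header row is printed only when --header is given; each result line is
# written as "language,from,to,transliterated text".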
|
<reponame>amar-enkhbat/AutoGCN
import os
import pickle
import json
import numpy as np
import pandas as pd
from sklearn.metrics import classification_report, confusion_matrix, roc_auc_score
import seaborn as sns
import plotly.express as px
import matplotlib.pyplot as plt
def load_data(dataset_path):
"""
Load train, test data from pickle file
"""
dataset = pickle.load(open(dataset_path, 'rb'))
y_train = dataset['y_train']
X_train = dataset['X_train'].astype(np.float32)
y_test = dataset['y_test']
X_test = dataset['X_test'].astype(np.float32)
return X_train, y_train, X_test, y_test
def print_classification_report(y_true, y_preds, class_names):
"""
Compute classification report
"""
num_classes = len(class_names)
# Calculate accuracy, precision, recall etc
cr = classification_report(y_true, y_preds, output_dict=True, target_names=class_names)
cm = confusion_matrix(y_true, y_preds)
# One hot encode y_preds
y_preds_ohe = np.zeros((len(y_preds), num_classes))
for i, j in enumerate(y_preds):
y_preds_ohe[i, j] = 1
# One hot encode y_true
y_true_ohe = np.zeros((len(y_true), num_classes))
for i, j in enumerate(y_true):
y_true_ohe[i, j] = 1
# Calculate ROC AUC
auroc = roc_auc_score(y_true_ohe, y_preds_ohe, multi_class='ovo')
return cr, cm, auroc
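# Hedged usage sketch (not in the original source); the labels below are
# illustrative only:
#     cr, cm, auroc = print_classification_report(
#         y_true=[0, 1, 2, 1, 0], y_preds=[0, 1, 1, 1, 0],
#         class_names=['left', 'right', 'rest'])
#     cr['accuracy'], cm.shape  # overall accuracy and a 3x3 confusion matrix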
def plot_history(history, save_path):
"""
Plot loss and accuracy then save as html file.
"""
df = pd.DataFrame(history)
df["Epochs"] = range(len(df))
fig = px.line(df, x='Epochs', y=['loss', 'val_loss'], labels={'value': 'Loss'})
fig.write_html(os.path.join(save_path, 'history_loss.html'))
    fig = px.line(df, x='Epochs', y=['acc', 'val_acc'], labels={'value': 'Accuracy'})
fig.write_html(os.path.join(save_path, 'history_acc.html'))
def plot_cm(cm, class_names, save_path):
"""
Plot confusion matrix as image.
"""
plt.figure(figsize=(7, 5))
cm_df = pd.DataFrame(cm, columns=class_names, index=class_names)
# cm_df = pd.DataFrame(cm)
sns.heatmap(cm_df, annot=True, fmt='g')
plt.ylabel('True')
plt.xlabel('Pred')
plt.tight_layout()
plt.savefig(os.path.join(save_path, 'cm.png'))
# plt.show()
plt.clf()
plt.close()
def plot_adj(adj, save_path):
"""
Plot adjacency matrix as image
"""
plt.figure(figsize=(7, 5))
sns.heatmap(adj, fmt='g')
plt.ylabel('Features')
plt.xlabel('Features')
plt.tight_layout()
plt.savefig(save_path)
# plt.show()
plt.clf()
plt.close()
def show_metrics(time_now, model_names, dataset_names, random_seeds):
"""Save model metrics per run, per dataset and per model"""
final_results = []
for model_name in model_names:
for dataset_name in dataset_names:
subject_idc = json.load(open(os.path.join('./dataset/train', dataset_name + '.json'), 'r'))
accs = []
precs_macro = []
precs_weighted = []
recalls_macro = []
recalls_weighted = []
aurocs = []
n_trainable_params = []
for random_seed in random_seeds:
                path = os.path.join('output', time_now, model_name,
                                    dataset_name.replace('./dataset/train/', '').replace('.pickle', ''),
                                    str(random_seed), 'results.pickle')
results = pickle.load(open(path, 'rb'))
accs.append(results['cr']['accuracy'])
precs_macro.append(results['cr']['macro avg']['precision'])
precs_weighted.append(results['cr']['weighted avg']['precision'])
recalls_macro.append(results['cr']['macro avg']['recall'])
recalls_weighted.append(results['cr']['weighted avg']['recall'])
aurocs.append(results['auroc'])
n_trainable_params.append(results['n_params'])
            result_df = pd.DataFrame({
                'model_name': [model_name for i in random_seeds],
                'dataset_name': [dataset_name for i in random_seeds],
                'train_idc': [subject_idc['train_idc'] for i in random_seeds],
                'test_idc': [subject_idc['test_idc'] for i in random_seeds],
                'random_seed': random_seeds,
                'accuracy': accs,
                'precision_macro': precs_macro,
                'precision_weighted': precs_weighted,
                'recall_macro': recalls_macro,
                'recall_weighted': recalls_weighted,
                'AUROC': aurocs,
                'n_params': n_trainable_params})
final_results.append(result_df)
final_results = pd.concat(final_results).reset_index(drop=True)
final_results.to_csv(os.path.join('./output', time_now, 'results.csv'), index=False)
std_per_dataset = final_results.groupby(['model_name', 'dataset_name']).std().drop(columns=['random_seed'])
std_per_dataset = std_per_dataset.rename(columns={'accuracy': 'accuracy_std',
'precision_macro': 'precision_macro_std',
'precision_weighted': 'precision_weighted_std',
'recall_macro': 'recall_macro_std',
'recall_weighted': 'recall_weighted_std'})
std_per_dataset = std_per_dataset.drop(columns=['AUROC', 'n_params'])
mean_per_dataset = final_results.groupby(['model_name', 'dataset_name']).mean().drop(columns='random_seed')
results_per_dataset = pd.concat([mean_per_dataset, std_per_dataset], axis=1)
results_per_dataset.to_csv(os.path.join('./output', time_now, 'results_per_dataset.csv'))
std_per_model = final_results.groupby(['model_name']).std().drop(columns=['random_seed'])
std_per_model = std_per_model.rename(columns={'accuracy': 'accuracy_std',
'precision_macro': 'precision_macro_std',
'precision_weighted': 'precision_weighted_std',
'recall_macro': 'recall_macro_std',
'recall_weighted': 'recall_weighted_std'})
std_per_model = std_per_model.drop(columns=['AUROC', 'n_params'])
mean_per_model = final_results.groupby(['model_name']).mean().drop(columns='random_seed')
results_per_model = pd.concat([mean_per_model, std_per_model], axis=1)
results_per_model.to_csv(os.path.join('./output', time_now, 'results_per_model.csv'))
print(results_per_model)
return final_results |
"""
Data management
===============
The :class:`Directory` class provides an interface for creating hierarchical
filesystem directories and files within those directories using either an absolute
or relative path.
.. autosummary::
:nosignatures:
Directory
.. autoclass:: Directory
:members:
"""
import os
import shutil
from . import mpi
class Directory:
"""Context for a filesystem directory.
The directory specified by ``path`` (which can be either absolute or relative)
is created if it does not already exist. This process is recursive, so
``path`` may include multiple directories that do not yet exist. This object
represents the final directory in ``path``.
A :class:`Directory` is a context that can be used to manage the current
working directory. Entering the context changes the current working
directory to ``path``, and exiting restores the working directory before the
context was entered.
Parameters
----------
path : str
Absolute or relative directory path.
Raises
------
OSError
If the specified path is not a valid directory.
Examples
--------
Creating a directory::
d = Directory('foo')
Using the context to open a file ``foo/bar.txt`` in a directory::
with Directory('foo') as d:
f = open('bar.txt')
"""
def __init__(self, path):
self._start = []
# ensure path exists at time directory is created (synchronizing)
path = os.path.realpath(path)
if mpi.world.rank_is_root:
if not os.path.exists(path):
os.makedirs(path)
dir_error = not os.path.isdir(path)
else:
dir_error = None
mpi.world.bcast(dir_error)
if dir_error:
raise OSError('The specified path is not a valid directory')
self._path = path
@classmethod
def cast(cls, directory):
"""Try to cast an object to a directory.
Ensure that a `str` or :class:`Directory` is a :class:`Directory`. No
action is taken if the object is already a :class:`Directory`. Otherwise,
a new one is constructed.
Parameters
----------
directory : str or :class:`Directory`
Object to ensure is a directory
Returns
-------
:class:`Directory`
The cast object.
"""
if not isinstance(directory, Directory):
directory = Directory(directory)
return directory
def __enter__(self):
"""Enter the directory context.
The working directory is changed to the ``path`` of this object.
Returns
-------
:class:`Directory`
This directory.
"""
self._start.append(os.getcwd())
os.chdir(self.path)
return self
def __exit__(self, exception_type, exception_value, traceback):
"""Exit the directory context.
If possible, the working directory is reset to the path before entering
the context. The change is silently ignored if the original directory
no longer exists.
"""
try:
os.chdir(self._start.pop())
except OSError:
pass
def _in_context(self):
"""bool: True if object is being used as a context."""
return len(self._start) > 0
@property
def path(self):
"""str: Real path to the directory."""
return self._path
def file(self, name):
"""Get the absolute path to a file in the directory.
This method is convenient for abstracting references to a file in the
directory.
Parameters
----------
name : str
Name of the file.
Returns
-------
str
The absolute path to the file ``name``.
Examples
--------
Opening a file by absolute path::
d = Directory('foo')
f = open(d.file('bar.txt'))
"""
return os.path.join(self.path, name)
def directory(self, name):
"""Get a child directory.
This method is convenient for abstracting references to child
directories.
Parameters
----------
name : str
Name of the directory.
Returns
-------
:class:`Directory`
A new directory relative to this one.
Examples
--------
Making nested directories ``foo/bar``::
foo = Directory('foo')
bar = foo.directory('bar')
"""
return Directory(os.path.join(self.path, name))
def clear_contents(self):
r"""Clear the contents of a directory.
This method **removes** all the contents of a directory (files and
directories), so it should be used carefully!
"""
# delete on root rank and wait
if mpi.world.rank_is_root:
for entry in os.scandir(self.path):
if entry.is_file():
os.remove(entry.path)
elif entry.is_dir():
shutil.rmtree(entry.path)
mpi.world.barrier()
def move_contents(self, dest):
"""Move the contents of the directory.
Parameters
----------
dest : :class:`Directory` or :class:`str`
Destination directory.
"""
dest = Directory.cast(dest)
# move on root rank and wait
if mpi.world.rank_is_root:
for entry in os.scandir(self.path):
shutil.move(entry.path, dest.path)
mpi.world.barrier()
def copy_contents(self, dest):
"""Copy the contents of the directory.
Parameters
----------
dest : :class:`Directory` or :class:`str`
Destination directory.
"""
dest = Directory.cast(dest)
# copy using root rank and wait
if mpi.world.rank_is_root:
for entry in os.scandir(self.path):
if entry.is_file():
shutil.copy2(entry.path, dest.path)
elif entry.is_dir():
shutil.copytree(entry.path, os.path.join(dest.path,entry.name))
mpi.world.barrier()
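# Hedged usage sketch (not part of the original module); directory names are
# illustrative only:
#     run = Directory('run0')
#     with run.directory('checkpoints') as ckpt:
#         open('state.json', 'w').close()   # created inside run0/checkpoints
#     run.directory('checkpoints').copy_contents('run0_backup')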
|
# You need an image testsuite to run this, for information see:
# kivy/tools/image-testsuite/README.md
import os
import re
import sys
import unittest
from collections import defaultdict
from kivy.core.image import ImageLoader
DEBUG = False
ASSETDIR = 'image-testsuite'
LOADERS = {x.__name__: x for x in ImageLoader.loaders}
if 'ImageLoaderPygame' not in LOADERS:
try:
from kivy.core.image.img_pygame import ImageLoaderPygame
LOADERS['ImageLoaderPygame'] = ImageLoaderPygame
    except ImportError:
pass
# Kivy image test protocol v0: Pixel values
v0_PIXELS = { # NOTE: 't' is not included here, see match_prediction()
'w': [0xFF, 0xFF, 0xFF], 'x': [0x00, 0x00, 0x00], 'r': [0xFF, 0x00, 0x00],
'g': [0x00, 0xFF, 0x00], 'b': [0x00, 0x00, 0xFF], 'y': [0xFF, 0xFF, 0x00],
'c': [0x00, 0xFF, 0xFF], 'p': [0xFF, 0x00, 0xFF], '0': [0x00, 0x00, 0x00],
'1': [0x11, 0x11, 0x11], '2': [0x22, 0x22, 0x22], '3': [0x33, 0x33, 0x33],
'4': [0x44, 0x44, 0x44], '5': [0x55, 0x55, 0x55], '6': [0x66, 0x66, 0x66],
'7': [0x77, 0x77, 0x77], '8': [0x88, 0x88, 0x88], '9': [0x99, 0x99, 0x99],
'A': [0xAA, 0xAA, 0xAA], 'B': [0xBB, 0xBB, 0xBB], 'C': [0xCC, 0xCC, 0xCC],
'D': [0xDD, 0xDD, 0xDD], 'E': [0xEE, 0xEE, 0xEE], 'F': [0xFF, 0xFF, 0xFF]}
# Kivy image test protocol v0: File name
# width x height _ pattern _ alpha _ fmtinfo _ testname _ encoder . ext
v0_FILE_RE = re.compile(r'^v0_(\d+)x(\d+)_' r'([wxrgbycptA-F0-9]+)_'
                        r'([0-9a-fA-F]{2})_' r'([a-zA-Z0-9\-]+)_'
                        r'([a-zA-Z0-9\-]+)_' r'([a-zA-Z0-9\-]+)'
                        r'\.([a-z]+)$')
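# Hedged example of a file name matching v0_FILE_RE (hypothetical, for
# illustration only):
#   "v0_3x1_wxr_ff_rgb24_OPAQUE_pil.png"
#   -> w=3, h=1, pattern='wxr', alpha='ff', fmtinfo='rgb24',
#      testname='OPAQUE', encoder='pil', ext='png'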
def asset(*fn):
return os.path.abspath(os.path.join(os.path.dirname(__file__), *fn))
def has_alpha(fmt):
return fmt in ('rgba', 'bgra', 'argb', 'abgr')
def bytes_per_pixel(fmt):
if fmt in ('rgb', 'bgr'):
return 3
if fmt in ('rgba', 'bgra', 'argb', 'abgr'):
return 4
raise Exception('bytes_per_pixel: unknown format {}'.format(fmt))
def get_pixel_alpha(pix, fmt):
if fmt in ('rgba', 'bgra'):
return pix[3]
elif fmt in ('abgr', 'argb'):
return pix[0]
return 0xFF
# Converts (predicted) rgba pixels to the format claimed by image loader
def rgba_to(pix_in, target_fmt, w, h, pitch=None):
if not isinstance(pix_in, (bytes, bytearray)):
pix_in = bytearray(pix_in)
assert w > 0 and h > 0, "Must specify w and h"
assert len(pix_in) == w * h * 4, "Invalid rgba data {}".format(pix_in)
assert target_fmt in ('rgba', 'bgra', 'argb', 'abgr', 'rgb', 'bgr')
if target_fmt == 'rgba':
return pix_in
pixels = [pix_in[i:i + 4] for i in range(0, len(pix_in), 4)]
if target_fmt == 'bgra':
return b''.join([bytes(p[:3][::-1] + p[3:]) for p in pixels])
elif target_fmt == 'abgr':
return b''.join([bytes(p[3:] + p[:3][::-1]) for p in pixels])
elif target_fmt == 'argb':
return b''.join([bytes(p[3:] + p[:3]) for p in pixels])
# rgb/bgr, default to 4 byte alignment
if pitch is None:
pitch = ((3 * w) + 3) & ~3
# Assume pitch 0 == unaligned
elif pitch == 0:
pitch = 3 * w
out = b''
padding = b'\x00' * (pitch - w * 3)
for row in [pix_in[i:i + w * 4] for i in range(0, len(pix_in), w * 4)]:
pixelrow = [row[i:i + 4] for i in range(0, len(row), 4)]
if target_fmt == 'rgb':
out += b''.join([bytes(p[:3]) for p in pixelrow])
elif target_fmt == 'bgr':
out += b''.join([bytes(p[:3][::-1]) for p in pixelrow])
out += padding
return out
def match_prediction(pixels, fmt, fd, pitch):
assert len(fd['alpha']) == 2
assert len(fd['pattern']) > 0
bpp = bytes_per_pixel(fmt)
rowlen = fd['w'] * bpp
if pitch is None:
pitch = (rowlen + 3) & ~3
elif pitch == 0:
pitch = fd['w'] * bpp
pitchalign = pitch - rowlen
errors = []
fail = errors.append
if len(pixels) != pitch * fd['h']:
        fail("Pitch error: pitch {} * {} height != {} pixelbytes"
.format(pitch, fd['h'], len(pixels)))
ptr = 0
pixnum = 0
for char in fd['pattern']:
pix = list(bytearray(pixels[ptr:ptr + bpp]))
# print("PIXNUM {} ptr={} bpp={} : {}".format(pixnum, ptr, bpp, pix))
if len(pix) != bpp:
fail("Want {} bytes per pixel, got {}: {}"
.format(bpp, len(pix), pix))
break
if char == 't':
if get_pixel_alpha(pix, fmt) != 0:
fail("pixel {} nonzero 't' pixel alpha {:02X}: {}".format(
pixnum, get_pixel_alpha(pix, fmt), pix))
else:
srcpix = v0_PIXELS[char] + list(bytearray.fromhex(fd['alpha']))
predict = rgba_to(srcpix, fmt, 1, 1, pitch=0)
predict = list(bytearray(predict))
if not predict or not pix or predict != pix:
fail("pixel {} {} format mismatch: want {} ({}) -- got {}"
.format(pixnum, fmt, predict, char, pix))
if pitchalign and (pixnum + 1) % fd['w'] == 0:
check = list(bytearray(pixels[ptr + bpp:ptr + bpp + pitchalign]))
if check != [0] * pitchalign:
fail("Want {} 0x00 pitch align pixnum={}, pos={} got: {}"
.format(pitchalign, pixnum, ptr + bpp, check))
ptr += pitchalign
ptr += bpp
pixnum += 1
if ptr != len(pixels):
fail("Excess data: pixnum={} ptr={} bytes={}, bpp={} pitchalign={}"
.format(pixnum, ptr, len(pixels), bpp, pitchalign))
return (len(errors) == 0, errors)
class _TestContext(object):
def __init__(self, loadercls):
self.loadercls = loadercls
self._fd = None
self._fn = None
self._ok = 0
self._skip = 0
self._fail = 0
self._stats = defaultdict(dict)
@property
def stats(self):
return self._stats
@property
def results(self):
return (self._ok, self._skip, self._fail, self._stats)
def start(self, fn, fd):
assert not self._fn, "unexpected ctx.start(), already started"
assert isinstance(fd, dict)
self._fn = fn
self._fd = fd
def end(self, fn=None):
assert not fn or self._fn == fn, "unexpected ctx.end(), fn mismatch"
self._fn = None
self._fd = None
def ok(self, info):
assert self._fn, "unexpected ctx.ok(), fn=None"
self._ok += 1
self.dbg('PASS', info)
self._incstat('ok')
self.end(self._fn)
def skip(self, info):
assert self._fn, "unexpected ctx.skip(), fn=None"
self._skip += 1
self.dbg('SKIP', info)
self._incstat('skip')
self.end(self._fn)
def fail(self, info):
assert self._fn, "unexpected ctx.fail(), fn=None"
self._fail += 1
self.dbg('FAIL', info)
self._incstat('fail')
self.end(self._fn)
def dbg(self, msgtype, info):
assert self._fn, "unexpected ctx.dbg(), fn=None"
if DEBUG:
print("{} {} {}: {}"
.format(self.loadercls.__name__, msgtype, self._fn, info))
def _incstat(self, s):
assert self._fd, "unexpected ctx._incstat(), fd=None"
fd = self._fd
def IS(key):
self._stats.setdefault(s, defaultdict(int))[key] += 1
IS('total')
IS('extension:{}'.format(fd['ext']))
IS('encoder:{}'.format(fd['encoder']))
IS('fmtinfo:{}'.format(fd['fmtinfo']))
IS('testname:{}'.format(fd['testname']))
IS('testname+ext:{}+{}'.format(fd['testname'], fd['ext']))
IS('encoder+ext:{}+{}'.format(fd['encoder'], fd['ext']))
IS('encoder+testname:{}+{}'.format(fd['encoder'], fd['testname']))
IS('fmtinfo+ext:{}+{}'.format(fd['fmtinfo'], fd['ext']))
@unittest.skipIf(not os.path.isdir(asset(ASSETDIR)),
"Need 'make image-testsuite' to run test")
class ImageLoaderTestCase(unittest.TestCase):
def setUp(self):
self._context = None
self._prepare_images()
def tearDown(self):
if not DEBUG or not self._context:
return
ctx = self._context
il = ctx.loadercls.__name__
stats = ctx.stats
keys = set([k for x in stats.values() for k in x.keys()])
sg = stats.get
for k in sorted(keys):
ok, skip, fail = sg('ok', {}), sg('skip', {}), sg('fail', {})
print("REPORT {} {}: ok={}, skip={}, fail={}".format(
il, k, ok.get(k, 0), skip.get(k, 0), fail.get(k, 0)))
def _prepare_images(self):
if hasattr(self, '_image_files'):
return
self._image_files = {}
for filename in os.listdir(asset(ASSETDIR)):
matches = v0_FILE_RE.match(filename)
if not matches:
continue
w, h, pat, alpha, fmtinfo, tst, encoder, ext = matches.groups()
self._image_files[filename] = {
'filename': filename,
'w': int(w),
'h': int(h),
'pattern': pat,
'alpha': alpha,
'fmtinfo': fmtinfo,
'testname': tst,
'encoder': encoder,
'ext': ext,
'require_alpha': 'BINARY' in tst or 'ALPHA' in tst,
}
def _test_imageloader(self, loadercls, extensions=None):
if not loadercls:
return
if not extensions:
extensions = loadercls.extensions()
ctx = _TestContext(loadercls)
self._context = ctx
for filename in sorted(self._image_files.keys()):
filedata = self._image_files[filename]
if filedata['ext'] not in extensions:
continue
try:
ctx.start(filename, filedata)
result = loadercls(asset(ASSETDIR, filename), keep_data=True)
if not result:
raise Exception('invalid result')
            except Exception:
ctx.skip('Error loading file, result=None')
continue
self._test_image(filedata, ctx, loadercls, result)
ctx.end()
ok, skip, fail, stats = ctx.results
if fail:
self.fail('{}: {} passed, {} skipped, {} failed'
.format(loadercls.__name__, ok, skip, fail))
return ctx
def _test_image(self, fd, ctx, loadercls, imgdata):
w, h, pixels, pitch = imgdata._data[0].get_mipmap(0)
fmt = imgdata._data[0].fmt
# required for FFPy memview
# FIXME: bytearray() for py2 compat, I can't be bothered to research
if not isinstance(pixels, bytes):
pixels = bytearray(pixels)
def debug():
if not DEBUG:
return
print(" format: {}x{} {}".format(w, h, fmt))
print(" pitch: got {}, want {}".format(pitch, want_pitch))
print(" want: {} in {}".format(fd['pattern'], fmt))
print(" got: {}".format(bytearray(pixels)))
# Assume pitch 0 = unaligned
        want_pitch = bytes_per_pixel(fmt) * w if pitch == 0 else pitch
if pitch == 0 and bytes_per_pixel(fmt) * w * h != len(pixels):
ctx.dbg("PITCH", "pitch=0, expected fmt={} to be "
"unaligned @ ({}bpp) = {} bytes, got {}"
.format(fmt, bytes_per_pixel(fmt),
bytes_per_pixel(fmt) * w * h,
len(pixels)))
elif pitch and want_pitch != pitch:
ctx.dbg("PITCH", "fmt={}, pitch={}, expected {}"
.format(fmt, pitch, want_pitch))
success, msgs = match_prediction(pixels, fmt, fd, pitch)
if not success:
if not msgs:
ctx.fail("Unknown error")
elif len(msgs) == 1:
ctx.fail(msgs[0])
else:
for m in msgs:
ctx.dbg('PREDICT', m)
ctx.fail('{} errors, see debug output: {}'
.format(len(msgs), msgs[-1]))
debug()
elif fd['require_alpha'] and not has_alpha(fmt):
ctx.fail('Missing expected alpha channel')
debug()
elif fd['w'] != w:
ctx.fail('Width mismatch, want {} got {}'
.format(fd['w'], w))
debug()
elif fd['h'] != h:
ctx.fail('Height mismatch, want {} got {}'
.format(fd['h'], h))
debug()
elif w != 1 and h != 1:
ctx.fail('v0 test protocol mandates w=1 or h=1')
debug()
else:
ctx.ok("Passed test as {}x{} {}".format(w, h, fmt))
sys.stdout.flush()
def test_ImageLoaderSDL2(self):
loadercls = LOADERS.get('ImageLoaderSDL2')
# GIF format not listed as supported in sdl2 loader
if loadercls:
exts = list(loadercls.extensions()) + ['gif']
ctx = self._test_imageloader(loadercls, exts)
def test_ImageLoaderPIL(self):
loadercls = LOADERS.get('ImageLoaderPIL')
ctx = self._test_imageloader(loadercls)
def test_ImageLoaderPygame(self):
loadercls = LOADERS.get('ImageLoaderPygame')
ctx = self._test_imageloader(loadercls)
def test_ImageLoaderFFPy(self):
loadercls = LOADERS.get('ImageLoaderFFPy')
ctx = self._test_imageloader(loadercls)
def test_ImageLoaderGIF(self):
loadercls = LOADERS.get('ImageLoaderGIF')
ctx = self._test_imageloader(loadercls)
def test_ImageLoaderDDS(self):
loadercls = LOADERS.get('ImageLoaderDDS')
ctx = self._test_imageloader(loadercls)
def test_ImageLoaderTex(self):
loadercls = LOADERS.get('ImageLoaderTex')
ctx = self._test_imageloader(loadercls)
def test_ImageLoaderImageIO(self):
loadercls = LOADERS.get('ImageLoaderImageIO')
ctx = self._test_imageloader(loadercls)
def test_missing_tests(self):
for loader in ImageLoader.loaders:
key = 'test_{}'.format(loader.__name__)
msg = "Missing ImageLoader test case: {}".format(key)
self.assertTrue(hasattr(self, key), msg)
self.assertTrue(callable(getattr(self, key)), msg)
class ConverterTestCase(unittest.TestCase):
def test_internal_converter_2x1(self):
correct = {
'rgba': b'\x01\x02\x03\xA1\x04\x05\x06\xA2',
'abgr': b'\xA1\x03\x02\x01\xA2\x06\x05\x04',
'bgra': b'\x03\x02\x01\xA1\x06\x05\x04\xA2',
'argb': b'\xA1\x01\x02\x03\xA2\x04\x05\x06',
'rgb': b'\x01\x02\x03\x04\x05\x06',
'bgr': b'\x03\x02\x01\x06\x05\x04',
'rgb_align4': b'\x01\x02\x03\x04\x05\x06\x00\x00',
'bgr_align4': b'\x03\x02\x01\x06\x05\x04\x00\x00'}
src = correct.get
rgba = src('rgba')
self.assertEqual(rgba_to(rgba, 'rgba', 2, 1, 0), src('rgba'))
self.assertEqual(rgba_to(rgba, 'abgr', 2, 1, 0), src('abgr'))
self.assertEqual(rgba_to(rgba, 'bgra', 2, 1, 0), src('bgra'))
self.assertEqual(rgba_to(rgba, 'argb', 2, 1, 0), src('argb'))
self.assertEqual(rgba_to(rgba, 'rgb', 2, 1, 0), src('rgb'))
self.assertEqual(rgba_to(rgba, 'bgr', 2, 1, 0), src('bgr'))
self.assertEqual(rgba_to(rgba, 'rgb', 2, 1, None), src('rgb_align4'))
self.assertEqual(rgba_to(rgba, 'bgr', 2, 1, None), src('bgr_align4'))
def test_internal_converter_3x1(self):
pad6 = b'\x00' * 6
correct = {
'rgba': b'\x01\x02\x03\xFF\x04\x05\x06\xFF\x07\x08\x09\xFF',
'abgr': b'\xFF\x03\x02\x01\xFF\x06\x05\x04\xFF\x09\x08\x07',
'bgra': b'\x03\x02\x01\xFF\x06\x05\x04\xFF\x09\x08\x07\xFF',
'argb': b'\xFF\x01\x02\x03\xFF\x04\x05\x06\xFF\x07\x08\x09',
'rgb_align2': b'\x01\x02\x03\x04\x05\x06\x07\x08\x09\x00',
'bgr_align2': b'\x03\x02\x01\x06\x05\x04\x09\x08\x07\x00',
'rgb_align8': b'\x01\x02\x03\x04\x05\x06\x07\x08\x09\x00' + pad6,
'bgr_align8': b'\x03\x02\x01\x06\x05\x04\x09\x08\x07\x00' + pad6}
src = correct.get
rgba = src('rgba')
self.assertEqual(rgba_to(rgba, 'bgra', 3, 1, 0), src('bgra'))
self.assertEqual(rgba_to(rgba, 'argb', 3, 1, 0), src('argb'))
self.assertEqual(rgba_to(rgba, 'abgr', 3, 1, 0), src('abgr'))
self.assertEqual(rgba_to(rgba, 'rgb', 3, 1, 10), src('rgb_align2'))
self.assertEqual(rgba_to(rgba, 'bgr', 3, 1, 10), src('bgr_align2'))
self.assertEqual(rgba_to(rgba, 'rgb', 3, 1, 16), src('rgb_align8'))
self.assertEqual(rgba_to(rgba, 'bgr', 3, 1, 16), src('bgr_align8'))
def test_internal_converter_1x3(self):
pad5 = b'\x00' * 5
correct = {
'rgba': b'\x01\x02\x03\xFF\x04\x05\x06\xFF\x07\x08\x09\xFF',
'rgb_raw': b'\x01\x02\x03\x04\x05\x06\x07\x08\x09',
'bgr_raw': b'\x03\x02\x01\x06\x05\x04\x09\x08\x07',
'rgb_align2': b'\x01\x02\x03\x00\x04\x05\x06\x00\x07\x08\x09\x00',
'bgr_align2': b'\x03\x02\x01\x00\x06\x05\x04\x00\x09\x08\x07\x00',
'rgb_align4': b'\x01\x02\x03\x00\x04\x05\x06\x00\x07\x08\x09\x00',
'bgr_align4': b'\x03\x02\x01\x00\x06\x05\x04\x00\x09\x08\x07\x00',
'rgb_align8': (b'\x01\x02\x03' + pad5 +
b'\x04\x05\x06' + pad5 +
b'\x07\x08\x09' + pad5),
'bgr_align8': (b'\x03\x02\x01' + pad5 +
b'\x06\x05\x04' + pad5 +
b'\x09\x08\x07' + pad5),
}
src = correct.get
rgba = src('rgba')
self.assertEqual(rgba_to(rgba, 'rgb', 1, 3, 4), src('rgb_align2'))
self.assertEqual(rgba_to(rgba, 'bgr', 1, 3, 4), src('bgr_align2'))
self.assertEqual(rgba_to(rgba, 'rgb', 1, 3, None), src('rgb_align4'))
self.assertEqual(rgba_to(rgba, 'bgr', 1, 3, None), src('bgr_align4'))
self.assertEqual(rgba_to(rgba, 'rgb', 1, 3, 0), src('rgb_raw'))
self.assertEqual(rgba_to(rgba, 'bgr', 1, 3, 0), src('bgr_raw'))
self.assertEqual(rgba_to(rgba, 'rgb', 1, 3, 8), src('rgb_align8'))
self.assertEqual(rgba_to(rgba, 'bgr', 1, 3, 8), src('bgr_align8'))
if __name__ == '__main__':
import sys
accept_filter = ['ImageLoader{}'.format(x) for x in sys.argv[1:]]
if accept_filter:
LOADERS = {x: LOADERS[x] for x in accept_filter}
DEBUG = True
unittest.main(argv=sys.argv[:1])
|
__author__ = '<NAME>'
from colour import Color
def rawrgb2rgb(a,b,c):
return Color(rgb = (a/255,b/255,c/255))
blues = dict(
blue1 = rawrgb2rgb(113,199,236),
blue2 = rawrgb2rgb(30,187,215),
blue3 = rawrgb2rgb(24,154,211),
blue4 = rawrgb2rgb(16,125,172),
blue5 = rawrgb2rgb(0,80,115)
)
reds = dict(
red1 = rawrgb2rgb(236,30,30),
red2 = rawrgb2rgb(204,29,29),
red3 = rawrgb2rgb(159,37,37),
red4 = rawrgb2rgb(120,31,31),
red5 = rawrgb2rgb(94,22,22)
)
greys = dict(
grey1 = rawrgb2rgb(162,162,162),
grey2 = rawrgb2rgb(81,81,81),
grey3 = rawrgb2rgb(59,59,59),
grey4 = rawrgb2rgb(37,37,37),
grey5 = rawrgb2rgb(16,16,16)
)
forrest = dict(
forrest1 = rawrgb2rgb(186,221,215),
forrest2 = rawrgb2rgb(44,53,73),
forrest3 = rawrgb2rgb(48,74,90),
forrest4 = rawrgb2rgb(94,131,110),
forrest5 = rawrgb2rgb(135,171,112)
)
bluegreys = dict(
bluegrey1 = rawrgb2rgb(194,205,216),
bluegrey2 = rawrgb2rgb(161,169,180),
bluegrey3 = rawrgb2rgb(56,129,184),
bluegrey4 = rawrgb2rgb(35,81,116),
bluegrey5 = rawrgb2rgb(29,43,73)
)
coffee = dict(
coffee1 = rawrgb2rgb(236,224,209),
coffee2 = rawrgb2rgb(219,193,172),
coffee3 = rawrgb2rgb(216,197,166),
coffee4 = rawrgb2rgb(112,64,65),
coffee5 = rawrgb2rgb(56,34,15)
)
pinks = dict(
pink1 = rawrgb2rgb(250,236,230),
pink2 = rawrgb2rgb(238,207,200),
pink3 = rawrgb2rgb(217,178,169),
pink4 = rawrgb2rgb(163,126,113)
)
browns = dict(
brown1 = rawrgb2rgb(219,201,184),
brown2 = rawrgb2rgb(161,126,97),
brown3 = rawrgb2rgb(133,88,50),
brown4 = rawrgb2rgb(116,72,42),
brown5 = rawrgb2rgb(54,41,37)
)
browngreen = dict(
browngreen1 = rawrgb2rgb(221,213,199),
browngreen2 = rawrgb2rgb(184,171,139),
browngreen3 = rawrgb2rgb(139,138,104),
browngreen4 = rawrgb2rgb(105,103,61),
browngreen5 = rawrgb2rgb(60,56,34)
)
purplybrown = dict(
purplybrown1 = rawrgb2rgb(182,138,130),
purplybrown2 = rawrgb2rgb(161,125,132),
purplybrown3 = rawrgb2rgb(139,114,134),
purplybrown4 = rawrgb2rgb(116,99,124),
purplybrown5 = rawrgb2rgb(92,89,114)
)
junglegreen = dict(
green1 = rawrgb2rgb(133,170,155),
green2 = rawrgb2rgb(88,139,118),
green3 = rawrgb2rgb(41,95,72),
green4 = rawrgb2rgb(32,76,57),
green5 = rawrgb2rgb(24,57,43)
)
greens = dict(
green1 = rawrgb2rgb(148,206,152),
green2 = rawrgb2rgb(97,175,102),
green3 = rawrgb2rgb(56,142,62),
green4 = rawrgb2rgb(27,112,33),
green5 = rawrgb2rgb(6,78,10)
)
maroons = dict(
maroon1 = rawrgb2rgb(193,113,113),
maroon2 = rawrgb2rgb(169,76,76),
maroon3 = rawrgb2rgb(146,68,68),
maroon4 = rawrgb2rgb(109,54,54),
maroon5 = rawrgb2rgb(86,36,36)
)
cyans = dict(
cyan1 = rawrgb2rgb(138,187,187),
cyan2 = rawrgb2rgb(101,155,150),
cyan3 = rawrgb2rgb(60,131,132),
cyan4 = rawrgb2rgb(29,91,95),
cyan5 = rawrgb2rgb(0,78,82)
)
|
<filename>get_plink_subsets.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 09:17:42 2020
Get PLINK subsets for clumping
@author: nbaya
"""
import hail as hl
import argparse
import hailtop.batch as hb
from ukbb_pan_ancestry.resources.genotypes import get_filtered_mt
from ukbb_pan_ancestry.resources.results import get_pheno_manifest_path
from ukbb_pan_ancestry import POPS, bucket
# MIN_CASES = 50
# MIN_CASES_ALL = 100
# MIN_CASES_EUR = 100
POP_DICT = {
'AFR': 6637, # dict with sample counts for each population
'AMR': 982,
'CSA': 8876,
'EAS': 2709,
'EUR': 420542,
'MID': 1599
}
chroms = list(range(1,23))+['X']
def get_pops_list(pops: str, paridx: int = 0, parsplit: int = 1):
r'''
Generates list of population combinations. If `pops`=None, this will read
the phenotype manifest to get all population combinations. `pops` should be
a string in the format "POP1-POP2-POP3" (for instance, "AFR-EUR-MID")
`paridx` and `parsplit` are parameters to run this command across separate
    batches of phenotypes. Batches are zero-indexed, such that `paridx`=0
corresponds to the first batch. `parsplit` indicates the number of batches.
`paridx` should generally be less than `parsplit`. `parsplit` should be less
than the total number of phenotypes to run across all batches. The default
values for `paridx` and `parsplit` will run all phenotypes in a single batch.
'''
if pops is None:
pheno_manifest = hl.import_table(get_pheno_manifest_path())
pops_list_all = pheno_manifest.pops.collect()
pops_list_all = sorted(set(pops_list_all))
pops_list_all = [p.split(',') for p in pops_list_all] # list of lists of strings
idx = range(paridx, len(pops_list_all), parsplit)
pops_list = [pops for i, pops in enumerate(pops_list_all) if i in idx]
else:
pops = sorted(set(pops.upper().split('-')))
assert set(pops).issubset(POPS), f'Invalid populations: {set(pops).difference(POPS)}'
pops_list = [pops] # list of list of strings
    print(f'''\n\npops: {'-'.join(pops_list[0]) if len(pops_list)==1 else f"{len(pops_list)} of {len(pops_list_all)} combinations"}\n''')
return pops_list
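# Example (illustrative only):
#   get_pops_list('AFR-EUR-MID')               # -> [['AFR', 'EUR', 'MID']]
#   get_pops_list(None, paridx=0, parsplit=2)  # -> every other population combination in the manifest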
def get_bfile_chr_path(bfile_prefix, chrom):
return f'{bfile_prefix}.chr{chrom}'
def get_mt_filtered_by_pops(pops: list,
chrom: str = 'all',
imputed: bool = True,
min_mac: int = 20,
entry_fields=('GP',),
filter_mac_instead_of_ac: bool = False):
r'''
Wraps `get_filtered_mt()` from ukbb_pan_ancestry.resources.genotypes
This filters to samples from populations listed in `pops`.
NOTE: If chrom='all', this loads all autosomes and chrX.
'''
assert len(pops)>0 and set(pops).issubset(POPS)
kwargs = {'pop': 'all' if len(pops)>1 else pops[0],
'imputed': imputed,
'min_mac': min_mac,
'entry_fields': entry_fields,
'filter_mac_instead_of_ac': filter_mac_instead_of_ac
}
mt = get_filtered_mt(chrom=chrom, **kwargs) # in this case chrom='all' gets autosomes
if chrom=='all':
mt_x = get_filtered_mt(chrom='X', **kwargs)
mt = mt.union_rows(mt_x)
if len(pops)>1:
mt = mt.filter_cols(hl.set(pops).contains(mt.pop))
return mt
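# Example (illustrative only, mirroring the calls made further below):
#   mt = get_mt_filtered_by_pops(pops=['AFR', 'EUR'], chrom='all', entry_fields=('GT',))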
def get_pop_prop_dict(pop_dict: dict, pops: list) -> (dict, int):
r'''
Get population proportions in `pop_dict` for a list of populations `pops`
'''
tmp_pop_dict = {pop:n_pop for pop,n_pop in pop_dict.items() if pop in pops}
n_total = sum(tmp_pop_dict.values())
pop_prop_dict = {k: v/n_total for k,v in tmp_pop_dict.items()}
return pop_prop_dict, n_total
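# Example (illustrative only): get_pop_prop_dict(POP_DICT, ['AFR', 'AMR']) returns
# ({'AFR': 6637/7619, 'AMR': 982/7619}, 7619), i.e. roughly ({'AFR': 0.871, 'AMR': 0.129}, 7619).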
def get_subset(mt_pop, pop_dict: dict, pops: list, n_max: int):
r'''
Get Hail table sample of max size = `n_max` for list of populations `pops`.
'''
pop_prop_dict, n_total = get_pop_prop_dict(pop_dict=pop_dict,
pops=pops)
limiting_pop = min(pop_prop_dict, key=pop_prop_dict.get)
n_sample = int(min(pop_dict[limiting_pop]/pop_prop_dict[limiting_pop], n_max))
if n_sample != n_max:
print(f'Using sample size of {n_sample} instead of {n_max} due to limiting population size in {limiting_pop}')
print({k:v*n_sample for k,v in pop_prop_dict.items()}) # prints expectation for number of samples per population
cols = mt_pop.cols()
if len(pops)==1 and n_sample == pop_dict[pops[0]]: # if sampling a single population `pop` and n_sample is the same as the population's size.
ht_sample = cols
else:
cols = cols.annotate(tmp_rand = hl.rand_norm())
cols = cols.order_by('tmp_rand')
cols = cols.add_index(name = 'rand_idx')
ht_sample = cols.filter(cols.rand_idx<n_sample)
ht_sample = ht_sample.drop('tmp_rand','rand_idx')
ht_sample = ht_sample.key_by('s')
ht_sample = ht_sample.select('pop') # keyed by 's', thus the two remaining fields are 'pop' and 's'
return ht_sample
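# Example (illustrative only): for pops=['AMR', 'EUR'] and n_max=5000, the limiting
# population is AMR (982 of 421524 samples, ~0.23%); since 982/(982/421524) = 421524 >= 5000,
# the full n_max of 5000 is sampled, giving roughly 12 AMR and 4988 EUR samples in expectation.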
def to_plink(pops: list,
subsets_dir,
mt,
ht_sample,
bfile_path,
overwrite=False):
r'''
Exports matrix table to PLINK2 files
    NOTE: These files will need to be split up by chromosome before plink_clump.py
can be run.
'''
assert 'GT' in mt.entry and mt.GT.dtype==hl.tcall, "mt must have 'GT' as an entry field and be of type `Call`"
if not overwrite and all([hl.hadoop_exists(f'{bfile_path}.{suffix}') for suffix in ['bed','bim']]):
print(f'\nPLINK .bed and .bim files already exist for {bfile_path}')
print(bfile_path)
else:
print(f'Saving to bfile prefix {bfile_path}')
mt_sample = mt.annotate_rows(varid = hl.str(mt.locus)+':'+mt.alleles[0]+':'+mt.alleles[1])
mt_sample = mt_sample.filter_cols(hl.is_defined(ht_sample[mt_sample.s]))
hl.export_plink(dataset = mt_sample,
output = bfile_path,
ind_id = mt_sample.s,
varid = mt_sample.varid) # varid used to be rsid
def export_varid(args):
r'''
Only used to check varids
'''
n_max = 5000
subsets_dir = f'{bucket}/ld_prune/subsets_{round(n_max/1e3)}k'
    mt = get_mt_filtered_by_pops(pops=list(POPS),  # the function takes `pops`; use all populations here
                                 chrom='all',
                                 entry_fields=('GT',)) # default entry_fields will be 'GP', we need 'GT' for exporting to PLINK
mt_sample = mt.annotate_rows(chrom = mt.locus.contig,
pos = mt.locus.position,
varid = hl.str(mt.locus)+':'+mt.alleles[0]+':'+mt.alleles[1])
mt_sample.rows().key_by().select('chrom','pos','varid').export(f'{subsets_dir}/varid.txt',delimiter=' ')
def batch_split_by_chrom(args):
r'''
Splits bfiles by chromosome, for later use by plink_clump.py
About $0.06 per population set
'''
hl.init(default_reference='GRCh38',
spark_conf={'spark.hadoop.fs.gs.requester.pays.mode': 'AUTO',
'spark.hadoop.fs.gs.requester.pays.project.id': 'ukbb-diversepops-neale'})
pops_list = get_pops_list(args.pops)
n_max = 5000 # maximum number of samples in subset (equal to final sample size if there are sufficient samples for each population)
subsets_dir = f'{bucket}/ld_prune/subsets_{round(n_max/1e3)}k'
backend = hb.ServiceBackend(billing_project='ukb_diverse_pops',
bucket='ukbb-diverse-temp-30day/nb-batch-tmp')
# backend = batch.LocalBackend(tmp_dir='/tmp/batch/')
b = hb.batch.Batch(name='split_by_chrom', backend=backend,
default_image='gcr.io/ukbb-diversepops-neale/nbaya_plink:0.1',
default_storage='30G', default_cpu=8)
for pops in pops_list:
pops_str = '-'.join(pops)
bfile_prefix = f'{subsets_dir}/{pops_str}/{pops_str}'
master_bfile_paths = [f'{bfile_prefix}.{suffix}' for suffix in ['bed','bim','fam']]
master_fam_path = f'{bfile_prefix}.fam'
bfile_chr_paths = [f'{get_bfile_chr_path(bfile_prefix, chrom)}.{suffix}' for chrom in chroms for suffix in ['bed','bim']]
if not args.overwrite_plink and all(map(hl.hadoop_is_file,
[master_fam_path]+bfile_chr_paths)):
print(f'\nAll per-chrom PLINK files created for {pops_str}')
else:
if not all(map(hl.hadoop_is_file, master_bfile_paths)):
print(f'\nWARNING: Insufficient files for {pops_str} to split into per-chrom bed/bim files, skipping\n')
continue
else:
print(f'\n... Running bfile per-chrom split for {pops_str} ...')
prefix = f'{subsets_dir}/{pops_str}/{pops_str}'
bfile = b.read_input_group(
**{suffix:f'{prefix}.{suffix}' for suffix in ['bed','bim','fam']}
)
split = b.new_job(name=f'split_by_chrom_{pops_str}')
for chrom in chroms:
split.declare_resource_group(**{f'ofile_{chrom}':{'bed': '{root}.bed',
'bim': '{root}.bim'}}) # exclude fam file to avoid redundancy
split.command(
f'''
plink \\
--bfile {bfile} \\
--chr {chrom} \\
--output-chr M \\
--make-bed \\
--out {split[f"ofile_{chrom}"]}
'''
)
# print(f"saving to {get_bfile_chr_path(bfile_prefix, chrom)}")
b.write_output(split[f'ofile_{chrom}'], get_bfile_chr_path(bfile_prefix, chrom))
b.run(open=True)
backend.close()
def get_plink_subsets(args):
hl.init(log='/tmp/hail.log')
n_max = 5000 # maximum number of samples in subset (equal to final sample size if there are sufficient samples for each population)
subsets_dir = f'{bucket}/ld_prune/subsets_{round(n_max/1e3)}k'
    pops_list = get_pops_list(args.pops, paridx=args.paridx, parsplit=args.parsplit)
print(f'overwrite_plink: {args.overwrite_plink}')
for pops in pops_list:
pops_str = '-'.join(pops)
ht_sample_path = f'{subsets_dir}/{pops_str}/{pops_str}.ht'
bfile_prefix = f'{subsets_dir}/{pops_str}/{pops_str}'
master_bfile_paths = [f'{bfile_prefix}.{suffix}' for suffix in ['bed','bim','fam']]
if not args.overwrite_plink and all(map(hl.hadoop_is_file,
[f'{ht_sample_path}/_SUCCESS']+master_bfile_paths)):
continue
else:
print(f'\n... Starting PLINK exports for {pops_str} ...')
mt_pop = get_mt_filtered_by_pops(pops=pops,
chrom='all', # chrom='all' includes autosomes and chrX
entry_fields=('GT',)) # default entry_fields will be 'GP', we need 'GT' for exporting to PLINK
if hl.hadoop_is_file(f'{ht_sample_path}/_SUCCESS'):
ht_sample = hl.read_table(ht_sample_path)
ht_sample_ct = ht_sample.count()
print(f'... Subset ht already exists for pops={pops_str} ...')
print(f'\nSubset ht sample ct: {ht_sample_ct}\n\n')
else:
print(f'\n\n... Getting sample subset ({pops_str}) ...\n')
ht_sample = get_subset(mt_pop = mt_pop,
pop_dict = POP_DICT,
pops = pops,
n_max = n_max)
ht_sample_ct = ht_sample.count()
print(f'\n\nht_sample_ct: {ht_sample_ct}\n\n')
ht_sample = ht_sample.checkpoint(ht_sample_path, overwrite=args.overwrite)
print(f'... Exporting to PLINK ({pops_str}) ...')
to_plink(pops = pops,
subsets_dir=subsets_dir,
mt = mt_pop,
ht_sample = ht_sample,
bfile_path = bfile_prefix,
overwrite=args.overwrite_plink)
def main(args):
if args.export_varid:
export_varid(args=args)
if args.batch_split_by_chrom:
batch_split_by_chrom(args)
else:
get_plink_subsets(args=args)
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--pops', type=str, help='population to use')
    parser.add_argument('--overwrite', action='store_true', help='whether to overwrite existing subset Hail Tables')
    parser.add_argument('--overwrite_plink', action='store_true', help='whether to overwrite existing PLINK files')
parser.add_argument('--export_varid', action='store_true', help='export varids')
parser.add_argument('--batch_split_by_chrom', action='store_true', help='Whether to split PLINK files into per-chrom files')
parser.add_argument('--parsplit', type=int, default=1, help="number of parallel batches to split pop combinations into")
parser.add_argument('--paridx', type=int, default=0, help="which of the parallel batches to run (zero-indexed)")
args = parser.parse_args()
main(args)
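# Example invocations (illustrative only):
#   python get_plink_subsets.py --pops AFR-EUR --overwrite_plink
#   python get_plink_subsets.py --batch_split_by_chrom --pops AFR-EUR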
|
<reponame>justant/justant-market-maker-master
from copy import deepcopy
import linecache
import sys
import threading
from time import sleep
import settings
from market_maker.utils.singleton import singleton_data
from market_maker.utils import log
logger = log.setup_custom_logger('root')
execept_logger = log.setup_custom_logger('exception')
execute_logger = log.setup_custom_logger('order')
class SellThread(threading.Thread):
def __init__(self, custom_strategy):
logger.info("[SellThread][run] __init__")
threading.Thread.__init__(self)
self.custom_strategy = custom_strategy
singleton_data.instance().setAllowBuy(False)
singleton_data.instance().setSellThread(True)
# default(20.0) * (current_quantity / max_order_quantity)
# The maximum value is the default even if the quantity you have now is greater than max_order.
# MAX = default(20.0)
# The more bulk_net_sell orders, the higher the price.
currentQty = self.custom_strategy.exchange.get_currentQty()
logger.info("[SellThread][run] MAX_ORDER_QUENTITY : " + str(settings.MAX_ORDER_QUENTITY))
self.minSellingGap = 50.0
if currentQty > settings.MAX_ORDER_QUENTITY:
#self.minSellingGap = self.minSellingGap + settings.MIN_SELLING_GAP
self.minSellingGap = self.minSellingGap
else :
#self.minSellingGap = self.minSellingGap + float(settings.MIN_SELLING_GAP) * float(currentQty / settings.MAX_ORDER_QUENTITY)
self.minSellingGap = self.minSellingGap + float(settings.MIN_SELLING_GAP) - float(settings.MIN_SELLING_GAP) * float(currentQty / settings.MAX_ORDER_QUENTITY)
logger.info("[SellThread][run] minSellingGap : " + str(self.minSellingGap))
self.waiting_sell_order = {}
self.wait_cnt = 0
#self.allow_stop_loss = False
#self.exchange = ExchangeInterface(settings.DRY_RUN)
def PrintException(self):
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
logger.info("[SellThread][run] " + str('EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj)))
execept_logger.info("[SellThread][run] " + str('EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj)))
#def retry_sell(self):
#def ammend_sell(self):
def run(self):
logger.info("[SellThread][run]")
while not singleton_data.instance().getAllowBuy():
try:
# realized profit
current_price = self.custom_strategy.exchange.get_instrument()['lastPrice']
avgCostPrice = self.custom_strategy.exchange.get_avgCostPrice()
avgCostPrice_copy = deepcopy(avgCostPrice)
currentQty = self.custom_strategy.exchange.get_currentQty()
logger.info("[SellThread][run] [" + str(self.wait_cnt) +"] current_price : " + str(current_price) + ", avgCostPrice : " + str(avgCostPrice) + ", currentQty : " + str(currentQty))
if len(self.waiting_sell_order) == 0:
# selling condition
if float(current_price) > float(avgCostPrice) + float(self.minSellingGap):
logger.info("[SellThread][run] current_price(" + str(current_price) +") > avgCostPrice(" + str(avgCostPrice) + ") + minSellingGap(" + str(self.minSellingGap) + ")")
self.waiting_sell_order = self.make_sell_order()
self.waiting_sell_order_copy = deepcopy(self.waiting_sell_order)
logger.info("[SellThread][run] NEW : waiting_sell_order : " + str(self.waiting_sell_order))
# waiting (default:15) secs condition
else:
self.wait_cnt += 1
if self.wait_cnt > settings.SELLING_WAIT:
logger.info("[SellThread][run] stop selling thread because cnt > " + str(settings.SELLING_WAIT))
# exit sell thread
break
#check selling order
elif len(self.waiting_sell_order) > 0:
if self.check_sell_order(avgCostPrice):
singleton_data.instance().setAllowBuy(True)
sleep(5)
#report
margin = self.custom_strategy.exchange.get_user_margin()
                        margin_str = 'Profit summary\n'
                        margin_str += 'Sell price : ' + str(self.waiting_sell_order_copy['price']) + '\n'
                        margin_str += 'Average price : ' + str(avgCostPrice_copy) + '\n'
                        margin_str += 'Quantity : ' + str(self.waiting_sell_order_copy['orderQty']) + '\n'
                        margin_str += 'Realized profit : ' + str(margin['prevRealisedPnl']/100000000)[:7] + '\n'
singleton_data.instance().sendTelegram(margin_str)
break
sleep(1)
except Exception as ex:
self.PrintException()
break
sleep(1)
# Cancel all sell orders
try:
self.custom_strategy.exchange.cancel_all_orders('Sell')
except Exception as ex:
self.PrintException()
finally:
singleton_data.instance().setSellThread(False)
def make_sell_order(self):
logger.info("[SellThread][make_sell_order] start")
        # if the order couldn't be placed, retry it
cancel_retryCnt = 0
current_sell_order = {}
try:
            # Leave the buy orders alone and replace only the sell orders,
            # so that even if this fails the buys remain and we can accumulate again.
            # If everything were cancelled and the sell did not fill, in the worst case all
            # existing buys would be gone and we could only wait for the sell to complete.
self.custom_strategy.exchange.cancel_all_orders('Sell')
#self.custom_strategy.exchange.cancel_all_orders('All')
while len(current_sell_order) == 0:
sell_orders = []
current_price = self.custom_strategy.exchange.get_instrument()['lastPrice']
avgCostPrice = self.custom_strategy.exchange.get_avgCostPrice()
currentQty = self.custom_strategy.exchange.get_currentQty()
logger.info("[SellThread][make_sell_order] current_price : " + str(current_price) + ", currentQty : " + str(currentQty))
if currentQty == 0:
logger.info("[BuyThread][make_buy_order] The first order was established before the order was cancelled")
break
if cancel_retryCnt < 10:
#sell_orders.append({'price': current_price, 'orderQty': currentQty, 'side': "Sell", 'execInst': "ParticipateDoNotInitiate"})
current_sell_order = self.custom_strategy.exchange.create_order('Sell', currentQty, current_price)
sleep(0.2)
else :
#sell_orders.append({'price': current_price + 0.5, 'orderQty': currentQty, 'side': "Sell", 'execInst': "ParticipateDoNotInitiate"})
current_sell_order = self.custom_strategy.exchange.create_order('Sell', currentQty, current_price + 0.5)
sleep(0.1)
logger.info("[SellThread][make_sell_order] current_sell_order : " + str(current_sell_order))
# for remaining buy order
#for i in range (len(response_order)):
# if response_order[i]['side'] == 'Sell':
# current_sell_order.append(response_order[i])
#logger.info("[SellThread][make_sell_order] current_sell_order : " + str(current_sell_order))
#if len(current_sell_order) == 1:
if current_sell_order['ordStatus'] == 'Canceled':
cancel_retryCnt += 1
logger.info("[SellThread][make_sell_order] order Status == Canceled")
logger.info("[SellThread][make_sell_order] reason : " + str(current_sell_order['text']))
logger.info("[SellThread][make_sell_order] sell order retry : " + str(cancel_retryCnt))
current_sell_order = {}
sleep(0.5)
elif current_sell_order['ordStatus'] == 'New':
logger.info("[SellThread][make_sell_order] order Status == New")
self.expectedProfit = (float(current_price) - float(avgCostPrice)) * float(currentQty)
logger.info("[SellThread][make_sell_order] expectedProfit : " + str(self.expectedProfit))
break
else :
logger.info("[SellThread][make_sell_order] Abnormal ordStatus : " + str(current_sell_order['ordStatus']))
except Exception as ex:
self.PrintException()
return current_sell_order
def check_sell_order(self, avgCostPrice):
# checking whether or not it's sold
ret = False
sell_orders = self.custom_strategy.exchange.get_orders('Sell')
logger.info("[SellThread][check_sell_order] sell_orders : " + str(sell_orders))
if len(sell_orders) == 0:
# selling complete
logger.info("[SellThread][check_sell_order] selling complete!")
self.custom_strategy.exchange.cancel_all_orders('All')
singleton_data.instance().setAveDownCnt(0)
            # expectedProfit needs to be fixed
#logger.info("[SellThread][check_sell_order] ###### profit : + " + str(self.expectedProfit) + "$ ######")
#execute_logger.info("###### profit : + " + str(self.expectedProfit) + "$ ######")
ret = True
self.waiting_sell_order = {}
elif len(sell_orders) == 1:
current_price = self.custom_strategy.exchange.get_instrument()['lastPrice']
if not float(current_price) > float(avgCostPrice) + float(self.minSellingGap):
logger.info("[SellThread][check_sell_order] current_price(" + str(current_price) +") > avgCostPrice(" + str(avgCostPrice) + ") + minSellingGap(" + str(self.minSellingGap) + ")")
self.waiting_sell_order = {}
self.custom_strategy.exchange.cancel_all_orders('Sell')
ret = False
# 3.0 move to settings
elif float(sell_orders[0]['price']) - float(current_price) > 3.0:
                # the order price is more than $3 above the current price; amend the order
# reorder
self.waiting_sell_order = self.make_sell_order()
self.waiting_sell_order_copy = deepcopy(self.waiting_sell_order)
logger.info("[SellThread][check_sell_order] reorder current_price - 3$ : waiting_sell_order : " + str(self.waiting_sell_order))
else :
logger.info("[SellThread][check_sell_order] The price you ordered has not dropped by more than $ 3 from the current price.")
logger.info("[SellThread][check_sell_order] not yet selling")
return ret
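# Example usage (a minimal sketch; `strategy` is a hypothetical object exposing the
# `exchange` interface used above, e.g. get_instrument(), get_avgCostPrice(),
# get_currentQty(), create_order() and cancel_all_orders()):
#
#   sell_thread = SellThread(strategy)
#   sell_thread.start()
#   sell_thread.join()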
|
# Passing untrusted user input may have unintended consequences.
# Not designed to consume input from unknown sources (i.e.,
# the public internet).
import sys
import numpy as np
import cvxpy as cvx
from scipy import sparse
from .feature import _Feature
import pickle
class _CategoricalFeature(_Feature):
def __init__(self, name=None, regularization=None, load_from_file=None):
"""Initialize feature model (independent of data).
This function is called when the user adds a feature to a
Model, before any data has been specified.
Parameters
----------
name : string
Name for feature, used to make plots.
regularization : dictionary
Description of regularization terms. Three kinds of
regularization are supported: l1, l2, and Network Lasso.
To include one or more of these types of regularization,
include a key/value specifying the details in this dictionary.
The keys should be in ['l1', 'l2', 'network_lasso']. The
corresponding value provides additional details on the
regularization, as described below. One additional key
is recognized in this dictionary, 'prior', specifying
a prior estimate of the parameters. See below for more
details.
load_from_file : string or None.
Filename for this Feature, to be used for loading parameters. If
None (default), parameters are not loaded. If this parameter
is specified, any other parameters are ignored.
l1 Regularization Parameters
----------------------------
Description
l1 regularization is said to encourage sparsity, in the sense
that the resulting parameter estimates will typically match
some of the prior estimates perfectly. On the other hand,
it does not heavily discourage large deviations.
coef : float or dictionary
Coefficient associated with this regularization term, applied
to all parameter estimates (if float), or with a separate
coefficient for multiple parameters (if dictionary). If a
dictionary is used, only parameters with specified coefficients
are regularized.
prior : dictionary
Prior estimate of parameters associated with different categories.
Only parameters with specified prior estimates are regularized.
If this variable is not specified at all, an estimate of zero
for all categories is assumed. Note: this variable applies to both
l1 and l2 regularization, so if both are included, it seems silly
to specify it twice. Therefore, and perhaps confusingly, this
variable is specified in the top level regularization dictionary,
not in the individual regularization term dictionary. This is
best illustrated by example:
regularization = {'l1': {'coef': 0.3},
'l2': {'coef': {'male': 0.1, 'female': 0.2}},
'network_lasso': {'coef': 0.3, 'edges': edges},
'prior': {'male': -3.0, 'female': 5.0}
}
This incorporates all three types of regularization, with a coefficient
of 0.3 applied to all terms in the l1 term, a coefficient of 0.1 (0.2)
applied to the male (female) components of the l2 term, a prior estimate
of -3.0 (5.0) for the parameters corresponding to males (females), which
is applicable to both the l1 and l2 norms, and finally, a Network Lasso
term with overall coefficient 0.3, and edges specified in the pandas
DataFrame.
l2 Regularization Parameters
----------------------------
Description
l2 regularization discourages large deviations from the prior
estimate.
coef : float or dictionary
See l1 Regularization Parameters.
prior: dictionary
See l1 Regularization Parameters.
Network Lasso Regularization Parameters
---------------------------------------
Description
The Network Lasso encourages similar categories to have similar
parameter values, where the similarity of categories is defined
by the user.
coef : float
See l1 Regularization Parameters; but! Only floats are permitted
here.
edges : pandas DataFrame
Specifies which categories are believed to be similar to other
categories. Dataframe must have at least two columns called 'country1'
and 'country2'. An optional third column called 'weight' specifies the
strength of the belief. All values will be scaled to have maximum value
equal to 1 (so only relative strengths are used). Use coef to capture
the overall strength of these beliefs. If the 'weight' column is not
present, weights of 1 will be used for all parameters.
Optional Parameters
-------------------
use_cvx : bool
Flag specifying whether to use CVXPY to solve the
optimization problem. Defaults to True, and since no other
method has been implemented, does nothing.
solver : string
Solver to use with CVXPY. Defaults to 'ECOS'.
"""
if load_from_file is not None:
self._load(load_from_file)
return
if name is None:
raise ValueError('Feature must have a name.')
_Feature.__init__(self, name)
self._use_cvx = True
self._solver = 'ECOS'
self._categories = []
self._has_l1 = False
self._has_l2 = False
self._has_network_lasso = False
if regularization is not None:
if 'l1' in regularization:
self._has_l1 = True
if 'coef' in regularization['l1']:
self._coef1 = regularization['l1']['coef']
else:
raise ValueError('No coefficient specified for l1 regularization term.')
if 'l2' in regularization:
self._has_l2 = True
if 'coef' in regularization['l2']:
self._coef2 = regularization['l2']['coef']
else:
raise ValueError('No coefficient specified for l2 regularization term.')
if 'network_lasso' in regularization:
self._has_network_lasso = True
if 'coef' in regularization['network_lasso']:
self._lambda_network_lasso = regularization['network_lasso']['coef']
else:
raise ValueError('No coefficient specified for Network Lasso regularization term.')
if 'edges' in regularization['network_lasso']:
self._edges = regularization['network_lasso']['edges']
self._num_edges, em = self._edges.shape
# Loop over edges to see if a node is listed there that isn't
# present in the data. This matters! That means a category exists;
# we simply have no hard data about the probability associated with
# that category. We will *still* be able to predict a probability
# by the graph structure, provided it is connected to at least one
# node for which we *do* have hard data.
for index, row in self._edges.iterrows():
if row['country1'] not in self._categories:
self._categories.append(row['country1'])
if row['country2'] not in self._categories:
self._categories.append(row['country2'])
# Wait to create D until we have the data
else:
raise ValueError('Edges not specified for Network Lasso regularization term.')
if (self._has_l1 or self._has_l2) and 'prior' in regularization:
self._has_prior = True
self._prior = regularization['prior']
else:
self._has_prior = False
def initialize(self, x, smoothing=1.0, save_flag=False, save_prefix=None, na_signifier=None, verbose=False):
"""Initialize variables once data has been specified.
Parameters
----------
x : array
Observation corresponding to this feature.
smoothing : float
Overall smoothing factor, on top of the relative
smoothing specified in __init__. Defaults to 1.0,
indicating no change to the default smoothing.
save_flag : boolean
Flag indicating whether to save intermediate results.
save_prefix : string or None.
Prefix for filename. Actual filename will use this prefix
followed by the name of this feature, with a .pckl extension.
na_signifier : string or None
Indicating how missing data is marked. If specified, missing
data is handled by a special category whose parameter is
fixed at 0. Defaults to None.
verbose : bool
Flag specifying whether to print mildly helpful info.
Defaults to False.
"""
self._num_obs = len(x)
self._verbose = verbose
# Final list of categories consists of all categories specified via
# regularization terms as well as all the categories listed in the data
self._categories = list(set(x).union(self._categories))
self._num_categories = len(self._categories)
self._category_hash = {key: i for (key, i) in zip(self._categories, range(self._num_categories))}
# If we are using the Network Lasso, compute the relevant matrix, D.
if self._has_network_lasso:
D = np.zeros((self._num_edges, self._num_categories))
ne, em = self._edges.shape
ir = 0
for index, row in self._edges.iterrows():
# Check that nodes are in data
i = self._category_hash[row['country1']]
j = self._category_hash[row['country2']]
if em >= 3:
lmbda = row['weight']
else:
lmbda = 1.
D[ir, i] = lmbda
D[ir, j] = -lmbda
ir += 1
self._D = sparse.coo_matrix(D).tocsr()
self._lambda_network_lasso *= smoothing
# If there is a value corresponding to "unknown category", label it
# appropriately. We will make sure the corresponding parameter is
# always 0, since we make no prediction in this case.
if na_signifier is not None and na_signifier in self._categories:
self._na_index = self._category_hash[na_signifier]
else:
self._na_index = -1
# Store, not the data themselves, but integers representing the
# categories.
self.x = np.zeros(self._num_obs, dtype=np.int)
cnt = np.zeros(self._num_categories, dtype=np.int)
for (ix, i) in zip(x, range(self._num_obs)):
cnt[self._category_hash[ix]] += 1
self.x[i] = self._category_hash[ix]
# Replace coef1 (a float or dictionary) with lambda1 (a vector of weights)
# Possibility #1: coef1 is a float, no prior provided. In this case,
# all categories get the same weight, coef1 * smoothing.
#
# Possibility #2: coef1 is a float, prior provided. In this case,
# lambda should put weight zero on any categories not represented in the prior.
# all categories with prior specified get the same weight, coef1 * smoothing.
#
# Possibility #3: coef1 is a dictionary, no prior. In this case,
# lambda should put weight zero on any categories not represented in coef1.
# Any categories represented in coef1 get weight coef1[category] * smoothing.
#
# Possibility #4: coef1 is a dictionary, prior provided. In this case,
# lambda should put weight zero on any categories not represented *in both*
# coef1 and prior. Any categories represented in both coef1 and prior
# get weight coef1[category] * smoothing
if self._has_l1:
if type(self._coef1) == float:
if self._has_prior:
self._lambda1 = np.zeros(self._num_categories)
l = self._coef1 * smoothing
for key in self._prior:
self._lambda1[self._category_hash[key]] = l
else:
self._lambda1 = np.full(self._num_categories, self._coef1 * smoothing)
else:
if self._has_prior:
self._lambda1 = np.zeros(self._num_categories)
for key, value in self._coef1.iteritems():
if key in self._prior:
self._lambda1[self._category_hash[key]] = value * smoothing
else:
self._lambda1 = np.zeros(self._num_categories)
for key, value in self._coef1.iteritems():
self._lambda1[self._category_hash[key]] = value * smoothing
if self._has_l2:
if type(self._coef2) == float:
if self._has_prior:
self._lambda2 = np.zeros(self._num_categories)
l = self._coef2 * smoothing
for key in self._prior:
self._lambda2[self._category_hash[key]] = l
else:
self._lambda2 = np.full(self._num_categories, self._coef2 * smoothing)
else:
if self._has_prior:
self._lambda2 = np.zeros(self._num_categories)
for key, value in self._coef2.iteritems():
if key in self._prior:
self._lambda2[self._category_hash[key]] = value * smoothing
else:
self._lambda2 = np.zeros(self._num_categories)
for key, value in self._coef2.iteritems():
self._lambda2[self._category_hash[key]] = value * smoothing
if (self._has_l1 or self._has_l2) and self._has_prior:
prior = np.zeros(self._num_categories)
for key, value in self._prior.iteritems():
prior[self._category_hash[key]] = value
self._prior = prior
self._AtA = sparse.dia_matrix((cnt, 0), shape=(self._num_categories, self._num_categories), dtype=np.int)
self.p = np.zeros(self._num_categories)
if self._verbose:
print 'Number of categories: {0:d}'.format(self._num_categories)
            if self._has_network_lasso:
print 'Number of edges: {0:d}'.format(self._num_edges)
if save_flag:
self._save_self = True
            if save_prefix is None:
                self._filename = '{0:s}.pckl'.format(self._name)
            else:
                self._filename = '{0:s}_{1:s}.pckl'.format(save_prefix, self._name)
self._save()
else:
self._filename = None
self._save_self = False
def _save(self):
"""Save parameters so model fitting can be continued later."""
mv = {}
mv['num_obs'] = self._num_obs
mv['categories'] = self._categories
mv['num_categories'] = self._num_categories
mv['category_hash'] = self._category_hash
mv['has_l1'] = self._has_l1
if self._has_l1:
mv['lambda1'] = self._lambda1
mv['has_l2'] = self._has_l2
if self._has_l2:
mv['lambda2'] = self._lambda2
mv['has_network_lasso'] = self._has_network_lasso
if self._has_network_lasso:
mv['num_edges'] = self._num_edges
mv['D'] = self._D
mv['lambda_network_lasso'] = self._lambda_network_lasso
mv['has_prior'] = self._has_prior
if self._has_prior:
mv['prior'] = self._prior
mv['na_index'] = self._na_index
mv['x'] = self.x
mv['p'] = self.p
mv['AtA'] = self._AtA
mv['verbose'] = self._verbose
mv['use_cvx'] = self._use_cvx
mv['solver'] = self._solver
mv['name'] = self._name
mv['save_self'] = self._save_self
        f = open(self._filename, 'wb')
pickle.dump(mv, f)
f.close()
def _load(self, filename):
"""Load parameters from a previous model fitting session."""
        f = open(filename, 'rb')
mv = pickle.load(f)
f.close()
self._filename = filename
self._num_obs = mv['num_obs']
self._categories = mv['categories']
self._num_categories = mv['num_categories']
self._category_hash = mv['category_hash']
self._has_l1 = mv['has_l1']
if self._has_l1:
self._lambda1 = mv['lambda1']
self._has_l2 = mv['has_l2']
if self._has_l2:
self._lambda2 = mv['lambda2']
self._has_network_lasso = mv['has_network_lasso']
if self._has_network_lasso:
self._num_edges = mv['num_edges']
self._D = mv['D']
self._lambda_network_lasso = mv['lambda_network_lasso']
self._has_prior = mv['has_prior']
if self._has_prior:
self._prior = mv['prior']
self._na_index = mv['na_index']
self.x = mv['x']
self.p = mv['p']
self._AtA = mv['AtA']
self._verbose = mv['verbose']
self._use_cvx = mv['use_cvx']
self._solver = mv['solver']
self._name = mv['name']
self._save_self = mv['save_self']
def optimize(self, fpumz, rho):
"""Optimize this Feature's parameters.
Solves the optimization problem:
minimize \rho/2 * \| A*q + b \|_2^2
+ \| diag(\lambda_1) * (q - q_prior) \|_1
+ \| diag(\lambda_2)^(1/2) * (q - q_prior) \|_2^2
+ \lambda_nl * \| D*q \|_1
s.t. 1' * A * q = 0
where b = \bar{f}^k + u^k - \bar{z}^k - A*q^k
Parameters
----------
fpumz : (m,) ndarray
Vector representing \bar{f}^k + u^k - \bar{z}^k
rho : float
ADMM parameter. Must be positive.
Returns
-------
fkp1 : (m,) ndarray
Vector representing this Feature's contribution to the response.
Notes
-----
        The problem listed above has a non-smooth
objective. It is equivalent to the following quadratic
program with linear inequality constraints:
minimize q' * (A'*A + (2 / \rho) * diag(\lambda_2)) * q
+ 2 * q' * (A' * b - (2 / \rho) * diag(\lambda_2) * q_prior)
+ (2 / \rho) * \lambda_1' * t
+ (2 / \rho) * \lambda_nl * sum(s)
subject to c' * q = 0
0 <= t
0 <= s
-t <= q - q_prior <= t
-s <= D*q <= s
with optimization variables q, t, s. We use cvxpy to solve this
equivalent problem. The size of this problem is independent of
the number of training examples, and is governed strictly by the
number of categories, and the specified number of connections.
In our tests, cvxpy performs admirably. In situations where we
have a large number of categories, or the number of connections
is large, it might make sense to invest the time to write a
custom solver that takes into account the structure of the
problem. Specifically, noting that A'*A is a diagonal matrix,
and D has a specific sparsity pattern: each row of D contains
two non-zero elements, one equal to +1, the other equal to -1.
"""
b = fpumz - self._compute_Az(self.p)
Atb = self._compute_Atz(b)
if self._has_l2 and self._has_prior:
Atb -= (2. / rho) * np.multiply(self._lambda2, self._prior)
q = cvx.Variable(self._num_categories)
if self._has_l2:
AtA = cvx.Constant(self._AtA + sparse.dia_matrix( ((2. / rho) * self._lambda2, 0), shape=(self._num_categories, self._num_categories), dtype=np.float))
else:
AtA = cvx.Constant(self._AtA)
Atb = cvx.Constant(Atb)
obj = cvx.quad_form(q, AtA) + 2. * (Atb.T * q)
c = cvx.Constant(self._AtA.diagonal())
constraints = [c.T * q == 0]
        if self._has_l1:
            t = cvx.Variable(self._num_categories)
            lmbda1 = cvx.Constant((2 / rho) * self._lambda1)
            # The prior defaults to zero when none was specified (see the __init__ docstring)
            q_prior = self._prior if self._has_prior else np.zeros(self._num_categories)
            obj += (lmbda1.T * t)
            constraints += [0 <= t[:],
                            0 <= t[:] + q[:] - q_prior[:],
                            0 <= t[:] - q[:] + q_prior[:]]
if self._has_network_lasso:
s = cvx.Variable(self._num_edges)
D = cvx.Constant(self._D)
obj += (2 / rho) * self._lambda_network_lasso * cvx.sum_entries(s)
constraints += [0 <= s[:],
0 <= s[:] + D * q[:],
0 <= s[:] - D * q[:]]
prob = cvx.Problem(cvx.Minimize(obj), constraints)
prob.solve(verbose=self._verbose, solver=self._solver)#, abstol=1e-3, reltol=1e-3, feastol=1e-3)
if prob.status != cvx.OPTIMAL and prob.status != cvx.OPTIMAL_INACCURATE:
print "Categorical variable failed to converge."
sys.exit()
self.p = q.value.A.squeeze()
if self._na_index >= 0:
            self.p[self._na_index] = 0
if self._save_self:
self._save()
return self._compute_Az(self.p)
#@cython.boundscheck(False)
#@cython.wraparound(False)
#def _compute_Az(self, np.ndarray[DTYPE_t, ndim=1] z):
def _compute_Az(self, z):
"""Compute A*z.
The sparsity pattern of A enables us to compute A*z in time
proportional to the number of elements of z, regardless of the
number of rows of A.
"""
#cdef long i, ix
#cdef long m = self._num_obs
#cdef long[:] x = self.x
#cdef np.ndarray[DTYPE_t, ndim=1] Az = np.zeros(m)
m = self._num_obs
x = self.x
Az = np.zeros(m)
for i in range(m):
ix = x[i]
Az[i] = z[ix]
return Az
#@cython.boundscheck(False)
#@cython.wraparound(False)
#def _compute_Atz(self, np.ndarray[DTYPE_t, ndim=1] z):
def _compute_Atz(self, z):
"""Compute A' * z.
The sparsity pattern of A enables us to compute A' * z in time
proportional to the number of elements of z, regardless of the
number of columns of A.
"""
#cdef long i, ix
#cdef long m = self._num_obs
#cdef long K = self._num_categories
#cdef long[:] x = self.x
#cdef np.ndarray[DTYPE_t, ndim=1] Atz = np.zeros(K)
m = self._num_obs
K = self._num_categories
x = self.x
Atz = np.zeros(K)
for i in range(m):
ix = x[i]
Atz[ix] += z[i]
return Atz
def compute_dual_tol(self, y):
"""Computes this Feature's contribution to the dual residual tolerance.
See gamdist.
"""
Aty = self._compute_Atz(y)
return Aty.dot(Aty)
def num_params(self):
"""Number of parameters.
Returns the number of parameters used in this component of the
model. This is for a different purpose than the degrees of
freedom. The latter is used for model selection; the former for
assessing convergence. Thus, the presence of regularization does
not impact this function.
"""
return len(self.p)
def dof(self):
"""Degrees of freedom.
Returns the degrees of freedom associated with this component of
the model. Categorical features nominally have 1 degree of
freedom for each category (known as "levels" among
statisticians). Since we include an affine term in the overall
model, there is an ambiguity in the parameters that can be
resolved in various ways. We apply the constraint that the
average contribution, over all observations in the training set,
is zero. This has the effect of reducing the DOF by 1. Any
regularization included reduces the effective DOF even further,
but I have not had a chance to research this properly. Thus we
just return the number of levels minus 1.
Returns
dof : float
Degrees of freedom.
"""
return len(self.p) - 1
def predict(self, x):
"""Apply fitted model to feature.
Parameters
----------
X : array
Data for this feature.
Returns
-------
f_j : array
The contribution of this feature to the predicted
response.
"""
prediction = np.zeros(len(x))
for i in range(len(x)):
if x[i] in self._category_hash:
prediction[i] = self.p[self._category_hash[x[i]]]
return prediction
def plot(self):
''' Creates a bar chart showing the transformed response for each level. '''
# For categorical features, plot a bar graph for each category
# showing the estimated values, confidence intervals, and number
# of observations in each category. If Family is Binomial,
# show 0/1 observations on top/bottom of graph. Otherwise
# show observations on bottom of graph.
#
# We could permit fancier plots for geographic data e.g.
# Choropleth maps.
pass
def __str__(self):
''' Print confidence intervals on level values, significance tests? '''
# For categorical features, print value associated
# with each category, a confidence interval, and
# a p-value against the null hypothesis that the
# parameter is 0. For Network Lasso, print p-value
# against null hypothesis that related categories
# have the same value.
desc = 'Feature {0:s}\n'.format(self._name)
for i in self._categories:
desc += ' {0:s}: {1:.06g}\n'.format(i, self.p[self._category_hash[i]])
return desc
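# Example usage (a minimal sketch; `edges_df` and the observation array `x` are
# hypothetical, and `fpumz`/`rho` would be supplied by the surrounding ADMM loop):
#
#   feature = _CategoricalFeature(
#       name='country',
#       regularization={'l2': {'coef': 0.1},
#                       'network_lasso': {'coef': 0.3, 'edges': edges_df}})
#   feature.initialize(x, smoothing=1.0)
#   f_contribution = feature.optimize(fpumz, rho=1.0)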
|
"""
Tests for the :mod:`fiftyone.utils.cvat` module.
You must run these tests interactively as follows::
python tests/intensive/cvat_tests.py
| Copyright 2017-2022, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from bson import ObjectId
from collections import defaultdict
import numpy as np
import os
import unittest
import eta.core.utils as etau
import fiftyone as fo
import fiftyone.utils.cvat as fouc
import fiftyone.zoo as foz
from fiftyone.core.expressions import ViewField as F
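# The helpers below talk to the CVAT REST API directly (via the `api` object returned
# by `results.connect_to_api()`) to simulate a human annotator creating, updating,
# and deleting annotations while a task is pending.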
def _find_shape(anno_json, label_id):
shape = _parse_shapes(anno_json["shapes"], label_id)
if shape is not None:
return shape
for track in anno_json["tracks"]:
shape = _parse_shapes(track["shapes"], label_id)
if shape is not None:
return shape
def _parse_shapes(shapes, label_id):
for shape in shapes:
for attr in shape["attributes"]:
if attr["value"] == label_id:
return shape
def _get_shape(api, task_id, label_id):
anno_json = api.get(api.task_annotation_url(task_id)).json()
return _find_shape(anno_json, label_id)
def _delete_shape(api, task_id, label_id):
anno_json = api.get(api.task_annotation_url(task_id)).json()
shape = _find_shape(anno_json, label_id)
if shape is not None:
del_json = {"version": 1, "tags": [], "shapes": [shape], "tracks": []}
del_url = api.task_annotation_url(task_id) + "?action=delete"
api.patch(del_url, json=del_json)
def _get_label(api, task_id, label=None):
attr_id_map, class_id_map = api._get_attr_class_maps(task_id)
if isinstance(label, str):
label = class_id_map[label]
else:
label = list(class_id_map.values())[0]
return label
def _create_annotation(
api, task_id, shape=None, tag=None, track=None, points=None, _type=None
):
if points is None:
points = [10, 20, 30, 40]
if _type is None:
_type = "rectangle"
shapes = []
tags = []
tracks = []
if shape is not None:
if not isinstance(shape, dict):
label = _get_label(api, task_id, label=shape)
shape = {
"type": _type,
"frame": 0,
"label_id": label,
"group": 0,
"attributes": [],
"points": points,
"occluded": False,
}
shapes = [shape]
if tag is not None:
if not isinstance(tag, dict):
label = _get_label(api, task_id, label=tag)
tag = {
"frame": 0,
"label_id": label,
"group": 0,
"attributes": [],
}
tags = [tag]
if track is not None:
if not isinstance(track, dict):
label = _get_label(api, task_id, label=track)
if isinstance(track, tuple):
start, end = track
else:
start, end = 0, -1
track = {
"frame": start,
"label_id": label,
"group": 0,
"shapes": [
{
"type": _type,
"occluded": False,
"points": points,
"frame": start,
"outside": False,
"attributes": [],
"z_order": 0,
}
],
"attributes": [],
}
if end > start:
track["shapes"].append(
{
"type": _type,
"occluded": False,
"points": points,
"frame": end,
"outside": True,
"attributes": [],
"z_order": 0,
}
)
tracks.append(track)
create_json = {
"version": 1,
"tags": tags,
"shapes": shapes,
"tracks": tracks,
}
create_url = api.task_annotation_url(task_id) + "?action=create"
api.patch(create_url, json=create_json)
def _update_shape(
api,
task_id,
label_id,
label=None,
points=None,
attributes=None,
occluded=None,
):
anno_json = api.get(api.task_annotation_url(task_id)).json()
shape = _find_shape(anno_json, label_id)
if shape is not None:
if points is not None:
shape["points"] = points
if occluded is not None:
shape["occluded"] = occluded
if attributes is not None:
attr_id_map, class_id_map = api._get_attr_class_maps(task_id)
if label is None:
label_id = shape["label_id"]
attr_id_map = attr_id_map[label_id]
else:
label_id = class_id_map[label]
prev_attr_id_map = attr_id_map[shape["label_id"]]
prev_attr_id_map = {v: k for k, v in prev_attr_id_map.items()}
attr_id_map = attr_id_map[label_id]
shape["label_id"] = label_id
for attr in shape["attributes"]:
spec = prev_attr_id_map[attr["spec_id"]]
attr["spec_id"] = attr_id_map[spec]
for attr_name, attr_val in attributes:
if attr_name in attr_id_map:
shape["attributes"].append(
{"spec_id": attr_id_map[attr_name], "value": attr_val}
)
update_json = {
"version": 1,
"tags": [],
"shapes": [shape],
"tracks": [],
}
update_url = api.task_annotation_url(task_id) + "?action=update"
api.patch(update_url, json=update_json)
class CVATTests(unittest.TestCase):
def test_upload(self):
# Test images
dataset = foz.load_zoo_dataset("quickstart", max_samples=1).clone()
prev_ids = dataset.values("ground_truth.detections.id", unwind=True)
anno_key = "anno_key"
results = dataset.annotate(
anno_key, backend="cvat", label_field="ground_truth",
)
api = results.connect_to_api()
task_id = results.task_ids[0]
shape_id = dataset.first().ground_truth.detections[0].id
self.assertIsNotNone(_get_shape(api, task_id, shape_id))
sample_id = list(list(results.frame_id_map.values())[0].values())[0][
"sample_id"
]
self.assertEqual(sample_id, dataset.first().id)
api.close()
dataset.load_annotations(anno_key, cleanup=True)
self.assertListEqual(
prev_ids,
dataset.values("ground_truth.detections.id", unwind=True),
)
# Test Videos
dataset = foz.load_zoo_dataset(
"quickstart-video", max_samples=1
).clone()
prev_ids = dataset.values(
"frames.detections.detections.id", unwind=True
)
anno_key = "anno_key"
results = dataset.annotate(
anno_key, backend="cvat", label_field="frames.detections",
)
api = results.connect_to_api()
task_id = results.task_ids[0]
shape_id = dataset.first().frames[1].detections.detections[0].id
self.assertIsNotNone(_get_shape(api, task_id, shape_id))
sample_id = list(list(results.frame_id_map.values())[0].values())[0][
"sample_id"
]
self.assertEqual(sample_id, dataset.first().id)
api.close()
dataset.load_annotations(anno_key, cleanup=True)
self.assertListEqual(
prev_ids,
dataset.values("frames.detections.detections.id", unwind=True),
)
def test_detection_labelling(self):
dataset = (
foz.load_zoo_dataset("quickstart")
.select_fields("ground_truth")
.clone()
)
# Get a subset that contains at least 2 objects
dataset = dataset.match(F("ground_truth.detections").length() > 1)[
:2
].clone()
previous_dataset = dataset.clone()
previous_label_ids = dataset.values(
"ground_truth.detections.id", unwind=True
)
anno_key = "anno_key"
attributes = {"test": {"type": "text"}}
results = dataset.annotate(
anno_key,
backend="cvat",
label_field="ground_truth",
attributes=attributes,
)
api = results.connect_to_api()
task_id = results.task_ids[0]
deleted_label_id = previous_label_ids[0]
updated_label_id = previous_label_ids[1]
_delete_shape(api, task_id, deleted_label_id)
_create_annotation(api, task_id, shape=True)
_update_shape(
api, task_id, updated_label_id, attributes=[("test", "1")]
)
dataset.load_annotations(anno_key, cleanup=True)
label_ids = dataset.values("ground_truth.detections.id", unwind=True)
self.assertEqual(len(label_ids), len(previous_label_ids))
added_label_ids = list(set(label_ids) - set(previous_label_ids))
self.assertEqual(len(added_label_ids), 1)
deleted_label_ids = list(set(previous_label_ids) - set(label_ids))
self.assertEqual(len(deleted_label_ids), 1)
updated_sample = dataset.filter_labels(
"ground_truth", F("_id") == ObjectId(updated_label_id)
).first()
prev_updated_sample = previous_dataset.filter_labels(
"ground_truth", F("_id") == ObjectId(updated_label_id)
).first()
self.assertEqual(len(updated_sample.ground_truth.detections), 1)
self.assertEqual(len(prev_updated_sample.ground_truth.detections), 1)
self.assertEqual(
updated_sample.ground_truth.detections[0].id,
prev_updated_sample.ground_truth.detections[0].id,
)
self.assertEqual(updated_sample.ground_truth.detections[0].test, 1)
api.close()
def test_multiple_fields(self):
dataset = foz.load_zoo_dataset(
"open-images-v6",
split="validation",
label_types=["detections", "segmentations", "classifications"],
classes=["Person"],
max_samples=10,
).clone()
prev_dataset = dataset.clone()
anno_key = "anno_key"
label_schema = {
"detections": {},
"segmentations": {"type": "instances"},
"positive_labels": {},
"negative_labels": {},
}
results = dataset.annotate(
anno_key,
backend="cvat",
label_schema=label_schema,
classes=["Person"],
)
api = results.connect_to_api()
task_id = results.task_ids[0]
dataset.load_annotations(anno_key, cleanup=True)
api.close()
def _remove_bbox(dataset, label_field):
view = dataset.set_field(
"%s.detections" % label_field,
F("detections").map(
F().set_field("bounding_box", []).set_field("mask", None)
),
)
return view
# Ensure ids and attrs are equal
view = _remove_bbox(dataset, "detections")
prev_view = _remove_bbox(prev_dataset, "detections")
self.assertListEqual(
view.values("detections", unwind=True),
prev_view.values("detections", unwind=True),
)
view = _remove_bbox(dataset, "segmentations")
prev_view = _remove_bbox(prev_dataset, "segmentations")
self.assertListEqual(
view.values("segmentations", unwind=True),
prev_view.values("segmentations", unwind=True),
)
self.assertListEqual(
dataset.values("positive_labels", unwind=True),
prev_dataset.values("positive_labels", unwind=True),
)
self.assertListEqual(
dataset.values("negative_labels", unwind=True),
prev_dataset.values("negative_labels", unwind=True),
)
def test_task_creation_arguments(self):
dataset = (
foz.load_zoo_dataset("quickstart", max_samples=4)
.select_fields("ground_truth")
.clone()
)
user = fo.annotation_config.backends.get("cvat", {})
user = user.get("username", None)
users = [user] if user is not None else None
anno_key = "anno_key"
bug_tracker = "test_tracker"
results = dataset.annotate(
anno_key,
backend="cvat",
label_field="ground_truth",
task_size=2,
segment_size=1,
task_assignee=users,
job_assignees=users,
job_reviewers=users,
issue_tracker=bug_tracker,
)
task_ids = results.task_ids
api = results.connect_to_api()
self.assertEqual(len(task_ids), 2)
for task_id in task_ids:
task_json = api.get(api.task_url(task_id)).json()
self.assertEqual(task_json["bug_tracker"], bug_tracker)
self.assertEqual(task_json["segment_size"], 1)
if user is not None:
self.assertEqual(task_json["assignee"]["username"], user)
for job in api.get(api.jobs_url(task_id)).json():
job_json = api.get(job["url"]).json()
if user is not None:
self.assertEqual(job_json["assignee"]["username"], user)
if api.server_version == 1:
self.assertEqual(
job_json["reviewer"]["username"], user
)
results.print_status()
status = results.get_status()
self.assertEqual(
status["ground_truth"][task_ids[0]]["assignee"]["username"], user,
)
dataset.load_annotations(anno_key, cleanup=True)
api.close()
def test_project(self):
dataset = (
foz.load_zoo_dataset("quickstart", max_samples=2)
.select_fields("ground_truth")
.clone()
)
anno_key = "anno_key"
project_name = "cvat_unittest_project"
results = dataset.annotate(
anno_key,
backend="cvat",
label_field="ground_truth",
project_name=project_name,
)
api = results.connect_to_api()
project_id = api.get_project_id(project_name)
self.assertIsNotNone(project_id)
self.assertIn(project_id, results.project_ids)
anno_key2 = "anno_key2"
results2 = dataset.annotate(
anno_key2,
backend="cvat",
label_field="ground_truth",
project_name=project_name,
)
self.assertNotIn(project_id, results2.project_ids)
self.assertIsNotNone(api.get_project_id(project_name))
dataset.load_annotations(anno_key, cleanup=True)
self.assertIsNotNone(api.get_project_id(project_name))
dataset.load_annotations(anno_key2, cleanup=True)
self.assertIsNotNone(api.get_project_id(project_name))
api.delete_project(project_id)
api.close()
api = results.connect_to_api()
self.assertIsNone(api.get_project_id(project_name))
api.close()
def test_example_add_new_label_fields(self):
# Test label field arguments
dataset = foz.load_zoo_dataset("quickstart", max_samples=10).clone()
view = dataset.take(1)
anno_key = "cvat_new_field"
results = view.annotate(
anno_key,
label_field="new_classifications",
label_type="classifications",
classes=["dog", "cat", "person"],
)
self.assertIsNotNone(dataset.get_annotation_info(anno_key))
api = results.connect_to_api()
task_id = results.task_ids[0]
_create_annotation(api, task_id, tag="dog")
dataset.load_annotations(anno_key, cleanup=True)
tags = view.first().new_classifications.classifications
num_tags = len(tags)
self.assertEqual(num_tags, 1)
self.assertEqual(tags[0].label, "dog")
# Test label schema
anno_key = "cvat_new_field_schema"
label_schema = {
"new_classifications_2": {
"type": "classifications",
"classes": ["dog", "cat", "person"],
}
}
results = view.annotate(anno_key, label_schema=label_schema)
self.assertIsNotNone(dataset.get_annotation_info(anno_key))
api.close()
api = results.connect_to_api()
task_id = results.task_ids[0]
_create_annotation(api, task_id, tag="person")
dataset.load_annotations(anno_key, cleanup=True)
tags = view.first().new_classifications_2.classifications
num_tags = len(tags)
self.assertEqual(num_tags, 1)
self.assertEqual(tags[0].label, "person")
dataset.load_annotations(anno_key, cleanup=True)
api.close()
def test_example_restricting_label_edits(self):
dataset = foz.load_zoo_dataset("quickstart").clone()
# Grab a sample that contains at least 2 people
view = dataset.match(
F("ground_truth.detections")
.filter(F("label") == "person")
.length()
> 1
).limit(1)
previous_labels = view.values("ground_truth.detections", unwind=True)
previous_person_labels = view.filter_labels(
"ground_truth", F("label") == "person"
).values("ground_truth.detections", unwind=True)
anno_key = "cvat_edit_restrictions"
# The new attributes that we want to populate
attributes = {
"sex": {"type": "select", "values": ["male", "female"],},
"age": {"type": "text",},
}
results = view.annotate(
anno_key,
label_field="ground_truth",
classes=["person", "test"],
attributes=attributes,
allow_additions=False,
allow_deletions=False,
allow_label_edits=False,
allow_spatial_edits=False,
)
self.assertIsNotNone(dataset.get_annotation_info(anno_key))
task_id = results.task_ids[0]
api = results.connect_to_api()
# Delete label
deleted_id = previous_person_labels[0].id
_delete_shape(api, task_id, deleted_id)
# Add label
_create_annotation(api, task_id, shape="person")
# Edit label and bounding box
edited_id = previous_person_labels[1].id
_update_shape(
api,
task_id,
edited_id,
label="test",
points=[10, 20, 30, 40],
attributes=[("sex", "male")],
)
dataset.load_annotations(anno_key, cleanup=True)
api.close()
labels = view.values("ground_truth.detections", unwind=True)
person_labels = view.filter_labels(
"ground_truth", F("label") == "person"
).values("ground_truth.detections", unwind=True)
self.assertListEqual(
[d.label for d in labels], [d.label for d in previous_labels],
)
self.assertListEqual(
[d.bounding_box for d in labels],
[d.bounding_box for d in previous_labels],
)
self.assertListEqual(
[d.id for d in labels], [d.id for d in previous_labels],
)
self.assertEqual(
len(dataset.filter_labels("ground_truth", F("sex") == "male")), 1,
)
def test_issue_1634(self):
# tests: https://github.com/voxel51/fiftyone/issues/1634
dataset = (
foz.load_zoo_dataset("quickstart-video", max_samples=1)
.select_fields("frames.detections")
.clone()
)
anno_key = "issue_1634_test"
results = dataset.annotate(
anno_key,
label_field="frames.ground_truth",
label_type="detections",
classes=["test"],
)
task_id = results.task_ids[0]
api = results.connect_to_api()
# Create overlapping tracks of different type
_create_annotation(
api,
task_id,
track=(0, 30),
_type="polygon",
points=[10, 20, 40, 30, 50, 60],
)
_create_annotation(
api, task_id, track=(20, 40),
)
api.close()
imported_dataset = fo.Dataset()
with etau.TempDir() as tmp:
fouc.import_annotations(
imported_dataset,
task_ids=[task_id],
data_path=tmp,
download_media=True,
)
imported_dataset.compute_metadata()
self.assertEqual(
imported_dataset.first().metadata.total_frame_count,
dataset.first().metadata.total_frame_count,
)
imported_dataset.export(
export_dir=tmp, dataset_type=fo.types.CVATVideoDataset
)
filename = os.path.splitext(
os.path.basename(imported_dataset.first().filepath)
)[0]
labels_filepath = os.path.join(tmp, "labels", "%s.xml" % filename)
with open(labels_filepath, "r") as f:
label_file_info = f.read()
track_1 = '<track id="1" label="test">'
track_2 = '<track id="2" label="test">'
polygon_frame_0 = '<polygon frame="0"'
polygon_frame_30 = '<polygon frame="30"'
box_frame_20 = '<box frame="20"'
box_frame_40 = '<box frame="40"'
self.assertTrue(track_1 in label_file_info)
self.assertTrue(track_2 in label_file_info)
self.assertTrue(polygon_frame_0 in label_file_info)
self.assertTrue(polygon_frame_30 in label_file_info)
self.assertTrue(box_frame_20 in label_file_info)
self.assertTrue(box_frame_40 in label_file_info)
cvat_video_dataset = fo.Dataset.from_dir(
dataset_dir=tmp, dataset_type=fo.types.CVATVideoDataset,
)
detections = cvat_video_dataset.values(
"frames.detections", unwind=True
)
detections = [i for i in detections if i is not None]
self.assertEqual(len(detections), 20)
polylines = cvat_video_dataset.values(
"frames.polylines", unwind=True
)
polylines = [i for i in polylines if i is not None]
self.assertEqual(len(polylines), 30)
dataset.load_annotations(anno_key, cleanup=True)
def test_deleted_tasks(self):
dataset = foz.load_zoo_dataset("quickstart", max_samples=1).clone()
prev_ids = dataset.values("ground_truth.detections.id", unwind=True)
anno_key = "anno_key"
results = dataset.annotate(
anno_key, backend="cvat", label_field="ground_truth",
)
api = results.connect_to_api()
task_id = results.task_ids[0]
api.delete_task(task_id)
status = results.get_status()
api.close()
dataset.load_annotations(anno_key, cleanup=True)
self.assertListEqual(
dataset.values("ground_truth.detections.id", unwind=True),
prev_ids,
)
def test_occluded_attr(self):
dataset = foz.load_zoo_dataset("quickstart", max_samples=1).clone()
anno_key = "cvat_occluded_widget"
# Populate a new `occluded` attribute on the existing `ground_truth` labels
# using CVAT's occluded widget
label_schema = {
"ground_truth": {"attributes": {"occluded": {"type": "occluded",}}}
}
results = dataset.annotate(
anno_key, label_schema=label_schema, backend="cvat"
)
api = results.connect_to_api()
task_id = results.task_ids[0]
shape_id = dataset.first().ground_truth.detections[0].id
_update_shape(api, task_id, shape_id, occluded=True)
dataset.load_annotations(anno_key, cleanup=True)
id_occ_map = dict(
zip(
*dataset.values(
[
"ground_truth.detections.id",
"ground_truth.detections.occluded",
],
unwind=True,
)
)
)
self.assertTrue(id_occ_map.pop(shape_id))
self.assertFalse(any(id_occ_map.values()))
def test_map_view_stage(self):
dataset = (
foz.load_zoo_dataset("quickstart")
.select_fields("ground_truth")
.clone()
)
# Get a subset that contains at least 2 objects
dataset = dataset.match(F("ground_truth.detections").length() > 1)[
:1
].clone()
prev_ids = dataset.values("ground_truth.detections.id", unwind=True)
# Set one of the detections to upper case
sample = dataset.first()
label = sample.ground_truth.detections[0].label
sample.ground_truth.detections[0].label = label.upper()
sample.save()
prev_unchanged_label = dataset.select_labels(ids=prev_ids[1]).values(
"ground_truth.detections.label", unwind=True
)[0]
labels = dataset.distinct("ground_truth.detections.label")
label_map = {l: l.upper() for l in labels}
view = dataset.map_labels("ground_truth", label_map)
anno_key = "anno_key"
results = view.annotate(
anno_key, backend="cvat", label_field="ground_truth",
)
api = results.connect_to_api()
task_id = results.task_ids[0]
deleted_id = prev_ids[0]
self.assertIsNotNone(_get_shape(api, task_id, deleted_id))
_create_annotation(api, task_id, shape=labels[0].upper())
_delete_shape(api, task_id, deleted_id)
dataset.load_annotations(anno_key, cleanup=True)
loaded_ids = dataset.values("ground_truth.detections.id", unwind=True)
self.assertEqual(len(loaded_ids), len(prev_ids))
# We expect existing labels to have been updated according to the
# mapping
unchanged_label = dataset.select_labels(ids=prev_ids[1]).values(
"ground_truth.detections.label", unwind=True
)[0]
self.assertNotEqual(unchanged_label, prev_unchanged_label)
# Expect newly created labels to retain whatever class they were
# annotated as
new_id = list(set(loaded_ids) - set(prev_ids))[0]
new_label = dataset.select_labels(ids=new_id).values(
"ground_truth.detections.label", unwind=True
)[0]
self.assertEqual(labels[0].upper(), new_label)
def test_dest_field(self):
# Test images
dataset = foz.load_zoo_dataset("quickstart", max_samples=2).clone()
prev_labels = dataset.values("ground_truth", unwind=True)
anno_key = "test_dest_field"
results = dataset.annotate(anno_key, label_field="ground_truth")
dataset.load_annotations(
anno_key, cleanup=True, dest_field="test_field",
)
self.assertListEqual(
prev_labels, dataset.values("ground_truth", unwind=True),
)
self.assertListEqual(
sorted(dataset.values("ground_truth.detections.id", unwind=True)),
sorted(dataset.values("test_field.detections.id", unwind=True)),
)
# Test dict
dataset = foz.load_zoo_dataset("quickstart", max_samples=2).clone()
prev_labels = dataset.values("ground_truth", unwind=True)
anno_key = "test_dest_field"
label_schema = {
"ground_truth": {},
"new_points": {"type": "keypoints", "classes": ["test"],},
"new_polygon": {"type": "polygons", "classes": ["test2"],},
}
results = dataset.annotate(anno_key, label_schema=label_schema)
api = results.connect_to_api()
task_id = results.task_ids[0]
_create_annotation(
api,
task_id,
shape="test",
_type="points",
points=[10, 20, 40, 30, 50, 60],
)
_create_annotation(
api,
task_id,
shape="test2",
_type="polygon",
points=[10, 20, 40, 30, 50, 60],
)
dest_field = {
"ground_truth": "test_field_1",
"new_points": "test_field_2",
}
dataset.load_annotations(
anno_key, cleanup=True, dest_field=dest_field,
)
self.assertFalse(dataset.has_sample_field("new_points"))
self.assertTrue(dataset.has_sample_field("new_polygon"))
self.assertTrue(dataset.has_sample_field("test_field_1"))
self.assertTrue(dataset.has_sample_field("test_field_2"))
self.assertListEqual(
prev_labels, dataset.values("ground_truth", unwind=True),
)
self.assertListEqual(
sorted(dataset.values("ground_truth.detections.id", unwind=True)),
sorted(dataset.values("test_field_1.detections.id", unwind=True)),
)
self.assertEqual(
len(dataset.values("test_field_2.keypoints.id", unwind=True)), 1,
)
self.assertEqual(
len(dataset.values("new_polygon.polylines.id", unwind=True)), 1,
)
# Test modification
dataset = foz.load_zoo_dataset("quickstart", max_samples=2).clone()
prev_ids = dataset.values("ground_truth.detections.id", unwind=True)
anno_key = "test_dest_field"
results = dataset.annotate(anno_key, label_field="ground_truth")
api = results.connect_to_api()
task_id = results.task_ids[0]
shape_id = dataset.first().ground_truth.detections[0].id
_delete_shape(api, task_id, shape_id)
_create_annotation(api, task_id, shape=True)
_create_annotation(
api,
task_id,
shape=True,
_type="points",
points=[10, 20, 40, 30, 50, 60],
)
dataset.load_annotations(
anno_key, cleanup=True, dest_field="test_field", unexpected="keep",
)
self.assertListEqual(
sorted(prev_ids),
sorted(dataset.values("ground_truth.detections.id", unwind=True)),
)
test_ids = dataset.values("test_field.detections.id", unwind=True)
self.assertEqual(len(set(test_ids) - set(prev_ids)), 1)
self.assertEqual(len(set(prev_ids) - set(test_ids)), 1)
# Test videos
dataset = foz.load_zoo_dataset(
"quickstart-video", max_samples=1
).clone()
prev_labels = dataset.values("frames.detections", unwind=True)
anno_key = "test_dest_field"
results = dataset.annotate(anno_key, label_field="frames.detections")
dataset.load_annotations(
anno_key, cleanup=True, dest_field="frames.test_field",
)
self.assertListEqual(
prev_labels, dataset.values("frames.detections", unwind=True),
)
self.assertListEqual(
sorted(
dataset.values("frames.detections.detections.id", unwind=True)
),
sorted(
dataset.values("frames.test_field.detections.id", unwind=True)
),
)
if __name__ == "__main__":
fo.config.show_progress_bars = False
unittest.main(verbosity=2)
|
<reponame>honzamach/mydojo
"""New tables: users and groups
Revision ID: fe560e5dba27
Revises:
Create Date: 2019-02-08 13:38:38.469487
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'fe560e5dba27'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('groups',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('createtime', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(length=100), nullable=True),
sa.Column('description', sa.String(), nullable=True),
sa.Column('enabled', sa.Boolean(), nullable=False),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['parent_id'], ['groups.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_groups_name'), 'groups', ['name'], unique=True)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('createtime', sa.DateTime(), nullable=True),
sa.Column('login', sa.String(length=50), nullable=True),
sa.Column('fullname', sa.String(length=100), nullable=False),
sa.Column('email', sa.String(length=250), nullable=False),
sa.Column('roles', postgresql.ARRAY(sa.String(length=20), dimensions=1), nullable=False),
sa.Column('enabled', sa.Boolean(), nullable=False),
sa.Column('password', sa.String(), nullable=True),
sa.Column('apikey', sa.String(), nullable=True),
sa.Column('locale', sa.String(length=20), nullable=True),
sa.Column('timezone', sa.String(length=50), nullable=True),
sa.Column('logintime', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_apikey'), 'users', ['apikey'], unique=False)
op.create_index(op.f('ix_users_login'), 'users', ['login'], unique=True)
op.create_table('asoc_group_managers',
sa.Column('group_id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['group_id'], ['groups.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('group_id', 'user_id')
)
op.create_table('asoc_group_members',
sa.Column('group_id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['group_id'], ['groups.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('group_id', 'user_id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('asoc_group_members')
op.drop_table('asoc_group_managers')
op.drop_index(op.f('ix_users_login'), table_name='users')
op.drop_index(op.f('ix_users_apikey'), table_name='users')
op.drop_table('users')
op.drop_index(op.f('ix_groups_name'), table_name='groups')
op.drop_table('groups')
# ### end Alembic commands ###
|
<reponame>kruus/pymde
import numpy as np
import scipy.sparse as sp
import torch
from pymde import problem
from pymde.preprocess.graph import Graph
from pymde.preprocess.preprocess import sample_edges
from pymde import util
def distances(data, retain_fraction=1.0, verbose=False):
"""Compute distances, given data matrix.
This function computes distances between some pairs of items, given
a data matrix. Each row in the data matrix is treated as an item.
Arguments
---------
data: torch.Tensor, np.ndarray, or scipy.sparse matrix
The data matrix, shape ``(n_items, n_features)``.
retain_fraction: float, optional
A float between 0 and 1, specifying the fraction of all ``(n_items
choose 2)`` to compute. For example, if ``retain_fraction`` is 0.1,
only 10 percent of the edges will be stored.
verbose:
If ``True``, print verbose output.
Returns
-------
pymde.Graph
A graph object holding the distances and corresponding edges.
Access the distances with ``graph.distances``, and the edges
with ``graph.edges``.
"""
if not sp.issparse(data) and not isinstance(
data, (np.ndarray, torch.Tensor)
):
raise ValueError(
"`data` must be a scipy.sparse matrix, NumPy array, "
"or torch tensor"
)
n_items = int(data.shape[0])
all_edges = n_items * (n_items - 1) / 2
max_distances = int(retain_fraction * all_edges)
if max_distances is None:
max_distances = np.inf
elif max_distances <= 0:
raise ValueError("max_distances must be positive")
if n_items * (n_items - 1) / 2 < max_distances:
edges = util.all_edges(n_items)
else:
if verbose:
problem.LOGGER.info(f"Sampling {int(max_distances)} edges")
edges = sample_edges(n_items, int(max_distances))
if sp.issparse(data):
if not isinstance(data, sp.csr_matrix):
data = data.tocsr()
edges = edges.cpu().numpy()
if verbose:
problem.LOGGER.info(f"Computing {int(edges.shape[0])} distances")
delta = torch.tensor(
sp.linalg.norm(data[edges[:, 0]] - data[edges[:, 1]], axis=1),
dtype=torch.float,
)
edges = torch.tensor(edges)
elif isinstance(data, (np.ndarray, torch.Tensor)):
if isinstance(data, np.ndarray):
data = torch.tensor(data, dtype=torch.float, device="cpu")
edges = edges.to(data.device)
if verbose:
problem.LOGGER.info(f"Computing {int(edges.shape[0])} distances")
# TODO(akshayka): Batch this computation when the number of edges
# and/or the number of features is large.
delta = (
(data[edges[:, 0]] - data[edges[:, 1]])
.pow(2)
.sum(dim=1)
.float()
.sqrt()
)
return Graph.from_edges(edges, delta, n_items=n_items)
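# A minimal usage sketch (illustrative, not from the library's documentation):
# compute a sparse distance graph for a small random data matrix, keeping 10%
# of all pairs. The variable names are assumptions for the example only.
#
#   import torch
#   data = torch.randn(100, 8)
#   graph = distances(data, retain_fraction=0.1)
#   print(graph.edges.shape, graph.distances.shape)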
def k_nearest_neighbors(data, k, max_distance=None, verbose=False):
"""Compute k-nearest neighbors for each row in data matrix.
Computes the k-nearest neighbor graph of data matrix, under
the Euclidean distance. Each row in the data matrix is treated as an item.
Arguments
---------
data: {torch.Tensor, np.ndarray, scipy.sparse matrix}(
shape=(n_items, n_features))
The data matrix
k: int
The number of nearest neighbors per item
max_distance: float (optional)
If not None, neighborhoods are restricted to have a radius
no greater than `max_distance`.
verbose: bool
If True, print verbose output.
Returns
-------
pymde.Graph
a neighborhood graph
"""
# lazy import, because importing pynndescent takes some time
import pynndescent
if isinstance(data, torch.Tensor):
device = data.device
data = data.cpu().numpy()
else:
device = "cpu"
n = data.shape[0]
if n < 10000:
import sklearn.neighbors
if verbose:
problem.LOGGER.info("Exact nearest neighbors by brute force ")
nn = sklearn.neighbors.NearestNeighbors(
n_neighbors=k + 1, algorithm="brute"
)
nn.fit(data)
distances, neighbors = nn.kneighbors(data)
else:
# TODO default params (n_trees, max_candidates)
index = pynndescent.NNDescent(
data,
n_neighbors=k + 1,
verbose=verbose,
max_candidates=60,
)
neighbors, distances = index.neighbor_graph
neighbors = neighbors[:, 1:]
distances = distances[:, 1:]
n = data.shape[0]
items = np.arange(n)
items = np.repeat(items, k)
edges = np.stack([items, neighbors.flatten()], axis=1)
flip_idx = edges[:, 0] > edges[:, 1]
edges[flip_idx] = np.stack(
[edges[flip_idx][:, 1], edges[flip_idx][:, 0]], axis=1
)
duplicated_edges_mask = edges[:, 0] == edges[:, 1]
if duplicated_edges_mask.any():
problem.LOGGER.warning(
"Your dataset appears to contain duplicated items (rows); "
"when embedding, you should typically have unique items."
)
problem.LOGGER.warning(
"The following items have duplicates "
f"{edges[duplicated_edges_mask][:, 0]}"
)
edges = edges[~duplicated_edges_mask]
weights = torch.ones(edges.shape[0], device=device, dtype=torch.float)
if max_distance is not None:
weights[
torch.tensor(distances.ravel(), device=device, dtype=torch.float)
> max_distance
] = 0.0
# weights for duplicated edges will be summed.
edges = torch.tensor(edges, device=device)
return Graph.from_edges(edges, weights)
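# A minimal usage sketch (illustrative): build a 5-nearest-neighbor graph from
# a NumPy data matrix, zeroing the weight of any edge longer than 2.0.
#
#   data = np.random.rand(200, 16)
#   knn_graph = k_nearest_neighbors(data, k=5, max_distance=2.0)
#   print(knn_graph.edges.shape)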
# TODO(akshayka) figure out this api ...
def _neighborhood_graph(
data, n_neighbors=None, threshold=None, max_distances=None
):
if n_neighbors is None and threshold is None:
n_neighbors = 15
elif n_neighbors is not None and threshold is not None:
raise ValueError(
"only one of n_neighbors and threshold can be non-none"
)
if n_neighbors is not None:
return k_nearest_neighbors(data, n_neighbors)
else:
# TODO: move from recipes to here
raise NotImplementedError
def _distances(index):
pass
def _distance_matrix(data, max_distances=None):
"""Compute a distance matrix from a data matrix"""
# TODO(akshayka): move from recipes to here
# return vector of distances/edges, or a graph, or distance matrix?
raise NotImplementedError
|
#!/usr/bin/env python
from __future__ import print_function
import textwrap
import argparse
import errno
import sys
from binho import binhoHostAdapter
from ..utils import binhoDFUManager
def print_core_info(device):
""" Prints the core information for a device. """
if device.inBootloaderMode:
print("Found a {}".format(device.productName) + " [in DFU Mode]")
print(" Port: {}".format(device.commPort))
print(" Device ID: {}".format(device.deviceID))
print(
" Note: This device is in DFU Mode! It will not respond to USB commands until a firmware update\n\r"
" is completed or it is power cycled."
)
elif device.inDAPLinkMode:
print("Found a {}".format(device.productName) + " [in DAPLink Mode]")
print(" Port: {}".format(device.commPort))
print(" Device ID: {}".format(device.deviceID))
print(
" Note: This device is in DAPlink Mode! It can be returned to host adapter (normal) mode\n\r"
" by issuing 'binho daplink -q' command."
)
else:
fwVersion = device.firmwareVersion
print("Found a {}".format(device.productName))
print(" Port: {}".format(device.commPort))
print(" Device ID: {}".format(device.deviceID))
print(" CMD Version: {}".format(device.commandVersion))
if device.FIRMWARE_UPDATE_URL:
latestVersion = binhoDFUManager.getLatestFirmwareVersion(device.FIRMWARE_UPDATE_URL, True)
if latestVersion:
(latestVerMajor, latestVerMinor, latestVerRev,) = binhoDFUManager.parseVersionString(latestVersion)
(currVerMajor, currVerMinor, currVerRev,) = binhoDFUManager.parseVersionString(fwVersion)
                newFwVerAvail = (currVerMajor, currVerMinor, currVerRev) < (
                    latestVerMajor,
                    latestVerMinor,
                    latestVerRev,
                )
if newFwVerAvail:
print(
" Firmware Version: {} [A newer version is available! Use 'binho dfu' shell command to "
"update.]".format(fwVersion)
)
else:
print(" Firmware Version: {} [Up To Date]".format(fwVersion))
else:
print(" Firmware Version: {}".format(fwVersion))
else:
print(" Firmware Version: {}".format(fwVersion))
    # If this board has any version warnings to display, display them.
warnings = device.version_warnings()
if warnings:
wrapped_warnings = textwrap.wrap(warnings)
wrapped_warnings = "\n".join([" {}".format(line) for line in wrapped_warnings])
print("\n !!! WARNING !!!\n{}\n".format(wrapped_warnings))
def main():
# Set up a simple argument parser.
parser = argparse.ArgumentParser(
description="Utility for gathering information about connected Binho host Adapters"
)
parser.add_argument(
"-q",
"--quiet",
dest="quiet",
action="store_true",
help="Prints only the device name and port of detected Binho host adapters",
)
args = parser.parse_args()
# Try to find all existing devices
devices = binhoHostAdapter(find_all=True)
if not devices:
print("No Binho host adapters found!", file=sys.stderr)
sys.exit(errno.ENODEV)
# Print the board's information...
for device in devices:
if device.inBootloaderMode:
if args.quiet:
print(device.productName + " [DFU] (" + device.commPort + ")")
device.close()
continue
# Otherwise, print the core information.
print_core_info(device)
elif device.inDAPLinkMode:
if args.quiet:
print(device.productName + " [DAPLink] (" + device.commPort + ")")
device.close()
continue
print_core_info(device)
else:
# If we're in quiet mode, print only the serial number and abort.
if args.quiet:
print(device.productName + " (" + device.commPort + ")")
device.close()
continue
# Otherwise, print the core information.
print_core_info(device)
print(" ")
device.close()
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
"""
dialoguss.core
==============
"""
import logging
import os
import os.path
import re
import sys
import yaml
import random
import requests
from abc import ABCMeta, abstractmethod
from argparse import ArgumentParser
SID_MIN = 1000000
SID_MAX = 10000000
LOGGER = logging.getLogger(__name__)
class SessionCollector(object):
"""Collects information about a session, such as no of requests etc.."""
def __init__(self, session):
self.session = session
class Step(object):
def __init__(self, step_no, text, expect, session=None):
self.step_no = step_no
self.text = text
self.expect = expect
self.session = session
self.is_last = True
def send_request(self, data):
"""Sends a request to the http service
        :param data: A dict containing `sessionId`, `phoneNumber`, `text` and `channel` keys
:return: string or None
"""
res = requests.post(self.session.url, data)
response_text = str(res.text)
if res.status_code not in (200, 201):
LOGGER.debug('RESPONSE ERROR (%s) : Got an error: %s', res.status_code, response_text)
response_text = None
return response_text
def execute(self, step_input=None):
"""Executes a step and returns the result of the request
May return an empty string ("") upon failure
"""
LOGGER.debug("Processing step no: %s", self.step_no)
text = step_input
if step_input is None:
text = ""
data = {
'sessionId': self.session.session_id,
'phoneNumber': self.session.phone_number,
'text' : text,
'channel': self.session.channel
}
response_text = self.send_request(data)
        if response_text is None:
            # the request failed, so there is nothing to parse
            return response_text
        if re.search(r'^CON\s?', response_text) is not None:
# strip out the CONTINUE
response_text = response_text.replace("CON ", "")
response_text = response_text.rstrip()
self.is_last = False
elif re.search(r'^END\s?', response_text) is not None:
response_text = response_text.replace("END", "")
response_text = response_text.rstrip()
self.is_last = True
return response_text
class DialStep(Step):
"""DialStep is the first step in the session, dials the USSD service"""
def __init__(self, expect, text="", session=None):
super().__init__(0, text, expect, session)
class Session(metaclass=ABCMeta):
def __init__(self, **kwargs):
self.url = kwargs['url']
self.phone_number = kwargs['phone_number']
self.session_id = kwargs['session_id']
self.channel = kwargs['channel']
self.collector = SessionCollector(self)
self.steps = []
def add_step(self, step):
"""Add a step for this session"""
self.steps.append(step)
@abstractmethod
def run(self):
pass
class InteractiveSession(Session):
"""InteractiveSession runs an interactive `USSD` session via the CLI"""
def run(self):
step_no = 0
response_text = DialStep("", "", self).execute()
sys.stdout.write(response_text + '\n')
while response_text is not None:
step_no += 1
step_input = input("> ")
a_step = Step(step_no, step_input, "", self)
response_text = a_step.execute(step_input)
sys.stdout.write(response_text + '\n')
if a_step.is_last:
response_text = None
class AutomatedSession(Session):
"""AutomatedSession runs an automated session that contains pre-defined
steps (and their expectations)
"""
def run(self):
sys.stdout.write("Running tests for session: {}\n".format(self.session_id))
had_error = False
for step in self.steps:
step.session = self
if isinstance(step, DialStep):
result = step.execute()
else:
result = step.execute(step.text)
            if result != step.expect:
                had_error = True
                sys.stderr.write(
                    "StepAssertionError:\n\tExpected={}\n\tGot={}\n".format(step.expect, result))
        if not had_error:
            sys.stdout.write("All tests successful for session: {}\n".format(self.session_id))
class Dialoguss:
"""Dialoguss is an application that can have one or more pseudo-ussd sessions"""
def __init__(self, yamlCfg, is_interactive=False):
self.config = yamlCfg
self.is_interactive = is_interactive
self.session_url = None
self.dial = None
self.sessions = []
def run(self):
"""Runs the main dialoguss application"""
if self.is_interactive:
with open(self.config) as f:
                yaml_cfg = yaml.safe_load(f)
session = InteractiveSession(
session_id=random.randrange(SID_MIN, SID_MAX),
phone_number=yaml_cfg['phoneNumber'],
channel=yaml_cfg['dial'],
url=yaml_cfg['url']
)
self.sessions.append(session)
session.run()
else:
self.load_sessions()
for session in self.sessions:
session.run()
def load_sessions(self):
"""Loads the sessions for this application"""
with open(self.config) as f:
            yaml_cfg = yaml.safe_load(f)
self.session_url = yaml_cfg['url']
self.dial = yaml_cfg['dial']
if 'sessions' in yaml_cfg:
for s in yaml_cfg['sessions']:
session = AutomatedSession(
session_id=s['id'],
phone_number=s['phoneNumber'],
channel=self.dial,
url=self.session_url
)
first_step = True
for i, step in enumerate(s['steps']):
if first_step:
# session.add_step(DialStep(step.text, step.expect))
session.add_step(DialStep(step['expect']))
first_step = False
continue
session.add_step(Step(i, step['text'], step['expect']))
self.sessions.append(session)
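# A hedged example of the YAML layout that ``load_sessions`` above expects;
# the URL, numbers and menu text below are illustrative only:
#
#   url: http://localhost:5000/ussd
#   dial: "*123#"
#   phoneNumber: "265888123456"
#   sessions:
#     - id: 1234567
#       phoneNumber: "265888123456"
#       steps:
#         - expect: "Welcome. 1. Check balance"
#         - text: "1"
#           expect: "Your balance is 0.00"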
def main():
"""Entry point for the CLI program"""
parser = ArgumentParser(prog="dialoguss")
parser.add_argument("-i", "--interactive", const='interactive', action='store_const', default=False)
parser.add_argument("-f", "--file", default="dialoguss.yaml")
args = parser.parse_args()
dialoguss_app = Dialoguss(args.file, args.interactive)
dialoguss_app.run()
if __name__ == "__main__":
main()
|
"""
Main module.
Provides a class with utility methods for fetching data from the Global Health Observatory.
"""
import xmltodict
import pandas as pd
import requests
import io
from pprint import pprint
BASE_URL = 'http://apps.who.int/gho/athena/api/'
header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36",
"X-Requested-With": "XMLHttpRequest"
}
class GHOSession:
def __init__(self):
resp = requests.get(BASE_URL)
self.data = xmltodict.parse(resp.text)
def get_available_datasets(self):
'''
Check for available datasets
Returns
-------
datasets : list
list of datasets.
'''
datasets = []
for k, v in self.data['GHO']['Metadata'].items():
if k == 'Dataset':
datasets.extend([d for d in v])
return datasets
def get_attributes(self):
'''
Lists attributes used on datasets
Returns
-------
attributes : list
list of attributes represented as dictionaries.
'''
attributes = []
for k, v in self.data['GHO']['Metadata'].items():
if k == 'Attribute':
attributes.extend([d for d in v])
return attributes
def get_dimensions(self, format='dataframe'):
'''
List dimensions of data
Parameters
----------
        format : str, optional
            Output format, either 'dataframe' (default) or 'list'.
Returns
-------
dimensions: dataframe or list
description of every variable in the datasets and their dimension.
'''
dimensions = []
for k, v in self.data['GHO']['Metadata'].items():
if k == 'Dimension':
dimensions.extend([d for d in v])
if format == 'dataframe':
return pd.DataFrame(dimensions)
return dimensions
def get_region_codes(self):
'''
Returns region codes
Returns
-------
regions : dictionary
Dictionary with code, description pairs.
'''
url = BASE_URL + 'REGION'
data = self._fetch_data_as_dict(url)
regions = {c['@Label']: c['Display'] for c in data['GHO']['Metadata']['Dimension']['Code']}
return regions
def get_countries(self, format='dataframe'):
'''
Returns a dataframe with country codes and metadata
Parameters
----------
        format : str, optional
            The default is 'dataframe'; pass 'full' to get the raw list instead.
        Returns
        -------
        data : dataframe or list
            Country info.
'''
url = BASE_URL + 'COUNTRY'
data = self._fetch_data_as_dict(url)
if format == 'dataframe':
lines = []
for d in data['GHO']['Metadata']['Dimension']['Code']:
rec = {k: v for k, v in d.items() if k != 'Attr'}
if 'Attr' in d:
for attr in d['Attr']:
rec[attr['@Category']] = attr['Value']['Display']
lines.append(rec)
return pd.DataFrame(lines)
elif format == 'full':
return data['GHO']['Metadata']['Dimension']['Code']
return data
def fetch_data_from_codes(self, code=None, like='MALARIA'):
"""
Fetches data for a specific indicator code or a list of indicators matching the substring in `like`
:param code: Indicator code to fetch (for a full list of available codes use `get_data_codes` method.)
:param like: substring of the codes desired
:return: Dataframe with table.
"""
url = BASE_URL + 'GHO/'
if code is None:
codes = [c for c in self.get_data_codes(format='label') if like.lower() in c.lower()]
elif isinstance(code, (list, tuple)):
codes = list(code)
elif isinstance(code, str):
codes=[code]
url += ','.join(codes)
url += '&format=csv' if '?' in url else '?format=csv'
response = requests.get(url, headers=header)
file_object = io.StringIO(response.content.decode('utf-8'))
data = pd.read_csv(file_object)
return data
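    # A minimal usage sketch (illustrative, not from the project's docs):
    #
    #   gho = GHOSession()
    #   malaria = gho.fetch_data_from_codes(like='MALARIA')
    #   one_df = gho.fetch_data_from_codes(code='SOME_INDICATOR_CODE')  # hypothetical code
    #
    # Use ``get_data_codes()`` to list the indicator codes actually available.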
def _fetch_data_as_dict(self, url):
"""
Downloads XML data, parses it and return as dict.
:param url:
"""
resp = requests.get(url)
data = xmltodict.parse(resp.text)
return data
def get_data_codes(self, format='dataframe'):
"""
Get Codes that can be fetched as indicators.
        :param format: one of 'full', 'dataframe', 'label' or 'url'
        :return: list of dicts when `format` is 'full', a DataFrame when it is 'dataframe', or a list of strings otherwise.
"""
url = BASE_URL + 'GHO'
data = self._fetch_data_as_dict(url)
# codes = [d for d in data['GHO']['Metadata']['Dimension']['Code']]
if format == 'full':
return [d for d in data['GHO']['Metadata']['Dimension']['Code']]
if format == 'dataframe':
return pd.DataFrame([d for d in data['GHO']['Metadata']['Dimension']['Code']])
elif format == 'label':
return [d['@Label'] for d in data['GHO']['Metadata']['Dimension']['Code']]
elif format == 'url':
return [d['@URL'] for d in data['GHO']['Metadata']['Dimension']['Code']]
if __name__ == "__main__":
GC = GHOSession()
# pprint(GC.get_available_datasets())
# print(len(GC.get_available_datasets()))
# pprint(GC.get_attributes())
# pprint(GC.get_dimensions())
pprint(GC.get_data_codes(format='dataframe'))
# pprint(GC.get_region_codes())
# print(GC.get_countries())
# pprint(GC.fetch_data_from_codes())
|
#!/usr/bin/python
# Copyright (c) 2017 Alibaba Group Holding Limited. <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see http://www.gnu.org/licenses/.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: alicloud_vpc_facts
version_added: "2.4"
short_description: Gather facts on vpcs of Alibaba Cloud.
description:
- This module fetches data from the Open API in Alicloud.
The module must be called from within the vpc itself.
options:
vpc_ids:
description:
- A list of vpc ids.
aliases: ["ids"]
author:
- "<NAME> (@xiaozhu36)"
requirements:
- "python >= 2.6"
- "footmark"
extends_documentation_fragment:
- alicloud
'''
EXAMPLES = '''
# Fetch vpc details according to setting different filters
- name: Fetch vpc details example
hosts: localhost
vars:
alicloud_access_key: <your-alicloud-access-key>
alicloud_secret_key: <your-alicloud-secret-key>
alicloud_region: cn-beijing
vpc_ids:
- xxxxxxxxxxxxx
- xxxxxxxxxxxxx
tasks:
- name: Find all vpcs in the specified region
alicloud_vpc_facts:
alicloud_access_key: '{{ alicloud_access_key }}'
alicloud_secret_key: '{{ alicloud_secret_key }}'
alicloud_region: '{{ alicloud_region }}'
register: vpcs_by_region
- debug: var=vpcs_by_region
- name: Find all vpcs in the specified region by vpc_ids
alicloud_vpc_facts:
alicloud_access_key: '{{ alicloud_access_key }}'
alicloud_secret_key: '{{ alicloud_secret_key }}'
alicloud_region: '{{ alicloud_region }}'
vpc_ids: '{{ vpc_ids }}'
register: vpcs_by_ids
- debug: var=vpcs_by_ids
'''
RETURN = '''
vpc_ids:
description: List all vpc's id after operating vpc.
returned: when success
type: list
sample: [ "vpc-2zegusms7jwd94lq7ix8o", "vpc-2ze5hrb3y5ksx5oa3a0xa" ]
vpcs:
description: Details about the vpcs that were created.
returned: when success
type: list
sample: [
{
"cidr_block": "172.17.0.0/16",
"description": "System created default VPC.",
"is_default": true,
"region_id": "cn-beijing",
"status": "Available",
"tags": {},
"user_cidrs": {
"user_cidr": []
},
"vpc_id": "vpc-2zegusms7jwd94lq7ix8o",
"vpc_name": "",
"vrouter_id": "vrt-2zepnt8dmohmif634a85l",
"vswitch_ids": {
"vswitch_id": [
"vsw-2zepee91iv5sl6tg85xnl",
"vsw-2zeuo4b8jx8tdg9esy8m7",
"vsw-2ze0qexkkuocpru16yh5p"
]
}
},
{
"cidr_block": "192.168.0.0/16",
"description": "",
"is_default": false,
"region_id": "cn-beijing",
"status": "Available",
"tags": {},
"user_cidrs": {
"user_cidr": []
},
"vpc_id": "vpc-2ze5hrb3y5ksx5oa3a0xa",
"vpc_name": "dmeo_vpc",
"vrouter_id": "vrt-2ze60agfbr2wcyt08jfov",
"vswitch_ids": {
"vswitch_id": [
"vsw-2zewmmqum64hvlrididef",
"vsw-2zeob1v20umn67x6i5ybx"
]
}
}
]
total:
description: The number of all vpcs after operating vpc.
returned: when success
type: int
sample: 2
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.alicloud_ecs import ecs_argument_spec, vpc_connect
HAS_FOOTMARK = False
try:
from footmark.exception import VPCResponseError
HAS_FOOTMARK = True
except ImportError:
HAS_FOOTMARK = False
def get_info(vpc):
"""
    Retrieves vpc information from a vpc
    ID and returns it as a dictionary
"""
return {
'cidr_block': vpc.cidr_block,
'description': vpc.description,
'is_default': vpc.is_default,
'region_id': vpc.region_id,
'status': vpc.status,
'tags': vpc.tags,
'user_cidrs': vpc.user_cidrs,
'vpc_id': vpc.vpc_id,
'vpc_name': vpc.vpc_name,
'vrouter_id': vpc.vrouter_id,
'vswitch_ids': vpc.vswitch_ids
}
def main():
argument_spec = ecs_argument_spec()
argument_spec.update(dict(
vpc_ids=dict(type='list', aliases=['ids'])
)
)
module = AnsibleModule(argument_spec=argument_spec)
if HAS_FOOTMARK is False:
module.fail_json(msg="Package 'footmark' required for this module.")
result = []
vpc_ids = module.params['vpc_ids']
    if vpc_ids and (not isinstance(vpc_ids, list) or len(vpc_ids) < 1):
module.fail_json(msg='vpc_ids should be a list of vpc id, aborting')
try:
vpc_conn = vpc_connect(module)
# list all vpc's by ids
if vpc_ids:
for vpc_id in vpc_ids:
vpcs = vpc_conn.get_all_vpcs(vpc_id=vpc_id)
if vpcs and len(vpcs) == 1:
result.append(get_info(vpcs[0]))
# list all vpc's in specified region
else:
vpcs = vpc_conn.get_all_vpcs()
vpc_ids = []
for vpc in vpcs:
vpc_ids.append(vpc.vpc_id)
result.append(get_info(vpc))
except Exception as e:
        module.fail_json(msg="Unable to describe vpc, error: {0}".format(e))
module.exit_json(changed=False, vpc_ids=vpc_ids, vpcs=result, total=len(result))
if __name__ == '__main__':
main()
|
<reponame>ryanbowen/django-datatables
"""
Column classes
"""
from django.urls import reverse
class Column(object):
    # Tracks each time a Column instance is created. Used to retain order.
creation_counter = 0
def __init__(self, title=None, css_class=None, value=None, link=None, link_args=None):
self.title = title
self.value = value
self.link = link
self.css_class = css_class
self.link_args = link_args or []
# Increase the creation counter, and save our local copy.
self.creation_counter = Column.creation_counter
Column.creation_counter += 1
def render_column(self, value):
"""
Returns a rendered value for the field.
"""
return value
def render_column_using_values(self, value, values_dict):
"""
        Return a rendered value which needs to reference the Model's values dict
"""
return value
def get_referenced_values(self):
""" Returns a list of values that will need to be referenced """
values = []
if self.has_link(): # link will need link_args
for link_arg in self.link_args:
if link_arg[0] not in (".", "#"):
values.append(link_arg)
if type(self) == CheckBoxColumn: # CheckBoxColumn's "value" is a field
values.append(self.value)
return values
def render_link(self, value, values_dict):
"""
Returns value wrapped in link tag as specified by link
"""
def get_link_val(key):
""" Gets a link by column, fixed string (#), or class attribute (.)"""
if key.startswith("#"):
return key[1:]
elif key.startswith("."):
return getattr(self, key[1:])
return values_dict[key]
reverse_args = [get_link_val(key) for key in self.link_args]
link = reverse(self.link, args=reverse_args)
return '<a href="{link}">{val}</a>'.format(link=link, val=value)
def has_link(self):
""" Returns True if column has link property set """
return self.link is not None
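# A hypothetical usage sketch (not from this package's documentation): a column
# whose value is wrapped in a link to a named URL pattern. ``link_args`` entries
# are looked up in the row's values dict unless prefixed with "#" (fixed string)
# or "." (attribute of the column itself):
#
#   name = Column(title="Name", link="user-detail", link_args=["id", "#edit"])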
class TextColumn(Column):
pass
class CheckBoxColumn(Column):
def __init__(self, name=None, *args, **kwargs):
self.name = name
self.db_independant = True
super(CheckBoxColumn, self).__init__(*args, **kwargs)
def render_column_using_values(self, value, values_dict):
return '<input id="{id}" type="checkbox" name="{name}" value="{value}"></>'.format(
id='???',
name=self.name if self.name else '',
value=values_dict[self.value],
)
class GlyphiconColumn(Column):
def __init__(self, icon, *args, **kwargs):
self.icon = icon
self.db_independant = True
super(GlyphiconColumn, self).__init__(*args, **kwargs)
def render_column(self, value):
return "<span class='glyphicon glyphicon-{}'></span>".format(self.icon)
class FontAwesome4Column(Column):
def __init__(self, icon, *args, **kwargs):
if icon.startswith('fa-'):
icon = icon[3:]
self.icon = icon
self.db_independant = True
super(FontAwesome4Column, self).__init__(*args, **kwargs)
def render_column(self, value):
return """<i class="fa fa-{}" aria-hidden="true"></i>""".format(self.icon)
class FontAwesome5Column(Column):
def __init__(self, icon, *args, **kwargs):
self.icon = icon
self.db_independant = True
super(FontAwesome5Column, self).__init__(*args, **kwargs)
def render_column(self, value):
return """<i class="{}"></i>""".format(self.icon)
class BulletedListColumn(Column):
def render_column(self, value):
items = '\n'.join(map(lambda v: f'<li>{v}</li>', value))
return f"""
<ul style="margin: 0; padding-left: 1.5em;">
{items}
</ul>
"""
class ConstantTextColumn(Column):
def __init__(self, text, *args, **kwargs):
self.text = text
self.db_independant = True
super(ConstantTextColumn, self).__init__(*args, **kwargs)
def render_column(self, value):
return self.text
class DateColumn(Column):
"""
Renders a date in Y-m-d format
"""
def render_column(self, value):
if value:
return value.strftime("%Y-%m-%d").upper()
return ''
class StringColumn(Column):
"""
Does not ask the database for a column. Used to custom render
values from other columns.
"""
def __init__(self, *args, **kwargs):
self.db_independant = True
super(StringColumn, self).__init__(*args, **kwargs)
|
import cv2
import os
import sys
import numpy as np
import scipy as sp
import pylab as pl
from datetime import datetime
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report
from button_classifier import train_save_test
from matplotlib import pyplot as plt
from pymongo import MongoClient
from bson.binary import Binary
from images import convert
from time import clock
from math import floor
data_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../data/'))
match_directory = os.path.abspath(os.path.join(data_path, 'matching'))
db = MongoClient()
buttons = db['aidu']['elevator_buttons']
query_images = {}
benchmarking = False
benchmark_detector = []
def millis(dt):
"""
    Converts a timedelta object to milliseconds
"""
return (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
def preprocess(image):
"""
    Preprocesses an image by applying adaptive thresholding and a median blur
"""
return cv2.cvtColor(
cv2.medianBlur(
cv2.adaptiveThreshold(
cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 3
), 5
)\
,cv2.COLOR_GRAY2BGR
)
def cut_to_bounding_box(image):
"""
Cuts an image to the largest bounding box that it can find
"""
img = image
contours, hierarchy = cv2.findContours(cv2.cvtColor(255 - img, cv2.COLOR_BGR2GRAY), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
maxArea = 0
x,y,w,h = (0, 0, 100, 100)
for cnt in contours:
if cv2.contourArea(cnt) > maxArea:
x,y,w,h = cv2.boundingRect(cnt)
maxArea = cv2.contourArea(cnt)
return img[y:y+h, x:x+w]
def drawMatches(queryImage, trainImage, k1, k2, matches):
"""
Draws the matches that were detected, for debugging purposes
"""
img1 = queryImage
img2 = trainImage
h1, w1 = img1.shape[:2]
h2, w2 = img2.shape[:2]
view = sp.zeros((max(h1, h2), w1 + w2, 3), sp.uint8)
view[:h1, :w1, :] = img1
view[:h2, w1:, :] = img2
view[:, :, 1] = view[:, :, 0]
view[:, :, 2] = view[:, :, 0]
for m in matches:
color = tuple([sp.random.randint(0, 255) for _ in xrange(3)])
cv2.line(view, (int(k1[m.queryIdx].pt[0]), int(k1[m.queryIdx].pt[1])), (int(k2[m.trainIdx].pt[0] + w1), int(k2[m.trainIdx].pt[1])), color)
cv2.imshow("view", view)
cv2.waitKey()
def keypoint_match(queryImage, trainImage):
img1 = queryImage # queryImage
img2 = trainImage # trainImage
# Initiate SIFT detector
sift = cv2.SIFT()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
# BFMatcher with default params
#kp3, des3 = cv2.goodFeaturesToTrack(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY), 12, 3, 1)
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
# Apply ratio test
good = []
for m,n in matches:
if m.distance < 0.95*n.distance:
good.append(m)
# Score
score = 0
for m in good:
score += 1 / (m.distance + 1)
# cv2.drawMatchesKnn expects list of lists as matches.
#drawMatches(queryImage, trainImage, kp1, kp2, good)
return score
def detect_keypoints(image, detector=''):
feature_detector = cv2.FeatureDetector_create(detector)
if benchmarking:
start = clock()
kpts = feature_detector.detect(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
if benchmarking:
benchmark_detector.append(clock() - start)
return kpts
def compute_descriptors(image, keypoints, descriptor=''):
pass
def match(image, trainImage, matcher):
pass
def get_query_images():
return {f[:-4]: preprocess(cv2.imread(os.path.join(match_directory, f))) for f in os.listdir(match_directory) if '-' not in f}
def progressor(generator):
for idx, item in enumerate(generator):
sys.stdout.write('\r[%d samples loaded]' % idx)
sys.stdout.flush()
yield item
sys.stdout.write('\n')
def test_results(y, y_res, lbl, algo = ''):
cm = confusion_matrix(y, y_res, labels=lbl)
cm = cm / cm.astype(np.float).sum(axis=1)
print classification_report(y, y_res, labels=lbl)
# Show confusion matrix in a separate window
pl.matshow(cm)
pl.title('Confusion matrix')
pl.colorbar()
pl.ylabel('True label')
pl.xlabel('Predicted label')
pl.savefig('/home/rolf/Desktop/Assignment 6/plots/matching/%s.pdf' % algo)
def test_technique(X, y, labels, detector_name='', descriptor_name='', matcher_name=''):
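    # For every image in X: detect keypoints, compute descriptors and match them
    # against each preprocessed query image, keeping the label of the query with
    # the best distance-based score. A per-label acceptance threshold is then
    # derived from the 60th percentile of the scores of correct matches.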
sys.stdout.write('Testing - Detector:%s - Descriptor:%s - Matcher:%s' % (detector_name, descriptor_name, matcher_name))
sys.stdout.flush()
start = clock()
y_res = []
y_real = []
Xt = []
benchmark = []
detector = cv2.FeatureDetector_create(detector_name)
descriptor = cv2.DescriptorExtractor_create(descriptor_name)
matcher = cv2.DescriptorMatcher_create(matcher_name)
k = 6
# Compute query image descriptors
query_images_descriptors = {}
query_images_keypoints = {}
for key, query_image in query_images.iteritems():
query_images_keypoints[key] = detector.detect(query_image)
query_images_descriptors[key] = descriptor.compute(query_image, query_images_keypoints[key])[1]
if query_images_descriptors[key] is not None:
query_images_descriptors[key] = query_images_descriptors[key].astype('float32')
# Compute matches for every image in dataset X
scores = {x: [] for x in query_images}
sys.stdout.write(' ' * 10)
for idx, image in enumerate(X):
sys.stdout.write('\b' * 10 + ('%d' % idx).rjust(10))
sys.stdout.flush()
keypoints = detector.detect(image)
if len(keypoints) > 0:
descriptors = descriptor.compute(image, keypoints)[1]
if descriptors is not None:
descriptors = descriptors.astype('float32')
current_k = max(1, min(k, len(descriptors)-1))
max_score = 0
label = 'NONE'
#print ''
#print y[idx]
start_time = datetime.now()
for key, query_image_descriptors in query_images_descriptors.iteritems():
if query_image_descriptors is not None:
try:
image_matches = matcher.match(query_image_descriptors, trainDescriptors = descriptors)#, k = current_k)
score = 1.0
good_matches = []
for m in image_matches:
good_matches.append((m.distance, m))
good_matches.sort(key=lambda tup: tup[0])
good_matches = [m for d, m in good_matches[:10]]
#print key, ' - ', [m.distance for m in good_matches][:4]
for m in good_matches:
score += m.distance
score = 1.0 / score
if score > max_score:
max_score = score
label = key
if key == y[idx]:
scores[key].append(score)
except Exception as e:
print e
print descriptors
print len(descriptors[0])
print len(descriptors[1])
raise Exception("Quit")
#print label, ' - ', max_score
y_res.append((label, max_score))
y_real.append(y[idx])
benchmark.append(millis(datetime.now() - start_time))
print('')
for key, key_scores in scores.iteritems():
npa = np.array(key_scores)
#print np.median(npa), ' - ', npa.mean(), ' - ', npa.std()
try:
scores[key] = np.percentile(npa, 60) #- npa.std()/10
except:
scores[key] = npa.mean()
for idx, (label, score) in enumerate(y_res):
if score > scores[key]:
y_res[idx] = label
else:
y_res[idx] = 'NONE'
# Test the found results
#clf = LogisticRegression(class_weight='auto')
#clf.fit(Xt, y_real)
#y_pred = clf.predict(Xt)
npa = np.array(benchmark)
print 'Benchmark - Mean: %.2fms - Std: %.2fms' % (npa.mean(), npa.std())
print 'Benchmark - Total: %d minutes %.2f seconds' % (int(floor((clock() - start) / 60)), (clock() - start) % 60)
test_results(y_real, y_res, labels, algo='%s_%s_%s' % (detector_name, descriptor_name, matcher_name))
def test():
"""
    Runs the test set with all possible detectors, descriptors and matchers.
"""
# Get the query images
global query_images
query_images = get_query_images()
for key, image in query_images.iteritems():
query_images[key][ 0: 18, 0:100] = [255, 255, 255]
query_images[key][82:100, 0:100] = [255, 255, 255]
query_images[key][ 0:100, 0: 18] = [255, 255, 255]
query_images[key][ 0:100, 82:100] = [255, 255, 255]
# Get the data test set
X = []
y = []
labels = []
for button in progressor(buttons.find(snapshot=True)):
try:
image = preprocess(convert(button['image'], input_type='ros', output_type='cv2'))
label = button['label'].upper() if button['label'] is not None else 'NONE'
if label not in labels:
labels.append(label)
X.append(image)
y.append(label)
except:
print 'Skipping sample'
# Set up which detectors, descriptors and matchers to use for testing
detector_formats = [""]#,"Grid","Pyramid"]
detector_types = ["HARRIS", "SIFT", "SURF", "ORB", "MSER", "GFTT"]
descriptor_types = ["SIFT", "SURF", "ORB"]
matcher_types = ["BruteForce", "FlannBased"]
# Test all combinations
for detector_format in detector_formats:
for detector_type in detector_types:
for descriptor_type in descriptor_types:
for matcher_type in matcher_types:
if descriptor_type == 'ORB' and detector_type == 'SIFT':
continue
test_technique(X, y, labels, detector_name=detector_format + detector_type, descriptor_name=descriptor_type,
matcher_name=matcher_type)
def main():
test()
if __name__ == "__main__":
main()
#max_score = 0
#label = cls = 'NONE'
#x = np.zeros(len(query_images))
#for idx, (key, qimage) in enumerate(query_images.iteritems()):
# score = keypoint_match(qimage, image)
# x[idx] = score
# if score > max_score:
# max_score = score
# cls = key
#if max_score > 0.025:
# label = cls
#X.append(x)
#y.append(button['label'].upper() if button['label'] is not None else 'NONE')
#y_res.append(label.upper())
#if y[-1] not in labels:
# labels.append(y[-1]) |
import sublime
import sublime_plugin
import re
import os.path
def uniq(list):
seen = set()
return [value for value in list if value not in seen and not seen.add(value)]
def fuzzy_match(prefix, word):
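    # Subsequence match: every character of `prefix` must occur in `word` in
    # order, each match strictly after the previous one.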
query_i, word_i, next_i = 0, -1, -1
while query_i < len(prefix):
word_i = word.find(prefix[query_i], word_i + 1)
if word_i <= next_i:
return False
query_i += 1
next_i = word_i
return True
class Candidate:
def __init__(self, distance, text):
self.distance = distance
self.text = text
def __hash__(self):
return hash(self.text)
    def __eq__(self, other):
        return self.text == other.text
def __str__(self):
return self.text
def __repr__(self):
return 'Candidate(text={self.text!r}, distance={self.distance!r})'.format(self=self)
class AlternativeAutocompleteCommand(sublime_plugin.TextCommand):
candidates = []
previous_completions = {}
def run(self, edit, cycle='next', tab=False):
cmd = self.cmd()
if cmd == 'tab':
if tab:
self.run_tab(edit, cycle)
elif cmd == 'autocomplete':
self.run_sel(edit, cycle)
def cmd(self):
if not self.view.sel():
return None
text = self.view.substr(sublime.Region(0, self.view.size()))
should_tab = True
should_autocomplete = True
for sel in self.view.sel():
position = sel.b
prefix_match = re.search(r'(\w+)\Z', text[:position], re.M | re.U)
if prefix_match:
should_tab = False
else:
should_autocomplete = False
if should_tab:
return 'tab'
elif should_autocomplete:
return 'autocomplete'
def run_tab(self, edit, cycle):
if cycle == 'next':
self.view.run_command('indent')
for sel in self.view.sel():
if self.view.substr(sel.b) == "\n" and (sel.b == 0 or self.view.substr(sel.b - 1) == "\n"):
self.view.insert(edit, sel.b, "\t")
else:
self.view.run_command('unindent')
def run_sel(self, edit, cycle):
if len(self.view.sel()) != len(self.previous_completions):
self.previous_completions = {}
for index, sel in enumerate(self.view.sel()):
self.view.sel().subtract(sel)
try:
previous_completion = self.previous_completions[index]
except KeyError:
previous_completion = None
self.previous_completions[index] = self.run_sel_one(sel, edit, cycle, previous_completion, index == 0)
def run_sel_one(self, sel, edit, cycle, previous_completion, is_first):
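        # Expand the word around this selection's cursor. `previous_completion`
        # is what was inserted on the last invocation; when the current text
        # still matches it, cycle to the next/previous candidate instead of
        # recomputing the candidate list.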
text = self.view.substr(sublime.Region(0, self.view.size()))
position = sel.b
prefix_match = re.search(r'(\w+)\Z', text[:position], re.M | re.U)
postfix_match = re.search(r'\A(\w+)', text[position:], re.M | re.U)
current_prefix = prefix_match.group(1)
current_search = current_prefix
replace_start = prefix_match.start(1)
replace_end = prefix_match.end(1)
if postfix_match and current_prefix != previous_completion:
replace_end += postfix_match.end(1)
current_search += postfix_match.group(1)
if is_first and previous_completion is None or (current_search != previous_completion and current_prefix != previous_completion):
previous_completion = None
self.candidates = self.find_candidates(current_search, position, text)
if not self.candidates:
self.candidates = self.find_candidates(current_prefix, position, text)
replace_end = prefix_match.end(1)
if current_search in self.candidates:
self.candidates.remove(current_search)
if self.candidates:
if previous_completion is None:
completion = self.candidates[0]
else:
if cycle == 'previous':
direction = -1
else:
direction = 1
completion = self.candidates[(self.candidates.index(previous_completion) + direction) % len(self.candidates)]
self.view.replace(edit, sublime.Region(replace_start, replace_end), completion)
previous_completion = completion
else:
completion = current_search
cursor = replace_start + len(completion)
self.view.sel().add(sublime.Region(cursor, cursor))
return previous_completion
@staticmethod
def get_distance(candidate):
return candidate.distance
def find_candidates(self, prefix, position, text):
default_candidates = self.populate_candidates(prefix)
candidates = []
if default_candidates:
default_candidates.sort(key=self.get_distance)
if len(default_candidates) > 100:
default_candidates = default_candidates[0:99]
word_regex = re.compile(r'\b' + re.escape(prefix[0:1]) + r'\w+', re.M | re.U | re.I)
for match in word_regex.finditer(text):
if match.start() < position < match.end():
continue
elif match.end() < position:
location = match.end()
else:
location = match.start()
distance = abs(position - location)
word = match.group()
if word != prefix and fuzzy_match(prefix, word):
candidates.append(Candidate(distance, word))
for default_candidate in default_candidates:
if not any(default_candidate.text == candidate.text for candidate in candidates):
candidates.append(default_candidate)
candidates.sort(key=self.get_distance)
candidates = [candidate.text for candidate in candidates]
if candidates:
candidates.append(prefix)
return uniq(candidates)
def populate_candidates(self, prefix):
settings_name, _ = os.path.splitext(os.path.basename(self.view.settings().get('syntax')))
default_settings = sublime.load_settings("alternative_autocompletion.sublime-settings")
default_candidates = default_settings.get(settings_name, [])
user_settings = sublime.load_settings(settings_name + ".sublime-settings")
user_candidates = user_settings.get('autocomplete', [])
merge = user_settings.get('merge', {}).get(settings_name)
if not merge:
merge = default_settings.get('merge', {}).get(settings_name)
if merge:
for merge_settings_name in merge:
                default_candidates += default_settings.get(merge_settings_name, [])
merge_settings = sublime.load_settings(merge_settings_name + ".sublime-settings")
user_candidates += merge_settings.get('autocomplete', [])
# some languages, like "HTML 5", map to another language, like "PHP"
# so if default_candidates is a str/unicode, look for that list
while isinstance(default_candidates, str):
settings_name = default_candidates
default_candidates = default_settings.get(settings_name)
if not user_candidates:
user_settings = sublime.load_settings(settings_name + ".sublime-settings")
user_candidates = user_settings.get('autocomplete')
if default_candidates:
candidates = [Candidate(self.view.size(), word) for word in default_candidates if fuzzy_match(prefix, word)]
else:
candidates = []
# now merge user settings
if user_candidates:
candidates.extend([Candidate(self.view.size(), word) for word in user_candidates if fuzzy_match(prefix, word)])
return candidates
|
<reponame>matthew-brett/transforms3d<gh_stars>100-1000
''' Functions for working with zooms (scales)
Terms used in function names:
* *mat* : array shape (3, 3) (3D non-homogenous coordinates)
* *aff* : affine array shape (4, 4) (3D homogenous coordinates)
* *zfdir* : zooms encoded by factor scalar and direction vector
'''
import numpy as np
from .utils import normalized_vector
def zfdir2mat(factor, direction=None):
"""Return matrix to scale by factor around origin in direction.
Use factor == -1 for point symmetry.
Parameters
----------
factor : scalar
factor to zoom by (see `direction`)
direction : None or array-like shape (3,), optional
If None, simply apply uniform scaling by `factor`. Otherwise,
apply scaling along direction given by vector `direction`. We
convert direction to a :term:`unit vector` before application.
Returns
-------
mat : array shape (3,3)
3x3 transformation matrix implementing zooms
Examples
--------
>>> v = (np.random.rand(3, 5) - 0.5) * 20.0
>>> S = zfdir2mat(-1.234)
>>> np.allclose(np.dot(S, v), -1.234*v)
True
>>> factor = np.random.random() * 10 - 5
>>> direct = np.random.random(3) - 0.5
>>> S = zfdir2mat(factor, direct)
"""
if direction is None:
# uniform scaling
return np.diag([factor] * 3)
# nonuniform scaling
direction = normalized_vector(direction)
factor = 1.0 - factor
M = np.eye(3)
M -= factor * np.outer(direction, direction)
return M
def zfdir2aff(factor, direction=None, origin=None):
"""Return affine to scale by `factor` around `origin` in `direction`.
Use factor -1 for point symmetry.
Parameters
----------
factor : scalar
factor to zoom by (see direction)
direction : None or array-like shape (3,)
If None, simply apply uniform scaling by `factor`. Otherwise,
apply scaling along direction given by vector `direction`. We
convert direction to a :term:`unit vector` before application.
origin : None or array-like shape (3,)
point at which to apply implied zooms
Returns
-------
aff : array shape (4,4)
4x4 transformation matrix implementing zooms
Examples
--------
>>> v = (np.random.rand(3, 5) - 0.5) * 20.0
>>> S = zfdir2aff(-1.234)[:3,:3]
>>> np.allclose(np.dot(S, v), -1.234*v)
True
>>> factor = np.random.random() * 10 - 5
>>> direct = np.random.random(3) - 0.5
>>> origin = np.random.random(3) - 0.5
>>> S = zfdir2aff(factor, None, origin)
>>> S = zfdir2aff(factor, direct, origin)
"""
M = np.eye(4)
M[:3,:3] = zfdir2mat(factor, direction)
if origin is None:
return M
if direction is None:
M[:3, 3] = origin
M[:3, 3] *= 1.0 - factor
return M
# nonuniform scaling
direction = normalized_vector(direction)
M[:3, 3] = ((1-factor) * np.dot(origin, direction)) * direction
return M
def mat2zfdir(mat):
"""Return scaling factor and direction from zoom (scaling) matrix
Parameters
----------
mat : array-like shape (3,3)
3x3 zoom matrix
Returns
-------
factor : scalar
zoom (scale) factor as for ``zfdir2mat``
direction : None or array, shape (3,)
direction of zoom as for ``zfdir2mat``. None if scaling is
uniform.
Examples
--------
Roundtrip may not generate same factor, direction, but the
generated transformation matrices will be the same
>>> factor = np.random.random() * 10 - 5
>>> S0 = zfdir2mat(factor, None)
>>> f2, d2 = mat2zfdir(S0)
>>> S1 = zfdir2mat(f2, d2)
>>> np.allclose(S0, S1)
True
>>> direct = np.random.random(3) - 0.5
>>> S0 = zfdir2mat(factor, direct)
>>> f2, d2 = mat2zfdir(S0)
>>> S1 = zfdir2mat(f2, d2)
>>> np.allclose(S0, S1)
True
"""
mat = np.asarray(mat, dtype=np.float64)
factor = np.trace(mat) - 2.0
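    # Directional zooms have mat = I + (factor - 1) * d d^T for unit d, so
    # trace(mat) = 2 + factor; the uniform case is corrected below.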
# direction: unit eigenvector corresponding to eigenvalue factor
l, V = np.linalg.eig(mat)
near_factors, = np.nonzero(abs(np.real(l.squeeze()) - factor) < 1e-8)
if near_factors.size == 0:
# uniform scaling
factor = (factor + 2.0) / 3.0
return factor, None
direction = np.real(V[:, near_factors[0]])
return factor, normalized_vector(direction)
def aff2zfdir(aff):
"""Return scaling factor, direction and origin from scaling matrix.
Parameters
----------
aff : array-like shape (4,4)
4x4 :term:`affine transformation` matrix.
Returns
-------
factor : scalar
zoom (scale) factor as for ``zfdir2mat``
direction : None or array, shape (3,)
direction of zoom as for ``zfdir2mat``. None if scaling is
uniform.
origin : array, shape (3,)
origin of zooms
Examples
--------
>>> factor = np.random.random() * 10 - 5
>>> direct = np.random.random(3) - 0.5
>>> origin = np.random.random(3) - 0.5
>>> S0 = zfdir2aff(factor)
>>> f2, d2, o2 = aff2zfdir(S0)
>>> np.allclose(S0, zfdir2aff(f2, d2, o2))
True
>>> S0 = zfdir2aff(factor, direct)
>>> f2, d2, o2 = aff2zfdir(S0)
>>> np.allclose(S0, zfdir2aff(f2, d2, o2))
True
>>> S0 = zfdir2aff(factor, direct, origin)
"""
M = np.asarray(aff, dtype=np.float64)
factor, direction = mat2zfdir(M[:3,:3])
# origin: any eigenvector corresponding to eigenvalue 1
l, V = np.linalg.eig(M)
near_1, = np.nonzero(abs(np.real(l.squeeze()) - 1.0) < 1e-8)
if near_1.size == 0:
raise ValueError("no eigenvector corresponding to eigenvalue 1")
origin = np.real(V[:, near_1[-1]]).squeeze()
origin = origin[:3] / origin[3]
return factor, direction, origin
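# Usage sketch (added illustration, not part of the original module): a small,
# self-contained round trip tying the functions above together. The numeric
# values are arbitrary examples chosen for this sketch.
if __name__ == '__main__':
    factor = 2.5
    direction = np.array([1.0, 2.0, 2.0])  # normalised internally by zfdir2mat
    S = zfdir2mat(factor, direction)
    f2, d2 = mat2zfdir(S)
    # The recovered factor/direction may differ in sign, but they generate the
    # same zoom matrix.
    assert np.allclose(S, zfdir2mat(f2, d2))
    # The affine variant additionally carries the zoom origin in its last column.
    A = zfdir2aff(factor, direction, origin=np.array([0.5, -0.5, 1.0]))
    print(A)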
|
from __future__ import annotations
import dataclasses
from typing import Dict, Tuple
import numpy as np
from coffee.client import BulletClient
from coffee.structs import JointInfo, JointType
@dataclasses.dataclass(frozen=True)
class Joints:
"""A convenience class for accessing the joint information of a PyBullet body.
These are parsed from the URDF. The extracted information is useful for things like
inverse kinematics, which can take advantage of rest poses and joint limits to
refine its solution.
Attributes:
body_id: The unique ID of the body.
joints_info: A tuple of `JointInfo` objects, one for each joint.
controllable_joints: A tuple of indices designating the controllable joints of
the body.
non_controllable_joints: A tuple of indices designating the non-controllable
joints of the body.
"""
body_id: int
joints_info: Tuple[JointInfo, ...]
controllable_joints: Tuple[int, ...]
non_controllable_joints: Tuple[int, ...]
def __repr__(self) -> str:
return f"{self.__class__.__name__}(bodyid={self.body_id}, dof={self.dof})"
# Factory methods.
@staticmethod
def from_body_id(body_id: int, pb_client: BulletClient) -> Joints:
controllable_joints = []
non_controllable_joints = []
joints_info = []
for i in range(pb_client.getNumJoints(body_id)):
joint_info = JointInfo(*pb_client.getJointInfo(body_id, i))
if joint_info.joint_type != JointType.FIXED.value:
controllable_joints.append(joint_info.joint_index)
else:
non_controllable_joints.append(joint_info.joint_index)
joints_info.append(joint_info)
return Joints(
body_id=body_id,
joints_info=tuple(joints_info),
controllable_joints=tuple(controllable_joints),
non_controllable_joints=tuple(non_controllable_joints),
)
# Accessors.
def get_joint_index_from_joint_name(self, joint_name: str) -> int:
for i, joint_info in enumerate(self.joints_info):
if joint_info.joint_name == joint_name:
return i
raise ValueError(f"Joint {joint_name} not found.")
def get_joint_name_from_joint_index(self, joint_index: int) -> str:
return self.joints_info[joint_index].joint_name
def get_joint_index_from_link_name(self, link_name: str) -> int:
for i, joint_info in enumerate(self.joints_info):
if joint_info.link_name == link_name:
return i
raise ValueError(f"Link {link_name} not found.")
def get_link_name_from_joint_index(self, joint_index: int) -> str:
return self.joints_info[joint_index].link_name
def contains_link(self, link_name: str) -> bool:
"""Returns True if the given link name is present in the URDF."""
for joint_info in self.joints_info:
if joint_info.link_name == link_name:
return True
return False
def contains_joint(self, joint_name: str) -> bool:
"""Returns True if the given joint name is present in the URDF."""
for joint_info in self.joints_info:
if joint_info.joint_name == joint_name:
return True
return False
@property
def link_names(self) -> Tuple[str, ...]:
"""Returns a tuple of link names."""
return tuple(joint_info.link_name for joint_info in self.joints_info)
@property
def name2index(self) -> Dict[str, int]:
"""A dictionary mapping joint names to joint indices."""
return {
joint_info.joint_name: i for i, joint_info in enumerate(self.joints_info)
}
@property
def index2name(self) -> Dict[int, str]:
"""A dictionary mapping joint indices to joint names."""
return {
i: joint_info.joint_name for i, joint_info in enumerate(self.joints_info)
}
@property
def dof(self) -> int:
return len(self.controllable_joints)
@property
def joints_lower_limit(self) -> np.ndarray:
lower = []
for joint_info in [self.joints_info[i] for i in self.controllable_joints]:
if joint_info.q_index > -1:
lower.append(joint_info.joint_lower_limit)
else:
lower.append(0.0)
return np.array(lower, dtype=np.float64)
@property
def joints_upper_limit(self) -> np.ndarray:
upper = []
for joint_info in [self.joints_info[i] for i in self.controllable_joints]:
if joint_info.q_index > -1:
upper.append(joint_info.joint_upper_limit)
else:
upper.append(2.0 * np.pi)
return np.array(upper, dtype=np.float64)
@property
def joints_range(self) -> np.ndarray:
# Shape: (dof, 2).
return np.vstack([self.joints_lower_limit, self.joints_upper_limit]).T
@property
def joints_max_force(self) -> np.ndarray:
max_force = []
for joint_info in [self.joints_info[i] for i in self.controllable_joints]:
max_force.append(joint_info.joint_max_force)
return np.array(max_force, dtype=np.float32)
@property
def joints_max_velocity(self) -> np.ndarray:
max_velocity = []
for joint_info in [self.joints_info[i] for i in self.controllable_joints]:
max_velocity.append(joint_info.joint_max_velocity)
return np.array(max_velocity, dtype=np.float64)
# Array creation.
def zeros_array(self) -> np.ndarray:
return np.zeros(self.dof, dtype=np.float64)
def ones_array(self) -> np.ndarray:
return np.ones(self.dof, dtype=np.float64)
def const_array(self, value: float) -> np.ndarray:
return np.full(self.dof, value, dtype=np.float64)
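# Usage sketch (added illustration, not part of the original module). One common
# consumer of the parsed limits is clamping a candidate configuration, e.g. an
# IK solution, to the URDF joint limits; `joints` is assumed to come from
# Joints.from_body_id on an already-loaded body.
def clip_to_limits(joints: Joints, q: np.ndarray) -> np.ndarray:
    """Clip a joint configuration of length ``joints.dof`` to the parsed limits."""
    return np.clip(q, joints.joints_lower_limit, joints.joints_upper_limit)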
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.core import TeaCore
from alibabacloud_tea_openapi.client import Client as OpenApiClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_dingtalk.alitrip_1_0 import models as dingtalkalitrip__1__0_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_openapi_util.client import Client as OpenApiUtilClient
class Client(OpenApiClient):
"""
*\
"""
def __init__(
self,
config: open_api_models.Config,
):
super().__init__(config)
self._endpoint_rule = ''
if UtilClient.empty(self._endpoint):
self._endpoint = 'api.dingtalk.com'
def approve_city_car_apply(
self,
request: dingtalkalitrip__1__0_models.ApproveCityCarApplyRequest,
) -> dingtalkalitrip__1__0_models.ApproveCityCarApplyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.ApproveCityCarApplyHeaders()
return self.approve_city_car_apply_with_options(request, headers, runtime)
async def approve_city_car_apply_async(
self,
request: dingtalkalitrip__1__0_models.ApproveCityCarApplyRequest,
) -> dingtalkalitrip__1__0_models.ApproveCityCarApplyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.ApproveCityCarApplyHeaders()
return await self.approve_city_car_apply_with_options_async(request, headers, runtime)
def approve_city_car_apply_with_options(
self,
request: dingtalkalitrip__1__0_models.ApproveCityCarApplyRequest,
headers: dingtalkalitrip__1__0_models.ApproveCityCarApplyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.ApproveCityCarApplyResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.operate_time):
body['operateTime'] = request.operate_time
if not UtilClient.is_unset(request.remark):
body['remark'] = request.remark
if not UtilClient.is_unset(request.status):
body['status'] = request.status
if not UtilClient.is_unset(request.third_part_apply_id):
body['thirdPartApplyId'] = request.third_part_apply_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.ding_suite_key):
body['dingSuiteKey'] = request.ding_suite_key
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.ding_token_grant_type):
body['dingTokenGrantType'] = request.ding_token_grant_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.ApproveCityCarApplyResponse(),
self.do_roarequest('ApproveCityCarApply', 'alitrip_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/alitrip/cityCarApprovals', 'json', req, runtime)
)
async def approve_city_car_apply_with_options_async(
self,
request: dingtalkalitrip__1__0_models.ApproveCityCarApplyRequest,
headers: dingtalkalitrip__1__0_models.ApproveCityCarApplyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.ApproveCityCarApplyResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.operate_time):
body['operateTime'] = request.operate_time
if not UtilClient.is_unset(request.remark):
body['remark'] = request.remark
if not UtilClient.is_unset(request.status):
body['status'] = request.status
if not UtilClient.is_unset(request.third_part_apply_id):
body['thirdPartApplyId'] = request.third_part_apply_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.ding_suite_key):
body['dingSuiteKey'] = request.ding_suite_key
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.ding_token_grant_type):
body['dingTokenGrantType'] = request.ding_token_grant_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.ApproveCityCarApplyResponse(),
await self.do_roarequest_async('ApproveCityCarApply', 'alitrip_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/alitrip/cityCarApprovals', 'json', req, runtime)
)
def bill_settement_hotel(
self,
request: dingtalkalitrip__1__0_models.BillSettementHotelRequest,
) -> dingtalkalitrip__1__0_models.BillSettementHotelResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.BillSettementHotelHeaders()
return self.bill_settement_hotel_with_options(request, headers, runtime)
async def bill_settement_hotel_async(
self,
request: dingtalkalitrip__1__0_models.BillSettementHotelRequest,
) -> dingtalkalitrip__1__0_models.BillSettementHotelResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.BillSettementHotelHeaders()
return await self.bill_settement_hotel_with_options_async(request, headers, runtime)
def bill_settement_hotel_with_options(
self,
request: dingtalkalitrip__1__0_models.BillSettementHotelRequest,
headers: dingtalkalitrip__1__0_models.BillSettementHotelHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.BillSettementHotelResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.category):
query['category'] = request.category
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.period_start):
query['periodStart'] = request.period_start
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.period_end):
query['periodEnd'] = request.period_end
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.BillSettementHotelResponse(),
self.do_roarequest('BillSettementHotel', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/billSettlements/hotels', 'json', req, runtime)
)
async def bill_settement_hotel_with_options_async(
self,
request: dingtalkalitrip__1__0_models.BillSettementHotelRequest,
headers: dingtalkalitrip__1__0_models.BillSettementHotelHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.BillSettementHotelResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.category):
query['category'] = request.category
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.period_start):
query['periodStart'] = request.period_start
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.period_end):
query['periodEnd'] = request.period_end
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.BillSettementHotelResponse(),
await self.do_roarequest_async('BillSettementHotel', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/billSettlements/hotels', 'json', req, runtime)
)
def get_flight_exceed_apply(
self,
request: dingtalkalitrip__1__0_models.GetFlightExceedApplyRequest,
) -> dingtalkalitrip__1__0_models.GetFlightExceedApplyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.GetFlightExceedApplyHeaders()
return self.get_flight_exceed_apply_with_options(request, headers, runtime)
async def get_flight_exceed_apply_async(
self,
request: dingtalkalitrip__1__0_models.GetFlightExceedApplyRequest,
) -> dingtalkalitrip__1__0_models.GetFlightExceedApplyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.GetFlightExceedApplyHeaders()
return await self.get_flight_exceed_apply_with_options_async(request, headers, runtime)
def get_flight_exceed_apply_with_options(
self,
request: dingtalkalitrip__1__0_models.GetFlightExceedApplyRequest,
headers: dingtalkalitrip__1__0_models.GetFlightExceedApplyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.GetFlightExceedApplyResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.apply_id):
query['applyId'] = request.apply_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.GetFlightExceedApplyResponse(),
self.do_roarequest('GetFlightExceedApply', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/exceedapply/getFlight', 'json', req, runtime)
)
async def get_flight_exceed_apply_with_options_async(
self,
request: dingtalkalitrip__1__0_models.GetFlightExceedApplyRequest,
headers: dingtalkalitrip__1__0_models.GetFlightExceedApplyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.GetFlightExceedApplyResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.apply_id):
query['applyId'] = request.apply_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.GetFlightExceedApplyResponse(),
await self.do_roarequest_async('GetFlightExceedApply', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/exceedapply/getFlight', 'json', req, runtime)
)
def bill_settement_car(
self,
request: dingtalkalitrip__1__0_models.BillSettementCarRequest,
) -> dingtalkalitrip__1__0_models.BillSettementCarResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.BillSettementCarHeaders()
return self.bill_settement_car_with_options(request, headers, runtime)
async def bill_settement_car_async(
self,
request: dingtalkalitrip__1__0_models.BillSettementCarRequest,
) -> dingtalkalitrip__1__0_models.BillSettementCarResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.BillSettementCarHeaders()
return await self.bill_settement_car_with_options_async(request, headers, runtime)
def bill_settement_car_with_options(
self,
request: dingtalkalitrip__1__0_models.BillSettementCarRequest,
headers: dingtalkalitrip__1__0_models.BillSettementCarHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.BillSettementCarResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.category):
query['category'] = request.category
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.period_start):
query['periodStart'] = request.period_start
if not UtilClient.is_unset(request.period_end):
query['periodEnd'] = request.period_end
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.BillSettementCarResponse(),
self.do_roarequest('BillSettementCar', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/billSettlements/cars', 'json', req, runtime)
)
async def bill_settement_car_with_options_async(
self,
request: dingtalkalitrip__1__0_models.BillSettementCarRequest,
headers: dingtalkalitrip__1__0_models.BillSettementCarHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.BillSettementCarResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.category):
query['category'] = request.category
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.period_start):
query['periodStart'] = request.period_start
if not UtilClient.is_unset(request.period_end):
query['periodEnd'] = request.period_end
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.BillSettementCarResponse(),
await self.do_roarequest_async('BillSettementCar', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/billSettlements/cars', 'json', req, runtime)
)
def bill_settement_btrip_train(
self,
request: dingtalkalitrip__1__0_models.BillSettementBtripTrainRequest,
) -> dingtalkalitrip__1__0_models.BillSettementBtripTrainResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.BillSettementBtripTrainHeaders()
return self.bill_settement_btrip_train_with_options(request, headers, runtime)
async def bill_settement_btrip_train_async(
self,
request: dingtalkalitrip__1__0_models.BillSettementBtripTrainRequest,
) -> dingtalkalitrip__1__0_models.BillSettementBtripTrainResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.BillSettementBtripTrainHeaders()
return await self.bill_settement_btrip_train_with_options_async(request, headers, runtime)
def bill_settement_btrip_train_with_options(
self,
request: dingtalkalitrip__1__0_models.BillSettementBtripTrainRequest,
headers: dingtalkalitrip__1__0_models.BillSettementBtripTrainHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.BillSettementBtripTrainResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.category):
query['category'] = request.category
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.period_start):
query['periodStart'] = request.period_start
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.period_end):
query['periodEnd'] = request.period_end
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.BillSettementBtripTrainResponse(),
self.do_roarequest('BillSettementBtripTrain', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/billSettlements/btripTrains', 'json', req, runtime)
)
async def bill_settement_btrip_train_with_options_async(
self,
request: dingtalkalitrip__1__0_models.BillSettementBtripTrainRequest,
headers: dingtalkalitrip__1__0_models.BillSettementBtripTrainHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.BillSettementBtripTrainResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.category):
query['category'] = request.category
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.period_start):
query['periodStart'] = request.period_start
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.period_end):
query['periodEnd'] = request.period_end
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.BillSettementBtripTrainResponse(),
await self.do_roarequest_async('BillSettementBtripTrain', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/billSettlements/btripTrains', 'json', req, runtime)
)
def sync_exceed_apply(
self,
request: dingtalkalitrip__1__0_models.SyncExceedApplyRequest,
) -> dingtalkalitrip__1__0_models.SyncExceedApplyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.SyncExceedApplyHeaders()
return self.sync_exceed_apply_with_options(request, headers, runtime)
async def sync_exceed_apply_async(
self,
request: dingtalkalitrip__1__0_models.SyncExceedApplyRequest,
) -> dingtalkalitrip__1__0_models.SyncExceedApplyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.SyncExceedApplyHeaders()
return await self.sync_exceed_apply_with_options_async(request, headers, runtime)
def sync_exceed_apply_with_options(
self,
request: dingtalkalitrip__1__0_models.SyncExceedApplyRequest,
headers: dingtalkalitrip__1__0_models.SyncExceedApplyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.SyncExceedApplyResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.remark):
query['remark'] = request.remark
if not UtilClient.is_unset(request.apply_id):
query['applyId'] = request.apply_id
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.thirdparty_flow_id):
query['thirdpartyFlowId'] = request.thirdparty_flow_id
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.status):
query['status'] = request.status
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.SyncExceedApplyResponse(),
self.do_roarequest('SyncExceedApply', 'alitrip_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/alitrip/exceedapply/sync', 'json', req, runtime)
)
async def sync_exceed_apply_with_options_async(
self,
request: dingtalkalitrip__1__0_models.SyncExceedApplyRequest,
headers: dingtalkalitrip__1__0_models.SyncExceedApplyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.SyncExceedApplyResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.remark):
query['remark'] = request.remark
if not UtilClient.is_unset(request.apply_id):
query['applyId'] = request.apply_id
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.thirdparty_flow_id):
query['thirdpartyFlowId'] = request.thirdparty_flow_id
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.status):
query['status'] = request.status
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.SyncExceedApplyResponse(),
await self.do_roarequest_async('SyncExceedApply', 'alitrip_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/alitrip/exceedapply/sync', 'json', req, runtime)
)
def add_city_car_apply(
self,
request: dingtalkalitrip__1__0_models.AddCityCarApplyRequest,
) -> dingtalkalitrip__1__0_models.AddCityCarApplyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.AddCityCarApplyHeaders()
return self.add_city_car_apply_with_options(request, headers, runtime)
async def add_city_car_apply_async(
self,
request: dingtalkalitrip__1__0_models.AddCityCarApplyRequest,
) -> dingtalkalitrip__1__0_models.AddCityCarApplyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.AddCityCarApplyHeaders()
return await self.add_city_car_apply_with_options_async(request, headers, runtime)
def add_city_car_apply_with_options(
self,
request: dingtalkalitrip__1__0_models.AddCityCarApplyRequest,
headers: dingtalkalitrip__1__0_models.AddCityCarApplyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.AddCityCarApplyResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.cause):
body['cause'] = request.cause
if not UtilClient.is_unset(request.city):
body['city'] = request.city
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.date):
body['date'] = request.date
if not UtilClient.is_unset(request.project_code):
body['projectCode'] = request.project_code
if not UtilClient.is_unset(request.project_name):
body['projectName'] = request.project_name
if not UtilClient.is_unset(request.status):
body['status'] = request.status
if not UtilClient.is_unset(request.third_part_apply_id):
body['thirdPartApplyId'] = request.third_part_apply_id
if not UtilClient.is_unset(request.third_part_cost_center_id):
body['thirdPartCostCenterId'] = request.third_part_cost_center_id
if not UtilClient.is_unset(request.third_part_invoice_id):
body['thirdPartInvoiceId'] = request.third_part_invoice_id
if not UtilClient.is_unset(request.times_total):
body['timesTotal'] = request.times_total
if not UtilClient.is_unset(request.times_type):
body['timesType'] = request.times_type
if not UtilClient.is_unset(request.times_used):
body['timesUsed'] = request.times_used
if not UtilClient.is_unset(request.title):
body['title'] = request.title
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.ding_suite_key):
body['dingSuiteKey'] = request.ding_suite_key
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.ding_token_grant_type):
body['dingTokenGrantType'] = request.ding_token_grant_type
if not UtilClient.is_unset(request.finished_date):
body['finishedDate'] = request.finished_date
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.AddCityCarApplyResponse(),
self.do_roarequest('AddCityCarApply', 'alitrip_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/alitrip/cityCarApprovals', 'json', req, runtime)
)
async def add_city_car_apply_with_options_async(
self,
request: dingtalkalitrip__1__0_models.AddCityCarApplyRequest,
headers: dingtalkalitrip__1__0_models.AddCityCarApplyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.AddCityCarApplyResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.cause):
body['cause'] = request.cause
if not UtilClient.is_unset(request.city):
body['city'] = request.city
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.date):
body['date'] = request.date
if not UtilClient.is_unset(request.project_code):
body['projectCode'] = request.project_code
if not UtilClient.is_unset(request.project_name):
body['projectName'] = request.project_name
if not UtilClient.is_unset(request.status):
body['status'] = request.status
if not UtilClient.is_unset(request.third_part_apply_id):
body['thirdPartApplyId'] = request.third_part_apply_id
if not UtilClient.is_unset(request.third_part_cost_center_id):
body['thirdPartCostCenterId'] = request.third_part_cost_center_id
if not UtilClient.is_unset(request.third_part_invoice_id):
body['thirdPartInvoiceId'] = request.third_part_invoice_id
if not UtilClient.is_unset(request.times_total):
body['timesTotal'] = request.times_total
if not UtilClient.is_unset(request.times_type):
body['timesType'] = request.times_type
if not UtilClient.is_unset(request.times_used):
body['timesUsed'] = request.times_used
if not UtilClient.is_unset(request.title):
body['title'] = request.title
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.ding_suite_key):
body['dingSuiteKey'] = request.ding_suite_key
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.ding_token_grant_type):
body['dingTokenGrantType'] = request.ding_token_grant_type
if not UtilClient.is_unset(request.finished_date):
body['finishedDate'] = request.finished_date
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.AddCityCarApplyResponse(),
await self.do_roarequest_async('AddCityCarApply', 'alitrip_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/alitrip/cityCarApprovals', 'json', req, runtime)
)
def bill_settement_flight(
self,
request: dingtalkalitrip__1__0_models.BillSettementFlightRequest,
) -> dingtalkalitrip__1__0_models.BillSettementFlightResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.BillSettementFlightHeaders()
return self.bill_settement_flight_with_options(request, headers, runtime)
async def bill_settement_flight_async(
self,
request: dingtalkalitrip__1__0_models.BillSettementFlightRequest,
) -> dingtalkalitrip__1__0_models.BillSettementFlightResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.BillSettementFlightHeaders()
return await self.bill_settement_flight_with_options_async(request, headers, runtime)
def bill_settement_flight_with_options(
self,
request: dingtalkalitrip__1__0_models.BillSettementFlightRequest,
headers: dingtalkalitrip__1__0_models.BillSettementFlightHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.BillSettementFlightResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.category):
query['category'] = request.category
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.period_start):
query['periodStart'] = request.period_start
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.period_end):
query['periodEnd'] = request.period_end
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.BillSettementFlightResponse(),
self.do_roarequest('BillSettementFlight', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/billSettlements/flights', 'json', req, runtime)
)
async def bill_settement_flight_with_options_async(
self,
request: dingtalkalitrip__1__0_models.BillSettementFlightRequest,
headers: dingtalkalitrip__1__0_models.BillSettementFlightHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.BillSettementFlightResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.category):
query['category'] = request.category
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.period_start):
query['periodStart'] = request.period_start
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.period_end):
query['periodEnd'] = request.period_end
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.BillSettementFlightResponse(),
await self.do_roarequest_async('BillSettementFlight', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/billSettlements/flights', 'json', req, runtime)
)
def get_hotel_exceed_apply(
self,
request: dingtalkalitrip__1__0_models.GetHotelExceedApplyRequest,
) -> dingtalkalitrip__1__0_models.GetHotelExceedApplyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.GetHotelExceedApplyHeaders()
return self.get_hotel_exceed_apply_with_options(request, headers, runtime)
async def get_hotel_exceed_apply_async(
self,
request: dingtalkalitrip__1__0_models.GetHotelExceedApplyRequest,
) -> dingtalkalitrip__1__0_models.GetHotelExceedApplyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.GetHotelExceedApplyHeaders()
return await self.get_hotel_exceed_apply_with_options_async(request, headers, runtime)
def get_hotel_exceed_apply_with_options(
self,
request: dingtalkalitrip__1__0_models.GetHotelExceedApplyRequest,
headers: dingtalkalitrip__1__0_models.GetHotelExceedApplyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.GetHotelExceedApplyResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.apply_id):
query['applyId'] = request.apply_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.GetHotelExceedApplyResponse(),
self.do_roarequest('GetHotelExceedApply', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/exceedapply/getHotel', 'json', req, runtime)
)
async def get_hotel_exceed_apply_with_options_async(
self,
request: dingtalkalitrip__1__0_models.GetHotelExceedApplyRequest,
headers: dingtalkalitrip__1__0_models.GetHotelExceedApplyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.GetHotelExceedApplyResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.apply_id):
query['applyId'] = request.apply_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.GetHotelExceedApplyResponse(),
await self.do_roarequest_async('GetHotelExceedApply', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/exceedapply/getHotel', 'json', req, runtime)
)
def query_union_order(
self,
request: dingtalkalitrip__1__0_models.QueryUnionOrderRequest,
) -> dingtalkalitrip__1__0_models.QueryUnionOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.QueryUnionOrderHeaders()
return self.query_union_order_with_options(request, headers, runtime)
async def query_union_order_async(
self,
request: dingtalkalitrip__1__0_models.QueryUnionOrderRequest,
) -> dingtalkalitrip__1__0_models.QueryUnionOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.QueryUnionOrderHeaders()
return await self.query_union_order_with_options_async(request, headers, runtime)
def query_union_order_with_options(
self,
request: dingtalkalitrip__1__0_models.QueryUnionOrderRequest,
headers: dingtalkalitrip__1__0_models.QueryUnionOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.QueryUnionOrderResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.third_part_apply_id):
query['thirdPartApplyId'] = request.third_part_apply_id
if not UtilClient.is_unset(request.union_no):
query['unionNo'] = request.union_no
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.QueryUnionOrderResponse(),
self.do_roarequest('QueryUnionOrder', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/unionOrders', 'json', req, runtime)
)
async def query_union_order_with_options_async(
self,
request: dingtalkalitrip__1__0_models.QueryUnionOrderRequest,
headers: dingtalkalitrip__1__0_models.QueryUnionOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.QueryUnionOrderResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.third_part_apply_id):
query['thirdPartApplyId'] = request.third_part_apply_id
if not UtilClient.is_unset(request.union_no):
query['unionNo'] = request.union_no
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.QueryUnionOrderResponse(),
await self.do_roarequest_async('QueryUnionOrder', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/unionOrders', 'json', req, runtime)
)
def query_city_car_apply(
self,
request: dingtalkalitrip__1__0_models.QueryCityCarApplyRequest,
) -> dingtalkalitrip__1__0_models.QueryCityCarApplyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.QueryCityCarApplyHeaders()
return self.query_city_car_apply_with_options(request, headers, runtime)
async def query_city_car_apply_async(
self,
request: dingtalkalitrip__1__0_models.QueryCityCarApplyRequest,
) -> dingtalkalitrip__1__0_models.QueryCityCarApplyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.QueryCityCarApplyHeaders()
return await self.query_city_car_apply_with_options_async(request, headers, runtime)
def query_city_car_apply_with_options(
self,
request: dingtalkalitrip__1__0_models.QueryCityCarApplyRequest,
headers: dingtalkalitrip__1__0_models.QueryCityCarApplyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.QueryCityCarApplyResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.created_end_at):
query['createdEndAt'] = request.created_end_at
if not UtilClient.is_unset(request.created_start_at):
query['createdStartAt'] = request.created_start_at
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.third_part_apply_id):
query['thirdPartApplyId'] = request.third_part_apply_id
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.QueryCityCarApplyResponse(),
self.do_roarequest('QueryCityCarApply', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/cityCarApprovals', 'json', req, runtime)
)
async def query_city_car_apply_with_options_async(
self,
request: dingtalkalitrip__1__0_models.QueryCityCarApplyRequest,
headers: dingtalkalitrip__1__0_models.QueryCityCarApplyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.QueryCityCarApplyResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.created_end_at):
query['createdEndAt'] = request.created_end_at
if not UtilClient.is_unset(request.created_start_at):
query['createdStartAt'] = request.created_start_at
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.third_part_apply_id):
query['thirdPartApplyId'] = request.third_part_apply_id
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.QueryCityCarApplyResponse(),
await self.do_roarequest_async('QueryCityCarApply', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/cityCarApprovals', 'json', req, runtime)
)
def get_train_exceed_apply(
self,
request: dingtalkalitrip__1__0_models.GetTrainExceedApplyRequest,
) -> dingtalkalitrip__1__0_models.GetTrainExceedApplyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.GetTrainExceedApplyHeaders()
return self.get_train_exceed_apply_with_options(request, headers, runtime)
async def get_train_exceed_apply_async(
self,
request: dingtalkalitrip__1__0_models.GetTrainExceedApplyRequest,
) -> dingtalkalitrip__1__0_models.GetTrainExceedApplyResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkalitrip__1__0_models.GetTrainExceedApplyHeaders()
return await self.get_train_exceed_apply_with_options_async(request, headers, runtime)
def get_train_exceed_apply_with_options(
self,
request: dingtalkalitrip__1__0_models.GetTrainExceedApplyRequest,
headers: dingtalkalitrip__1__0_models.GetTrainExceedApplyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.GetTrainExceedApplyResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.apply_id):
query['applyId'] = request.apply_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.GetTrainExceedApplyResponse(),
self.do_roarequest('GetTrainExceedApply', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/exceedapply/getTrain', 'json', req, runtime)
)
async def get_train_exceed_apply_with_options_async(
self,
request: dingtalkalitrip__1__0_models.GetTrainExceedApplyRequest,
headers: dingtalkalitrip__1__0_models.GetTrainExceedApplyHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkalitrip__1__0_models.GetTrainExceedApplyResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.apply_id):
query['applyId'] = request.apply_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkalitrip__1__0_models.GetTrainExceedApplyResponse(),
await self.do_roarequest_async('GetTrainExceedApply', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/exceedapply/getTrain', 'json', req, runtime)
)
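# Usage sketch (added illustration; the client above is auto-generated). The
# construction below follows the standard Tea OpenAPI pattern used by the
# DingTalk SDKs; the endpoint is filled in by __init__ when left empty, and the
# request values are placeholders, not real credentials or IDs.
if __name__ == '__main__':
    config = open_api_models.Config()
    config.protocol = 'https'
    config.region_id = 'central'
    client = Client(config)
    # Calls then follow the generated methods, e.g. (placeholder corp id):
    # client.query_union_order(dingtalkalitrip__1__0_models.QueryUnionOrderRequest(corp_id='corpId'))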
|
<gh_stars>10-100
import unittest
import numpy as np
import time
import argparse
from utils.ur_msg import create_ur_msg
from utils.test_utils import do_dashboard_command, wait_for_new_message, wait_for_dc_mode
from packages.pyalice import Application, Message, Composite
class ToolIoTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
ip = args.robotip
robot = args.robot
cls.app = Application(name="tool_io_test")
if robot == "e-series":
cls.app.load("packages/universal_robots/ur_robot_driver/apps/ur_eseries_robot.subgraph.json", prefix="ur")
elif robot == "cb3":
cls.app.load("packages/universal_robots/ur_robot_driver/apps/ur_cb3_robot.subgraph.json", prefix="ur")
else: # default to eseries
cls.app.load("packages/universal_robots/ur_robot_driver/apps/ur_eseries_robot.subgraph.json", prefix="ur")
ur_driver = cls.app.nodes["ur.universal_robots"]["UniversalRobots"]
ur_driver.config.robot_ip = ip
ur_driver.config.headless_mode = True
cls.app.start()
io_names = ["tool_digital_out_0", "tool_digital_out_1"]
cls.io_parser = [[x, "none", 1] for x in io_names]
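        # Each parser entry is assumed to follow the Composite quantity layout
        # [name, measure, dimension]; "none"/1 means one dimensionless value per tool pin.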
@classmethod
def tearDownClass(cls):
cls.app.stop()
def test_set_tool_io(self):
"""Test setting tool IO values, and check wheter the values has been set."""
maximum_messages = 5
io_values = np.array([0, 1], dtype=np.float64)
io_target_msg = Composite.create_composite_message(self.io_parser, io_values)
self.app.publish("ur.subgraph", "interface", "io_command", io_target_msg)
messages = 0
io_state = None
while messages < maximum_messages:
io_state_msg = wait_for_new_message(self.app, "ur.subgraph", "interface", "io_state")
if io_state_msg is not None:
io_state = Composite.parse_composite_message(io_state_msg, self.io_parser)
if((io_state==io_values).all()):
break
messages += 1
if messages >= maximum_messages:
self.fail("Could not read desired state after {} messages.".format(maximum_messages))
self.assertEqual(io_state[0], io_values[0])
self.assertEqual(io_state[1], io_values[1])
io_values = np.array([1, 0], dtype=np.float64)
io_target_msg = Composite.create_composite_message(self.io_parser, io_values)
self.app.publish("ur.subgraph", "interface", "io_command", io_target_msg)
messages = 0
while messages < maximum_messages:
io_state_msg = wait_for_new_message(self.app, "ur.subgraph", "interface", "io_state")
if io_state_msg is not None:
io_state = Composite.parse_composite_message(io_state_msg, self.io_parser)
if((io_state==io_values).all()):
break
messages +=1
if messages >= maximum_messages:
self.fail("Could not read desired state after {} messages.".format(maximum_messages))
self.assertEqual(io_state[0], io_values[0])
self.assertEqual(io_state[1], io_values[1])
if __name__ == "__main__":
    # parse the arguments with --test_arg=--robot="cb3" --test_arg=--robotip="127.0.0.1"
parser = argparse.ArgumentParser()
parser.add_argument("--robot", help="robot generation to test against", choices=["e-series", "cb3"], default="e-series")
parser.add_argument("--robotip", help="ip address of the robot", default="127.0.0.1")
args = parser.parse_args()
    # argv is reduced to a dummy program name so unittest does not try to parse
    # the custom --robot/--robotip flags consumed by argparse above.
    unittest.main(argv=["tool_io_test"]) |
#!/usr/bin/python
import os
import sys
import csv
import mysql.connector
import argparse
#Argparse ActionClass check extension
def CheckExt(choices):
class Act(argparse.Action):
def __call__(self,parser,namespace,fname,option_string=None):
ext = os.path.splitext(fname)[1][1:]
if ext not in choices:
option_string = '({})'.format(option_string) if option_string else ''
if type(choices) is str:
parser.error("file doesn't end with {}{}".format(choices,option_string))
elif len(choices) == 1:
parser.error("file doesn't end with {}{}".format(list(choices)[0],option_string))
else:
parser.error("file doesn't end with one of {}{}".format(choices,option_string))
else:
setattr(namespace,self.dest,fname)
return Act
#Setup argparse
parser = argparse.ArgumentParser(description='Converts csv to sql queries for The Art of Hiking', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('csvFile', help='CSV filename', type=str, action=CheckExt('csv'))
parser.add_argument('-tn', '--tname', help='Alternate trail name, default is based on CSV filename', metavar='', type=str)
group = parser.add_mutually_exclusive_group()
group.add_argument('-f', action='store_true', default=False, help='Write SQL queries to a file')
group.add_argument('-p', action='store_true', default=False, help='Print SQL queries stdout')
args = parser.parse_args()
cnx = None
cursor = None
#Setup output
if args.p:
pass
elif args.f:
sys.stdout = open(args.csvFile.replace(".csv", ".sql"), "w")
args.p = True
else:
cnx = mysql.connector.connect(user='ghanas', password='<PASSWORD>', database='ghanas', host='dbs.eecs.utk.edu')
cursor = cnx.cursor()
#Read in the csv
with open(args.csvFile, 'rb') as csvfile:
spamreader = csv.reader(csvfile)
a = [{k: float(v) for k, v in row.items()}
for row in csv.DictReader(csvfile, skipinitialspace=True)]
#Create Trail Name from args
tName = ""
if args.tname is None:
tName = args.csvFile[:-4].replace("_", " ")
else:
tName = args.tname
#Get the trailId
tId = 0
query = ("SELECT trailId FROM Trail WHERE trailName = %s")
if args.p:
print("SET @tId = (" + query % ("'" + tName + "'") + "); -- If NULL, trail not found")
#Needs error checking for sql side
else:
cursor.execute(query, (tName,))
tIdQResponse = cursor.fetchone()
if tIdQResponse is None:
sys.exit('Trail not found: {}'.format(tName))
tId = tIdQResponse[0]
tpId = 0
for i, r in enumerate(a):
#Always insert the TrailPoint
query = ("INSERT INTO TrailPoint (longitudePoint, latitudePoint, elevationPoint, trailId, previousPoint) VALUES(%s, %s, %s, %s, %s)")
if args.p:
print(query % (r['lng'], r['lat'], r['ele'], '@tId', '@tpId' if i != 0 else 'NULL') + ";")
else:
cursor.execute(query, (r['lng'], r['lat'], r['ele'], tId, tpId if i != 0 else None))
if i == 0:
        #Get the trailPointId of the point just inserted (first point of this trail's series)
query = ("SELECT trailPointId FROM TrailPoint ORDER BY trailId DESC LIMIT 1")
if args.p:
print("SET @tpId = (" + query + ");")
else:
cursor.execute(query)
tpId = cursor.fetchone()[0]
else:
            #Increment trailPointId (previousPoint for the next insert)
if args.p:
print("SET @tpId = @tpId + 1;")
else:
tpId += 1
if i == 0 or i == (len(a) - 1):
#Insert the TrailPoint into TrailHead
query = ("INSERT INTO TrailHead (parkingLotId, trailPointId) VALUES(NULL, %s)")
if args.p:
print(query % ('@tpId') + ";")
else:
cursor.execute(query, (tpId,))
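# Illustrative example (not part of the original script): with -p, a two-point trail
# named "Some Trail" produces SQL of roughly this shape (coordinate values made up):
#   SET @tId = (SELECT trailId FROM Trail WHERE trailName = 'Some Trail'); -- If NULL, trail not found
#   INSERT INTO TrailPoint (longitudePoint, latitudePoint, elevationPoint, trailId, previousPoint) VALUES(-83.5, 35.6, 1200.0, @tId, NULL);
#   SET @tpId = (SELECT trailPointId FROM TrailPoint ORDER BY trailId DESC LIMIT 1);
#   INSERT INTO TrailHead (parkingLotId, trailPointId) VALUES(NULL, @tpId);
#   INSERT INTO TrailPoint (longitudePoint, latitudePoint, elevationPoint, trailId, previousPoint) VALUES(-83.6, 35.7, 1250.0, @tId, @tpId);
#   SET @tpId = @tpId + 1;
#   INSERT INTO TrailHead (parkingLotId, trailPointId) VALUES(NULL, @tpId);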
if args.f:
sys.stdout.close()
else:
cursor.close()
cnx.close()
|
import numpy as np
from layers import *
class RNN(object):
def __init__(self, vocab_dim, idx_to_char, input_dim=30, hidden_dim=25, cell_type='lstm'):
"""Takes as arguments
vocab_dim: The number of unique characters/words in the dataset
idx_to_char: A dictionary converting integer representations of vocabulary to string form.
Mainly used in the sample function in order to neatly output results
input_dim: Reduces one-hot encoding dimension of character from size vocab_dim to a vector of size input_dim
hidden_dim: Size of hidden dimension
cell_type: must choose one of 'lstm' or 'vanilla'
Automatically initializes all weights"""
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.vocab_dim = vocab_dim
self.cell_type = cell_type
self.idx_to_char = idx_to_char
if cell_type != 'lstm' and cell_type != 'vanilla':
raise ValueError('Invalid cell type. Please choose lstm or vanilla')
self.dim_mul = (1 if cell_type == 'vanilla' else 4)
# self.idx_to_char = idx_to_char
self._initialize_params()
def _initialize_params(self):
"""Initialize all weights. We use He normalization for weights and
zeros for biases. We also initialize the zeroth hidden state h0"""
D, H, V = self.input_dim, self.hidden_dim, self.vocab_dim
self.params = {}
self.params['b'] = np.zeros(self.dim_mul*H)
self.params['Wx'] = 2*np.random.randn(D,self.dim_mul*H)/np.sqrt(D)
self.params['Wh'] = 2*np.random.randn(H,self.dim_mul*H)/np.sqrt(H)
self.params['b_out'] = np.zeros(V)
self.params['W_out'] = 2*np.random.randn(H,V)/np.sqrt(H)
self.params['W_embed'] = 2*np.random.randn(V,D)/np.sqrt(V)
self.h0 = np.random.randn(1,H)
def loss(self, inputs, targets):
"""inputs: an array of size (N,T,D), for N the minibatch size, T the sequence length, and D the input_dim
targets: an array of size (N,T) consisting of integers in [0,vocab_dim). Each value is the target characters
given the (N,T)^th input.
Outputs:
Loss -> the loss function taken over all N and T
grads -> a dictionary containing the gradients of all parameters in self.parameters
"""
loss, grads = 0, {}
# VERY IMPORTANT. We must name the items in grads identical to their names in self.params!
# Unpack params
b = self.params['b']
b_out = self.params['b_out']
Wx = self.params['Wx']
Wh = self.params['Wh']
W_out = self.params['W_out']
W_embed = self.params['W_embed']
# x is a sequence of integers of length T, with integers in [0,V).
# we can always change this input later if we choose
# We use an embedding matrix W_embed: (N,T) -> (N,T,D) that generalizes the one-hot-encoding
# i.e. one-hot would be directly x: (N,T) -> (N,T,V) for V size of vocabulary
# Forward pass
inputs = (np.expand_dims(inputs, axis=0) if len(inputs.shape)==1 else inputs)
x, cache_embed = embed_forward(inputs, W_embed)
h_prev = np.broadcast_to(self.h0,(len(inputs),self.h0.shape[1]))
h, cache_h = (lstm_all_forward(x, Wx, b, h_prev, Wh) if self.cell_type=='lstm' else vanilla_all_forward(x, Wx, b, h_prev, Wh))
probs, cache_probs = affine_all_forward(h, W_out, b_out)
loss, dprobs = softmax_loss_all(probs, targets)
# Backward pass
dh, grads['W_out'], grads['b_out'] = affine_all_backward(dprobs, cache_probs)
dx, grads['Wx'], grads['b'], grads['Wh'] = (lstm_all_backward(dh, cache_h) if self.cell_type=='lstm' else vanilla_all_backward(dh, cache_h))
grads['W_embed'] = embed_backward(dx, cache_embed)
# reset memory layer to last in batch, last in sequence
self.h0 = h[-1,-1,:].reshape(1,-1)
# return loss and gradient
return loss, grads
def sample(self, seed_idx=None, T=200, h0=None, p_power=1):
"""Inputs: seed_idx=None -> the starting character index for the generated sequences
T=200 -> the default length of sequence to output
h0=self.h0 -> the current memory, i.e. initial hidden state. Defaults to last computed h0
p_power=1 -> raises probability distribution of next character by power p_power.
A higher p_power produces more deterministic, higher-probability output;
it tends to give short repeating sequences, but with well-defined words."""
if h0 is None:
h0 = self.h0
if seed_idx is None:
seed_idx = np.random.choice(self.vocab_dim)
#initialize word
idxs = [seed_idx]
# unpack weights
b = self.params['b']
b_out = self.params['b_out']
Wx = self.params['Wx']
Wh = self.params['Wh']
W_out = self.params['W_out']
W_embed = self.params['W_embed']
# Forward pass only
x, _ = embed_forward(seed_idx, W_embed)
x = np.expand_dims(x, axis=0)
c = np.zeros_like(h0)
for t in range(T):
if self.cell_type == 'lstm':
c, h0, _ = lstm_forward(x, Wx, b, h0, Wh, c)
else:
h0, _ = vanilla_forward(x, Wx, b, h0, Wh)
probs, _ = affine_forward(h0, W_out, b_out)
probs = np.squeeze(probs)
# predict next entry
probs = np.exp(probs-np.max(probs))
probs = probs**p_power
probs /= np.sum(probs)
idx = np.random.choice(np.arange(len(probs)),p=probs.ravel())
idxs.append(idx)
x, _ = embed_forward(idx, W_embed)
x = np.expand_dims(x, axis=0)
# return index list
return ''.join([self.idx_to_char[i] for i in idxs])
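# Illustrative usage sketch (not part of the original module). It assumes the `layers`
# helpers imported above are available and that `loss` accepts integer-encoded
# sequences of shape (T,) with targets of shape (1, T), as described in its docstring.
if __name__ == '__main__':
    text = "hello world, hello rnn"
    chars = sorted(set(text))
    idx_to_char = {i: c for i, c in enumerate(chars)}
    char_to_idx = {c: i for i, c in idx_to_char.items()}
    encoded = np.array([char_to_idx[c] for c in text])
    model = RNN(vocab_dim=len(chars), idx_to_char=idx_to_char, cell_type='lstm')
    # One training step: inputs are all characters but the last, targets are shifted by one.
    loss, grads = model.loss(encoded[:-1], encoded[1:].reshape(1, -1))
    # Plain SGD update on every parameter.
    for name in model.params:
        model.params[name] -= 1e-2 * grads[name]
    print('loss:', loss)
    print(model.sample(seed_idx=char_to_idx['h'], T=40))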
#########################################
import numpy as np
from layers import *
class TwoHiddenLayerRNN(object):
def __init__(self, vocab_dim, idx_to_char, input_dim=30, hidden_dim=25, H2=25, cell_type='lstm'):
"""Takes as arguments
vocab_dim: The number of unique characters/words in the dataset
idx_to_char: A dictionary converting integer representations of vocabulary to string form.
Mainly used in the sample function in order to neatly output results
input_dim: Reduces one-hot encoding dimension of character from size vocab_dim to a vector of size input_dim
hidden_dim: Size of hidden dimension
cell_type: must choose one of 'lstm' or 'vanilla'
Automatically initializes all weights"""
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.H2 = H2
self.vocab_dim = vocab_dim
self.cell_type = cell_type
self.idx_to_char = idx_to_char
if cell_type != 'lstm' and cell_type != 'vanilla':
raise ValueError('Invalid cell type. Please choose lstm or vanilla')
self.dim_mul = (1 if cell_type == 'vanilla' else 4)
# self.idx_to_char = idx_to_char
self._initialize_params()
def _initialize_params(self):
"""Initialize all weights. We use He normalization for weights and
zeros for biases. We also initialize the zeroth hidden states h0 and h1"""
D, H, V, H2 = self.input_dim, self.hidden_dim, self.vocab_dim, self.H2
self.params = {}
self.params['b'] = np.zeros(self.dim_mul*H)
self.params['Wx'] = 2*np.random.randn(D,self.dim_mul*H)/np.sqrt(D)
self.params['Wh'] = 2*np.random.randn(H,self.dim_mul*H)/np.sqrt(H)
self.params['Wh2'] = 2*np.random.randn(H2,self.dim_mul*H2)/np.sqrt(H2)
self.params['b2'] = np.zeros(self.dim_mul*H2)
self.params['W2'] = 2*np.random.randn(H,self.dim_mul*H2)/np.sqrt(H)
self.params['b_out'] = np.zeros(V)
self.params['W_out'] = 2*np.random.randn(H2,V)/np.sqrt(H2)
self.params['W_embed'] = 2*np.random.randn(V,D)/np.sqrt(V)
self.h0 = np.random.randn(1,H)
self.h1 = np.random.randn(1,H2)
def loss(self, inputs, targets):
"""inputs: an array of size (N,T,D), for N the minibatch size, T the sequence length, and D the input_dim
targets: an array of size (N,T) consisting of integers in [0,vocab_dim). Each value is the target characters
given the (N,T)^th input.
Outputs:
Loss -> the loss function taken over all N and T
grads -> a dictionary containing the gradients of all parameters in self.parameters
"""
loss, grads = 0, {}
# VERY IMPORTANT. We must name the items in grads identical to their names in self.params!
# Unpack params
b = self.params['b']
b2 = self.params['b2']
b_out = self.params['b_out']
Wx = self.params['Wx']
Wh = self.params['Wh']
Wh2 = self.params['Wh2']
W2 = self.params['W2']
W_out = self.params['W_out']
W_embed = self.params['W_embed']
# x is a sequence of integers of length T, with integers in [0,V).
# we can always change this input later if we choose
# We use an embedding matrix W_embed: (N,T) -> (N,T,D) that generalizes the one-hot-encoding
# i.e. one-hot would be directly x: (N,T) -> (N,T,V) for V size of vocabulary
# Forward pass
inputs = (np.expand_dims(inputs, axis=0) if len(inputs.shape)==1 else inputs)
x, cache_embed = embed_forward(inputs, W_embed)
h_prev = np.broadcast_to(self.h0,(len(inputs),self.h0.shape[1]))
h, cache_h = (lstm_all_forward(x, Wx, b, h_prev, Wh) if self.cell_type=='lstm' else vanilla_all_forward(x, Wx, b, h_prev, Wh))
h_prev = np.broadcast_to(self.h1,(len(inputs),self.h1.shape[1]))
h2, cache_h2 = (lstm_all_forward(h, W2, b2, h_prev, Wh2) if self.cell_type=='lstm' else vanilla_all_forward(h, W2, b2, h_prev, Wh2))
probs, cache_probs = affine_all_forward(h2, W_out, b_out)
loss, dprobs = softmax_loss_all(probs, targets)
# Backward pass
dh2, grads['W_out'], grads['b_out'] = affine_all_backward(dprobs, cache_probs)
dh, grads['W2'], grads['b2'], grads['Wh2'] = (lstm_all_backward(dh2, cache_h2) if self.cell_type=='lstm' else vanilla_all_backward(dh2, cache_h2))
dx, grads['Wx'], grads['b'], grads['Wh'] = (lstm_all_backward(dh, cache_h) if self.cell_type=='lstm' else vanilla_all_backward(dh, cache_h))
grads['W_embed'] = embed_backward(dx, cache_embed)
# reset memory layer to last in batch, last in sequence
self.h0 = h[-1,-1,:].reshape(1,-1)
self.h1 = h2[-1,-1,:].reshape(1,-1)
# return loss and gradient
return loss, grads
def sample(self, seed_idx=None, T=200, h0=None, h1=None, p_power=1):
"""Inputs: seed_idx=None -> the starting character index for the generated sequences
T=200 -> the default length of sequence to output
h0=self.h0 -> the current memory, i.e. initial hidden state. Defaults to last computed h0
p_power=1 -> raises probability distribution of next character by power p_power.
A higher p_power produces more deterministic, higher-probability output;
it tends to give short repeating sequences, but with well-defined words."""
if h0 is None:
h0 = self.h0
if h1 is None:
h1 = self.h1
if seed_idx is None:
seed_idx = np.random.choice(self.vocab_dim)
#initialize word
idxs = [seed_idx]
# unpack weights
b = self.params['b']
b_out = self.params['b_out']
b2 = self.params['b2']
Wx = self.params['Wx']
Wh = self.params['Wh']
Wh2 = self.params['Wh2']
W2 = self.params['W2']
W_out = self.params['W_out']
W_embed = self.params['W_embed']
# Forward pass only
x, _ = embed_forward(seed_idx, W_embed)
x = np.expand_dims(x, axis=0)
c = np.zeros_like(h0)
c2 = np.zeros_like(h1)
for t in range(T):
if self.cell_type == 'lstm':
c, h0, _ = lstm_forward(x, Wx, b, h0, Wh, c)
else:
h0, _ = vanilla_forward(x, Wx, b, h0, Wh)
if self.cell_type == 'lstm':
c2, h1, _ = lstm_forward(h0, W2, b2, h1, Wh2, c2)
else:
h1, _ = vanilla_forward(h0, W2, b2, h1, Wh2)
probs, _ = affine_forward(h1, W_out, b_out)
probs = np.squeeze(probs)
# predict next entry
probs = np.exp(probs-np.max(probs))
probs = probs**p_power
probs /= np.sum(probs)
idx = np.random.choice(np.arange(len(probs)),p=probs.ravel())
idxs.append(idx)
x, _ = embed_forward(idx, W_embed)
x = np.expand_dims(x, axis=0)
# return index list
return ''.join([self.idx_to_char[i] for i in idxs])
|
<gh_stars>10-100
"""
desisim.scripts.pixsim_nights
=============================
Entry point for generating pixel-level simulated DESI data for one or more nights.
"""
from __future__ import absolute_import, division, print_function
import os, sys
import re
import os.path
import shutil
import random
from time import asctime
import numpy as np
import desimodel.io
from desiutil.log import get_logger
import desispec.io
from desispec.parallel import stdouterr_redirected
from . import pixsim
from ..pixsim import simulate_exposure
from ..pixsim import get_nodes_per_exp
from ..pixsim import mpi_count_nodes
from ..pixsim import mpi_split_by_node
from ..io import SimSpec
from .. import obs, io
import argparse
import desispec.io as specio
from .. import io as simio
log = get_logger()
def parse(options=None):
parser = argparse.ArgumentParser(
description = 'Generate pixel-level simulated DESI data for one or more nights',
)
parser.add_argument("--nights", type=str, default=None, required=False, help="YYYYMMDD,YYYYMMDD,YYYYMMDD")
parser.add_argument("--verbose", action="store_true", help="Include debug log info")
parser.add_argument("--overwrite", action="store_true", help="Overwrite existing raw and simpix files")
parser.add_argument("--cosmics", default=None, action="store_true", required=False, help="Add simulated cosmics")
# parser.add_argument("--seed", type=int, default=123456, required=False, help="random number seed")
# parser.add_argument("--nspec", type=int, help="Number of spectra to simulate per camera")
parser.add_argument("--nexp", type=int, help="Number of exposures to process")
# parser.add_argument("--wavemin", type=float, help="Minimum wavelength to simulate")
# parser.add_argument("--wavemax", type=float, help="Maximum wavelength to simulate")
parser.add_argument("--cameras", type=str, default=None, help="cameras, e.g. b0,r5,z9")
parser.add_argument("--nodes_per_exp", type=int, default=None, help="nodes per exposure")
args = None
if options is None:
args = parser.parse_args()
else:
options = [str(x) for x in options]
args = parser.parse_args(options)
return args
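# Example (illustrative): parse(["--nights", "20200314,20200315", "--cameras", "b0,r5", "--nexp", "2"])
# returns an argparse.Namespace with those fields set and the remaining options at their defaults.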
def main(args, comm=None):
if args.verbose:
import logging
log.setLevel(logging.DEBUG)
#we do this so we can use operator.itemgetter
import operator
#we do this so our print statements can have timestamps
import time
rank = 0
nproc = 1
if comm is not None:
import mpi4py
rank = comm.rank
nproc = comm.size
if rank == 0:
log.info('Starting pixsim at {}'.format(asctime()))
#no preflight check here, too complicated.
#we'll assume the user knows what he or she is doing...
# Determine which nights we are using
nights = None
if args.nights is not None:
nights = args.nights.split(",")
else:
rawdir = os.path.abspath(specio.rawdata_root())
nights = []
nightpat = re.compile(r"\d{8}")
for root, dirs, files in os.walk(rawdir, topdown=True):
for d in dirs:
nightmat = nightpat.match(d)
if nightmat is not None:
nights.append(d)
# Get the list of exposures for each night
night_expid = {}
all_expid = []
exp_to_night = {}
for nt in nights:
night_expid[nt] = specio.get_exposures(nt, raw=True)
#get a list of tuples of (night,expid) that we can evenly divide between communicators
night_exposure_list=list()
if comm is None or comm.rank == 0:
for nt in nights:
for exp in night_expid[nt]:
rawfile = desispec.io.findfile('raw', nt, exp)
if not os.path.exists(rawfile):
night_exposure_list.append([nt,exp])
elif args.overwrite:
log.warning('Overwriting pre-existing {}'.format(os.path.basename(rawfile)))
os.remove(rawfile)
night_exposure_list.append([nt,exp])
else:
log.info('Skipping pre-existing {}'.format(os.path.basename(rawfile)))
if args.nexp is not None:
night_exposure_list = night_exposure_list[0:args.nexp]
if comm is not None:
night_exposure_list = comm.bcast(night_exposure_list, root=0)
if len(night_exposure_list) == 0:
if comm is None or comm.rank == 0:
log.error('No exposures to process')
sys.exit(1)
# Get the list of cameras and make sure it's in the right format
cams = []
if args.cameras is not None:
entry = args.cameras.split(',')
for i in entry:
cams.append(i)
else:
#do this with band first so we can avoid re-broadcasting cosmics
for band in ['b', 'r', 'z']:
for spec in range(10):
cams.append('{}{}'.format(band, spec))
#are we using cosmics?
if args.cosmics is not None:
addcosmics = True
else:
addcosmics = False
ncameras=len(cams)
nexposures=len(night_exposure_list)
#call utility function to figure out how many nodes we have
nnodes=mpi_count_nodes(comm)
#call utility functions to divide our workload
if args.nodes_per_exp is not None:
user_specified_nodes=args.nodes_per_exp
else:
user_specified_nodes=None
nodes_per_comm_exp=get_nodes_per_exp(nnodes,nexposures,ncameras,user_specified_nodes)
#also figure out how many exposure communicators we have
num_exp_comm = nnodes // nodes_per_comm_exp
#split the communicator into exposure communicators
comm_exp, node_index_exp, num_nodes_exp = mpi_split_by_node(comm, nodes_per_comm_exp)
#further splitting will happen automatically in simulate_exposure
#based on this, figure out which simspecfiles and rawfiles are assigned to each communicator
#find all specfiles
#find all rawfiles
rawfile_list=[]
simspecfile_list=[]
night_list=[]
expid_list=[]
for i in range(len(night_exposure_list)):
night_list.append(night_exposure_list[i][0])
expid_list.append(night_exposure_list[i][1])
rawfile_list.append(desispec.io.findfile('raw', night_list[i], expid_list[i]))
simspecfile_list.append(io.findfile('simspec', night_list[i], expid_list[i]))
#now divvy the rawfiles and specfiles between node communicators
#there is one rawfile and one specfile for each exposure
rawfile_comm_exp=[]
simspecfile_comm_exp=[]
for i in range(num_exp_comm):
if node_index_exp == i: #assign rawfile, simspec file to one communicator at a time
rawfile_comm_exp=rawfile_list[i::num_exp_comm]
simspecfile_comm_exp=simspecfile_list[i::num_exp_comm]
night_comm_exp=night_list[i::num_exp_comm]
expid_comm_exp=expid_list[i::num_exp_comm]
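#e.g. (illustrative) with 8 nodes and nodes_per_comm_exp=2 there are 4 exposure
#communicators; communicator 0 gets exposures 0, 4, 8, ... and communicator 3 gets 3, 7, 11, ...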
comm.Barrier()
#now wrap pixsim.simulate_exposure for each exposure (in desisim.pixsim)
if comm_exp.rank == 0:
log.info("Starting simulate_exposure for night {} expid {}".format(night_comm_exp, expid_comm_exp))
for i in range(len(rawfile_comm_exp)):
simulate_exposure(simspecfile_comm_exp[i], rawfile_comm_exp[i], cameras=cams,
ccdshape=None, simpixfile=None, addcosmics=addcosmics, comm=comm_exp)
comm.Barrier()
if rank == 0:
log.info('Finished pixsim nights {} at {}'.format(args.nights, asctime()))
|
<filename>digit_caps.py
#
# Dynamic Routing Between Capsules
# https://arxiv.org/pdf/1710.09829.pdf
#
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
import torch.nn.functional as F
import math
from squash import squash
class DigitCaps(nn.Module):
def __init__(self, routing_iters, gpu, primary_capsules):
super(DigitCaps, self).__init__()
self.routing_iters = routing_iters
self.gpu = gpu
self.in_capsules = primary_capsules
self.in_capsule_size = 8
self.out_capsules = 10
self.out_capsule_size = 16
self.W = nn.Parameter(
torch.Tensor(
self.in_capsules,
self.out_capsules,
self.out_capsule_size,
self.in_capsule_size
)
)
# W: [in_capsules, out_capsules, out_capsule_size, in_capsule_size] = [1152, 10, 16, 8]
self.reset_parameters()
def reset_parameters(self):
""" Reset W.
"""
stdv = 1. / math.sqrt(self.in_capsules)
self.W.data.uniform_(-stdv, stdv)
# FIXME: rewrite this in an easier-to-understand way; some tensors carry redundant dimensions.
def forward(self, x):
# x: [batch_size, in_capsules=1152, in_capsule_size=8]
batch_size = x.size(0)
x = torch.stack([x] * self.out_capsules, dim=2)
# x: [batch_size, in_capsules=1152, out_capsules=10, in_capsule_size=8]
W = torch.cat([self.W.unsqueeze(0)] * batch_size, dim=0)
# W: [batch_size, in_capsules=1152, out_capsules=10, out_capsule_size=16, in_capsule_size=8]
# Transform inputs by weight matrix `W`.
u_hat = torch.matmul(W, x.unsqueeze(4)) # matrix multiplication
# u_hat: [batch_size, in_capsules=1152, out_capsules=10, out_capsule_size=16, 1]
u_hat_detached = u_hat.detach()
# u_hat_detached: [batch_size, in_capsules=1152, out_capsules=10, out_capsule_size=16, 1]
# In forward pass, `u_hat_detached` = `u_hat`, and
# in backward, no gradient can flow from `u_hat_detached` back to `u_hat`.
# Initialize routing logits to zero.
b_ij = Variable(torch.zeros(self.in_capsules, self.out_capsules, 1))
if self.gpu >= 0:
b_ij = b_ij.cuda(self.gpu)
# b_ij: [in_capsules=1152, out_capsules=10, 1]
# Iterative routing.
for iteration in range(self.routing_iters):
# Convert routing logits to softmax.
c_ij = F.softmax(b_ij.unsqueeze(0), dim=2)
c_ij = torch.cat([c_ij] * batch_size, dim=0).unsqueeze(4)
# c_ij: [batch_size, in_capsules=1152, out_capsules=10, 1, 1]
if iteration == self.routing_iters - 1:
# Apply routing `c_ij` to weighted inputs `u_hat`.
s_j = (c_ij * u_hat).sum(dim=1, keepdim=True) # element-wise product
# s_j: [batch_size, 1, out_capsules=10, out_capsule_size=16, 1]
v_j = squash(s_j, dim=3)
# v_j: [batch_size, 1, out_capsules=10, out_capsule_size=16, 1]
else:
# Apply routing `c_ij` to weighted inputs `u_hat`.
s_j = (c_ij * u_hat_detached).sum(dim=1, keepdim=True) # element-wise product
# s_j: [batch_size, 1, out_capsules=10, out_capsule_size=16, 1]
v_j = squash(s_j, dim=3)
# v_j: [batch_size, 1, out_capsules=10, out_capsule_size=16, 1]
# Compute inner products of 2 16D-vectors, `u_hat` and `v_j`.
u_vj1 = torch.matmul(u_hat_detached.transpose(3, 4), v_j).squeeze(4).mean(dim=0, keepdim=False)
# u_vj1: [in_capsules=1152, out_capsules=10, 1]
# Update b_ij (routing).
b_ij = b_ij + u_vj1
return v_j.squeeze(4).squeeze(1) # [batch_size, out_capsules=10, out_capsule_size=16]
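# Illustrative usage sketch (not part of the original module): push a random batch of
# primary-capsule outputs through DigitCaps on CPU and check the output shape.
if __name__ == '__main__':
    caps = DigitCaps(routing_iters=3, gpu=-1, primary_capsules=1152)
    x = Variable(torch.randn(2, 1152, 8))  # [batch_size, in_capsules, in_capsule_size]
    v = caps(x)
    print(v.size())  # expected: torch.Size([2, 10, 16])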
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Union, Tuple, Optional, Any, List, Dict, cast
from torchrec.distributed.planner.types import (
ShardingOption,
Stats,
Topology,
ParameterConstraints,
Storage,
)
from torchrec.distributed.planner.utils import bytes_to_gb
from torchrec.distributed.types import ShardingType, ParameterSharding, ShardingPlan
logger: logging.Logger = logging.getLogger(__name__)
STATS_DIVIDER = "####################################################################################################"
STATS_BAR = f"#{'------------------------------------------------------------------------------------------------': ^98}#"
class EmbeddingStats(Stats):
"""
Stats for a sharding planner execution.
"""
def log(
self,
sharding_plan: ShardingPlan,
topology: Topology,
num_proposals: int,
num_plans: int,
best_plan: List[ShardingOption],
constraints: Optional[Dict[str, ParameterConstraints]] = None,
) -> None:
"""
Log stats for a given sharding plan to stdout.
Provide a tabular view of stats for the given sharding plan with per device
storage usage (HBM and DDR), perf, input (pooling factors), output (embedding
dimension), and number and type of shards.
Args:
sharding_plan (ShardingPlan): sharding plan chosen by the ShardingPlanner.
topology (Topology): device topology.
num_proposals (int): number of proposals evaluated
num_plans (int): number of proposals successfully partitioned
best_plan (List[ShardingOption]): plan with expected performance
constraints (Optional[Dict[str, ParameterConstraints]]): dict of parameter
names to provided ParameterConstraints.
"""
shard_by_fqn = {
module_name + "." + param_name: value
for module_name, param_dict in sharding_plan.plan.items()
for param_name, value in param_dict.items()
}
stats: Dict[int, Dict[str, Any]] = {
rank: {"type": {}, "pooling_factor": 0.0, "embedding_dims": 0}
for rank in range(topology.world_size)
}
used_sharding_types = set()
for sharding_option in best_plan:
fqn = sharding_option.fqn
if shard_by_fqn.get(fqn) is None:
continue
shard: ParameterSharding = shard_by_fqn[fqn]
ranks, pooling_factor, emb_dims = self._get_shard_stats(
shard=shard,
sharding_option=sharding_option,
world_size=topology.world_size,
local_size=topology.local_world_size,
constraints=constraints,
)
sharding_type_abbr = _get_sharding_type_abbr(shard.sharding_type)
used_sharding_types.add(sharding_type_abbr)
for i, rank in enumerate(ranks):
count = stats[rank]["type"].get(sharding_type_abbr, 0)
stats[rank]["type"][sharding_type_abbr] = count + 1
stats[rank]["pooling_factor"] += pooling_factor[i]
stats[rank]["embedding_dims"] += emb_dims[i]
used_hbm = [0] * topology.world_size
used_ddr = [0] * topology.world_size
perf = [0.0] * topology.world_size
for sharding_option in best_plan:
for shard in sharding_option.shards:
storage = cast(Storage, shard.storage)
rank = cast(int, shard.rank)
used_hbm[rank] += storage.hbm
used_ddr[rank] += storage.ddr
perf[rank] += cast(float, shard.perf)
table: List[List[Union[str, int]]] = [
["Rank", "HBM (GB)", "DDR (GB)", "Perf", "Input", "Output", "Shards"],
[
"------",
"----------",
"----------",
"------",
"-------",
"--------",
"--------",
],
]
for rank, device in enumerate(topology.devices):
used_hbm_gb = bytes_to_gb(used_hbm[rank])
used_hbm_ratio = (
used_hbm[rank] / device.storage.hbm
if topology.compute_device == "cuda"
else 0
)
used_ddr_gb = bytes_to_gb(used_ddr[rank])
used_ddr_ratio = used_ddr[rank] / device.storage.ddr
for sharding_type in used_sharding_types:
if sharding_type not in stats[rank]["type"]:
stats[rank]["type"][sharding_type] = 0
rank_hbm = f"{used_hbm_gb:.1f} ({used_hbm_ratio:.0%})"
rank_ddr = f"{used_ddr_gb:.1f} ({used_ddr_ratio:.0%})"
rank_perf = f"{perf[rank] / 1000:,.0f}"
rank_pooling = f"{int(stats[rank]['pooling_factor']):,}"
rank_dims = f"{stats[rank]['embedding_dims']:,}"
rank_shards = " ".join(
f"{sharding_type}: {num_tables}"
for sharding_type, num_tables in sorted(stats[rank]["type"].items())
)
table.append(
[
rank,
rank_hbm,
rank_ddr,
rank_perf,
rank_pooling,
rank_dims,
rank_shards,
]
)
logger.info(STATS_DIVIDER)
header_text = "--- Planner Statistics ---"
logger.info(f"#{header_text: ^98}#")
iter_text = (
f"--- Evalulated {num_proposals} proposal(s), "
f"found {num_plans} possible plan(s) ---"
)
logger.info(f"#{iter_text: ^98}#")
logger.info(STATS_BAR)
formatted_table = _format_table(table)
for row in formatted_table:
logger.info(f"# {row: <97}#")
logger.info(f"#{'' : ^98}#")
legend = "Input: pooling factor, Output: embedding dimension, Shards: number of tables"
logger.info(f"# {legend: <97}#")
logger.info(STATS_DIVIDER)
def _get_shard_stats(
self,
shard: ParameterSharding,
sharding_option: ShardingOption,
world_size: int,
local_size: int,
constraints: Optional[Dict[str, ParameterConstraints]] = None,
) -> Tuple[List[int], List[float], List[int]]:
"""
Gets ranks, pooling factors, and embedding dimensions per shard.
Returns:
ranks: list of ranks.
pooling_factor: list of pooling factors across ranks.
emb_dims: list of embedding dimensions across ranks.
"""
ranks = list(range(world_size))
pooling_factor = [
sum(constraints[sharding_option.name].pooling_factors)
if constraints and constraints.get(sharding_option.name)
else 0.0
]
emb_dims = [sharding_option.tensor.shape[1]]
if shard.sharding_type == ShardingType.DATA_PARALLEL.value:
emb_dims = emb_dims * len(ranks)
pooling_factor = pooling_factor * len(ranks)
elif shard.sharding_type == ShardingType.TABLE_WISE.value:
assert shard.ranks
ranks = shard.ranks
elif shard.sharding_type == ShardingType.COLUMN_WISE.value:
assert shard.ranks
ranks = shard.ranks
emb_dims = [
int(shard.shard_sizes[1])
# pyre-ignore [16]
for shard in shard.sharding_spec.shards
]
pooling_factor = pooling_factor * len(ranks)
elif shard.sharding_type == ShardingType.ROW_WISE.value:
pooling_factor = [pooling_factor[0] / world_size] * len(ranks)
emb_dims = emb_dims * len(ranks)
elif shard.sharding_type == ShardingType.TABLE_ROW_WISE.value:
assert shard.ranks
host_id = shard.ranks[0] // local_size
ranks = list(range(host_id * local_size, (host_id + 1) * local_size))
pooling_factor = [pooling_factor[0] / local_size] * len(ranks)
emb_dims = emb_dims * len(ranks)
return ranks, pooling_factor, emb_dims
def _get_sharding_type_abbr(sharding_type: str) -> str:
if sharding_type == ShardingType.DATA_PARALLEL.value:
return "DP"
elif sharding_type == ShardingType.TABLE_WISE.value:
return "TW"
elif sharding_type == ShardingType.COLUMN_WISE.value:
return "CW"
elif sharding_type == ShardingType.ROW_WISE.value:
return "RW"
elif sharding_type == ShardingType.TABLE_ROW_WISE.value:
return "TWRW"
else:
raise ValueError(f"Unrecognized sharding type provided: {sharding_type}")
def _format_table(table: List[List[Union[str, int]]]) -> List[str]:
longest_cols = [
(max([len(str(row[i])) for row in table]) + 3) for i in range(len(table[0]))
]
row_format = "".join(
["{:>" + str(longest_col) + "}" for longest_col in longest_cols]
)
return [row_format.format(*row) for row in table]
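# Example (illustrative): every cell is right-justified to its column's widest entry
# plus three spaces of padding, so
#   _format_table([["Rank", "HBM (GB)"], [0, "1.2 (50%)"]])
# returns ['   Rank    HBM (GB)', '      0   1.2 (50%)'].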
|
<reponame>skeuomorf/cryptography
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import binascii
import datetime
import os
import pytest
from cryptography import x509
from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.backends.interfaces import (
DSABackend, EllipticCurveBackend, RSABackend, X509Backend
)
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa
from .hazmat.primitives.test_ec import _skip_curve_unsupported
from .utils import load_vectors_from_file
def _load_cert(filename, loader, backend):
cert = load_vectors_from_file(
filename=filename,
loader=lambda pemfile: loader(pemfile.read(), backend),
mode="rb"
)
return cert
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
class TestRSACertificate(object):
def test_load_pem_cert(self, backend):
cert = _load_cert(
os.path.join("x509", "custom", "post2000utctime.pem"),
x509.load_pem_x509_certificate,
backend
)
assert isinstance(cert, x509.Certificate)
assert cert.serial == 11559813051657483483
fingerprint = binascii.hexlify(cert.fingerprint(hashes.SHA1()))
assert fingerprint == b"2b619ed04bfc9c3b08eb677d272192286a0947a8"
assert isinstance(cert.signature_hash_algorithm, hashes.SHA1)
def test_load_der_cert(self, backend):
cert = _load_cert(
os.path.join("x509", "PKITS_data", "certs", "GoodCACert.crt"),
x509.load_der_x509_certificate,
backend
)
assert isinstance(cert, x509.Certificate)
assert cert.serial == 2
fingerprint = binascii.hexlify(cert.fingerprint(hashes.SHA1()))
assert fingerprint == b"6f49779533d565e8b7c1062503eab41492c38e4d"
assert isinstance(cert.signature_hash_algorithm, hashes.SHA256)
def test_issuer(self, backend):
cert = _load_cert(
os.path.join(
"x509", "PKITS_data", "certs",
"Validpre2000UTCnotBeforeDateTest3EE.crt"
),
x509.load_der_x509_certificate,
backend
)
issuer = cert.issuer
assert isinstance(issuer, x509.Name)
assert list(issuer) == [
x509.NameAttribute(x509.OID_COUNTRY_NAME, 'US'),
x509.NameAttribute(
x509.OID_ORGANIZATION_NAME, 'Test Certificates 2011'
),
x509.NameAttribute(x509.OID_COMMON_NAME, 'Good CA')
]
assert issuer.get_attributes_for_oid(x509.OID_COMMON_NAME) == [
x509.NameAttribute(x509.OID_COMMON_NAME, 'Good CA')
]
def test_all_issuer_name_types(self, backend):
cert = _load_cert(
os.path.join(
"x509", "custom",
"all_supported_names.pem"
),
x509.load_pem_x509_certificate,
backend
)
issuer = cert.issuer
assert isinstance(issuer, x509.Name)
assert list(issuer) == [
x509.NameAttribute(x509.OID_COUNTRY_NAME, 'US'),
x509.NameAttribute(x509.OID_COUNTRY_NAME, 'CA'),
x509.NameAttribute(x509.OID_STATE_OR_PROVINCE_NAME, 'Texas'),
x509.NameAttribute(x509.OID_STATE_OR_PROVINCE_NAME, 'Illinois'),
x509.NameAttribute(x509.OID_LOCALITY_NAME, 'Chicago'),
x509.NameAttribute(x509.OID_LOCALITY_NAME, 'Austin'),
x509.NameAttribute(x509.OID_ORGANIZATION_NAME, 'Zero, LLC'),
x509.NameAttribute(x509.OID_ORGANIZATION_NAME, 'One, LLC'),
x509.NameAttribute(x509.OID_COMMON_NAME, 'common name 0'),
x509.NameAttribute(x509.OID_COMMON_NAME, 'common name 1'),
x509.NameAttribute(x509.OID_ORGANIZATIONAL_UNIT_NAME, 'OU 0'),
x509.NameAttribute(x509.OID_ORGANIZATIONAL_UNIT_NAME, 'OU 1'),
x509.NameAttribute(x509.OID_DN_QUALIFIER, 'dnQualifier0'),
x509.NameAttribute(x509.OID_DN_QUALIFIER, 'dnQualifier1'),
x509.NameAttribute(x509.OID_SERIAL_NUMBER, '123'),
x509.NameAttribute(x509.OID_SERIAL_NUMBER, '456'),
x509.NameAttribute(x509.OID_TITLE, 'Title 0'),
x509.NameAttribute(x509.OID_TITLE, 'Title 1'),
x509.NameAttribute(x509.OID_SURNAME, 'Surname 0'),
x509.NameAttribute(x509.OID_SURNAME, 'Surname 1'),
x509.NameAttribute(x509.OID_GIVEN_NAME, 'Given Name 0'),
x509.NameAttribute(x509.OID_GIVEN_NAME, 'Given Name 1'),
x509.NameAttribute(x509.OID_PSEUDONYM, 'Incognito 0'),
x509.NameAttribute(x509.OID_PSEUDONYM, 'Incognito 1'),
x509.NameAttribute(x509.OID_GENERATION_QUALIFIER, 'Last Gen'),
x509.NameAttribute(x509.OID_GENERATION_QUALIFIER, 'Next Gen'),
x509.NameAttribute(x509.OID_DOMAIN_COMPONENT, 'dc0'),
x509.NameAttribute(x509.OID_DOMAIN_COMPONENT, 'dc1'),
x509.NameAttribute(x509.OID_EMAIL_ADDRESS, '<EMAIL>'),
x509.NameAttribute(x509.OID_EMAIL_ADDRESS, '<EMAIL>'),
]
def test_subject(self, backend):
cert = _load_cert(
os.path.join(
"x509", "PKITS_data", "certs",
"Validpre2000UTCnotBeforeDateTest3EE.crt"
),
x509.load_der_x509_certificate,
backend
)
subject = cert.subject
assert isinstance(subject, x509.Name)
assert list(subject) == [
x509.NameAttribute(x509.OID_COUNTRY_NAME, 'US'),
x509.NameAttribute(
x509.OID_ORGANIZATION_NAME, 'Test Certificates 2011'
),
x509.NameAttribute(
x509.OID_COMMON_NAME,
'Valid pre2000 UTC notBefore Date EE Certificate Test3'
)
]
assert subject.get_attributes_for_oid(x509.OID_COMMON_NAME) == [
x509.NameAttribute(
x509.OID_COMMON_NAME,
'Valid pre2000 UTC notBefore Date EE Certificate Test3'
)
]
def test_unicode_name(self, backend):
cert = _load_cert(
os.path.join(
"x509", "custom",
"utf8_common_name.pem"
),
x509.load_pem_x509_certificate,
backend
)
assert cert.subject.get_attributes_for_oid(x509.OID_COMMON_NAME) == [
x509.NameAttribute(
x509.OID_COMMON_NAME,
u'We heart UTF8!\u2122'
)
]
assert cert.issuer.get_attributes_for_oid(x509.OID_COMMON_NAME) == [
x509.NameAttribute(
x509.OID_COMMON_NAME,
u'We heart UTF8!\u2122'
)
]
def test_all_subject_name_types(self, backend):
cert = _load_cert(
os.path.join(
"x509", "custom",
"all_supported_names.pem"
),
x509.load_pem_x509_certificate,
backend
)
subject = cert.subject
assert isinstance(subject, x509.Name)
assert list(subject) == [
x509.NameAttribute(x509.OID_COUNTRY_NAME, 'AU'),
x509.NameAttribute(x509.OID_COUNTRY_NAME, 'DE'),
x509.NameAttribute(x509.OID_STATE_OR_PROVINCE_NAME, 'California'),
x509.NameAttribute(x509.OID_STATE_OR_PROVINCE_NAME, 'New York'),
x509.NameAttribute(x509.OID_LOCALITY_NAME, 'San Francisco'),
x509.NameAttribute(x509.OID_LOCALITY_NAME, 'Ithaca'),
x509.NameAttribute(x509.OID_ORGANIZATION_NAME, 'Org Zero, LLC'),
x509.NameAttribute(x509.OID_ORGANIZATION_NAME, 'Org One, LLC'),
x509.NameAttribute(x509.OID_COMMON_NAME, 'CN 0'),
x509.NameAttribute(x509.OID_COMMON_NAME, 'CN 1'),
x509.NameAttribute(
x509.OID_ORGANIZATIONAL_UNIT_NAME, 'Engineering 0'
),
x509.NameAttribute(
x509.OID_ORGANIZATIONAL_UNIT_NAME, 'Engineering 1'
),
x509.NameAttribute(x509.OID_DN_QUALIFIER, 'qualified0'),
x509.NameAttribute(x509.OID_DN_QUALIFIER, 'qualified1'),
x509.NameAttribute(x509.OID_SERIAL_NUMBER, '789'),
x509.NameAttribute(x509.OID_SERIAL_NUMBER, '012'),
x509.NameAttribute(x509.OID_TITLE, 'Title IX'),
x509.NameAttribute(x509.OID_TITLE, 'Title X'),
x509.NameAttribute(x509.OID_SURNAME, 'Last 0'),
x509.NameAttribute(x509.OID_SURNAME, 'Last 1'),
x509.NameAttribute(x509.OID_GIVEN_NAME, 'First 0'),
x509.NameAttribute(x509.OID_GIVEN_NAME, 'First 1'),
x509.NameAttribute(x509.OID_PSEUDONYM, 'Guy Incognito 0'),
x509.NameAttribute(x509.OID_PSEUDONYM, 'Guy Incognito 1'),
x509.NameAttribute(x509.OID_GENERATION_QUALIFIER, '32X'),
x509.NameAttribute(x509.OID_GENERATION_QUALIFIER, 'Dreamcast'),
x509.NameAttribute(x509.OID_DOMAIN_COMPONENT, 'dc2'),
x509.NameAttribute(x509.OID_DOMAIN_COMPONENT, 'dc3'),
x509.NameAttribute(x509.OID_EMAIL_ADDRESS, '<EMAIL>'),
x509.NameAttribute(x509.OID_EMAIL_ADDRESS, '<EMAIL>'),
]
def test_load_good_ca_cert(self, backend):
cert = _load_cert(
os.path.join("x509", "PKITS_data", "certs", "GoodCACert.crt"),
x509.load_der_x509_certificate,
backend
)
assert cert.not_valid_before == datetime.datetime(2010, 1, 1, 8, 30)
assert cert.not_valid_after == datetime.datetime(2030, 12, 31, 8, 30)
assert cert.serial == 2
public_key = cert.public_key()
assert isinstance(public_key, rsa.RSAPublicKey)
assert cert.version is x509.Version.v3
fingerprint = binascii.hexlify(cert.fingerprint(hashes.SHA1()))
assert fingerprint == b"6f49779533d565e8b7c1062503eab41492c38e4d"
def test_utc_pre_2000_not_before_cert(self, backend):
cert = _load_cert(
os.path.join(
"x509", "PKITS_data", "certs",
"Validpre2000UTCnotBeforeDateTest3EE.crt"
),
x509.load_der_x509_certificate,
backend
)
assert cert.not_valid_before == datetime.datetime(1950, 1, 1, 12, 1)
def test_pre_2000_utc_not_after_cert(self, backend):
cert = _load_cert(
os.path.join(
"x509", "PKITS_data", "certs",
"Invalidpre2000UTCEEnotAfterDateTest7EE.crt"
),
x509.load_der_x509_certificate,
backend
)
assert cert.not_valid_after == datetime.datetime(1999, 1, 1, 12, 1)
def test_post_2000_utc_cert(self, backend):
cert = _load_cert(
os.path.join("x509", "custom", "post2000utctime.pem"),
x509.load_pem_x509_certificate,
backend
)
assert cert.not_valid_before == datetime.datetime(
2014, 11, 26, 21, 41, 20
)
assert cert.not_valid_after == datetime.datetime(
2014, 12, 26, 21, 41, 20
)
def test_generalized_time_not_before_cert(self, backend):
cert = _load_cert(
os.path.join(
"x509", "PKITS_data", "certs",
"ValidGeneralizedTimenotBeforeDateTest4EE.crt"
),
x509.load_der_x509_certificate,
backend
)
assert cert.not_valid_before == datetime.datetime(2002, 1, 1, 12, 1)
assert cert.not_valid_after == datetime.datetime(2030, 12, 31, 8, 30)
assert cert.version is x509.Version.v3
def test_generalized_time_not_after_cert(self, backend):
cert = _load_cert(
os.path.join(
"x509", "PKITS_data", "certs",
"ValidGeneralizedTimenotAfterDateTest8EE.crt"
),
x509.load_der_x509_certificate,
backend
)
assert cert.not_valid_before == datetime.datetime(2010, 1, 1, 8, 30)
assert cert.not_valid_after == datetime.datetime(2050, 1, 1, 12, 1)
assert cert.version is x509.Version.v3
def test_invalid_version_cert(self, backend):
cert = _load_cert(
os.path.join("x509", "custom", "invalid_version.pem"),
x509.load_pem_x509_certificate,
backend
)
with pytest.raises(x509.InvalidVersion) as exc:
cert.version
assert exc.value.parsed_version == 7
def test_eq(self, backend):
cert = _load_cert(
os.path.join("x509", "custom", "post2000utctime.pem"),
x509.load_pem_x509_certificate,
backend
)
cert2 = _load_cert(
os.path.join("x509", "custom", "post2000utctime.pem"),
x509.load_pem_x509_certificate,
backend
)
assert cert == cert2
def test_ne(self, backend):
cert = _load_cert(
os.path.join("x509", "custom", "post2000utctime.pem"),
x509.load_pem_x509_certificate,
backend
)
cert2 = _load_cert(
os.path.join(
"x509", "PKITS_data", "certs",
"ValidGeneralizedTimenotAfterDateTest8EE.crt"
),
x509.load_der_x509_certificate,
backend
)
assert cert != cert2
assert cert != object()
def test_version_1_cert(self, backend):
cert = _load_cert(
os.path.join("x509", "v1_cert.pem"),
x509.load_pem_x509_certificate,
backend
)
assert cert.version is x509.Version.v1
def test_invalid_pem(self, backend):
with pytest.raises(ValueError):
x509.load_pem_x509_certificate(b"notacert", backend)
def test_invalid_der(self, backend):
with pytest.raises(ValueError):
x509.load_der_x509_certificate(b"notacert", backend)
def test_unsupported_signature_hash_algorithm_cert(self, backend):
cert = _load_cert(
os.path.join("x509", "verisign_md2_root.pem"),
x509.load_pem_x509_certificate,
backend
)
with pytest.raises(UnsupportedAlgorithm):
cert.signature_hash_algorithm
def test_public_bytes_pem(self, backend):
# Load an existing certificate.
cert = _load_cert(
os.path.join("x509", "PKITS_data", "certs", "GoodCACert.crt"),
x509.load_der_x509_certificate,
backend
)
# Encode it to PEM and load it back.
cert = x509.load_pem_x509_certificate(cert.public_bytes(
encoding=serialization.Encoding.PEM,
), backend)
# We should recover what we had to start with.
assert cert.not_valid_before == datetime.datetime(2010, 1, 1, 8, 30)
assert cert.not_valid_after == datetime.datetime(2030, 12, 31, 8, 30)
assert cert.serial == 2
public_key = cert.public_key()
assert isinstance(public_key, rsa.RSAPublicKey)
assert cert.version is x509.Version.v3
fingerprint = binascii.hexlify(cert.fingerprint(hashes.SHA1()))
assert fingerprint == b"6f49779533d565e8b7c1062503eab41492c38e4d"
def test_public_bytes_der(self, backend):
# Load an existing certificate.
cert = _load_cert(
os.path.join("x509", "PKITS_data", "certs", "GoodCACert.crt"),
x509.load_der_x509_certificate,
backend
)
# Encode it to DER and load it back.
cert = x509.load_der_x509_certificate(cert.public_bytes(
encoding=serialization.Encoding.DER,
), backend)
# We should recover what we had to start with.
assert cert.not_valid_before == datetime.datetime(2010, 1, 1, 8, 30)
assert cert.not_valid_after == datetime.datetime(2030, 12, 31, 8, 30)
assert cert.serial == 2
public_key = cert.public_key()
assert isinstance(public_key, rsa.RSAPublicKey)
assert cert.version is x509.Version.v3
fingerprint = binascii.hexlify(cert.fingerprint(hashes.SHA1()))
assert fingerprint == b"6f49779533d565e8b7c1062503eab41492c38e4d"
def test_public_bytes_invalid_encoding(self, backend):
cert = _load_cert(
os.path.join("x509", "PKITS_data", "certs", "GoodCACert.crt"),
x509.load_der_x509_certificate,
backend
)
with pytest.raises(TypeError):
cert.public_bytes('NotAnEncoding')
@pytest.mark.parametrize(
("cert_path", "loader_func", "encoding"),
[
(
os.path.join("x509", "v1_cert.pem"),
x509.load_pem_x509_certificate,
serialization.Encoding.PEM,
),
(
os.path.join("x509", "PKITS_data", "certs", "GoodCACert.crt"),
x509.load_der_x509_certificate,
serialization.Encoding.DER,
),
]
)
def test_public_bytes_match(self, cert_path, loader_func, encoding,
backend):
cert_bytes = load_vectors_from_file(
cert_path, lambda pemfile: pemfile.read(), mode="rb"
)
cert = loader_func(cert_bytes, backend)
serialized = cert.public_bytes(encoding)
assert serialized == cert_bytes
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
class TestRSACertificateRequest(object):
@pytest.mark.parametrize(
("path", "loader_func"),
[
[
os.path.join("x509", "requests", "rsa_sha1.pem"),
x509.load_pem_x509_csr
],
[
os.path.join("x509", "requests", "rsa_sha1.der"),
x509.load_der_x509_csr
],
]
)
def test_load_rsa_certificate_request(self, path, loader_func, backend):
request = _load_cert(path, loader_func, backend)
assert isinstance(request.signature_hash_algorithm, hashes.SHA1)
public_key = request.public_key()
assert isinstance(public_key, rsa.RSAPublicKey)
subject = request.subject
assert isinstance(subject, x509.Name)
assert list(subject) == [
x509.NameAttribute(x509.OID_COUNTRY_NAME, 'US'),
x509.NameAttribute(x509.OID_STATE_OR_PROVINCE_NAME, 'Texas'),
x509.NameAttribute(x509.OID_LOCALITY_NAME, 'Austin'),
x509.NameAttribute(x509.OID_ORGANIZATION_NAME, 'PyCA'),
x509.NameAttribute(x509.OID_COMMON_NAME, 'cryptography.io'),
]
extensions = request.extensions
assert isinstance(extensions, x509.Extensions)
assert list(extensions) == []
@pytest.mark.parametrize(
"loader_func",
[x509.load_pem_x509_csr, x509.load_der_x509_csr]
)
def test_invalid_certificate_request(self, loader_func, backend):
with pytest.raises(ValueError):
loader_func(b"notacsr", backend)
def test_unsupported_signature_hash_algorithm_request(self, backend):
request = _load_cert(
os.path.join("x509", "requests", "rsa_md4.pem"),
x509.load_pem_x509_csr,
backend
)
with pytest.raises(UnsupportedAlgorithm):
request.signature_hash_algorithm
def test_duplicate_extension(self, backend):
request = _load_cert(
os.path.join(
"x509", "requests", "two_basic_constraints.pem"
),
x509.load_pem_x509_csr,
backend
)
with pytest.raises(x509.DuplicateExtension) as exc:
request.extensions
assert exc.value.oid == x509.OID_BASIC_CONSTRAINTS
def test_unsupported_critical_extension(self, backend):
request = _load_cert(
os.path.join(
"x509", "requests", "unsupported_extension_critical.pem"
),
x509.load_pem_x509_csr,
backend
)
with pytest.raises(x509.UnsupportedExtension) as exc:
request.extensions
assert exc.value.oid == x509.ObjectIdentifier('1.2.3.4')
def test_unsupported_extension(self, backend):
request = _load_cert(
os.path.join(
"x509", "requests", "unsupported_extension.pem"
),
x509.load_pem_x509_csr,
backend
)
extensions = request.extensions
assert len(extensions) == 0
def test_request_basic_constraints(self, backend):
request = _load_cert(
os.path.join(
"x509", "requests", "basic_constraints.pem"
),
x509.load_pem_x509_csr,
backend
)
extensions = request.extensions
assert isinstance(extensions, x509.Extensions)
assert list(extensions) == [
x509.Extension(
x509.OID_BASIC_CONSTRAINTS,
True,
x509.BasicConstraints(True, 1),
),
]
def test_public_bytes_pem(self, backend):
# Load an existing CSR.
request = _load_cert(
os.path.join("x509", "requests", "rsa_sha1.pem"),
x509.load_pem_x509_csr,
backend
)
# Encode it to PEM and load it back.
request = x509.load_pem_x509_csr(request.public_bytes(
encoding=serialization.Encoding.PEM,
), backend)
# We should recover what we had to start with.
assert isinstance(request.signature_hash_algorithm, hashes.SHA1)
public_key = request.public_key()
assert isinstance(public_key, rsa.RSAPublicKey)
subject = request.subject
assert isinstance(subject, x509.Name)
assert list(subject) == [
x509.NameAttribute(x509.OID_COUNTRY_NAME, 'US'),
x509.NameAttribute(x509.OID_STATE_OR_PROVINCE_NAME, 'Texas'),
x509.NameAttribute(x509.OID_LOCALITY_NAME, 'Austin'),
x509.NameAttribute(x509.OID_ORGANIZATION_NAME, 'PyCA'),
x509.NameAttribute(x509.OID_COMMON_NAME, 'cryptography.io'),
]
def test_public_bytes_der(self, backend):
# Load an existing CSR.
request = _load_cert(
os.path.join("x509", "requests", "rsa_sha1.pem"),
x509.load_pem_x509_csr,
backend
)
# Encode it to DER and load it back.
request = x509.load_der_x509_csr(request.public_bytes(
encoding=serialization.Encoding.DER,
), backend)
# We should recover what we had to start with.
assert isinstance(request.signature_hash_algorithm, hashes.SHA1)
public_key = request.public_key()
assert isinstance(public_key, rsa.RSAPublicKey)
subject = request.subject
assert isinstance(subject, x509.Name)
assert list(subject) == [
x509.NameAttribute(x509.OID_COUNTRY_NAME, 'US'),
x509.NameAttribute(x509.OID_STATE_OR_PROVINCE_NAME, 'Texas'),
x509.NameAttribute(x509.OID_LOCALITY_NAME, 'Austin'),
x509.NameAttribute(x509.OID_ORGANIZATION_NAME, 'PyCA'),
x509.NameAttribute(x509.OID_COMMON_NAME, 'cryptography.io'),
]
def test_public_bytes_invalid_encoding(self, backend):
request = _load_cert(
os.path.join("x509", "requests", "rsa_sha1.pem"),
x509.load_pem_x509_csr,
backend
)
with pytest.raises(TypeError):
request.public_bytes('NotAnEncoding')
@pytest.mark.parametrize(
("request_path", "loader_func", "encoding"),
[
(
os.path.join("x509", "requests", "rsa_sha1.pem"),
x509.load_pem_x509_csr,
serialization.Encoding.PEM,
),
(
os.path.join("x509", "requests", "rsa_sha1.der"),
x509.load_der_x509_csr,
serialization.Encoding.DER,
),
]
)
def test_public_bytes_match(self, request_path, loader_func, encoding,
backend):
request_bytes = load_vectors_from_file(
request_path, lambda pemfile: pemfile.read(), mode="rb"
)
request = loader_func(request_bytes, backend)
serialized = request.public_bytes(encoding)
assert serialized == request_bytes
@pytest.mark.requires_backend_interface(interface=DSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
class TestDSACertificate(object):
def test_load_dsa_cert(self, backend):
cert = _load_cert(
os.path.join("x509", "custom", "dsa_selfsigned_ca.pem"),
x509.load_pem_x509_certificate,
backend
)
assert isinstance(cert.signature_hash_algorithm, hashes.SHA1)
public_key = cert.public_key()
assert isinstance(public_key, dsa.DSAPublicKey)
if isinstance(public_key, dsa.DSAPublicKeyWithSerialization):
num = public_key.public_numbers()
assert num.y == int(
"<KEY>"
"<KEY>"
"dea559f0b584c97a2b235b9b69b46bc6de1aed422a6f341832618bcaae2"
"198aba388099dafb05ff0b5efecb3b0ae169a62e1c72022af50ae68af3b"
"033c18e6eec1f7df4692c456ccafb79cc7e08da0a5786e9816ceda651d6"
"1b4bb7b81c2783da97cea62df67af5e85991fdc13aff10fc60e06586386"
"b96bb78d65750f542f86951e05a6d81baadbcd35a2e5cad4119923ae6a2"
"<KEY>"
"5df4af6b3911ef267d26623a5a1c5df4a6d13f1c", 16
)
assert num.parameter_numbers.g == int(
"4b7ced71dc353965ecc10d441a9a06fc24943a32d66429dd5ef44d43e67"
"d789d99770aec32c0415dc92970880872da45fef8dd1e115a3e4801387b"
"a6d755861f062fd3b6e9ea8e2641152339b828315b1528ee6c7b79458d2"
"1f3db973f6fc303f9397174c2799dd2351282aa2d8842c357a73495bbaa"
"c4932786414c55e60d73169f5761036fba29e9eebfb049f8a3b1b7cee6f"
"3fbfa136205f130bee2cf5b9c38dc1095d4006f2e73335c07352c64130a"
"1ab2b89f13b48f628d3cc3868beece9bb7beade9f830eacc6fa241425c0"
"b3fcc0df416a0c89f7bf35668d765ec95cdcfbe9caff49cfc156c668c76"
"fa6247676a6d3ac945844a083509c6a1b436baca", 16
)
assert num.parameter_numbers.p == int(
"bfade6048e373cd4e48b677e878c8e5b08c02102ae04eb2cb5c46a523a3"
"af1c73d16b24f34a4964781ae7e50500e21777754a670bd19a7420d6330"
"84e5556e33ca2c0e7d547ea5f46a07a01bf8669ae3bdec042d9b2ae5e6e"
"cf49f00ba9dac99ab6eff140d2cedf722ee62c2f9736857971444c25d0a"
"33d2017dc36d682a1054fe2a9428dda355a851ce6e6d61e03e419fd4ca4"
"e703313743d86caa885930f62ed5bf342d8165627681e9cc3244ba72aa2"
"2148400a6bbe80154e855d042c9dc2a3405f1e517be9dea50562f56da93"
"f6085f844a7e705c1f043e65751c583b80d29103e590ccb26efdaa0893d"
"833e36468f3907cfca788a3cb790f0341c8a31bf", 16
)
assert num.parameter_numbers.q == int(
"822ff5d234e073b901cf5941f58e1f538e71d40d", 16
)
@pytest.mark.parametrize(
("path", "loader_func"),
[
[
os.path.join("x509", "requests", "dsa_sha1.pem"),
x509.load_pem_x509_csr
],
[
os.path.join("x509", "requests", "dsa_sha1.der"),
x509.load_der_x509_csr
],
]
)
def test_load_dsa_request(self, path, loader_func, backend):
request = _load_cert(path, loader_func, backend)
assert isinstance(request.signature_hash_algorithm, hashes.SHA1)
public_key = request.public_key()
assert isinstance(public_key, dsa.DSAPublicKey)
subject = request.subject
assert isinstance(subject, x509.Name)
assert list(subject) == [
x509.NameAttribute(x509.OID_COMMON_NAME, 'cryptography.io'),
x509.NameAttribute(x509.OID_ORGANIZATION_NAME, 'PyCA'),
x509.NameAttribute(x509.OID_COUNTRY_NAME, 'US'),
x509.NameAttribute(x509.OID_STATE_OR_PROVINCE_NAME, 'Texas'),
x509.NameAttribute(x509.OID_LOCALITY_NAME, 'Austin'),
]
@pytest.mark.requires_backend_interface(interface=EllipticCurveBackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
class TestECDSACertificate(object):
def test_load_ecdsa_cert(self, backend):
_skip_curve_unsupported(backend, ec.SECP384R1())
cert = _load_cert(
os.path.join("x509", "ecdsa_root.pem"),
x509.load_pem_x509_certificate,
backend
)
assert isinstance(cert.signature_hash_algorithm, hashes.SHA384)
public_key = cert.public_key()
assert isinstance(public_key, ec.EllipticCurvePublicKey)
if isinstance(public_key, ec.EllipticCurvePublicKeyWithSerialization):
num = public_key.public_numbers()
assert num.x == int(
"<KEY>"
"6f0ccd00bba615b51467e9e2d9fee8e630c17", 16
)
assert num.y == int(
"ec0770f5cf842e40839ce83f416d3badd3a4145936789d0343ee10136c7"
"2deae88a7a16bb543ce67dc23ff031ca3e23e", 16
)
assert isinstance(num.curve, ec.SECP384R1)
def test_load_ecdsa_no_named_curve(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
cert = _load_cert(
os.path.join("x509", "custom", "ec_no_named_curve.pem"),
x509.load_pem_x509_certificate,
backend
)
with pytest.raises(NotImplementedError):
cert.public_key()
@pytest.mark.parametrize(
("path", "loader_func"),
[
[
os.path.join("x509", "requests", "ec_sha256.pem"),
x509.load_pem_x509_csr
],
[
os.path.join("x509", "requests", "ec_sha256.der"),
x509.load_der_x509_csr
],
]
)
def test_load_ecdsa_certificate_request(self, path, loader_func, backend):
_skip_curve_unsupported(backend, ec.SECP384R1())
request = _load_cert(path, loader_func, backend)
assert isinstance(request.signature_hash_algorithm, hashes.SHA256)
public_key = request.public_key()
assert isinstance(public_key, ec.EllipticCurvePublicKey)
subject = request.subject
assert isinstance(subject, x509.Name)
assert list(subject) == [
x509.NameAttribute(x509.OID_COMMON_NAME, 'cryptography.io'),
x509.NameAttribute(x509.OID_ORGANIZATION_NAME, 'PyCA'),
x509.NameAttribute(x509.OID_COUNTRY_NAME, 'US'),
x509.NameAttribute(x509.OID_STATE_OR_PROVINCE_NAME, 'Texas'),
x509.NameAttribute(x509.OID_LOCALITY_NAME, 'Austin'),
]
class TestNameAttribute(object):
def test_init_bad_oid(self):
with pytest.raises(TypeError):
x509.NameAttribute(None, 'value')
def test_eq(self):
assert x509.NameAttribute(
x509.ObjectIdentifier('oid'), 'value'
) == x509.NameAttribute(
x509.ObjectIdentifier('oid'), 'value'
)
def test_ne(self):
assert x509.NameAttribute(
x509.ObjectIdentifier('2.5.4.3'), 'value'
) != x509.NameAttribute(
x509.ObjectIdentifier('2.5.4.5'), 'value'
)
assert x509.NameAttribute(
x509.ObjectIdentifier('oid'), 'value'
) != x509.NameAttribute(
x509.ObjectIdentifier('oid'), 'value2'
)
assert x509.NameAttribute(
x509.ObjectIdentifier('oid'), 'value'
) != object()
def test_repr(self):
na = x509.NameAttribute(x509.ObjectIdentifier('2.5.4.3'), 'value')
assert repr(na) == (
"<NameAttribute(oid=<ObjectIdentifier(oid=2.5.4.3, name=commonName"
")>, value='value')>"
)
class TestObjectIdentifier(object):
def test_eq(self):
oid1 = x509.ObjectIdentifier('oid')
oid2 = x509.ObjectIdentifier('oid')
assert oid1 == oid2
def test_ne(self):
oid1 = x509.ObjectIdentifier('oid')
assert oid1 != x509.ObjectIdentifier('oid1')
assert oid1 != object()
def test_repr(self):
oid = x509.ObjectIdentifier("2.5.4.3")
assert repr(oid) == "<ObjectIdentifier(oid=2.5.4.3, name=commonName)>"
oid = x509.ObjectIdentifier("oid1")
assert repr(oid) == "<ObjectIdentifier(oid=oid1, name=Unknown OID)>"
class TestName(object):
def test_eq(self):
name1 = x509.Name([
x509.NameAttribute(x509.ObjectIdentifier('oid'), 'value1'),
x509.NameAttribute(x509.ObjectIdentifier('oid2'), 'value2'),
])
name2 = x509.Name([
x509.NameAttribute(x509.ObjectIdentifier('oid'), 'value1'),
x509.NameAttribute(x509.ObjectIdentifier('oid2'), 'value2'),
])
assert name1 == name2
def test_ne(self):
name1 = x509.Name([
x509.NameAttribute(x509.ObjectIdentifier('oid'), 'value1'),
x509.NameAttribute(x509.ObjectIdentifier('oid2'), 'value2'),
])
name2 = x509.Name([
x509.NameAttribute(x509.ObjectIdentifier('oid2'), 'value2'),
x509.NameAttribute(x509.ObjectIdentifier('oid'), 'value1'),
])
assert name1 != name2
assert name1 != object()
def test_repr(self):
name = x509.Name([
x509.NameAttribute(x509.OID_COMMON_NAME, 'cryptography.io'),
x509.NameAttribute(x509.OID_ORGANIZATION_NAME, 'PyCA'),
])
assert repr(name) == (
"<Name([<NameAttribute(oid=<ObjectIdentifier(oid=2.5.4.3, name=com"
"monName)>, value='cryptography.io')>, <NameAttribute(oid=<ObjectI"
"dentifier(oid=2.5.4.10, name=organizationName)>, value='PyCA')>])"
">"
)
|
<filename>security/ecdsa/publicKey.py
# -*- coding: utf-8 -*-
from .utils.compatibility import toBytes
from .utils.der import fromPem, removeSequence, removeObject, removeBitString, toPem, encodeSequence, encodeOid, encodeBitString
from .utils.binary import BinaryAscii
from .point import Point
from .curve import curvesByOid, supportedCurves, sm2p256v1
class PublicKey:
def __init__(self, point, curve):
"""
Store the public point and the curve it belongs to.
Args:
point: the public point on the curve
curve: the curve the point lies on
"""
self.point = point
self.curve = curve
def toString(self, encoded=False):
"""
Return the public point as a raw string X || Y, each coordinate padded to
curve.length() bytes; when encoded=True, prefix the two marker bytes
0x00 0x04 (uncompressed point).
Args:
encoded: whether to prepend the uncompressed-point prefix
"""
xString = BinaryAscii.stringFromNumber(
number=self.point.x,
length=self.curve.length(),
)
yString = BinaryAscii.stringFromNumber(
number=self.point.y,
length=self.curve.length(),
)
return "\x00\x04" + xString + yString if encoded else xString + yString
def toStr(self):
"""
Return the point as a hex string: hex(x) concatenated with hex(y).
"""
return str(hex(self.point.x))[2:].rstrip('L') + str(hex(self.point.y))[2:].rstrip('L')
def toDer(self):
"""
Return the DER encoding of this public key: a SEQUENCE holding the
ecPublicKey OID, the curve OID, and the uncompressed point as a BIT STRING.
"""
oidEcPublicKey = (1, 2, 840, 10045, 2, 1)
encodeEcAndOid = encodeSequence(
encodeOid(*oidEcPublicKey),
encodeOid(*self.curve.oid),
)
return encodeSequence(encodeEcAndOid, encodeBitString(self.toString(encoded=True)))
def toPem(self):
"""
Return this public key as a "PUBLIC KEY" PEM block wrapping the DER encoding.
"""
return toPem(der=toBytes(self.toDer()), name="PUBLIC KEY")
@classmethod
def fromPem(cls, string):
"""
Create a PublicKey from a PEM encoded string.
Args:
string: PEM text containing the public key
"""
return cls.fromDer(fromPem(string))
@classmethod
def fromDer(cls, string):
"""
Create a PublicKey from a DER encoded string, validating the wrapping
sequences and that the curve OID is one of the supported curves.
Args:
string: DER bytes containing the public key
"""
s1, empty = removeSequence(string)
if len(empty) != 0:
raise Exception("trailing junk after DER public key: {}".format(
BinaryAscii.hexFromBinary(empty)
))
s2, pointBitString = removeSequence(s1)
oidPk, rest = removeObject(s2)
oidCurve, empty = removeObject(rest)
if len(empty) != 0:
raise Exception("trailing junk after DER public key objects: {}".format(
BinaryAscii.hexFromBinary(empty)
))
if oidCurve not in curvesByOid:
raise Exception(
"Unknown curve with oid %s. Only the following are available: %s" % (
oidCurve,
", ".join([curve.name for curve in supportedCurves])
)
)
curve = curvesByOid[oidCurve]
pointStr, empty = removeBitString(pointBitString)
if len(empty) != 0:
raise Exception(
"trailing junk after public key point-string: " +
BinaryAscii.hexFromBinary(empty)
)
return cls.fromString(pointStr[2:], curve)
@classmethod
def fromString(cls, string, curve=sm2p256v1, validatePoint=True):
"""
Create a PublicKey instance from a string.
Args:
cls: write your description
string: write your description
curve: write your description
sm2p256v1: write your description
validatePoint: write your description
"""
baseLen = curve.length()
xs = string[:baseLen]
ys = string[baseLen:]
p = Point(
x=BinaryAscii.numberFromString(xs),
y=BinaryAscii.numberFromString(ys),
)
if validatePoint and not curve.contains(p):
raise Exception(
"point ({x},{y}) is not valid for curve {name}".format(
x=p.x, y=p.y, name=curve.name
)
)
return PublicKey(point=p, curve=curve)
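# --- Hedged usage sketch (not part of the original module) ---
# A minimal round-trip check using only the methods defined above; the PEM file
# name below is hypothetical and only illustrates the intended flow.
if __name__ == "__main__":
    import os
    pem_path = "public_key.pem"  # hypothetical input file
    if os.path.isfile(pem_path):
        with open(pem_path) as f:
            publicKey = PublicKey.fromPem(f.read())
        # DER round trip should preserve the point coordinates
        recovered = PublicKey.fromDer(publicKey.toDer())
        assert (recovered.point.x, recovered.point.y) == (publicKey.point.x, publicKey.point.y)
        print(publicKey.toPem())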
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#Importing individual user data
user_1 = pd.read_csv('User_1.csv')
user_2 = pd.read_csv('User_2.csv')
user_3 = pd.read_csv('User_3.csv')
user_4 = pd.read_csv('User_4.csv')
user_5 = pd.read_csv('User_5.csv')
user_6 = pd.read_csv('User_6.csv')
user_7 = pd.read_csv('User_7.csv')
user_8 = pd.read_csv('User_8.csv')
user_9 = pd.read_csv('User_9.csv')
user_10 = pd.read_csv('User_10.csv')
#Fixing User 1 table
#user_1 = pd.DataFrame(user_1.iloc[:34247,:].values)
#user_1.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis"]
##user_1["Timeframe"] = user_1["Timeframe"] - 45.944096
#Export_csv = user_1.to_csv(r'/Users/talhajamal/Documents/Year 3/Individual Project/Files for project/Final Training Dataset/User_1.csv')
#User_2 = User_2.to_numpy()
#User_2 = pd.DataFrame(User_2)
#Fixing Timeframe for each user
#for user 2
user_1 = user_1.to_numpy()
user_2 = user_2.to_numpy()
user_2[0][2] = user_1[34246][2] + user_2[0][6]
for i in range(1, len(user_2)):
user_2[i][2] = user_2[i-1][2] + user_2[i][6]
user_1 = pd.DataFrame(user_1)
user_1.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis"]
user_2 = pd.DataFrame(user_2)
user_2.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
#for user 3
user_2 = user_2.to_numpy()
user_3 = user_3.to_numpy()
user_3[0][2] = user_2[32450][2] + user_3[0][6]
for i in range(1, len(user_3)):
user_3[i][2] = user_3[i-1][2] + user_3[i][6]
user_2 = pd.DataFrame(user_2)
user_2.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
user_3 = pd.DataFrame(user_3)
user_3.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
#for user 4
user_3 = user_3.to_numpy()
user_4 = user_4.to_numpy()
user_4[0][2] = user_3[29623][2] + user_4[0][6]
for i in range(1, len(user_4)):
user_4[i][2] = user_4[i-1][2] + user_4[i][6]
user_3 = pd.DataFrame(user_3)
user_3.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
user_4 = pd.DataFrame(user_4)
user_4.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
#for user 5
user_4 = user_4.to_numpy()
user_5 = user_5.to_numpy()
user_5[0][2] = user_4[30116][2] + user_5[0][6]
for i in range(1, len(user_5)):
user_5[i][2] = user_5[i-1][2] + user_5[i][6]
user_4 = pd.DataFrame(user_4)
user_4.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
user_5 = pd.DataFrame(user_5)
user_5.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
#for user 6
user_5 = user_5.to_numpy()
user_6 = user_6.to_numpy()
user_6[0][2] = user_5[29682][2] + user_6[0][6]
for i in range(1, len(user_6)):
user_6[i][2] = user_6[i-1][2] + user_6[i][6]
user_5 = pd.DataFrame(user_5)
user_5.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
user_6 = pd.DataFrame(user_6)
user_6.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
#for user 7
user_6 = user_6.to_numpy()
user_7 = user_7.to_numpy()
user_7[0][2] = user_6[16611][2] + user_7[0][6]
for i in range(1, len(user_7)):
user_7[i][2] = user_7[i-1][2] + user_7[i][6]
user_6 = pd.DataFrame(user_6)
user_6.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
user_7 = pd.DataFrame(user_7)
user_7.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
#for user 8
user_7 = user_7.to_numpy()
user_8 = user_8.to_numpy()
user_8[0][2] = user_7[len(user_7)-1][2] + user_8[0][6]
for i in range(1, len(user_8)):
user_8[i][2] = user_8[i-1][2] + user_8[i][6]
user_7 = pd.DataFrame(user_7)
user_7.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
user_8 = pd.DataFrame(user_8)
user_8.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
#for user 9
user_8 = user_8.to_numpy()
user_9 = user_9.to_numpy()
user_9[0][2] = user_8[len(user_8)-1][2] + user_9[0][6]
for i in range(1, len(user_9)):
user_9[i][2] = user_9[i-1][2] + user_9[i][6]
user_8 = pd.DataFrame(user_8)
user_8.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
user_9 = pd.DataFrame(user_9)
user_9.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
#for user 10
user_9 = user_9.to_numpy()
user_10 = user_10.to_numpy()
user_10[0][2] = user_9[len(user_9)-1][2] + user_10[0][6]
for i in range(1, len(user_10)):
user_10[i][2] = user_10[i-1][2] + user_10[i][6]
user_9 = pd.DataFrame(user_9)
user_9.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
user_10 = pd.DataFrame(user_10)
user_10.columns = ["User", "Activity", "Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
#dropping time difference column
user_2 = user_2.drop(['Timedifference'], axis = 1)
user_3 = user_3.drop(['Timedifference'], axis = 1)
user_4 = user_4.drop(['Timedifference'], axis = 1)
user_5 = user_5.drop(['Timedifference'], axis = 1)
user_6 = user_6.drop(['Timedifference'], axis = 1)
user_7 = user_7.drop(['Timedifference'], axis = 1)
user_8 = user_8.drop(['Timedifference'], axis = 1)
user_9 = user_9.drop(['Timedifference'], axis = 1)
user_10 = user_10.drop(['Timedifference'], axis = 1)
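# --- Hedged refactor sketch (defined but not used above) ---
# Each per-user block above repeats the same steps: shift the user's Timeframe so it
# continues from the previous user's last timestamp, then drop the helper
# Timedifference column. Assuming the same CSV layout, the repetition could be folded
# into one helper like the function below.
def chain_timeframes(users):
    """users: list of DataFrames ordered User_1 ... User_n; returns them with chained Timeframe."""
    for prev, cur in zip(users, users[1:]):
        offset = prev["Timeframe"].iloc[-1]
        # Timeframe[i] = previous user's last timestamp + cumulative Timedifference
        cur["Timeframe"] = offset + cur["Timedifference"].cumsum()
    return [u.drop(columns=["Timedifference"], errors="ignore") for u in users]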
#creating one large dataset
#Final_Training_set = pd.DataFrame(user_1, columns = ['User', 'Activity', 'Timeframe', 'X axis', 'Y axis', 'Z axis',])
Final_Training_set = pd.concat([user_1, user_2])
Final_Training_set = pd.concat([Final_Training_set, user_3])
Final_Training_set = pd.concat([Final_Training_set, user_4])
Final_Training_set = pd.concat([Final_Training_set, user_5])
Final_Training_set = pd.concat([Final_Training_set, user_6])
Final_Training_set = pd.concat([Final_Training_set, user_7])
Final_Training_set = pd.concat([Final_Training_set, user_8])
#Final_Training_set = pd.concat([Final_Training_set, user_9])
#Final_Training_set = pd.concat([Final_Training_set, user_10])
#Exporting Final Training Dataset
Export_csv = Final_Training_set.to_csv(r'/Users/talhajamal/Documents/Year 3/Individual Project/Files for project/Final Training Dataset/final_training_set_8people.csv')
#Creating Testset from User 9 and 10
Final_Test_set = pd.concat([user_9, user_10])
Export2_csv = Final_Test_set.to_csv(r'/Users/talhajamal/Documents/Year 3/Individual Project/Files for project/Final Training Dataset/final_test_set_2people.csv')
|
import json
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from time import strftime as stime
import os
from sklearn.cluster import SpectralCoclustering  # moved out of sklearn.cluster.bicluster in newer scikit-learn
RATINGS_FILE = "rating_cleaned.json"
ANIME_FILE = "anime_cleaned.csv"
DIR = "DATABASE"
DATA_FILE = "ratings_database.csv"
def cocluster_data(users, n_clusters=5):
matrix = users.iloc[:, 1:]
corr_list = pd.DataFrame.corr( matrix )
clustered_model = SpectralCoclustering(n_clusters = n_clusters, random_state=0)
clustered_model.fit(corr_list)
feed = clustered_model.row_labels_
users = matrix.transpose()
users["Group"] = pd.Series(feed, index=users.index)
users = users.iloc[np.argsort( feed )]
users = users.reset_index(drop=True)
matrix = users.iloc[:, :-1].transpose()
matrix.columns = all_genres(pd.read_csv(DIR+"\\"+ANIME_FILE, encoding="utf8"))
corr_list = pd.DataFrame.corr( matrix )
return corr_list
def plot_correlation(corr_list, name):
labelsY = corr_list.index
labelsX = []
for label in labelsY:
labelsX.append(label[0:6])
fig, ax = plt.subplots()
fig.set_figheight(14)
fig.set_figwidth(18)
im = ax.pcolor(corr_list)
fig.colorbar(im)
ax.tick_params(labelsize=10)
ax.xaxis.set(ticks=np.arange(0.5, len(labelsX)), ticklabels=labelsX)
ax.yaxis.set(ticks=np.arange(0.5, len(labelsY)), ticklabels=labelsY)
plt.xticks(rotation=45)
plt.axis("tight")
plt.title("Correlation in anime genres", fontsize=30)
if not os.path.exists("ANALYSIS"):
os.makedirs("ANALYSIS")
plt.savefig("ANALYSIS\\"+ name + stime("%d-%m-%Y_%H-%M-%S") + ".pdf")
plt.show()
def correlate_data(df):
matrix = df.iloc[:, 1:]
corr_list = pd.DataFrame.corr( matrix )
return corr_list
def create_dataframe(j_file, a_data):
all_g = all_genres(a_data)
cols = ["user"] + all_g
df = pd.DataFrame(0, index=range( len(j_file) ), columns=cols)
k=0
for user in j_file:
l = len(j_file)
print( k , " out of " , l )
df.loc[k,"user"] = int(user)
times = dict( zip(all_g, [0]*len(all_g) ))
for anime in j_file[user]:
genres = get_genre(a_data, anime)
rating = j_file[user][anime]
for genre in genres:
times[genre] += 1
df.loc[k, genre] += rating
for genre in all_g:
if(times[genre] != 0):
val = df.loc[k,genre] / times[genre]
df.loc[k,genre] = int(val*100)/100
k += 1
return df
def get_genre(df, a_id):
a_id=str(a_id)
temp = (df.query("anime_id == "+a_id).genre)
if len(temp) == 0:
return []
gs = df.loc[(temp.index[0]),"genre"].split(",")
gs = list( map( lambda x:x.strip(), gs ) )
return gs
def all_genres(df):
gs = df.iloc[:, 2]
genres = []
for gl in gs:
gl = gl.split(",")
for i in range(len(gl)):
gl[i] = gl[i].strip()
genres = set( list(genres) + gl)
return list(genres)
def create_database(afile, rfile, new_rfile, dirc):
afile = dirc + "\\" + afile
rfile = dirc + "\\" + rfile
a_data = pd.read_csv(afile, encoding = 'utf8')
print("anime data loaded.")
with open(rfile) as data_file:
r_data = json.load(data_file)
print("ratings data loaded.")
r_data = create_dataframe(r_data, a_data)
save_database(r_data, new_rfile, dirc)
def save_database(df, datafile, dirc):
if not os.path.exists(dirc):
os.makedirs(dirc)
datafile = dirc + "\\" + datafile
df.to_csv(datafile, encoding='utf-8', index=False)
print("Database created in " + datafile)
def load_saved_database(datafile, dirc):
datafile = dirc + "\\" + datafile
df = pd.read_csv(datafile, encoding="utf8")
return df
def analyse_saved_data(df):
c1 = correlate_data(df)
plot_correlation(c1,"correlated-genres")
c2 = cocluster_data(df, n_clusters=2)
plot_correlation(c2,"coclustered-genres")
|
<reponame>rsudheerk001/MIVisionX
# Copyright (c) 2018 - 2020 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os, sys
import onnx
from onnx import onnx_pb
from onnx import numpy_helper
from nnir import *
onnx2ir_attr = {
'axis' : 'axis',
'axes' : 'axes',
'perm' : 'axes',
'broadcast' : 'broadcast',
'keepdims' : 'keepdims',
'kernel_shape' : 'kernel_shape',
'pads' : 'pads',
'strides' : 'strides',
'dilations' : 'dilations',
'group' : 'group',
'epsilon' : 'epsilon',
'alpha' : 'alpha',
'beta' : 'beta',
'transA' : 'transA',
'transB' : 'transB',
'bias' : 'bias',
'size' : 'size',
'split' : 'split',
'shape' : 'shape',
'min' : 'min',
'max' : 'max',
'to' : 'to',
'center_point_box' : 'center_point_box',
'value' : 'value',
'largest' : 'largest',
'sorted' : 'sorted',
}
onnx2ir_op_type = {
'Conv' : 'conv',
'ConvTranspose' : 'conv_transpose',
'BatchNormalization' : 'batch_norm',
'AveragePool' : 'avg_pool',
'MaxPool' : 'max_pool',
'Relu' : 'relu',
'Sum' : 'sum',
'Add' : 'add',
'Sub' : 'sub',
'Mul' : 'mul',
'MatMul' : 'matmul',
'Gemm' : 'gemm',
'LRN' : 'lrn',
'Concat' : 'concat',
'LeakyRelu' : 'leaky_relu',
'Sigmoid' : 'sigmoid',
'GlobalAveragePool' : 'global_avg_pool',
'Softmax' : 'softmax',
'Reshape' : 'reshape',
'Squeeze' : 'squeeze',
'Unsqueeze' : 'unsqueeze',
'Transpose' : 'transpose',
'Flatten' : 'flatten',
'Identity' : 'copy',
'Min' : 'min',
'Max' : 'max',
'Div' : 'div',
'Exp' : 'exp',
'Log' : 'log',
'ReduceMean' : 'global_avg_pool',
'Clip' : 'clamp',
'Cast' : 'cast',
'Shape' : 'shape',
'ArgMax' : 'argmax',
'NonMaxSuppression' : 'nms',
'Constant' : 'constant',
'Gather' : 'gather',
'TopK' : 'topk',
'ReduceMin' : 'reduce_min',
'Tile' : 'tile',
}
onnx2ir_data_type = [
"UND_", "F032", "U008", "I008", "U016", "I016", "I032", "I064",
"STR_", "BOOL", "F016", "F064", "U032", "U064", "C064", "C128"
]
def onnx_name_to_ir_name(name):
return '_'.join(('_'.join(('_'.join(name.split('/')).split('-')))).split(':'))
def onnx_node_to_ir_attr(node):
global onnx2ir_attr
attr = IrAttr()
for item in node.attribute:
if item.name in onnx2ir_attr:
name = onnx2ir_attr[item.name]
if item.HasField('f'):
attr.set(name,float(item.f))
elif item.HasField('i'):
attr.set(name,int(item.i))
elif item.HasField('s'):
attr.set(name,item.s)
elif item.HasField('t'):
attr.set(name,numpy_helper.to_array(item.t))
elif len(item.floats):
attr.set(name,list(item.floats))
elif len(item.ints):
attr.set(name,[int(v) for v in list(item.ints)])
elif len(item.strings):
attr.set(name,list(item.strings))
else:
raise ValueError("Unsupported ONNX attribute: {}".format(item))
if attr.is_set('output_padding'):
output_padding = attr.get('output_padding')
kernel_shape = attr.get('kernel_shape')
if (kernel_shape[0] <= 1) or (kernel_shape[1] <= 1) or \
((output_padding[0] % (kernel_shape[0] - 1)) != 0) or \
((output_padding[1] % (kernel_shape[1] - 1)) != 0):
raise ValueError("Unsupported ONNX value for output_padding attribute")
dilations = [output_padding[0] / (kernel_shape[0] - 1) + 1, output_padding[1] / (kernel_shape[1] - 1) + 1]
attr.set('dilations', dilations)
if node.op_type == 'MatMul':
attr.set('beta', 0.0)
return attr
def onnx_node_to_ir_node(onnx_node):
global onnx2ir_op_type
node = IrNode()
if onnx_node.op_type in onnx2ir_op_type:
type = onnx2ir_op_type[onnx_node.op_type]
else:
print('ERROR: ONNX operation "%s" not supported yet' % (onnx_node.op_type))
sys.exit(1)
node.set(type, [onnx_name_to_ir_name(name) for name in onnx_node.input], \
[onnx_name_to_ir_name(name) for name in onnx_node.output], \
onnx_node_to_ir_attr(onnx_node))
return node
def onnx_tensor_info_to_data(info, dims):
tensor = IrTensor()
tensor.setName(onnx_name_to_ir_name(info.name))
tensor.setInfo(onnx2ir_data_type[info.data_type], [int(x) for x in dims])
return tensor
def onnx_value_info_to_data(info, dims):
tensor = IrTensor()
tensor.setName(onnx_name_to_ir_name(info.name))
tensor.setInfo(onnx2ir_data_type[info.type.tensor_type.elem_type], [int(x) for x in dims])
return tensor
def onnx_graph_to_ir_graph(onnx_graph):
graph = IrGraph(False)
initializerList = []
shapeList = []
inputUser = False
for onnx_node in onnx_graph.node:
for tensor in onnx_graph.initializer:
if onnx_node.op_type == 'Reshape' and len(onnx_node.input) == 2 and tensor.name == onnx_node.input[1]:
tensorName = onnx_name_to_ir_name(tensor.name)
if tensorName not in shapeList:
shapeList.append(tensorName)
graph.addVariable(onnx_tensor_info_to_data(tensor,numpy_helper.to_array(tensor)))
graph.addBinary(tensorName, tensor.raw_data)
for tensor in onnx_graph.initializer:
if not onnx_name_to_ir_name(tensor.name) in shapeList:
tensorName = onnx_name_to_ir_name(tensor.name)
initializerList.append(tensorName)
graph.addVariable(onnx_tensor_info_to_data(tensor, tensor.dims))
graph.addBinary(tensorName, tensor.raw_data)
for tensor in onnx_graph.input:
if not onnx_name_to_ir_name(tensor.name) in initializerList and not onnx_name_to_ir_name(tensor.name) in shapeList:
input_dims = [int(x.dim_value) for x in tensor.type.tensor_type.shape.dim]
if (len(sys.argv) > 3) and (sys.argv[3] == "--input_dims"):
                if any(x == 0 or x is None or x == '?' for x in input_dims):
input_dims = sys.argv[4].split(',')
inputUser = True
graph.addInput(onnx_value_info_to_data(tensor, input_dims))
for tensor in onnx_graph.output:
output_dims = [int(x.dim_value) for x in tensor.type.tensor_type.shape.dim]
        if any(x == 0 or x is None or x == '?' for x in output_dims):
if inputUser == True:
output_dims[0] = input_dims[0]
while len(output_dims) != 4:
output_dims.append(1)
graph.addOutput(onnx_value_info_to_data(tensor, output_dims))
tensorAliasList = {}
for onnx_node in onnx_graph.node:
if onnx_node.op_type == 'Dropout':
tensorAliasList[onnx_node.output[0]] = onnx_node.input[0]
else:
for i in range(len(onnx_node.input)):
if onnx_node.input[i] in tensorAliasList:
onnx_node.input[i] = tensorAliasList[onnx_node.input[i]]
node = onnx_node_to_ir_node(onnx_node)
graph.addNode(node)
graph.updateLocals()
return graph
def onnx2ir(model, output_folder, node_type_append):
# get graph from ONNX model
if isinstance(model, str):
onnx_model = onnx.load(model)
elif isinstance(model, onnx.ModelProto):
onnx_model = model
else:
raise TypeError("Model must be file path to .onnx file or onnx loaded model")
graph = onnx_graph_to_ir_graph(onnx_model.graph)
graph.toFile(output_folder, node_type_append)
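# --- Hedged usage note (not part of the original converter) ---
# onnx2ir() can also be called programmatically; the file names below are
# hypothetical and only illustrate the signature defined above:
#     import onnx
#     model = onnx.load("model.onnx")              # or pass the path string directly
#     onnx2ir(model, "nnir_output_folder", node_type_append=0)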
def main():
if len(sys.argv) < 3:
print('Usage: python onnx_to_nnir.py <onnxModel> <nnirOutputFolder> [--input_dims n,c,h,w (optional)] [--node_type_append 0/1 (optional: appends node type to output tensor name)]')
sys.exit(1)
onnxFileName = sys.argv[1]
outputFolder = sys.argv[2]
#appends node type to output tensor name.
node_type_append = 0
pos = 4
while pos < len(sys.argv) and len(sys.argv) > 3 and sys.argv[pos][:2] == '--':
if sys.argv[pos] == '--node_type_append':
node_type_append = int(sys.argv[pos+1])
pos = pos + 2
elif sys.argv[pos] == '--input_dims':
#input_dims = sys.argv[pos+1]
pos = pos + 2
print('loading ONNX model from %s ...' % (onnxFileName))
onnx_model_proto = onnx_pb.ModelProto()
if not os.path.isfile(onnxFileName):
print('ERROR: unable to open: ' + onnxFileName)
sys.exit(1)
onnx_model_proto.ParseFromString(open(onnxFileName, 'rb').read())
print('converting to IR model in %s ...' % (outputFolder))
onnx2ir(onnx_model_proto, outputFolder, node_type_append)
if __name__ == '__main__':
main()
|
# this code is modified from the pytorch example code: https://github.com/pytorch/examples/blob/master/imagenet/main.py
# after the model is trained, you might use convert_model.py to remove the data parallel module to make the model as standalone weight.
#
# <NAME>
import argparse
import os
import shutil
import time
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torch.nn.functional as F
import wideresnet
import pdb
import SENet
import progressbar
from PIL import Image
import pandas as pd
import csv
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch Places365 Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='seresnet50_new',
help='model architecture: ' +
' | '.join(model_names) +
                        ' (default: seresnet50_new)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=70, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=64, type=int,
                    metavar='N', help='mini-batch size (default: 64)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=200, type=int,
metavar='N', help='print frequency (default: 200)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_false',
help='use pre-trained model')
parser.add_argument('--num_classes', default=365, type=int, help='number of classes in the model')
parser.add_argument('--dataset',default='places365',help='which dataset to train')
device_ids = [2,3]
ini_device = 2
best_prec1 = 0
class optimizerController(object):
def __init__(self, net, trainEpoches, iniLr=1e-1, finalLr=1e-4):
self.trainEpoches = trainEpoches
if(hasattr(net, 'module') ):
model = net.module
else:
model = net
        self.modelOptimizer = torch.optim.SGD(model.parameters(), lr=finalLr, momentum=0.9, weight_decay=1e-4)
self.optimizer0 = torch.optim.SGD(model.getParameters(0), lr=iniLr, momentum=0.9, weight_decay=1e-4)
self.optimizer1 = torch.optim.SGD(model.getParameters(1), lr=iniLr, momentum=0.9, weight_decay=1e-4)
self.optimizer2 = torch.optim.SGD(model.getParameters(2), lr=iniLr, momentum=0.9, weight_decay=1e-4)
self.optimizer3 = torch.optim.SGD(model.getParameters(3), lr=iniLr, momentum=0.9, weight_decay=1e-4)
self.optimizer4 = torch.optim.SGD([{'params': model.getParameters(4)},
{'params': model.getParameters(5)} ],
lr=iniLr, momentum=0.9, weight_decay=1e-4)
self.scheduler0 = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer0, T_max=trainEpoches[0],eta_min=finalLr)
self.scheduler1 = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer1, T_max=trainEpoches[1],eta_min=finalLr)
self.scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer2, T_max=trainEpoches[2],eta_min=finalLr)
self.scheduler3 = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer3, T_max=trainEpoches[3],eta_min=finalLr)
self.scheduler4 = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer4, T_max=trainEpoches[4],eta_min=finalLr)
def optimizerStep(self, epoch):
if(epoch < self.trainEpoches[0] ):
self.optimizer0.step()
if(epoch < self.trainEpoches[1]):
self.optimizer1.step()
if(epoch < self.trainEpoches[2]):
self.optimizer2.step()
if(epoch < self.trainEpoches[3]):
self.optimizer3.step()
if(epoch < self.trainEpoches[4]):
self.optimizer4.step()
else:
            self.modelOptimizer.step()
def optimizerZero_grad(self, epoch):
if(epoch < self.trainEpoches[0] ):
self.optimizer0.zero_grad()
if(epoch < self.trainEpoches[1]):
self.optimizer1.zero_grad()
if(epoch < self.trainEpoches[2]):
self.optimizer2.zero_grad()
if(epoch < self.trainEpoches[3]):
self.optimizer3.zero_grad()
if(epoch < self.trainEpoches[4]):
self.optimizer4.zero_grad()
else:
            self.modelOptimizer.zero_grad()
def schedulerStep(self):
self.scheduler0.step()
self.scheduler1.step()
self.scheduler2.step()
self.scheduler3.step()
self.scheduler4.step()
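# --- Descriptive note (added; not in the original) ---
# optimizerController keeps one SGD optimizer plus a cosine-annealing schedule per
# network stage. While epoch < trainEpoches[k], stage k is still updated by its own
# optimizer; once epoch reaches the last entry of trainEpoches, the single
# whole-model optimizer (modelOptimizer, fixed at finalLr) takes over. main() pairs
# this with model.module.frezzeFromShallowToDeep(...) so already-trained shallow
# stages stop receiving gradient updates.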
def main():
global args, best_prec1
args = parser.parse_args()
print(args)
torch.cuda.set_device(ini_device)
# create model
print("=> creating model '{}'".format(args.arch))
model = SENet.se_resnet50(num_classes=args.num_classes)
model = torch.nn.DataParallel(model, device_ids).cuda()
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
del checkpoint
else:
print("=> no checkpoint found at '{}'".format(args.resume))
return
else:
print(model)
cudnn.benchmark = True
train_loader, val_loader = getDataLoader(args.data)
    # define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
if args.evaluate:
print(validate(val_loader, model, criterion))
return
#trainEpoch = [4,8,16,32,64]
trainEpoch = [40,40,40,40,40]
opController = optimizerController(model, trainEpoch, iniLr=1e-1, finalLr=1e-4)
for epoch in range(args.start_epoch, args.epochs):
# train for one epoch
if(trainEpoch[0] <= epoch and epoch < trainEpoch[1]):
model.module.frezzeFromShallowToDeep(0)
elif(trainEpoch[1] <= epoch and epoch < trainEpoch[2]):
model.module.frezzeFromShallowToDeep(1)
elif(trainEpoch[2] <= epoch and epoch < trainEpoch[3]):
model.module.frezzeFromShallowToDeep(2)
elif(trainEpoch[3] <= epoch and epoch < trainEpoch[4]):
model.module.frezzeFromShallowToDeep(3)
else:
model.module.frezzeFromShallowToDeep(-1)
train(train_loader, model, criterion, opController, epoch)
opController.schedulerStep()
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, args.arch.lower())
def getDataLoader(dataDir):
# Data loading code
traindir = os.path.join(dataDir, 'train')
valdir = os.path.join(dataDir, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
return train_loader, val_loader
def train(train_loader, model, criterion, opController, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
bar = progressbar.progressbar(len(train_loader))
end = time.time()
start = time.perf_counter()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
# compute gradient and do SGD step
opController.optimizerZero_grad(epoch)
loss.backward()
opController.optimizerStep(epoch)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
#bar.clear()
if i % args.print_freq == 0:
print('\rEpoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
#bar.output(i+1)
#print()
    print('Epoch elapsed time {}s'.format(time.perf_counter() - start))
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
with torch.no_grad():
for i, (input, target) in enumerate(val_loader):
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
'''
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
'''
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename + '_latest.pth.tar')
if is_best:
shutil.copyfile(filename + '_latest.pth.tar', filename + '_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 20))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
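# --- Hedged worked example (added; comments only) ---
# For output = [[0.1, 0.9], [0.8, 0.2]] and target = [1, 0], the top-1 predictions
# are [1, 0], both correct, so accuracy(output, target, topk=(1,)) returns
# [tensor(100.)] (values are percentages, not fractions).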
if __name__ == '__main__':
main()
|