input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
# QuSpin modules
# numpy modules
import numpy as _np # generic math functions
# _scipy modules
import scipy as _scipy
import scipy.sparse as _sp
from scipy.sparse.linalg import expm_multiply as _expm_multiply
# multi-processing modules
from multiprocessing import Process as _Process
from multiprocessing import Queue as _Queue
from multiprocessing import Event as _Event
from joblib import Parallel as _Parallel
from joblib import delayed as _delayed
# six for python 2.* and 3.* dictionary compatibility
from six import iteritems as _iteritems
from six import itervalues as _itervalues
try:
from itertools import izip as _izip
except ImportError:
_izip = zip
__all__=["block_diag_hamiltonian","block_ops"]
def block_diag_hamiltonian(blocks,static,dynamic,basis_con,basis_args,dtype,basis_kwargs={},get_proj_kwargs={},get_proj=True,check_symm=True,check_herm=True,check_pcon=True):
    """Block-diagonalises a Hamiltonian obeying a symmetry.

    The symmetry blocks are created via the argument 'blocks'.

    Parameters
    -----------
    blocks : list/tuple/iterator
        Contains the symmetry blocks to construct the Hamiltonian with, as dictionaries.
    static : list
        Static operator list used to construct the block Hamiltonians. Follows `hamiltonian` format.
    dynamic : list
        Dynamic operator list used to construct the block Hamiltonians. Follows `hamiltonian` format.
    basis_con : :obj:`basis`
        Basis constructor used to build the basis objects to create the block diagonal Hamiltonians.
    basis_args : tuple
        This argument is passed as the first argument for `basis_con`.
        Contains all required arguments for the basis.
    dtype : 'type'
        The data type (e.g. numpy.float64) to construct the Hamiltonian with.
    get_proj : bool, optional
        Flag which tells the function to calculate and return the projector to the
        symmetry-block subspace requested. Default is 'True'.
    basis_kwargs : dict, optional
        Dictionary of keyword arguments to add when calling `basis` constructor.
    get_proj_kwargs : dict, optional
        Dictionary of keyword arguments for `basis.get_proj()` and `basis.get_vec()`.
    check_symm : bool, optional
        Enable/Disable symmetry check of the operators for the first Hamiltonian constructed.
    check_herm : bool, optional
        Enable/Disable hermiticity check of the operators for the first Hamiltonian constructed.
    check_pcon : bool, optional
        Enable/Disable particle conservation check of the operators for the first Hamiltonian constructed.

    Returns
    --------
    tuple
        P : scipy.sparse.csr
            Projector to the symmetry-block subspace (e.g. Fourier transform in case of momentum blocks).
            Only returned when `get_proj=True`.
        H : `obj`
            `hamiltonian` object in block diagonal form.

    Raises
    ------
    ValueError
        If `blocks` is not a list of dictionaries containing the symmetry sectors.
    """
    from ..operators import hamiltonian
    H_list = []
    P_list = []
    blocks = list(blocks)
    if all([isinstance(block,dict) for block in blocks]):
        # merge the extra basis kwargs into every block dict (mutates the dicts in `blocks`)
        [blocks[i].update(basis_kwargs) for i in range(len(blocks))]
        # deduplicate the dynamic entries by (function, args); one matrix list is
        # accumulated per unique entry and later stacked block-diagonally
        dynamic_list = [(tup[-2],tuple(tup[-1])) for tup in dynamic]
        dynamic_list = [([],f,f_args) for f,f_args in set(dynamic_list)]
        # NOTE(review): `set(...)` iterates in arbitrary order, and below the i-th value
        # of `H.dynamic` is paired with `dynamic_list[i]` by position -- this assumes the
        # two orders agree. Confirm against the `hamiltonian.dynamic` container ordering.
        static_mats = []
        for block in blocks:
            b = basis_con(*basis_args,**block)
            if get_proj:
                P = b.get_proj(dtype,**get_proj_kwargs)
                P_list.append(P)
            H = hamiltonian(static,dynamic,basis=b,dtype=dtype,check_symm=check_symm,check_herm=check_herm,check_pcon=check_pcon)
            # run the (expensive) operator checks only for the first block
            check_symm = False
            check_herm = False
            check_pcon = False
            static_mats.append(H.static.tocoo())
            for i,Hd in enumerate(_itervalues(H.dynamic)):
                dynamic_list[i][0].append(Hd.tocoo())
        # stack the per-block matrices into one block-diagonal operator
        static = [_sp.block_diag(static_mats,format="csr")]
        dynamic = []
        for mats,f,f_args in dynamic_list:
            mats = _sp.block_diag(mats,format="csr")
            dynamic.append([mats,f,f_args])
    else:
        raise ValueError("blocks must be list of dictionaries containing symmetry sectors.")
    if get_proj:
        # the horizontally-stacked projectors map block coordinates back to the full space
        P = _sp.hstack(P_list,format="csr")
        return P,hamiltonian(static,dynamic,copy=False)
    else:
        return hamiltonian(static,dynamic,copy=False)
def _worker(gen_func,args_list,q,e):
    """Drive several generator instances in lockstep and push results to a queue.

    One generator is created per argument tuple in `args_list`; at every step the
    tuple of their yielded values is put on queue `q`, after which the worker
    blocks on event `e` until the consumer signals it to continue.
    """
    sub_gens = [gen_func(*args) for args in args_list]
    for step in zip(*sub_gens):
        e.clear()
        q.put(step)
        e.wait()
    q.close()
def _generate_parallel(n_process,n_iter,gen_func,args_list):
    """
    Generator which spawns processes to run generators, then uses a queue for each process to retrieve
    the results which it then yields.

    `n_process` <= 1 runs everything serially in-process; otherwise `args_list` is split
    over `n_process` workers and `n_iter` combined steps are pulled from them in lockstep.
    """
    n_items = len(args_list)
    # calculate how to distribute generators over processes.
    # NOTE(review): when n_process <= 0 and n_items > 0 neither branch binds n_pp/n_left,
    # but the serial path below returns before they are used.
    if n_items <= n_process and n_process > 0:
        n_process = n_items
        n_pp = 1
        n_left = 1
    elif n_items > n_process and n_process > 0:
        n_pp = n_items//n_process
        # the first worker also takes the division remainder
        n_left = n_pp + n_items%n_process
    # if one process specified just do the generator without sub processes.
    if n_process <= 1:
        gens = []
        for arg in args_list:
            gens.append(gen_func(*arg))
        generator = _izip(*gens)
        for s in generator:
            yield s
        return
    # split up argument list: first sub-list gets n_left items, the rest n_pp each
    sub_lists = [args_list[0:n_left]]
    sub_lists.extend([ args_list[n_left + i*n_pp:n_left + (i+1)*n_pp] for i in range(n_process-1)])
    # create lists of queues, events, and processes.
    es = []
    qs = []
    ps = []
    for i in range(n_process):
        e = _Event()
        # maxsize 1 keeps each worker in lockstep with this consumer
        q = _Queue(1)
        p = _Process(target=_worker, args=(gen_func,sub_lists[i],q,e))
        # daemon workers die with the parent instead of orphaning
        p.daemon = True
        es.append(e)
        qs.append(q)
        ps.append(p)
    # start processes
    for p in ps:
        p.start()
    # for each time step, gather one partial result from every worker and
    # yield the flattened tuple in worker order
    for i in range(n_iter):
        s = []
        for q,e in _izip(qs,es):
            s.extend(q.get())
            e.set() # free process to do next calculation
        yield tuple(s)
    # end processes
    for p in ps:
        p.join()
def _evolve_gen(psi,H,t0,times,stack_state,imag_time,solver_name,solver_args):
    """Yield the state from `H.evolve` one time point at a time (iterator mode)."""
    yield from H.evolve(psi,t0,times,stack_state=stack_state,imag_time=imag_time,solver_name=solver_name,iterate=True,**solver_args)
def _expm_gen(psi,H,times,dt):
    """Yield `psi` propagated to every grid point in `times` via `_expm_multiply`.

    `H` is temporarily rescaled so that each `_expm_multiply` call advances the
    state by exactly one time step; exactly one state is yielded per entry of
    `times` (the first entry gets a single propagation from t=0 when nonzero).
    """
    if times[0] != 0:
        # bring the initial state up to the first requested time
        H *= times[0]
        psi = _expm_multiply(H, psi)
        H /= times[0]
    yield psi
    H *= dt
    for _ in times[1:]:
        psi = _expm_multiply(H, psi)
        yield psi
    H /= dt
def _block_evolve_iter(psi_blocks,H_list,P,t0,times,stack_state,imag_time,solver_name,solver_args,n_jobs):
    """Evolve every symmetry block in parallel; yield the state mapped back to the full space."""
    args_list = [(psi, H, t0, times, stack_state, imag_time, solver_name, solver_args)
                 for psi, H in zip(psi_blocks, H_list)]
    for block_states in _generate_parallel(n_jobs, len(times), _evolve_gen, args_list):
        # concatenate the per-block components, then project back to the full basis
        yield P.dot(_np.hstack(block_states))
def _block_expm_iter(psi_blocks,H_list,P,start,stop,num,endpoint,n_jobs):
    """Evolve every symmetry block via the matrix exponential; yield full-space states."""
    times, dt = _np.linspace(start, stop, num=num, endpoint=endpoint, retstep=True)
    args_list = [(psi, H, times, dt) for psi, H in zip(psi_blocks, H_list)]
    for block_states in _generate_parallel(n_jobs, len(times), _expm_gen, args_list):
        # concatenate the per-block components, then project back to the full basis
        yield P.dot(_np.hstack(block_states))
def _block_evolve_helper(H,psi,t0,times,stack_state,imag_time,solver_name,solver_args):
    """Helper for doing evolution without an iterator.

    Thin wrapper around `H.evolve` (non-iterator mode) so a block evolution can be
    dispatched through `joblib`'s `delayed`; returns whatever `H.evolve` returns.
    """
    return H.evolve(psi,t0,times,stack_state=stack_state,imag_time=imag_time,solver_name=solver_name,**solver_args)
class block_ops(object):
"""Splits up the dynamics of a state over various symmetry sectors.
Particularly useful if the initial state does NOT obey a symmetry but the hamiltonian does.
Moreover, we provide a multiprocessing option which allows the user to split up the dynamics
over multiple processing cores.
Can be used to calculate nonequal time correlators in symmetry-reduced sectors.
Notes
-----
The `block_ops` object is initialised only after calling the function methods of the class to save memory.
Examples
--------
The following sequence of examples uses the Bose-Hubbard model
.. math::
H=-J\\sum_j b^\\dagger_{j+1}b_j + \\mathrm{h.c.} + \\frac{U}{2}\\sum_j n_j(n_j-1)
to show how to use the `block_ops` class to evolve a Fock state, which explicitly breaks
translational invariance, by decomposing it in all momentum blocks, time-evolving the projections, and putting
the state back together in the Fock basis in the end. We use the time-evolved state to measure the local density operator :math:`n_j`.
The code snippets for the time evolution can be found in the examples for the function methods of the class.
The code snippet below initiates the class, and is required to run the example codes for the function methods.
.. literalinclude:: ../../doc_examples/block_ops-example.py
:linenos:
:language: python
:lines: 7-55
"""
def __init__(self,blocks,static,dynamic,basis_con,basis_args,dtype,basis_kwargs={},get_proj_kwargs={},save_previous_data=True,compute_all_blocks=False,check_symm=True,check_herm=True,check_pcon=True):
    """Instantiates the `block_ops` class.

    Parameters
    -----------
    blocks : list/tuple/iterator
        Contains the symmetry blocks to construct the Hamiltonian with,
        as dictionaries or `hamiltonian` objects.
    static : list
        Static operator list used to construct the block Hamiltonians. Follows `hamiltonian` format.
    dynamic : list
        Dynamic operator list used to construct the block Hamiltonians. Follows `hamiltonian` format.
    basis_con : :obj:`basis`
        Basis constructor used to build the basis objects to create the block diagonal Hamiltonians.
    basis_args : tuple
        This argument is passed as the first argument for `basis_con`.
        Contains all required arguments for the basis.
    dtype : 'type'
        The data type (e.g. numpy.float64) to construct the Hamiltonian with.
    basis_kwargs : dict, optional
        Dictionary of keyword arguments to add when calling `basis` constructor.
    get_proj_kwargs : dict, optional
        Dictionary of keyword arguments for `basis.get_proj()` and `basis.get_vec()`.
    save_previous_data : bool, optional
        If `True` (default), previously calculated block Hamiltonians are kept so
        later evolutions in the same block do not rebuild them.
    compute_all_blocks : bool, optional
        Flag which tells the `block_ops` class to compute all symmetry blocks at
        initialization. Default is `False`. Forces `save_previous_data = True`.
    check_symm : bool, optional
        Enable/Disable symmetry check of the operators for the first Hamiltonian constructed.
    check_herm : bool, optional
        Enable/Disable hermiticity check of the operators for the first Hamiltonian constructed.
    check_pcon : bool, optional
        Enable/Disable particle conservation check of the operators for the first Hamiltonian constructed.
    """
    self._basis_dict = {}   # str(block) -> basis object (only sectors with Ns > 0)
    self._H_dict = {}       # str(block) -> hamiltonian, filled lazily by other methods
    self._P_dict = {}       # str(block) -> projector, filled lazily by other methods
    self._dtype=dtype
    self._save = save_previous_data
    self._static = static
    self._dynamic = dynamic
    self._checks = {"check_symm":check_symm,"check_herm":check_herm,"check_pcon":check_pcon}
    self._no_checks = {"check_symm":False,"check_herm":False,"check_pcon":False}
    self._checked = False   # operator checks are run once, then _no_checks is used
    self._get_proj_kwargs = get_proj_kwargs
    for block in blocks:
        # NOTE: mutates the caller's block dictionaries in place
        block.update(basis_kwargs)
        b = basis_con(*basis_args,**block)
        if b.Ns > 0:
            # keep only non-empty symmetry sectors
            self._basis_dict[str(block)]=b
    if compute_all_blocks:
        self._save=True
        self.compute_all_blocks()
@property
def dtype(self):
    """type: numpy data type the block Hamiltonians are stored in."""
    return self._dtype
@property
def save_previous_data(self):
    """bool: reflects the state of the optional argument `save_previous_data`."""
    return self._save
@property
def H_dict(self):
    """dict: block Hamiltonians keyed by symmetry block, e.g. `str(block)` where
    `block` is a block dictionary variable.
    """
    return self._H_dict
@property
def P_dict(self):
    """dict: block projectors keyed by symmetry block, e.g. `str(block)` where
    `block` is a block dictionary variable.
    """
    return self._P_dict
@property
def basis_dict(self):
    """dict: `basis` objects keyed by symmetry block, e.g. `str(block)` where
    `block` is a block dictionary variable.
    """
    return self._basis_dict
@property
def static(self):
    """list: static operators used to construct the symmetry-block Hamiltonians.

    Returned as a shallow copy so callers cannot mutate the internal list.
    """
    return list(self._static)
@property
def dynamic(self):
    """list: dynamic operators used to construct the symmetry-block Hamiltonians.

    Returned as a shallow copy so callers cannot mutate the internal list.
    """
    return list(self._dynamic)
def update_blocks(self,blocks,basis_con,basis_args,compute_all_blocks=False):
"""Allows to update the `blocks` variable of the class.
Parameters
-----------
blocks : list/tuple/iterator
Contains | |
self.ruler_delta <= self.MIN_SCALE_DELTA:
return False
delta = self.upbound - self.lowbound
rate = delta /self.ruler_delta
if rate < self.ZOOM_IN_RATE:
return True
def change_upbound_in_ruler(self, value):
    """Animate the upper bound sliding to `value`, then commit the new bound."""
    self.set_action('上限設為{}'.format(value))
    step = (value - self.upbound) / self.parent.animate_num
    moving = self.upbound
    for _ in range(self.parent.animate_num):
        moving += step
        self.draw_ruler(self.lowbound, round(moving))
    self.upbound = value
def change_lowbound_in_ruler(self, value):
    """Animate the lower bound sliding to `value`, then commit the new bound."""
    self.set_action('下限設為{}'.format(value))
    step = (value - self.lowbound) / self.parent.animate_num
    moving = self.lowbound
    for _ in range(self.parent.animate_num):
        moving += step
        self.draw_ruler(round(moving), self.upbound)
    self.lowbound = value
# def set_lowbound(self, value):
# if value == self.upbound or value == self.lowbound:
# return
# if not self.lowbound < value < self.upbound:
# raise 搜尋猜數錯誤(f"exceed ruler range")
# for n in range(self.lowbound, value+1):
# self.draw_ruler( n, self.upbound)
# self.lowbound = value
def set_action(self, text=''):
    """Show `text` in the on-canvas action label and refresh the display."""
    self.parent.canvas.itemconfigure(self.action_textid,
                                     text=text,
                                     state=tk.NORMAL)
    self.parent.canvas.update()
def hide_action(self):
    """Hide the on-canvas action label and refresh the display."""
    self.parent.canvas.itemconfigure(self.action_textid,
                                     state=tk.HIDDEN)
    self.parent.canvas.update()
def set_ruler_range(self, lower_num, upper_num):
    """Change the ruler's displayed scale to [lower_num, upper_num], animating the bar.

    No-op when the scale is unchanged.  Raises 搜尋猜數錯誤 when the current
    bounds fall outside the new scale, or the range is inverted/empty.
    """
    if lower_num == self.ruler_lowbound and upper_num == self.ruler_upbound:
        return
    if self.lowbound < lower_num or self.upbound > upper_num:
        raise 搜尋猜數錯誤("上下限不能在尺度範圍外")
    if lower_num >= upper_num:
        raise 搜尋猜數錯誤("lower_num is bigger")
    # announce the scale change on the canvas and hide elements that will move
    text = '刻度改為\n{}~{}'.format(lower_num, upper_num)
    self.set_action(text)
    self.hide_scale()
    self.hide_searcher()
    self.current_color = self.CHANGE_SCALE_COLOR
    # animate the bar from its y-positions on the old scale to those on the new scale
    old_low_y = self.num2y(self.ruler_lowbound, self.ruler_delta, self.lowbound)
    old_up_y = self.num2y(self.ruler_lowbound, self.ruler_delta, self.upbound)
    new_low_y = self.num2y(lower_num, upper_num-lower_num, self.lowbound)
    new_up_y = self.num2y(lower_num, upper_num-lower_num, self.upbound)
    self.animate_both_bound(old_low_y, old_up_y, new_low_y, new_up_y)
    self.hide_action()
    self.current_color = self.BAR_COLOR
    # commit the new ruler bounds, then redraw scale labels, bar and searcher
    self.ruler_lowbound = lower_num
    self.ruler_upbound = upper_num
    self.ruler_delta = self.ruler_upbound - self.ruler_lowbound
    self.draw_scale()
    self.draw_ruler(self.lowbound, self.upbound)
    self.set_searcher(self.searcher_num)
def animate_both_bound(self, old_low_y, old_up_y, new_low_y, new_up_y):
    """Animate both ends of the bar from their old canvas y-positions to the new ones."""
    self.hide_gizmo()
    frames = self.parent.animate_num
    low_step = (new_low_y - old_low_y) / frames
    up_step = (new_up_y - old_up_y) / frames
    low_y, up_y = old_low_y, old_up_y
    for _ in range(frames):
        low_y += low_step
        up_y += up_step
        self.redraw_bar(round(low_y), round(up_y))
        self.parent.canvas.update()
        self.delay()
def create_scale(self):
    """Create the ruler tick labels (one per tenth of the range) and remember their ids."""
    tick = self.ruler_delta // 10
    for value in range(self.ruler_lowbound, self.ruler_upbound + tick, tick):
        y = self.num2y(self.ruler_lowbound, self.ruler_delta, value)
        text_id = self.parent.canvas.create_text(
            self.RULER_SCALE_X,
            y,
            anchor=tk.W,
            justify=tk.LEFT,
            state=tk.NORMAL,
            font=self.parent.scale_font,
            fill=self.SCALE_COLOR,
            text=str(value))
        self.ruler_scale_id_list.append(text_id)
    self.parent.canvas.update()
def hide_scale(self):
    """Hide every ruler tick label."""
    for item_id in self.ruler_scale_id_list:
        self.parent.canvas.itemconfigure(item_id, state=tk.HIDDEN)
    self.parent.canvas.update()
def draw_scale(self):
    """Relabel and show the existing tick items for the current ruler range."""
    tick = self.ruler_delta // 10
    values = range(self.ruler_lowbound, self.ruler_upbound + tick, tick)
    for text_id, value in zip(self.ruler_scale_id_list, values):
        self.parent.canvas.itemconfigure(
            text_id,
            state=tk.NORMAL,
            text=str(value))
    self.parent.canvas.update()
def create_searcher(self):
    """Create the searcher arrow image and its text label at the ruler's midpoint."""
    middle = self.ruler_lowbound + self.ruler_delta//2
    y = self.num2y(self.ruler_lowbound, self.ruler_delta, middle)
    # load the arrow image; the PhotoImage must stay referenced on self,
    # otherwise Tk garbage-collects the pixels and the image goes blank
    path = Path(__file__).parent / 'images' / (self.ARROW_NAME + '.png')
    _im = Image.open(path)
    self.arrow_img = ImageTk.PhotoImage(_im)
    self.arrow_id = self.parent.canvas.create_image(
        self.ARROW_X,
        y,
        image=self.arrow_img,
        anchor=tk.W,
        state=tk.NORMAL)
    self.arrow_textid = self.parent.canvas.create_text(
        self.ARROW_TEXT_X,
        y,
        anchor=tk.W,
        justify=tk.LEFT,
        state=tk.NORMAL,
        font=self.parent.normal_font,
        text='搜尋')
    self.parent.canvas.update()
def hide_searcher(self):
    """Hide the search arrow and its text label."""
    canvas = self.parent.canvas
    for item in (self.arrow_id, self.arrow_textid):
        canvas.itemconfigure(item, state=tk.HIDDEN)
    canvas.update()
def show_searcher(self):
    """Show the search arrow and its text label."""
    canvas = self.parent.canvas
    for item in (self.arrow_id, self.arrow_textid):
        canvas.itemconfigure(item, state=tk.NORMAL)
    canvas.update()
def set_searcher(self, value):
    """Point the searcher arrow at `value`, zooming the ruler in when appropriate.

    Raises 搜尋猜數錯誤 when `value` is not an int.  The out-of-scale branches
    are treated as caller errors (callers are expected to keep `value` within
    the ruler range) and only assert.
    """
    if type(value) is not int:
        raise 搜尋猜數錯誤('搜尋數字必需為整數')
    self.hide_comparator()
    self.searcher_num = value
    if self.ruler_lowbound <= value <= self.ruler_upbound:
        self.draw_searcher(value)
        # zoom the ruler in once the bounds are close enough together,
        # but only when the searcher still fits on the zoomed scale
        if self.check_need_zoomin_scale():
            low, up = self.calc_ruler_range(self.lowbound,
                                            self.upbound)
            if low <= value <= up:
                self.set_ruler_range(low, up)
    elif value > self.ruler_upbound:
        # NOTE(review): assert is stripped under `python -O`; consider raising
        # 搜尋猜數錯誤 here instead if this branch is ever reachable.
        assert False, 'should checked'
    elif value < self.ruler_lowbound:
        assert False, 'should checked'
def draw_searcher(self, value):
    """Move the search arrow and its label to `value` on the ruler and show both."""
    y = self.num2y(self.ruler_lowbound, self.ruler_delta, value)
    canvas = self.parent.canvas
    canvas.coords(self.arrow_id, self.ARROW_X, y)
    canvas.itemconfigure(self.arrow_id, state=tk.NORMAL)
    canvas.coords(self.arrow_textid, self.ARROW_TEXT_X, y)
    canvas.itemconfigure(self.arrow_textid,
                         text='搜尋\n{}'.format(self.searcher_num),
                         state=tk.NORMAL)
    canvas.update()
def _load_comparator_image(self, image_name):
    """Load `<image_name>.png` from the images folder and place it hidden on the canvas.

    Returns (PhotoImage, canvas_item_id).  The PhotoImage must be kept referenced
    (it is stored on self by the caller), otherwise Tk garbage-collects the pixels.
    """
    path = Path(__file__).parent / 'images' / (image_name + '.png')
    photo = ImageTk.PhotoImage(Image.open(path))
    item_id = self.parent.canvas.create_image(
        0, 0,
        image=photo,
        anchor=tk.CENTER,
        state=tk.HIDDEN)
    return photo, item_id

def create_comparator(self):
    """Create the hidden up-rocket, down-rocket and bulb images used to display comparisons."""
    self.uprocket_img, self.uprocket_id = self._load_comparator_image(self.UP_ROCKET_NAME)
    self.lowrocket_img, self.lowrocket_id = self._load_comparator_image(self.LOW_ROCKET_NAME)
    self.bulb_img, self.bulb_id = self._load_comparator_image(self.BULB_NAME)
def _animate_comparator(self, item_id, start_y, shift):
    """Show canvas item `item_id` and glide it from `start_y` upward by `shift` pixels.

    A negative `shift` moves the item downward.  One frame is drawn per
    `self.parent.animate_num` step, with a short delay between frames.
    """
    self.parent.canvas.itemconfigure(item_id, state=tk.NORMAL)
    step = shift / self.parent.animate_num
    y = start_y
    for _ in range(self.parent.animate_num):
        y -= step
        self.parent.canvas.coords(item_id,
                                  self.COMPARATOR_X,
                                  round(y))
        self.parent.canvas.update()
        self.delay()

def draw_comparator(self, op):
    """Animate the indicator matching comparison `op` ('>', '<' or '==') near the searcher.

    Raises 搜尋猜數錯誤 for any other `op` value.
    """
    y = self.num2y(self.ruler_lowbound, self.ruler_delta, self.searcher_num)
    if op == '>':
        # answer is bigger: rocket rises above the searcher position
        self._animate_comparator(self.uprocket_id, y, self.COMPARATOR_SHIFTY)
    elif op == '<':
        # answer is smaller: rocket drops below the searcher position
        self._animate_comparator(self.lowrocket_id, y, -self.COMPARATOR_SHIFTY)
    elif op == '==':
        # answer found: bulb rises slightly above the searcher position
        self._animate_comparator(self.bulb_id, y, self.BULB_SHIFTY)
    else:
        raise 搜尋猜數錯誤('\n\nunknown comparision op')
    self.parent.canvas.update()
def hide_comparator(self):
    """Hide all three comparison indicator images (no canvas refresh)."""
    for item in (self.uprocket_id, self.lowrocket_id, self.bulb_id):
        self.parent.canvas.itemconfigure(item, state=tk.HIDDEN)
def gt_cmp(self):
    """Record the probe, report whether the answer is greater than the searcher value."""
    self.parent.statistic.add_search_num(self.searcher_num)
    answer = int(self.parent.puzzle_answer, 2)
    is_greater = answer > self.searcher_num
    if is_greater:
        self.set_action('答案大於{}'.format(self.searcher_num))
        self.draw_comparator('>')
    else:
        self.set_action('答案不大於{}'.format(self.searcher_num))
    self.delay()
    return is_greater
def eq_cmp(self):
    """Record the probe, report whether the answer equals the searcher value."""
    self.parent.statistic.add_search_num(self.searcher_num)
    answer = int(self.parent.puzzle_answer, 2)
    is_equal = answer == self.searcher_num
    if is_equal:
        self.set_action('答案等於{}'.format(self.searcher_num))
        self.draw_comparator('==')
    else:
        self.set_action('答案不等於{}'.format(self.searcher_num))
    self.delay()
    return is_equal
def lt_cmp(self):
    """Record the probe, report whether the answer is smaller than the searcher value."""
    self.parent.statistic.add_search_num(self.searcher_num)
    answer = int(self.parent.puzzle_answer, 2)
    is_smaller = answer < self.searcher_num
    if is_smaller:
        self.set_action('答案小於{}'.format(self.searcher_num))
        self.draw_comparator('<')
    else:
        self.set_action('答案不小於{}'.format(self.searcher_num))
    self.delay()
    return is_smaller
def draw_ruler(self, lower_num, upper_num):
    """Redraw the bar between `lower_num` and `upper_num` and refresh the bound gizmos.

    Raises 搜尋猜數錯誤 when the bounds are inverted or not plain ints.
    """
    if lower_num > upper_num:
        raise 搜尋猜數錯誤('lowernum > upper_num')
    if type(lower_num) is not int or type(upper_num) is not int:
        raise 搜尋猜數錯誤(' lowernum or upper_num not int')
    # lower numbers sit lower on screen, i.e. at the larger y value
    bottom_y = self.num2y(self.ruler_lowbound, self.ruler_delta, lower_num)
    top_y = self.num2y(self.ruler_lowbound, self.ruler_delta, upper_num)
    self.redraw_bar(bottom_y, top_y)
    self.set_gizmo(lower_num, upper_num, bottom_y, top_y)
    self.parent.canvas.update()
    self.delay()
def redraw_bar(self, big_y, small_y):
    """Delete the old bar rectangles (if any) and draw fresh ones spanning the two y values."""
    canvas = self.parent.canvas
    for attr in ('bar_id', 'thin_bar_id'):
        old_item = getattr(self, attr)
        if old_item is not None:
            canvas.delete(old_item)
            setattr(self, attr, None)
    self.bar_id = canvas.create_rectangle(
        self.BAR_X,
        small_y,
        self.BAR_X_RIGHT,
        big_y,
        width=0,
        fill=self.current_color,)
    self.thin_bar_id = canvas.create_rectangle(
        self.THIN_BAR_X,
        small_y,
        self.THIN_BAR_X_RIGHT,
        big_y,
        width=0,
        fill=self.current_color,)
def set_gizmo(self, lower_num, upper_num, big_y, small_y):
    """Position and show the bound guide lines, dots and text labels.

    `big_y` is the canvas y of the lower bound (larger y = lower on screen),
    `small_y` the canvas y of the upper bound.
    """
    # upper bound: guide line and dot
    self.parent.canvas.coords(self.upbound_lineid,
                              self.LINE_X,
                              small_y,
                              self.THIN_BAR_X_RIGHT,
                              small_y, )
    self.parent.canvas.itemconfigure(self.upbound_lineid,
                                     state=tk.NORMAL,
                                     fill=self.current_color,)
    # dot is an oval drawn in a roughly 11x11 box centred on the line end
    self.parent.canvas.coords(self.upbound_dotid,
                              self.LINE_X - 6, small_y - 6,
                              self.LINE_X + 5, small_y + 5)
    self.parent.canvas.itemconfigure(self.upbound_dotid,
                                     state=tk.NORMAL,
                                     fill=self.current_color,)
    # lower bound: guide line and dot
    self.parent.canvas.coords(self.lowbound_lineid,
                              self.LINE_X,
                              big_y,
                              self.THIN_BAR_X_RIGHT,
                              big_y, )
    self.parent.canvas.itemconfigure(self.lowbound_lineid,
                                     state=tk.NORMAL,
                                     fill=self.current_color,)
    self.parent.canvas.coords(self.lowbound_dotid,
                              self.LINE_X - 6, big_y - 6,
                              self.LINE_X + 5, big_y + 5)
    self.parent.canvas.itemconfigure(self.lowbound_dotid,
                                     state=tk.NORMAL,
                                     fill=self.current_color,)
    # bound text labels, shifted so they sit beside their lines
    self.parent.canvas.coords(self.upbound_textid,
                              self.BOUND_TEXT_X,
                              small_y + self.UPBOUND_TEXT_SHIFTY,)
    self.parent.canvas.itemconfigure(self.upbound_textid,
                                     state=tk.NORMAL,
                                     text='上限\n{}'.format(upper_num) )
    self.parent.canvas.coords(self.lowbound_textid,
                              self.BOUND_TEXT_X,
                              big_y + self.LOWBOUND_TEXT_SHIFTY,)
    self.parent.canvas.itemconfigure(self.lowbound_textid,
                                     state=tk.NORMAL,
                                     text='{}\n下限'.format(lower_num) )
def hide_gizmo(self):
    """Hide the bound guide lines, dots and text labels (no canvas refresh)."""
    canvas = self.parent.canvas
    for item in (self.upbound_lineid,
                 self.upbound_dotid,
                 self.lowbound_lineid,
                 self.lowbound_dotid,
                 self.upbound_textid,
                 self.lowbound_textid):
        canvas.itemconfigure(item, state=tk.HIDDEN)
def num2y(self, lowbound, delta, n):
    """Map number `n` on a ruler starting at `lowbound` spanning `delta` to a canvas y.

    Larger numbers map to smaller y (higher on screen); the result is truncated
    to an int.
    """
    offset = (n - lowbound) * self.BAR_MAX_HEIGHT / delta
    return int(self.BAR_MAX_Y - offset)
def delay(self, sec=0.0001):
    """Sleep `sec` seconds; used to pace animation frames."""
    time.sleep(sec)
def calc_ruler_range(self, lower_num, upper_num):
    """Compute a 'nice' power-of-ten ruler range covering [lower_num, upper_num].

    Returns (base, base + 10**k) where the span 10**k is at least 100 and at
    least as large as the input span, and `base` is aligned one decade below
    the span (never finer than 100).  Raises 搜尋猜數錯誤 when
    upper_num <= lower_num.
    """
    if upper_num <= lower_num:
        raise 搜尋猜數錯誤
    # smallest power of ten that covers the span, but never below 10**2
    span_exp = math.ceil(math.log10(upper_num - lower_num))
    span_exp = max(span_exp, 2)
    # align the base to one decade below the span (at least two decades)
    align_exp = max(span_exp - 1, 2)
    base = int(lower_num - lower_num % (10 ** align_exp))
    # widen once more if the aligned base no longer covers upper_num
    if upper_num > base + 10 ** span_exp:
        span_exp += 1
    return base, base + 10 ** span_exp
class AnswerCmp:
def __init__(self, parent):
self.parent = | |
timezone.localize(eta_or_arrival)
logger.debug("Arrival time for this sailing was {}".format(eta_or_arrival))
arrived = True
else:
logger.debug("No ETA or arrival time")
eta_or_arrival = None
arrived = False
# Get or create a Status object for this sailing
status_o, created = Status.objects.get_or_create(
status=status
)
# Log if we found or created a new Status object
if created:
logger.info("Created status {}".format(status))
else:
logger.debug("Found status {}".format(status))
# Get or create a Sailing object for this sailing
sailing_o, created = Sailing.objects.get_or_create(
route=route_o,
scheduled_departure=sched
)
# Log if we found or created a new Destination object
if created:
logger.info("Created sailing {}".format(sailing_o))
else:
logger.debug("Found sailing {}".format(sailing_o))
# Since we track changes to the sailing, we now need to see if anything's changed between the details
# we just parsed and the details we already had for this sailing
# Check if the ferry has changed
if sailing_o.ferry != ferry_o:
# Ferry has changed
logger.debug("Ferry has changed ({} to {})".format(
sailing_o.ferry, ferry_o
))
# Create a FerryEvent
event_o = FerryEvent(
sailing=sailing_o,
old_ferry=sailing_o.ferry,
new_ferry=ferry_o
)
sailing_o.ferry = ferry_o
event_o.save()
sailing_o.save()
# Check if the actual departure time has changed (and yes, it can apparently)
if sailing_o.actual_departure != actual:
# Actual departure time has changed
logger.debug("Actual departure has changed ({} to {})".format(
sailing_o.actual_departure, actual
))
# But wait! Has the actual departure time disappeared?
if not actual:
# Yes, this can happen too
logger.debug("Actual departure time has been removed")
# Create a new DepartureTimeEvent
event_o = DepartureTimeEvent(
sailing=sailing_o,
old_departure=sailing_o.actual_departure,
new_departure=actual
)
sailing_o.actual_departure = actual
event_o.save()
sailing_o.save()
# Check if the sailing has departed (or un-departed - this can probably happen too)
if sailing_o.departed != departed:
# Departure status has changed
logger.debug("Departed has changed ({} to {})".format(
sailing_o.departed, departed
))
# Create a new DepartedEvent
event_o = DepartedEvent(
sailing=sailing_o
)
sailing_o.departed = departed
event_o.save()
sailing_o.save()
# Check if the ETA or arrival time has changed
if sailing_o.eta_or_arrival_time != eta_or_arrival:
# The ETA or arrival time has changed
logger.debug("ETA or arrival time has changed ({} to {})".format(
sailing_o.eta_or_arrival_time, eta_or_arrival
))
# Create a new ArrivalTimeEvent
event_o = ArrivalTimeEvent(
sailing=sailing_o,
old_arrival=sailing_o.eta_or_arrival_time,
new_arrival=eta_or_arrival,
is_eta=not arrived
)
sailing_o.eta_or_arrival_time = eta_or_arrival
event_o.save()
sailing_o.save()
# Check if the sailing has arrived
if sailing_o.arrived != arrived:
# Arrival status has changed
logger.debug("Arrival has changed ({} to {})".format(
sailing_o.arrived, arrived
))
# Create a new ArrivedEvent
event_o = ArrivedEvent(
sailing=sailing_o
)
sailing_o.arrived = arrived
event_o.save()
sailing_o.save()
# Check if the sailing status has changed
if sailing_o.status != status_o:
# Sailing status has changed
logger.debug("Status has changed ({} to {})".format(
sailing_o.status, status_o
))
# Create a new StatusEvent
event_o = StatusEvent(
sailing=sailing_o,
old_status=sailing_o.status,
new_status=status_o
)
sailing_o.status = status_o
# If the status is now Cancelled, additionally create a CancelledEvent
# TODO - do we need to set the cancelled value here?
if sailing_o.status == "Cancelled":
cancelled_o = CancelledEvent(
sailing=sailing_o
)
cancelled_o.save()
event_o.save()
sailing_o.save()
# We've finished parsing and updating the departures information
run.set_status("Completed", successful=True)
logger.info("Finished retrieving and processing departures")
return True
def get_current_conditions(input_file: str=None) -> bool:
""" Pull data from the current conditions/"at-a-glance" page on the BC Ferries website.
This will query https://orca.bcferries.com/cc/marqui/at-a-glance.asp and parse it. This
page shows similar details to the actualDepartures.asp website, but shows some extra
information, including how full the next few sailings are and how many car and oversize
waits there are.
:param input_file: optional local file to read from
:type input_file: str
:returns: whether it succeeded or failed
:rtype: bool
"""
# Start a ConditionsRun
run = ConditionsRun()
# Build the URL to pull data from
url = "{}/{}".format(settings.BCF_BASE_URL, "at-a-glance.asp")
if input_file:
# If an input file is given, read from that instead
# TODO - should be in a context
fp = open(input_file, 'r')
data = fp.read()
else:
try:
# Request page
logger.info("Querying BCF for data...")
response = requests.get(url)
if response.status_code == 200:
logger.info("Successfully queried BCF for data")
data = response.text
else:
# Something went wrong (i.e. we got something other than a 200 OK)
logger.error("Could not retrieve details from the BC Ferries website: {}".format(response.status_code))
run.set_status("Could not retrieve details from the BC Ferries website (non-200 status code)")
run.successful = False
run.save()
return False
except:
# TODO - replace bare except
logger.error("Could not retrieve details from the BC Ferries website.")
run.set_status("Could not retrieve details from the BC Ferries website (unknown error)")
run.successful = False
run.save()
return False
# Data retrieved
run.set_status("Data retrieved from BCF")
raw_html = ConditionsRawHTML(
run=run,
data=data
)
raw_html.save()
# Load data into the BeautifulSoup parser
s = BeautifulSoup(data, 'html.parser')
# TODO - this might be unused
terminals = {}
j_routes = []
full_routes = []
# Iterate over each section
for section in s.find_all('tbody'):
current_terminal = None
if section.span:
terminal_name = section.span.contents[0]
terminal_id = terminal_name.lower().replace(' ', '_')
else:
# Parse out each sailing
previously_parsed = None
# Iterate over each sailing row
for route in section.find_all('tr', recursive=False)[1:-1]:
j_route = {}
route_name = route.td.text
details = route.find_all('td', recursive=False)
fully_booked = False
route_id = None
try:
# Match the route ID
route_id = re.match('.*route=(\d+)&dept=(\w+).*', details[7].a.get('href')).groups()[0]
except IndexError as e:
# If matching the above didn't work, it's possible the sailing is fully booked
if 'Vehicle space on this route is fully booked' in details[0].text:
logger.debug("All sailings today on the previously-parsed route are fully booked")
logger.debug("This was: {}".format(previously_parsed))
fully_booked = True
full_routes.append(previously_parsed)
else:
# Who knows?
logger.error("Unknown error: {}".format(e))
if route_id:
# Store the route ID and route name
j_route['route_id'] = route_id
j_route['route_name'] = route_name
if details[1].div.text == "N/A":
# Sometimes the sailing details are "N/A" - so not much we can do with them
next_sailing = "N/A"
percent_full = "N/A"
j_route['sailings'] = None
else:
# Parse out each sailing
sailing_details = {}
sailings = details[1].div.table.find_all('tr')
j_sailings = []
# Iterate over each sailing
for sailing in sailings:
# Get the next sailing
next_sailing = sailing.td.text
# Check if the sailing has been cancelled
if sailing.td.next_sibling.text == "Cancelled":
# Sailing is cancelled
j_sailings.append({
'time': next_sailing,
'cancelled': True
})
else:
# Sailing isn't cancelled, so parse out how full this sailing is
percent_full = int(sailing.td.next_sibling.text.split('% ')[0])
sailing_details.update({next_sailing: percent_full})
j_sailings.append({
'time': next_sailing,
'percent_full': percent_full,
})
# Save this sailings
j_route['sailings'] = j_sailings
# Parse out the number of car and oversize waits
car_waits = int(details[2].text.rstrip('\n'))
oversize_waits = int(details[3].text.rstrip('\n'))
# ...and save those too
j_route['car_waits'] = car_waits
j_route['oversize_waits'] = oversize_waits
# Parse out later sailings. These are the ones which we don't have full
# details for yet
next_sailings = details[4].text.lstrip(' ').split(' ')
j_route['later_sailings'] = next_sailings
# Save all the sailings for this route
j_routes.append(j_route)
previously_parsed = route_name
# Iterate over each route
for route in j_routes:
# Get the route name
route_name = route['route_name']
logger.debug("Found route {}".format(route_name))
# Get the Route object for this route
route_o = Route.objects.get(name=route_name)
# Check if this route is full for today
if route_name in full_routes:
logger.debug("All of today's sailings are now full")
# Set all of today's sailings to 100%
for full_sailing in route_o.sailings_today:
logger.debug("Setting sailing {} to 100% full...".format(
full_sailing
))
# If this sailing wasn't previously full, set it to full
if full_sailing.percent_full != 100:
logger.debug("Percent full has changed ({} -> {})".format(
full_sailing.percent_full, 100
))
# Create a PercentFullEvent
percentfull_o = PercentFullEvent(
sailing=full_sailing,
old_value=full_sailing.percent_full,
new_value=100
)
full_sailing.percent_full = 100
full_sailing.save()
percentfull_o.save()
else:
logger.debug("Sailing was already 100% full")
# Get the car and oversize waits
car_waits = route.get('car_waits', None)
oversize_waits = route.get('oversize_waits', None)
# Check if the waits have changed
if route_o.car_waits != car_waits:
# Car waits has changed
logger.debug("Car waits has changed ({} -> {})".format(
route_o.car_waits, car_waits
))
# Create a CarWaitEvent
carwaitevent_o = CarWaitEvent(
route=route_o,
old_value=route_o.car_waits,
new_value=car_waits
)
route_o.car_waits = car_waits
carwaitevent_o.save()
route_o.save()
if route_o.oversize_waits != oversize_waits:
# Oversize waits has changed
logger.debug("Oversize waits has changed ({} -> {})".format(
route_o.oversize_waits, oversize_waits
))
# Create a OversizeWaitEvent
oversizewaitevent_o = OversizeWaitEvent(
route=route_o,
old_value=route_o.oversize_waits,
new_value=oversize_waits
)
route_o.oversize_waits = oversize_waits
oversizewaitevent_o.save()
route_o.save()
# Check if there are any sailings for this route today
if not | |
Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.366125,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 9.90249,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 1.41703e-05,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.2027,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 8.09612e-05,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.406489,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.655651,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.330951,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.39309,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.464892,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.73305,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 1.52953e-05,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.01705,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.123298,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.126095,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.123313,
'Execution Unit/Register Files/Runtime Dynamic': 0.143145,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.259757,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.839221,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.98353,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00160622,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00160622,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00141491,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000556425,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00181136,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00643871,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0148325,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.121218,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.302917,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.411712,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96396,
'Instruction Fetch Unit/Runtime Dynamic': 0.857118,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0205201,
'L2/Runtime Dynamic': 0.00867599,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 5.95894,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.27982,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.152762,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.152762,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 6.68032,
'Load Store Unit/Runtime Dynamic': 3.18595,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.376685,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.75337,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.133687,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.13398,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.049702,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.785754,
'Memory Management Unit/Runtime Dynamic': 0.183682,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 24.7731,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 3.97702e-05,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0183401,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.215443,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': | |
for
details. Defaults to False.
name : str or None (optional)
Name for model, to be used in plots and in saving files.
load_from_file : str or None (optional)
This module uses an iterative approach to fitting models.
For complicated models with lots of data, each iteration
can take a long time (though the number of iterations is
            typically less than 100). If the user wishes to pause
            after the end of an iteration, they can pick up where
            they left off by saving results (see the save_flag in .fit)
            and loading them to start the next iterations. Specifying
            this option supersedes all other parameters.
Returns
-------
mdl : Generalized Additive Model object
"""
if load_from_file is not None:
self._load(load_from_file)
return
if family is None:
raise ValueError('Family not specified.')
elif family not in FAMILIES:
raise ValueError('{} family not supported'.format(family))
elif family == 'exponential':
# Exponential is a special case of Gamma with a dispersion of 1.
self._family = 'gamma'
dispersion = 1.
else:
self._family = family
if link is None:
self._link = CANONICAL_LINKS[family]
elif link in LINKS:
self._link = link
else:
raise ValueError('{} link not supported'.format(link))
if dispersion is not None:
self._known_dispersion = True
self._dispersion = dispersion
elif (self._family in FAMILIES_WITH_KNOWN_DISPERSIONS.keys()
and not estimate_overdispersion):
self._known_dispersion = True
self._dispersion = FAMILIES_WITH_KNOWN_DISPERSIONS[self._family]
else:
self._known_dispersion = False
if self._link == 'identity':
self._eval_link = lambda x: x
self._eval_inv_link = lambda x: x
elif self._link == 'logistic':
self._eval_link = lambda x: np.log( x / (1. - x) )
self._eval_inv_link = lambda x: np.exp(x) / (1 + np.exp(x))
elif self._link == 'probit':
# Inverse CDF of the Gaussian distribution
self._eval_link = lambda x: stats.norm.ppf(x)
self._eval_inv_link = lambda x: stats.norm.cdf(x)
elif self._link == 'complementary_log_log':
self._eval_link = lambda x: np.log(-np.log(1. - x))
self._eval_inv_link = lambda x: 1. - np.exp(-np.exp(x))
elif self._link == 'log':
self._eval_link = lambda x: np.log(x)
self._eval_inv_link = lambda x: np.exp(x)
elif self._link == 'reciprocal':
self._eval_link = lambda x: 1. / x
self._eval_inv_link = lambda x: 1. / x
elif self._link == 'reciprocal_squared':
self._eval_link = lambda x: 1. / (x * x)
self._eval_inv_link = lambda x: 1. / np.sqrt(x)
self._estimate_overdispersion = estimate_overdispersion
self._features = {}
self._offset = 0.0
self._num_features = 0
self._fitted = False
self._name = name
def _save(self):
"""Save state.
Save the model to file to make predictions later, or continue
a fitting session.
"""
mv = {}
mv['family'] = self._family
mv['link'] = self._link
mv['known_dispersion'] = self._known_dispersion
if self._known_dispersion:
mv['dispersion'] = self._dispersion
mv['estimate_overdispersion'] = self._estimate_overdispersion
mv['offset'] = self._offset
mv['num_features'] = self._num_features
mv['fitted'] = self._fitted
mv['name'] = self._name
features = {}
for name, feature in self._features.iteritems():
features[name] = {'type': feature.__type__,
'filename': feature._filename
}
mv['features'] = features
# mv['rho'] = self._rho
mv['num_obs'] = self._num_obs
mv['y'] = self._y
mv['weights'] = self._weights
mv['has_covariate_classes'] = self._has_covariate_classes
if self._has_covariate_classes:
mv['covariate_class_sizes'] = self._covariate_class_sizes
mv['f_bar'] = self.f_bar
mv['z_bar'] = self.z_bar
mv['u'] = self.u
mv['prim_res'] = self.prim_res
mv['dual_res'] = self.dual_res
mv['prim_tol'] = self.prim_tol
mv['dual_tol'] = self.dual_tol
mv['dev'] = self.dev
filename = '{0:s}_model.pckl'.format(self._name)
f = open(filename, 'w')
pickle.dump(mv, f)
f.close()
def _load(self, filename):
"""Load state.
Load a model from file to make predictions.
"""
f = open(filename)
mv = pickle.load(f)
f.close()
self._filename = filename
self._family = mv['family']
self._link = mv['link']
self._known_dispersion = mv['known_dispersion']
if self._known_dispersion:
self._dispersion = mv['dispersion']
self._estimate_overdispersion = mv['estimate_overdispersion']
self._offset = mv['offset']
self._num_features = mv['num_features']
self._fitted = mv['fitted']
self._name = mv['name']
self._features = {}
features = mv['features']
for (name, feature) in features.iteritems():
if feature['type'] == 'categorical':
self._features[name] = _CategoricalFeature(load_from_file=feature['filename'])
elif feature['type'] == 'linear':
self._features[name] = _LinearFeature(load_from_file=feature['filename'])
elif feature['type'] == 'spline':
self._features[name] = _SplineFeature(load_from_file=feature['filename'])
else:
raise ValueError('Invalid feature type')
# self._rho = mv['rho']
self._num_obs = mv['num_obs']
self._y = mv['y']
self._weights = mv['weights']
self._has_covariate_classes = mv['has_covariate_classes']
if self._has_covariate_classes:
self._covariate_class_sizes = mv['covariate_class_sizes']
self.f_bar = mv['f_bar']
self.z_bar = mv['z_bar']
self.u = mv['u']
self.prim_res = mv['prim_res']
self.dual_res = mv['dual_res']
self.prim_tol = mv['prim_tol']
self.dual_tol = mv['dual_tol']
self.dev = mv['dev']
if self._link == 'identity':
self._eval_link = lambda x: x
self._eval_inv_link = lambda x: x
elif self._link == 'logistic':
self._eval_link = lambda x: np.log( x / (1. - x) )
self._eval_inv_link = lambda x: np.exp(x) / (1 + np.exp(x))
elif self._link == 'probit':
# Inverse CDF of the Gaussian distribution
self._eval_link = lambda x: stats.norm.ppf(x)
self._eval_inv_link = lambda x: stats.norm.cdf(x)
elif self._link == 'complementary_log_log':
self._eval_link = lambda x: np.log(-np.log(1. - x))
self._eval_inv_link = lambda x: 1. - np.exp(-np.exp(x))
elif self._link == 'log':
self._eval_link = lambda x: np.log(x)
self._eval_inv_link = lambda x: np.exp(x)
elif self._link == 'reciprocal':
self._eval_link = lambda x: 1. / x
self._eval_inv_link = lambda x: 1. / x
elif self._link == 'reciprocal_squared':
self._eval_link = lambda x: 1. / (x * x)
self._eval_inv_link = lambda x: 1. / np.sqrt(x)
def add_feature(self, name, type, transform=None, rel_dof=None, regularization=None):
"""Add a feature
Add a feature to a Generalized Additive Model. (An implicit
constant feature is always included, representing the overall
average response.)
Parameters
----------
name : str
Name for feature. Used internally to keep track of
features and is also used when saving files and in
plots.
type : str
Type of feature. Currently supported options include:
'categorical' (for categorical variables)
'linear' (for variables with a linear contribution
to the response)
'spline' (for variables with a potentially nonlinear
contribution to the response).
Other types of features worth supporting include
piecewise constant functions and monotonic functions.
Those might end up being regularization terms.
transform : function or None
Optional transform applied to feature data, saving
the user from repetitive boilerplate code. Any function
may be used; it is applied to data provided during fitting
and prediction. Common options might include np.log, np.log1p,
or np.sqrt. The user may wish to start with a base feature
like 'age' and use derived features 'age_linear', 'age_quadratic'
to permit quadratic models for that feature, with potentially
different regularization applied to each.
rel_dof : float or None
Relative degrees of freedom. Applicable only to spline features.
The degrees of freedom associated with a spline represent how
"wiggly" it is allowed to be. A spline with two degrees of freedom
is just a line. (Actually, since these features are constrained
to have zero mean response over the data, linear features
only have one degree of freedom.) The relative degrees of freedom
are used to specify the baseline smoothing parameter (lambda)
associated with a feature. When the model is fit to data, the user
can specify an overall smoothing parameter applied to all features
to alter the amount of regularization in the entire model. Thus
the actual degrees of freedom will vary based on the amount of
smoothing. The idea is that the analyst may wish to permit some
features to be more wiggly than others. By default, all
splines have 4 relative degrees of freedom.
Regularization of any feature effectively reduces the degrees of
freedom, and so this term is potentially applicable, but that is
not yet supported.
regularization : dictionary or None
Dictionary specifying the regularization applied to this feature.
Different types of features support different types of regularization.
Splines implicitly only support regularization of the wiggliness
via a C2 smoothness penalty. That is controlled via the rel_dof.
Other features have more diverse options described in their own
documentation.
Returns
-------
(nothing)
"""
if type == 'categorical':
f = _CategoricalFeature(name, regularization=regularization)
elif type == 'linear':
f = _LinearFeature(name, transform, regularization=regularization)
elif type == 'spline':
f = _SplineFeature(name, transform, rel_dof)
else:
raise ValueError('Features of type {} not supported.'.format(type))
self._features[name] = f
self._num_features += 1
def fit(self, X, y, covariate_class_sizes=None, weights=None,
optimizer='admm', smoothing=1., save_flag=False,
verbose=False, plot_convergence=False, max_its=100):
"""Fit a Generalized Additive Model to data.
Note regarding binomial families: many data sets include
multiple observations having identical features. For example,
imagine a data set with features 'gender', and 'country' and
binary response indicating whether the person died (morbid but
common in biostatistics). The data might look like this:
gender country patients survivors
M USA 50 48
F USA 70 65
M CAN 40 | |
# Source: ktelep/Zabbix-DellEMC-PowerMax / zabbix_powermax.py
#!/usr/bin/python3
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import sys
import json
import PyU4V
import argparse
import traceback
import logging
import logging.handlers
from pyzabbix import ZabbixMetric, ZabbixSender
# Update to include your Zabbix Server IP and Port
zabbix_ip = "192.168.1.64"
zabbix_port = 10051
# Logging level for the 'discovery' logger. Currently DEBUG for
# detailed output and troubleshooting; use logging.INFO for quieter
# production runs.
log_level = logging.DEBUG
log_file = "./zabbix_powermax.log"
# Host Base is the pattern used to define the Array in Zabbix;
# it is case-sensitive
host_base = "PowerMax {arrayid}"
# Base name for the keys created, you can customize if you don't like
# the default but you will need to update the template appropriately
key_base = "dellemc.pmax."
# Metric recency determines how "fresh" (in minutes) collected stats
# must be. 5 is the usual production value; currently 0 (accept any
# age) for testing. Note this does not change how often diagnostic
# data is collected ON the array.
metric_recency = 0
def log_exception_handler(type, value, tb):
    """sys.excepthook replacement: route uncaught exceptions to the logfile.

    Parameters match the sys.excepthook signature: exception type,
    exception instance, and traceback object.
    """
    logger = logging.getLogger('discovery')
    # logger.exception() is only valid inside an `except` block (it reads
    # sys.exc_info(), which is not set inside an excepthook), so log the
    # message with error() and emit the formatted traceback explicitly.
    logger.error("Uncaught exception: {0}".format(str(value)))
    for i in traceback.format_tb(tb):
        logger.debug(i)
def setup_logging(log_file):
    """Configure the rotating 'discovery' file logger.

    Attaches a RotatingFileHandler (~5 MB per file, 5 backups) on
    *log_file* to the 'discovery' logger at the module-level log_level,
    and installs log_exception_handler as the global excepthook. Exits
    the process if the log file cannot be created or written.
    """
    discovery_logger = logging.getLogger('discovery')
    discovery_logger.setLevel(log_level)
    try:
        rotating_handler = logging.handlers.RotatingFileHandler(
            log_file, maxBytes=5120000, backupCount=5)
    except PermissionError:
        print(f"ERROR: Error writing to or creating {log_file}")
        print("       Please verify permissions and path to file")
        sys.exit()
    rotating_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s %(process)d %(message)s'))
    discovery_logger.addHandler(rotating_handler)
    # Route any uncaught exception into the same logfile.
    sys.excepthook = log_exception_handler
    return
def generate_metric_key(base, category, metric, identifier):
    """Build a Zabbix item key of the form ``<base>perf.<category>.<metric>[<identifier>]``.

    Args:
        base (str): key prefix (e.g. ``dellemc.pmax.``).
        category (str): lower-cased performance category.
        metric (str): metric name from the Unisphere result set.
        identifier (str): object identifier placed in the key brackets.

    Returns:
        str: the formatted Zabbix key.
    """
    return '{0}perf.{1}.{2}[{3}]'.format(base, category, metric, identifier)
def zabbix_safe_output(data):
    """Wrap *data* in the Zabbix low-level-discovery JSON envelope.

    This is Zabbix 4.x and higher compatible.

    Args:
        data (list[dict]): discovered entities ("{#MACRO}": value mappings).

    Returns:
        str: pretty-printed JSON document of the form ``{"data": [...]}``.
    """
    logger = logging.getLogger('discovery')
    logger.info("Generating output")

    payload = {"data": data}
    logger.debug(json.dumps(payload))
    return json.dumps(payload, indent=4, separators=(',', ': '))
def fix_ts(timestamp):
    """Convert a millisecond epoch timestamp to whole seconds.

    Unisphere reports timestamps in milliseconds; Zabbix wants seconds.

    Args:
        timestamp (int or str): epoch time in milliseconds.

    Returns:
        int: epoch time in seconds (milliseconds truncated).
    """
    # Floor division drops the millisecond remainder directly; the old
    # divmod() discarded its second result, and the commented-out test
    # scaffolding has been removed.
    return int(timestamp) // 1000
def gather_array_health(configpath, arrayid):
    """Collect the array health scores and send them to Zabbix.

    Args:
        configpath (str): directory containing the PyU4V.conf file.
        arrayid (str): Unisphere array serial to query.
    """
    logger = logging.getLogger('discovery')
    logger.info("Starting Health Score Gathering")

    PyU4V.univmax_conn.file_path = configpath
    conn = PyU4V.U4VConn()

    logger.debug("Collecting Health")
    health = conn.system.get_system_health(array_id=arrayid)
    logger.debug(health)

    # Loop through the collected health stats and batch them up so we
    # open a single sender connection instead of one per metric
    host = host_base.format(arrayid=arrayid)
    health_metrics = list()
    for i in health['health_score_metric']:
        metric_key = '{base}health.{metric}[{arrayid}]'.format(
            base=key_base, metric=i['metric'],
            arrayid=arrayid)

        # Health Score may not be populated if we're between system checks
        # So validate it's there before we try to send it
        if 'health_score' in i:
            score = i['health_score']
            timestamp = fix_ts(i['data_date'])
            logger.debug(f"Sending Metric {host} - {metric_key} - "
                         f"{score} - {timestamp}")
            health_metrics.append(
                ZabbixMetric(host, metric_key, score, timestamp))
        else:
            logger.debug(f"No health score available for {i['metric']}")

    if health_metrics:
        # Single batched send for all collected health scores
        ZabbixSender(zabbix_server=zabbix_ip,
                     zabbix_port=zabbix_port).send(health_metrics)

    logger.info("Completed Health Score Gathering")
def process_perf_results(metrics, category):
    """ Process metrics collected from the _stats function by category

    Builds one Zabbix key per metric using the category's identifier
    fields and sends each timestamped result set to the Zabbix server.

    Args:
        metrics (dict): PyU4V performance response; must contain
            'array_id', 'result', and the identifier fields listed in
            category_map for the given category.
        category (str): performance category name; must be a key of
            category_map below.
    """
    logger = logging.getLogger('discovery')
    host = host_base.format(arrayid=metrics['array_id'])
    # This dict maps the category to the identifiers in the result set
    # that are used in identifiers for Zabbix keys
    category_map = {"Array": ["array_id"],
                    "FEDirector": ["director_id"],
                    "FEPort": ["director_id", "port_id"],
                    "BEDirector": ["director_id"],
                    "BEPort": ["director_id", "port_id"],
                    "RDFDirector": ["director_id"],
                    "RDFPort": ["director_id", "port_id"],
                    "IMDirector": ["director_id"],
                    "EDSDirector": ["director_id"],
                    "StorageGroup": ["storage_group_id"],
                    "SRP": ["srp_id"],
                    "Board": ["board_id"],
                    "DiskGroup": ["disk_group_id"],
                    "PortGroup": ["port_group_id"],
                    "BeEmulation": ["be_emulation_id"],
                    "FeEmulation": ["fe_emulation_id"],
                    "EDSEmulation": ["eds_emulation_id"],
                    "IMEmulation": ["im_emulation_id"],
                    "RDFEmulation": ["rdf_emulation_id"],
                    "Host": ["host_id"],
                    "Initiator": ["initiator_id"],
                    "RDFA": ["ra_group_id"],
                    "RDFS": ["rs_group_id"],
                    "ISCSITarget": ['iscsi_target_id']
                    }
    # Based on category, pull out our identifiers and join them into the
    # single identifier string used inside the Zabbix key brackets
    id_values = list()
    for i in category_map[category]:
        id_values.append(metrics[i])
    ident = "-".join(id_values)
    cat = category.lower()
    for metric_data in metrics['result']:
        # Drop the ms from our timestamp, we've only got
        # 5 minute granularity at best here
        timestamp = fix_ts(metric_data['timestamp'])
        send_metrics = list()
        # Bundle up all our metrics into a single list to send to Zabbix
        for metric, score in metric_data.items():
            # NOTE(review): substring match -- this also skips any metric
            # whose *name* merely contains 'timestamp'; confirm intended
            if 'timestamp' in metric:  # ignore the second timestamp
                continue
            key = generate_metric_key(key_base, cat, metric, ident)
            logger.debug(f"Built Metric: {key} for {host} - ts: {timestamp}")
            send_metrics.append(ZabbixMetric(host, key, score, timestamp))
        logger.debug("Sending Metrics")
        # Send the actual metrics list (one sender per result row)
        res = ZabbixSender(zabbix_server=zabbix_ip,
                           zabbix_port=zabbix_port).send(send_metrics)
        logger.info(res)
        logger.debug("Completed sending Metrics")
def gather_dir_perf(configpath, arrayid, category, hours=None):
    """Collect Director level performance statistics.

    Pulls KPI stats for every director of the requested category, plus
    per-port stats where the director type has ports, and forwards the
    results to Zabbix via process_perf_results().

    Args:
        configpath (str): directory containing the PyU4V.conf file.
        arrayid (str): Unisphere array serial to query.
        category (str): one of the director categories in func_map below.
        hours (int, optional): collect this many hours of history instead
            of only the most recent interval.
    """
    logger = logging.getLogger('discovery')
    logger.info(f"Starting {category} Perf Stats Collection")

    PyU4V.univmax_conn.file_path = configpath
    conn = PyU4V.U4VConn()

    # Map each director category to its matching port category
    # FEDirector -> FEPort, etc.
    port_cat = category.replace('Director', 'Port')

    # Function map to make this a generalized function vs. having
    # an individual one. PyU4V provides a generalized function but
    # it seems to be troublesome.
    func_map = {'FEDirector':
                {'keys': conn.performance.get_frontend_director_keys,
                 'stats': conn.performance.get_frontend_director_stats},
                'BEDirector':
                {'keys': conn.performance.get_backend_director_keys,
                 'stats': conn.performance.get_backend_director_stats},
                'RDFDirector':
                {'keys': conn.performance.get_rdf_director_keys,
                 'stats': conn.performance.get_rdf_director_stats},
                'EDSDirector':
                {'keys': conn.performance.get_eds_director_keys,
                 'stats': conn.performance.get_eds_director_stats},
                'IMDirector':
                {'keys': conn.performance.get_im_director_keys,
                 'stats': conn.performance.get_im_director_stats},
                'FEPort':
                {'keys': conn.performance.get_frontend_port_keys,
                 'stats': conn.performance.get_frontend_port_stats},
                'BEPort':
                {'keys': conn.performance.get_backend_port_keys,
                 'stats': conn.performance.get_backend_port_stats},
                'RDFPort':
                {'keys': conn.performance.get_rdf_port_keys,
                 'stats': conn.performance.get_rdf_port_stats}}

    # Gather the keys for the director, this will throw an exception if
    # the box doesn't have a specific director type (like RDF)
    try:
        directors = func_map[category]['keys'](array_id=arrayid)
        logger.debug(directors)
    except PyU4V.utils.exception.ResourceNotFoundException:
        logger.info(f"No {category} Directors found")
        # BUGFIX: must bail out here -- previously execution fell through
        # to the loop below with `directors` never assigned, raising an
        # UnboundLocalError whenever a director type was absent.
        return

    for director in directors:
        dir_id = director['directorId']
        logger.info(f"Collecting for {category} director {dir_id}")

        # this will be the kwargs passed to the stats function when called
        metric_params = {'recency': metric_recency,
                         'array_id': arrayid,
                         'metrics': 'KPI',
                         'director_id': dir_id}

        # Handle where we want multiple hours of data
        if hours:
            recent_time = conn.performance.get_last_available_timestamp()
            start_time, end_time = conn.performance.get_timestamp_by_hour(
                end_time=recent_time, hours_difference=hours)
            metric_params['start_time'] = start_time
            metric_params['end_time'] = end_time

        # Gather metrics, but gracefully handle if they're not recent enough
        try:
            metrics = func_map[category]['stats'](**metric_params)
        except PyU4V.utils.exception.VolumeBackendAPIException:
            # NOTE(review): `break` abandons all remaining directors of this
            # category after one recency failure -- confirm that is intended
            logger.info("Current metrics do not meet recency requirements")
            break

        logger.debug(metrics)
        # Send them off to be processed and sent to Zabbix
        process_perf_results(metrics, category)

        # Port Level Stats (if they exist) follows the same pattern
        # but not all directors have ports (EDS and IM for ex.)
        try:
            if port_cat in func_map:
                ports = func_map[port_cat]['keys'](array_id=arrayid,
                                                   director_id=dir_id)
                logger.debug(ports)
            else:
                ports = list()
        except PyU4V.utils.exception.ResourceNotFoundException:
            logger.debug(f"No ports found for dir: {dir_id} may be offline")
            continue

        for port in ports:
            port_id = port['portId']
            logger.info(f"Collecting metrics for {category}"
                        f" {dir_id} port {port_id}")
            try:
                metric_params['port_id'] = port_id
                metrics = func_map[port_cat]['stats'](**metric_params)
            except PyU4V.utils.exception.VolumeBackendAPIException:
                logger.info("Metrics not read, recency not met")
                continue

            logger.debug(metrics)
            process_perf_results(metrics, port_cat)

    logger.info("Completed Director Performance Gathering")
def gather_perf(configpath, arrayid, category, hours=None):
    """ Generalized non-Director performance gathering

    Looks up the key/stat functions for `category`, collects KPI metrics
    for each discovered item and forwards them via process_perf_results().

    Args:
        configpath (str): directory containing the PyU4V.conf file.
        arrayid (str): Unisphere array serial to query.
        category (str): one of the categories in func_map below.
        hours (int, optional): collect this many hours of history instead
            of only the most recent interval.
    """
    logger = logging.getLogger('discovery')
    logger.info(f"Starting {category} Stats Collection ")
    PyU4V.univmax_conn.file_path = configpath
    conn = PyU4V.U4VConn()
    # Map our categories to functions and what arguments map to responses
    # ('args' maps the stats-function kwarg name -> key in the item dict)
    func_map = {'PortGroup':
                {'keys': conn.performance.get_port_group_keys,
                 'stats': conn.performance.get_port_group_stats,
                 'args': {'port_group_id': 'portGroupId'}},
                'SRP':
                {'keys': conn.performance.get_storage_resource_pool_keys,
                 'stats': conn.performance.get_storage_resource_pool_stats,
                 'args': {'srp_id': 'srpId'}},
                'StorageGroup':
                {'keys': conn.performance.get_storage_group_keys,
                 'stats': conn.performance.get_storage_group_stats,
                 'args': {'storage_group_id': 'storageGroupId'}},
                'DiskGroup':
                {'keys': conn.performance.get_disk_group_keys,
                 'stats': conn.performance.get_disk_group_stats,
                 'args': {'disk_group_id': 'diskGroupId'}},
                'Board':
                {'keys': conn.performance.get_board_keys,
                 'stats': conn.performance.get_board_stats,
                 'args': {'board_id': 'boardId'}},
                'BeEmulation':
                {'keys': conn.performance.get_backend_emulation_keys,
                 'stats': conn.performance.get_backend_emulation_stats,
                 'args': {'emulation_id': 'beEmulationId'}},
                'FeEmulation':
                {'keys': conn.performance.get_frontend_emulation_keys,
                 'stats': conn.performance.get_frontend_emulation_stats,
                 'args': {'emulation_id': 'feEmulationId'}},
                'EDSEmulation':
                {'keys': conn.performance.get_eds_emulation_keys,
                 'stats': conn.performance.get_eds_emulation_stats,
                 'args': {'emulation_id': 'edsEmulationId'}},
                'IMEmulation':
                {'keys': conn.performance.get_im_emulation_keys,
                 'stats': conn.performance.get_im_emulation_stats,
                 'args': {'emulation_id': 'imEmulationId'}},
                'RDFEmulation':
                {'keys': conn.performance.get_rdf_emulation_keys,
                 'stats': conn.performance.get_rdf_emulation_stats,
                 'args': {'emulation_id': 'rdfEmulationId'}},
                'Host':
                {'keys': conn.performance.get_host_keys,
                 'stats': conn.performance.get_host_stats,
                 'args': {'host_id': 'hostId'}},
                'Initiator':
                {'keys': conn.performance.get_initiator_perf_keys,
                 'stats': conn.performance.get_initiator_stats,
                 'args': {'initiator_id': 'initiatorId'}},
                'RDFS':
                {'keys': conn.performance.get_rdfs_keys,
                 'stats': conn.performance.get_rdfs_stats,
                 'args': {'rdfs_group_id': 'rsGroupId'}},
                'RDFA':
                {'keys': conn.performance.get_rdfa_keys,
                 'stats': conn.performance.get_rdfa_stats,
                 'args': {'rdfa_group_id': 'raGroupId'}},
                'ISCSITarget':
                {'keys': conn.performance.get_iscsi_target_keys,
                 'stats': conn.performance.get_iscsi_target_stats,
                 'args': {'iscsi_target_id': 'iscsiTargetId'}},
                'Array':
                {'keys': conn.performance.get_array_keys,
                 'stats': conn.performance.get_array_stats,
                 'args': {}}
                }
    try:
        if 'Array' not in category:
            items = func_map[category]['keys'](array_id=arrayid)
        else:
            # Special case, array object can't have array_id passed
            items = func_map[category]['keys']()
        logger.debug(items)
    except PyU4V.utils.exception.ResourceNotFoundException:
        logger.info(f"No {category} found")
        return
    # this will be the kwargs passed to the stats function when called
    metric_params = {'recency': metric_recency,
                     'metrics': 'KPI'}
    # Handle where we want multiple hours of data
    if hours:
        recent_time = conn.performance.get_last_available_timestamp()
        start_time, end_time = conn.performance.get_timestamp_by_hour(
            end_time=recent_time, hours_difference=hours)
        metric_params['start_time'] = start_time
        metric_params['end_time'] = end_time
    if 'Array' not in category:
        metric_params['array_id'] = arrayid
    for item in items:
        # We need to dynamically update the dict we're using for kwargs
        # to include the appropriate parameters for this category item
        for m_key, i_key in func_map[category]['args'].items():
            metric_params[m_key] = item[i_key]
        logger.debug("Metric Parameters to be passed")
        logger.debug(metric_params)
        try:
            metrics = func_map[category]['stats'](**metric_params)
            logger.debug("Metrics returned")
            logger.debug(metrics)
        except PyU4V.utils.exception.VolumeBackendAPIException:
            logger.info(f"Metrics not read for {category}, recency not met")
            # NOTE(review): `return` abandons all remaining items of this
            # category; the director variant uses break/continue -- confirm
            return
        process_perf_results(metrics, category)
    logger.info(f"Completed {category} Stats Collection")
def do_array_discovery(configpath, arrayid):
""" Perform a discovery of the array attached to U4V """
logger = logging.getLogger('discovery')
logger.info("Starting | |
"""
rotate = np.random.random() < self.rotate_ratio
if rotate:
angle = self.angle
for key in self.keys:
img = results[key].copy()
results[key] = self._rotate_img(img, angle)
else:
angle = 0
results['rotate'] = rotate
results['rotate_angle'] = angle
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'keys={self.keys}, '
repr_str += f'angle={self.angle}, '
repr_str += f'rotate_ratio={self.rotate_ratio}, '
repr_str += f'scale={self.scale}, '
repr_str += f'center={self.center}, '
repr_str += f'img_fill_val={self.img_fill_val}, '
return repr_str
@PIPELINES.register_module()
class Pad:
    """Pad the images to align with network downsample factor for testing.

    See `Reshape` for more explanation. `numpy.pad` is used for the pad
    operation.
    Required keys are the keys in attribute "keys", added or
    modified keys are "pad" and the keys in attribute
    "keys". All keys in "keys" should have the same shape. "pad" records
    the (pad_h, pad_w) amounts applied to align the input's shape.

    Args:
        keys (list[str]): The images to be padded.
        ds_factor (int): Downsample factor of the network. The height and
            width will be padded to a multiple of ds_factor. Default: 1.
        min_size (int | float | tuple[float], optional): Minimum size of the
            output image. Default: None.
        kwargs (option): any keyword arguments to be passed to `numpy.pad`.
    """

    def __init__(self, keys, ds_factor=1, min_size=None, **kwargs):
        # Normalize min_size to a 2-tuple (or None) up front
        if isinstance(min_size, (int, float)):
            min_size = (min_size, min_size)
        elif isinstance(min_size, tuple):
            # error message previously said 'center' (copy-paste); fixed
            assert len(min_size) == 2, 'min_size with type tuple must have '\
                f'2 elements. got {len(min_size)} elements.'
        else:
            assert min_size is None, 'min_size must be None or type int, '\
                f'float or tuple, got type {type(min_size)}.'

        self.keys = keys
        self.ds_factor = ds_factor
        self.min_size = min_size
        self.kwargs = kwargs

    def _get_padded_shape(self, h, w):
        """Round (h, w) up to multiples of ds_factor, honoring min_size."""
        new_h = self.ds_factor * ((h - 1) // self.ds_factor + 1)
        new_w = self.ds_factor * ((w - 1) // self.ds_factor + 1)
        if self.min_size is not None:
            min_h, min_w = self.min_size
            if new_h < min_h:
                new_h = self.ds_factor * ((min_h - 1) // self.ds_factor + 1)
            if new_w < min_w:
                new_w = self.ds_factor * ((min_w - 1) // self.ds_factor + 1)
        return new_h, new_w

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information;
                results['pad'] is set to the (pad_h, pad_w) applied
                (possibly (0, 0)).
        """
        h, w = results[self.keys[0]].shape[:2]
        new_h, new_w = self._get_padded_shape(h, w)
        pad_h = new_h - h
        pad_w = new_w - w
        if new_h != h or new_w != w:
            # Pad on the bottom/right only; slice pad_width to the array's
            # ndim so both HxW and HxWxC inputs are supported
            pad_width = ((0, pad_h), (0, pad_w), (0, 0))
            for key in self.keys:
                results[key] = np.pad(results[key],
                                      pad_width[:results[key].ndim],
                                      **self.kwargs)
        results['pad'] = (pad_h, pad_w)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        kwargs_str = ', '.join(
            [f'{key}={val}' for key, val in self.kwargs.items()])
        repr_str += (f'(keys={self.keys}, ds_factor={self.ds_factor}, '
                     f'{kwargs_str})')
        return repr_str
@PIPELINES.register_module()
class RandomAffine:
"""Random affine transformation of input images keeping center invariant
This class is adopted from
https://github.com/pytorch/vision/blob/v0.5.0/torchvision/transforms/transforms.py#L1015
It should be noted that in
https://github.com/Yaoyi-Li/GCA-Matting/blob/master/dataloader/data_generator.py#L70
random flip is added. See explanation of `flip_ratio` below.
Required keys are the keys in attribute "keys", modified keys
are keys in attribute "keys".
Args:
keys (Sequence[str]): The images to be affined.
degrees (sequence or float or int): Range of degrees to select from. If it
is a float instead of a tuple like (min, max), the range of degrees
will be (-degrees, +degrees). Set to 0 to deactivate rotations.
translate (tuple, optional): Tuple of maximum absolute fraction for
horizontal and vertical translations. For example translate=(a, b),
then horizontal shift is randomly sampled in the range
-img_width * a < dx < img_width * a and vertical shift is randomly
sampled in the range -img_height * b < dy < img_height * b.
Default: None.
scale (tuple, optional): Scaling factor interval, e.g (a, b), then
scale is randomly sampled from the range a <= scale <= b.
Default: None.
shear (sequence or float or int, optional): Range of shear degrees to
select from. If shear is a float, a shear parallel to the x axis
and a shear parallel to the y axis in the range (-shear, +shear)
will be applied. Else if shear is a tuple of 2 values, a x-axis
shear and a y-axis shear in (shear[0], shear[1]) will be applied.
Default: None.
resample ('nearest', 'bilinear', 'bicubic'}, optional):
An optional resampling filters. Default: 'nearest'
flip_ratio (float, optional): Probability of the image being flipped.
The flips in horizontal direction and vertical direction are
independent. The image may be flipped in both directions.
Default: None.
"""
_str_to_cv2_interpolation = {
'nearest': cv2.INTER_NEAREST,
'bilinear': cv2.INTER_LINEAR,
'bicubic': cv2.INTER_CUBIC
}
    def __init__(self,
                 keys,
                 degrees,
                 translate=None,
                 scale=None,
                 shear=None,
                 resample='nearest',
                 flip_ratio=None):
        self.keys = keys
        # degrees: a single non-negative number becomes a symmetric range
        if isinstance(degrees, numbers.Number):
            assert degrees >= 0, ('If degrees is a single number, '
                                  'it must be positive.')
            self.degrees = (-degrees, degrees)
        else:
            assert isinstance(degrees, (tuple, list)) and len(degrees) == 2, \
                'degrees should be a list or tuple and it must be of length 2.'
            self.degrees = degrees

        # translate: fractions of image width/height, each in [0, 1]
        if translate is not None:
            assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
                'translate should be a list or tuple and it must be of length 2.'
            for t in translate:
                assert 0.0 <= t <= 1.0, ('translation values should be '
                                         'between 0 and 1.')
        self.translate = translate

        # scale: sampling interval (a, b) with strictly positive bounds
        if scale is not None:
            assert isinstance(scale, (tuple, list)) and len(scale) == 2, \
                'scale should be a list or tuple and it must be of length 2.'
            for s in scale:
                assert s > 0, 'scale values should be positive.'
        self.scale = scale

        # shear: normalized to [x_min, x_max, y_min, y_max]
        if shear is not None:
            if isinstance(shear, numbers.Number):
                assert shear >= 0, ('If shear is a single number, '
                                    'it must be positive.')
                self.shear = (-shear, shear)
            else:
                assert isinstance(shear, (tuple, list)) and \
                    (len(shear) == 2 or len(shear) == 4), \
                    'shear should be a list or tuple and it must be of length 2 or 4.'
                # X-Axis shear with [min, max]
                if len(shear) == 2:
                    self.shear = [shear[0], shear[1], 0., 0.]
                elif len(shear) == 4:
                    self.shear = [s for s in shear]
        else:
            # pairs with `if shear is not None`: no shear configured
            self.shear = shear

        # flip_ratio: per-axis flip probability; defaults to 0 (never flip)
        if flip_ratio is not None:
            assert isinstance(flip_ratio,
                              float), 'flip_ratio should be a float.'
            self.flip_ratio = flip_ratio
        else:
            self.flip_ratio = 0

        assert resample in self._str_to_cv2_interpolation, \
            'Resample options should be one of "nearest", "bilinear" and "bicubic"'
        self.resample = resample
@staticmethod
def _get_params(degrees, translate, scale_ranges, shears, flip_ratio,
img_size):
"""Get parameters for affine transformation.
Returns:
paras (tuple): Params to be passed to the affine transformation.
"""
angle = np.random.uniform(degrees[0], degrees[1])
if translate is not None:
max_dx = translate[0] * img_size[0]
max_dy = translate[1] * img_size[1]
translations = (np.round(np.random.uniform(-max_dx, max_dx)),
np.round(np.random.uniform(-max_dy, max_dy)))
else:
translations = (0, 0)
if scale_ranges is not None:
scale = (np.random.uniform(scale_ranges[0], scale_ranges[1]),
np.random.uniform(scale_ranges[0], scale_ranges[1]))
else:
scale = (1.0, 1.0)
if shears is not None:
if len(shears) == 2:
shear = [np.random.uniform(shears[0], shears[1]), 0.]
elif len(shears) == 4:
shear = [np.random.uniform(shears[0], shears[1]),
np.random.uniform(shears[2], shears[3])]
else:
shear = 0.0
# Because `flip` is used as a multiplier in line 604 and 605,
# so -1 stands for flip and 1 stands for no flip. Thus `flip`
# should be an 'inverse' flag as the result of the comparison.
flip = (np.random.rand(2) > flip_ratio).astype(np.int) * 2 - 1
return angle, translations, scale, shear, flip
@staticmethod
def _get_inverse_affine_matrix(center, angle, translate, scale, shear,
flip):
"""Helper method to compute inverse matrix for affine transformation.
As it is explained in PIL.Image.rotate, we need compute INVERSE of
affine transformation matrix: M = T * C * RSS * C^-1 where
T is translation matrix:
[1, 0, tx | 0, 1, ty | 0, 0, 1];
C is translation matrix to keep center:
[1, 0, cx | 0, 1, cy | 0, 0, 1];
RSS is rotation with scale and shear matrix.
It is different from the original function in torchvision.
1. The order are changed to flip -> scale -> rotation -> shear.
2. x and y have different scale factors.
RSS(shear, a, scale, f) =
[ cos(a + shear)*scale_x*f -sin(a + shear)*scale_y 0]
[ sin(a)*scale_x*f cos(a)*scale_y 0]
[ 0 0 1]
Thus, the | |
# (generated module -- see pyidf project for the IDD-to-Python generator)
""" Data objects in group "Zone Airflow"
"""
from collections import OrderedDict
import logging
from pyidf.helper import DataObject
logger = logging.getLogger("pyidf")
logger.addHandler(logging.NullHandler())
class ZoneInfiltrationDesignFlowRate(DataObject):

    """ Corresponds to IDD object `ZoneInfiltration:DesignFlowRate`
        Infiltration is specified as a design level which is modified by a Schedule fraction, temperature difference and wind speed:
        Infiltration=Idesign * FSchedule * (A + B*|(Tzone-Todb)| + C*WindSpd + D * WindSpd**2)
        If you use a ZoneList in the Zone or ZoneList name field then this definition applies
        to all the zones in the ZoneList.

        Each IDD field is exposed below as a read/write property backed by
        the DataObject mapping (``self["Field Name"]``).
    """
    # IDD-derived schema consumed by the DataObject base class for field
    # lookup, validation, defaults and ordering.  Auto-generated -- do not
    # edit by hand.
    _schema = {'extensible-fields': OrderedDict(),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'zone or zonelist name',
                                       {'name': u'Zone or ZoneList Name',
                                        'pyname': u'zone_or_zonelist_name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'object-list'}),
                                      (u'schedule name',
                                       {'name': u'Schedule Name',
                                        'pyname': u'schedule_name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'object-list'}),
                                      (u'design flow rate calculation method',
                                       {'name': u'Design Flow Rate Calculation Method',
                                        'pyname': u'design_flow_rate_calculation_method',
                                        'default': u'Flow/Zone',
                                        'required-field': True,
                                        'autosizable': False,
                                        'accepted-values': [u'Flow/Zone',
                                                            u'Flow/Area',
                                                            u'Flow/ExteriorArea',
                                                            u'Flow/ExteriorWallArea',
                                                            u'AirChanges/Hour'],
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'design flow rate',
                                       {'name': u'Design Flow Rate',
                                        'pyname': u'design_flow_rate',
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real',
                                        'unit': u'm3/s'}),
                                      (u'flow per zone floor area',
                                       {'name': u'Flow per Zone Floor Area',
                                        'pyname': u'flow_per_zone_floor_area',
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real',
                                        'unit': u'm3/s-m2'}),
                                      (u'flow per exterior surface area',
                                       {'name': u'Flow per Exterior Surface Area',
                                        'pyname': u'flow_per_exterior_surface_area',
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real',
                                        'unit': u'm3/s-m2'}),
                                      (u'air changes per hour',
                                       {'name': u'Air Changes per Hour',
                                        'pyname': u'air_changes_per_hour',
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real',
                                        'unit': u'1/hr'}),
                                      (u'constant term coefficient',
                                       {'name': u'Constant Term Coefficient',
                                        'pyname': u'constant_term_coefficient',
                                        'default': 1.0,
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'real'}),
                                      (u'temperature term coefficient',
                                       {'name': u'Temperature Term Coefficient',
                                        'pyname': u'temperature_term_coefficient',
                                        'default': 0.0,
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'real'}),
                                      (u'velocity term coefficient',
                                       {'name': u'Velocity Term Coefficient',
                                        'pyname': u'velocity_term_coefficient',
                                        'default': 0.0,
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'real'}),
                                      (u'velocity squared term coefficient',
                                       {'name': u'Velocity Squared Term Coefficient',
                                        'pyname': u'velocity_squared_term_coefficient',
                                        'default': 0.0,
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'real'})]),
               'format': None,
               'group': u'Zone Airflow',
               'min-fields': 12,
               'name': u'ZoneInfiltration:DesignFlowRate',
               'pyname': u'ZoneInfiltrationDesignFlowRate',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`

        Args:
            value (str): value for IDD Field `Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `name` or None if not set
        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def zone_or_zonelist_name(self):
        """field `Zone or ZoneList Name`

        Args:
            value (str): value for IDD Field `Zone or ZoneList Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `zone_or_zonelist_name` or None if not set
        """
        return self["Zone or ZoneList Name"]

    @zone_or_zonelist_name.setter
    def zone_or_zonelist_name(self, value=None):
        """Corresponds to IDD field `Zone or ZoneList Name`"""
        self["Zone or ZoneList Name"] = value

    @property
    def schedule_name(self):
        """field `Schedule Name`

        Args:
            value (str): value for IDD Field `Schedule Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `schedule_name` or None if not set
        """
        return self["Schedule Name"]

    @schedule_name.setter
    def schedule_name(self, value=None):
        """Corresponds to IDD field `Schedule Name`"""
        self["Schedule Name"] = value

    @property
    def design_flow_rate_calculation_method(self):
        """field `Design Flow Rate Calculation Method`

        |  The entered calculation method is used to create the maximum amount of infiltration
        |  for this set of attributes
        |  Choices: Flow/Zone => Design Flow Rate -- simply enter Design Flow Rate
        |  Flow/Area => Flow per Zone Floor Area - Value * Floor Area (zone) = Design Flow Rate
        |  Flow/ExteriorArea => Flow per Exterior Surface Area - Value * Exterior Surface Area (zone) = Design Flow Rate
        |  Flow/ExteriorWallArea => Flow per Exterior Surface Area - Value * Exterior Wall Surface Area (zone) = Design Flow Rate
        |  AirChanges/Hour => Air Changes per Hour - Value * Floor Volume (zone) adjusted for m3/s = Design Volume Flow Rate
        |  "Idesign" in Equation is the result.
        |  Default value: Flow/Zone

        Args:
            value (str): value for IDD Field `Design Flow Rate Calculation Method`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `design_flow_rate_calculation_method` or None if not set
        """
        return self["Design Flow Rate Calculation Method"]

    @design_flow_rate_calculation_method.setter
    def design_flow_rate_calculation_method(self, value="Flow/Zone"):
        """Corresponds to IDD field `Design Flow Rate Calculation Method`"""
        self["Design Flow Rate Calculation Method"] = value

    @property
    def design_flow_rate(self):
        """field `Design Flow Rate`

        |  Units: m3/s
        |  IP-Units: ft3/min

        Args:
            value (float): value for IDD Field `Design Flow Rate`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `design_flow_rate` or None if not set
        """
        return self["Design Flow Rate"]

    @design_flow_rate.setter
    def design_flow_rate(self, value=None):
        """Corresponds to IDD field `Design Flow Rate`"""
        self["Design Flow Rate"] = value

    @property
    def flow_per_zone_floor_area(self):
        """field `Flow per Zone Floor Area`

        |  Units: m3/s-m2

        Args:
            value (float): value for IDD Field `Flow per Zone Floor Area`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `flow_per_zone_floor_area` or None if not set
        """
        return self["Flow per Zone Floor Area"]

    @flow_per_zone_floor_area.setter
    def flow_per_zone_floor_area(self, value=None):
        """Corresponds to IDD field `Flow per Zone Floor Area`"""
        self["Flow per Zone Floor Area"] = value

    @property
    def flow_per_exterior_surface_area(self):
        """field `Flow per Exterior Surface Area`

        |  use key Flow/ExteriorArea for all exterior surface area
        |  use key Flow/ExteriorWallArea to include only exterior wall area
        |  Units: m3/s-m2

        Args:
            value (float): value for IDD Field `Flow per Exterior Surface Area`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `flow_per_exterior_surface_area` or None if not set
        """
        return self["Flow per Exterior Surface Area"]

    @flow_per_exterior_surface_area.setter
    def flow_per_exterior_surface_area(self, value=None):
        """Corresponds to IDD field `Flow per Exterior Surface Area`"""
        self["Flow per Exterior Surface Area"] = value

    @property
    def air_changes_per_hour(self):
        """field `Air Changes per Hour`

        |  Units: 1/hr

        Args:
            value (float): value for IDD Field `Air Changes per Hour`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `air_changes_per_hour` or None if not set
        """
        return self["Air Changes per Hour"]

    @air_changes_per_hour.setter
    def air_changes_per_hour(self, value=None):
        """Corresponds to IDD field `Air Changes per Hour`"""
        self["Air Changes per Hour"] = value

    @property
    def constant_term_coefficient(self):
        """field `Constant Term Coefficient`

        |  "A" in Equation
        |  Default value: 1.0

        Args:
            value (float): value for IDD Field `Constant Term Coefficient`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `constant_term_coefficient` or None if not set
        """
        return self["Constant Term Coefficient"]

    @constant_term_coefficient.setter
    def constant_term_coefficient(self, value=1.0):
        """Corresponds to IDD field `Constant Term Coefficient`"""
        self["Constant Term Coefficient"] = value

    @property
    def temperature_term_coefficient(self):
        """field `Temperature Term Coefficient`

        |  "B" in Equation

        Args:
            value (float): value for IDD Field `Temperature Term Coefficient`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `temperature_term_coefficient` or None if not set
        """
        return self["Temperature Term Coefficient"]

    @temperature_term_coefficient.setter
    def temperature_term_coefficient(self, value=None):
        """Corresponds to IDD field `Temperature Term Coefficient`"""
        self["Temperature Term Coefficient"] = value

    @property
    def velocity_term_coefficient(self):
        """field `Velocity Term Coefficient`

        |  "C" in Equation

        Args:
            value (float): value for IDD Field `Velocity Term Coefficient`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `velocity_term_coefficient` or None if not set
        """
        return self["Velocity Term Coefficient"]

    @velocity_term_coefficient.setter
    def velocity_term_coefficient(self, value=None):
        """Corresponds to IDD field `Velocity Term Coefficient`"""
        self["Velocity Term Coefficient"] = value

    @property
    def velocity_squared_term_coefficient(self):
        """field `Velocity Squared Term Coefficient`

        |  "D" in Equation

        Args:
            value (float): value for IDD Field `Velocity Squared Term Coefficient`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `velocity_squared_term_coefficient` or None if not set
        """
        return self["Velocity Squared Term Coefficient"]

    @velocity_squared_term_coefficient.setter
    def velocity_squared_term_coefficient(self, value=None):
        """Corresponds to IDD field `Velocity Squared Term Coefficient`"""
        self["Velocity Squared Term Coefficient"] = value
class ZoneInfiltrationEffectiveLeakageArea(DataObject):
""" Corresponds to IDD object `ZoneInfiltration:EffectiveLeakageArea`
Infiltration is specified as effective leakage area at 4 Pa, schedule fraction, stack and wind coefficients, and
is a function of temperature difference and | |
kwargs['labeler_algo'] = 'densenet'
if labeler_weight_filepath is None:
# kwargs['labeler_weight_filepath'] = 'zebra_v1'
# kwargs['labeler_weight_filepath'] = 'seaturtle'
# kwargs['labeler_weight_filepath'] = 'giraffe_v1'
# kwargs['labeler_weight_filepath'] = 'lynx_v3'
# kwargs['labeler_weight_filepath'] = 'seaturtle_v3'
# kwargs['labeler_weight_filepath'] = 'jaguar_v3'
# kwargs['labeler_weight_filepath'] = 'hendrik_dorsal_v2'
# kwargs['labeler_weight_filepath'] = 'spotted_skunk_v0'
# kwargs['labeler_weight_filepath'] = 'nassau_grouper_v0'
# kwargs['labeler_weight_filepath'] = 'spotted_dolphin_v0'
# kwargs['labeler_weight_filepath'] = 'seadragon_v1'
kwargs['labeler_weight_filepath'] = 'seadragon_v2'
else:
kwargs['labeler_weight_filepath'] = labeler_weight_filepath
kwargs['labeler_axis_aligned'] = use_axis_aligned_chips
label_dict = labeler_tp_tn_fp_fn(ibs, category_list, species_mapping=species_mapping, viewpoint_mapping=viewpoint_mapping,
test_gid_set=test_gid_set, **kwargs)
if config_list is None:
config_list = [
# {'label': 'Giraffe', 'category_list': None},
# {'label': 'Masai Giraffe', 'category_list': ['giraffe_masai']},
# {'label': 'Reticulated Giraffe', 'category_list': ['giraffe_reticulated']},
# {'label': 'Lynx', 'category_list': ['lynx_pardinus']},
# {'label': 'Sea Turtle', 'category_list': ['turtle_sea']},
# {'label': 'Sea Turtle Head', 'category_list': ['turtle_sea+head']},
# {'label': 'Manta', 'category_list': ['manta_ray_giant']},
# {'label': 'Jaguar', 'category_list': ['jaguar']},
# {'label': 'Dorsal Fin', 'category_list': ['dolphin_bottlenose_fin']},
# {'label': 'Reticulated Giraffe', 'category_list': ['giraffe_reticulated']},
# {'label': 'Sea Turtle', 'category_list': ['turtle_sea']},
# {'label': 'Whale Fluke', 'category_list': ['whale_fluke']},
# {'label': 'Grevy\'s Zebra', 'category_list': ['zebra_grevys']},
# {'label': 'Plains Zebra', 'category_list': ['zebra_plains']},
# {'label': 'Spotted Skunk', 'category_list': ['skunk_spotted']},
# {'label': 'Nassau Grouper', 'category_list': ['grouper_nassau']},
# {'label': 'Spotted Dolphin', 'category_list': ['dolphin_spotted']},
# {'label': 'Spotted Dolphin', 'category_list': ['dolphin_spotted']},
{'label': 'Weedy SD ', 'category_list': ['seadragon_weedy']},
{'label': 'Weedy Head', 'category_list': ['seadragon_weedy+head']},
{'label': 'Leafy SD ', 'category_list': ['seadragon_leafy']},
{'label': 'Leafy Head', 'category_list': ['seadragon_leafy+head']},
]
color_list = [(0.0, 0.0, 0.0)]
color_list += pt.distinct_colors(len(config_list) - len(color_list), randomize=False)
fig_ = plt.figure(figsize=figsize, dpi=400) # NOQA
axes_ = plt.subplot(131)
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.set_xlabel('Recall')
axes_.set_ylabel('Precision')
axes_.set_xlim([0.0, 1.01])
axes_.set_ylim([0.0, 1.01])
area_list = []
for color, config in zip(color_list, config_list):
ret = labeler_precision_recall_algo_plot(ibs, label_dict=label_dict,
color=color, **config)
area = ret[0]
area_list.append(area)
plt.title('Precision-Recall Curve', y=1.19)
plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
axes_ = plt.subplot(132)
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.set_xlabel('False-Positive Rate')
axes_.set_ylabel('True-Positive Rate')
axes_.set_xlim([0.0, 1.01])
axes_.set_ylim([0.0, 1.01])
for color, config in zip(color_list, config_list):
labeler_roc_algo_plot(ibs, label_dict=label_dict,
color=color, **config)
plt.title('ROC Curve', y=1.19)
plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
key_list = sorted(label_dict.keys())
fuzzy = fuzzy_dict is not None
if not fuzzy:
fuzzy_dict = {}
for index1, label1 in enumerate(key_list):
if label1 == 'ignore':
fuzzy_list = []
else:
species, viewpoint = label1.strip().split(':')
fuzzy_list = []
for index2, label2 in enumerate(key_list):
if species in label2:
fuzzy_list.append(index2)
fuzzy_dict[index1] = set(fuzzy_list)
axes_ = plt.subplot(133)
axes_.set_aspect(1)
gca_ = plt.gca()
gca_.grid(False)
correct_rate, fuzzy_rate = labeler_confusion_matrix_algo_plot(
ibs,
key_list,
species_mapping=species_mapping,
viewpoint_mapping=viewpoint_mapping,
category_mapping=category_mapping,
fig_=fig_,
axes_=axes_,
fuzzy_dict=fuzzy_dict,
test_gid_set=test_gid_set,
**kwargs
)
if fuzzy:
axes_.set_xlabel('Predicted (Correct = %0.02f%%, Fuzzy = %0.02f%%)' % (correct_rate * 100.0, fuzzy_rate * 100.0, ))
else:
axes_.set_xlabel('Predicted (Correct = %0.02f%%, Species = %0.02f%%)' % (correct_rate * 100.0, fuzzy_rate * 100.0, ))
axes_.set_ylabel('Ground-Truth')
# area_list_ = area_list[1:]
area_list_ = area_list
mAP = sum(area_list_) / len(area_list_)
args = (mAP * 100.0, )
plt.title('Confusion Matrix\nmAP = %0.02f' % args, y=1.19)
fig_filename = 'labeler-precision-recall-roc.png'
fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
plt.savefig(fig_path, bbox_inches='tight')
def canonical_precision_recall_algo(ibs, species, **kwargs):
    """Gather ground-truth labels and classifier confidences for the
    canonical-annotation classifier on the TEST_SET and hand them to
    general_precision_recall_algo.

    Args:
        ibs: IBEIS controller object.
        species (str): restrict test annotations to this species.
        **kwargs: forwarded as the classifier dependency-cache config.

    Returns:
        the result of general_precision_recall_algo(ibs, labels, confidences).
    """
    depc = ibs.depc_annot
    # Collect the TEST_SET annotations for the requested species.
    test_gids = list(set(general_get_imageset_gids(ibs, 'TEST_SET')))
    test_aid_set = ut.flatten(ibs.get_image_aids(test_gids))
    test_aid_set = ibs.filter_annotation_set(test_aid_set, species=species)
    canonical_flags = ibs.get_annot_canonical(test_aid_set)
    # Ground truth: canonical annotations are the positive class.
    label_list = ['positive' if flag else 'negative' for flag in canonical_flags]
    predictions = depc.get_property('classifier', test_aid_set, 'class', config=kwargs)
    scores = depc.get_property('classifier', test_aid_set, 'score', config=kwargs)
    # Re-express every score as the positive-class confidence.
    confidence_list = [
        score if prediction == 'positive' else 1.0 - score
        for prediction, score in zip(predictions, scores)
    ]
    return general_precision_recall_algo(ibs, label_list, confidence_list)
def canonical_precision_recall_algo_plot(ibs, **kwargs):
    """Plot the precision-recall curve for the canonical classifier.

    Expects kwargs['label'] (display label) plus whatever
    canonical_precision_recall_algo / general_area_best_conf need.
    """
    print('Processing Precision-Recall for: %r' % (kwargs['label'], ))
    values = canonical_precision_recall_algo(ibs, **kwargs)
    conf_list, pr_list, re_list, _, _ = values
    return general_area_best_conf(conf_list, re_list, pr_list, **kwargs)
def canonical_roc_algo_plot(ibs, **kwargs):
    """Plot the ROC curve for the canonical classifier.

    Expects kwargs['label'] (display label) plus whatever
    canonical_precision_recall_algo / general_area_best_conf need.
    """
    print('Processing ROC for: %r' % (kwargs['label'], ))
    values = canonical_precision_recall_algo(ibs, **kwargs)
    conf_list, _, _, tpr_list, fpr_list = values
    return general_area_best_conf(conf_list, fpr_list, tpr_list, interpolate=False,
                                  target=(0.0, 1.0), **kwargs)
def canonical_confusion_matrix_algo_plot(ibs, label, color, conf, species, output_cases=False, **kwargs):
    """Plot a confusion matrix for the canonical-annotation classifier at a
    fixed operating point.

    Fix: the ground-truth loop used to rebind the name ``label``, silently
    clobbering the ``label`` parameter; the local is now ``gt_label``.

    Args:
        ibs: IBEIS controller object.
        label (str): display label, used for logging only.
        color: accepted for API symmetry with the other *_algo_plot helpers
            (unused in this function).
        conf (float): threshold on the positive-class confidence.
        species (str): restrict test annotations to this species.
        output_cases (bool): when True, write misclassified chips to
            ~/Desktop/canonical-confusion-incorrect/{positive,negative}.
        **kwargs: classifier config; also forwarded to
            general_confusion_matrix_algo.

    Returns:
        the result of general_confusion_matrix_algo (correct rate, etc.).
    """
    print('Processing Confusion Matrix for: %r (Conf = %0.02f)' % (label, conf, ))
    depc = ibs.depc_annot
    test_gid_set_ = set(general_get_imageset_gids(ibs, 'TEST_SET'))
    test_gid_list_ = list(test_gid_set_)
    test_aid_list_ = ut.flatten(ibs.get_image_aids(test_gid_list_))
    test_aid_list_ = ibs.filter_annotation_set(test_aid_list_, species=species)
    test_flag_list_ = ibs.get_annot_canonical(test_aid_list_)
    test_aid_set = []
    label_list = []
    # Canonical annotations are the positive class.  Use a distinct name so
    # the `label` parameter is not overwritten.
    for aid, flag in zip(test_aid_list_, test_flag_list_):
        gt_label = 'positive' if flag else 'negative'
        test_aid_set.append(aid)
        label_list.append(gt_label)
    prediction_list = depc.get_property('classifier', test_aid_set, 'class', config=kwargs)
    confidence_list = depc.get_property('classifier', test_aid_set, 'score', config=kwargs)
    # Normalize scores so they always measure positive-class confidence.
    confidence_list = [
        confidence if prediction == 'positive' else 1.0 - confidence
        for prediction, confidence in zip(prediction_list, confidence_list)
    ]
    # Re-threshold at the requested operating point.
    prediction_list = [
        'positive' if confidence >= conf else 'negative'
        for confidence in confidence_list
    ]
    if output_cases:
        output_path = 'canonical-confusion-incorrect'
        output_path = abspath(expanduser(join('~', 'Desktop', output_path)))
        positive_path = join(output_path, 'positive')
        negative_path = join(output_path, 'negative')
        ut.delete(output_path)
        ut.ensuredir(output_path)
        ut.ensuredir(positive_path)
        ut.ensuredir(negative_path)
        config = {
            'dim_size': (192, 192),
            'resize_dim': 'wh',
        }
        chip_list = ibs.depc_annot.get_property('chips', test_aid_set, 'img', config=config)
        zipped = zip(test_aid_set, chip_list, label_list, prediction_list)
        for aid, chip, gt_label, prediction in zipped:
            if gt_label == prediction:
                continue
            # Failure case: file the chip under its ground-truth folder.
            image_path = positive_path if gt_label == 'positive' else negative_path
            image_filename = 'hardidx_%d_pred_%s_case_fail.jpg' % (aid, prediction, )
            image_filepath = join(image_path, image_filename)
            cv2.imwrite(image_filepath, chip)
    category_list = ['positive', 'negative']
    category_mapping = {
        'positive': 0,
        'negative': 1,
    }
    return general_confusion_matrix_algo(label_list, prediction_list, category_list,
                                         category_mapping, **kwargs)
@register_ibs_method
def canonical_precision_recall_algo_display(ibs, figsize=(20, 20)):
    """Render a 2x2 evaluation figure for the canonical-annotation
    classifier (P-R curves, ROC curves, and two confusion matrices) and
    save it to the Desktop.

    Args:
        ibs: IBEIS controller object.
        figsize (tuple): matplotlib figure size in inches.

    Side effects:
        Writes 'canonical-precision-recall-roc.png' to ~/Desktop; the P-R
        confusion matrix call uses output_cases=True, which also dumps
        failure-case chips to the Desktop.
    """
    import matplotlib.pyplot as plt
    import plottool as pt
    fig_ = plt.figure(figsize=figsize, dpi=400)
    # Candidate model configurations; trailing comments record why each
    # older version was superseded.
    config_list = [
        {'label': 'CA V1 Ensemble', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v1', 'species': 'zebra_grevys'},  # SMALLER DATASET
        {'label': 'CA V2 Ensemble', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v2', 'species': 'zebra_grevys'},  # BROKEN L/R AUGMENTATION
        {'label': 'CA V3 Ensemble', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v3', 'species': 'zebra_grevys'},  # LARGER DATASET, TOO HARSH AUGMENTATION
        {'label': 'CA V4 Ensemble', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v4', 'species': 'zebra_grevys'},  # BETTER AUGMENTATION
        # {'label': 'CA V4 Model 0', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v4:0', 'species': 'zebra_grevys'},
        # {'label': 'CA V4 Model 1', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v4:1', 'species': 'zebra_grevys'},
        # {'label': 'CA V4 Model 2', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v4:2', 'species': 'zebra_grevys'},
    ]
    color_list = []
    # color_list = [(0, 0, 0)]
    color_list += pt.distinct_colors(len(config_list) - len(color_list), randomize=False)
    # Panel 1 (top-left): precision-recall curve per config.
    axes_ = plt.subplot(221)
    axes_.set_autoscalex_on(False)
    axes_.set_autoscaley_on(False)
    axes_.set_xlabel('Recall')
    axes_.set_ylabel('Precision')
    axes_.set_xlim([0.0, 1.01])
    axes_.set_ylim([0.0, 1.01])
    ret_list = [
        canonical_precision_recall_algo_plot(ibs, color=color, **config)
        for color, config in zip(color_list, config_list)
    ]
    area_list = [ ret[0] for ret in ret_list ]
    conf_list = [ ret[1] for ret in ret_list ]
    # index = np.argmax(area_list)
    # NOTE: "best" is hard-coded to the last config rather than the argmax
    # above (the V4 ensemble is assumed to be the keeper).
    index = -1
    best_label1 = config_list[index]['label']
    best_config1 = config_list[index]
    best_color1 = color_list[index]
    best_area1 = area_list[index]
    best_conf1 = conf_list[index]
    plt.title('Precision-Recall Curve (Best: %s, AP = %0.02f)' % (best_label1, best_area1, ), y=1.10)
    plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
               borderaxespad=0.0)
    # Panel 2 (top-right): ROC curve per config.
    axes_ = plt.subplot(222)
    axes_.set_autoscalex_on(False)
    axes_.set_autoscaley_on(False)
    axes_.set_xlabel('False-Positive Rate')
    axes_.set_ylabel('True-Positive Rate')
    axes_.set_xlim([0.0, 1.01])
    axes_.set_ylim([0.0, 1.01])
    ret_list = [
        canonical_roc_algo_plot(ibs, color=color, **config)
        for color, config in zip(color_list, config_list)
    ]
    area_list = [ ret[0] for ret in ret_list ]
    conf_list = [ ret[1] for ret in ret_list ]
    # index = np.argmax(area_list)
    # NOTE: same hard-coded choice as above.
    index = -1
    best_label2 = config_list[index]['label']
    best_config2 = config_list[index]
    best_color2 = color_list[index]
    best_area2 = area_list[index]
    best_conf2 = conf_list[index]
    plt.title('ROC Curve (Best: %s, AP = %0.02f)' % (best_label2, best_area2, ), y=1.10)
    plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
               borderaxespad=0.0)
    # Panel 3 (bottom-left): confusion matrix at the P-R operating point;
    # output_cases=True also writes failure-case chips to disk.
    axes_ = plt.subplot(223)
    axes_.set_aspect(1)
    gca_ = plt.gca()
    gca_.grid(False)
    correct_rate, _ = canonical_confusion_matrix_algo_plot(ibs, color=best_color1,
                                                           conf=best_conf1, fig_=fig_, axes_=axes_,
                                                           output_cases=True, **best_config1)
    axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
    axes_.set_ylabel('Ground-Truth')
    plt.title('P-R Confusion Matrix (OP = %0.02f)' % (best_conf1, ), y=1.12)
    # Panel 4 (bottom-right): confusion matrix at the ROC operating point.
    axes_ = plt.subplot(224)
    axes_.set_aspect(1)
    gca_ = plt.gca()
    gca_.grid(False)
    correct_rate, _ = canonical_confusion_matrix_algo_plot(ibs, color=best_color2,
                                                           conf=best_conf2, fig_=fig_, axes_=axes_,
                                                           **best_config2)
    axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
    axes_.set_ylabel('Ground-Truth')
    plt.title('ROC Confusion Matrix (OP = %0.02f)' % (best_conf2, ), y=1.12)
    # Save the composite figure to the Desktop.
    fig_filename = 'canonical-precision-recall-roc.png'
    fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
    plt.savefig(fig_path, bbox_inches='tight')
def _canonical_get_boxes(ibs, gid_list, species):
from ibeis.web.appfuncs import CANONICAL_PART_TYPE
aid_list = ut.flatten(ibs.get_image_aids(gid_list))
aid_list = ibs.filter_annotation_set(aid_list, species=species)
flag_list = ibs.get_annot_canonical(aid_list)
part_rowids_list = ibs.get_annot_part_rowids(aid_list)
part_types_list = list(map(ibs.get_part_types, part_rowids_list))
aid_set = []
bbox_set = []
zipped = zip(aid_list, flag_list, part_rowids_list, part_types_list)
for aid, flag, part_rowid_list, part_type_list in zipped:
part_rowid_ = None
if flag:
for part_rowid, part_type in zip(part_rowid_list, part_type_list):
if part_type == CANONICAL_PART_TYPE:
assert part_rowid_ is None, 'Cannot have multiple CA for one image'
part_rowid_ = part_rowid
if part_rowid_ is not None:
axtl, aytl, aw, ah = ibs.get_annot_bboxes(aid)
axbr, aybr = axtl + aw, aytl + ah
pxtl, pytl, pw, ph = ibs.get_part_bboxes(part_rowid_)
pxbr, pybr = pxtl + pw, pytl + ph
x0 = pxtl - axtl
y0 = pytl - aytl
x1 = axbr | |
# Repository: haisenzhao/CarpentryCompiler
# -*- coding: utf-8 -*-
#!/usr/bin/python
import xml.etree.ElementTree as ET
import sys
import os
import re
import itertools
import shutil
# static variables
uuid_rgxp = r"[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}"
statENodes = 0
def mk_sexp(assigns):
    """Fold a list of assignment s-expressions into nested ``(Seq ...)`` form.

    The nesting is right-associative and the final two entries share one
    Seq node, matching the original index arithmetic:

        []           -> "(Empty)"
        [a]          -> a
        [a, b]       -> "(Seq ab)"
        [a, b, c]    -> "(Seq a(Seq bc))"
        [a, b, c, d] -> "(Seq a(Seq b(Seq cd)))"

    Improvements: removed dead commented-out debug prints and replaced the
    manual paren counting with an explicit right-fold.
    """
    if not assigns:
        return "(Empty)"
    if len(assigns) == 1:
        return assigns[0]
    # Seed with the innermost node (last two assignments share one Seq),
    # then wrap the remaining assignments from right to left.
    sexp = "(Seq " + assigns[-2] + assigns[-1] + ")"
    for assign in reversed(assigns[:-2]):
        sexp = "(Seq " + assign + sexp + ")"
    return sexp
def chopsaw(lhs, rhs):
    """Translate a Chopsaw call string into its s-expression assignment.

    Extracts three UUIDs (lumber, face, edge) and the numeric reference
    fields (two angles, a length, a height) from the raw RHS text.
    """
    uuids = re.findall(uuid_rgxp, rhs)
    # TODO: this sometimes is Lumber, sometimes is Var
    lumber = "( Lumber %s)" % uuids[0]
    face = "( Face %s)" % uuids[1]
    edge = "( Edge %s)" % uuids[2]
    fields = rhs.split(',')
    ang1 = "(Angle (Float %s ))" % fields[3].split('(')[1]
    ang2 = "(Angle (Float %s ))" % fields[4]
    lnth = "(Length (Float %s ))" % fields[5].split(')')[0]
    ref = "(Refc %s%s%s)" % (ang1, ang2, lnth)
    # TODO: everything is stackable for now
    stk = "(Bool true)"
    height = "(Height (Float %s ))" % fields[6].split(')')[0]
    sexp = "(Chopsaw " + lumber + face + edge + ref + stk + height + ")"
    return lhs + sexp + " )"
def mk_xy(p):
    """Convert an "x/y" coordinate string into a (Tup ...) s-expression."""
    x_str, y_str = p.split('/')[0], p.split('/')[1]
    return "(Tup (Float %s )(Float %s ) )" % (x_str, y_str)
''' The version using paths as references
def mk_path(ps, pathType):
path = []
p1 = ps[0].split('(')[1]
path.append(mk_xy(p1))
# first and last are taken care of above
for i in range(1, len(ps) - 2):
path.append(mk_xy(ps[i]))
pn = ps[len(ps) - 1].split(')')[0]
path.append(mk_xy(pn))
spath = "(" + pathType + " "
for i in range(0, len(path)):
spath = spath + path[i]
return (spath + ")")
def bandsaw(lhs, rhs):
ids = re.findall(uuid_rgxp, rhs)
lumber = "( Lumber " + ids[0] + ")"
rsplt = rhs.split(',')
height = "(Height (Float "+rsplt[len(rsplt)-1].split(')')[0]+"))"
rsplt = rhs.split('Ref((')[0]
rsplt = rsplt.split(',')
ps = []
# last two list elements will always be about ref.
for i in range(1, len(rsplt) - 1):
ps.append(rsplt[i].strip(')'))
path = mk_path(ps, "Refb")
stk = "(Bool true)"
rhs = "(Bandsaw " + lumber + path + stk + height + ")"
assign = lhs + rhs + " )"
return assign
def jigsaw(lhs, rhs):
ids = re.findall(uuid_rgxp, rhs)
lumber = "( Lumber " + ids[0] + ")"
rsplt = rhs.split(',')
height = "(Height (Float "+rsplt[len(rsplt)-1].split(')')[0]+"))"
rsplt = rhs.split('Ref((')[0]
rsplt = rsplt.split(',')
ps = []
# last two list elements will always be about ref.
for i in range(1, len(rsplt) - 1):
ps.append(rsplt[i].strip(')'))
path = mk_path(ps, "Refj")
stk = "(Bool true)"
rhs = "(Jigsaw " + lumber + path + stk + height + ")"
assign = lhs + rhs + " )"
return assign
'''
def mk_ref(x, y):
    """Build a (Tup (Float x )(Float y ) ) s-expression from two strings."""
    return "(Tup (Float %s )(Float %s ) )" % (x, y)
def mk_path(ps, pathType):
    """Build a "(<pathType> <pt><pt>...)" s-expression from point strings.

    ps[0] is expected to carry a leading '(' and ps[-1] a trailing ')',
    both of which are stripped before conversion via mk_xy.

    NOTE(review): the middle loop stops at index len(ps) - 3, so the
    point at ps[len(ps) - 2] is never emitted -- this looks like an
    off-by-one (range(1, len(ps) - 1) expected). Confirm against callers;
    this helper appears superseded by mk_refs in the current pipeline.
    """
    path = []
    p1 = ps[0].split('(')[1]
    path.append(mk_xy(p1))
    # first and last are taken care of above
    for i in range(1, len(ps) - 2):
        path.append(mk_xy(ps[i]))
    pn = ps[len(ps) - 1].split(')')[0]
    path.append(mk_xy(pn))
    spath = "(" + pathType + " "
    for i in range(0, len(path)):
        spath = spath + path[i]
    return (spath + ")")
def mk_refs(ps, refType):
    """Build a "(<refType> <ref><ref>...)" s-expression from comma-separated
    point records, using fields 1 and 3 of each record as x and y."""
    points = []
    for record in ps:
        fields = record.split(',')
        points.append(mk_ref(fields[1], fields[3]))
    return "(" + refType + " " + "".join(points) + ")"
def bandsaw(lhs, rhs):
    """Translate a Bandsaw call string into its s-expression assignment.

    Pulls the lumber UUID, the Ref((...)) point list, and the trailing
    height field out of the raw RHS text.
    """
    uuids = re.findall(uuid_rgxp, rhs)
    lumber = "( Lumber " + uuids[0] + ")"
    fields = rhs.split(',')
    height = "(Height (Float " + fields[-1].split(')')[0] + "))"
    ref_records = rhs.split('Ref((')[1].split('))')[0].split('), (')
    path = mk_refs(ref_records, "Refb")
    # Everything is treated as stackable for now.
    stk = "(Bool true)"
    return lhs + "(Bandsaw " + lumber + path + stk + height + ")" + " )"
def jigsaw(lhs, rhs):
    """Translate a Jigsaw call string into its s-expression assignment.

    Same extraction as bandsaw, but the path node is tagged Refj and the
    operation node Jigsaw.
    """
    uuids = re.findall(uuid_rgxp, rhs)
    lumber = "( Lumber " + uuids[0] + ")"
    fields = rhs.split(',')
    height = "(Height (Float " + fields[-1].split(')')[0] + "))"
    ref_records = rhs.split('Ref((')[1].split('))')[0].split('), (')
    path = mk_refs(ref_records, "Refj")
    # Everything is treated as stackable for now.
    stk = "(Bool true)"
    return lhs + "(Jigsaw " + lumber + path + stk + height + ")" + " )"
def drill(lhs, rhs):
    """Placeholder: Drill translation is not implemented yet."""
    return "TODO_DRILL"
def tracksaw(lhs, rhs):
    """Translate a Tracksaw call string into its s-expression assignment.

    Mirrors chopsaw, but the reference node is tagged Refk and the
    operation node Tracksaw.
    """
    uuids = re.findall(uuid_rgxp, rhs)
    # TODO: this sometimes is Lumber, sometimes is Var
    lumber = "( Lumber %s)" % uuids[0]
    face = "( Face %s)" % uuids[1]
    edge = "( Edge %s)" % uuids[2]
    fields = rhs.split(',')
    ang1 = "(Angle (Float %s ))" % fields[3].split('(')[1]
    ang2 = "(Angle (Float %s ))" % fields[4]
    lnth = "(Length (Float %s ))" % fields[5].split(')')[0]
    ref = "(Refk %s%s%s)" % (ang1, ang2, lnth)
    # TODO: everything is stackable for now
    stk = "(Bool true)"
    height = "(Height (Float %s ))" % fields[6].split(')')[0]
    sexp = "(Tracksaw " + lumber + face + edge + ref + stk + height + ")"
    return lhs + sexp + " )"
def parse_xml(i):
    """Parse an XML file of <Program>/<equivalent>/<line> elements and
    translate each tool-call line into an s-expression assignment.

    :param i: path to the input XML file.
    :returns: (equiv_progs, original_progs).
        NOTE(review): with the itertools.product aggregation at the bottom
        commented out, both lists would come back empty -- and as written
        the function never reaches the return at all because of the debug
        exit(-1) after the first non-empty equivalent block.
    """
    ip = i
    tree = ET.parse(ip)
    root = tree.getroot()
    equiv_progs = []
    original_progs = []
    # NOTE(review): in_f is opened but never read (the tree was already
    # parsed above); the with-block merely scopes the traversal.
    with open(ip, "r") as in_f:
        for prog in root.findall('Program'):
            equiv_assigns = []
            equiv_originals = []
            for equiv in prog.findall('equivalent'):
                lines = equiv.findall('line')
                line_assigns = []
                line_originals = []
                for line in lines:
                    # Skip 'return' statements; only tool calls are translated.
                    if(line.text[0:6] == 'return'):
                        continue
                    tool_split = line.text.split("=")
                    lhs1 = tool_split[0]
                    lhs_lst = lhs1.split(',')
                    varbs = []
                    if (len(lhs_lst) > 1):
                        # Multiple assignment targets: strip the surrounding
                        # parentheses from the first and last names.
                        fst = "(Var " + lhs_lst[0].split('(')[1] + ")"
                        lst = "(Var " + \
                            lhs_lst[len(lhs_lst) - 1].split(')')[0] + ")"
                        varbs.append(fst)
                        # NOTE(review): this loop variable `i` shadows the
                        # function parameter `i` (already saved as `ip`).
                        for i in range(1, len(lhs_lst) - 1):
                            varbs.append("(Var " + lhs_lst[i] + ")")
                        varbs.append(lst)
                        lhs = "(Assign ( Tup "
                        for v in varbs:
                            lhs = lhs + v
                        lhs = lhs + ")"
                    else:
                        #print("only one")
                        fst = "(Var " + lhs_lst[0].split('(')[1] + ")"
                        varbs.append(fst)
                        lhs = "(Assign ( Tup "
                        for v in varbs:
                            lhs = lhs + v
                        # NOTE(review): unlike the multi-target branch, the
                        # closing ")" is left off here -- confirm intent.
                        lhs = lhs # + ")"
                    # print(lhs)
                    rhs = tool_split[1]
                    # Dispatch on the tool name embedded in the RHS text.
                    if "Chopsaw" in rhs:
                        assign = chopsaw(lhs, rhs)
                        line_assigns.append(assign)
                        line_originals.append(line.text)
                        # print(assign)
                        # print(line.text)
                    elif "Bandsaw" in rhs:
                        assign = bandsaw(lhs, rhs)
                        line_assigns.append(assign)
                        line_originals.append(line.text)
                    elif "Jigsaw" in rhs:
                        assign = jigsaw(lhs, rhs)
                        line_assigns.append(assign)
                        line_originals.append(line.text)
                    elif "Tracksaw" in rhs:
                        assign = tracksaw(lhs, rhs)
                        line_assigns.append(assign)
                        line_originals.append(line.text)
                    elif "Drill" in rhs:
                        assign = drill(lhs, rhs)
                        line_assigns.append(assign)
                        line_originals.append(line.text)
                if len(line_assigns) != 0:
                    equiv_assigns.append(line_assigns)
                    equiv_originals.append(line_originals)
                    # NOTE(review): debug leftover -- prints the first
                    # translation and aborts the interpreter, so only the
                    # first non-empty equivalent block is ever processed.
                    print(line_assigns[0])
                    exit(-1)
            # for eqp in itertools.product(*equiv_assigns):
            #     equiv_progs.append(mk_sexp(eqp))
            # for eqp in itertools.product(*equiv_originals):
            #     original_progs.append(''.join(eqp))
    return equiv_progs, original_progs
def process_xml(i, o):
ip = i
tree = ET.parse(ip)
root = tree.getroot()
parsed_file = []
original_file = []
original_cutLineId_file = []
prog_id = []
prog_wlc = []
prog_arr = []
for prog in root.findall('Program'):
parsed_prog = []
original_prog = []
original_cutLineId_prog = []
for equiv in prog.findall('equivalent'):
lines = equiv.findall('line')
parsed_equiv = []
original_equiv = []
prog_equiv = []
for line in lines:
if(line.text[0:6] == 'return'):
continue
assign = ''
tool_split = line.text.split("=")
lhs1 = tool_split[0]
lhs_lst = lhs1.split(',')
varbs = []
if (len(lhs_lst) > 1):
fst = "(Var " + lhs_lst[0].split('(')[1] + ")"
lst = "(Var " + \
lhs_lst[len(lhs_lst) - 1].split(')')[0] + ")"
varbs.append(fst)
for i in range(1, len(lhs_lst) - 1):
varbs.append("(Var " + lhs_lst[i] + ")")
varbs.append(lst)
lhs = "(Assign ( Tup "
for v in varbs:
lhs = lhs + v
lhs = lhs + ")"
else:
#print("only one")
fst = "(Var " + lhs_lst[0].split('(')[1] + ")"
varbs.append(fst)
lhs = "(Assign ( Tup "
| |
"""
This module contains report class for classification estimators. Report includes:
* features scatter plots, distributions, correlations
* learning curve
* roc curve
* efficiencies
* metric vs cut
* feature importance
* feature importance by shuffling the feature column
All methods return objects, which have `plot` method (details see in :class:`rep.plotting`),
these objects contain raw information about things to be plotted.
"""
from __future__ import division, print_function, absolute_import
from itertools import islice
from collections import OrderedDict, defaultdict
import itertools
import numpy
from .. import utils
from .. import plotting
from ._base import AbstractReport
from .metrics import OptimalMetric, LogLoss
from ..estimators.interface import Classifier
from ..utils import get_columns_dict
__author__ = '<NAME>, <NAME>'
BAR_TYPES = {'error_bar', 'bar'}
class ClassificationReport(AbstractReport):
"""
Test estimators on any data. Supports ROC curve, prediction distribution, features information
(correlation matrix, distribution, scatter plots for pairs of features),
efficiencies for thresholds (evaluate flatness of predictions for important feature),
correlation with prediction for necessary feature, any metrics of quality.
Parameters:
-----------
:param classifiers: estimators
:type classifiers: dict[str, Classifier]
:param LabeledDataStorage lds: data
"""
def __init__(self, classifiers, lds):
for name, classifier in classifiers.items():
assert isinstance(classifier, Classifier), "Object {} doesn't implement interface".format(name)
AbstractReport.__init__(self, lds=lds, estimators=classifiers)
def _predict(self, estimator, X):
return estimator.predict_proba(X)
@staticmethod
def _check_labels(labels_dict, class_labels):
""" Normalizes the names for labels.
:param labels_dict: dict(label -> name) or None,
if None, the classes will be named 0: bck and 1: signal
:param class_labels: array of shape [n_samples] with labels of events,
used here to define the set of used labels.
"""
labels_dict_init = OrderedDict()
all_classes = set(class_labels)
if labels_dict is None:
labels_dict_init[0] = 'bck'
labels_dict_init[1] = 'signal'
else:
for key, value in labels_dict.items():
if key in all_classes:
labels_dict_init[key] = value
assert set(labels_dict_init.keys()).issubset(all_classes), \
'Labels must be a subset of {}, but {}'.format(all_classes, list(labels_dict_init.keys()))
return labels_dict_init
    def features_pdf(self, features=None, mask=None, bins=30, ignored_sideband=0.0, labels_dict=None, grid_columns=2):
        """
        Features distribution (with errors)

        :param features: using features (if None then use classifier's features)
        :type features: None or list[str]
        :param mask: mask for data, which will be used
        :type mask: None or numbers.Number or array-like or str or function(pandas.DataFrame)
        :param bins: count of bins or array with borders
        :type bins: int or array-like
        :param labels_dict: label -- name for class label
            if None then {0: 'bck', '1': 'signal'}
        :type labels_dict: None or OrderedDict(int: str)
        :param int grid_columns: count of columns in grid
        :param float ignored_sideband: float from (0, 1), part of events ignored from the left and from the right
        :rtype: plotting.GridPlot
        """
        features = self.common_features if features is None else features
        # pdf[feature][class name] -> histogram with errors.
        pdf = defaultdict(OrderedDict)
        _, df, class_labels, weight = self._apply_mask(mask, self._get_features(features), self.target, self.weight)
        labels_dict = self._check_labels(labels_dict, class_labels)
        pdf_plots = []
        # One ErrorPlot per feature, overlaying one histogram per class.
        for feature in df.columns:
            for label, name in labels_dict.items():
                pdf[feature][name] = \
                    utils.calc_hist_with_errors(df[feature][class_labels == label].values,
                                                weight[class_labels == label], bins, ignored_sideband=ignored_sideband)
            plot_fig = plotting.ErrorPlot(pdf[feature])
            plot_fig.xlabel = feature
            plot_fig.ylabel = 'Normed event counts'
            plot_fig.figsize = (8, 6)
            pdf_plots.append(plot_fig)
        return plotting.GridPlot(grid_columns, *pdf_plots)
    def features_correlation_matrix_by_class(self, features=None, mask=None, tick_labels=None, vmin=-1, vmax=1,
                                             labels_dict=None, grid_columns=2):
        """
        Correlation between features (built separately for each class)

        :param features: using features (if None then use classifier's features)
        :type features: None or list[str]
        :param mask: mask for data, which will be used
        :type mask: None or numbers.Number or array-like or str or function(pandas.DataFrame)
        :param labels_dict: label -- name for class label
            if None then {0: 'bck', '1': 'signal'}
        :type labels_dict: None or OrderedDict(int: str)
        :param tick_labels: names for features in matrix
        :type tick_labels: None or array-like
        :param int vmin: min of value for min color
        :param int vmax: max of value for max color
        :param int grid_columns: count of columns in grid
        :rtype: plotting.GridPlot
        """
        features = self.common_features if features is None else features
        _, df, class_labels = self._apply_mask(mask, self._get_features(features), self.target)
        features_names = list(df.columns)
        if tick_labels is None:
            tick_labels = features_names
        labels_dict = self._check_labels(labels_dict, class_labels)
        correlation_plots = []
        # Cycle through colormaps so each class gets a distinct palette.
        color_map = itertools.cycle(['Reds', 'Blues', 'Oranges'])
        for label, name in labels_dict.items():
            plot_corr = plotting.ColorMap(
                utils.calc_feature_correlation_matrix(df[features_names][class_labels == label]),
                labels=tick_labels, vmin=vmin, vmax=vmax, cmap=next(color_map))
            plot_corr.title = 'Correlation for %s events' % name
            plot_corr.fontsize = 10
            # Scale the figure with the number of features.
            plot_corr.figsize = (len(features) // 5 + 2, len(features) // 5)
            correlation_plots.append(plot_corr)
        return plotting.GridPlot(grid_columns, *correlation_plots)
    def scatter(self, correlation_pairs, mask=None, marker_size=20, alpha=0.1, labels_dict=None, grid_columns=2):
        """
        Correlation between pairs of features

        :param list[tuple] correlation_pairs: pairs of features along which scatter plot will be build.
        :param mask: mask for data, which will be used
        :type mask: None or array-like or str or function(pandas.DataFrame)
        :param int marker_size: size of marker for each event on the plot
        :param float alpha: blending parameter for scatter
        :param labels_dict: label -- name for class label
            if None then {0: 'bck', '1': 'signal'}
        :type labels_dict: None or OrderedDict(int: str)
        :param int grid_columns: count of columns in grid
        :rtype: plotting.GridPlot
        """
        # Collect every feature referenced by any requested pair.
        features = list(set(itertools.chain.from_iterable(correlation_pairs)))
        _, df, class_labels = self._apply_mask(mask, self._get_features(features), self.target)
        labels_dict = self._check_labels(labels_dict, class_labels)
        correlation_plots = []
        corr_pairs = OrderedDict()
        # One scatter plot per pair, with one point cloud per class.
        for feature1_c, feature2_c in correlation_pairs:
            # Resolve possibly-expression column names to canonical keys.
            feature1, feature2 = list(get_columns_dict([feature1_c, feature2_c]).keys())
            corr_pairs[(feature1, feature2)] = OrderedDict()
            for label, name in labels_dict.items():
                corr_pairs[(feature1, feature2)][name] = (df[feature1][class_labels == label].values,
                                                          df[feature2][class_labels == label].values)
            plot_fig = plotting.ScatterPlot(corr_pairs[(feature1, feature2)], alpha=alpha, size=marker_size)
            plot_fig.xlabel = feature1
            plot_fig.ylabel = feature2
            plot_fig.figsize = (8, 6)
            correlation_plots.append(plot_fig)
        return plotting.GridPlot(grid_columns, *correlation_plots)
    def roc(self, mask=None, signal_label=1, physical_notion=True):
        """
        Calculate roc functions for data and return roc plot object

        :param mask: mask for data, which will be used
        :type mask: None or numbers.Number or array-like or str or function(pandas.DataFrame)
        :param int signal_label: label of the class treated as signal
        :param bool physical_notion: if set to True, will show signal efficiency vs background rejection,
            otherwise TPR vs FPR.
        :rtype: plotting.FunctionsPlot
        """
        roc_curves = OrderedDict()
        mask, = self._apply_mask(mask)
        classes_labels = set(numpy.unique(self.target[mask]))
        # ROC is only defined for binary problems containing the signal label.
        assert len(classes_labels) == 2 and signal_label in classes_labels, \
            'Classes must be 2 instead of {}'.format(classes_labels)
        for name, prediction in self.prediction.items():
            labels_active = numpy.array(self.target[mask] == signal_label, dtype=int)
            (tpr, tnr), _, _ = utils.calc_ROC(prediction[mask, signal_label], labels_active,
                                              sample_weight=self.weight[mask])
            if physical_notion:
                roc_curves[name] = (tpr, tnr)
                xlabel = 'Signal sensitivity'
                ylabel = 'Bg rejection eff (specificity)'
            else:
                roc_curves[name] = (1 - tnr, tpr)
                xlabel = 'false positive rate'
                ylabel = 'true positive rate'
        plot_fig = plotting.FunctionsPlot(roc_curves)
        plot_fig.xlabel = xlabel
        plot_fig.ylabel = ylabel
        plot_fig.title = 'ROC curves'
        return plot_fig
def prediction_pdf(self, mask=None, target_class=1, bins=30, size=2, log=False, plot_type='error_bar',
normed=True, labels_dict=None):
"""
Distribution of prediction for signal and bck separately with errors
:param mask: mask for data, which will be used
:type mask: None or numbers.Number or array-like or str or function(pandas.DataFrame)
:param target_class: draw probabilities of being classified as target_class
(default 1, will draw signal probabilities).
If None, will draw probability corresponding to right class of each event.
:type target_class: int or None
:param bins: number of bins in histogram
:type bins: int or array-like
:param int size: points size on plots
:param bool log: use logarithmic scale
:param bool normed: draw normed pdf or not (normed by default)
:param str plot_type: 'error_bar' for error type and 'bar' for hist type
:param labels_dict: names for class labels as dictionary
if None then {0: 'bck', '1': 'signal'}
:type labels_dict: None or OrderedDict(int: str)
:rtype: plotting.ErrorPlot or plotting.BarPlot
"""
assert plot_type in BAR_TYPES, 'Value for plot_type must be in ' + str(BAR_TYPES)
data = OrderedDict()
mask, = self._apply_mask(mask)
class_labels, weight = self.target[mask], self.weight[mask]
labels_dict = self._check_labels(labels_dict, class_labels)
filled_type = itertools.cycle(['not_filled', 'filled'])
for name, prediction in self.prediction.items():
prediction = prediction[mask]
for label, name_label in labels_dict.items():
label_mask = class_labels == label
target_label = label if target_class is None else target_class
plot_name = '{name} for {cl}'.format(name=name_label, cl=name)
if plot_type == 'error_bar':
data[plot_name] = utils.calc_hist_with_errors(
prediction[label_mask, target_label],
weight[label_mask], bins, normed=normed, x_range=(0, 1))
else:
data[plot_name] = (prediction[label_mask, target_label], weight[label_mask], next(filled_type))
if plot_type == 'error_bar':
plot_fig = plotting.ErrorPlot(data, size=size, log=log)
else:
plot_fig = plotting.BarPlot(data, bins=bins, normalization=normed, value_range=(0, 1))
plot_fig.xlabel = 'prediction'
plot_fig.ylabel = 'density' if normed else 'Event count'
return plot_fig
def efficiencies(self, features, thresholds=None, mask=None, bins=30, labels_dict=None, ignored_sideband=0.0,
errors=False, grid_columns=2):
"""
Efficiencies for spectators
:param features: using features (if None then use classifier's spectators)
:type features: None or list[str]
:param bins: bins for histogram
:type bins: int or array-like
:param mask: mask for data, which will be used
:type | |
IPv6 interface statistics last reset: never
IPv6 interface RP-traffic statistics: (forwarded/originated/consumed)
Unicast packets: 0/0/0
Unicast bytes: 0/0/0
Multicast packets: 0/12/9
Multicast bytes: 0/1144/640
'''
# Expected parsed output for `show routing vrf all` (both VRFs).
ShowRoutingVrfAll = {
    'vrf': {
        'VRF1': {
            'address_family': {
                'vpnv4 unicast': {
                    'bgp_distance_internal_as': 33,
                    'bgp_distance_local': 55,
                    'ip': {
                        '10.2.2.2/24': {
                            'ubest_num': '1',
                            'mbest_num': '0',
                            'best_route': {'unicast': {'nexthop': {'Null0': {'protocol': {
                                'bgp': {'uptime': '5w0d',
                                        'preference': '55',
                                        'metric': '0',
                                        'protocol_id': '100',
                                        'interface': 'Ethernet2/1',
                                        'attribute': 'discard',
                                        'tag': '100'}}}}}},
                        },
                    },
                },
            },
        },
        'default': {
            'address_family': {
                'ipv4 unicast': {
                    'bgp_distance_internal_as': 33,
                    'bgp_distance_local': 55,
                    'ip': {
                        '10.169.2.2/24': {
                            'ubest_num': '1',
                            'mbest_num': '0',
                            'best_route': {'unicast': {'nexthop': {'Null0': {'protocol': {
                                'bgp': {'uptime': '5w0d',
                                        'preference': '55',
                                        'metric': '0',
                                        'protocol_id': '100',
                                        'interface': 'Ethernet2/1',
                                        'attribute': 'discard',
                                        'tag': '100'}}}}}},
                        },
                    },
                },
            },
        },
    },
}
# Expected parsed output for `show routing vrf all` restricted to VRF1.
ShowRoutingVrfAll_vrf1 = {
    'vrf': {
        'VRF1': {
            'address_family': {
                'vpnv4 unicast': {
                    'bgp_distance_internal_as': 33,
                    'bgp_distance_local': 55,
                    'ip': {
                        '10.2.2.2/24': {
                            'ubest_num': '1',
                            'mbest_num': '0',
                            'best_route': {'unicast': {'nexthop': {'Null0': {'protocol': {
                                'bgp': {'uptime': '5w0d',
                                        'preference': '55',
                                        'metric': '0',
                                        'protocol_id': '100',
                                        'interface': 'Ethernet2/1',
                                        'attribute': 'discard',
                                        'tag': '100'}}}}}},
                        },
                    },
                },
            },
        },
    },
}
# Expected parsed output for `show ipv6 routing vrf all` (VRF1 only).
ShowRoutingIpv6VrfAll = {
    "vrf": {
        "VRF1": {
            "address_family": {
                "ipv6 unicast": {
                    "ip": {
                        "2001:db8:1:1::1/128": {
                            "attach": "attached",
                            "mbest_num": "0",
                            "ubest_num": "1",
                            "best_route": {"unicast": {"nexthop": {"2001:db8:1:1::1": {"protocol": {
                                "local": {"interface": "Ethernet1/1",
                                          "metric": "0",
                                          "uptime": "00:15:46",
                                          "preference": "0"}}}}}},
                        },
                        "2001:db8:1:1::/64": {
                            "attach": "attached",
                            "mbest_num": "0",
                            "ubest_num": "1",
                            "best_route": {"unicast": {"nexthop": {"2001:db8:1:1::1": {"protocol": {
                                "direct": {"interface": "Ethernet1/1",
                                           "metric": "0",
                                           "uptime": "00:15:46",
                                           "preference": "0"}}}}}},
                        },
                        "2001:db8:2:2::2/128": {
                            "attach": "attached",
                            "mbest_num": "0",
                            "ubest_num": "1",
                            "best_route": {"unicast": {"nexthop": {"2001:db8:2:2::2": {"protocol": {
                                "local": {"interface": "Ethernet1/1",
                                          "metric": "0",
                                          "tag": "222",
                                          "uptime": "00:15:46",
                                          "preference": "0"}}}}}},
                        },
                        "2fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64": {
                            "attach": "attached",
                            "mbest_num": "0",
                            "ubest_num": "1",
                            "best_route": {"unicast": {"nexthop": {"2001:db8::5054:ff:fed5:63f9": {"protocol": {
                                "local": {"interface": "Ethernet2/1",
                                          "metric": "0",
                                          "uptime": "00:15:46",
                                          "preference": "0"}}}}}},
                        },
                        "2001:db8::/64": {
                            "attach": "attached",
                            "mbest_num": "0",
                            "ubest_num": "1",
                            "best_route": {"unicast": {"nexthop": {"2001:db8::5054:ff:fed5:63f9": {"protocol": {
                                "direct": {"interface": "Ethernet1/1",
                                           "metric": "0",
                                           "uptime": "00:15:46",
                                           "preference": "0"}}}}}},
                        },
                        "2001:db8:3:3::3/64": {
                            "attach": "attached",
                            "mbest_num": "0",
                            "ubest_num": "1",
                            "best_route": {"unicast": {"nexthop": {"2001:db8:2:2::2": {"protocol": {
                                "direct": {"interface": "Ethernet2/1",
                                           "metric": "0",
                                           "tag": "222",
                                           "uptime": "00:15:46",
                                           "preference": "0"}}}}}},
                        },
                    },
                },
            },
        },
    },
}
# Expected Interface Ops structure when learning a custom attribute subset
# (only Ethernet2/1 is present).
InterfaceOpsOutput_custom_info = {
    'Ethernet2/1': {
        'access_vlan': 1,
        'auto_negotiate': False,
        'bandwidth': 768,
        'counters': {
            'in_broadcast_pkts': 0, 'in_crc_errors': 0, 'in_errors': 0,
            'in_mac_pause_frames': 0, 'in_multicast_pkts': 0, 'in_octets': 0,
            'in_pkts': 0, 'in_unicast_pkts': 0, 'in_unknown_protos': 0,
            'last_clear': 'never',
            'out_broadcast_pkts': 0, 'out_discard': 0, 'out_errors': 0,
            'out_mac_pause_frames': 0, 'out_multicast_pkts': 0,
            'out_octets': 0, 'out_pkts': 0, 'out_unicast_pkts': 0,
            'rate': {'in_rate': 0, 'in_rate_pkts': 0, 'load_interval': 0,
                     'out_rate': 0, 'out_rate_pkts': 0},
        },
        'delay': 3330,
        'description': 'desc',
        'duplex_mode': 'full',
        'enabled': True,
        'encapsulation': {'encapsulation': 'arpa'},
        'flow_control': {'receive': False, 'send': False},
        'port_channel': {'port_channel_member': True,
                         'port_channel_member_intfs': ['Port-channel1']},
        'ipv4': {
            '10.2.2.2/24': {'ip': '10.2.2.2', 'prefix_length': '24',
                            'origin': 'bgp', 'route_tag': '100',
                            'secondary': True},
            '10.3.3.3/24': {'ip': '10.3.3.3', 'prefix_length': '24',
                            'secondary': True},
            '10.4.4.4/24': {'ip': '10.4.4.4', 'prefix_length': '24',
                            'route_tag': '10', 'secondary': True},
            'unnumbered': {'interface_ref': 'loopback0'},
        },
        'mac_address': 'aaaa.bbbb.cccc',
        'medium': 'broadcast',
        'mtu': 1600,
        'oper_status': 'up',
        'port_speed': '1000',
        'last_change': '00:00:29',
        'phys_address': '5254.003b.4aca',
        'switchport_mode': 'trunk',
        'switchport_enable': True,
        'trunk_vlans': '100,300',
        'type': '10/100/1000 Ethernet',
        'vrf': 'VRF1',
    },
}
InterfaceOpsOutput_info = {
'Ethernet2/1': {'auto_negotiate': False,
'bandwidth': 768,
'counters': {'in_broadcast_pkts': 0,
'in_crc_errors': 0,
'in_errors': 0,
'in_mac_pause_frames': 0,
'in_multicast_pkts': 0,
'in_octets': 0,
'in_pkts': 0,
'in_unicast_pkts': 0,
'in_unknown_protos': 0,
'last_clear': 'never',
'out_broadcast_pkts': 0,
'out_discard': 0,
'out_errors': 0,
'out_mac_pause_frames': 0,
'out_multicast_pkts': 0,
'out_octets': 0,
'out_pkts': 0,
'out_unicast_pkts': 0,
'rate': {'in_rate': 0,
'in_rate_pkts': 0,
'load_interval': 0,
'out_rate': 0,
'out_rate_pkts': 0}},
'delay': 3330,
'description': 'desc',
'duplex_mode': 'full',
'enabled': True,
'encapsulation': {'encapsulation': 'arpa'},
'flow_control': {'receive': False, 'send': False},
'port_channel': {'port_channel_member': True,
'port_channel_member_intfs': ['Port-channel1']},
'ipv4': {'10.2.2.2/24': {'ip': '10.2.2.2',
'prefix_length': '24',
'origin': 'bgp',
'route_tag': '100',
'secondary': True},
'10.3.3.3/24': {'ip': '10.3.3.3',
'prefix_length': '24',
'secondary': True},
'10.4.4.4/24': {'ip': '10.4.4.4',
'prefix_length': '24',
'route_tag': '10',
'secondary': True},
'unnumbered':{'interface_ref': 'loopback0'}},
'ipv6': {'2001:db8:1:1::1/64': {'ip': '2001:db8:1:1::1',
'prefix_length': '64',
'status': 'valid'},
'2001:db8:2:2::2/64': {'anycast': True,
'ip': '2001:db8:2:2::2',
'prefix_length': '64',
'status': 'valid'},
'2001:db8:3:3::3/64': {'ip': '2001:db8:3:3::3',
'prefix_length': '64',
'origin': 'direct',
'route_tag': '222',
'status': 'valid'},
'2001:db8:4:4:a8aa:bbff:febb:cccc/64': {'ip': '2001:db8:4:4:a8aa:bbff:febb:cccc',
'prefix_length': '64',
'eui_64': True,
'origin': 'local',
'status': 'valid'}},
'mac_address': 'aaaa.bbbb.cccc',
'medium': 'broadcast',
'mtu': 1600,
'oper_status': 'up',
'port_speed': '1000',
'last_change': '00:00:29',
'phys_address': '5254.003b.4aca',
'type': '10/100/1000 Ethernet',
'vrf': 'VRF1'},
'Ethernet2/1.10': {'bandwidth': 768,
'delay': 10,
'enabled': False,
'encapsulation': {'encapsulation': 'dot1q',
'first_dot1q': '10'},
'mac_address': '5254.003b.4af8',
'medium': 'broadcast',
'mtu': 1600,
'oper_status': 'down',
'phys_address': '5254.003b.4aca',
'port_channel': {'port_channel_member': False},
'type': '10/100/1000 Ethernet',
'vlan_id': '10',
'vrf': 'default'},
'Ethernet2/1.20': {'bandwidth': 768,
'delay': 10,
'enabled': True,
'encapsulation': {'encapsulation': 'dot1q',
'first_dot1q': '20'},
'mac_address': '5254.003b.4af8',
'medium': 'p2p',
'mtu': 1600,
'oper_status': 'up',
'phys_address': '5254.003b.4aca',
'port_channel': {'port_channel_member': False},
'type': '10/100/1000 Ethernet',
'vlan_id': '20',
'vrf': 'default'},
'Ethernet2/10': {'vrf': 'default'},
'Ethernet2/11': {'vrf': 'default'},
'Ethernet2/12': {'vrf': 'default'},
'Ethernet2/13': {'vrf': 'default'},
'Ethernet2/14': {'vrf': 'default'},
'Ethernet2/15': {'vrf': 'default'},
'Ethernet2/16': {'vrf': 'default'},
'Ethernet2/17': {'vrf': 'default'},
'Ethernet2/18': {'vrf': 'default'},
'Ethernet2/19': {'vrf': 'default'},
'Ethernet2/2': {'access_vlan': 1,
'auto_negotiate': False,
'bandwidth': 1000000,
'counters': {'in_broadcast_pkts': 0,
'in_crc_errors': 0,
'in_errors': 0,
'in_mac_pause_frames': 0,
'in_multicast_pkts': 0,
'in_octets': 0,
'in_pkts': 0,
'in_unicast_pkts': 0,
'in_unknown_protos': 0,
'last_clear': 'never',
'out_broadcast_pkts': 0,
'out_discard': 0,
'out_errors': 0,
'out_mac_pause_frames': 0,
'out_multicast_pkts': 0,
'out_octets': 0,
'out_pkts': 0,
'out_unicast_pkts': 0,
'rate': {'in_rate': 0,
'in_rate_pkts': 0,
'load_interval': 0,
'out_rate': 0,
'out_rate_pkts': 0}},
'delay': 10,
'duplex_mode': 'full',
'enabled': True,
'encapsulation': {'encapsulation': 'arpa'},
'flow_control': {'receive': False, 'send': False},
'port_channel': {'port_channel_member': True,
'port_channel_member_intfs': ['Port-channel1']},
'mac_address': '5254.00ac.b52e',
'medium': 'broadcast',
'mtu': 1500,
'oper_status': 'up',
'phys_address': '5254.00ac.b52e',
'port_speed': '1000',
'last_change': '00:07:28',
'switchport_mode': 'trunk',
'switchport_enable': True,
'trunk_vlans': '100,300',
'type': '10/100/1000 Ethernet'},
'Ethernet2/20': {'vrf': 'default'},
'Ethernet2/21': {'vrf': 'default'},
'Ethernet2/22': {'vrf': 'default'},
'Ethernet2/23': {'vrf': 'default'},
'Ethernet2/24': {'vrf': 'default'},
'Ethernet2/25': {'vrf': 'default'},
'Ethernet2/26': {'vrf': 'default'},
'Ethernet2/27': {'vrf': 'default'},
'Ethernet2/28': {'vrf': 'default'},
'Ethernet2/29': {'vrf': 'default'},
'Ethernet2/3': {'access_vlan': 100,
'switchport_mode': 'access',
'switchport_enable': True,
'trunk_vlans': '1-4094'},
'Ethernet2/30': {'vrf': 'default'},
'Ethernet2/31': {'vrf': 'default'},
'Ethernet2/32': {'vrf': 'default'},
'Ethernet2/33': {'vrf': 'default'},
'Ethernet2/34': {'vrf': 'default'},
'Ethernet2/35': {'vrf': 'default'},
'Ethernet2/36': {'vrf': 'default'},
'Ethernet2/37': {'vrf': 'default'},
'Ethernet2/38': {'vrf': 'default'},
'Ethernet2/39': {'vrf': 'default'},
'Ethernet2/4': {'vrf': 'default'},
'Ethernet2/40': {'vrf': 'default'},
'Ethernet2/41': {'vrf': 'default'},
'Ethernet2/42': {'vrf': 'default'},
'Ethernet2/43': {'vrf': 'default'},
'Ethernet2/44': {'vrf': 'default'},
'Ethernet2/45': {'vrf': 'default'},
'Ethernet2/46': {'vrf': 'default'},
'Ethernet2/47': {'vrf': 'default'},
'Ethernet2/48': {'vrf': 'default'},
'Ethernet2/5': {'vrf': 'default'},
'Ethernet2/6': {'vrf': 'default'},
'Ethernet2/7': {'vrf': 'default'},
'Ethernet2/8': {'vrf': 'default'},
'Ethernet2/9': {'vrf': 'default'},
'Ethernet3/1': {'vrf': 'default'},
'Ethernet3/10': {'vrf': 'default'},
'Ethernet3/11': {'vrf': 'default'},
'Ethernet3/12': {'vrf': 'default'},
'Ethernet3/13': {'vrf': 'default'},
'Ethernet3/14': {'vrf': 'default'},
'Ethernet3/15': {'vrf': 'default'},
'Ethernet3/16': {'vrf': 'default'},
'Ethernet3/17': {'vrf': 'default'},
'Ethernet3/18': {'vrf': 'default'},
'Ethernet3/19': {'vrf': 'default'},
'Ethernet3/2': {'vrf': 'default'},
'Ethernet3/20': {'vrf': 'default'},
'Ethernet3/21': {'vrf': 'default'},
'Ethernet3/22': {'vrf': 'default'},
'Ethernet3/23': {'vrf': 'default'},
'Ethernet3/24': {'vrf': 'default'},
'Ethernet3/25': {'vrf': 'default'},
'Ethernet3/26': {'vrf': 'default'},
'Ethernet3/27': {'vrf': 'default'},
'Ethernet3/28': {'vrf': 'default'},
'Ethernet3/29': {'vrf': 'default'},
'Ethernet3/3': {'vrf': 'default'},
'Ethernet3/30': {'vrf': 'default'},
'Ethernet3/31': {'vrf': 'default'},
'Ethernet3/32': {'vrf': 'default'},
'Ethernet3/33': {'vrf': 'default'},
'Ethernet3/34': {'vrf': 'default'},
'Ethernet3/35': {'vrf': 'default'},
'Ethernet3/36': {'vrf': 'default'},
'Ethernet3/37': {'vrf': 'default'},
'Ethernet3/38': {'vrf': 'default'},
'Ethernet3/39': {'vrf': 'default'},
'Ethernet3/4': {'vrf': 'default'},
'Ethernet3/40': {'vrf': 'default'},
'Ethernet3/41': {'vrf': 'default'},
'Ethernet3/42': {'vrf': 'default'},
'Ethernet3/43': {'vrf': 'default'},
'Ethernet3/44': {'vrf': 'default'},
'Ethernet3/45': {'vrf': 'default'},
'Ethernet3/46': {'vrf': 'default'},
'Ethernet3/47': {'vrf': 'default'},
'Ethernet3/48': {'vrf': 'default'},
'Ethernet3/5': {'vrf': 'default'},
'Ethernet3/6': {'vrf': 'default'},
'Ethernet3/7': {'vrf': 'default'},
'Ethernet3/8': {'vrf': 'default'},
'Ethernet3/9': {'vrf': 'default'},
'Ethernet4/1': {'vrf': 'default'},
'Ethernet4/10': {'vrf': 'default'},
'Ethernet4/11': {'vrf': 'default'},
'Ethernet4/12': {'vrf': 'default'},
'Ethernet4/13': {'vrf': 'default'},
'Ethernet4/14': {'vrf': 'default'},
'Ethernet4/15': {'vrf': 'default'},
'Ethernet4/16': {'vrf': 'default'},
'Ethernet4/17': {'vrf': 'default'},
'Ethernet4/18': {'vrf': 'default'},
'Ethernet4/19': {'vrf': 'default'},
'Ethernet4/2': {'vrf': 'default'},
'Ethernet4/20': {'vrf': 'default'},
'Ethernet4/21': {'vrf': 'default'},
'Ethernet4/22': {'vrf': 'default'},
'Ethernet4/23': {'vrf': 'default'},
'Ethernet4/24': {'vrf': 'default'},
'Ethernet4/25': {'vrf': 'default'},
'Ethernet4/26': {'vrf': 'default'},
'Ethernet4/27': {'vrf': 'default'},
'Ethernet4/28': {'vrf': 'default'},
'Ethernet4/29': {'vrf': 'default'},
'Ethernet4/3': {'vrf': 'default'},
'Ethernet4/30': {'vrf': 'default'},
'Ethernet4/31': {'vrf': 'default'},
'Ethernet4/32': {'vrf': 'default'},
'Ethernet4/33': {'vrf': 'default'},
'Ethernet4/34': {'vrf': 'default'},
'Ethernet4/35': {'vrf': 'default'},
'Ethernet4/36': {'vrf': 'default'},
'Ethernet4/37': {'vrf': 'default'},
'Ethernet4/38': {'vrf': 'default'},
'Ethernet4/39': {'vrf': 'default'},
'Ethernet4/4': {'vrf': 'default'},
'Ethernet4/40': {'vrf': 'default'},
'Ethernet4/41': {'vrf': 'default'},
'Ethernet4/42': {'vrf': 'default'},
'Ethernet4/43': {'vrf': 'default'},
'Ethernet4/44': {'vrf': 'default'},
'Ethernet4/45': {'vrf': 'default'},
'Ethernet4/46': {'vrf': 'default'},
'Ethernet4/47': {'vrf': 'default'},
'Ethernet4/48': {'vrf': 'default'},
'Ethernet4/5': {'vrf': 'default'},
'Ethernet4/6': {'vrf': 'default'},
'Ethernet4/7': {'vrf': 'default'},
'Ethernet4/8': {'vrf': 'default'},
'Ethernet4/9': {'vrf': 'default'},
'Null0': {'vrf': 'default'},
'Mgmt0':
{'auto_negotiate': True,
'bandwidth': 1000000,
'counters':
{'in_broadcast_pkts': 4,
'in_multicast_pkts': 2,
'in_octets': 4726,
'in_pkts': 2,
'in_unicast_pkts': 0,
'rate':
| |
from __future__ import division, print_function
import numpy as np
from math import factorial
from functools import wraps
from itertools import combinations, permutations
from collections import defaultdict
from pyknotid.representations.gausscode import GaussCode
from pyknotid.utils import vprint
def _to_GaussCode(rep):
    '''Return *rep* unchanged if it already is a GaussCode, else wrap it.'''
    return rep if isinstance(rep, GaussCode) else GaussCode(rep)
def require_GaussCode(func):
    '''Decorator that coerces the first argument of *func* to a GaussCode.'''
    @wraps(func)
    def converted(first_arg, *args, **kwargs):
        return func(_to_GaussCode(first_arg), *args, **kwargs)
    return converted
def validate_diagram(d):
    '''Do some basic checks on whether an Arrow diagram (as a string of
    numbered crossings and signs, e.g. ``'1-,2+,1+,2-'``) is valid.

    Each crossing index must appear exactly twice, once with ``+`` and
    once with ``-``.

    Raises ValueError if the diagram is malformed; returns None otherwise.
    '''
    entries = d.split(',')
    counts = defaultdict(int)
    signs = defaultdict(int)
    for entry in entries:
        number = entry[:-1]
        if entry[-1] not in ('+', '-'):
            # Bug fix: the original passed two separate arguments to
            # ValueError (stray comma), which produced a tuple instead of
            # one formatted message; concatenate the pieces instead.
            raise ValueError('Arrow diagrams must have crossing + or - '
                             'but this has {}'.format(entry))
        counts[number] += 1
        signs[number] += (1 if entry[-1] == '+' else -1)
    if any(item != 2 for item in counts.values()):
        raise ValueError('Diagram {} appears invalid: some indices '
                         'appear only once'.format(d))
    if any(item != 0 for item in signs.values()):
        raise ValueError('Diagram {} appears invalid: sum of signs for '
                         'some crossings do not equal 0'.format(d))
@require_GaussCode
def writhing_numbers(gc, diagrams, based=False):
    '''Returns the signed sum of representations of the given Arrow
    diagrams in the given representation.

    Parameters
    ----------
    gc : A :class:`~pyknotid.representations.gausscode.GaussCode` or
        equivalent representation.
        The knot for which to find the writhes.
    diagrams : str or list or tuple
        A list of strings, or single string, representing Arrow
        diagrams, e.g. '1-,2+,1+,2-' for Vassiliev 2.  A trailing 'd'
        on a term (e.g. '2-d') marks a crossing whose sign should be
        squared when accumulating the sum.
    based : bool
        Whether the diagrams have basepoints (if True, assumed to be
        just before the first entry).
    '''
    if not isinstance(diagrams, (list, tuple)):
        diagrams = [diagrams]

    # Parse optional 'd' suffixes: multipliers[diagram][crossing] is the
    # exponent applied to that crossing's sign.  NOTE(review): this
    # mutates the caller's `diagrams` list in place, as the original did;
    # callers (e.g. vassiliev_4) rely on the stripped strings as keys.
    multipliers = defaultdict(lambda: {})
    use_multipliers = any('d' in diagram for diagram in diagrams)
    if use_multipliers:
        for di, diagram in enumerate(diagrams):
            terms = diagram.split(',')
            for term in terms:
                multiplier = 2 if term[-1] == 'd' else 1
                term = int(term[:-2]) if 'd' in term else int(term[:-1])
                multipliers[diagram.replace('d', '')][term] = multiplier
            diagrams[di] = diagram.replace('d', '')

    for d in diagrams:
        validate_diagram(d)

    code = gc._gauss_code[0]
    code_len = len(code)

    from pyknotid.invariants import _crossing_arrows_and_signs
    arrows, signs = _crossing_arrows_and_signs(code, gc.crossing_numbers)

    crossing_numbers = list(gc.crossing_numbers)

    # group diagrams by degree (number of arrows)
    degrees = defaultdict(lambda: [])
    for diagram in diagrams:
        degrees[len(diagram.split(',')) // 2].append(diagram)

    # For each diagram, build a list of predicates on the (shifted) arrow
    # endpoints that are all True exactly when a permutation of crossings
    # realises the diagram's ordering.
    relations = {diagram: [] for diagram in diagrams}
    for diagram in diagrams:
        terms = diagram.split(',')
        numbers = [term[:-1] for term in terms]
        number_strs = list(sorted(set(numbers), key=lambda j: int(j)))
        for i, number in enumerate(number_strs):
            for oi, other_number in enumerate(number_strs[i+1:]):
                oi += i + 1
                if i != 0:
                    if terms.index(number + '-') < terms.index(other_number + '-'):
                        relations[diagram].append(lambda l, i=i, oi=oi: l[i][0] < l[oi][0])
                    else:
                        relations[diagram].append(lambda l, i=i, oi=oi: l[i][0] > l[oi][0])
                if terms.index(number + '-') < terms.index(other_number + '+'):
                    relations[diagram].append(lambda l, i=i, oi=oi: l[i][0] < l[oi][1])
                else:
                    relations[diagram].append(lambda l, i=i, oi=oi: l[i][0] > l[oi][1])
                if terms.index(number + '+') < terms.index(other_number + '-'):
                    relations[diagram].append(lambda l, i=i, oi=oi: l[i][1] < l[oi][0])
                else:
                    relations[diagram].append(lambda l, i=i, oi=oi: l[i][1] > l[oi][0])
                # This one seems to not be necessary for diagrams where all arrows cross?
                if terms.index(number + '+') < terms.index(other_number + '+'):
                    relations[diagram].append(lambda l, i=i, oi=oi: l[i][1] < l[oi][1])
                else:
                    relations[diagram].append(lambda l, i=i, oi=oi: l[i][1] > l[oi][1])
            if i == 0:
                continue
            if terms.index(number + '+') < terms.index(number + '-'):
                relations[diagram].append(lambda l, i=i, oi=oi: l[i][1] < l[i][0])
            else:
                relations[diagram].append(lambda l, i=i, oi=oi: l[i][1] > l[i][0])

    max_degree = max(degrees.keys())

    representations_sums = {d: 0 for d in diagrams}
    used_sets = {d: set() for d in diagrams}

    combs = combinations(crossing_numbers, max_degree)
    # n choose k, for progress reporting only
    try:
        num_combs = (factorial(len(crossing_numbers)) //
                     factorial(max_degree) //
                     factorial(len(crossing_numbers) - max_degree))
    except ValueError:
        num_combs = 0

    for ci, comb in enumerate(combs):
        if ci % 10000 == 0:
            vprint('\rCombination {} of {}    '.format(ci + 1, num_combs),
                   newline=False, condition=(ci % 10000) == 0)
        if based:
            perms = [comb]
        else:
            perms = permutations(comb)
        ordered_indices = tuple(sorted(comb))
        # Skip the combination entirely if it has already contributed to
        # every diagram.
        for diagram in diagrams:
            if ordered_indices not in used_sets[diagram]:
                break
        else:
            continue
        for perm in perms:
            cur_arrows = [list(arrows[i]) for i in perm]
            a1s = cur_arrows[0][0]
            if based:
                a1s = 0
            # shift all endpoints so the first arrow starts at 0
            for i, arrow in enumerate(cur_arrows):
                arrow[0] = (arrow[0] - a1s) % code_len
                arrow[1] = (arrow[1] - a1s) % code_len

            for diagram in diagrams:
                if ordered_indices in used_sets[diagram]:
                    continue
                for relation in relations[diagram]:
                    if not relation(cur_arrows):
                        break
                else:
                    # Accumulate the product of crossing signs.
                    # Bug fix: the original called the Python-2 builtin
                    # `reduce`, which is a NameError on Python 3 (it was
                    # never imported from functools); compute the product
                    # with an explicit loop instead.
                    sign_product = 1
                    if use_multipliers:
                        for num, arrow_i in enumerate(perm):
                            sign_product *= signs[arrow_i] ** multipliers[diagram][num + 1]
                    else:
                        for arrow_i in perm:
                            sign_product *= signs[arrow_i]
                    representations_sums[diagram] += sign_product
                    used_sets[diagram].add(ordered_indices)
    vprint()

    return representations_sums
def vassiliev_2(gc):
    '''Degree-2 Vassiliev invariant via the single based Arrow diagram.'''
    diagram = '1-,2+,1+,2-'
    results = writhing_numbers(gc, diagram, based=True)
    print('results', results)
    return results[diagram]
def vassiliev_3(gc):
    '''Degree-3 Vassiliev invariant from two unbased Arrow diagrams.'''
    # Bug fix: the original passed forbid_all_symmetry=True, which is not
    # a parameter of writhing_numbers (signature: gc, diagrams, based) and
    # raised TypeError on every call.
    results = writhing_numbers(gc, ['1-,2+,3-,1+,2-,3+',
                                    '1-,2-,3+,1+,3-,2+'], based=False)
    print('results', results)
    return results['1-,2-,3+,1+,3-,2+'] // 2 + results['1-,2+,3-,1+,2-,3+']
def vassiliev_4(gc):
    '''Degree-4 Vassiliev invariant as a weighted sum over 14 degree-4
    Arrow diagrams plus 2 degree-3 diagrams with doubled ('d') crossings.'''
    d4 = ['1-,2-,3+,1+,4-,3-,2+,4+',
          '1-,2+,3-,1+,4-,3+,2-,4+',
          '1-,2+,3-,4+,2-,1+,4-,3+',
          '1-,2-,3+,1+,4-,2+,3-,4+',
          '1-,2+,3-,4+,1+,2-,4-,3+',
          '1-,2+,3-,4-,1+,4+,3+,2-',
          '1-,2+,3-,4-,1+,3+,4+,2-',
          '1-,2+,3-,4+,1+,4-,3+,2-',
          '1-,2+,3+,4-,1+,3-,4+,2-',
          '1-,2+,3-,4-,1+,4+,2-,3+',
          '1-,2-,3+,4-,2+,1+,3-,4+',
          '1-,2-,3+,4-,1+,2+,3-,4+',
          '1-,2-,1+,3-,2+,4-,3+,4+',
          '1-,2-,1+,3+,2+,4-,3-,4+']
    d3 = ['1-,2+,3-d,1+,3+d,2-',
          '1-d,2+,3-,1+d,2-,3+']
    results_4 = writhing_numbers(gc, d4)
    results_3 = writhing_numbers(gc, d3)

    # weights of the fourteen degree-4 diagrams, in order
    weights = [1, 6, 2, 3, 1, 2, 2, -1, 1, 1, 2, 2, 1, 1]
    total = sum(w * results_4[diagram] for w, diagram in zip(weights, d4))
    # note: writhing_numbers strips the 'd' markers from d3 in place, so
    # indexing by d3 entries matches the keys of results_3
    total += results_3[d3[0]] + results_3[d3[1]]

    print('results 4')
    for diagram, value in results_4.items():
        print(diagram, ':', value)
    print('results 3')
    for diagram, value in results_3.items():
        print(diagram, ':', value)

    return total
def slip_vassiliev_2(gc):
    '''Sum of based degree-2 writhing numbers over four slip diagrams,
    evaluated on the knot and on its flipped version.'''
    codes = ['1-,2+,3+,1+,2-,3-',
             '1-,2+,3+,1+,3-,2-',
             '1-,2-,3+,1+,2+,3-',
             '1-,2-,3+,2+,1+,3-']
    results = writhing_numbers(gc, codes, based=True)
    gc = gc.flipped()
    results2 = writhing_numbers(gc, codes, based=True)
    for code in codes:
        print('{}: {}'.format(code, results[code]))
    # Bug fixes: np.sum(dict.values()) does not sum a Python-3 dict view
    # (numpy wraps it as a 0-d object array), so use the builtin sum; the
    # original also had a second, unreachable return statement after this
    # one, which has been removed.
    return sum(results.values()) + sum(results2.values())
def vassiliev_2_long_form(gc):
    '''Degree-2 Vassiliev invariant for long (open) knots, normalised by
    the number of crossings.

    Returns 0 for a crossing-free diagram; raises ValueError if the total
    is non-zero while no crossings exist (inconsistent input).
    '''
    results_1 = writhing_numbers(gc, ['1-,2+,1+,2-'], based=False)
    results_2 = writhing_numbers(gc, ['1-,2-d,3-,1+,2+d,3+',
                                      '1-,2-,3-d,1+,3+d,2+',
                                      '1-d,2-,3-,2+,1+d,3+',
                                      '1-,2-,1+,3-d,3+d,2+',
                                      '1-,2-,1+,3+d,3-d,2+'],
                                 based=False)
    num_crossings = float(len(gc.crossing_numbers))
    # Bug fixes: on Python 3 dict views are not subscriptable
    # (results_1.values()[0] raised TypeError) and np.sum of a dict view
    # does not sum its elements; use builtin sum and next(iter(...)).
    total = sum(results_2.values()) + next(iter(results_1.values()))
    if total != 0 and num_crossings == 0:
        raise ValueError('Found no crossings but non-zero total vassiliev')
    if num_crossings == 0:
        return 0
    return total / num_crossings
@require_GaussCode
def writhing_numbers_numpy(gc, diagrams, based=False):
'''Returns the signed sum of representations of the given Arrow
diagrams in the given representation.
Parameters
----------
gc : A :class:`~pyknotid.representations.gausscode.GaussCode` or
equivalent representation.
The knot for which to find the writhes.
diagrams : str or list or tuple
A list of strings, or single string, representing Arrow
diagrams, e.g. '1-2+1+2-' for Vassiliev 2.
based : bool
Whether the diagrams have basepoints (if True, assumed to be
just before the first entry).
'''
if not isinstance(diagrams, (list, tuple)):
diagrams = [diagrams]
for d in diagrams:
validate_diagram(d)
level = 0
code = gc._gauss_code
code = code[0]
gc_len = len(gc)
code_len = len(code)
from pyknotid.invariants import _crossing_arrows_and_signs
arrows, signs = _crossing_arrows_and_signs(code, gc.crossing_numbers)
crossing_numbers = list(gc.crossing_numbers)
# degrees = [len(diagram.split(',')) for diagram in diagrams]
degrees = defaultdict(lambda: [])
for diagram in diagrams:
degrees[len(diagram.split(',')) // 2].append(diagram)
relations = {diagram: [] for diagram in diagrams}
for diagram in diagrams:
degree = len(diagram.split(',')) // 2
num_relations = factorial(degree - 1) * 4
terms = diagram.split(',')
numbers = [term[:-1] for term in terms]
number_strs = list(sorted(set(numbers), key=lambda j: int(j)))
for i, number in enumerate(number_strs):
for oi, other_number in enumerate(number_strs[i+1:]):
oi += i + 1
if i != 0:
if terms.index(number + '-') < terms.index(other_number + '-'):
relations[diagram].append(lambda l, i=i, oi=oi: l[i][0] < l[oi][0])
else:
relations[diagram].append(lambda l, i=i, oi=oi: l[i][0] > l[oi][0])
if terms.index(number + '-') < terms.index(other_number + '+'):
relations[diagram].append(lambda l, i=i, oi=oi: l[i][0] < l[oi][1])
else:
relations[diagram].append(lambda l, i=i, oi=oi: l[i][0] > l[oi][1])
if terms.index(number + '+') < terms.index(other_number + '-'):
relations[diagram].append(lambda l, i=i, oi=oi: l[i][1] < l[oi][0])
else:
relations[diagram].append(lambda l, i=i, oi=oi: l[i][1] > l[oi][0])
if i == 0:
if terms.index(number + '+') < terms.index(other_number + '+'):
relations[diagram].append(lambda l, i=i, oi=oi: l[i][1] < l[oi][1])
else:
relations[diagram].append(lambda l, i=i, oi=oi: l[i][1] > l[oi][1])
max_degree = max(degrees.keys())
used_sets = set()
# representations_sums = [0 for _ in diagrams]
representations_sums = {d: 0 for d in diagrams}
used_sets = {d: set() for d in | |
# <gh_stars>0
## LSDMap_VectorTools.py
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
## These functions are tools to deal with vector data using shapely
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
## FJC
## 26/06/17
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from . import LSDMap_GDALIO as LSDMap_IO
from shapely.geometry import Point, Polygon
import os
from os.path import exists
from osgeo import ogr, osr
import LSDPlottingTools as LSDPT
import gdal as gdal
from osgeo.gdalconst import GA_ReadOnly
from LSDMapFigure import PlottingHelpers as Helper
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
# BASIN FUNCTIONS
# These functions do various operations on basin polygons
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
def GetBasinOutlines(DataDirectory, basins_fname):
    """
    This function takes in the raster of basins and gets a dict of basin polygons,
    where the key is the basin key and the value is a shapely polygon of the basin.

    IMPORTANT: In this case the "basin key" is usually the junction number:
    this function will use the raster values as keys and in general
    the basin rasters are output based on junction indices rather than keys

    Args:
        DataDirectory (str): the data directory with the basin raster
        basins_fname (str): the basin raster

    Returns:
        list of shapely polygons with the basins

    Author: FJC
    """
    print(basins_fname)
    # the shapefile shares the raster's base name
    base_name = basins_fname.split('.')[0]
    OutputShapefile = base_name + '.shp'

    # polygonise the raster
    BasinDict = LSDMap_IO.PolygoniseRaster(DataDirectory, basins_fname, OutputShapefile)
    return BasinDict
def GetMultipleBasinOutlines(DataDirectory):
    """
    This function takes in multiple rasters of basins and gets a dict of basin polygons,
    where the key is the basin key derived from the file name and the value is a shapely polygon of the basin.

    IMPORTANT: In this case the "basin key" is usually the junction number:
    this function will use the raster values as keys and in general
    the basin rasters are output based on junction indices rather than keys

    Args:
        DataDirectory (str): the data directory with the basin raster

    Returns:
        list of shapely polygons with the basins

    Author: MDH
    """
    # get a list of basins and declare the dictionary to populate
    basin_dict = Helper.MapBasinsToKeys(DataDirectory)
    BasinsDict = {}

    # loop across the basins
    # Fix: dict.iteritems() was removed in Python 3; items() works on 2 and 3.
    for outlet_jn, basin_key in basin_dict.items():
        this_fname = "basin"+str(outlet_jn)+"_AllBasins.bil"
        TempBasins = GetBasinOutlines(DataDirectory, this_fname)
        # Fix: iterate over a snapshot, since the loop body pops and
        # re-inserts keys in TempBasins (mutation during iteration).
        for temp_outlet, temp_basin_key in list(TempBasins.items()):
            if len(TempBasins) > 1:
                print("WARNING: MULTIPLE BASINS IN basin #", outlet_jn)
            # re-key every polygon to the outlet junction number
            TempBasins[int(outlet_jn)] = TempBasins.pop(temp_outlet)
        BasinsDict.update(TempBasins)

    return BasinsDict
def GetBasinCentroids(DataDirectory, basins_fname):
    """
    This function takes in the raster of basins and returns a dict where the
    key is the basin key and the value is the shapely point of the centroid

    In most cases the "basin key" is actually the junction index: it comes
    from the basins labeled within the basin raster, which is output with
    junction indices rather than junction keys

    Args:
        DataDirectory (str): the data directory with the basin raster
        basins_fname (str): the basin raster

    Returns:
        dict of centroid points

    Author: FJC
    """
    # get the basin polygons
    BasinDict = GetBasinOutlines(DataDirectory, basins_fname)

    # get the centroids
    CentroidDict = {}
    # Fix: dict.iteritems() was removed in Python 3; items() works on 2 and 3.
    for basin_key, basin in BasinDict.items():
        CentroidDict[basin_key] = Point(basin.centroid)

    return CentroidDict
def GetPointWithinBasins(DataDirectory,basins_fname):
    """
    This function takes in the raster of basin and returns a dict where the
    key is the basin key and the value is a shapely point that is representative
    of the basin (guaranteed to be within the polygon)

    In most cases the "basin key" is actually the junction index: it comes
    from the basins labeled within the basin raster, which is output with
    junction indices rather than junction keys

    Args:
        DataDirectory (str): the data directory with the basin raster
        basins_fname (str): the basin raster

    Returns:
        dict of representative points

    Author: FJC
    """
    # get the basin polygons
    BasinDict = GetBasinOutlines(DataDirectory, basins_fname)

    # get a representative point per basin
    PointDict = {}
    # Fix: dict.iteritems() was removed in Python 3; items() works on 2 and 3.
    for basin_key, basin in BasinDict.items():
        PointDict[basin_key] = Point(basin.representative_point())

    return PointDict
def GetPointsWithinMultipleBasins(DataDirectory, basins_fname):
    """
    This function takes in rasters of basins and returns a dict where the
    key is the basin key and the value is a shapely point that is representative
    of the basin (guaranteed to be within the polygon)

    In most cases the "basin key" is actually the junction index: it comes
    from the basins labeled within the basin raster, which is output with
    junction indices rather than junction keys

    Args:
        DataDirectory (str): the data directory with the basin raster
        basins_fname (str): unused; kept for interface compatibility with
            GetPointWithinBasins (outlines are discovered from DataDirectory)

    Returns:
        dict of representative points

    Author: FJC
    """
    # get the basin polygons from all per-outlet basin rasters
    BasinDict = GetMultipleBasinOutlines(DataDirectory)
    print("BASIN DICT IS")
    print(BasinDict)
    # get a representative point per basin; .items() is Python 2/3 safe
    # (the original .iteritems() only exists on Python 2)
    PointDict = {basin_key: Point(basin.representative_point())
                 for basin_key, basin in BasinDict.items()}
    print("POINT DICT IS")
    print(PointDict)
    return PointDict
def GetPointWithinBasinsBuffered(DataDirectory, basins_fname, basin_list=None, buffer_frac=0.1):
    """
    This function takes in the raster of basins, and buffers each basin
    (makes each one smaller). It then gets the centroid of each buffered
    basin and returns as a dict where the key is the basin key and the value
    is a shapely point that is the centroid of the buffered basin.

    In most cases the "basin key" is actually the junction index: it comes
    from the basins labeled within the basin raster, which is output with
    junction indices rather than junction keys

    This doesn't work at the moment - need to think of a way to specify the buffer
    distance appropriately.

    Args:
        DataDirectory (str): the data directory with the basin raster
        basins_fname (str): the filename of the basin raster
        basin_list (list): currently unused; default changed from a mutable
            [] to None (behaviour unchanged since it is never read)
        buffer_frac (float): the fraction of the basin to be removed by the
            buffer, default = 0.1

    Returns:
        dict of representative points

    Author: FJC
    """
    # get the basin polygons
    BasinDict = GetBasinOutlines(DataDirectory, basins_fname)
    # buffer and get the centre of the buffered polygons
    PointDict = {}
    # .items() instead of the Python-2-only .iteritems()
    for basin_key, basin in BasinDict.items():
        print("This basin key is: "+str(basin_key))
        # the shorter side of the bounding box sets the buffer distance
        bounds = basin.bounds
        min_length = min(bounds[2] - bounds[0], bounds[3] - bounds[1])
        print(min_length)
        # a negative buffer distance shrinks the polygon
        new_basin = Polygon(basin.buffer(min_length*buffer_frac*-1))
        # get the centroid of the buffered basin
        PointDict[basin_key] = Point(new_basin.centroid)
    return PointDict
##### This part is copied from the LSD_GeologyTools.py file to make the functions accessible from other scripts and thus easier to ingest; it will be cleaned up at some point.
##### The aim of these functions is to rasterize a lithologic map.
def readFile(filename):
    """Read a single-band raster with GDAL.

    Args:
        filename (str): path to the raster file.

    Returns:
        tuple: (xsize, ysize, geotransform, geoproj, Z) where Z is the
        band-1 data as a numpy array.

    Raises:
        Exception: if GDAL cannot open the file.
    """
    print("Hey buddy, Reading the file: "+filename)
    filehandle = gdal.Open(filename, GA_ReadOnly)
    # gdal.Open returns None on failure; compare with `is None`,
    # not `== None` (identity check is the correct idiom)
    if filehandle is None:
        raise Exception("Unable to read the data file")
    band1 = filehandle.GetRasterBand(1)
    geotransform = filehandle.GetGeoTransform()
    geoproj = filehandle.GetProjection()
    Z = band1.ReadAsArray()
    xsize = filehandle.RasterXSize
    ysize = filehandle.RasterYSize
    return xsize, ysize, geotransform, geoproj, Z
def writeFile(filename, geotransform, geoprojection, data):
    """Write a 2D array to a single-band GeoTIFF.

    Args:
        filename (str): output file path.
        geotransform: GDAL geotransform tuple to attach to the raster.
        geoprojection: projection (WKT) to attach to the raster.
        data: 2D numpy array (rows, cols) to write as band 1.

    Returns:
        int: 1 on completion.
    """
    nrows, ncols = data.shape
    noDataValue = -9999
    # Pass the plain driver name: the original "GTiff".encode('utf-8')
    # shadowed the builtin `format` and produces bytes on Python 3,
    # which gdal.GetDriverByName rejects.
    driver = gdal.GetDriverByName("GTiff")
    # Float32 so negative values (including the -9999 nodata) can be stored
    dst_datatype = gdal.GDT_Float32
    # GDAL Create() takes (name, xsize=cols, ysize=rows, bands, type)
    dst_ds = driver.Create(filename, ncols, nrows, 1, dst_datatype)
    dst_ds.GetRasterBand(1).WriteArray(data)
    dst_ds.GetRasterBand(1).SetNoDataValue(noDataValue)
    dst_ds.SetGeoTransform(geotransform)
    dst_ds.SetProjection(geoprojection)
    return 1
def Rasterize_BGS_geologic_maps(shapefile_name):
    """Rasterize a BGS geology shapefile and write a lithology key CSV.

    Calls gdal_rasterize on the BGSREF attribute at 90 m resolution,
    warps the result to UTM zone 30 (EPSG:32630) as an ENVI .bil, and
    writes a <prefix>_lithokey.csv mapping BGSREF IDs to RCS_D rock types.

    Args:
        shapefile_name (str): full path to the BGS shapefile.
    """
    # The shapefile to be rasterized:
    print('Rasterize ' + shapefile_name)
    # get path and filename separately
    shapefilefilepath = LSDPT.GetPath(shapefile_name)
    shapefilename = LSDPT.GetFileNameNoPath(shapefile_name)
    shapefileshortname = LSDPT.GetFilePrefix(shapefile_name)
    print("Shapefile name is: "+shapefilename)
    # now get the fields from the shapefile
    dataSource = ogr.Open(shapefile_name)
    daLayer = dataSource.GetLayer(0)
    # list the field names for the user
    print("Let me tell you what the names of the fields are!")
    layerDefinition = daLayer.GetLayerDefn()
    for i in range(layerDefinition.GetFieldCount()):
        print(layerDefinition.GetFieldDefn(i).GetName())
    # The raster file to be created and receive the rasterized shapefile
    outrastername = shapefileshortname + '.tif'
    outraster = shapefilefilepath+os.sep+outrastername
    outcsv = shapefilefilepath+os.sep+shapefileshortname+'_lithokey.csv'
    print("Full name of out raster is: "+outraster)
    # Rasterize!!
    system_call = 'gdal_rasterize -a BGSREF -l ' + shapefileshortname +' -tr 90 -90 -a_nodata -9999 ' + shapefile_name + ' ' + outraster
    print("System call is: ")
    print(system_call)
    os.system(system_call)
    # now convert the raster to UTM, as well as delete the intermediate TIF
    outrastername_bil = shapefileshortname + '.bil'
    outraster_bil = shapefilefilepath+os.sep+outrastername_bil
    print("Full name of out raster is: "+outraster_bil)
    # This assumes UTM zone 30, because why would we do any work in East Anglia?
    system_call2 = 'gdalwarp -t_srs EPSG:32630 -of ENVI -dstnodata -9999 ' + outraster + ' ' + outraster_bil
    os.system(system_call2)
    # Now get rid of the tif; os.remove is portable, unlike shelling out
    # to the Unix-only `rm`
    os.remove(outraster)
    # Make a key for the bedrock: map each BGSREF ID to its rock type
    geol_dict = dict()
    for feature in daLayer:
        ID = feature.GetField("BGSREF")
        GEOL = feature.GetField("RCS_D")
        if ID not in geol_dict:
            print("I found a new rock type, ID: "+ str(ID)+ " and rock type: " + str(GEOL))
            geol_dict[ID] = GEOL
    print("The rocks are: ")
    print(geol_dict)
    # text mode 'w' (not 'wb'): we write str rows, which fails in binary
    # mode on Python 3
    with open(outcsv, 'w') as f:
        f.write('ID,rocktype\n')
        for key, rocktype in geol_dict.items():
            f.write(str(key)+','+str(rocktype)+'\n')
    print("All done")
def Rasterize_geologic_maps_pythonic(shapefile_name, raster_resolution = 400, geol_field = "xx"):
# The shapefile to be rasterized:
print('Rasterize ' + shapefile_name)
#get path and filename seperately
shapefilefilepath = LSDPT.GetPath(shapefile_name)
shapefilename = LSDPT.GetFileNameNoPath(shapefile_name)
shapefileshortname = LSDPT.GetFilePrefix(shapefile_name)
print("Shapefile name is: "+shapefilename)
# now get the the fields from the shapefile
daShapefile = shapefile_name
dataSource = ogr.Open(daShapefile)
daLayer = dataSource.GetLayer(0)
# lets see what the layers are
print("Let me tell you what the names of the fields are!")
layerDefinition = daLayer.GetLayerDefn()
for i in range(layerDefinition.GetFieldCount()):
print(layerDefinition.GetFieldDefn(i).GetName())
# The raster file to be created and receive the rasterized shapefile
outrastername = shapefileshortname + '.tif'
print("The new raster is: "+outrastername)
outraster = shapefilefilepath+ outrastername
outcsv = shapefilefilepath+shapefileshortname+'_lithokey.csv'
print("Full name of out raster is: "+outraster)
# Create the destination data source
inGridSize=float(raster_resolution)
xMin, xMax, yMin, yMax = daLayer.GetExtent()
xRes = int((xMax - xMin) / inGridSize)
yRes = int((yMax - yMin) / | |
graph
update_grouping(diagram, diagram.connectivity_graph)
# Reset RST graph if requested
if mode == 'rst':
# Create a new RST graph for the Diagram object
diagram.rst_graph = nx.DiGraph()
# Update grouping information from the grouping graph to the new RST
# graph
update_grouping(diagram, diagram.rst_graph)
# Flag the graph for re-drawing
diagram.update = True
return
# If requested, delete grouping nodes (and, in RST mode, relations)
if command == 'rm':
    # Prepare input for validation: strip the 'rm' prefix (1 token)
    user_input = prepare_input(user_input, 1)
    # Check if RST relations need to be included in validation
    if mode == 'rst':
        # Validate input against relations as well
        valid = validate_input(user_input, current_graph,
                               groups=True, rst=True)
    else:
        # Check input against the current graph
        valid = validate_input(user_input, current_graph, groups=True)
    # If the input is not valid, continue
    if not valid:
        return
    # If input is valid, proceed
    # NOTE(review): this check is always True here, since invalid input
    # returned above — the branch is redundant but harmless.
    if valid:
        # Generate a dictionary mapping group aliases to IDs
        group_dict = replace_aliases(current_graph, 'group')
        # Replace aliases with valid identifiers, if used
        user_input = [group_dict[u] if u in group_dict.keys()
                      else u for u in user_input]
        # If annotating RST relations, check RST relations as well
        if mode == 'rst':
            # Generate a dictionary mapping relation aliases to IDs
            rel_dict = replace_aliases(current_graph, 'relation')
            # Replace aliases with valid identifiers, if used;
            # non-aliases are uppercased to match node identifiers
            user_input = [rel_dict[u] if u in rel_dict.keys()
                          else u.upper() for u in user_input]
        # Remove the designated nodes from the graph
        current_graph.remove_nodes_from(user_input)
        # Flag the graph for re-drawing
        diagram.update = True
    return
# if requested, split a node into several copies so it can participate
# in multiple rhetorical relations
if command == 'split':
    # Begin by checking the number of desired splits
    n_splits = int(user_input.split()[1])
    # Prepare input for validation: strip 'split <n>' (2 tokens)
    user_input = prepare_input(user_input, 2)
    # Validate input – only diagram elements can be split
    valid = validate_input(user_input, current_graph)
    # If the input is valid, proceed
    if valid:
        # Set up a placeholder list for split nodes
        split_list = []
        # Get properties of the node to duplicate
        for n in user_input:
            # Generate new identifiers for split nodes by taking the node
            # name in uppercase and adding the number of split after stop.
            split_ids = [n.upper() + '.{}'.format(i)
                         for i in range(1, n_splits + 1)]
            # Get the attribute of the node that is being split
            attr_dict = current_graph.nodes[n.upper()]
            # Add parent node information to the dictionary
            attr_dict['copy_of'] = n.upper()
            # Loop over split ids
            # NOTE(review): all split copies share this one attr_dict
            # object, so later mutation of one copy's attributes would
            # affect its siblings — confirm this is intended.
            for s in split_ids:
                # Append a tuple of identifier and attributes to split_list
                split_list.append((s, attr_dict))
            # Remove node from the RST graph
            current_graph.remove_node(n.upper())
        # Add split nodes to the graph
        current_graph.add_nodes_from(split_list)
        # Flag the graph for re-drawing
        diagram.update = True
    return
# Define a dictionary of available commands during annotation.
# Keys group commands by context: 'rst' and 'connectivity' list
# task-specific commands, 'generic' lists commands valid in any mode,
# and 'tasks' lists the commands that switch between annotation tasks.
commands = {'rst': ['rels', 'split', 'ungroup'],
            'connectivity': ['ungroup'],
            'generic': ['acap', 'cap', 'comment', 'done', 'exit', 'export',
                        'free', 'info', 'isolate', 'macrogroups', 'next',
                        'reset', 'rm'],
            'tasks': ['conn', 'group', 'rst']
            }
# Help texts printed by the 'info' command, keyed by annotation task.
info = {
    # help for grouping / layout annotation
    'layout': "---\n"
              "Enter the identifiers of diagram elements you wish to\n"
              "group together. Separate the identifiers with a comma.\n"
              "\n"
              "Example of valid input: b1, a1, t1\n\n"
              ""
              "This command groups nodes B1, A1 and T1 together under a\n"
              "grouping node.\n"
              "---\n"
              "Grouping nodes may be deleted using command rm.\n\n"
              "Example command: rm g1\n\n"
              "This command deletes group G1. Multiple groups can be\n"
              "deleted by entering multiple identifiers, e.g. rm g1 g2 g3\n"
              "---\n"
              "To add macro-grouping information to a node, group, image\n"
              "constant or their groups, enter the command 'macro' and \n"
              "by the identifier or identifiers.\n\n"
              "Example command: macro i0\n\n"
              "A list of available macro-groups can be printed using the\n"
              "command 'macrogroups'. This command will also print all\n"
              "currently defined macro-groups.\n"
              "---\n",
    # help for RST (rhetorical structure) annotation
    'rst': "---\n"
           "Enter the command 'new' to create a new RST relation.\n"
           "The tool will then ask you to enter a valid name for the\n"
           "relation. Relations can be deleted using the command 'rm'.\n"
           "Names are entered by using abbreviations, which can be listed\n"
           "using the command 'relations'.\n\n"
           "The tool will infer the type of relation and ask you to enter\n"
           "either a nucleus and satellites or several nuclei.\n"
           "---\n"
           "If diagram elements are picked out by multiple rhetorical\n"
           "relations, you can use the command 'split' to split the node.\n"
           "This creates multiple instances of the same node, which can \n"
           "be picked out as parts of different rhetorical relations.\n\n"
           "Example command: split 2 b1\n\n"
           "This command splits node B1 into two nodes, which are given\n"
           "identifiers B1.1 and B1.2.\n"
           "---\n",
    # help for connectivity annotation
    'connectivity': "---\n"
                    "Drawing a connection between nodes requires three\n"
                    "types of information: source, connection type and\n"
                    "target.\n\n"
                    "The sources and targets must be valid identifiers,\n"
                    "elements and groups or lists of valid identifiers\n"
                    "separated using commas.\n\n"
                    "Example command: t1 > b0, b1\n\n"
                    "The connection type must be one of the following\n"
                    "shorthand aliases:\n\n"
                    "- for undirected lines\n"
                    "> for unidirectional arrow\n"
                    "<> for bidirectional arrow\n"
                    "---\n",
    # help for commands that work in every mode
    'generic': "Other valid commands include:\n\n"
               "acap: Save a screen capture for all graphs in diagram.\n"
               "cap: Save a screen capture of the current visualisation.\n"
               "comment: Enter a comment about current diagram.\n"
               "free: Remove all edges leading to a node, e.g. free b0.\n"
               "exit: Exit the annotator immediately.\n"
               "export: Export the current graph into DOT format. \n"
               "done: Mark the current diagram as complete and move on.\n"
               "hide: Hide the layout segmentation.\n"
               "info: Print this message.\n"
               "isolate: Remove isolates from the graph.\n"
               "next: Save current work and move on to the next diagram.\n"
               "reset: Reset the current annotation.\n"
               "show: Show the layout segmentation. Use e.g. show b0 to\n"
               " a single unit.\n"
               "---",
    }
# Define a dictionary of various prompts presented to user during annotation.
# Keys are referenced by the annotation loop; values are the exact strings
# shown when asking for the corresponding input.
prompts = {'nucleus_id': "[RST] Enter the identifier of nucleus: ",
           'satellite_id': "[RST] Enter the identifier(s) of satellite(s): ",
           'layout_default': "[GROUPING] Please enter nodes to group or a valid"
                             " command: ",
           'comment': "Enter comment: ",
           'rst_default': "[RST] Please enter a valid command: ",
           'rel_prompt': "[RST] Please enter relation name: ",
           'nuclei_id': "[RST] Enter the identifiers of the nuclei: ",
           'macro_group': "[GROUPING] Please enter macro-group type: ",
           'conn_default': "[CONNECTIVITY] Please enter a connection or a valid"
                           " command: ",
           'table_rows': "[GROUPING] How many rows does the table have? ",
           'table_cols': "[GROUPING] How many columns does the table have? ",
           'table_axes': "[GROUPING] How many axes have labels? "
           }
# Define a dictionary of various error messages that may arise during
# annotation, keyed by error condition.
messages = {'nucleus_err': "Sorry, a mononuclear relation cannot have more "
                           "than one nucleus. Please try again.",
            'nuclei_err': "Sorry, a multinuclear relation must have more than "
                          "one nucleus. Please try again.",
            'layout_complete': "[ERROR] Grouping annotation is marked as "
                               "complete.",
            'conn_complete': "[ERROR] Connectivity annotation is marked as "
                             "complete.",
            'rst_complete': "[ERROR] RST annotation is marked as complete. "
            }
# Define a dictionary of RST relations / types and their aliases (keys)
rst_relations = {'anti': {'name': 'antithesis', 'kind': 'mono'},
'back': {'name': 'background', 'kind': 'mono'},
'circ': {'name': 'circumstance', 'kind': 'mono'},
'conc': {'name': 'concession', 'kind': 'mono'},
'cond': {'name': 'condition', 'kind': 'mono'},
'elab': {'name': 'elaboration', 'kind': 'mono'},
'enab': {'name': 'enablement', 'kind': 'mono'},
'eval': {'name': 'evaluation', 'kind': 'mono'},
'evid': {'name': 'evidence', 'kind': 'mono'},
'pret': {'name': 'interpretation', 'kind': 'mono'},
'just': {'name': 'justify', 'kind': 'mono'},
'mean': {'name': 'means', 'kind': 'mono'},
'moti': {'name': 'motivation', 'kind': 'mono'},
'nvoc': {'name': 'nonvolitional-cause', 'kind': 'mono'},
'nvor': {'name': 'nonvolitional-result', 'kind': 'mono'},
'otws': {'name': 'otherwise', 'kind': 'mono'},
'prep': {'name': 'preparation', 'kind': 'mono'},
'purp': {'name': 'purpose', 'kind': 'mono'},
'rest': {'name': 'restatement', 'kind': 'multi'},
'solu': {'name': 'solutionhood', 'kind': 'mono'},
'summ': {'name': 'summary', 'kind': 'mono'},
'unls': {'name': 'unless', 'kind': 'mono'},
'volc': {'name': 'volitional-cause', 'kind': 'mono'},
'volr': {'name': 'volitional-result', 'kind': 'mono'},
'cont': {'name': 'contrast', 'kind': 'multi'},
'join': {'name': 'joint', 'kind': 'multi'},
'list': {'name': 'list', 'kind': 'multi'},
'sequ': {'name': 'sequence', 'kind': 'multi'},
'cseq': {'name': 'cyclic sequence', 'kind': 'multi'}, # NEW
'iden': {'name': 'identification', 'kind': 'mono'},
'casc': {'name': 'class-ascription', 'kind': 'mono'},
'pasc': {'name': 'property-ascription', 'kind': 'mono'},
'poss': {'name': 'possession', 'kind': 'mono'},
'proj': {'name': 'projection', 'kind': 'mono'},
'conn': {'name': 'connected', 'kind': 'multi'}, # NEW!
'titl': {'name': 'title', 'kind': 'mono'},
'conj': {'name': 'conjunction', 'kind': 'multi'}, # NEW!
| |
import cStringIO, sys, csv, copy, ImageDraw, Image, ImageClass
from FindCenter import findCenter, showIm, getBiImList, getEllipse, getView
from PyQt4 import QtGui
from myFunc import detect_peaks, pil16pil8, a16a8, getStrVal
from myMath import fitLine, fitCirc
from myFigure import myFigure
from scipy import ndimage, optimize
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import filters
def getRing(dataList,tol,kernelSize,nIter, ringCenterY, ringSizePrev, meanSizeRate,\
            centralPlane, ringCenterXPrev):
    """Locate the contractile ring across a z-stack of image planes.

    Smooths each plane, finds candidate ring edge points per plane via
    estimateRing(), then iteratively fits a circle in the x-z plane
    (sizeFilterFit) and refines the edge points (refineRing), tightening
    the tolerance each iteration. Falls back to findMidbody() when no
    circle can be fitted.

    Relies on module globals: zPixelScale (z spacing in pixel units),
    Ham, and the debug flag `check` — TODO confirm where these are set.

    Returns:
        pointsX, pointsY, pointsXerr, planes, ringCenterYEst
    """
    # NOTE(review): `karnel` is computed but never used below —
    # smoothing is done with filters.gaussian_filter instead.
    karnel = np.ones([kernelSize,kernelSize])
    karnel = 1.0*karnel/sum(karnel)
    dataListConv = []
    maximaList = []
    maxPointsX = []
    maxPointsY = []
    for data in dataList:
        # Gaussian-smooth each plane; empty planes reuse the previous one
        data = np.array(filters.gaussian_filter(data,kernelSize),dtype = np.uint8)
        if np.sum(data) == 0:
            data = dataListConv[-1]
        maxima, x, y = detect_peaks(data)
        maximaList.append(maxima)
        dataListConv.append(data)
        maxPointsX.append(x)
        maxPointsY.append(y)
    # first pass: per-plane ring edge candidates
    pointsX, pointsY, pointsXerr, planes, ringCenterYEst, ringSizeEst, ringCenterXEst = estimateRing(dataListConv, centralPlane, ringCenterXPrev, tol, meanSizeRate, ringSizePrev)
    if len(pointsX)>0:
        # refine up to nIter-1 times
        # NOTE(review): the inner loops below rebind `i`, shadowing this
        # iteration counter — confirm that is intentional.
        for i in range(nIter-1):
            pointsZ = []
            planesZ = np.arange(planes.size)*zPixelScale #z positions of all planes
            largestSize, j = 0, 0
            for i in range(planes.size): #get points of only allowed planes
                if planes[i]>0:
                    # two x-points per plane, hence z appended twice
                    pointsZ.append(planesZ[i])
                    pointsZ.append(planesZ[i])
                    if ringSizeEst[j]>largestSize:
                        largestSize = ringSizeEst[j]
                    j+=1
            # fall back to the embryo middle when no priors are given
            # (integer division: Python 2 semantics assumed here)
            if centralPlane==None: centralPlane=len(dataListConv)/2
            if ringCenterXPrev==None:
                imBiTmp = getBiImList([dataListConv[centralPlane]],1,Ham)[0][-1]
                eParams = getEllipse(imBiTmp)
                embMiddleX, embMiddleY = eParams[1] #Y is AP direction
                ringCenterXPrev=embMiddleX
            # fit a circle to the edge points in the x-z plane
            centerX, centerY, radius = sizeFilterFit(ringSizeEst, pointsX, pointsXerr, pointsZ,\
                ringSizePrev, meanSizeRate, [ringCenterXPrev, centralPlane*zPixelScale])
            if radius > 0:
                # recollect edge points, now constrained by the fitted circle
                ringSizeEst = np.zeros(len(dataListConv))
                planes = np.zeros(len(dataListConv))
                pointsX, pointsY, pointsXerr = [], [], []
                ringSize = np.zeros(len(dataListConv))
                for i in range(len(dataListConv)):
                    if abs(i*zPixelScale-centerY)<radius:
                        # chord length of the fitted circle at this plane
                        ringSize[i]=2*np.sqrt(radius**2-(i*zPixelScale-centerY)**2)
                        first, second, errF, errS = refineRing(dataListConv[i], tol, centerX, ringCenterYEst, ringSize[i])
                        # NOTE(review): refineRing returns numpy arrays, so
                        # `first!=None` compares elementwise — confirm the
                        # installed numpy version tolerates this.
                        if first!=None:
                            pointsX.append(first[1])
                            pointsY.append(first[0])
                            pointsX.append(second[1])
                            pointsY.append(second[0])
                            pointsXerr.append(errF)
                            pointsXerr.append(errS)
                            planes[i]=1
                    else:
                        ringSize[i]=0
                # tighten the tolerance for the next refinement pass
                tol = tol/1.5
            else:
                # no circle fit: locate the midbody instead and stop
                centerX, centerY = findMidbody(dataList,ringCenterY,tol,ringCenterXPrev)
                break
    else:
        # no candidate points at all: locate the midbody
        radius = 0
        centerX, centerY = findMidbody(dataList,ringCenterY,tol,ringCenterXPrev)
    return pointsX, pointsY, pointsXerr, planes, ringCenterYEst
def estimateRing(dataListConv, centralPlane, ringCenterXPrev, tol, meanSizeRate, ringSizePrev):
    """Collect candidate ring edge points plane by plane.

    Starts from a central plane k and sweeps downward (frames1) then
    upward (frames2), calling findRing() per plane; accumulated centre/size
    estimates from accepted planes constrain subsequent planes. The
    downward results are reversed so the output is ordered by plane index.

    Uses module globals `check` (debug flag) and checkMaximaFound —
    TODO confirm where these are defined.

    Returns:
        pointsX, pointsY, pointsXerr, planes (0/1 per plane),
        mean ring centre Y, per-plane ring sizes, per-plane ring centres X
    """
    planes = np.zeros(len(dataListConv))
    pointsX, pointsY, pointsXerr = [], [], []
    ringCenterY = np.array([])
    ringCenterX = np.array([])
    ringSize = np.array([])
    # choose the starting plane k; `/` here relies on Python 2 integer
    # division (range() needs an int) — would need // under Python 3
    if centralPlane == None:
        k = 2*len(dataListConv)/4
    elif ringSizePrev>100:
        k = 3*max(0,min(centralPlane,len(dataListConv)-1))/4
    else:
        k = max(0,min(centralPlane,len(dataListConv)-1))
    frames1=range(k,-1,-1)
    frames2=range(k+1,len(dataListConv))
    if check: print('k={0}, first set {1} frames, second set {2} frames'.format(k, len(frames1), len(frames2)))
    # sweep downward from k
    for i in frames1:
        first, second, errFirst, errSecond = findRing(dataListConv[i], tol, ringSizePrev, ringCenterY, ringCenterX, ringSize, meanSizeRate, ringCenterXPrev)
        if first != None:
            # record centre (midpoint) and size (x separation) of the pair
            ringCenterY = np.append(ringCenterY,(first[0]+second[0])/2)
            ringCenterX = np.append(ringCenterX,(first[1]+second[1])/2)
            ringSize = np.append(ringSize,abs(second[1]-first[1]))
            pointsX.append(first[1])
            pointsY.append(first[0])
            pointsX.append(second[1])
            pointsY.append(second[0])
            pointsXerr.append(errFirst)
            pointsXerr.append(errSecond)
            planes[i]=1
            if check:#Check which maxima are found. All maxima in red, ring maxima in blue
                maxima, maxPointsX, maxPointsY = detect_peaks(dataListConv[i])
                checkMaximaFound(dataListConv[i],[maxPointsX,maxPointsY],first,second, np.mean(ringCenterX),np.mean(ringCenterY))
    # reverse the downward sweep so results are ordered by plane index
    pointsX=pointsX[::-1]
    pointsY=pointsY[::-1]
    pointsXerr=pointsXerr[::-1]
    if ringCenterY.size>1: ringCenterY=ringCenterY[::-1]
    if ringSize.size>1: ringSize=ringSize[::-1]
    if ringCenterX.size>1: ringCenterX=ringCenterX[::-1]
    # sweep upward from k+1
    for i in frames2:
        first, second, errFirst, errSecond = findRing(dataListConv[i], tol, ringSizePrev, ringCenterY, ringCenterX, ringSize, meanSizeRate, ringCenterXPrev)
        if first != None:
            ringCenterY = np.append(ringCenterY,(first[0]+second[0])/2)
            ringCenterX = np.append(ringCenterX,(first[1]+second[1])/2)
            ringSize = np.append(ringSize,abs(second[1]-first[1]))
            pointsX.append(first[1])
            pointsY.append(first[0])
            pointsX.append(second[1])
            pointsY.append(second[0])
            pointsXerr.append(errFirst)
            pointsXerr.append(errSecond)
            planes[i]=1
            if check:#Check which maxima are found. All maxima in red, ring maxima in blue
                maxima, maxPointsX, maxPointsY = detect_peaks(dataListConv[i])
                checkMaximaFound(dataListConv[i],[maxPointsX,maxPointsY],first,second, np.mean(ringCenterX),np.mean(ringCenterY))
    return pointsX, pointsY, pointsXerr, planes, np.mean(ringCenterY), ringSize, ringCenterX
def findRing(data, tol, ringSizePrev, ringCenterY, ringCenterX,\
             ringSize, meanSizeRate,ringCenterXPrev): #finds two points that belong to the ring and satisfy criteria
    """Find two intensity maxima in one plane that form a plausible ring pair.

    Pops maxima in decreasing intensity order and tests every new maximum
    against all previously accepted ones with a battery of geometric
    checks (size vs previous ring, centre drift, same row, distance from
    the embryo middle). Returns the first pair that passes, as
    ([y, x] point, [y, x] point, error, error), or (None, None, None, None)
    after 100 candidates or when only zero-intensity maxima remain.

    Relies on module globals: maxDist (suppression window), lateStage,
    check, Ham — TODO confirm where these are defined.
    """
    def popMaxima(maxima,data):
        # pop the brightest remaining maximum ([row, col]) and zero out
        # a (2*maxDist)^2 neighbourhood around it so it is not re-picked
        pop = [np.where(data*maxima==np.max(data*maxima))[0][0],\
               np.where(data*maxima==np.max(data*maxima))[1][0]]
        maxima[max(pop[0]-maxDist,0):min(pop[0]+maxDist,maxima.shape[0]),\
               max(pop[1]-maxDist,0):min(pop[1]+maxDist,maxima.shape[1])]=0
        return pop, maxima
    def checkSizePrevUpper(point1, point2, ringSizePrev, meanSizeRate):
        # pair must not be wider than the previous ring (minus expected shrink)
        if meanSizeRate!=0: return abs(point1[1]-point2[1])<ringSizePrev-0.5*meanSizeRate+tol
        else: return abs(point1[1]-point2[1])<ringSizePrev
    def checkSizePrevLower(point1, point2, ringSizePrev, meanSizeRate):
        # pair must not be much narrower than the previous ring
        if meanSizeRate!=0: return 0.8*ringSizePrev-1*meanSizeRate-2*tol<abs(point1[1]-point2[1])
        elif not lateStage: return 0.5*ringSizePrev<abs(point1[1]-point2[1])
        else:return abs(point1[1]-point2[1])>20
    def checkCenterPrev(point1, point2, ringCenterXPrev, ringSizePrev):
        # pair midpoint must stay near the previous ring centre in x
        return abs(ringCenterXPrev-(point1[1]+point2[1])/2)<0.5*ringSizePrev/2
    def checkSize(point1,point2, ringSize):
        # pair width must be close to the most recent accepted width
        return ringSize[-1]-4*tol<abs(point1[1]-point2[1])<ringSize[-1]+2*tol
    def checkCenterY(point1,ringCenterY):
        # point row must be near the running mean ring row
        return abs(ringCenterY-point1[0])<3*tol
    def checkCenterX(point1, point2, ringCenterX, ringSizePrev):
        # pair midpoint must be near the running mean ring centre in x
        return abs(ringCenterX-(point1[1]+point2[1])/2)<min(2*tol,0.5*ringSizePrev)
    def checkYEmb(point1, embMiddleY):
        # point must lie within the expected band around the embryo middle
        return embMiddleY-20<point1[0]<embMiddleY+100
    def checkSameY(point1, point2):
        # the two edge points must lie on (nearly) the same row
        return abs(point1[0]- point2[0]) < 4*tol
    def checkXEmb(point1, point2, embMiddleX):
        # neither point may sit on the embryo midline in x
        return not (abs(embMiddleX-point1[1])<30 or abs(embMiddleX-point2[1])<30)
    def checkPoints(point1, point2, ringCenterX, ringCenterY, ringSize, ringCenterXPrev, ringSizePrev, meanSizeRate, embMiddleX, embMiddleY, lateStage):
        # run all pair-acceptance checks; print the failing reason in debug mode
        if not checkSizePrevUpper(point1, point2, ringSizePrev, meanSizeRate):
            if check: print('large size relative to previous ring', point2)
            return False
        if ringSize.size==0 and not checkSizePrevLower(point1, point2, ringSizePrev, meanSizeRate):
            if check: print('small size relative to previous ring', point2)
            return False
        if not checkCenterPrev(point1, point2, ringCenterXPrev, ringSizePrev):
            if check: print('wrong center relative to previous ring', point2)
            return False
        if ringSize.size>0 and not checkSize(point1, point2, ringSize):
            if check: print('wrong size', point2)
            return False
        if ringCenterX.size>0 and not checkCenterX(point1, point2, np.mean(ringCenterX), ringSizePrev):
            if check: print('wrong center X', point2)
            return False
        if not checkSameY(point1, point2):
            if check: print('not on on the same line', point2)
            return False
        if not lateStage and not checkXEmb(point1, point2, embMiddleX):
            if check: print('too close to the embryo middle X', point2)
            return False
        return True
    maxima, maxPointsX, maxPointsY = detect_peaks(data)
    orderedMaxima = []
    newMaxima = copy.deepcopy(maxima)
    count = 0
    # embryo outline: gives the midline used by the position checks
    imBiTmp = getBiImList([data],1,Ham)[0][-1]
    eParams = getEllipse(imBiTmp)
    embMiddleX, embMiddleY = eParams[1] #Y is AP direction
    if check:
        print('embMiddleX = ',embMiddleX, 'embMiddleY = ',embMiddleY)
        print('ringCenterY', ringCenterY)
        print('ringCenterX',ringCenterX,'ringSize',ringSize, 'rate',meanSizeRate)
        print('ringSizePrev',ringSizePrev,'centXPrev',ringCenterXPrev)
    if ringCenterXPrev == None: ringCenterXPrev = embMiddleX
    while True:
        tmp, newMaxima = popMaxima(newMaxima,data)
        # give up after 100 candidates or when only zero-column maxima remain
        if count > 100 or tmp[1] == 0:
            print('Not found')
            return None, None, None, None
        if check:
            print('max [y,x]',tmp,data[tmp[0],tmp[1]])
        if checkYEmb(tmp, embMiddleY):
            if ringCenterY.size==0 or checkCenterY(tmp, np.mean(ringCenterY)):
                orderedMaxima.append(tmp)
                if check: print('point added')
                # try to pair the newest maximum with every earlier one
                for point in orderedMaxima[:-1]:
                    if checkPoints(orderedMaxima[-1],point, ringCenterX, ringCenterY, ringSize, ringCenterXPrev, ringSizePrev, meanSizeRate, embMiddleX, embMiddleY, lateStage):
                        first = point
                        second = orderedMaxima[-1]
                        firstErr = getError(first,data)
                        secondErr = getError(second, data)
                        return first, second, firstErr, secondErr
            elif check: print('far from previous center Y')
        elif check: print('far from embryo middle')
        count+=1
def sizeFilterFit(ringSize, pointsX, pointsXerr, pointsZ, ringSizePrev,meanSizeRate, ringCenterPrev = [0,0]): #determines which points to consider for the ring fit
    ''' finds points to use for ring fitting

    Discards points outside the previous ring, then brute-force searches
    for the contiguous index window [i, k) whose circle fit (fitCirc)
    minimises an error score, and finally fits a circle to that window.
    Points come in pairs (two per plane), hence the 2*i indexing.

    Note: the default ringCenterPrev=[0,0] is a mutable default argument;
    it is never mutated here, so it is safe, but treat [0,0] as the
    "no previous centre" sentinel.

    Relies on module globals check, checkInRing, checkRingFigure and on
    Python 2 integer division for len(pointsX)/2 — TODO confirm.

    Returns: centerX, centerY, radius (radius 0 when too few points).
    '''
    def getErr(v):
        # objective for optimize.brute: v = (start, end) of the window
        i = max(0,int(v[0]))
        k = min(int(np.ceil(v[1])),len(pointsX)/2)
        if i + 2 < k:
            centerX, centerY, radius, errorbars, resid = fitCirc(pointsX[2*i:2*k],pointsZ[2*i:2*k],pointsXerr[2*i:2*k],(ringSizePrev-0.3*meanSizeRate)/2, aveR=True)
            # reject fits that blow up or exceed plausible ring sizes
            if np.isnan(errorbars[1]) or radius*2>2*max(ringSize) or radius*2>1.2*ringSizePrev:
                return 10000.
            if ringCenterPrev!=[0,0] and np.sqrt((centerX-ringCenterPrev[0])**2+(centerY-ringCenterPrev[1])**2)>1.4*ringSizePrev/2-radius: #the center movement can't allow crossing of the ring with previous
                return 10000.
            return errorbars[0]*errorbars[1]*(resid/(k-i))**1.#*errorbars[1]
        else: return 10000.
    if ringCenterPrev[0] != 0: #check that all points are inside the previous ring
        indexes = []
        for i in range(len(pointsX)/2):
            # a pair is dropped if either of its two points falls outside
            if not (checkInRing([pointsX[2*i],pointsZ[2*i]], ringSizePrev, ringCenterPrev, tol = 0.4*ringSizePrev) and\
                    checkInRing([pointsX[2*i+1],pointsZ[2*i+1]], ringSizePrev, ringCenterPrev, tol = 0.4*ringSizePrev)):
                indexes.append(2*i)
                indexes.append(2*i+1)
        pointsXFull = pointsX
        pointsXerrFull = pointsXerr
        pointsZFull = pointsZ
        pointsX = list(np.delete(pointsX, indexes))
        pointsZ = list(np.delete(pointsZ, indexes))
        pointsXerr = list(np.delete(pointsXerr, indexes))
        if check:
            checkRingFigure([pointsXFull,pointsZFull,pointsXerrFull],[ringCenterPrev[0],ringCenterPrev[1],ringSizePrev/2],[pointsX,pointsZ,pointsXerr])
            print('outliers', indexes)
    if len(pointsX) > 3: #remove outliers
        # brute-force search over window boundaries (in pair units)
        res = optimize.brute(getErr,[[0,len(pointsX)/2-1],[1,len(pointsX)/2]],Ns=len(pointsX)/2)
    else:
        res = [0,0]
    pointsBot = int(res[0])
    pointsTop = int(np.ceil(res[1]))
    pointsXFull = pointsX
    pointsXerrFull = pointsXerr
    pointsZFull = pointsZ
    pointsX = pointsX[2*pointsBot:2*pointsTop]
    pointsXerr = pointsXerr[2*pointsBot:2*pointsTop]
    pointsZ = pointsZ[2*pointsBot:2*pointsTop] #get points of only allowed planes
    if len(pointsX)>3:
        centerX, centerY, radius, errorbars = fitCirc(pointsX,pointsZ,pointsXerr,(ringSizePrev-0.3*meanSizeRate)/2)
    else:
        # too few points for a circle fit: report the centroid, radius 0
        radius = 0
        centerX = np.mean(pointsX)
        centerY = np.mean(pointsZ)
    if check:
        checkRingFigure([pointsXFull,pointsZFull,pointsXerrFull],[centerX,centerY,radius],[pointsX,pointsZ,pointsXerr])
    return centerX, centerY, radius
def refineRing(data, tol, ringCenterX, ringCenterY, ringSize):
    """Snap the ring edges in one plane onto the nearest detected maxima.

    Given an expected ring centre (ringCenterX, ringCenterY) and diameter
    ringSize, pick the detected intensity maxima closest to the expected
    left and right ring edges.

    Returns:
        first, second: np.array([row, col]) of the left/right edge maxima
        errFirst, errSecond: error bars from getError()
    """
    maxima, maxPointsX, maxPointsY = detect_peaks(data)
    # expected (x, y) positions of the left and right ring edges
    left = (ringCenterX-ringSize/2, ringCenterY)
    right = (ringCenterX+ringSize/2, ringCenterY)
    distL = np.sqrt((maxPointsX-left[0])**2+(maxPointsY-left[1])**2)
    distR = np.sqrt((maxPointsX-right[0])**2+(maxPointsY-right[1])**2)
    # [::-1] flips (x, y) into the (row, col) order getError expects
    first = np.array([maxPointsX[np.argmin(distL)],maxPointsY[np.argmin(distL)]])[::-1]
    errFirst = getError(first,data)
    second = np.array([maxPointsX[np.argmin(distR)],maxPointsY[np.argmin(distR)]])[::-1]
    errSecond = getError(second,data)
    # FIX: the original tested `first != None`; `first` is a numpy array,
    # so != compares elementwise and its truth value is ambiguous (raises
    # on numpy >= 1.13). Identity is the correct test here.
    if check and first is not None:
        # debug view: all maxima in red, chosen edges in blue, centre in green
        fig = plt.figure()
        ax = fig.add_subplot(111) #full
        ax.imshow(data)
        ax.autoscale(False)
        ax.plot(maxPointsX,maxPointsY, 'ro')
        ax.plot(first[1],first[0],'bo')
        ax.plot(second[1],second[0],'bo')
        ax.plot(ringCenterX,ringCenterY,'go')
        plt.show()
    return first, second, errFirst, errSecond
def getError(point, data):
    """Estimate a horizontal error bar for a peak using intensity values.

    Scans the peak's row to the right and to the left, tracking the
    running maximum, until the intensity drops more than a fixed
    threshold (10) below it; the scan distance on each side is that
    side's error, and the larger of the two is returned.

    Args:
        point: (row, col) index of the peak.
        data: 2D intensity array.

    Returns:
        int: max(right-side error, left-side error).
    """
    row = data[point[0], :]
    col = point[1]
    # intensity-drop threshold, estimated from the images (no rigorous basis)
    thresh = 10

    # rightward scan: default error spans to the row's end
    peak = row[col]
    err_right = row.size - col
    for offset in range(row.size - col):
        sample = row[col + offset]
        if sample < peak - thresh:
            err_right = offset
            break
        if sample > peak:
            peak = sample

    # leftward scan: default error spans to the row's start
    peak = row[col]
    err_left = col
    for offset in range(col):
        sample = row[col - offset]
        if sample < peak - thresh:
            err_left = offset
            break
        if sample > peak:
            peak = sample

    return max(err_right, err_left)
def findMidbody(dataList,ringCenterY,tol, centerXIni):#finds midbody
    """Locate the midbody as the brightest peak near the expected centre.

    Searches a (2*tol)^2 window around (ringCenterY, centerXIni) in every
    plane, takes the brightest peak per plane, and picks the plane with
    the overall brightest peak.

    Relies on the module global zPixelScale to convert the plane index to
    a z coordinate — TODO confirm where it is set.

    Returns:
        centerX (column in full-image coordinates), centerY (z position).
    """
    maxIntPos = []
    maxInt = []
    for data in dataList:
        # crop the search window around the expected midbody position
        area = data[int(ringCenterY-tol):int(ringCenterY+tol),\
                    int(centerXIni-tol):int(centerXIni+tol)]
        tmp, x, y = detect_peaks(area)
        # brightest peak location ([row, col]) within the window
        maxIntPos.append([np.where(area*tmp == np.max(area*tmp))[0][0],\
                          np.where(area*tmp == np.max(area*tmp))[1][0]])
        maxInt.append(area[maxIntPos[-1][0],maxIntPos[-1][1]])
    # plane holding the overall brightest peak (mean breaks ties)
    midBodIndex = int(np.mean(np.where(maxInt==np.max(maxInt))[0]))
    centerY = midBodIndex*zPixelScale
    # shift the window-relative column back into full-image coordinates
    centerX = maxIntPos[midBodIndex][1] + int(centerXIni-tol)
    return centerX, centerY
def readFromCsv(fileName):
csvFile = csv.reader(open(fileName, 'rb'), delimiter=' ')
rows = [row for row in csvFile]
embryoDiam, embryoDiamErr = [float(rows[1][i]) for i in [2,3]]
angle = float(rows[2][2])
embCenterX, embCenterXerr = [float(rows[3][i]) for i in [2,3]]
embCenterY, embCenterYerr = [],[]
flag = False
for row in rows[5:]:
if flag:
timePointStart = int(row[0])
break
if row[0] == 'timePoint':flag=True
if | |
co-exist on the NetScaler appliance.<br/>Possible values = ON, OFF
"""
try :
self._l2conn = l2conn
except Exception as e:
raise e
@property
def oracleserverversion(self) :
    """Oracle server version.<br/>Default value: 10G<br/>Possible values = 10G, 11G.
    """
    # Plain accessor for the backing field; the try/except wrapper follows
    # the generated-SDK convention used throughout this class.
    try :
        return self._oracleserverversion
    except Exception as e:
        raise e
@oracleserverversion.setter
def oracleserverversion(self, oracleserverversion) :
    """Oracle server version.<br/>Default value: 10G<br/>Possible values = 10G, 11G
    """
    # Stores the value; validation happens server-side, not here.
    try :
        self._oracleserverversion = oracleserverversion
    except Exception as e:
        raise e
@property
def mssqlserverversion(self) :
    """For a load balancing virtual server of type MSSQL, the Microsoft SQL Server version. Set this parameter if you expect some clients to run a version different from the version of the database. This setting provides compatibility between the client-side and server-side connections by ensuring that all communication conforms to the server's version.<br/>Default value: 2008R2<br/>Possible values = 70, 2000, 2000SP1, 2005, 2008, 2008R2, 2012.
    """
    # Plain accessor for the backing field (generated-SDK convention).
    try :
        return self._mssqlserverversion
    except Exception as e:
        raise e
@mssqlserverversion.setter
def mssqlserverversion(self, mssqlserverversion) :
    """For a load balancing virtual server of type MSSQL, the Microsoft SQL Server version. Set this parameter if you expect some clients to run a version different from the version of the database. This setting provides compatibility between the client-side and server-side connections by ensuring that all communication conforms to the server's version.<br/>Default value: 2008R2<br/>Possible values = 70, 2000, 2000SP1, 2005, 2008, 2008R2, 2012
    """
    # Stores the value; validation happens server-side, not here.
    try :
        self._mssqlserverversion = mssqlserverversion
    except Exception as e:
        raise e
@property
def mysqlprotocolversion(self):
    """MySQL protocol version that the virtual server advertises to clients."""
    # Direct attribute read; the original try/except re-raised the caught
    # exception unchanged (dead code that truncated Py2 tracebacks).
    return self._mysqlprotocolversion
@mysqlprotocolversion.setter
def mysqlprotocolversion(self, mysqlprotocolversion):
    """MySQL protocol version that the virtual server advertises to clients."""
    # Direct attribute write; the no-op try/except re-raise wrapper was removed.
    self._mysqlprotocolversion = mysqlprotocolversion
@property
def mysqlserverversion(self):
    """MySQL server version string that the virtual server advertises to clients.<br/>Minimum length = 1<br/>Maximum length = 31."""
    # Direct attribute read; the original try/except re-raised the caught
    # exception unchanged (dead code that truncated Py2 tracebacks).
    return self._mysqlserverversion
@mysqlserverversion.setter
def mysqlserverversion(self, mysqlserverversion):
    """MySQL server version string that the virtual server advertises to clients.<br/>Minimum length = 1<br/>Maximum length = 31"""
    # Direct attribute write; the no-op try/except re-raise wrapper was removed.
    self._mysqlserverversion = mysqlserverversion
@property
def mysqlcharacterset(self):
    """Character set that the virtual server advertises to clients."""
    # Direct attribute read; the original try/except re-raised the caught
    # exception unchanged (dead code that truncated Py2 tracebacks).
    return self._mysqlcharacterset
@mysqlcharacterset.setter
def mysqlcharacterset(self, mysqlcharacterset):
    """Character set that the virtual server advertises to clients."""
    # Direct attribute write; the no-op try/except re-raise wrapper was removed.
    self._mysqlcharacterset = mysqlcharacterset
@property
def mysqlservercapabilities(self):
    """Server capabilities that the virtual server advertises to clients."""
    # Direct attribute read; the original try/except re-raised the caught
    # exception unchanged (dead code that truncated Py2 tracebacks).
    return self._mysqlservercapabilities
@mysqlservercapabilities.setter
def mysqlservercapabilities(self, mysqlservercapabilities):
    """Server capabilities that the virtual server advertises to clients."""
    # Direct attribute write; the no-op try/except re-raise wrapper was removed.
    self._mysqlservercapabilities = mysqlservercapabilities
@property
def appflowlog(self):
    """Apply AppFlow logging to the virtual server.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED."""
    # Direct attribute read; the original try/except re-raised the caught
    # exception unchanged (dead code that truncated Py2 tracebacks).
    return self._appflowlog
@appflowlog.setter
def appflowlog(self, appflowlog):
    """Apply AppFlow logging to the virtual server.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED"""
    # Direct attribute write; the no-op try/except re-raise wrapper was removed.
    self._appflowlog = appflowlog
@property
def netprofile(self):
    """Name of the network profile to associate with the virtual server. If you set this parameter, the virtual server uses only the IP addresses in the network profile as source IP addresses when initiating connections with servers.<br/>Minimum length = 1<br/>Maximum length = 127."""
    # Direct attribute read; the original try/except re-raised the caught
    # exception unchanged (dead code that truncated Py2 tracebacks).
    return self._netprofile
@netprofile.setter
def netprofile(self, netprofile):
    """Name of the network profile to associate with the virtual server. If you set this parameter, the virtual server uses only the IP addresses in the network profile as source IP addresses when initiating connections with servers.<br/>Minimum length = 1<br/>Maximum length = 127"""
    # Direct attribute write; the no-op try/except re-raise wrapper was removed.
    self._netprofile = netprofile
@property
def icmpvsrresponse(self):
    """How the NetScaler appliance responds to ping requests received for an IP address that is common to one or more virtual servers. Available settings function as follows:
    * If set to PASSIVE on all the virtual servers that share the IP address, the appliance always responds to the ping requests.
    * If set to ACTIVE on all the virtual servers that share the IP address, the appliance responds to the ping requests if at least one of the virtual servers is UP. Otherwise, the appliance does not respond.
    * If set to ACTIVE on some virtual servers and PASSIVE on the others, the appliance responds if at least one virtual server with the ACTIVE setting is UP. Otherwise, the appliance does not respond.
    Note: This parameter is available at the virtual server level. A similar parameter, ICMP Response, is available at the IP address level, for IPv4 addresses of type VIP. To set that parameter, use the add ip command in the CLI or the Create IP dialog box in the GUI.<br/>Default value: PASSIVE<br/>Possible values = PASSIVE, ACTIVE.
    """
    # Direct attribute read; the original try/except re-raised the caught
    # exception unchanged (dead code that truncated Py2 tracebacks).
    return self._icmpvsrresponse
@icmpvsrresponse.setter
def icmpvsrresponse(self, icmpvsrresponse):
    """How the NetScaler appliance responds to ping requests received for an IP address that is common to one or more virtual servers. Available settings function as follows:
    * If set to PASSIVE on all the virtual servers that share the IP address, the appliance always responds to the ping requests.
    * If set to ACTIVE on all the virtual servers that share the IP address, the appliance responds to the ping requests if at least one of the virtual servers is UP. Otherwise, the appliance does not respond.
    * If set to ACTIVE on some virtual servers and PASSIVE on the others, the appliance responds if at least one virtual server with the ACTIVE setting is UP. Otherwise, the appliance does not respond.
    Note: This parameter is available at the virtual server level. A similar parameter, ICMP Response, is available at the IP address level, for IPv4 addresses of type VIP. To set that parameter, use the add ip command in the CLI or the Create IP dialog box in the GUI.<br/>Default value: PASSIVE<br/>Possible values = PASSIVE, ACTIVE
    """
    # Direct attribute write; the no-op try/except re-raise wrapper was removed.
    self._icmpvsrresponse = icmpvsrresponse
@property
def rhistate(self):
    """Route Health Injection (RHI) functionality of the NetSaler appliance for advertising the route of the VIP address associated with the virtual server. When Vserver RHI Level (RHI) parameter is set to VSVR_CNTRLD, the following are different RHI behaviors for the VIP address on the basis of RHIstate (RHI STATE) settings on the virtual servers associated with the VIP address:
    * If you set RHI STATE to PASSIVE on all virtual servers, the NetScaler ADC always advertises the route for the VIP address.
    * If you set RHI STATE to ACTIVE on all virtual servers, the NetScaler ADC advertises the route for the VIP address if at least one of the associated virtual servers is in UP state.
    * If you set RHI STATE to ACTIVE on some and PASSIVE on others, the NetScaler ADC advertises the route for the VIP address if at least one of the associated virtual servers, whose RHI STATE set to ACTIVE, is in UP state.<br/>Default value: PASSIVE<br/>Possible values = PASSIVE, ACTIVE.
    """
    # Direct attribute read; the original try/except re-raised the caught
    # exception unchanged (dead code that truncated Py2 tracebacks).
    return self._rhistate
@rhistate.setter
def rhistate(self, rhistate):
    """Route Health Injection (RHI) functionality of the NetSaler appliance for advertising the route of the VIP address associated with the virtual server. When Vserver RHI Level (RHI) parameter is set to VSVR_CNTRLD, the following are different RHI behaviors for the VIP address on the basis of RHIstate (RHI STATE) settings on the virtual servers associated with the VIP address:
    * If you set RHI STATE to PASSIVE on all virtual servers, the NetScaler ADC always advertises the route for the VIP address.
    * If you set RHI STATE to ACTIVE on all virtual servers, the NetScaler ADC advertises the route for the VIP address if at least one of the associated virtual servers is in UP state.
    * If you set RHI STATE to ACTIVE on some and PASSIVE on others, the NetScaler ADC advertises the route for the VIP address if at least one of the associated virtual servers, whose RHI STATE set to ACTIVE, is in UP state.<br/>Default value: PASSIVE<br/>Possible values = PASSIVE, ACTIVE
    """
    # Direct attribute write; the no-op try/except re-raise wrapper was removed.
    self._rhistate = rhistate
@property
def newservicerequest(self) :
"""Number of requests, or percentage of the load on existing services, by which to increase the load on a new service at each interval in slow-start mode. A non-zero value indicates that slow-start is applicable. A zero value indicates that the global RR startup parameter is applied. Changing the value to zero will cause services currently in slow start to take the full traffic as determined by the LB method. Subsequently, any new services added will use the global RR factor.<br/>Default | |
a tuple of `pint.Quantity`, otherwise, if
neither `coordinates` nor `deltas` are given, the attached coordinate information belonging
to `axis` will be used and the gradient will be returned as a tuple of `xarray.DataArray`.
Parameters
----------
f : array-like
Array of values of which to calculate the derivative
axes : sequence, optional
Sequence of strings (if `f` is a `xarray.DataArray` and implicit conversion to
`pint.Quantity` is not used) or integers that specify the array axes along which to
take the derivatives. Defaults to all axes of `f`. If given, and used with
`coordinates` or `deltas`, its length must be less than or equal to that of the
`coordinates` or `deltas` given. In general, each axis can be an axis number
(integer), dimension coordinate name (string) or a standard axis type (string). The
current standard axis types are 'time', 'vertical', 'y', and 'x'.
coordinates : array-like, optional
Sequence of arrays containing the coordinate values corresponding to the
grid points in `f` in axis order.
deltas : array-like, optional
Sequence of arrays or scalars that specify the spacing between the grid points in `f`
in axis order. There should be one item less than the size of `f` along the applicable
axis.
Returns
-------
tuple of array-like
The first derivative calculated along each specified axis of the original array
Notes
-----
If this function is used without the `axes` parameter, the length of `coordinates` or
`deltas` (as applicable) should match the number of dimensions of `f`.
.. versionchanged:: 1.0
Changed signature from ``(f, **kwargs)``
See Also
--------
laplacian, first_derivative
"""
pos_kwarg, positions, axes = _process_gradient_args(f, axes, coordinates, deltas)
return tuple(first_derivative(f, axis=axis, **{pos_kwarg: positions[ind]})
for ind, axis in enumerate(axes))
@exporter.export
def laplacian(f, axes=None, coordinates=None, deltas=None):
    """Calculate the laplacian of a grid of values.

    Handles both regularly-spaced grids and grids with varying spacing. Either
    `coordinates` or `deltas` must be given, or `f` must be an `xarray.DataArray`
    with attached coordinate and projection information. When `coordinates` or
    `deltas` are supplied for a `xarray.DataArray`, `f` is converted to a
    `pint.Quantity` and the result is returned as `pint.Quantity`; otherwise the
    attached coordinate information for each axis is used and the result is
    returned as `xarray.DataArray`.

    Parameters
    ----------
    f : array-like
        Array of values of which to calculate the derivative
    axes : sequence, optional
        Array axes along which to take the derivatives; defaults to all axes of
        `f`. Each entry may be an axis number (integer), a dimension coordinate
        name (string, for `xarray.DataArray` input), or a standard axis type
        ('time', 'vertical', 'y', 'x'). If given together with `coordinates` or
        `deltas`, its length must not exceed theirs.
    coordinates : array-like, optional
        The coordinate values corresponding to the grid points in `f`
    deltas : array-like, optional
        Spacing between the grid points in `f`. There should be one item less
        than the size of `f` along the applicable axis.

    Returns
    -------
    array-like
        The laplacian

    Notes
    -----
    Without `axes`, the length of `coordinates` or `deltas` (as applicable)
    should match the number of dimensions of `f`.

    .. versionchanged:: 1.0
       Changed signature from ``(f, **kwargs)``

    See Also
    --------
    gradient, second_derivative

    """
    pos_kwarg, positions, axes = _process_gradient_args(f, axes, coordinates, deltas)
    # The laplacian is just the sum of the unmixed second derivatives.
    return sum(second_derivative(f, axis=axis, **{pos_kwarg: positions[ind]})
               for ind, axis in enumerate(axes))
def _broadcast_to_axis(arr, axis, ndim):
"""Handle reshaping coordinate array to have proper dimensionality.
This puts the values along the specified axis.
"""
if arr.ndim == 1 and arr.ndim < ndim:
new_shape = [1] * ndim
new_shape[axis] = arr.size
arr = arr.reshape(*new_shape)
return arr
def _process_gradient_args(f, axes, coordinates, deltas):
"""Handle common processing of arguments for gradient and gradient-like functions."""
axes_given = axes is not None
axes = axes if axes_given else range(f.ndim)
def _check_length(positions):
if axes_given and len(positions) < len(axes):
raise ValueError('Length of "coordinates" or "deltas" cannot be less than that '
'of "axes".')
elif not axes_given and len(positions) != len(axes):
raise ValueError('Length of "coordinates" or "deltas" must match the number of '
'dimensions of "f" when "axes" is not given.')
if deltas is not None:
if coordinates is not None:
raise ValueError('Cannot specify both "coordinates" and "deltas".')
_check_length(deltas)
return 'delta', deltas, axes
elif coordinates is not None:
_check_length(coordinates)
return 'x', coordinates, axes
elif isinstance(f, xr.DataArray):
return 'pass', axes, axes # only the axis argument matters
else:
raise ValueError('Must specify either "coordinates" or "deltas" for value positions '
'when "f" is not a DataArray.')
def _process_deriv_args(f, axis, x, delta):
"""Handle common processing of arguments for derivative functions."""
n = f.ndim
axis = normalize_axis_index(axis if axis is not None else 0, n)
if f.shape[axis] < 3:
raise ValueError('f must have at least 3 point along the desired axis.')
if delta is not None:
if x is not None:
raise ValueError('Cannot specify both "x" and "delta".')
delta = np.atleast_1d(delta)
if delta.size == 1:
diff_size = list(f.shape)
diff_size[axis] -= 1
delta_units = getattr(delta, 'units', None)
delta = np.broadcast_to(delta, diff_size, subok=True)
if not hasattr(delta, 'units') and delta_units is not None:
delta = delta * delta_units
else:
delta = _broadcast_to_axis(delta, axis, n)
elif x is not None:
x = _broadcast_to_axis(x, axis, n)
delta = np.diff(x, axis=axis)
else:
raise ValueError('Must specify either "x" or "delta" for value positions.')
return n, axis, delta
@exporter.export
@preprocess_and_wrap(wrap_like='input_dir')
def parse_angle(input_dir):
    """Calculate the meteorological angle from directional text.

    Works for abbrieviations or whole words (E -> 90 | South -> 180)
    and also is able to parse 22.5 degreee angles such as ESE/East South East.

    Parameters
    ----------
    input_dir : string or array-like
        Directional text such as west, [south-west, ne], etc.

    Returns
    -------
    `pint.Quantity`
        The angle in degrees

    """
    if isinstance(input_dir, str):
        # Single value: abbreviate, then sanitize against the known set.
        abb_dirs = _clean_direction([_abbrieviate_direction(input_dir)])
    elif hasattr(input_dir, '__len__'):
        # np.array, pd.Series, list, and other array-likes: join into a single
        # comma-separated string so abbreviation happens in one pass.
        joined = ','.join(_clean_direction(input_dir, preprocess=True))
        abb_dirs = _clean_direction(_abbrieviate_direction(joined).split(','))
    else:
        # Unrecognizable scalar input.
        return np.nan

    return itemgetter(*abb_dirs)(DIR_DICT)
def _clean_direction(dir_list, preprocess=False):
    """Replace unusable entries in a list of direction strings with UND.

    With ``preprocess=True`` only non-strings (e.g. None) are replaced, so a
    later ``','.join`` cannot fail; otherwise anything outside DIR_STRS is
    replaced.
    """
    if preprocess:
        return [entry if isinstance(entry, str) else UND for entry in dir_list]
    return [entry if entry in DIR_STRS else UND for entry in dir_list]
def _abbrieviate_direction(ext_dir_str):
"""Convert extended (non-abbrievated) directions to abbrieviation."""
return (ext_dir_str
.upper()
.replace('_', '')
.replace('-', '')
.replace(' ', '')
.replace('NORTH', 'N')
.replace('EAST', 'E')
.replace('SOUTH', 'S')
.replace('WEST', 'W')
)
@exporter.export
@preprocess_and_wrap()
def angle_to_direction(input_angle, full=False, level=3):
"""Convert the meteorological angle to directional text.
Works for angles greater than or equal to 360 (360 -> N | 405 -> NE)
and rounds to the nearest angle (355 -> N | 404 -> NNE)
Parameters
----------
input_angle : numeric or array-like numeric
Angles such as 0, 25, 45, 360, 410, etc.
full : boolean
True returns full text (South), False returns abbrieviated text (S)
level : int
Level of detail (3 = N/NNE/NE/ENE/E... 2 = N/NE/E/SE... 1 = N/E/S/W)
Returns
-------
direction
The directional text
"""
try: # strip units temporarily
origin_units = input_angle.units
input_angle = input_angle.m
except AttributeError: # no units associated
origin_units = units.degree
if not hasattr(input_angle, '__len__') or isinstance(input_angle, str):
input_angle = [input_angle]
scalar = True
else:
scalar = False
# clean any numeric strings, negatives, and None
# does not handle strings with alphabet
input_angle = np.array(input_angle).astype(float)
with np.errstate(invalid='ignore'): # warns about the np.nan
input_angle[np.where(input_angle < 0)] = np.nan
input_angle = input_angle * origin_units
# normalizer used for angles > 360 degree to normalize between 0 - 360
normalizer = np.array(input_angle.m / MAX_DEGREE_ANGLE.m, dtype=int)
norm_angles = abs(input_angle - MAX_DEGREE_ANGLE * normalizer)
if level == 3:
nskip = 1
elif level == 2:
| |
True, don't block waiting for artifact to exist if we fail to
immediately find it.
Raises:
ArtifactDownloadError: If the artifact fails to download from Google
Storage for any reason or that the regexp
defined by name is not specific enough.
"""
if not self._process_lock:
self._process_lock = _build_artifact_locks.lock(
os.path.join(self.install_dir, self.name))
real_install_dir = os.path.join(self.install_dir, self.install_subdir)
with self._process_lock:
common_util.MkDirP(real_install_dir)
if not self.ArtifactStaged():
# Delete any existing exception saved for this artifact.
self._ClearException()
found_artifact = False
if self.optional_name:
try:
# Check if the artifact named |optional_name| exists.
# Because this artifact may not always exist, don't bother
# to wait for it (set timeout=1).
new_names = downloader.Wait(
self.optional_name, self.is_regex_name, None, timeout=1)
self._UpdateName(new_names)
except ArtifactDownloadError:
self._Log('Unable to download %s; fall back to download %s',
self.optional_name, self.name)
else:
found_artifact = True
try:
# If the artifact should already have been uploaded, don't waste
# cycles waiting around for it to exist.
if not found_artifact:
timeout = 1 if no_wait else 10
new_names = downloader.Wait(
self.name, self.is_regex_name, self.alt_name, timeout)
self._UpdateName(new_names)
files = self.name if isinstance(self.name, list) else [self.name]
for filename in files:
self._Log('Downloading file %s', filename)
self.install_path = downloader.Fetch(filename, real_install_dir)
self._Setup()
self._MarkArtifactStaged()
except Exception as e:
# Convert an unknown exception into an ArtifactDownloadError.
if not isinstance(e, ArtifactDownloadError):
e = ArtifactDownloadError(e)
# Save the exception to a file for downloader.IsStaged to retrieve.
self._SaveException(e)
raise e
else:
self._Log('%s is already staged.', self)
def __str__(self):
    """String representation for the download."""
    # Summarize the artifact as "<name> -> <staging directory>".
    return '%s->%s' % (self.name, self.install_dir)
def __repr__(self):
    # Reuse __str__ so logs and interactive sessions show the same summary.
    return str(self)
class MultiArtifact(Artifact):
  """Wrapper for artifacts where name matches multiple items.."""

  def __init__(self, *args, **kwargs):
    """Takes Artifact args.

    Args:
      *args: See Artifact documentation.
      **kwargs: See Artifact documentation.
    """
    super(MultiArtifact, self).__init__(*args, **kwargs)
    # Signal that self.name holds a list of matches rather than one name.
    self.single_name = False

  def _UpdateName(self, names):
    # Normalize to a list so downstream code can always iterate.
    if isinstance(names, list):
      self.name = names
    else:
      self.name = [names]

  def _Setup(self):
    super(MultiArtifact, self)._Setup()
    staging_dir = os.path.join(self.install_dir, self.install_subdir)
    self.installed_files = [os.path.join(staging_dir, name)
                            for name in self.name]
class AUTestPayload(MultiArtifact):
  """Wrapper for AUTest delta payloads which need additional setup."""

  def _Setup(self):
    super(AUTestPayload, self)._Setup()

    # Rename each payload to the canonical devserver file name
    # (update.gz / update metadata).
    # TODO(crbug.com/1008058): Change the devserver such that this renaming is
    # not needed anymore.
    staging_dir = os.path.join(self.install_dir, self.install_subdir)
    for name in self.name:
      if name.endswith('.json'):
        dest_name = devserver_constants.UPDATE_METADATA_FILE
      else:
        dest_name = devserver_constants.UPDATE_FILE
      src_path = os.path.join(staging_dir, name)
      dst_path = os.path.join(staging_dir, dest_name)
      self._Log('moving %s to %s', src_path, dst_path)
      shutil.move(src_path, dst_path)

      # Reflect the rename in the list of installed files.
      self.installed_files.remove(src_path)
      self.installed_files.append(dst_path)
class BundledArtifact(Artifact):
  """A single build artifact bundle e.g. zip file or tar file."""

  def __init__(self, *args, **kwargs):
    """Takes Artifact args with some additional ones.

    Args:
      *args: See Artifact documentation.
      **kwargs: See Artifact documentation.
      files_to_extract: A list of files to extract. If set to None, extract
                        all files.
      exclude: A list of files to exclude. If None, no files are excluded.
    """
    self._files_to_extract = kwargs.pop('files_to_extract', None)
    self._exclude = kwargs.pop('exclude', None)
    super(BundledArtifact, self).__init__(*args, **kwargs)

    # Make the marker unique to the exact subset of files that was staged.
    if self._files_to_extract:
      self.marker_name = self._SanitizeName(
          '_'.join(['.' + self.name] + self._files_to_extract))

  def _RunUnzip(self, list_only):
    # unzip is quirky: its regular args must precede any excludes, and the
    # excludes must follow a -x flag as a list.
    cmd = ['unzip', '-qql' if list_only else '-o', self.install_path]
    if not list_only:
      cmd += ['-d', self.install_dir]

    if self._files_to_extract:
      cmd.extend(self._files_to_extract)

    if self._exclude:
      cmd.append('-x')
      cmd.extend(self._exclude)

    try:
      result = cros_build_lib.run(cmd, capture_output=True, encoding='utf-8')
      return result.stdout.splitlines()
    except cros_build_lib.RunCommandError as e:
      raise ArtifactDownloadError(
          'An error occurred when attempting to unzip %s:\n%s' %
          (self.install_path, e))

  def _Setup(self):
    extracted = self._Extract()
    if self.store_installed_files:
      # Record both the archive itself and everything extracted from it.
      self.installed_files.append(self.install_path)
      self.installed_files.extend(extracted)

  def _Extract(self):
    """Extracts files into the install path."""
    if self.name.endswith('.zip'):
      return self._ExtractZipfile()
    return self._ExtractTarball()

  def _ExtractZipfile(self):
    """Extracts a zip file using unzip."""
    # `unzip -qql` lists entries with the file name starting at column 30;
    # directory entries (trailing '/') are not real files and are skipped.
    entries = [os.path.join(self.install_dir, line[30:].strip())
               for line in self._RunUnzip(True)
               if not line.endswith('/')]
    if entries:
      self._RunUnzip(False)

    return entries

  def _ExtractTarball(self):
    """Extracts a tarball using tar.

    Detects whether the tarball is compressed or not based on the file
    extension and extracts the tarball into the install_path.
    """
    try:
      return common_util.ExtractTarball(
          self.install_path, self.install_dir,
          files_to_extract=self._files_to_extract,
          excluded_files=self._exclude,
          return_extracted_files=True)
    except common_util.CommonUtilError as e:
      raise ArtifactDownloadError(str(e))
class AutotestTarball(BundledArtifact):
  """Wrapper around the autotest tarball to download from gsutil."""

  def __init__(self, *args, **kwargs):
    super(AutotestTarball, self).__init__(*args, **kwargs)
    # Explicit file lists in Autotest tarball markers would get huge and
    # unwieldy, and generally make little sense, so skip storing/checking them.
    self.store_installed_files = False

  def _Setup(self):
    """Extracts the tarball into the install path excluding test suites."""
    super(AutotestTarball, self)._Setup()

    # Deal with older autotest packages that may not be bundled.
    autotest_dir = os.path.join(self.install_dir,
                                devserver_constants.AUTOTEST_DIR)
    pkgs_dir = os.path.join(autotest_dir, 'packages')
    if not os.path.exists(pkgs_dir):
      os.makedirs(pkgs_dir)

    if os.path.exists(os.path.join(pkgs_dir, 'packages.checksum')):
      self._Log('Using pre-generated packages from autotest')
    else:
      # No checksum: generate the packages ourselves.
      cmd = ['autotest/utils/packager.py', '--action=upload', '--repository',
             pkgs_dir, '--all']
      try:
        cros_build_lib.run(cmd, cwd=self.install_dir)
      except cros_build_lib.RunCommandError as e:
        raise ArtifactDownloadError(
            'Failed to create autotest packages!:\n%s' % e)
class SignedArtifact(Artifact):
  """Wrapper for signed artifacts which need a path translation."""

  def _Setup(self):
    super(SignedArtifact, self)._Setup()

    # Rename to signed_image.bin.
    staging_dir = os.path.join(self.install_dir, self.install_subdir)
    src_path = os.path.join(staging_dir, self.name)
    dst_path = os.path.join(staging_dir,
                            devserver_constants.SIGNED_IMAGE_FILE)
    shutil.move(src_path, dst_path)

    # Reflect the rename in the list of installed files.
    # NOTE(review): the remove() looks redundant since the list is replaced
    # outright on the next line — confirm whether it can be dropped.
    self.installed_files.remove(src_path)
    self.installed_files = [dst_path]
def _CreateNewArtifact(tag, base, name, *fixed_args, **fixed_kwargs):
"""Get a data wrapper that describes an artifact's implementation.
Args:
tag: Tag of the artifact, defined in artifact_info.
base: Class of the artifact, e.g., BundledArtifact.
name: Name of the artifact, e.g., image.zip.
*fixed_args: Fixed arguments that are additional to the one used in base
class.
**fixed_kwargs: Fixed keyword arguments that are additional to the one used
in base class.
Returns:
A data wrapper that describes an artifact's implementation.
"""
# pylint: disable=bad-option-value,super-on-old-class
class NewArtifact(base):
"""A data wrapper that describes an artifact's implementation."""
ARTIFACT_TAG = tag
ARTIFACT_NAME = name
def __init__(self, *args, **kwargs):
all_args = fixed_args + args
all_kwargs = {}
all_kwargs.update(fixed_kwargs)
all_kwargs.update(kwargs)
super(NewArtifact, self).__init__(self.ARTIFACT_NAME,
*all_args, **all_kwargs)
NewArtifact.__name__ = base.__name__
return NewArtifact
# TODO(dshi): Refactor the code here to split out the logic of creating the
# artifacts mapping to a different module.
# Maps artifact tags (from artifact_info) to the artifact classes implementing
# them for ChromeOS builds; populated below via _AddCrOSArtifact.
chromeos_artifact_map = {}
def _AddCrOSArtifact(tag, base, name, *fixed_args, **fixed_kwargs):
  """Add a data wrapper for ChromeOS artifacts.

  Add a data wrapper that describes a ChromeOS artifact's implementation to
  chromeos_artifact_map.
  """
  wrapper = _CreateNewArtifact(tag, base, name, *fixed_args, **fixed_kwargs)
  # A tag may map to several implementations, so accumulate into a list.
  if tag not in chromeos_artifact_map:
    chromeos_artifact_map[tag] = []
  chromeos_artifact_map[tag].append(wrapper)
# ChromeOS artifact registrations. Payloads use MultiArtifact/AUTestPayload
# because a payload ships with its metadata (.json) alongside the .bin.
_AddCrOSArtifact(artifact_info.FULL_PAYLOAD, AUTestPayload,
                 r'chromeos_.*_full_dev.*bin(\.json)?\Z', is_regex_name=True,
                 alt_name=[u'chromeos_{build}_{board}_dev.bin',
                           u'chromeos_{build}_{board}_dev.bin.json'])
_AddCrOSArtifact(artifact_info.DELTA_PAYLOAD, AUTestPayload,
                 r'chromeos_.*_delta_dev.*bin(\.json)?\Z', is_regex_name=True,
                 install_subdir=AU_NTON_DIR)
_AddCrOSArtifact(artifact_info.STATEFUL_PAYLOAD, Artifact,
                 devserver_constants.STATEFUL_FILE)
# Image artifacts: bundled zips from which a specific image file is extracted.
_AddCrOSArtifact(artifact_info.BASE_IMAGE, BundledArtifact, IMAGE_FILE,
                 optional_name=BASE_IMAGE_FILE,
                 files_to_extract=[devserver_constants.BASE_IMAGE_FILE])
_AddCrOSArtifact(artifact_info.RECOVERY_IMAGE, BundledArtifact, IMAGE_FILE,
                 optional_name=RECOVERY_IMAGE_FILE,
                 files_to_extract=[devserver_constants.RECOVERY_IMAGE_FILE])
_AddCrOSArtifact(artifact_info.SIGNED_IMAGE, SignedArtifact,
                 SIGNED_RECOVERY_IMAGE_FILE)
_AddCrOSArtifact(artifact_info.DEV_IMAGE, BundledArtifact, IMAGE_FILE,
                 files_to_extract=[devserver_constants.IMAGE_FILE])
_AddCrOSArtifact(artifact_info.TEST_IMAGE, BundledArtifact, IMAGE_FILE,
                 optional_name=TEST_IMAGE_FILE,
                 files_to_extract=[devserver_constants.TEST_IMAGE_FILE])
# Autotest and test-control artifacts.
_AddCrOSArtifact(artifact_info.AUTOTEST, AutotestTarball, AUTOTEST_FILE,
                 files_to_extract=None, exclude=['autotest/test_suites'])
_AddCrOSArtifact(artifact_info.CONTROL_FILES, BundledArtifact,
                 CONTROL_FILES_FILE)
_AddCrOSArtifact(artifact_info.AUTOTEST_PACKAGES, AutotestTarball,
                 AUTOTEST_PACKAGES_FILE)
_AddCrOSArtifact(artifact_info.TEST_SUITES, BundledArtifact, TEST_SUITES_FILE)
_AddCrOSArtifact(artifact_info.AU_SUITE, BundledArtifact, AU_SUITE_FILE)
_AddCrOSArtifact(artifact_info.AUTOTEST_SERVER_PACKAGE, Artifact,
                 AUTOTEST_SERVER_PACKAGE_FILE)
_AddCrOSArtifact(artifact_info.FIRMWARE, Artifact, FIRMWARE_FILE)
_AddCrOSArtifact(artifact_info.SYMBOLS, BundledArtifact, DEBUG_SYMBOLS_FILE,
                 files_to_extract=['debug/breakpad'])
_AddCrOSArtifact(artifact_info.SYMBOLS_ONLY, BundledArtifact,
                 DEBUG_SYMBOLS_ONLY_FILE,
                 files_to_extract=['debug/breakpad'])
_AddCrOSArtifact(artifact_info.FACTORY_IMAGE, BundledArtifact, FACTORY_FILE,
                 files_to_extract=[devserver_constants.FACTORY_IMAGE_FILE])
_AddCrOSArtifact(artifact_info.FACTORY_SHIM_IMAGE, BundledArtifact,
                 FACTORY_SHIM_FILE,
                 files_to_extract=[devserver_constants.FACTORY_SHIM_IMAGE_FILE])
_AddCrOSArtifact(artifact_info.QUICK_PROVISION, MultiArtifact,
                 QUICK_PROVISION_FILE)

# Add all the paygen_au artifacts in one go.
for c in devserver_constants.CHANNELS:
  _AddCrOSArtifact(artifact_info.PAYGEN_AU_SUITE_TEMPLATE % {'channel': c},
                   BundledArtifact,
                   PAYGEN_AU_SUITE_FILE_TEMPLATE % {'channel': c})

#### Libiota Artifacts ####
_AddCrOSArtifact(artifact_info.LIBIOTA_TEST_BINARIES, Artifact,
                 LIBIOTA_TEST_BINARIES_FILE)
_AddCrOSArtifact(artifact_info.LIBIOTA_BOARD_UTILS, Artifact,
                 LIBIOTA_BOARD_UTILS_FILE)

# Maps artifact tags to the artifact classes implementing them for Android
# builds; populated below via _AddAndroidArtifact.
android_artifact_map = {}
def _AddAndroidArtifact(tag, base, name, *fixed_args, **fixed_kwargs):
  """Add a data wrapper for android artifacts.

  Add a data wrapper that describes an Android artifact's implementation to
  android_artifact_map.
  """
  wrapper = _CreateNewArtifact(tag, base, name, *fixed_args, **fixed_kwargs)
  # A tag may map to several implementations, so accumulate into a list.
  if tag not in android_artifact_map:
    android_artifact_map[tag] = []
  android_artifact_map[tag].append(wrapper)
# Android artifact registrations. Most names are regexes matched against the
# files uploaded by the Android builders.
_AddAndroidArtifact(artifact_info.ANDROID_ZIP_IMAGES, Artifact,
                    ANDROID_IMAGE_ZIP, is_regex_name=True)
_AddAndroidArtifact(artifact_info.ANDROID_RADIO_IMAGE, Artifact,
                    ANDROID_RADIO_IMAGE)
_AddAndroidArtifact(artifact_info.ANDROID_BOOTLOADER_IMAGE, Artifact,
                    ANDROID_BOOTLOADER_IMAGE)
_AddAndroidArtifact(artifact_info.ANDROID_FASTBOOT, Artifact, ANDROID_FASTBOOT)
_AddAndroidArtifact(artifact_info.ANDROID_TEST_ZIP, BundledArtifact,
                    ANDROID_TEST_ZIP, is_regex_name=True)
_AddAndroidArtifact(artifact_info.ANDROID_VENDOR_PARTITION_ZIP, Artifact,
                    ANDROID_VENDOR_PARTITION_ZIP, is_regex_name=True)
_AddAndroidArtifact(artifact_info.AUTOTEST_SERVER_PACKAGE, Artifact,
                    ANDROID_AUTOTEST_SERVER_PACKAGE, is_regex_name=True)
_AddAndroidArtifact(artifact_info.TEST_SUITES, BundledArtifact,
                    ANDROID_TEST_SUITES, is_regex_name=True)
_AddAndroidArtifact(artifact_info.CONTROL_FILES, BundledArtifact,
                    ANDROID_CONTROL_FILES, is_regex_name=True)
_AddAndroidArtifact(artifact_info.ANDROID_NATIVETESTS_ZIP, BundledArtifact,
                    ANDROID_NATIVETESTS_FILE, is_regex_name=True)
_AddAndroidArtifact(artifact_info.ANDROID_CONTINUOUS_NATIVE_TESTS_ZIP,
                    BundledArtifact,
                    ANDROID_CONTINUOUS_NATIVE_TESTS_FILE,
                    is_regex_name=True)
_AddAndroidArtifact(artifact_info.ANDROID_CONTINUOUS_INSTRUMENTATION_TESTS_ZIP,
                    BundledArtifact,
                    ANDROID_CONTINUOUS_INSTRUMENTATION_TESTS_FILE,
                    is_regex_name=True)
_AddAndroidArtifact(artifact_info.ANDROID_CTS_ZIP, BundledArtifact,
                    ANDROID_CTS_FILE)
_AddAndroidArtifact(artifact_info.ANDROID_TARGET_FILES_ZIP, Artifact,
                    ANDROID_TARGET_FILES_ZIP, is_regex_name=True)
_AddAndroidArtifact(artifact_info.ANDROID_DTB_ZIP, Artifact,
                    ANDROID_DTB_ZIP, is_regex_name=True)
_AddAndroidArtifact(artifact_info.ANDROID_PUSH_TO_DEVICE_ZIP,
                    Artifact, ANDROID_PUSH_TO_DEVICE_ZIP)
_AddAndroidArtifact(artifact_info.ANDROID_SEPOLICY_ZIP,
                    Artifact, ANDROID_SEPOLICY_ZIP)
class BaseArtifactFactory(object):
"""A factory class that generates build artifacts from artifact names."""
def __init__(self, artifact_map, download_dir, artifacts, files, build,
             requested_to_optional_map):
    """Initializes the member variables for the factory.

    Args:
      artifact_map: A map from artifact names to ImplDescription objects.
      download_dir: A directory to which artifacts are downloaded.
      artifacts: List of artifacts to stage. These artifacts must be
                 defined in artifact_info.py and have a mapping in the
                 ARTIFACT_IMPLEMENTATION_MAP.
      files: List of files to stage. These files are just downloaded and staged
             as files into the download_dir.
      build: The name of the build.
      requested_to_optional_map: A map between an artifact X to a list of
          artifacts Y. If X is requested, all items in Y should also get
          triggered for download.
    """
    # Plain attribute capture; no validation is performed here.
    self.artifact_map = artifact_map
    self.download_dir = download_dir
    self.artifacts = artifacts
    self.files = files
    self.build = build
    self.requested_to_optional_map = requested_to_optional_map
def _Artifacts(self, names, is_artifact):
"""Returns the Artifacts from |names|.
If is_artifact is true, then these names define artifacts that | |
#print('2',self.docks[relatedTo]['actual_plot'].getViewBox().viewRange()[1])
self.update_zoom_plot(dock_name,relatedTo)
elif self.docks[dock_name]['type'] == 'plot2D':
self.create_PlotWidget(dock_name)
# Attribution of the curves to the plots
flag = 0
self.docks[dock_name]['curve'] = {}
for variable in self.variables.keys():
if 'dock' in self.variables[variable].keys():
# if element of 'dock' (variables/observables) is a dict
for element_variable_dock in self.variables[variable]['dock']:
if isinstance(element_variable_dock,dict):
if dock_name in element_variable_dock.keys():
for real_dock_name in element_variable_dock.keys():
# assert only two variables to plot
assert len(element_variable_dock[real_dock_name]) == 2, f"list of variables/observables to plot on {real_dock_name} with dock type 'plot2D' must be exactly of length 2, provided was {len(element_variable_dock[real_dock_name])}"
list_variables_to_plot = element_variable_dock[real_dock_name]
# assert variables provided do exist
for variables_to_plot in list_variables_to_plot:
assert variables_to_plot in self.variables.keys() or variables_to_plot in self.params.keys(),f"variable '{variables_to_plot}' in 'dock' key of variable '{variable}' (variables/observables/params dictionnary) not understood. Must be in {list(dict(self.variables, **self.params).keys())}"
self.docks[dock_name]['curve'][variable+'_plot2D_'+str(flag)] = {}
self.docks[dock_name]['curve'][variable+'_plot2D_'+str(flag)]['curve'] = self.docks[dock_name]['actual_plot'].plot(pen=self.colors_dict[list(self.colors_dict.keys())[np.mod(flag,len(self.colors_dict))]]['rgb'])
self.docks[dock_name]['curve'][variable+'_plot2D_'+str(flag)]['variables_to_plot'] = list_variables_to_plot
flag += 1
else:
# Check validity of the provided dock_names
for real_dock_name in element_variable_dock.keys():
if real_dock_name not in self.docks.keys():
if [variable,element_variable_dock] not in self.warning_observables_docks:
self.warning_observables_docks.append([variable,element_variable_dock]) # to throw error only once
print(f"WARNING: check validity of dock_names you provided in the variables/observable dictionnary: {list(element_variable_dock.keys())}'")
if flag == 0: # Nothing plotted on the 'plot2D'
print(f"WARNING: nothing has been plotted on the 'plot2D' dock with name '{dock_name}'")
if 'zoomOf' in self.docks[dock_name].keys():
pass
elif self.docks[dock_name]['type'] == 'image':
self.create_ImageView(dock_name)
self.docks[dock_name]['actual_plot'].keyPressEvent = self.keyPressEvent
#self.docks[dock_name]['actual_plot'].enableAutoRange('xy', True)
########################## END figure layout and docks ##########################
############################ BEGIN Trees declaration ############################
# Variables Tree
self.tree = self.ui.tree
self.tree.setColumnCount(3)
self.tree.keyPressEvent = self.keyPressEvent # allow keys catching for focus on trees
self.tree.setHeaderLabels(['Variables','IC','plot'])
flag = 0
for variable in self.variables.keys():
temp = pg.TreeWidgetItem([variable])
temp.setForeground(0,QtGui.QBrush(QtGui.QColor(self.colors_dict[list(self.colors_dict.keys())[np.mod(flag,len(self.colors_dict))]]['hex'])))
# Create linedit (variables only)
if not self.variables[variable]['observable']:
self.variables[variable]['lineedit'] = QtGui.QLineEdit()
temp.setWidget(1, self.variables[variable]['lineedit'])
self.variables[variable]['lineedit'].setText(str(self.variables[variable]['value'][-1])) # set initial value
self.variables[variable]['lineedit'].returnPressed.connect(partial(self.update_lineedit_variable,variable))
# Create checkbox
self.variables[variable]['checkbox'] = QtGui.QCheckBox()
temp.setWidget(2, self.variables[variable]['checkbox'])
self.tree.addTopLevelItem(temp)
self.variables[variable]['checkbox'].setChecked(self.variables[variable]['plot']) # set initial state
self.variables[variable]['checkbox'].keyPressEvent = self.keyPressEvent # connect keys
self.variables[variable]['checkbox'].stateChanged.connect(partial(self.update_checkbox_variable,variable)) # connect checkbox
flag += 1
# Params Tree
self.tree_params = self.ui.tree_params
self.tree_params.setColumnCount(4)
self.tree_params.keyPressEvent = self.keyPressEvent
self.tree_params.setHeaderLabels(['Params','plot','value','slider'])
self.spinbox_precision = 3
for param in self.params.keys():
self.params[param]['slider_conversion_factor'] = int(1./self.params[param]['step']) # To test was: 5000 *10000
temp = pg.TreeWidgetItem([param])
# Spin boxes
self.params[param]['spinbox'] = QtGui.QDoubleSpinBox()
self.params[param]['spinbox'].setRange(self.params[param]['min'],self.params[param]['max'])
self.params[param]['spinbox'].setSingleStep(self.params[param]['step'])
if isinstance(self.params[param]['step'],int):
self.params[param]['spinbox'].setDecimals(0)
else:
self.params[param]['spinbox'].setDecimals(self.spinbox_precision)
temp.setWidget(2, self.params[param]['spinbox'])
self.tree_params.addTopLevelItem(temp)
self.params[param]['spinbox'].setValue(self.params[param]['value'][-1])
self.params[param]['spinbox'].setKeyboardTracking(False) # emit signal only when enter is pressed
self.params[param]['spinbox'].valueChanged.connect(partial(self.update_slider_params,param))
# Sliders
self.params[param]['slider'] = QtGui.QSlider()
self.params[param]['slider'].setRange(int(self.params[param]['min']*self.params[param]['slider_conversion_factor']),int(self.params[param]['max']*self.params[param]['slider_conversion_factor']))
self.params[param]['slider'].setSingleStep(1) # integers only
self.params[param]['slider'].setOrientation(QtCore.Qt.Orientation.Horizontal) # horizontale
temp.setWidget(3, self.params[param]['slider'])
self.tree.addTopLevelItem(temp)
value = np.round(self.params[param]['value'][-1]*self.params[param]['slider_conversion_factor'],self.spinbox_precision) # convert in slider integer unit
self.params[param]['slider'].setValue(int(value))
self.params[param]['slider'].valueChanged.connect(partial(self.update_spinbox_params,param))
# Create checkbox
self.params[param]['checkbox'] = QtGui.QCheckBox()
temp.setWidget(1, self.params[param]['checkbox'])
self.tree.addTopLevelItem(temp)
self.params[param]['checkbox'].setChecked(self.params[param]['plot']) # set initial state
self.params[param]['checkbox'].keyPressEvent = self.keyPressEvent # connect keys
self.params[param]['checkbox'].stateChanged.connect(partial(self.update_checkbox_variable,param)) # connect checkbox
flag += 1
# Kernel Tree
self.tree_kernels = self.ui.tree_kernels
self.tree_kernels.setColumnCount(2)
self.tree_kernels.keyPressEvent = self.keyPressEvent
self.tree_kernels.setHeaderLabels(['Kernels',''])
# Create a group of buttons to allow "exclusive" behavior
self.group_buttons_kernels = QtGui.QButtonGroup()
self.group_buttons_kernels.setExclusive(True)
for kernel in self.kernels.keys():
self.kernels[kernel]['checkbox'] = QtGui.QCheckBox()
self.group_buttons_kernels.addButton(self.kernels[kernel]['checkbox'], 1)
temp = pg.TreeWidgetItem([kernel])
temp.setWidget(1, self.kernels[kernel]['checkbox'])
self.tree_kernels.addTopLevelItem(temp)
if kernel == self.kernel:
self.kernels[kernel]['checkbox'].setChecked(True) # set initial state
self.kernels[kernel]['checkbox'].keyPressEvent = self.keyPressEvent
self.group_buttons_kernels.buttonClicked.connect(self.update_checkbox_kernel)
############################# END Trees declaration ############################
# Start showing the window
self.show()
# Connect timer to update the figure
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.run_simulator)
self.timer.start(10)
# Initial window states
if not self.streaming: self.timer.stop(); self.run_simulator()
self.update_pause_indicator()
self.update_record_state_indicator()
# If starts recording from beginning
if self.record_state:
self.toggle_record_state()
self.keyPressEvent("r")
self.t = 0
################################ BEGIN plots update ###################################
def update_zoom_plot(self, dock_name, relatedTo):
    """Sync the zoom dock's x-range to the region selected on its parent plot."""
    selected_region = self.docks[relatedTo]['region'][dock_name].getRegion()
    target_plot = self.docks[dock_name]['actual_plot']
    target_plot.setXRange(*selected_region, padding=0)
def update_xzoom_region(self, dock_name, relatedTo):
    """Mirror the zoom dock's visible x-range back onto the parent's region selector."""
    visible_xrange = self.docks[dock_name]['actual_plot'].getViewBox().viewRange()[0]
    self.docks[relatedTo]['region'][dock_name].setRegion(visible_xrange)
def update_plots(self):
    """Push the latest variable/param history buffers into every dock's curves/images.

    Called once per refresh cycle from run_simulator. A curve is updated only
    when its variable/param has plotting enabled ('plot' flag); when a 'dock'
    key is present, the update is further restricted to the docks it lists.
    """
    for dock_name in self.docks.keys():
        if self.docks[dock_name]['type'] == 'plot1D':
            # 1D docks: one curve per enabled variable and per enabled param.
            for variable in self.variables.keys():
                if self.variables[variable]['plot']:
                    if 'dock' in self.variables[variable].keys():
                        # restricted to the docks listed under 'dock'
                        if dock_name in self.variables[variable]['dock']:
                            self.docks[dock_name]['curve'][variable].setData(self.variables[variable]['value'])
                    else:
                        # no 'dock' restriction: draw on every plot1D dock
                        self.docks[dock_name]['curve'][variable].setData(self.variables[variable]['value'])
            for param in self.params.keys():
                if self.params[param]['plot']:
                    if 'dock' in self.params[param].keys():
                        if dock_name in self.params[param]['dock']:
                            self.docks[dock_name]['curve'][param].setData(self.params[param]['value'])
                    else:
                        self.docks[dock_name]['curve'][param].setData(self.params[param]['value'])
        elif self.docks[dock_name]['type'] == 'plot2D':
            # plot the variable names that are pre stored in dock dict
            for curve2D in self.docks[dock_name]['curve']:
                # if there is a param in the list
                list_params_in_variables_provided = [i for i in self.docks[dock_name]['curve'][curve2D]['variables_to_plot'] if i in list(self.params.keys())]
                if len(list_params_in_variables_provided)==1:
                    param_provided = list_params_in_variables_provided[0]
                    index_param_provided = self.docks[dock_name]['curve'][curve2D]['variables_to_plot'].index(param_provided)
                    # NOTE(review): index_variable_provided is computed but never used below.
                    index_variable_provided = list(set([0,1]) - set([index_param_provided]))
                    # visibility follows the 'plot' flag of the variable that owns this curve
                    if self.variables[curve2D.split('_plot2D_')[0]]['plot']:
                        # x is whichever entry is the param, y the variable (or vice versa)
                        if index_param_provided == 0:
                            self.docks[dock_name]['curve'][curve2D]['curve'].setData(self.params[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][0]]['value'],self.variables[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][1]]['value'])
                        elif index_param_provided == 1:
                            self.docks[dock_name]['curve'][curve2D]['curve'].setData(self.variables[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][0]]['value'],self.params[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][1]]['value'])
                # no params provided
                else:
                    # if variables specified, index 0 is to be plot
                    if self.variables[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][0]]['plot']:
                        self.docks[dock_name]['curve'][curve2D]['curve'].setData(self.variables[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][0]]['value'],self.variables[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][1]]['value'])
        elif self.docks[dock_name]['type'] == 'image':
            # image docks: show the 2D 'value' array of the (enabled) variable
            for variable in self.variables.keys():
                if 'dock' in self.variables[variable].keys():
                    if self.variables[variable]['plot']:
                        if dock_name in self.variables[variable]['dock']:
                            self.docks[dock_name]['actual_plot'].setImage(self.variables[variable]['value'])
    # Update fps_label
    self.update_fps_label()
def run_simulator(self, nstep_update_plot=None):
    """Advance the simulation by nstep_update_plot steps, then refresh the plots once.

    nstep_update_plot defaults to self.nstep_update_plot when not given.
    NOTE(review): the falsy check below also treats an explicit 0 as
    "use the default" -- confirm 0 is never a meaningful argument.
    """
    if not nstep_update_plot: nstep_update_plot = self.nstep_update_plot
    # Calculation
    for i in range(nstep_update_plot):
        self.simulator()
        # If recording
        if self.record_state and (self.nstep%self.nstep_record == 0): # record every self.nstep_record
            self.append_to_dataframe()
        # Update main plots every nstep_update_plot (last occurence of the loop)
        if i==nstep_update_plot-1:
            self.update_plots()
        # Update time_stamp and parameter dict last (then saved correspond to calculation)
        # time_stamp and each param 'value' are rolling history buffers:
        # shift left one slot, newest entry lives at index -1.
        self.time_stamp[:-1] = self.time_stamp[1:]
        self.time_stamp[-1] += self.step_size
        self.nstep += 1
        for param in self.params.keys():
            self.params[param]['value'][:-1] = self.params[param]['value'][1:]
    # Fix app freezing on Windows systems (if event occurs must process it)
    QtCore.QCoreApplication.processEvents()
################################# END plots update ###################################
def keyPressEvent(self, event):
    """Dispatch keyboard interactions.

    `event` is normally a Qt key event, but this handler is also invoked
    programmatically with a plain one-character string (e.g.
    self.keyPressEvent("r")), in which case the string itself is the key.
    User-defined bindings take precedence over the built-in ones.
    """
    try:
        key = event.text()
    except AttributeError:
        # A plain string was passed programmatically; it has no .text().
        # (Previously a bare `except:` which hid unrelated errors.)
        key = event
    if key in list(self.user_defined_keyPressEvent.keys()):  # keys defined in the user file
        # User callback receives (app, variable histories, latest param values).
        self.user_defined_keyPressEvent[key](self, {k: v['value'] for (k, v) in self.variables.items()}, {k: v['value'][-1] for (k, v) in self.params.items()})
    elif key == ' ':
        self.toggle_streaming()
    elif key == 'q':
        sys.exit()
    elif key == 'h':
        # Pause while the help dialog is shown, then restore streaming state.
        previous_streaming_state = self.streaming
        if previous_streaming_state: self.toggle_streaming()
        self.display_help()
        if previous_streaming_state: self.toggle_streaming()
    elif key == 's' or key == 'r':
        previous_streaming_state = self.streaming
        if previous_streaming_state: self.toggle_streaming()  # pause it
        if key == 's':
            self.save()  # query filename and save initial screenshot
        elif key == 'r':
            if not self.record_state:
                self.save(record=True)
            else:
                # Stop an ongoing recording: final screenshot + flush data.
                self.toggle_record_state()
                self.save_screenshot(self.filename_to_record_no_ext + '_END.png')
                self.save_appended_dataframe()
                self.filename_to_record_no_ext = None
        if previous_streaming_state: self.toggle_streaming()
    elif key == 'i':
        self.change_ICs_variable()
    elif key == 'c':
        self.update_images_colormap()
    else:
        if key != "" and event.key() != QtCore.Qt.Key_Return:
            # BUGFIX: message previously read '... not None', which was
            # meaningless; report that the key has no binding instead.
            print(f'Keyboard event "{key}" not assigned to any action')
def create_PlotWidget(self, dock_name):
    """Instantiate a pyqtgraph PlotWidget for the dock, forwarding only user-supplied options."""
    internal_keys = ('dock', 'type', 'position', 'relativeTo', 'size', 'zoomOf', 'region')
    options = {k: v for k, v in self.docks[dock_name].items() if k not in internal_keys}
    widget = pg.PlotWidget(**options)
    self.docks[dock_name]['actual_plot'] = widget
    self.docks[dock_name]['dock'].addWidget(widget)
def create_ImageView(self,dock_name):
    """Create a pyqtgraph ImageView for the dock and register it as 'actual_plot'."""
    # Item for displaying image data
    pl = pg.PlotItem() # to get axis
    img = pg.ImageItem(axisOrder='row-major') # to rotate 90 degree
    # Create an ImageView Widget; forward only user options, not the internal keys
    self.docks[dock_name]['actual_plot'] = pg.ImageView(view=pl,imageItem=img,**{key:value for key,value in self.docks[dock_name].items() if key not in ['dock','type','position','relativeTo','size','zoomOf','region']})
    # Set initial states
    self.docks[dock_name]['actual_plot'].view.invertY(False)
    self.docks[dock_name]['actual_plot'].view.setAspectLocked(False)
    self.docks[dock_name]['actual_plot'].view.disableAutoRange(True)
    self.docks[dock_name]['actual_plot'].ui.menuBtn.hide()
    #self.docks[dock_name]['actual_plot'].ui.menuBtn.show()
    #self.docks[dock_name]['actual_plot'].ui.histogram.hide()
    #self.docks[dock_name]['actual_plot'].ui.roiBtn.hide()
    # Set colormap to be used
    # NOTE(review): unlike update_images_colormap, the index is not wrapped with
    # a modulo here -- confirm flag_colormaps < len(colormaps_list) at creation time.
    gradient = Gradients[self.colormaps_list[self.flag_colormaps]]
    cmap = pg.ColorMap(pos=[c[0] for c in gradient['ticks']],color=[c[1] for c in gradient['ticks']], mode=gradient['mode'])
    self.docks[dock_name]['actual_plot'].setColorMap(cmap)
    self.docks[dock_name]['dock'].addWidget(self.docks[dock_name]['actual_plot'])
def add_dock(self,dock_name):
    ''' Add a dock to the main window '''
    if 'relativeTo' in self.docks[dock_name].keys():
        relativeto_dock_name = self.docks[dock_name]['relativeTo']
        # A dock placed relative to another requires that other dock to have
        # been created already (its 'dock' key is set below when created).
        assert 'dock' in self.docks[relativeto_dock_name].keys(), f"Dock '{relativeto_dock_name}' not understood. Docks that are 'relativeTo' another must be defined after it in the dictionnary of docks for consistent behavior"
    self.docks[dock_name]['region'] = {} # 'region' key to be used later
    self.docks[dock_name]['dock'] = Dock(dock_name, size=self.docks[dock_name]['size'], closable=True)
    # Forward only the placement-related keys to the dock area.
    self.main_dock_area.addDock(**{key:value for key,value in self.docks[dock_name].items() if key in ['dock','position','relativeTo']}) # key used: 'dock', 'position' and 'relativeTo'
def repaint_all_plots(self):
    """Force an immediate repaint of every dock that has an instantiated plot widget."""
    for dock in self.docks.values():
        if 'actual_plot' in dock:
            dock['actual_plot'].repaint()
def toggle_streaming(self):
    """Flip the run/pause state and refresh the Run/Stop indicator accordingly."""
    self.streaming = not self.streaming
    self.update_pause_indicator()
def update_pause_indicator(self):
    """Update the Run/Stop label (colour and text) and start/stop the simulation timer."""
    # BUGFIX: the text colour was previously interpolated from the tuple
    # (0,0,0), producing invalid CSS ("color : (0, 0, 0)") that Qt ignores;
    # use a hex colour string instead.
    if self.streaming:
        self.ui.run_label.setStyleSheet("QLabel {border: 3px solid %s; background-color : %s; color : %s; }" % ('#000000', self.colors_dict['g']['hex'], '#000000'))
        self.ui.run_label.setText(' Run ')
        self.timer.start(10)  # resume the simulation loop (10 ms period)
    else:
        self.ui.run_label.setStyleSheet("QLabel {border: 3px solid %s; background-color : %s; color : %s; }" % ('#000000', self.colors_dict['r']['hex'], '#000000'))
        self.ui.run_label.setText(' Stop ')
        self.timer.stop()
    self.ui.run_label.repaint()
def update_images_colormap(self):
    """Cycle to the next colormap and apply it to every 'image' dock."""
    self.flag_colormaps += 1
    # wrap the running counter around the available colormaps
    cmap_name = self.colormaps_list[np.mod(self.flag_colormaps,len(self.colormaps_list))]
    gradient = Gradients[cmap_name]
    cmap = pg.ColorMap(pos=[c[0] for c in gradient['ticks']],color=[c[1] for c in gradient['ticks']], mode=gradient['mode'])
    for dock_name in self.docks.keys():
        if self.docks[dock_name]['type'] == 'image':
            if 'actual_plot' in self.docks[dock_name]:
                self.docks[dock_name]['actual_plot'].setColorMap(cmap)
    self.repaint_all_plots()
def update_record_state_indicator(self):
    """Light the round record label red while recording, black (invisible) otherwise."""
    # BUGFIX: the text colour was previously interpolated from the tuple
    # (0,0,0), producing invalid CSS ("color : (0, 0, 0)") that Qt ignores;
    # use a hex colour string instead.
    if self.record_state:
        self.ui.record_label.setStyleSheet("border: 3px solid %s; border-radius: 22px; background-color : %s; color : %s" % ('#000000', self.colors_dict['r']['hex'], '#000000'))
    else:
        self.ui.record_label.setStyleSheet("border: 3px solid %s; border-radius: 22px; background-color : %s; color : %s" % ('#000000', '#000000', '#000000'))
    self.ui.record_label.repaint()
def update_ICs_button(self):
    """Reset every non-observable variable to its initial condition and refresh its lineedit."""
    for variable in self.variables.keys():
        if not self.variables[variable]['observable']:
            value = np.array(self.variables[variable]['init_cond']).astype(self.variables[variable]['type']) # convert to array to be able to astype
            self.variables[variable]['lineedit'].setText(str(value)) # set initial value
            # refill the whole rolling history buffer with the initial condition
            self.variables[variable]['value'] = self.variables[variable]['init_cond'] * np.ones(self.array_size).astype(self.variables[variable]['type'])
def display_help(self):
# Message must be a list of each line to display
text_help_dialog = ['Important Notes:','- (keyboard keys) do not work when focus is given to lineedits or spinboxes','- ("image" plots) you must pause to modify the aspect ratio, zoom or histogram range']
text_help_dialog += ['']
text_help_dialog += ['Usable keyboard keys:','- " ": toggle run/stop','- "q": close the window','- | |
# <gh_stars>1-10
import os
import skimage
import numpy as np
import platform
import cv2
import time
import csv
from skimage import io, morphology, exposure, filters, measure, util, segmentation
from scipy import signal, ndimage
from itertools import zip_longest
from .common import imimposemin
from .cellseg import ReSegCells, NucCountBatch, stromal_nuclei_segmentation
from .cellshape import cell_shape_images, CellShapeAutoencoder
from .quant import MxIF_quantify, MxIF_quantify_stroma
def bwareafilt(image, area_range):
    """Zero out connected components whose pixel area lies outside ``area_range``.

    Python equivalent of MATLAB's ``bwareafilt``. Components are labelled with
    the cross-shaped structuring element (1-connectivity), matching the
    previous ``measure.label(image, connectivity=1)`` call.

    Parameters
    ----------
    image : ndarray
        Binary (or label-like) mask; nonzero pixels form the objects.
    area_range : sequence of two numbers
        ``[min_area, max_area]``; components strictly smaller than min_area or
        strictly larger than max_area are removed.
        (Parameter renamed from ``range``, which shadowed the builtin.)

    Returns
    -------
    ndarray
        Copy of ``image`` with out-of-range components set to 0.
    """
    out = image.copy()
    # scipy.ndimage.label's default structure is the same 1-connectivity cross
    # used before via skimage.measure.label(connectivity=1); label numbering
    # may differ but the size-based filtering below is order-independent.
    labeled, _ = ndimage.label(image)
    component_sizes = np.bincount(labeled.ravel())
    too_large = component_sizes > area_range[1]
    too_small = component_sizes < area_range[0]
    out[too_large[labeled]] = 0
    out[too_small[labeled]] = 0
    return out
def blurimg2_batch(nuc):
    """Build a mask of in-focus regions from a DAPI image via edge density.

    Returns a boolean array where sharp (edge-rich) regions are True; callers
    multiply it against the nuclear mask to exclude blurred areas.
    """
    # find and exclude blurred regions from dapi image
    # rescale intensities to the full uint8 range
    nuc = np.uint8(
        (nuc.astype(float) - float(np.amin(nuc[:])))
        / (float(np.amax(nuc[:])) - float(np.amin(nuc[:])))
        * 255
    )
    # local contrast enhancement (CLAHE)
    nuc = exposure.equalize_adapthist(nuc)
    edge = filters.sobel(nuc) # matlab uses threshold of 0.04
    # NOTE(review): no threshold is applied to the sobel magnitudes before the
    # binary closing (which treats any nonzero pixel as True) -- confirm this
    # matches the MATLAB original's 0.04 cutoff mentioned above.
    se = morphology.disk(20)
    closed = morphology.binary_closing(edge, se)
    return closed
def MaskFiltration(mask, low):
    """Drop the smallest objects: keep components with area >= min + low * (max - min)."""
    labels = measure.label(mask)
    areas = [region.area for region in measure.regionprops(labels)]
    smallest = min(areas)
    largest = max(areas)
    # lower bound interpolated between the smallest and largest component area
    lower_bound = smallest + low * (largest - smallest)
    return bwareafilt(mask, [lower_bound, largest])
def ML_probability(probs, LowAreaLim, thresh):
    """Turn an RGB class-probability image into a clean binary epithelium mask.

    Parameters
    ----------
    probs : ndarray (H, W, 3)
        Probability image; channel 0 is the epithelial class.
    LowAreaLim : float
        Fraction passed to MaskFiltration as the lower area bound.
    thresh : float
        Probability cutoff applied after min-max normalisation to [0, 1].

    Returns
    -------
    ndarray of bool
        Binary epithelium mask (possibly all-False when nothing survives).
    """
    # process probability masks
    tm = probs[:, :, 0]  # get red or epithelial channel from epithelium
    out = np.zeros(tm.shape, np.double)
    tm = cv2.normalize(
        tm, out, 1.0, 0.0, cv2.NORM_MINMAX, dtype=cv2.CV_64F
    )  # normalize red channel to grayscale
    tm = tm > thresh  # thresholding
    if not np.amax(tm):
        return tm  # nothing above threshold: empty mask
    # morphological filtering to smooth edges
    new = morphology.remove_small_objects(tm, 500)
    if not np.amax(new):
        # BUGFIX: previously a bare `return` (i.e. None), inconsistent with the
        # array returned by the early exit above and a crash for callers that
        # invert/save the result; return the (empty) mask instead.
        return new
    new = morphology.binary_opening(new, morphology.disk(5))
    new = morphology.binary_erosion(new, morphology.disk(3))
    new = morphology.binary_closing(new, morphology.disk(7))
    # more filtering
    new = MaskFiltration(new, LowAreaLim)
    new = morphology.binary_erosion(new, morphology.disk(3))
    new = morphology.remove_small_holes(new, 1000)
    new = morphology.binary_opening(new, morphology.disk(3))
    new = morphology.binary_closing(new, morphology.disk(3))
    new = morphology.remove_small_objects(new, 500)
    new = morphology.dilation(new, morphology.disk(4))
    return new
def SegDirFormatting(Directory):
    """
    Create and format data structure of single cell segmentation.

    Parameters
    ----------
    Directory : str
        Directory for the slide, containing the AFRemoved images folder and
        the RegisteredImages folder. Assumes Round 001 is baseline and all
        files are tif format.

    Returns
    -------
    tuple
        (AFRemoved, DAPI, OutDir) where AFRemoved is the location of the
        AFRemoved images, DAPI the location of the DAPI images, and OutDir a
        list of output folder path strings (one per SegQuants subfolder,
        each with a trailing separator).
    """
    # The two original branches differed only in the path separator; build the
    # paths once with the platform's separator instead of duplicating them.
    sep = "\\" if platform.system() == "Windows" else "/"
    AFRemoved = Directory + sep + "AFRemoved"
    DAPI = Directory + sep + "RegisteredImages" + sep + "S001"
    Seg = Directory + sep + "SegQuants"
    # One output subfolder per segmentation product.
    subfolders = [
        "Stacks", "CellSeg", "CellSegFinal", "EpiMask", "Novlp", "NucMask",
        "SuperMem", "MemMask", "NucMaskFinal", "PosStats", "ML", "TumorMask",
        "CellShape",
    ]
    if not os.path.exists(Seg):
        os.makedirs(Seg)
    for name in subfolders:
        path = Seg + sep + name
        if not os.path.exists(path):
            os.makedirs(path)
    # List whatever actually exists under SegQuants (hidden entries excluded),
    # matching the original behavior of scanning the directory afterwards.
    OutDir = [
        Seg + sep + entry + sep
        for entry in os.listdir(Seg)
        if not entry.startswith(".")
    ]
    return (AFRemoved, DAPI, OutDir)
def CellSeg(SlideDir, quantify, shape, stroma, tumor, start):
"""
Wrapper for cell segmentation
Parameters
----------
SlideDir : directory for slide containing AFRemoved folder and Registered
images folder - assumes round 001 is baseline and all files are .tif
quantify : whether or not to quantify, 1=yes, 0=no
shape : whether or not to characterize shape, 1=yes, 0=no
stroma : whether or not to segment stroma, 1=yes, 0=no
tumor : whether or not to include tumors, 1=yes, 0=no
start : what image to start processing
Returns
-------
None - function saves images and quantifications.
"""
# Parse Direcotry supplied for cell segmentation
(AFRemoved, DAPI, OutDir) = SegDirFormatting(SlideDir)
# get formatting for AfRemoved, DAPI, and output directories
AFFiles = os.listdir(AFRemoved)
AFList = []
for file in AFFiles:
AFList.append(file.split("_AFRemoved_"))
AFList = np.asarray(AFList)
AFList = np.resize(AFList, (95, 1, 2))
PosList = np.unique(AFList[:, :, 1])
AFList = np.unique(AFList[:, :, 0]) # list of markers
PosList = np.char.replace(PosList, ".tif", "") # list of positions
OutPos = PosList
# Format DAPI images for Cytell based imaging
DapiList = sorted(DAPI + "/" + element for element in os.listdir(DAPI))
# make sure the number of DAPI images equals the number of positions
if len(DapiList) != len(PosList):
print("Error: Dapi Image Mismatch")
return
OutDir = sorted(OutDir)
# status updates
print("Segmentation of:", SlideDir, ";", str(len(PosList)), " Positions;\n")
# Segmentation and Quantification for each position
for i in range(start, len(PosList)):
print(f"{OutPos[i]}:")
# make Stacks of AFRemoved images and Dapi if they don't exist
if not os.path.exists(f"{OutDir[10]}{OutPos[i]}_stack.tif"):
print(f"Stack: {OutPos[i]}")
# form tif image stack for each position with images from each marker
# io.imsave(f"{OutDir[10]}{OutPos[i]}_stack.tif", io.imread(DapiList[i]))
stack = []
stack.append(io.imread(DapiList[i]))
for j in range(
len(AFList)
): # loop through AFRemoved images and append to tiff stack
stack.append(
io.imread(f"{AFRemoved}/{AFList[j]}_AFRemoved_{OutPos[i]}.tif")
)
stack = np.asarray(stack)
io.imsave(f"{OutDir[10]}{OutPos[i]}_stack.tif", stack)
# Check for probability files
if not os.path.exists(f"{OutDir[4]}epi_{OutPos[i]}_stack_Probabilities.png"):
print("No Epithelial Probability File")
continue
if not os.path.exists(f"{OutDir[4]}mem_{OutPos[i]}_stack_Probabilities.png"):
print("No Membrane/Nucleus Probabilty File")
continue
# nuclear segmentation and generate supermembrane and binary membrane mask
if not (
os.path.exists(f"{OutDir[7]}NucMask_{OutPos[i]}.png")
or os.path.exists(f"{OutDir[11]}SuperMem_{OutPos[i]}.tif")
or os.path.exists(f"{OutDir[5]}MemMask_{OutPos[i]}.png")
):
# read in membrane probability file
Probs = io.imread(f"{OutDir[4]}mem_{OutPos[i]}_stack_Probabilities.png")
# threshold with nuclear probability >0.6 for nuclear mask
mask = np.where(Probs[:, :, 1] > 255 * 0.6, np.uint8(255), np.uint8(0))
io.imsave(f"{OutDir[7]}NucMask_{OutPos[i]}.png", mask)
io.imsave(f"{OutDir[11]}SuperMem_{OutPos[i]}.tif", Probs[:, :, 0])
# thresholding for membrane mask
MemMask = np.where(Probs[:, :, 0] > 255 * 0.6, np.uint8(255), np.uint8(0))
io.imsave(f"{OutDir[5]}MemMask_{OutPos[i]}.png", MemMask)
else:
# read files if previously generated
mask = io.imread(f"{OutDir[7]}NucMask_{OutPos[i]}.png")
SuperMem = io.imread(f"{OutDir[11]}SuperMem_{OutPos[i]}.tif")
MemMask = io.imread(f"{OutDir[5]}MemMask_{OutPos[i]}.png")
mask = np.where(mask > 0, np.uint8(1), np.uint8(0)) # make nuclear mask binary
# fill in small holes and smooth
mask = morphology.remove_small_holes(mask, 20 ** 3)
selem = morphology.disk(3)
mask = morphology.binary_opening(mask, selem)
# remove blurred nuclear regions
mask = np.multiply(mask, blurimg2_batch(io.imread(DapiList[i])))
s = mask.shape
pixadj = 1
if s[0] != 2048 or s[1] != 2048:
pixadj = 3
# generate epithelial mask from machine learning
if not (os.path.exists(OutDir[3] + "EpiMask_" + OutPos[i] + ".png")):
print("EpiMask Processing: ")
epiMask = io.imread(
OutDir[4] + "epi_" + OutPos[i] + "_stack_Probabilities.png"
)
epiMask = ML_probability(
epiMask, pixadj * 0.01, 0.45
) # create epi mask from probability map
io.imsave(
OutDir[3] + "EpiMask_" + OutPos[i] + ".png",
255 * np.array(epiMask, dtype=np.uint8),
)
else:
epiMask = np.array(
io.imread(OutDir[3] + "EpiMask_" + OutPos[i] + ".png"), dtype=bool
)
# thin membrane borders prior to initial watershed
MemMask = morphology.thin(MemMask)
# generate cell (re)segmentation and nuclear segmentation images
if (not os.path.exists(f"{OutDir[8]}NucMaskFinal_{OutPos[i]}.png")) or (
not os.path.exists(f"{OutDir[1]}CellSegFinal_{OutPos[i]}.tif")
):
print("CellSeg;")
if not (os.path.exists(f"{OutDir[0]}L2_{OutPos[i]}.tif")):
L2 = np.array(np.add(util.invert(epiMask), MemMask), dtype=np.uint8)
# watershed segmentation with nuclei as basins
L2 = segmentation.watershed(imimposemin(L2, mask), watershed_line=True)
L2 = np.array(L2, dtype=np.float_)
# return cells only in epithelial mask
L2 = np.multiply(L2, epiMask)
io.imsave(f"{OutDir[0]}L2_{OutPos[i]}.tif", np.int16(L2))
else:
L2 = io.imread(f"{OutDir[0]}L2_{OutPos[i]}.tif")
if not (os.path.exists(f"{OutDir[0]}CellSeg_{OutPos[i]}.tif")):
MemMask = np.array(
io.imread(f"{OutDir[5]}MemMask_{OutPos[i]}.png"), dtype=bool
)
start = time.time()
CellSeg = ReSegCells(L2, MemMask)
end = time.time()
print(end - start)
io.imsave(
f"{OutDir[0]}CellSeg_{OutPos[i]}.tif",
np.array(CellSeg, dtype=np.int16),
)
else:
CellSeg = io.imread(f"{OutDir[0]}CellSeg_{OutPos[i]}.tif")
if not (os.path.exists(f"{OutDir[1]}CellSegFinal_{OutPos[i]}.tif")):
CellSeg = io.imread(f"{OutDir[0]}CellSeg_{OutPos[i]}.tif")
SuperMem = io.imread(f"{OutDir[11]}SuperMem_{OutPos[i]}.tif")
Probs = io.imread(f"{OutDir[4]}mem_{OutPos[i]}_stack_Probabilities.png")
# check for cells with | |
lowercase alphanumeric characters, '-' or '.'
* start with an alphanumeric character
* end with an alphanumeric character
See https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names
"""
)
DNSLabelName = pydantic.constr(
    strip_whitespace=True,
    min_length=1,
    max_length=63,
    # BUGFIX: the previous pattern "^[0-9a-zA-Z]([0-9a-zA-Z-])*[0-9A-Za-z]$"
    # demanded at least two characters (separate start and end atoms), so a
    # valid one-character label such as "a" was rejected despite min_length=1.
    # The trailing "[...]*[...]" group is now optional.
    # NOTE(review): RFC 1123 labels are lowercase-only (see docstring below),
    # but the original pattern accepted uppercase; that laxness is preserved
    # for backward compatibility -- confirm whether it should be tightened.
    regex="^[0-9a-zA-Z]([0-9a-zA-Z-]*[0-9a-zA-Z])?$",
)
DNSLabelName.__doc__ = (
    """DNSLabelName models a Kubernetes DNS Label Name identified used to name some resource types.
    Valid DNS Label Names conform to [RFC 1123](https://tools.ietf.org/html/rfc1123) and must:
    * contain at most 63 characters
    * contain only lowercase alphanumeric characters or '-'
    * start with an alphanumeric character
    * end with an alphanumeric character
    See https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names
    """
)
ContainerTagName = pydantic.constr(
strip_whitespace=True,
min_length=1,
max_length=128,
regex="^[0-9a-zA-Z]([0-9a-zA-Z_\\.\\-/:@])*$",
) # NOTE: This regex is not a full validation
ContainerTagName.__doc__ = (
"""ContainerTagName models the name of a container referenced in a Kubernetes manifest.
Valid container tags must:
* be valid ASCII and may contain lowercase and uppercase letters, digits, underscores, periods and dashes.
* not start with a period or a dash
* may contain a maximum of 128 characters
"""
)
class EnvironmentConfiguration(servo.BaseConfiguration):
    # Placeholder: no fields defined yet (see the env TODO on ContainerConfiguration).
    ...
class CommandConfiguration(servo.BaseConfiguration):
    # Placeholder: no fields defined yet (see the command TODO on ContainerConfiguration).
    ...
class ContainerConfiguration(servo.BaseConfiguration):
    """
    The ContainerConfiguration class models the configuration of an optimizeable container within a Kubernetes Deployment.
    """
    # Name of the container to optimize (validated as a ContainerTagName).
    name: ContainerTagName
    # Optional alternate name for reporting purposes.
    alias: Optional[ContainerTagName]
    command: Optional[str] # TODO: create model...
    # Adjustable CPU resource range for the container.
    cpu: CPU
    # Adjustable memory resource range for the container.
    memory: Memory
    env: Optional[List[str]] # (adjustable environment variables) TODO: create model...
    # Fixed environment variables applied verbatim (not adjusted by the optimizer).
    static_environment_variables: Optional[Dict[str, str]]
class OptimizationStrategy(str, enum.Enum):
    """
    OptimizationStrategy is an enumeration of the possible ways to perform optimization on a Kubernetes Deployment.
    """
    # str mixin: members are also str instances and serialize as plain strings.
    default = "default"
    """The default strategy directly applies adjustments to the target Deployment and its containers.
    """
    canary = "canary"
    """The canary strategy creates a servo managed standalone tuning Pod based on the target Deployment and makes
    adjustments to it instead of the Deployment itself.
    """
class BaseOptimizationStrategyConfiguration(pydantic.BaseModel):
    """Base model for strategy configurations; subclasses pin `type` to one strategy."""
    type: OptimizationStrategy = pydantic.Field(..., const=True)

    def __eq__(self, other) -> bool:
        # Support direct comparison against bare OptimizationStrategy members,
        # e.g. `config.strategy == OptimizationStrategy.canary`.
        if isinstance(other, OptimizationStrategy):
            return self.type == other
        return super().__eq__(other)

    class Config:
        # Reject unknown keys when parsing strategy configuration.
        extra = pydantic.Extra.forbid
class DefaultOptimizationStrategyConfiguration(BaseOptimizationStrategyConfiguration):
    """Configuration for the default strategy (adjusts the Deployment directly)."""
    type = pydantic.Field(OptimizationStrategy.default, const=True)
class CanaryOptimizationStrategyConfiguration(BaseOptimizationStrategyConfiguration):
    """Configuration for the canary strategy (adjusts a servo-managed tuning Pod)."""
    type = pydantic.Field(OptimizationStrategy.canary, const=True)
    # Optional alternate name for the tuning target in reports.
    alias: Optional[ContainerTagName]
class FailureMode(str, enum.Enum):
    """
    The FailureMode enumeration defines how to handle a failed adjustment of a Kubernetes resource.
    """
    rollback = "rollback"
    shutdown = "shutdown"
    ignore = "ignore"
    exception = "exception"
    destroy = "destroy"  # deprecated, but accepted as "shutdown"

    @classmethod
    def options(cls) -> List[str]:
        """
        Return a list of strings that identifies all failure mode configuration options.
        """
        # Iterate the members directly with a comprehension instead of
        # map()/lambda over __members__ — same result, clearer intent.
        return [mode.value for mode in cls]
class PermissionSet(pydantic.BaseModel):
    """Permissions objects model Kubernetes permissions granted through RBAC."""
    # API group the resources belong to ("" is the core group).
    group: str
    # Resource names, e.g. "deployments" or subresources like "pods/logs".
    resources: List[str]
    # RBAC verbs required on each resource, e.g. "get", "list", "watch".
    verbs: List[str]
# Baseline RBAC permissions the connector always requires.
STANDARD_PERMISSIONS = [
    PermissionSet(
        group="apps",
        resources=["deployments", "replicasets"],
        verbs=["get", "list", "watch", "update", "patch"],
    ),
    PermissionSet(
        group="",
        resources=["namespaces"],
        verbs=["get"],
    ),
    PermissionSet(
        group="",
        resources=["pods", "pods/logs", "pods/status"],
        verbs=["create", "delete", "get", "list", "watch"],
    ),
]
# Additional permissions required only when Argo Rollouts are configured
# (consumed by KubernetesChecks.check_kubernetes_permissions).
ROLLOUT_PERMISSIONS = [
    PermissionSet(
        group="argoproj.io",
        resources=["rollouts", "rollouts/status"],
        verbs=["get", "list", "watch", "update", "patch"],
    ),
]
class BaseKubernetesConfiguration(servo.BaseConfiguration):
    """
    BaseKubernetesConfiguration provides a set of configuration primitives for optimizable Kubernetes resources.
    Child classes of `BaseKubernetesConfiguration` such as the `DeploymentConfiguration` can benefit from
    the cascading configuration behavior implemented on the `KubernetesConfiguration` class.
    Common settings will be cascaded from the containing class for attributes if they have not been explicitly set
    and are equal to the default value. Settings that are mandatory in the superclass (such as timeout and namespace)
    but are available for override should be declared as optional on `BaseKubernetesConfiguration` and overridden and
    declared as mandatory in `KubernetesConfiguration`.
    """
    kubeconfig: Optional[pydantic.FilePath] = pydantic.Field(
        description="Path to the kubeconfig file. If `None`, use the default from the environment.",
    )
    context: Optional[str] = pydantic.Field(description="Name of the kubeconfig context to use.")
    namespace: Optional[DNSSubdomainName] = pydantic.Field(
        description="Kubernetes namespace where the target deployments are running.",
    )
    settlement: Optional[servo.Duration] = pydantic.Field(
        description="Duration to observe the application after an adjust to ensure the deployment is stable. May be overridden by optimizer supplied `control.adjust.settlement` value."
    )
    on_failure: FailureMode = pydantic.Field(
        FailureMode.exception,
        description=f"How to handle a failed adjustment. Options are: {servo.utilities.strings.join_to_series(list(FailureMode.__members__.values()))}",
    )
    timeout: Optional[servo.Duration] = pydantic.Field(
        description="Time interval to wait before considering Kubernetes operations to have failed."
    )

    @pydantic.validator("on_failure")
    def validate_failure_mode(cls, v):
        # Transparently map the deprecated 'destroy' mode onto 'shutdown'.
        if v == FailureMode.destroy:
            servo.logger.warning(f"Deprecated value 'destroy' used for 'on_failure', replacing with 'shutdown'")
            return FailureMode.shutdown
        return v
# Acceptable forms for the `strategy` field: a bare enum member or a
# fully-specified strategy configuration model.
StrategyTypes = Union[
    OptimizationStrategy,
    DefaultOptimizationStrategyConfiguration,
    CanaryOptimizationStrategyConfiguration,
]
class DeploymentConfiguration(BaseKubernetesConfiguration):
    """
    The DeploymentConfiguration class models the configuration of an optimizable Kubernetes Deployment.
    """
    # Name of the target Deployment.
    name: DNSSubdomainName
    # Containers within the Deployment to optimize.
    containers: List[ContainerConfiguration]
    # Deployments optimize in place by default.
    strategy: StrategyTypes = OptimizationStrategy.default
    # Adjustable replica count range.
    replicas: servo.Replicas
class RolloutConfiguration(BaseKubernetesConfiguration):
    """
    The RolloutConfiguration class models the configuration of an optimizable Argo Rollout.
    """
    # Name of the target Rollout.
    name: DNSSubdomainName
    # Containers within the Rollout to optimize.
    containers: List[ContainerConfiguration]
    # Rollouts default to the canary strategy (unlike DeploymentConfiguration).
    strategy: StrategyTypes = OptimizationStrategy.canary
    # Adjustable replica count range.
    replicas: servo.Replicas
class KubernetesConfiguration(BaseKubernetesConfiguration):
    """Top-level Kubernetes connector configuration.

    Makes `namespace` and `timeout` mandatory (they are optional on the base
    class) and cascades common settings onto child configurations.
    """
    namespace: DNSSubdomainName = DNSSubdomainName("default")
    timeout: servo.Duration = "5m"
    permissions: List[PermissionSet] = pydantic.Field(
        STANDARD_PERMISSIONS,
        description="Permissions required by the connector to operate in Kubernetes.",
    )
    deployments: Optional[List[DeploymentConfiguration]] = pydantic.Field(
        description="Deployments to be optimized.",
    )
    rollouts: Optional[List[RolloutConfiguration]] = pydantic.Field(
        description="Argo rollouts to be optimized.",
    )

    @pydantic.root_validator
    def check_deployment_and_rollout(cls, values):
        # At least one optimization target (a deployment or a rollout) must exist.
        if (not values.get('deployments')) and (not values.get('rollouts')):
            raise ValueError("No optimization target(s) were specified")
        return values

    @classmethod
    def generate(cls, **kwargs) -> "KubernetesConfiguration":
        """Generate an example configuration for seeding a new config file."""
        return cls(
            namespace="default",
            description="Update the namespace, deployment, etc. to match your Kubernetes cluster",
            deployments=[
                DeploymentConfiguration(
                    name="app",
                    replicas=servo.Replicas(
                        min=1,
                        max=2,
                    ),
                    containers=[
                        ContainerConfiguration(
                            # NOTE(review): this looks like an image reference used
                            # as a container name — confirm the intended example value.
                            name="opsani/fiber-http:latest",
                            cpu=CPU(min="250m", max=4, step="125m"),
                            memory=Memory(min="256MiB", max="4GiB", step="128MiB"),
                        )
                    ],
                )
            ],
            **kwargs,
        )

    def __init__(self, *args, **kwargs) -> None:  # noqa: D107
        super().__init__(*args, **kwargs)
        self.cascade_common_settings()

    def cascade_common_settings(self, *, overwrite: bool = False) -> None:
        """
        Apply common settings to child models that inherit from BaseKubernetesConfiguration.
        This method provides enables hierarchical overrides of common configuration values
        based on shared inheritance. Each attribute is introspected and if it inherits from
        `BaseKubernetesConfiguration`, any common attribute values are copied onto the child
        model, cascading them downward. Only attributes whose value is equal to the default
        and have not been explicitly set are updated.
        # FIXME: Cascaded settings should only be optional if they can be optional at the top level. Right now we are implying that namespace can be None as well.
        """
        for name, field in self.__fields__.items():
            if issubclass(field.type_, BaseKubernetesConfiguration):
                attribute = getattr(self, name)
                for obj in (
                    attribute if isinstance(attribute, Collection) else [attribute]
                ):
                    # don't cascade if optional and not set
                    if obj is None:
                        continue
                    # NOTE: `child_field` was renamed from `field` to stop it
                    # shadowing the outer loop variable of the same name.
                    for (
                        field_name,
                        child_field,
                    ) in BaseKubernetesConfiguration.__fields__.items():
                        if field_name in servo.BaseConfiguration.__fields__:
                            # don't cascade from the base class
                            continue
                        if field_name in obj.__fields_set__ and not overwrite:
                            self.logger.trace(
                                f"skipping config cascade for field '{field_name}' set with value '{getattr(obj, field_name)}'"
                            )
                            continue
                        current_value = getattr(obj, field_name)
                        if overwrite or current_value == child_field.default:
                            parent_value = getattr(self, field_name)
                            setattr(obj, field_name, parent_value)
                            self.logger.trace(
                                f"cascaded setting '{field_name}' from KubernetesConfiguration to child '{attribute}': value={parent_value}"
                            )
                        else:
                            # The original message claimed "the default value is set",
                            # but this branch fires when the value differs from the default.
                            self.logger.trace(
                                f"declining to cascade value to field '{field_name}': the current value differs from the default and overwrite is false"
                            )

    async def load_kubeconfig(self) -> None:
        """
        Asynchronously load the Kubernetes configuration
        """
        config_file = pathlib.Path(self.kubeconfig or kubernetes_asyncio.config.kube_config.KUBE_CONFIG_DEFAULT_LOCATION).expanduser()
        if config_file.exists():
            await kubernetes_asyncio.config.load_kube_config(
                config_file=str(config_file),
                context=self.context,
            )
        elif os.getenv("KUBERNETES_SERVICE_HOST"):
            # Running inside a cluster: use the service account credentials.
            kubernetes_asyncio.config.load_incluster_config()
        else:
            raise RuntimeError(
                "unable to configure Kubernetes client: no kubeconfig file nor in-cluser environment variables found"
            )
# Resolve postponed (string) type references on models declared elsewhere in
# this module. NOTE(review): the models themselves are defined outside this chunk.
KubernetesOptimizations.update_forward_refs()
DeploymentOptimization.update_forward_refs()
CanaryOptimization.update_forward_refs()
class KubernetesChecks(servo.BaseChecks):
"""Checks for ensuring that the Kubernetes connector is ready to run."""
config: KubernetesConfiguration
@servo.require("Connectivity to Kubernetes")
async def check_kubernetes_connectivity(self) -> None:
async with kubernetes_asyncio.client.api_client.ApiClient() as api:
v1 =kubernetes_asyncio.client.VersionApi(api)
await v1.get_code()
@servo.warn("Kubernetes version")
async def check_kubernetes_version(self) -> None:
async with kubernetes_asyncio.client.api_client.ApiClient() as api:
v1 =kubernetes_asyncio.client.VersionApi(api)
version = await v1.get_code()
assert int(version.major) >= 1
# EKS sets minor to "17+"
assert int(int("".join(c for c in version.minor if c.isdigit()))) >= 16
@servo.require("Required permissions")
async def check_kubernetes_permissions(self) -> None:
async with kubernetes_asyncio.client.api_client.ApiClient() as api:
v1 = kubernetes_asyncio.client.AuthorizationV1Api(api)
required_permissions = self.config.permissions
if self.config.rollouts:
required_permissions.append(ROLLOUT_PERMISSIONS)
for permission in required_permissions:
for resource in permission.resources:
for verb in permission.verbs:
attributes = kubernetes_asyncio.client.models.V1ResourceAttributes(
namespace=self.config.namespace,
group=permission.group,
resource=resource,
verb=verb,
)
spec =kubernetes_asyncio.client.models.V1SelfSubjectAccessReviewSpec(
resource_attributes=attributes
)
review =kubernetes_asyncio.client.models.V1SelfSubjectAccessReview(spec=spec)
access_review = await v1.create_self_subject_access_review(
body=review
)
assert (
access_review.status.allowed
), f'Not allowed to "{verb}" resource "{resource}"'
    @servo.require('Namespace "{self.config.namespace}" is readable')
    async def check_kubernetes_namespace(self) -> None:
        """Verify the configured namespace can be read via the Kubernetes API."""
        await Namespace.read(self.config.namespace)
    @servo.multicheck('Deployment "{item.name}" is readable')
    async def check_kubernetes_deployments(self) -> Tuple[Iterable, servo.CheckHandler]:
        """Yield a readability check for every configured Deployment."""
        async def check_dep(dep_config: DeploymentConfiguration) -> None:
            # Raises if the Deployment cannot be read in its namespace.
            await Deployment.read(dep_config.name, dep_config.namespace)
        return (self.config.deployments or []), check_dep
    @servo.multicheck('Rollout "{item.name}" is readable')
    async def check_kubernetes_rollouts(self) -> Tuple[Iterable, servo.CheckHandler]:
        """Yield a readability check for every configured Argo Rollout."""
        async def check_rol(rol_config: RolloutConfiguration) -> None:
            # Raises if the Rollout cannot be read in its namespace.
            await Rollout.read(rol_config.name, rol_config.namespace)
        return (self.config.rollouts or []), check_rol
async def _check_container_resource_requirements(
self,
target_controller: Union[Deployment, Rollout],
target_config: Union[DeploymentConfiguration, RolloutConfiguration]
) -> None:
for cont_config in target_config.containers:
container = target_controller.find_container(cont_config.name)
assert container, f"{type(target_controller).__name__} {target_config.name} | |
<filename>tools/hum_proto/hum_proto/apploop.py
# Copyright (C) 2017 by <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import errno
import os
import shlex
import socket
import sys
from prompt_toolkit import application
from prompt_toolkit import buffer
from prompt_toolkit import interface
from prompt_toolkit.layout import containers
from prompt_toolkit.layout import controls
from prompt_toolkit.layout import dimension
from prompt_toolkit import shortcuts
import six
from hum_proto import message
def _mksockerr(err):
"""
Construct a ``socket.error`` instance based on a specified socket
error number.
:param int err: A value from ``errno``.
:returns: The specified socket error.
:rtype: ``socket.error``
"""
return socket.error(err, os.strerror(err))
def connect(address):
    """
    Create a connection to a specified address.  The address may be a
    local path, or it may be a host name, an IPv4 address, or an IPv6
    address (optionally surrounded by square brackets).  A port may be
    specified by separating it from the host name or address with a
    colon.  If a port is not specified, it defaults to "7300".
    :param str address: The address to connect to.
    :returns: A connected socket.
    """
    # Is it a path?
    if '/' in address:
        # A slash anywhere in the address selects a UNIX-domain socket
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
        sock.connect(address)
        return sock
    # OK, it must be a host and port
    host, sep, port = address.rpartition(':')
    if not sep or not port.isdigit():
        # No usable ":port" suffix; treat the whole string as the host
        host = address
        port = 7300
    if host.startswith('[') and host.endswith(']'):
        # Strip IPv6 brackets, e.g. "[::1]:7300"
        host = host[1:-1]
    # Connect to the host
    lasterr = None
    # Try each resolved address in turn until one connects
    for family, socktype, proto, _canonname, sockaddr in socket.getaddrinfo(
            host, port, socket.AF_UNSPEC, socket.SOCK_STREAM
    ):
        try:
            sock = socket.socket(family, socktype, proto)
            sock.connect(sockaddr)
            return sock
        except Exception:
            # Failed; save the last error
            lasterr = sys.exc_info()
    # Failed to connect to a host
    # NOTE(review): if getaddrinfo returned no results without raising,
    # lasterr would still be None and this reraise would fail — confirm
    # whether that case is reachable on the supported platforms.
    six.reraise(*lasterr)
def command(func_or_name=None, aliases=None):
    """
    A decorator for marking ``ApplicationLoop`` methods that implement
    particular commands.  For instance, ``@command('spam')`` will mark
    a method as implementing the "spam" command.  If no name is given
    or the argument left off, the method name will be used for the
    command.
    :param func_or_name: A callable or a command name.
    :param list aliases: A list of aliases for the command.
    :returns: If ``func_or_name`` is a callable, sets the
              ``_command_name`` attribute to the callable name, then
              returns the callable.  Otherwise, returns a function
              decorator which sets ``_command_name`` appropriately.
    """
    # Bare usage: @command applied directly to the method
    if callable(func_or_name):
        func_or_name._command_name = func_or_name.__name__
        func_or_name._command_aliases = aliases or []
        return func_or_name

    # Parameterized usage: @command('name', aliases=[...])
    def decorator(func):
        func._command_name = func_or_name or func.__name__
        func._command_aliases = aliases or []
        return func
    return decorator
class ApplicationLoopMeta(type):
    """
    A metaclass for the ``ApplicationLoop`` class.  This metaclass
    searches the namespace for methods decorated with the ``@command``
    decorator and constructs a dictionary mapping command names to the
    underlying functions.
    """

    def __init__(cls, name, bases, namespace):
        """
        Initialize a newly constructed class.
        :param str name: The name of the new class.
        :param tuple bases: A tuple of the class's base classes.
        :param dict namespace: The new class's namespace.
        """
        # Collect every @command-decorated callable, keyed by its
        # primary name and each of its aliases.
        cmd_map = {}
        for attr in namespace.values():
            if not (callable(attr) and hasattr(attr, '_command_name')):
                continue
            cmd_map[attr._command_name] = attr
            for alias in attr._command_aliases:
                cmd_map[alias] = attr
        cls._commands = cmd_map
@six.add_metaclass(ApplicationLoopMeta)
class ApplicationLoop(object):
"""
Core application loop for the Humboldt Protocol Analyzer.
"""
    def __init__(self, sock, sslctx_cli, sslctx_srv):
        """
        Initialize an ``ApplicationLoop`` instance.
        :param sock: A connected socket.
        :type sock: ``socket.socket``
        :param sslctx_cli: An SSL context to use for client-side SSL
                           support.
        :type sslctx_cli: ``hum_proto.ssl_utils.SSLContext``
        :param sslctx_srv: An SSL context to use for server-side SSL
                           support.
        :type sslctx_srv: ``hum_proto.ssl_utils.SSLContext``
        """
        # Save the socket and SSL context
        self.sock = sock
        self.sslctx_cli = sslctx_cli
        self.sslctx_srv = sslctx_srv
        # The command line interface
        # NOTE(review): presumably built lazily by a `cli` property
        # defined elsewhere in this class — confirm.
        self._cli = None
        # Initialize the display buffer
        self.display_buf = buffer.Buffer()
        # Initialize the command buffer; pressing "enter" invokes
        # self.execute() via the accept action.
        self.command_buf = buffer.Buffer(
            accept_action=buffer.AcceptAction(self.execute),
        )
    def _close(self):
        """
        Close the socket.
        """
        # Is it closed already?
        if self.sock is None:
            return
        # Display a message to alert the user to the closed connection
        self.display('Connection closed')
        # Stop monitoring the socket before closing it, then drop the
        # reference so subsequent calls are no-ops.
        self.cli.eventloop.remove_reader(self.sock)
        self.sock.close()
        self.sock = None
    def _recv(self):
        """
        Receive a message from the socket.
        """
        # Read a message
        msg = message.Message.recv(self.sock)
        if msg is None:
            # recv() returned None: the peer closed the connection
            self._close()
        else:
            # Display the message
            self.display('S: %r' % msg)
            # If there's a reaction, invoke it
            if msg.reaction:
                msg.reaction(self)
        # Make sure we redraw to display the message
        self.cli.invalidate()
    def display(self, text):
        """
        Display text in the display pane.
        :param str text: The text to display.
        """
        # Append the text to the display buffer as its own line
        self.display_buf.insert_text('%s\n' % text)
def execute(self, cli, doc):
"""
Execute a command. This is called whenever the "enter" key is
pressed in the command buffer.
:param cli: The command line interface.
:type cli: ``prompt_toolkit.interface.CommandLineInterface``
:param doc: The document contained in the command buffer.
:type doc: ``prompt_toolkit.document.Document``
"""
# Do nothing if the command is empty
if not doc.text:
return
# Split the command
cmd = shlex.split(doc.text)
# Reset the input buffer
self.command_buf.reset(append_to_history=True)
# What do we do?
if cmd[0] not in self._commands:
self.display('ERROR: Unknown command "%s"' % cmd[0])
else:
self._commands[cmd[0]](self, cmd[1:])
    @command(aliases=['quit'])
    def exit(self, args):
        """
        Exits the interpreter.  Arguments are ignored.
        :param list args: The list of arguments to the command.
        """
        # Close the connection
        self._close()
        # Signal the interface to exit
        self.cli.set_return_value(None)
    @command
    def close(self, args):
        """
        Close the connection.  Arguments are ignored.
        :param list args: The list of arguments to the command.
        """
        # Close the connection (no-op if already closed)
        self._close()
@command
def connect(self, args):
"""
Connect to the specified Humboldt endpoint.
:param list args: The list of arguments to the command.
"""
# Make sure we have an address
if len(args) != 1:
self.display(
'ERROR: too %s arguments for connect' %
('many' if len(args) > 1 else 'few')
)
return
# Make the new connection
try:
new_sock = connect(args[0])
except Exception as err:
# Hmmm, couldn't connect?
self.display(
'ERROR: Unable to connect to %s: %s' % (args[0], err)
)
return
# Set up the new socket
self.setsock(new_sock)
self.display('Connected to %s' % args[0])
@command
def send(self, args):
"""
Construct a message and send it to the connected Humboldt
instance.
:param list args: The list of arguments to the command.
"""
# Interpret the message description
try:
msg = message.Message.interpret(args)
except message.CommandError as err:
self.display(
'ERROR: Failed to understand message to send: %s' % err
)
return
# Send the message
if self.sock is None:
self.display('ERROR: Connection is closed')
return
# Send the message
self.send_msg(msg)
    def send_msg(self, msg):
        """
        Send a message to the connected Humboldt instance.
        :param msg: The message to send.
        :type msg: ``humboldt.message.Message``
        """
        msg.send(self.sock)
        # Display what we sent
        self.display('C: %r' % msg)
        # If there's an action, invoke it
        if msg.action:
            msg.action(self)
    def setsock(self, newsock):
        """
        Set up a new socket to monitor.
        :param newsock: The new socket to monitor.
        :type newsock: ``socket.socket``
        """
        # Make sure the current socket is closed
        if self.sock:
            self._close()
        # Save the new socket and set it up for monitoring; _recv will
        # be invoked whenever the socket becomes readable.
        self.sock = newsock
        self.cli.eventloop.add_reader(self.sock, self._recv)
def wrap(self, wrapper, *args, **kwargs):
"""
Wrap the socket.
:param wrapper: A callable of at least one argument. This
callable must take a socket object as its
first parameter, and must return a new
socket-compatible object that will be used for
subsequent message sending and receiving.
:param *args: Additional positional arguments for the wrapper.
These will be passed after the socket object.
:param **kwargs: Additional keyword arguments for the wrapper.
:raises socket.error:
If the socket has been closed, raises a ``socket.error``
with the | |
<reponame>keiserlab/LUNA<filename>luna/interaction/calc.py
from openbabel import openbabel as ob
from operator import le, ge
from itertools import combinations, product
from collections import defaultdict
import json
from luna.interaction.config import DefaultInteractionConfig, InteractionConfig
from luna.interaction.filter import InteractionFilter
from luna.interaction.type import InteractionType
from luna.mol.features import ChemicalFeature
from luna.wrappers.base import BondType
from luna.analysis.summary import count_interaction_types
import luna.util.math as im
from luna.util.default_values import BOUNDARY_CONFIG
from luna.util.exceptions import IllegalArgumentError
from luna.mol.groups import AtomGroupNeighborhood
from luna.util.file import pickle_data, unpickle_data
from luna.version import __version__
import logging
logger = logging.getLogger()
CATIONS = ("PositivelyIonizable", "PosIonizable", "Positive")
ANIONS = ("NegativelyIonizable", "NegIonizable", "Negative")
COV_BONDS_MAPPING = {
BondType.SINGLE: "Single bond",
BondType.DOUBLE: "Double bond",
BondType.TRIPLE: "Triple bond",
BondType.AROMATIC: "Aromatic bond"
}
WATER_NAMES = ['HOH', 'DOD', 'WAT', 'H2O', 'OH2']
class InteractionsManager:
    """Store and manage :class:`~luna.interaction.type.InteractionType` objects.

    Parameters
    ----------
    interactions : iterable of :class:`~luna.interaction.type.InteractionType`, optional
        An initial sequence of :class:`~luna.interaction.type.InteractionType` objects.
    entry : :class:`~luna.mol.entry.Entry`, optional
        The chain or compound used as reference to calculate interactions.
    """

    def __init__(self, interactions=None, entry=None):
        if interactions is None:
            interactions = []
        self.entry = entry
        self._interactions = list(interactions)
        # Record the LUNA version so pickled managers can be identified later.
        self.version = __version__

    @property
    def interactions(self):
        """ list of :class:`~luna.interaction.type.InteractionType`, read-only: The list of interactions.\
        Additional interactions should be added using the method :py:meth:`add_interactions`."""
        return self._interactions

    @property
    def size(self):
        """int, read-only: The number of interactions."""
        return len(self._interactions)

    def get_all_atm_grps(self):
        """Get all atom groups establishing interactions.

        Returns
        -------
         : set of :class:`~luna.mol.groups.AtomGroup`
        """
        atm_grps = set()
        for inter in self.interactions:
            atm_grps.add(inter.src_grp)
            atm_grps.add(inter.trgt_grp)
        return atm_grps

    def count_interactions(self, must_have_target=False):
        """Count the number of each type of interaction in ``interactions``.

        Parameters
        ----------
        must_have_target : bool
            If True, count only interactions involving the target ligand.
            The default value is False, which implies all interactions will be considered.

        Returns
        -------
         : dict
        """
        return count_interaction_types(self.interactions, must_have_target=must_have_target)

    # Backwards-compatible alias: the original method name contained a typo
    # ("interations"); existing callers continue to work.
    count_interations = count_interactions

    def add_interactions(self, interactions):
        """Add one or more :class:`~luna.interaction.type.InteractionType` objects to
        ``interactions``."""
        # Deduplicate while merging; note this does not preserve insertion order.
        self._interactions = list(set(self.interactions + list(interactions)))

    def remove_interactions(self, interactions):
        """Remove one or more :class:`~luna.interaction.type.InteractionType` objects from \
        ``interactions``.
        Any recursive references to the removed objects will also be cleared.
        """
        self._interactions = list(set(self.interactions) - set(interactions))
        for inter in interactions:
            inter.clear_refs()

    def filter_by_types(self, types):
        """Filter :class:`~luna.interaction.type.InteractionType` objects by their types.

        Parameters
        ----------
        types : iterable of str
            A sequence of interaction types.

        Yields
        ------
        :class:`~luna.interaction.type.InteractionType`
        """
        for inter in self.interactions:
            if inter.type in types:
                yield inter

    def filter_out_by_binding_mode(self, binding_modes_filter):
        """Filter out interactions based on binding modes.
        **Note:** this method modifies ``interactions``.

        Parameters
        ----------
        binding_modes_filter : :class:`~luna.interaction.filter.BindingModeFilter`
            A :class:`~luna.interaction.filter.BindingModeFilter` object that defines binding mode conditions
            to decide which interactions are valid.

        Returns
        -------
         : set of :class:`~luna.interaction.type.InteractionType`
            The interactions that were filtered out.
        """
        inters_to_remove = set()
        for inter in self.interactions:
            if not binding_modes_filter.is_valid(inter):
                inters_to_remove.add(inter)
        self.remove_interactions(inters_to_remove)
        return inters_to_remove

    def to_csv(self, output_file):
        """Write interactions to a comma-separated values (csv) file.

        Parameters
        ----------
        output_file : str
            The output CSV file.
        """
        interactions_set = set()
        for inter in self.interactions:
            # full_atom_name is used as-is; the original applied an identity
            # split("/") + "/".join round-trip, which has been removed.
            grp1 = ";".join(sorted(a.full_atom_name for a in inter.src_grp.atoms))
            grp2 = ";".join(sorted(a.full_atom_name for a in inter.trgt_grp.atoms))
            # Order the pair so (A, B) and (B, A) produce one row.
            grp1, grp2 = sorted([grp1, grp2])
            interactions_set.add((grp1, grp2, inter.type))
        with open(output_file, "w") as OUT:
            OUT.write("atom_group1,atom_group2,interaction\n")
            # Sort lines before writing to always keep the same order.
            OUT.write("\n".join([",".join(k) for k in sorted(interactions_set)]))

    def to_json(self, output_file=None, indent=None):
        """Write interactions to a JSON file.

        Parameters
        ----------
        output_file : str
            The output JSON file.
        indent : int or str, optional
            Indent level for pretty-printed JSON files.
            An indent level of 0, negative, or '' only insert newlines.
            Positive integers indent that many spaces per level.
            If a string is provided (e.g., '\\\\t'), it will be used to indent each level.
            The default value is None, which selects the most compact representation.
        """
        with open(output_file, 'w') as OUT:
            inter_objs = [inter.as_json() for inter in self.interactions]
            json.dump(inter_objs, OUT, indent=indent)

    def save(self, output_file, compressed=True):
        """Write the pickled representation of the `InteractionsManager` object to the file ``output_file``.

        Parameters
        ----------
        output_file : str
            The output file.
        compressed : bool, optional
            If True (the default), compress the pickled representation as a gzip file (.gz).

        Raises
        -------
        FileNotCreated
            If the file could not be created.
        """
        pickle_data(self, output_file, compressed)

    @staticmethod
    def load(input_file):
        """Load the pickled representation of an `InteractionsManager` object saved at the file ``input_file``.

        Returns
        ----------
         : `InteractionsManager`
            The reconstituted `InteractionsManager` object.

        Raises
        -------
        PKLNotReadError
            If the file could not be loaded.
        """
        return unpickle_data(input_file)

    def __len__(self):
        # Number of interactions
        return self.size

    def __iter__(self):
        """Iterate over children."""
        for inter in self.interactions:
            yield inter
class InteractionCalculator:
"""Calculate interactions.
.. note::
This class provides default LUNA methods to calculate interactions.
However, one can provide their own methods without modifying this class.
In the **Examples** section, we will show how to define custom functions.
.. note::
In case you want to disable specific parameters (e.g., angles) used during
the calculation of interactions, you do not need to define a custom
function for it. You could just delete the parameter from the configuration
and LUNA will automatically recognize that a given parameter is not
necessary anymore.
Check **Examples 3** to see how to do it and how to implement this automatic
behavior on your custom functions.
Parameters
----------
inter_config : :class:`~luna.interaction.config.InteractionConfig`
An :class:`~luna.interaction.config.InteractionConfig` object with all parameters and cutoffs necessary
to compute interactions defined in ``inter_funcs``.
If not provided, the default LUNA configuration will be used instead \
(:class:`~luna.interaction.config.DefaultInteractionConfig`).
inter_filter : :class:`~luna.interaction.filter.InteractionFilter`, optional
An :class:`~luna.interaction.filter.InteractionFilter` object to filter out interactions on-the-fly.
The default value is None, which implies no interaction will be filtered out.
inter_funcs : dict of {tuple : iterable of callable}
A dict to define custom functions to calculate interactions,
where keys are tuples of feature names (e.g. ``("Hydrophobic", "Hydrophobic")``) and
values are lists of references to custom functions (see Examples for more details).
If not provided, the default LUNA methods will be used instead.
add_non_cov : bool
If True (the default), compute non-covalent interactions.
If you are providing custom functions to compute non-covalent interactions and
want to make them controllable by this flag, make sure to verify the state of
``add_non_cov`` at the beginning of the function and return an empty list in case it is False.
add_cov : bool
If True (the default), compute covalent interactions.
If you are providing custom functions to compute covalent interactions and
want to make them controllable by this flag, make sure to verify the state of
``add_cov`` at the beginning of the function and return an empty list in case it is False.
add_proximal : bool
If True, compute proximal interactions, which are only distance-based contacts between atoms
or atom groups that, therefore, only imply proximity. The default value is False.
If you are providing custom functions to compute proximal interactions and
want to make them controllable by this flag, make sure to verify the state of
``add_proximal`` at the beginning of the function and return an empty list in case it is False.
add_atom_atom : bool
If True (the default), compute atom-atom interactions,
which, as the name suggests, are interactions that only involve atoms no matter their features.
If you are providing custom functions to compute atom-atom interactions and want to make them
controllable by this flag, make sure to verify the state of ``add_atom_atom`` at the beginning
of the function and return an empty list in case it is False.
.. note::
In LUNA, we consider the following interactions as atom-atom: `Van der Waals`,
`Van der Waals clash`, and `Atom overlap`. We opted to separate `Van der Waals` from
other non-covalent interactions because LUNA may generate an unnecessary number of additional
interactions that are usually already represented by other non-covalent interactions as
weak hydrogen bonds, hydrophobic, or dipole-dipole interactions.
Thus, to give users a fine-grain control over which interactions to calculate,
we provided this additional flag to turn off the calculation of Van der Waals interactions.
add_dependent_inter : bool
If True, compute interactions that depend on other interactions.
Currently, only water-bridged hydrogen bonds and salt bridges have a dependency on
other interactions. The first, depends on two or more hydrogen bonds, while the second depends on
an ionic and a hydrogen bond. The default value is False, which implies no dependent interaction
will be computed.
add_h2o_pairs_with_no_target | |
<gh_stars>0
"""
Code source https://github.com/amdegroot/ssd.pytorch
"""
import os.path as osp
import sys
import torch
import torch.utils.data as data
from pycocotools.coco import COCO
import cv2
import numpy as np
# Root directory that contains the COCO 'images' and 'annotations' sub-folders.
COCO_ROOT = osp.join("dataset")
IMAGES = 'images'
ANNOTATIONS = 'annotations'
COCO_API = 'PythonAPI'
# Annotation file name pattern, e.g. 'instances_val2017.json'.
INSTANCES_SET = 'instances_{}.json'
# The 80 MS COCO 2017 object classes, ordered by contiguous (zero-based) label id.
COCO_CLASSES_2017 = \
    ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',
     'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat',
     'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
     'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
     'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle',
     'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
     'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
     'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
     'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
     'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')
def get_label_map(label_file):
    """Parse a comma-separated label-map file into a dict.

    Each non-empty line has the form ``<coco_category_id>,<contiguous_id>``.

    Args:
        label_file (str): path to the label-map text file.

    Returns:
        dict: mapping original COCO category id -> contiguous label id.
    """
    label_map = {}
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(label_file, 'r') as labels:
        for line in labels:
            # Skip blank lines (e.g. a trailing newline) instead of crashing.
            if not line.strip():
                continue
            ids = line.split(',')
            label_map[int(ids[0])] = int(ids[1])
    return label_map
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).
    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations
    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on
               0 dim
    """
    # Split the batch into its three parallel streams.
    images, targets, masks = zip(*batch)
    # Each sample carries one array per augmentation scale; regroup scale-wise
    # and stack the samples of each scale into a single tensor.
    stacked_images = [torch.from_numpy(np.stack(per_scale)) for per_scale in zip(*images)]
    stacked_masks = [torch.from_numpy(np.stack(per_scale)) for per_scale in zip(*masks)]
    # Box counts vary per image, so they stay as a list of tensors.
    box_tensors = [torch.from_numpy(annotation) for annotation in targets]
    return stacked_images, box_tensors, stacked_masks
class COCOAnnotationTransform(object):
    """Convert a COCO annotation list into ``[[xmin, ymin, xmax, ymax, label_idx], ...]``.

    Box coordinates are normalised to [0, 1] by the image width/height, and
    the COCO category id is mapped to a contiguous zero-based class index via
    a lookup table loaded from ``coco_labels.txt``.
    """
    def __init__(self):
        # Lookup table: COCO category id -> contiguous label id.
        self.label_map = get_label_map(osp.join(COCO_ROOT, 'coco_labels.txt'))
    def __call__(self, target, width, height):
        """
        Args:
            target (dict): COCO target json annotation as a python dict
            height (int): height
            width (int): width
        Returns:
            a list containing lists of bounding boxes [bbox coords, class idx]
        """
        normaliser = np.array([width, height, width, height])
        boxes = []
        for annotation in target:
            if 'bbox' not in annotation:
                print("no bbox problem!")
                continue
            # COCO stores (x, y, w, h); convert to (xmin, ymin, xmax, ymax).
            box = np.array(annotation['bbox']).astype(np.float32)
            box[2] += box[0]
            box[3] += box[1]
            class_idx = self.label_map[annotation['category_id']] - 1
            scaled = list(box / normaliser)
            # Sanity check: normalised coordinates must not exceed 1.
            if (np.array(scaled) > 1.).any():
                print("WTFFFFFF")
            scaled.append(class_idx)
            boxes += [scaled]  # [xmin, ymin, xmax, ymax, label_idx]
        return boxes  # [[xmin, ymin, xmax, ymax, label_idx], ... ]
class COCODetection(data.Dataset):
    """`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.
    Args:
        root (string): Root directory where images are downloaded to.
        image_set (string): Name of the specific set of COCO images.
        transform (callable, optional): A function/transform that augments the
                                        raw images.
        target_transform (callable, optional): A function/transform that takes
            in the target (bbox) and transforms it.
        dataset_name (string, optional): display name used by ``__repr__``.
    """
    def __init__(self, root, image_set='val2017', transform=None,
                 target_transform=COCOAnnotationTransform(), dataset_name='MS COCO'):
        sys.path.append(osp.join(root, COCO_API))
        self.root = osp.join(root, IMAGES, image_set)
        self.coco = COCO(osp.join(root, ANNOTATIONS,
                                  INSTANCES_SET.format(image_set)))
        # Keep only the images that actually carry annotations.
        self.ids = list(self.coco.imgToAnns.keys())
        self.transform = transform
        self.target_transform = target_transform
        self.name = dataset_name
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            list: per-scale image tensors concatenated with per-scale
            segmentation targets (box annotations are dropped here; use
            ``pull_item`` when they are needed).
        """
        im, gt, sg, h, w = self.pull_item(index)
        return im + sg
    def __len__(self):
        """Number of annotated images in the set."""
        return len(self.ids)
    def pull_item(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (images, target, segmentations, height, width) where
            ``images`` is a list of CHW tensors (one per augmentation scale)
            and ``target`` holds rows of [xmin, ymin, xmax, ymax, label_idx].
        """
        img_id = self.ids[index]
        # (Removed a dead store of coco.imgToAnns[img_id]; it was immediately
        # overwritten by loadAnns below.)
        ann_ids = self.coco.getAnnIds(imgIds=img_id)
        target = self.coco.loadAnns(ann_ids)
        path = osp.join(self.root, self.coco.loadImgs(img_id)[0]['file_name'])
        assert osp.exists(path), 'Image path does not exist: {}'.format(path)
        # BUGFIX: 'path' is already prefixed with self.root above. The original
        # code joined self.root a second time, so the path asserted to exist
        # and the path actually read were different files.
        img = cv2.imread(path)
        height, width, _ = img.shape
        if self.target_transform is not None:
            target = self.target_transform(target, width, height)
        # NOTE(review): 'imgs' and 'segmentations' are only defined inside this
        # branch — the dataset currently requires a transform to be usable.
        if self.transform is not None:
            target = np.array(target, dtype=np.float32)
            imgs, boxes, labels, segmentations = self.transform(img, target[:, :4], target[:, 4])
            # to rgb (cv2 loads BGR)
            imgs = [img[:, :, (2, 1, 0)] for img in imgs]
            target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
        return [torch.from_numpy(img).permute(2, 0, 1) for img in imgs], target, segmentations, \
            height, width
    def pull_image(self, index):
        '''Returns the original image object at index in PIL form
        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.
        Argument:
            index (int): index of img to show
        Return:
            cv2 img
        '''
        img_id = self.ids[index]
        path = self.coco.loadImgs(img_id)[0]['file_name']
        return cv2.imread(osp.join(self.root, path), cv2.IMREAD_COLOR)
    def pull_anno(self, index):
        '''Returns the original annotation of image at index
        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.
        Argument:
            index (int): index of img to get annotation of
        Return:
            list: [img_id, [(label, bbox coords),...]]
                eg: ('001718', [('dog', (96, 13, 438, 332))])
        '''
        img_id = self.ids[index]
        ann_ids = self.coco.getAnnIds(imgIds=img_id)
        return self.coco.loadAnns(ann_ids)
    def __repr__(self):
        """Human-readable summary of the dataset configuration."""
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
if __name__ == "__main__":
    # Manual smoke test: stream the validation set through the SSD
    # augmentation pipeline and inspect the segmentation targets per batch.
    from utils.ssd_augmentation import SSDAugmentation
    # NOTE(review): machine-specific path — adjust before running elsewhere.
    dataset_root = "/raid/workspace/alexandrug/coco/coco"
    image_set = "val2017"
    data_mean = [104, 117, 123]
    dim = [256, 128]
    dataset = COCODetection(root=dataset_root, image_set=image_set,
                            transform=SSDAugmentation(dim, data_mean))
    train_loader = data.DataLoader(dataset, 1,
                                   num_workers=40,
                                   shuffle=False,  # collate_fn=detection_collate,
                                   pin_memory=True)
    # NOTE(review): the loop variable 'data' shadows the imported module alias
    # 'torch.utils.data as data'; rename one of them if this block grows.
    for batch_idx, data in enumerate(train_loader):
        seg = data[-1][0,0].numpy().copy()
        print(data[-1])
        torch.save({"target": data[-1]}, "test")
        # Normalise each segmentation channel into a probability map
        # (divide by 255, then by the per-channel sum).
        target = data[-1].float()
        target.div_(255)
        b, c, _, _ = target.size()
        target.div_(target.sum(dim=[2, 3]).unsqueeze(2).unsqueeze(3))
        target = target.view(b, c, -1)
        target = target.detach()
        print(target)
        # seg_soft = (segt.view(b, c, -1).float() / ).view(b,c, w, h)
        # t = x / x.sum(dim=[2, 3]).unsqueeze(2).unsqueeze(3)
        # cv2.imshow("seg", seg)
        #
        # seg_softv = seg_soft[0, 0].numpy()
        # print(seg_softv.max())
        # print(seg_softv)
        # cv2.imshow("seg_softv", seg_softv)
        # cv2.waitKey(0)
        #sg_scale0 = sg[0]
        #for bidx in range(gt.shape[0]):
        #    class_idx = int((gt[bidx])[-1])
        #print(COCO_CLASSES_2017[class_idx], gt[bidx])
        #    img = remake(im[0], data_mean, box=((gt[bidx][:4]) * dim[0]).astype(np.int))
        #cv2.imshow("IMG", img)
        #    img_small = cv2.resize(img, (16, 16))
        #    cv2.imshow("IMG_small", img_small)
        #    img_small = cv2.resize(img_small, (256, 256))
        #    cv2.imshow("IMG_resized", img_small)
        #cv2.imshow("Class Segm", sg_scale0[class_idx])
        #cv2.waitKey(0)
        print("-" * 100)
        print("Batch: " + str(batch_idx))
# #
# segmentation = torch.zeros(80, *im[0].size()[1:])
#
#
# print(im[0].size())
# print(im[1].size())
# print(gt)
# print(w, h)
#
# import cv2
#
# x = np.zeros((256, 256), dtype=np.uint8)
# x[100:150, 100:200] = 255
#
# blur = cv2.GaussianBlur(x,(129*2+1, 129), 0)
# cv2.normalize(blur, blur, 0, 255, cv2.NORM_MINMAX)
# cv2.imshow("test1", blur)
# cv2.waitKey(0)
# #
# for i in range(5):
# blur = cv2.GaussianBlur(blur,(129*2+1, 129), 0)
# cv2.normalize(blur, blur, 0, 255, cv2.NORM_MINMAX)
# cv2.imshow("test2", blur)
# cv2.waitKey(0)
#
# blur = cv2.medianBlur(blur,(129,129),0)
# # blur = cv2.GaussianBlur(blur,(51,51),0)
# # blur = cv2.GaussianBlur(blur,(51,51),0)
#
# cv2.imshow("test2", blur)
# cv2.waitKey(0)
#
#
# import numpy as np
#
#
# def softmax(x, temp=1.):
# """Compute softmax values for each sets of scores in x."""
# return np.exp(x/temp) / np.sum(np.exp(x/temp))
#
#
# def makeGaussian(size, fwhm = 3, center=None, x=None, y=None):
# """ Make a square gaussian kernel.
#
# size is the length of a side of the square
# fwhm is full-width-half-maximum, which
# can be thought of as an effective radius.
# """
#
# if x is None:
# x = np.arange(0, size, 1, float)
# y = x[:, np.newaxis]
#
# if center is None:
# x0 = y0 = size // 2
# else:
# x0 = center[0]
# y0 = center[1]
#
# return np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)
# # return 7**(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)
#
#
#
# size=256
#
# old_s = np.zeros((size, size))
# for i in range(100):
# yt = np.random.randint(0, size-1)
# yb = np.random.randint(yt+1, size)
# xl = np.random.randint(0, size-1)
# xr = np.random.randint(xl+1, size)
# yt = 50+50
# yb = 150+50
# xl = 50+50
# xr = 155+50
#
# img = np.zeros((size, size), dtype=np.uint8)
# height = yb - yt
# width = xr - xl
#
# arx = ary = 1.
# if width < height:
# ary = (width / float(height) + 1.) / 2.
# else:
# arx = (float(height) / width + 1.) / 2.
#
# # ar = | |
import numpy as np
import pymc3 as pm
import theano.tensor as T
import theano.tensor.signal.conv as C
from epimodel import EpidemiologicalParameters
from epimodel.pymc3_distributions.asymmetric_laplace import AsymmetricLaplace
class RandomWalkMobilityModel(pm.Model):
    """Bayesian NPI-effectiveness model with a random walk on log-R_t.

    Combines fixed NPI effects, an (optional) mask-wearing effect under
    several parameterisations, a nonlinear mobility effect, and a per-region
    weekly random walk in log-R space. Infections are linked to reported
    cases through a discretised negative-binomial reporting delay, with a
    negative-binomial observation likelihood over unmasked country-days.
    """
    def __init__(self, data, cm_plot_style=None, name="", model=None):
        """
        Constructor.
        :param data: PreprocessedData object
        :param cm_plot_style: NPI data
        :param name: model name
        :param model: required for PyMC3, but never used.
        """
        super().__init__(name, model)
        self.d = data
        self.trace = None
        # self.CMDelayCut = 30
        # compute days to actually observe, looking at the data which is masked, and which isn't.
        # indices of active country-days in the 1D Rs*Ds vector
        observed_active = []
        for r in range(self.nRs):
            for d in range(self.nDs):
                # if its not masked, after the cut, and not before 100 confirmed
                if (
                    # '== False' (not 'is False') is deliberate: the mask is a
                    # numpy array element, not a Python bool.
                    self.d.NewCases.mask[r, d] == False
                    # and d > self.CMDelayCut
                    and not np.isnan(self.d.Confirmed.data[r, d])
                ):
                    observed_active.append(r * self.nDs + d)
                else:
                    # Anything not observed gets masked out of the likelihood.
                    self.d.NewCases.mask[r, d] = True
        print(len(observed_active))
        self.all_observed_active = np.array(observed_active)
    @property
    def nRs(self) -> int:
        """
        :return: number of regions / countries
        """
        return len(self.d.Rs)
    @property
    def nDs(self) -> int:
        """
        :return: number of days
        """
        return len(self.d.Ds)
    @property
    def nCMs(self) -> int:
        """
        :return: number of countermeasures
        """
        return len(self.d.CMs)
    def build_model(
        self,
        r_walk_period=7,
        r_walk_noise_scale_prior=0.15,
        intervention_prior="AL",
        cm_prior_scale=10,
        wearing_parameterisation="exp",
        wearing_mean=0,
        wearing_mean_linear=0,
        wearing_mean_quadratic=0,
        wearing_sigma=0.4,
        wearing_sigma_linear=0.26,
        wearing_sigma_quadratic=0.13,
        mobility_mean=1.704,
        mobility_sigma=0.44,
        R_prior_mean_mean=1.07,
        R_prior_mean_scale=0.2,
        R_noise_scale=0.4,
        cm_prior="skewed",
        gi_mean_mean=5.06,
        gi_mean_sd=0.33,
        gi_sd_mean=2.11,
        gi_sd_sd=0.5,
        cases_delay_mean_mean=10.92,
        cases_delay_disp_mean=5.41,
        cases_truncation=32,
        log_init_mean=9.9,
        log_init_sd=9.9,
        IGNORE_START=10,
        IGNORE_END=10,
        mobility_leaveout=False,
        mob_and_wearing_only=False,
        **kwargs,
    ):
        """
        Build PyMC3 model.
        :param r_walk_period: days between random-walk steps (default weekly)
        :param r_walk_noise_scale_prior: scale of the HalfNormal prior on the walk noise
        :param wearing_parameterisation: one of 'exp', 'log_linear',
            'log_quadratic', 'log_quadratic_2', or None (no wearing effect)
        :param R_prior_mean: R_0 prior mean
        :param cm_prior_scale: NPI effectiveness prior scale
        :param cm_prior: NPI effectiveness prior type. Either 'normal', 'icl' or skewed (asymmetric laplace)
        :param gi_mean_mean: mean of normal prior placed over the generation interval mean
        :param gi_mean_sd: sd of normal prior placed over the generation interval mean
        :param gi_sd_mean: mean of normal prior placed over the generation interval sd
        :param gi_sd_sd: sd of normal prior placed over the generation interval sd
        :param growth_noise_scale: growth noise scale
        :param cases_delay_mean_mean: mean of normal prior placed over cases delay mean
        :param cases_delay_mean_sd: sd of normal prior placed over cases delay mean
        :param cases_delay_disp_mean: mean of normal prior placed over cases delay dispersion
        :param cases_delay_disp_sd: sd of normal prior placed over cases delay dispersion
        :param deaths_truncation: maximum death delay
        :param cases_truncation: maximum reporting delay
        """
        # Unrecognised keyword arguments are reported rather than silently dropped.
        for key, _ in kwargs.items():
            print(f"Argument: {key} not being used")
        # Ensure mobility feature is in the right place
        mob_feature = "avg_mobility_no_parks_no_residential"
        assert self.d.CMs[-2] == mob_feature
        with self.model:
            # build NPI Effectiveness priors
            # When a wearing effect is modelled separately, the last CM
            # ('percent_mc') and the mobility column are excluded from the
            # generic NPI prior (hence nCMs - 2 vs nCMs - 1 below).
            print(wearing_parameterisation)
            if wearing_parameterisation is None:
                if intervention_prior == "AL":
                    self.CM_Alpha = AsymmetricLaplace(
                        "CM_Alpha",
                        scale=cm_prior_scale,
                        symmetry=0.5,
                        shape=(self.nCMs - 1,),
                    )
                else:
                    self.CM_Alpha = pm.Normal(
                        "CM_Alpha", mu=0, sigma=cm_prior_scale, shape=(self.nCMs - 1,)
                    )
            else:
                assert self.d.CMs[-1] == "percent_mc"
                if intervention_prior == "AL":
                    self.CM_Alpha = AsymmetricLaplace(
                        "CM_Alpha",
                        scale=cm_prior_scale,
                        symmetry=0.5,
                        shape=(self.nCMs - 2,),
                    )
                else:
                    self.CM_Alpha = pm.Normal(
                        "CM_Alpha", mu=0, sigma=cm_prior_scale, shape=(self.nCMs - 2,)
                    )
            # Multiplicative R reduction per NPI: exp(-alpha).
            self.CMReduction = pm.Deterministic(
                "CMReduction", T.exp((-1.0) * self.CM_Alpha)
            )
            # prior specification for wearing options:
            if wearing_parameterisation == "exp":
                self.Wearing_Alpha = pm.Normal(
                    "Wearing_Alpha", mu=wearing_mean, sigma=wearing_sigma, shape=(1,)
                )
                self.WearingReduction = pm.Deterministic(
                    "WearingReduction", T.exp((-1.0) * self.Wearing_Alpha)
                )
            if wearing_parameterisation == "log_linear":
                self.Wearing_Alpha = pm.Normal(
                    "Wearing_Alpha", mu=wearing_mean_linear, sigma=wearing_sigma_linear, shape=(1,)
                )
                self.WearingReduction = pm.Deterministic(
                    "WearingReduction", 1.0 - self.Wearing_Alpha
                )
            if wearing_parameterisation == "log_quadratic":
                self.Wearing_Alpha = pm.Normal(
                    "Wearing_Alpha", mu=wearing_mean_quadratic, sigma=wearing_sigma_quadratic, shape=(1,)
                )
                self.WearingReduction = pm.Deterministic(
                    "WearingReduction", 1.0 - 2.0 * self.Wearing_Alpha
                )
            if wearing_parameterisation == "log_quadratic_2":
                # Two coefficients: linear and quadratic term share a prior.
                self.Wearing_Alpha = pm.Normal(
                    "Wearing_Alpha", mu=wearing_mean_quadratic, sigma=wearing_sigma_quadratic, shape=(2,)
                )
                self.WearingReduction = pm.Deterministic(
                    "WearingReduction",
                    1.0 - self.Wearing_Alpha[0] - self.Wearing_Alpha[1],
                )
            # Prior over the (sigmoid-shaped) mobility effect.
            self.Mobility_Alpha = pm.Normal(
                "Mobility_Alpha", mu=mobility_mean, sigma=mobility_sigma, shape=(1,)
            )
            self.MobilityReduction = pm.Deterministic(
                "MobilityReduction",
                (2.0 * (T.exp(-1.0 * self.Mobility_Alpha)))
                / (1.0 + T.exp(-1.0 * self.Mobility_Alpha)),
            )
            # Hierarchical prior over per-region base reproduction numbers.
            self.HyperRMean = pm.TruncatedNormal(
                "HyperRMean", mu=R_prior_mean_mean, sigma=R_prior_mean_scale, lower=0.1
            )
            self.HyperRVar = pm.HalfNormal("HyperRVar", sigma=R_noise_scale)
            # Non-centred parameterisation of the regional R.
            self.RegionR_noise = pm.Normal("RegionR_noise", 0, 1, shape=(self.nRs,))
            self.RegionR = pm.Deterministic(
                "RegionR", self.HyperRMean + self.RegionR_noise * self.HyperRVar
            )
            # load CMs active without wearing, compute log-R reduction and region log-R based on NPIs active
            if wearing_parameterisation is not None:
                self.ActiveCMs = pm.Data("ActiveCMs", self.d.ActiveCMs[:, :-2, :])
                self.ActiveCMReduction = (
                    T.reshape(self.CM_Alpha, (1, self.nCMs - 2, 1)) * self.ActiveCMs
                )
                self.ActiveCMs_wearing = pm.Data(
                    "ActiveCMs_wearing", self.d.ActiveCMs[:, -1, :]
                )
            else:
                self.ActiveCMs = pm.Data("ActiveCMs", self.d.ActiveCMs[:, :-1, :])
                self.ActiveCMReduction = (
                    T.reshape(self.CM_Alpha, (1, self.nCMs - 1, 1)) * self.ActiveCMs
                )
            # Total log-R reduction from the generic NPIs (summed over CMs).
            growth_reduction = T.sum(self.ActiveCMReduction, axis=1)
            if mob_and_wearing_only:
                # Ablation: drop generic NPI effects entirely.
                growth_reduction = 0
            else:
                pm.Deterministic("growth_reduction", growth_reduction)
            # calculating reductions for each of the wearing parameterisations
            if wearing_parameterisation == "exp":
                self.ActiveCMReduction_wearing = T.reshape(
                    self.Wearing_Alpha, (1, 1, 1)
                ) * T.reshape(
                    self.ActiveCMs_wearing,
                    (self.d.ActiveCMs.shape[0], 1, self.d.ActiveCMs.shape[2]),
                )
                growth_reduction_wearing = T.sum(self.ActiveCMReduction_wearing, axis=1)
            if wearing_parameterisation == "log_linear":
                self.ActiveCMReduction_wearing = T.reshape(
                    self.Wearing_Alpha, (1, 1, 1)
                ) * T.reshape(
                    self.ActiveCMs_wearing,
                    (self.d.ActiveCMs.shape[0], 1, self.d.ActiveCMs.shape[2]),
                )
                # relu clips at zero; eps keeps the log finite at the boundary.
                eps = 10 ** (-20)
                growth_reduction_wearing = -1.0 * T.log(
                    T.nnet.relu(1.0 - T.sum(self.ActiveCMReduction_wearing, axis=1))
                    + eps
                )
            if wearing_parameterisation == "log_quadratic":
                # Single coefficient applied to both the linear and squared term.
                self.ActiveCMReduction_wearing = (
                    T.reshape(self.Wearing_Alpha, (1, 1, 1))
                    * T.reshape(
                        self.ActiveCMs_wearing,
                        (self.d.ActiveCMs.shape[0], 1, self.d.ActiveCMs.shape[2]),
                    )
                    + T.reshape(self.Wearing_Alpha, (1, 1, 1))
                    * T.reshape(
                        self.ActiveCMs_wearing,
                        (self.d.ActiveCMs.shape[0], 1, self.d.ActiveCMs.shape[2]),
                    )
                    ** 2
                )
                eps = 10 ** (-20)
                growth_reduction_wearing = -1.0 * T.log(
                    T.nnet.relu(1.0 - T.sum(self.ActiveCMReduction_wearing, axis=1))
                    + eps
                )
            # TODO: take out these reshapes. Can just add an axis manually.
            if wearing_parameterisation == "log_quadratic_2":
                self.ActiveCMReduction_wearing = (
                    T.reshape(self.Wearing_Alpha[0], (1, 1, 1))
                    * T.reshape(
                        self.ActiveCMs_wearing,
                        (self.d.ActiveCMs.shape[0], 1, self.d.ActiveCMs.shape[2]),
                    )
                    + T.reshape(self.Wearing_Alpha[1], (1, 1, 1))
                    * T.reshape(
                        self.ActiveCMs_wearing,
                        (self.d.ActiveCMs.shape[0], 1, self.d.ActiveCMs.shape[2]),
                    )
                    ** 2
                )
                eps = 10 ** (-20)
                growth_reduction_wearing = -1.0 * T.log(
                    T.nnet.relu(1.0 - T.sum(self.ActiveCMReduction_wearing, axis=1))
                    + eps
                )
            if wearing_parameterisation is None:
                growth_reduction_wearing = 0
            else:
                pm.Deterministic("growth_reduction_wearing", growth_reduction_wearing)
            # make reduction for mobility
            self.ActiveCMs_mobility = pm.Data(
                "ActiveCMs_mobility", self.d.ActiveCMs[:, -2, :]
            )
            self.ActiveCMReduction_mobility = T.reshape(
                self.Mobility_Alpha, (1, 1, 1)
            ) * T.reshape(
                self.ActiveCMs_mobility,
                (self.d.ActiveCMs.shape[0], 1, self.d.ActiveCMs.shape[2]),
            )
            growth_reduction_mobility = -1.0 * T.log(
                T.sum(
                    (2.0 * T.exp(-1.0 * self.ActiveCMReduction_mobility))
                    / (1.0 + T.exp(-1.0 * self.ActiveCMReduction_mobility)),
                    axis=1,
                )
            )
            if mobility_leaveout:
                growth_reduction_mobility = 0
                initial_mobility_reduction = 0
            else:
                # Anchor mobility to day 0 so only changes in mobility move R.
                initial_mobility_reduction = growth_reduction_mobility[:, 0]
                initial_mobility_reduction = T.reshape(initial_mobility_reduction, (self.nRs, 1))
                pm.Deterministic("initial_mobility_reduction", initial_mobility_reduction)
                pm.Deterministic("growth_reduction_mobility", growth_reduction_mobility)
            # random walk
            nNP = int(self.nDs / r_walk_period) - 1
            r_walk_noise_scale = pm.HalfNormal(
                "r_walk_noise_scale", r_walk_noise_scale_prior
            )
            # rescaling variables by 10 for better NUTS adaptation
            r_walk_noise = pm.Normal("r_walk_noise", 0, 1.0 / 10, shape=(self.nRs, nNP))
            # Cumulative-sum the step noise, then hold each value for
            # r_walk_period days; the first 2 periods carry no walk.
            expanded_r_walk_noise = T.repeat(
                r_walk_noise_scale * 10.0 * T.cumsum(r_walk_noise, axis=-1),
                r_walk_period,
                axis=-1,
            )[: self.nRs, : (self.nDs - 2 * r_walk_period)]
            full_log_Rt_noise = T.zeros((self.nRs, self.nDs))
            full_log_Rt_noise = T.subtensor.set_subtensor(
                full_log_Rt_noise[:, 2 * r_walk_period :], expanded_r_walk_noise
            )
            # log R_t = log R_region - NPI effects - wearing - (mobility change) + walk.
            self.ExpectedLogR = pm.Deterministic(
                "ExpectedLogR",
                T.reshape(pm.math.log(self.RegionR), (self.nRs, 1))
                - growth_reduction
                - growth_reduction_wearing
                - (growth_reduction_mobility - initial_mobility_reduction)
                + full_log_Rt_noise,
            )
            self.Rt_walk = pm.Deterministic(
                "Rt_walk",
                T.exp(T.log(self.RegionR.reshape((self.nRs, 1))) + full_log_Rt_noise),
            )
            self.Rt_cm = pm.Deterministic(
                "Rt_cm",
                T.exp(
                    T.log(self.RegionR.reshape((self.nRs, 1)))
                    - growth_reduction
                    - growth_reduction_wearing
                ),
            )
            # convert R into growth rates
            self.GI_mean = pm.Normal("GI_mean", gi_mean_mean, gi_mean_sd)
            self.GI_sd = pm.Normal("GI_sd", gi_sd_mean, gi_sd_sd)
            # Gamma-distributed generation interval: alpha/beta from mean & sd.
            gi_beta = self.GI_mean / self.GI_sd ** 2
            gi_alpha = self.GI_mean ** 2 / self.GI_sd ** 2
            # NOTE(review): np.exp on a Theano tensor — presumably dispatches
            # to the Theano elemwise op; T.exp would be more explicit. Confirm
            # before touching.
            self.ExpectedGrowth = pm.Deterministic(
                "ExpectedGrowth",
                gi_beta
                * (
                    np.exp(self.ExpectedLogR / gi_alpha)
                    - T.ones_like(self.ExpectedLogR)
                ),
            )
            self.Growth = self.ExpectedGrowth
            # Originally N(0, 50)
            self.InitialSize_log = pm.Normal(
                "InitialSize_log", log_init_mean, log_init_sd, shape=(self.nRs,)
            )
            # Exponential growth: log-infections are the cumulative growth.
            self.Infected_log = pm.Deterministic(
                "Infected_log",
                T.reshape(self.InitialSize_log, (self.nRs, 1))
                + self.Growth.cumsum(axis=1),
            )
            self.Infected = pm.Deterministic("Infected", pm.math.exp(self.Infected_log))
            # Discretised negative-binomial reporting delay, truncated at
            # cases_truncation days and renormalised to sum to 1.
            cases_delay_dist = pm.NegativeBinomial.dist(
                mu=cases_delay_mean_mean, alpha=cases_delay_disp_mean
            )
            bins = np.arange(0, cases_truncation)
            pmf = T.exp(cases_delay_dist.logp(bins))
            pmf = pmf / T.sum(pmf)
            reporting_delay = pmf.reshape((1, cases_truncation))
            # Convolve infections with the reporting delay to get expected cases.
            expected_confirmed = C.conv2d(
                self.Infected, reporting_delay, border_mode="full"
            )[:, : self.nDs]
            self.ExpectedCases = pm.Deterministic(
                "ExpectedCases", expected_confirmed.reshape((self.nRs, self.nDs))
            )
            # Observation Noise Dispersion Parameter (negbin alpha)
            self.Psi = pm.HalfNormal("Psi", 5)
            # effectively handle missing values ourselves
            # likelihood
            self.ObservedCases = pm.NegativeBinomial(
                "ObservedCases",
                mu=self.ExpectedCases.reshape((self.nRs * self.nDs,))[
                    self.all_observed_active
                ],
                alpha=self.Psi,
                shape=(len(self.all_observed_active),),
                observed=self.d.NewCases.data.reshape((self.nRs * self.nDs,))[
                    self.all_observed_active
                ],
            )
class MandateMobilityModel(pm.Model):
def __init__(self, data, cm_plot_style=None, name="", model=None):
"""
Constructor.
:param data: PreprocessedData object
:param cm_plot_style: NPI data
:param name: model name
:param model: required for PyMC3, but never used.
"""
super().__init__(name, model)
self.d = data
self.trace = None
# self.CMDelayCut = 30
# compute days to actually observe, looking at the data which is masked, and which isn't.
# indices of active country-days in the 1D Rs*Ds vector
observed_active = []
for r in range(self.nRs):
for d in range(self.nDs):
# if its not masked, after the cut, and not before 100 confirmed
if (
self.d.NewCases.mask[r, | |
= User.objects.create_user(username=username, password=password, email=email, )
usr.user_permissions.set(perms)
self.client.login(username=username, password=password)
deadline = timezone.now() + datetime.timedelta(days=32)
postfix = ' 1'
project_1 = create_project(**{
'project_name': 'Project name' + postfix,
'description': 'Project description' + postfix,
'budget': 100000,
'deadline': deadline,
'closed': True,
})
response = self.client.get(reverse('projects:detail', args=(project_1.id,)))
self.assertEqual(response.status_code, 200)
self.assertContains(response, _("No tasks are available."))
self.assertQuerysetEqual(response.context['task_list'], [])
self.assertEqual(response.context['project']['percentage_completed'], 0.0)
def test_projects_percentage_completed_with_three_tasks(self):
"""Test correct representation of percentage completed for the
project with three different tasks.
"""
username = 'test'
email = '<EMAIL>'
password = '<PASSWORD>'
perms = Permission.objects.filter(codename__in=['view_project'])
usr = User.objects.create_user(username=username, password=password, email=email, )
usr.user_permissions.set(perms)
self.client.login(username=username, password=password)
deadline = timezone.now() + datetime.timedelta(days=32)
start_date = timezone.now()
complete_date = timezone.now() + datetime.timedelta(days=8)
postfix = '_1'
firstname = 'Marshall' + postfix
middlename = 'Bruce' + postfix
surname = 'Mathers' + postfix
email = f'<EMAIL>'
birthdate = timezone.now() + datetime.timedelta(days=-365 * 30)
employee = create_employee(**{
'firstname': firstname,
'middlename': middlename,
'surname': surname,
'email': email,
'birthdate': birthdate,
})
postfix = ' 1'
project_1 = create_project(**{
'project_name': 'Project name' + postfix,
'description': 'Project description' + postfix,
'budget': 100000,
'deadline': deadline,
'closed': True,
})
create_task(**{
'project': project_1,
'task_name': 'Task name' + postfix,
'description': 'description',
'start_date': start_date,
'complete_date': complete_date,
'author': employee,
'assignee': employee,
'status': NEW,
})
postfix = ' 2'
create_task(**{
'project': project_1,
'task_name': 'Task name' + postfix,
'description': 'description',
'start_date': start_date,
'complete_date': complete_date,
'author': employee,
'assignee': employee,
'status': COMPLETED,
})
postfix = ' 3'
create_task(**{
'project': project_1,
'task_name': 'Task name' + postfix,
'description': 'description',
'start_date': start_date,
'complete_date': complete_date,
'author': employee,
'assignee': employee,
'status': COMPLETED,
})
postfix = ' 4'
create_task(**{
'project': project_1,
'task_name': 'Task name' + postfix,
'description': 'description',
'start_date': start_date,
'complete_date': complete_date,
'author': employee,
'assignee': employee,
'status': COMPLETED,
})
response = self.client.get(reverse('projects:detail', args=(project_1.id,)))
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['task_list'].order_by('id'),
["<Task: Task name 1>", "<Task: Task name 2>",
"<Task: Task name 3>", "<Task: Task name 4>"]
)
self.assertEqual(response.context['project']['percentage_completed'], 75.0)
class ProjectCreateViewTest(TestCase):
    """Tests for the create view of the Project model."""
    def test_create_project(self):
        """Test creating of project."""
        username = 'test'
        email = '<EMAIL>'
        password = '<PASSWORD>'
        creator = User.objects.create_user(username=username, password=password, email=email, )
        creator.user_permissions.set(
            Permission.objects.filter(codename__in=['add_project', 'view_project'])
        )
        self.client.login(username=username, password=password)
        deadline = timezone.now() + datetime.timedelta(days=32)
        form_data = {
            'project_name': 'Project name',
            'description': 'Project description',
            'budget': 100000,
            'deadline': deadline,
            'closed': False,
        }
        response = self.client.post(reverse('projects:create'), form_data)
        # A successful creation redirects to the project list page.
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/projects/')
        expected_rows = [
            {
                'project_name': 'Project name',
                'deadline': deadline,
                'budget': 100000.0,
                'closed': False,
                'tasks_count': 0,
                'percentage_completed': 0.0
            }
        ]
        response = self.client.get(response.url)
        fields = list(response.context['project_list']._fields)
        fields.remove('id')
        self.assertQuerysetEqual(
            response.context['project_list'].values(*fields).order_by('id'),
            expected_rows,
            transform=lambda x: x
        )
class ProjectEditViewTest(TestCase):
    """Test Project edit view."""
    def test_edit_project_without_next(self):
        """Test editing project without next-hook."""
        username = 'test'
        email = '<EMAIL>'
        password = '<PASSWORD>'
        editor = User.objects.create_user(username=username, password=password, email=email, )
        editor.user_permissions.set(
            Permission.objects.filter(codename__in=['change_project', 'view_project'])
        )
        self.client.login(username=username, password=password)
        deadline = timezone.now() + datetime.timedelta(days=32)
        project_1 = create_project(
            project_name='Project name',
            description='Project description',
            budget=100000,
            deadline=deadline,
            closed=True,
        )
        response = self.client.post(
            reverse('projects:edit', args=(project_1.id,)),
            {
                'project_name': 'Project name 1',
                'description': 'Project description',
                'budget': 100000,
                'deadline': deadline,
                'closed': False,
            }
        )
        # Without a ?next= parameter the view redirects to the list page.
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/projects/')
        expected_rows = [
            {
                'project_name': 'Project name 1',
                'deadline': deadline,
                'budget': 100000.0,
                'closed': False,
                'tasks_count': 0,
                'percentage_completed': 0.0
            }
        ]
        response = self.client.get(response.url)
        fields = list(response.context['project_list']._fields)
        fields.remove('id')
        self.assertQuerysetEqual(
            response.context['project_list'].values(*fields).order_by('id'),
            expected_rows,
            transform=lambda x: x
        )
    def test_edit_project_with_next(self):
        """Test editing project with next-hook."""
        username = 'test'
        email = '<EMAIL>'
        password = '<PASSWORD>'
        editor = User.objects.create_user(username=username, password=password, email=email, )
        editor.user_permissions.set(
            Permission.objects.filter(codename__in=['change_project', 'view_project'])
        )
        self.client.login(username=username, password=password)
        deadline = timezone.now() + datetime.timedelta(days=32)
        project_1 = create_project(
            project_name='Project name',
            description='Project description',
            budget=100000,
            deadline=deadline,
            closed=True,
        )
        edit_url = (reverse('projects:edit', args=(project_1.id,))
                    + '?next=' + reverse('projects:detail', args=(project_1.id,)))
        response = self.client.post(
            edit_url,
            {
                'project_name': 'Project name 1',
                'description': 'Project description',
                'budget': 100000,
                'deadline': deadline,
                'closed': True,
            }
        )
        # With ?next= the view redirects back to the detail page instead.
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, f'/projects/{project_1.id}/')
        response = self.client.get(response.url)
        self.assertEqual(
            response.context['project']['project_name'],
            'Project name 1'
        )
class ProjectDeleteViewTest(TestCase):
    """Test delete view of the Project model."""
    def _login_and_create_projects(self):
        """Log in a user with delete/view permissions and create two projects.

        Returns:
            tuple: (project to delete, deadline of the surviving project).
        """
        username = 'test'
        email = '<EMAIL>'
        password = '<PASSWORD>'
        perms = Permission.objects.filter(codename__in=['delete_project', 'view_project'])
        usr = User.objects.create_user(username=username, password=password, email=email, )
        usr.user_permissions.set(perms)
        self.client.login(username=username, password=password)
        deadline = timezone.now() + datetime.timedelta(days=32)
        project_1 = create_project(**{
            'project_name': 'Project name 1',
            'description': 'Project description',
            'budget': 100000,
            'deadline': deadline,
            'closed': True,
        })
        deadline = timezone.now() + datetime.timedelta(days=32)
        create_project(**{
            'project_name': 'Project name 2',
            'description': 'Project description',
            'budget': 100000,
            'deadline': deadline,
            'closed': True,
        })
        return project_1, deadline
    def _assert_only_second_project_left(self, response, deadline):
        """Follow the redirect and check that only 'Project name 2' remains."""
        self.assertEqual(response.status_code, 302)
        # BUGFIX: was f'/projects/' — an f-string with no placeholders (F541).
        self.assertEqual(response.url, '/projects/')
        qs_target = [
            {
                'project_name': 'Project name 2',
                'deadline': deadline,
                'budget': 100000.0,
                'closed': True,
                'tasks_count': 0,
                'percentage_completed': 0.0
            }
        ]
        response = self.client.get(response.url)
        fields = list(response.context['project_list']._fields)
        fields.remove('id')
        self.assertQuerysetEqual(
            response.context['project_list'].values(*fields).order_by('id'),
            qs_target,
            transform=lambda x: x
        )
    def test_delete_project_with_next(self):
        """Test deleting project with next-hook."""
        project_1, deadline = self._login_and_create_projects()
        response = self.client.post(
            reverse('projects:delete', args=(project_1.id,))
            + '?next=' + reverse('projects:detail', args=(project_1.id,))
        )
        self._assert_only_second_project_left(response, deadline)
    def test_delete_project_without_next(self):
        """Test deleting project without next-hook."""
        project_1, deadline = self._login_and_create_projects()
        response = self.client.post(
            reverse('projects:delete', args=(project_1.id,))
        )
        self._assert_only_second_project_left(response, deadline)
class TaskModelTest(TestCase):
    """Tests for the Task model's string and status representations."""

    def _create_sample_task(self):
        """Create and return one task (plus its parent project) from fixed sample data."""
        deadline = timezone.now() + datetime.timedelta(days=32)
        start_date = timezone.now()
        complete_date = timezone.now() + datetime.timedelta(days=8)
        project = create_project(
            project_name='Project name 1',
            description='Project description 1',
            budget=100000,
            deadline=deadline,
            closed=True,
        )
        return create_task(
            project=project,
            task_name='Task name 1',
            description='description',
            start_date=start_date,
            complete_date=complete_date,
            author=None,
            assignee=None,
            status=NEW,
        )

    def test_model_str(self):
        """``__str__`` should return the task name."""
        task = self._create_sample_task()
        self.assertEqual(str(task), 'Task name 1')

    def test_get_status_repr(self):
        """``get_status_repr`` should map each status constant to its translated label."""
        task = self._create_sample_task()
        self.assertEqual(task.get_status_repr(), _('New'))
        task.status = IN_PROGRESS
        self.assertEqual(task.get_status_repr(), _('In progress'))
        task.status = COMPLETED
        self.assertEqual(task.get_status_repr(), _('Finished'))
class TaskListViewTest(TestCase):
    """Tests for the Task model's list view."""

    def _login_with_view_perm(self):
        """Create a user holding only ``view_task`` and log the test client in."""
        user = User.objects.create_user(username='test', password='<PASSWORD>', email='<EMAIL>', )
        user.user_permissions.set(
            Permission.objects.filter(codename__in=['view_task'])
        )
        self.client.login(username='test', password='<PASSWORD>')

    def test_no_tasks(self):
        """An empty task table renders the placeholder message and an empty queryset."""
        self._login_with_view_perm()
        response = self.client.get(reverse('projects:task_list'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, _("No tasks are available."))
        self.assertQuerysetEqual(response.context['task_list'], [])

    def test_two_tasks(self):
        """Both stored tasks appear in the list view."""
        self._login_with_view_perm()
        birthdate = timezone.now() + datetime.timedelta(days=-365 * 30)
        employee = create_employee(
            firstname='Marshall_1',
            middlename='Bruce_1',
            surname='Mathers_1',
            email='<EMAIL>',
            birthdate=birthdate,
        )
        deadline = timezone.now() + datetime.timedelta(days=32)
        start_date = timezone.now()
        complete_date = timezone.now() + datetime.timedelta(days=8)
        project = create_project(
            project_name='Project name 1',
            description='Project description 1',
            budget=100000,
            deadline=deadline,
            closed=True,
        )
        # Two tasks sharing the same project, author and assignee.
        for task_name in ('Task name 1', 'Task name 2'):
            create_task(
                project=project,
                task_name=task_name,
                description='description',
                start_date=start_date,
                complete_date=complete_date,
                author=employee,
                assignee=employee,
                status=NEW,
            )
        response = self.client.get(reverse('projects:task_list'))
        self.assertEqual(response.status_code, 200)
        self.assertQuerysetEqual(
            response.context['task_list'].order_by('id'),
            ['<Task: Task name 1>', '<Task: Task name 2>']
        )
class TaskDetailViewTest(TestCase):
"""Tests for detail view of the Task model.
"""
def test_task_without_comments(self):
"""Test for detail view without any comment.
"""
username = 'test'
email = '<EMAIL>'
password = '<PASSWORD>'
perms = Permission.objects.filter(codename__in=['view_task'])
usr = User.objects.create_user(username=username, | |
_SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
def build_paths_date_time_null_request(date_time_path: datetime.datetime, **kwargs: Any) -> HttpRequest:
    """Build a GET request for /paths/datetime/null/{dateTimePath} (ISO-8601 path value)."""
    # Serialize the path argument and splice it into the URL template.
    url = _format_url_section(
        "/paths/datetime/null/{dateTimePath}",
        dateTimePath=_SERIALIZER.url("date_time_path", date_time_path, "iso-8601"),
    )
    # Start from any caller-supplied headers, then force the JSON Accept header.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url=url, headers=headers, **kwargs)
def build_paths_base64_url_request(base64_url_path: bytes, **kwargs: Any) -> HttpRequest:
    """Build a GET request for /paths/string/bG9yZW0/{base64UrlPath} (base64url path value)."""
    url = _format_url_section(
        "/paths/string/bG9yZW0/{base64UrlPath}",
        base64UrlPath=_SERIALIZER.url("base64_url_path", base64_url_path, "base64"),
    )
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url=url, headers=headers, **kwargs)
def build_paths_array_csv_in_path_request(array_path: List[str], **kwargs: Any) -> HttpRequest:
    """Build a GET request whose path segment is a comma-joined string array."""
    # div="," serializes the list as CSV before URL-encoding it into the path.
    url = _format_url_section(
        "/paths/array/ArrayPath1%2cbegin%21%2A%27%28%29%3B%3A%40%20%26%3D%2B%24%2C%2F%3F%23%5B%5Dend%2c%2c/{arrayPath}",
        arrayPath=_SERIALIZER.url("array_path", array_path, "[str]", div=","),
    )
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url=url, headers=headers, **kwargs)
def build_paths_unix_time_url_request(unix_time_url_path: datetime.datetime, **kwargs: Any) -> HttpRequest:
    """Build a GET request for /paths/int/1460505600/{unixTimeUrlPath} (unix-time path value)."""
    url = _format_url_section(
        "/paths/int/1460505600/{unixTimeUrlPath}",
        unixTimeUrlPath=_SERIALIZER.url("unix_time_url_path", unix_time_url_path, "unix-time"),
    )
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url=url, headers=headers, **kwargs)
def build_queries_get_boolean_true_request(**kwargs: Any) -> HttpRequest:
    """Build a GET request for /queries/bool/true; boolQuery defaults to True."""
    bool_query = kwargs.pop("bool_query", True)  # type: bool
    # Fold the serialized query value into any caller-supplied params.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params["boolQuery"] = _SERIALIZER.query("bool_query", bool_query, "bool")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url="/queries/bool/true", params=params, headers=headers, **kwargs)
def build_queries_get_boolean_false_request(**kwargs: Any) -> HttpRequest:
    """Build a GET request for /queries/bool/false; boolQuery defaults to False."""
    bool_query = kwargs.pop("bool_query", False)  # type: bool
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params["boolQuery"] = _SERIALIZER.query("bool_query", bool_query, "bool")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url="/queries/bool/false", params=params, headers=headers, **kwargs)
def build_queries_get_boolean_null_request(*, bool_query: Optional[bool] = None, **kwargs: Any) -> HttpRequest:
    """Build a GET request for /queries/bool/null; boolQuery is sent only when not None."""
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if bool_query is not None:
        params["boolQuery"] = _SERIALIZER.query("bool_query", bool_query, "bool")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url="/queries/bool/null", params=params, headers=headers, **kwargs)
def build_queries_get_int_one_million_request(**kwargs: Any) -> HttpRequest:
    """Build a GET request for /queries/int/1000000; intQuery defaults to 1000000."""
    int_query = kwargs.pop("int_query", 1000000)  # type: int
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params["intQuery"] = _SERIALIZER.query("int_query", int_query, "int")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url="/queries/int/1000000", params=params, headers=headers, **kwargs)
def build_queries_get_int_negative_one_million_request(**kwargs: Any) -> HttpRequest:
    """Build a GET request for /queries/int/-1000000; intQuery defaults to -1000000."""
    int_query = kwargs.pop("int_query", -1000000)  # type: int
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params["intQuery"] = _SERIALIZER.query("int_query", int_query, "int")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url="/queries/int/-1000000", params=params, headers=headers, **kwargs)
def build_queries_get_int_null_request(*, int_query: Optional[int] = None, **kwargs: Any) -> HttpRequest:
    """Build a GET request for /queries/int/null; intQuery is sent only when not None."""
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if int_query is not None:
        params["intQuery"] = _SERIALIZER.query("int_query", int_query, "int")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url="/queries/int/null", params=params, headers=headers, **kwargs)
def build_queries_get_ten_billion_request(**kwargs: Any) -> HttpRequest:
    """Build a GET request for /queries/long/10000000000; longQuery defaults to 10000000000."""
    long_query = kwargs.pop("long_query", 10000000000)  # type: int
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params["longQuery"] = _SERIALIZER.query("long_query", long_query, "long")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url="/queries/long/10000000000", params=params, headers=headers, **kwargs)
def build_queries_get_negative_ten_billion_request(**kwargs: Any) -> HttpRequest:
    """Build a GET request for /queries/long/-10000000000; longQuery defaults to -10000000000."""
    long_query = kwargs.pop("long_query", -10000000000)  # type: int
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params["longQuery"] = _SERIALIZER.query("long_query", long_query, "long")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url="/queries/long/-10000000000", params=params, headers=headers, **kwargs)
def build_queries_get_long_null_request(*, long_query: Optional[int] = None, **kwargs: Any) -> HttpRequest:
    """Build a GET request for /queries/long/null; longQuery is sent only when not None."""
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if long_query is not None:
        params["longQuery"] = _SERIALIZER.query("long_query", long_query, "long")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url="/queries/long/null", params=params, headers=headers, **kwargs)
def build_queries_float_scientific_positive_request(**kwargs: Any) -> HttpRequest:
    """Build a GET request for /queries/float/1.034E+20; floatQuery defaults to 1.034e20."""
    float_query = kwargs.pop("float_query", 103400000000000000000)  # type: float
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params["floatQuery"] = _SERIALIZER.query("float_query", float_query, "float")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url="/queries/float/1.034E+20", params=params, headers=headers, **kwargs)
def build_queries_float_scientific_negative_request(**kwargs: Any) -> HttpRequest:
    """Build a GET request for /queries/float/-1.034E-20; floatQuery defaults to -1.034e-20."""
    float_query = kwargs.pop("float_query", -1.034e-20)  # type: float
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params["floatQuery"] = _SERIALIZER.query("float_query", float_query, "float")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url="/queries/float/-1.034E-20", params=params, headers=headers, **kwargs)
def build_queries_float_null_request(*, float_query: Optional[float] = None, **kwargs: Any) -> HttpRequest:
    """Build a GET request for /queries/float/null; floatQuery is sent only when not None."""
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if float_query is not None:
        params["floatQuery"] = _SERIALIZER.query("float_query", float_query, "float")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url="/queries/float/null", params=params, headers=headers, **kwargs)
def build_queries_double_decimal_positive_request(**kwargs: Any) -> HttpRequest:
    """Build a GET request for /queries/double/9999999.999; doubleQuery defaults to 9999999.999."""
    double_query = kwargs.pop("double_query", 9999999.999)  # type: float
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params["doubleQuery"] = _SERIALIZER.query("double_query", double_query, "float")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url="/queries/double/9999999.999", params=params, headers=headers, **kwargs)
def build_queries_double_decimal_negative_request(**kwargs: Any) -> HttpRequest:
    """Build a GET request for /queries/double/-9999999.999; doubleQuery defaults to -9999999.999."""
    double_query = kwargs.pop("double_query", -9999999.999)  # type: float
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params["doubleQuery"] = _SERIALIZER.query("double_query", double_query, "float")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url="/queries/double/-9999999.999", params=params, headers=headers, **kwargs)
def build_queries_double_null_request(*, double_query: Optional[float] = None, **kwargs: Any) -> HttpRequest:
    """Build a GET request for /queries/double/null; doubleQuery is sent only when not None."""
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if double_query is not None:
        params["doubleQuery"] = _SERIALIZER.query("double_query", double_query, "float")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url="/queries/double/null", params=params, headers=headers, **kwargs)
def build_queries_string_unicode_request(**kwargs: Any) -> HttpRequest:
    """Build a GET request for /queries/string/unicode/ with a multibyte default stringQuery."""
    string_query = kwargs.pop("string_query", "啊齄丂狛狜隣郎隣兀﨩")  # type: str
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params["stringQuery"] = _SERIALIZER.query("string_query", string_query, "str")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url="/queries/string/unicode/", params=params, headers=headers, **kwargs)
def build_queries_string_url_encoded_request(**kwargs: Any) -> HttpRequest:
    """Build a GET request whose default stringQuery exercises URL-encoding of special characters."""
    string_query = kwargs.pop("string_query", "begin!*'();:@ &=+$,/?#[]end")  # type: str
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params["stringQuery"] = _SERIALIZER.query("string_query", string_query, "str")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(
        method="GET",
        url="/queries/string/begin%21%2A%27%28%29%3B%3A%40%20%26%3D%2B%24%2C%2F%3F%23%5B%5Dend",
        params=params,
        headers=headers,
        **kwargs
    )
def build_queries_string_empty_request(**kwargs: Any) -> HttpRequest:
    """Build a GET request for /queries/string/empty; stringQuery defaults to the empty string."""
    string_query = kwargs.pop("string_query", "")  # type: str
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params["stringQuery"] = _SERIALIZER.query("string_query", string_query, "str")
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers["Accept"] = _SERIALIZER.header("accept", "application/json", "str")
    return HttpRequest(method="GET", url="/queries/string/empty", params=params, headers=headers, **kwargs)
def build_queries_string_null_request(*, string_query: Optional[str] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = | |
0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
combined_columns = self.columns.tolist() + self.columns.union(
other.index).difference(self.columns).tolist()
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
index=index,
columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.loc[:, self.columns]
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity)
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
         sort=False):
    """
    Join columns with other DataFrame either on index or on a key
    column. Efficiently Join multiple DataFrame objects by index at once by
    passing a list.

    Parameters
    ----------
    other : DataFrame, Series with name field set, or list of DataFrame
        Index should be similar to one of the columns in this one. If a
        Series is passed, its name attribute must be set, and that will be
        used as the column name in the resulting joined DataFrame
    on : name, tuple/list of names, or array-like
        Column or index level name(s) in the caller to join on the index
        in `other`, otherwise joins index-on-index. If multiple
        values given, the `other` DataFrame must have a MultiIndex. Can
        pass an array as the join key if it is not already contained in
        the calling DataFrame. Like an Excel VLOOKUP operation
    how : {'left', 'right', 'outer', 'inner'}, default: 'left'
        How to handle the operation of the two objects.

        * left: use calling frame's index (or column if on is specified)
        * right: use other frame's index
        * outer: form union of calling frame's index (or column if on is
          specified) with other frame's index, and sort it
          lexicographically
        * inner: form intersection of calling frame's index (or column if
          on is specified) with other frame's index, preserving the order
          of the calling's one
    lsuffix : string
        Suffix to use from left frame's overlapping columns
    rsuffix : string
        Suffix to use from right frame's overlapping columns
    sort : boolean, default False
        Order result DataFrame lexicographically by the join key. If False,
        the order of the join key depends on the join type (how keyword)

    Notes
    -----
    on, lsuffix, and rsuffix options are not supported when passing a list
    of DataFrame objects

    Support for specifying index levels as the `on` parameter was added
    in version 0.23.0

    Examples
    --------
    >>> caller = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
    ...                        'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})

    >>> caller
        A key
    0  A0  K0
    1  A1  K1
    2  A2  K2
    3  A3  K3
    4  A4  K4
    5  A5  K5

    >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
    ...                       'B': ['B0', 'B1', 'B2']})

    >>> other
        B key
    0  B0  K0
    1  B1  K1
    2  B2  K2

    Join DataFrames using their indexes.

    >>> caller.join(other, lsuffix='_caller', rsuffix='_other')
        A key_caller    B key_other
    0  A0         K0   B0        K0
    1  A1         K1   B1        K1
    2  A2         K2   B2        K2
    3  A3         K3  NaN       NaN
    4  A4         K4  NaN       NaN
    5  A5         K5  NaN       NaN

    If we want to join using the key columns, we need to set key to be
    the index in both caller and other. The joined DataFrame will have
    key as its index.

    >>> caller.set_index('key').join(other.set_index('key'))
          A    B
    key
    K0   A0   B0
    K1   A1   B1
    K2   A2   B2
    K3   A3  NaN
    K4   A4  NaN
    K5   A5  NaN

    Another option to join using the key columns is to use the on
    parameter. DataFrame.join always uses other's index but we can use any
    column in the caller. This method preserves the original caller's
    index in the result.

    >>> caller.join(other.set_index('key'), on='key')
        A key    B
    0  A0  K0   B0
    1  A1  K1   B1
    2  A2  K2   B2
    3  A3  K3  NaN
    4  A4  K4  NaN
    5  A5  K5  NaN

    See also
    --------
    DataFrame.merge : For column(s)-on-columns(s) operations

    Returns
    -------
    joined : DataFrame
    """
    # Delegate to _join_compat so subclasses (e.g. SparseDataFrame) can
    # share the dispatch logic while overriding only what they need.
    # For SparseDataFrame's benefit
    return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
                             rsuffix=rsuffix, sort=sort)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
                 sort=False):
    # Shared implementation behind DataFrame.join.  Dispatches between a
    # single-frame merge and a multi-frame concat / iterated-merge path.
    from pandas.core.reshape.merge import merge
    from pandas.core.reshape.concat import concat

    if isinstance(other, Series):
        # A Series must be named so it can become a column of the result.
        if other.name is None:
            raise ValueError('Other Series must have a name')
        other = DataFrame({other.name: other})

    if isinstance(other, DataFrame):
        # Single frame: a plain index/column-on-index merge.
        return merge(self, other, left_on=on, how=how,
                     left_index=on is None, right_index=True,
                     suffixes=(lsuffix, rsuffix), sort=sort)
    else:
        # List/iterable of frames: only index-on-index joins are supported.
        if on is not None:
            raise ValueError('Joining multiple DataFrames only supported'
                             ' for joining on index')

        frames = [self] + list(other)

        # concat requires unique indexes on every frame; otherwise fall
        # back to pairwise merges below.
        can_concat = all(df.index.is_unique for df in frames)

        # join indexes only using concat
        if can_concat:
            if how == 'left':
                # concat has no 'left' join: emulate it with an outer join
                # restricted to self's index via join_axes.
                how = 'outer'
                join_axes = [self.index]
            else:
                join_axes = None
            return concat(frames, axis=1, join=how, join_axes=join_axes,
                          verify_integrity=True)

        joined = frames[0]

        for frame in frames[1:]:
            joined = merge(joined, frame, how=how, left_index=True,
                           right_index=True)

        return joined
@Substitution('')
@Appender(_merge_doc, indents=2)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
          left_index=False, right_index=False, sort=False,
          suffixes=('_x', '_y'), copy=True, indicator=False,
          validate=None):
    # Thin wrapper: delegate to the top-level merge with self as the left
    # frame.  The docstring is injected from _merge_doc by @Appender.
    from pandas.core.reshape.merge import merge
    return merge(self, right, how=how, on=on, left_on=left_on,
                 right_on=right_on, left_index=left_index,
                 right_index=right_index, sort=sort, suffixes=suffixes,
                 copy=copy, indicator=indicator, validate=validate)
def round(self, decimals=0, *args, **kwargs):
    """
    Round a DataFrame to a variable number of decimal places.

    Parameters
    ----------
    decimals : int, dict, Series
        Number of decimal places to round each column to. If an int is
        given, round each column to the same number of places.
        Otherwise dict and Series round to variable numbers of places.
        Column names should be in the keys if `decimals` is a
        dict-like, or in the index if `decimals` is a Series. Any
        columns not included in `decimals` will be left as is. Elements
        of `decimals` which are not columns of the input will be
        ignored.

    Examples
    --------
    >>> df = pd.DataFrame(np.random.random([3, 3]),
    ...     columns=['A', 'B', 'C'], index=['first', 'second', 'third'])
    >>> df
                   A         B         C
    first   0.028208  0.992815  0.173891
    second  0.038683  0.645646  0.577595
    third   0.877076  0.149370  0.491027
    >>> df.round(2)
               A     B     C
    first   0.03  0.99  0.17
    second  0.04  0.65  0.58
    third   0.88  0.15  0.49
    >>> df.round({'A': 1, 'C': 2})
              A         B     C
    first   0.0  0.992815  0.17
    second  0.0  0.645646  0.58
    third   0.9  0.149370  0.49
    >>> decimals = pd.Series([1, 0, 2], index=['A', 'B', 'C'])
    >>> df.round(decimals)
              A  B     C
    first   0.0  1  0.17
    second  0.0  1  0.58
    third   0.9  0  0.49

    Returns
    -------
    DataFrame object

    See Also
    --------
    numpy.around
    Series.round
    """
    from pandas.core.reshape.concat import concat

    def _dict_round(df, decimals):
        # Yield each column rounded to its per-column precision, or
        # unchanged when the column has no entry in `decimals`.
        for col, vals in df.iteritems():
            try:
                yield _series_round(vals, decimals[col])
            except KeyError:
                yield vals

    def _series_round(s, decimals):
        # Only numeric dtypes are rounded; other columns pass through.
        if is_integer_dtype(s) or is_float_dtype(s):
            return s.round(decimals)
        return s

    nv.validate_round(args, kwargs)

    if isinstance(decimals, (dict, Series)):
        if isinstance(decimals, Series):
            if not decimals.index.is_unique:
                raise ValueError("Index of decimals must be unique")
        # list(...) instead of a pass-through comprehension (C416).
        new_cols = list(_dict_round(self, decimals))
    elif is_integer(decimals):
        # Dispatch to Series.round
        new_cols = [_series_round(v, decimals)
                    for _, v in self.iteritems()]
    else:
        raise TypeError("decimals must be an integer, a dict-like or a "
                        "Series")

    # An empty frame has nothing to round; return self unchanged.
    if new_cols:
        return self._constructor(concat(new_cols, axis=1),
                                 index=self.index,
                                 columns=self.columns)
    return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values
Parameters
----------
method : {'pearson', 'kendall', 'spearman'}
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for pearson
and spearman correlation
Returns
-------
y : DataFrame
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os, sys, re
sys.path.append("{0}/Desktop/cbmi/reproduce/python/MedicalResearchTool/objects".format(os.environ['HOME'])) #TODO
sys.path.append("{0}/Desktop/cbmi/reproduce/python/MedicalResearchTool".format(os.environ['HOME']))
import nltk
import requests
from pprint import pprint
from stemming.porter2 import stem
from ArticleManager import ArticleManager
from DatabaseManager import DatabaseManager
from bs4 import BeautifulSoup
class ArticleExtractor(ArticleManager):
"""
Extract study information from the article text
Depends on:
os -- https://docs.python.org/3/library/os.html
sys -- https://docs.python.org/3/library/sys.html
re -- https://docs.python.org/3/library/re.html
nltk -- http://www.nltk.org/
requests -- http://docs.python-requests.org/en/master/
pprint -- https://docs.python.org/3/library/pprint.html
stemming -- https://pypi.python.org/pypi/stemming/1.0
beautiful soup -- https://www.crummy.com/software/BeautifulSoup/bs4/doc/
Functions from inherited from ArticleManager:
get_choices
check
check_boolean
ask
ask_without_choices
See ArticleManager for additional documentation
"""
def __init__(self, **kwargs):
    """Initialize, forwarding run_style/metadata keyword arguments to ArticleManager."""
    # Python 3 zero-argument super() resolves to the same MRO parent as
    # the explicit super(ArticleExtractor, self) form.
    super().__init__(**kwargs)
def clean_entry(self):
    """
    Normalize multi-valued fields in the ArticleExtractor.entry attribute:
    split comma-separated strings, strip whitespace, drop duplicate values
    (preserving first-seen order) and rejoin with ', '.  Values that are
    not strings are left untouched.

    Return: ArticleExtractor.entry attribute (dictionary)

    Example:
    >>> ae.entry
    {
        'article_doi':'10.1016/j.arth.2015.12.012',
        'analysis_sw':'SAS,SPSS,SAS',
        'grant_ids':'#29861982, #EI98239',
        'primary_research':1
    }
    >>> ae.clean_entry()
    {
        'article_doi':'10.1016/j.arth.2015.12.012',
        'analysis_sw':'SAS, SPSS',
        'grant_ids':'#29861982, #EI98239',
        'primary_research':1
    }

    Raise TypeError when entry is not a dictionary
    """
    if (type(self.entry) is not dict):
        raise TypeError("clean_entry called on: {0} \ninvalid type: {1}".format(self.entry,type(self.entry)))
    # BUG FIX: the original returned self.entry unconditionally here,
    # making the deduplication loop below unreachable dead code.
    for (key, value) in self.entry.items():
        try:
            parts = [part.strip() for part in value.split(',')]
        except AttributeError:
            # value.split(',') failed because value is not a string;
            # this field is already clean.
            continue
        # dict.fromkeys deduplicates while preserving insertion order;
        # the original used set(), which made the output order arbitrary
        # (the docstring promises 'SAS, SPSS', not a random order).
        self.entry[key] = ', '.join(dict.fromkeys(parts))
    return self.entry
def get_reviewer(self):
    """
    Get the name of the person reviewing the article.

    Uses the computer's login name to take a guess against the known
    reviewer choices, or asks for input if no choice matches.

    Return: Void (stores the result via self.check / self.ask)
    """
    # BUG FIX: os.getlogin() raises OSError when there is no controlling
    # terminal (cron, some IDEs) rather than returning a falsy value, so
    # the original `os.getlogin() or pwd...` fallback could never trigger
    # -- and `pwd` was never imported, so it would have raised NameError.
    try:
        username = os.getlogin()  # username of the person using the computer
    except OSError:
        import pwd  # POSIX-only; imported lazily so the module still loads on Windows
        username = pwd.getpwuid(os.getuid())[0]
    users = self.get_choices("reviewer")
    for user in users:
        if (re.search(username, user, re.I)):
            self.check("Whos reviewing the article?", users[user], "user of computer", "reviewer", display=user)
            return
    self.ask("Whos reviewing the article?", "reviewer")
def chunker(self,sentence):
"""
Chunk a sentence
Args: sentence -- (string)
Return: nltk.tree.Tree object, traversable,
chunks begin with and end with proper noun (singular or plural)
and these may occur between the two proper nouns:
proper noun, noun, ',', '(', ')', ':', demonstrative adjective, conjuction, preposition
For more information, see:
http://www.nltk.org/book/ch07.html
Example:
>>> ae = ArticleExtractor()
>>> ae.chunker("The Institute for American Greatness has partnered with The University of Iceland")
Tree('S', [('The', 'DT'), \
Tree('Chunk', [('Institute', 'NNP'), ('for', 'IN'), ('American', 'NNP'), ('Greatness', 'NNP')]), \
('has', 'VBZ'), ('partnered', 'VBN'), ('with', 'IN'), ('The', 'DT'), \
Tree('Chunk', [('University', 'NNP'), ('of', 'IN'), ('Iceland', 'NNP')]) \
])
Except TypeError when sentence is not a string, retry by casting sentence to string
>>> ae.chunker(12)
chunker called on: '12'
12 is type <class 'int'> but must be a string or bytes-like object
retrying with cast to string
Tree('S', [('12', 'CD')])
"""
try:
words = nltk.word_tokenize(sentence)
tagged = nltk.pos_tag(words)
#tagged = nltk.pos_tag([word.rstrip(''.join([str(i) for i in range(10)])) for word in words])
chunkGram = r"Chunk: {<NNP.?><NNP.?|NN.?|,|\(|\)|:|IN|CC|DT>*<NNP.?|\)>}"
chunkedParser = nltk.RegexpParser(chunkGram)
chunked = chunkedParser.parse(tagged)
return chunked
except TypeError as e:
print("chunker called on: '{}' \n{} is type: {} but must be a string or bytes-like object".format(sentence,sentence,type(sentence)))
print("retrying with cast to string")
return self.chunker(str(sentence))
def get_clinical_domain(self,key_words):
"""
Get the clinical domain of the article
Args: key_words -- words to search against the clinical domain choices (list of strings)
Return: int value corresponding to redcap key for given domain, or 0 if no keyword matches (unknown domain) or keywords is invalid type
Example:
>>> ae = ArticleExtractor()
>>> ae.get_clinical_domain(['Neurology'])
23
>>> ae.get_clinical_domain(['The American Dream'])
0
>>> ae.get_clinical_domain(12)
0
"""
if ('clinical_domain' in self.entry):
return
if (type(key_words) is not list):
return 0
stopwords = nltk.corpus.stopwords.words('english') + ['health','disease','medicine','medical','sciences','medicine','international']
key_words = [stem(word.lower().strip()) for word in key_words if word.lower() not in stopwords]
domains = self.get_choices("clinical_domain")
for word in key_words:
for domain in domains:
if (re.search(re.escape(word),domain,re.I)):
return domain
return 0
    def _get_hypotheses(self,text):
        """
        Determine whether the study in the article was 'Hypothesis Driven or Hypothesis Generating'
        and if they stated the null and alternative hypotheses; assign the value to entry.
        Args: text -- text from the article to be extracted (string)
        Return: void
        Articles that were 'hypothesis driven' usually present their hypotheses as
        "we hypothesized ...", which is what the regex below targets.
        Example:
            >>> ae = ArticleExtractor()
            >>> ae._get_hypotheses("We hypothesized that patients undergoing extended BEV therapy could have altered patterns of recurrence and symptoms of recurrence due to its novel mechanism of action")
            >>> ae.entry
            {'hypothesis_gen_or_driv':1}
            #from article: 23632207.pdf (doi: 10.1016/j.ygyno.2013.04.055)
            >>> ae._get_hypotheses("We randomly collected data with no direction *hand slap")
            >>> ae.entry
            {'hypothesis_gen_or_driv':2}
        """
        # Scan sentence by sentence for an explicit hypothesis statement.
        for each_sent in nltk.sent_tokenize(text):
            if (re.search(r'we.*?hypothes',each_sent,re.I)):
                # Candidate sentence: propose redcap value 1 ("driven");
                # check() decides (possibly via the user) whether to accept it.
                self.check("Hypothesis Driven or Hypothesis Generating",1,each_sent,"hypothesis_gen_or_driv",display="driven")
            # If the value was accepted, optionally record whether explicit
            # null/alternative hypotheses are stated, then stop scanning.
            if ("hypothesis_gen_or_driv" in self.entry):
                #we didnt encounter any articles that stated null and alternate hypotheses. Here's how we might ask
                self.generate_chooser("Does the publication state null and alternative hypotheses?",self.get_choices("clear_hypothesis"),info=each_sent)
                if (self.user_choice != -1):
                    self.entry['clear_hypothesis'] = self.user_choice
                return
        # No hypothesis statement found anywhere: classify as "generating" (redcap value 2).
        self.entry['hypothesis_gen_or_driv'] = 2
        return
def _get_funding(self,text):
"""
Get funding and grant information for the study in an article
Args: text -- text from the article to be extracted (string)
Return: void
Articles usually presented their funding in the format:
"This study was funded in part by ... (grant #NIH8982)"
"This study was funded by a grant from ..."
Example:
>>> ae = ArticleExtractor(run_style=0)
>>> ae._get_funding("Our study was funded by the NIH (grant id: #3234Gj8)")
I think 'Grant ID' should be: '3234Gj8' based on:
Our study was funded by the NIH (grant id: #3234Gj8)
Is this correct? (if no, type no and press enter; otherwise, press enter):
I think 'Funders' should be: 'the NIH' based on:
Our study was funded by the NIH (grant id: #3234Gj8)
Is this correct? (if no, type no and press enter; otherwise, press enter):
>>> ae.entry
{'funders': 'the NIH', 'grant_ids': '3234Gj8'}
>>> ae = ArticleExtractor(run_style=0)
>>> ae._get_funding("Our study was not funded by the NIH, but rather my mom")
I think 'Funders' should be: 'the NIH' based on:
Our study was not funded by the NIH, but rather my mom
Is this correct? (if no, type no and press enter; otherwise, press enter): no
Do you know the correct value? (if yes, type yes and press enter; otherwise, press enter): yes
Type the correct value for 'Funders': researchers mom
>>> ae.entry
{'funders': 'researchers mom'}
>>> ae = ArticleExtractor(run_style=1)
>>> ae._get_funding("Our study was funded by the NIH (grant id: #3234Gj8)")
>>> ae.entry
{'funders': 'the NIH', 'grant_ids': '3234Gj8'}
>>> ae._get_funding("The research was funded by Wayne Enterprises.")
>>> ae.entry
{'funders': 'Wayne Enterprises'}
"""
for each_sent in nltk.sent_tokenize(text):
if (re.search(r'funded.*?by',each_sent,re.I|re.S)):
search = re.search(r"grant.*?(\w*\d[\w\d/-]*)",each_sent,re.I)
if (search):
self.check("Grant ID",search.group(1).strip(),each_sent,"grant_ids")
search = re.search(r'grant.*?from (.*?)[^\w\s-]',each_sent,re.I|re.S)
if (search):
self.check("Funders",search.group(1).strip(),each_sent,"funders")
else:
search = re.search(r'funded.*?by (.*?)[^\w\s-]',each_sent,re.I|re.S)
self.check("Funders",search.group(1).strip(),each_sent,"funders")
def _get_inex_criteria(self,text):
"""
Determine if the study in an article documented their inclusion / exclusion criteria
Args: text -- text from the article to be extracted (string)
Return: void
Search for:
"... were included", "... were excluded", "inclusion", "exclusion"
indicator phrases
Example:
>>> ae = ArticleExtractor(run_style=0)
>>> ae._get_inex_criteria("Aliens and other reptiles were excluded from our study")
I think 'Inclusion Exclusion Criteria Stated' should be: 'yes' based on:
Aliens and other reptiles were excluded from our study
Is this correct? (if no, type no and press enter; otherwise, press enter):
>>> ae.entry
{'inclusion_and_exclusion_stated': '1', 'inclusion_exclu_location___3': 1}
#location___3 is body of article
>>> ae = ArticleExtractor(run_style=1)
>>> ae._get_inex_criteria("Aliens and other reptiles were excluded from our study")
>>> ae.entry
{'inclusion_and_exclusion_stated': '1', 'inclusion_exclu_location___3': 1}
"""
for each_sent in nltk.sent_tokenize(text):
copy = each_sent
if(re.search(r'were\W*includ',each_sent,re.I) or re.search(r'were\W*exclud',each_sent,re.I) or
re.search(r'inclus',each_sent,re.I) or (re.search(r'exclus',each_sent,re.I) and not re.search(r'exclusively',each_sent,re.I))):
if ("inclusion_and_exclusion_stated" not in self.entry):
self.check_boolean("Inclusion Exclusion Criteria Stated",1,each_sent,"inclusion_and_exclusion_stated",display='yes')
if ("inclusion_and_exclusion_stated" in self.entry):
self.entry['inclusion_exclu_location___3'] = 1
self.check_ontol(each_sent)
return
def check_ontol(self,info):
"""
Ask user if any inclusion / exclusion criteria are presenting relative to standard ontologies
(method is called when a sentence is found indicating that inclusion / exclusion criteria was stated)
Args: info -- sentence that indicated inclusion / exclusion criteria was stated (string)
Return: void
#TODO, this needs to be expanded to check for standard ontologies that occur outside of the sentence which
indicated that inclusion / exclusion criteria were stated
This is difficult because a variety of names exist (ICD-9, ICD9, 'International Classification of Diseases')
and it is difficult to discern which category ('Procedure', 'Diagnosis', 'Medication', 'Laboratory')
the ontology belongs to
For now, prompts user whenever 'inclusion_and_exclusion_stated' set to True
Example:
>>> ae = ArticleExtractor(run_style=0)
>>> ae.check_ontol("Patients suffering from sleep apnea (ICD-9 327.23) were excluded")
based on:
Patients suffering from sleep apnea (ICD-9 327.23) were excluded
Are any standard ontologies stated (such as CPT, ICD9, etc)? (if yes, type yes and press enter; otherwise, press enter): yes
#user interacts with GUI
>>> ae.entry
{'ontol_and_vocab_stated': 1, 'diag_vocabulary': 1}
"""
if ("ontol_and_vocab_stated" in self.entry):
return
if (not self.run_style):
print("based on:")
print(info)
if (self.ask_question("Are any standard ontologies stated (such as CPT, ICD9, etc)?")):
self.entry['ontol_and_vocab_stated'] = 1
c1 = {
"Procedure":1,
"Diagnosis":2,
"Medication":3,
"Laboratory":4
}
c2 = {
"Procedure":"proc_vocabulary",
"Diagnosis":"diag_vocabulary",
"Medication":"med_vocab",
"Laboratory":"lab_vocab"
}
c3 = dict((v,k) for k,v in c1.items())
self.generate_chooser("What categories are the ontologies a part of?",c1)
if (self.user_choice != -1):
self.ask("What ontologies are given for the category {}?".format(c3[self.user_choice]),c2[c3[self.user_choice]])
#TODO, **** I think this isnt correct, check when redcap comes back online
def _get_databases(self,text):
"""
Determine if article cited databases used in the study
Args: text -- text from the article to be extracted (string)
Return: void
If a sentence has the word 'database', guesses that the database is the largest chunk (see chunker method for more information)
Sometimes correct, but probaby the best default
#TODO, if there's more than one database
Example:
>>> ae = ArticleExtractor(run_style=0)
>>> ae._get_databases("The study database was obtained retrospectively from the Swedish National Stroke Register, Riksstroke and comprised all consecutive patients | |
"""
primitive.py
----------
Process detected planar primitives.
Primitives are supported in vertex group format (.vg, .bvg).
Mapple as in [Easy3D](https://github.com/LiangliangNan/Easy3D)
can be used to generate such primitives from point clouds.
Otherwise, one can refer to the vertex group file format specification
attached to the README document.
"""
from random import random
from pathlib import PosixPath
import struct
import numpy as np
from sklearn.decomposition import PCA
from tqdm import tqdm
from .logger import attach_to_log
logger = attach_to_log()
class VertexGroup:
"""
Class for manipulating planar primitives.
"""
    def __init__(self, filepath, process=True):
        """
        Init VertexGroup.
        Class for manipulating planar primitives.

        Parameters
        ----------
        filepath: pathlib.Path
            Filepath to vertex group file (.vg) or binary vertex group file (.bvg)
        process: bool
            Immediate processing if set True
        """
        self.filepath = filepath
        self.processed = False  # set True once process() has run
        # the following are populated by process()
        self.points = None  # (n, 3) float point cloud
        self.planes = None  # (n, 4) float plane parameters
        self.bounds = None  # (n, 2, 3) float AABBs per primitive
        self.points_grouped = None  # object array of per-primitive point sets
        self.points_ungrouped = None  # points belonging to no primitive
        # file content as a list of ascii lines (bvg files are decoded first)
        self.vgroup_ascii = self.load_file()
        self.vgroup_binary = None  # reserved; never assigned elsewhere in view
        if process:
            self.process()
    def load_file(self):
        """
        Load (ascii / binary) vertex group file.

        Returns the file content as a list of text lines: .vg files are read
        directly; .bvg files are decoded into the equivalent ascii representation.

        Raises
        ------
        ValueError
            If the file suffix is neither .vg nor .bvg.
        """
        if self.filepath.suffix == '.vg':
            with open(self.filepath, 'r') as fin:
                return fin.readlines()
        elif self.filepath.suffix == '.bvg':
            # define size constants
            _SIZE_OF_INT = 4
            _SIZE_OF_FLOAT = 4
            _SIZE_OF_PARAM = 4
            _SIZE_OF_COLOR = 3
            vgroup_ascii = ''
            with open(self.filepath, 'rb') as fin:
                # points: a count followed by num_points * 3 float32 coordinates
                num_points = struct.unpack('i', fin.read(_SIZE_OF_INT))[0]
                points = struct.unpack('f' * num_points * 3, fin.read(_SIZE_OF_FLOAT * num_points * 3))
                vgroup_ascii += f'num_points: {num_points}\n'
                vgroup_ascii += ' '.join(map(str, points)) + '\n'
                # colors
                # NOTE(review): only the color count is consumed -- no color values are
                # read. If num_colors > 0 the stream would desynchronise here; this
                # assumes .bvg files in use store zero colors -- confirm.
                num_colors = struct.unpack("i", fin.read(_SIZE_OF_INT))[0]
                vgroup_ascii += f'num_colors: {num_colors}\n'
                # normals: a count followed by num_normals * 3 float32 components
                num_normals = struct.unpack("i", fin.read(_SIZE_OF_INT))[0]
                normals = struct.unpack('f' * num_normals * 3, fin.read(_SIZE_OF_FLOAT * num_normals * 3))
                vgroup_ascii += f'num_normals: {num_normals}\n'
                vgroup_ascii += ' '.join(map(str, normals)) + '\n'
                # groups: per-primitive records (type, parameters, label, color, point indices)
                num_groups = struct.unpack("i", fin.read(_SIZE_OF_INT))[0]
                vgroup_ascii += f'num_groups: {num_groups}\n'
                group_counter = 0
                while group_counter < num_groups:
                    group_type = struct.unpack("i", fin.read(_SIZE_OF_INT))[0]
                    num_group_parameters = struct.unpack("i", fin.read(_SIZE_OF_INT))[0]
                    # NOTE(review): always reads a fixed 4 floats (_SIZE_OF_PARAM),
                    # ignoring num_group_parameters, and advances by
                    # _SIZE_OF_INT * _SIZE_OF_PARAM bytes (works because int and
                    # float are both 4 bytes here). Correct for planes only;
                    # other primitive types would misparse -- confirm.
                    group_parameters = struct.unpack("f" * _SIZE_OF_PARAM, fin.read(_SIZE_OF_INT * _SIZE_OF_PARAM))
                    group_label_size = struct.unpack("i", fin.read(_SIZE_OF_INT))[0]
                    # be reminded that vg <-> bvg in Mapple does not maintain group order
                    group_label = struct.unpack("c" * group_label_size, fin.read(group_label_size))
                    group_color = struct.unpack("f" * _SIZE_OF_COLOR, fin.read(_SIZE_OF_FLOAT * _SIZE_OF_COLOR))
                    group_num_point = struct.unpack("i", fin.read(_SIZE_OF_INT))[0]
                    group_points = struct.unpack("i" * group_num_point, fin.read(_SIZE_OF_INT * group_num_point))
                    num_children = struct.unpack("i", fin.read(_SIZE_OF_INT))[0]
                    vgroup_ascii += f'group_type: {group_type}\n'
                    vgroup_ascii += f'num_group_parameters: {num_group_parameters}\n'
                    vgroup_ascii += 'group_parameters: ' + ' '.join(map(str, group_parameters)) + '\n'
                    vgroup_ascii += 'group_label: ' + ''.join(map(str, group_label)) + '\n'
                    vgroup_ascii += 'group_color: ' + ' '.join(map(str, group_color)) + '\n'
                    vgroup_ascii += f'group_num_point: {group_num_point}\n'
                    vgroup_ascii += ' '.join(map(str, group_points)) + '\n'
                    vgroup_ascii += f'num_children: {num_children}\n'
                    group_counter += 1
            # convert vgroup_ascii to list
            return vgroup_ascii.split('\n')
        else:
            raise ValueError(f'unable to load {self.filepath}, expected *.vg or .bvg.')
def process(self):
"""
Start processing vertex group.
"""
logger.info('processing {}'.format(self.filepath))
self.points = self.get_points()
self.planes, self.bounds, self.points_grouped, self.points_ungrouped = self.get_primitives()
self.processed = True
def get_points(self, row=1):
"""
Get points from vertex group.
Parameters
----------
row: int
Row number where points are specified, defaults to 1 for filename.vg
Returns
----------
as_float: (n, 3) float
Point cloud
"""
pc = np.fromstring(self.vgroup_ascii[row], sep=' ')
return np.reshape(pc, (-1, 3))
    def get_primitives(self):
        """
        Get primitives from vertex group.

        Returns
        ----------
        params: (n, 4) float
            Plane parameters
        bounds: (n, 2, 3) float
            Bounding box of the primitives
        groups: (n, m, 3) float
            Groups of points
        ungrouped_points: (u, 3) float
            Points that belong to no group
        """
        # a 'group_num_point: k' line is immediately followed by the line
        # listing that group's k point indices
        is_primitive = [line.startswith('group_num_point') for line in self.vgroup_ascii]
        primitives = [self.vgroup_ascii[line] for line in np.where(is_primitive)[0] + 1]  # lines of groups in the file
        params = []
        bounds = []
        groups = []
        grouped_indices = set()  # indices of points being grouped
        for i, p in enumerate(primitives):
            point_indices = np.fromstring(p, sep=' ').astype(np.int64)
            grouped_indices.update(point_indices)
            points = self.points[point_indices]
            param = self.fit_plane(points, mode='PCA')
            if param is None:
                # degenerate group (fewer than 3 points) -- skipped entirely,
                # though its indices were already marked as grouped above
                continue
            params.append(param)
            bounds.append(self._points_bound(points))
            groups.append(points)
        ungrouped_indices = set(range(len(self.points))).difference(grouped_indices)
        ungrouped_points = self.points[list(ungrouped_indices)]  # points that belong to no groups
        return np.array(params), np.array(bounds), np.array(groups, dtype=object), np.array(ungrouped_points)
@staticmethod
def _points_bound(points):
"""
Get bounds (AABB) of the points.
Parameters
----------
points: (n, 3) float
Points
Returns
----------
as_float: (2, 3) float
Bounds (AABB) of the points
"""
return np.array([np.amin(points, axis=0), np.amax(points, axis=0)])
def normalise_from_centroid_and_scale(self, centroid, scale, num=None):
"""
Normalising points.
Centroid and scale are provided to be mitigated, which are identical with the return of
scale_and_offset() such that the normalised points align with the corresponding mesh.
Notice the difference with normalise_points_to_centroid_and_scale().
Parameters
----------
centroid: (3,) float
Centroid of the points to be mitigated
scale: float
Scale of the points to be mitigated
num: None or int
If specified, random sampling is performed to ensure the identical number of points
Returns
----------
None: NoneType
Normalised (and possibly sampled) self.points
"""
# mesh_to_sdf.utils.scale_to_unit_sphere()
self.points = (self.points - centroid) / scale
# update planes and bounds as point coordinates has changed
self.planes, self.bounds, self.points_grouped = self.get_primitives()
# safely sample points after planes are extracted
if num:
choice = np.random.choice(self.points.shape[0], num, replace=True)
self.points = self.points[choice, :]
def normalise_to_centroid_and_scale(self, centroid=(0, 0, 0), scale=1.0, num=None):
"""
Normalising points to the provided centroid and scale. Notice
the difference with normalise_points_from_centroid_and_scale().
Parameters
----------
centroid: (3,) float
Desired centroid of the points
scale: float
Desired scale of the points
num: None or int
If specified, random sampling is performed to ensure the identical number of points
Returns
----------
None: NoneType
Normalised (and possibly sampled) self.points
"""
######################################################
# this does not lock the scale
# offset = np.mean(points, axis=0)
# denominator = np.max(np.ptp(points, axis=0)) / scale
######################################################
bounds = np.ptp(self.points, axis=0)
center = np.min(self.points, axis=0) + bounds / 2
offset = center
self.points = (self.points - offset) / (bounds.max() * scale) + centroid
# update planes and bounds as point coordinates has changed
self.planes, self.bounds, self.points_grouped = self.get_primitives()
# safely sample points after planes are extracted
if num:
choice = np.random.choice(self.points.shape[0], num, replace=True)
self.points = self.points[choice, :]
@staticmethod
def fit_plane(points, mode='PCA'):
"""
Fit plane parameters for a point set.
Parameters
----------
points: (n, 3) float
Points to be fit
mode: str
Mode of plane fitting,
'PCA' (recommended) or 'LSA' (may introduce distortions)
Returns
----------
param: (4,) float
Plane parameters, (a, b, c, d) as in a * x + b * y + c * z = -d
"""
assert mode == 'PCA' or mode == 'LSA'
if len(points) < 3:
logger.warning('plane fitting skipped given #points={}'.format(len(points)))
return None
if mode == 'LSA':
# AX = B
logger.warning('LSA introduces distortions when the plane crosses the origin')
param = np.linalg.lstsq(points, np.expand_dims(np.ones(len(points)), 1))
param = np.append(param[0], -1)
else:
# PCA followed by shift
pca = PCA(n_components=3)
pca.fit(points)
eig_vec = pca.components_
logger.debug('explained_variance_ratio: {}'.format(pca.explained_variance_ratio_))
# normal vector of minimum variance
normal = eig_vec[2, :] # (a, b, c)
centroid = np.mean(points, axis=0)
# every point (x, y, z) on the plane satisfies a * x + b * y + c * z = -d
# taking centroid as a point on the plane
d = -centroid.dot(normal)
param = np.append(normal, d)
return param
def append_planes(self, additional_planes, additional_points=None):
"""
Append planes to vertex group. The provided planes can be accompanied by optional supporting points.
Notice these additional planes differ from `additional_planes` in `complex.py`: the former apply to
the VertexGroup data structure thus is generic to applications, while the latter apply to only the
CellComplex data structure.
Parameters
----------
additional_planes: (m, 4) float
Plane parameters
additional_points: None or (m, n, 3) float
Points that support planes
"""
if additional_points is None:
# this may still find use cases where plane parameters are provided as-is
logger.warning('no supporting points provided. only appending plane parameters')
else:
assert len(additional_planes) == len(additional_points)
# direct appending would not work
combined = np.zeros(len(self.points_grouped) + len(additional_points), dtype=object)
combined[:len(self.points_grouped)] = self.points_grouped
combined[len(self.points_grouped):] = [np.array(g) for g in additional_points]
self.points_grouped = combined
self.planes = np.append(self.planes, additional_planes, axis=0)
def save_vg(self, filepath):
"""
Save vertex group into a vg file.
Parameters
----------
filepath: str
Filepath to save vg file
"""
logger.info('writing vertex group into {}'.format(filepath))
if isinstance(filepath, str):
assert filepath.endswith('.vg')
elif isinstance(filepath, PosixPath):
assert filepath.suffix == '.vg'
assert self.planes is not None and self.points_grouped is not None
points_grouped = np.concatenate(self.points_grouped)
points_ungrouped = self.points_ungrouped
# | |
if top:
self.check_top_u += u[self.slice_top]
self.check_top_v += v[self.slice_top]
self.check_top_snx += sxy[self.slice_top]*self.normal_top[1]
self.check_top_sny += syy[self.slice_top]*self.normal_top[1]
def __internal_call__(self, source, reflection_distance, forces, dipstr, dipvec, target, compute_source_velocity, compute_source_stress, compute_target_velocity, compute_target_stress, loc):
# get total forces
tfx = np.sum(forces[0])
tfy = np.sum(forces[1])
# compute real distances
dist_x = reflection_distance * self.ranx
dist_y = reflection_distance * self.rany
if compute_target_velocity or compute_target_stress:
assert target is not None, 'Need to give target to compute target velocity or stresses'
if target is None:
target = np.zeros([2,1])
# get where points are too close to boundary
close_left = source[0] < self.bounds[0] + dist_x
close_right = source[0] > self.bounds[1] - dist_x
close_bottom = source[1] < self.bounds[2] + dist_y
close_top = source[1] > self.bounds[3] - dist_y
# get all bad locations
bad_locations = np.logical_or.reduce([close_left, close_right, close_bottom, close_top])
good_locations = ~bad_locations
# further divide the close to boundary points
far_bottom_top = ~np.logical_or(close_bottom, close_top)
far_left_right = ~np.logical_or(close_left, close_right)
close_left_bottom = np.logical_and(close_left, close_bottom)
close_left_top = np.logical_and(close_left, close_top)
close_left_only = np.logical_and(close_left, far_bottom_top)
close_right_bottom = np.logical_and(close_right, close_bottom)
close_right_top = np.logical_and(close_right, close_top)
close_right_only = np.logical_and(close_right, far_bottom_top)
close_bottom_only = np.logical_and(close_bottom, far_left_right)
close_top_only = np.logical_and(close_top, far_left_right)
# reflection 1: points close to only the left boundary reflected to the right
left_only_bad_source = source[:,close_left_only]
left_only_reflected_source = np.row_stack([
left_only_bad_source[0] + self.ranx,
left_only_bad_source[1],
])
left_only_reflected_forces = forces[:,close_left_only] if forces is not None else None
left_only_reflected_dipstr = dipstr[:,close_left_only] if dipstr is not None else None
left_only_reflected_dipvec = dipvec[:,close_left_only] if dipvec is not None else None
# reflection 2: points close to only the right boundary reflected to the left
right_only_bad_source = source[:,close_right_only]
right_only_reflected_source = np.row_stack([
right_only_bad_source[0] - self.ranx,
right_only_bad_source[1],
])
right_only_reflected_forces = forces[:,close_right_only] if forces is not None else None
right_only_reflected_dipstr = dipstr[:,close_right_only] if dipstr is not None else None
right_only_reflected_dipvec = dipvec[:,close_right_only] if dipvec is not None else None
# reflection 3: points close to only the bottom boundary reflected to the top
bottom_only_bad_source = source[:,close_bottom_only]
bottom_only_reflected_source = np.row_stack([
bottom_only_bad_source[0],
bottom_only_bad_source[1] + self.rany,
])
bottom_only_reflected_forces = forces[:,close_bottom_only] if forces is not None else None
bottom_only_reflected_dipstr = dipstr[:,close_bottom_only] if dipstr is not None else None
bottom_only_reflected_dipvec = dipvec[:,close_bottom_only] if dipvec is not None else None
# reflection 4: points close to only the top boundary reflected to the bottom
top_only_bad_source = source[:,close_top_only]
top_only_reflected_source = np.row_stack([
top_only_bad_source[0],
top_only_bad_source[1] - self.rany,
])
top_only_reflected_forces = forces[:,close_top_only] if forces is not None else None
top_only_reflected_dipstr = dipstr[:,close_top_only] if dipstr is not None else None
top_only_reflected_dipvec = dipvec[:,close_top_only] if dipvec is not None else None
# reflection 5: points close to the left and the bottom boundaries
left_bottom_bad_source = source[:,close_left_bottom]
left_bottom_rb = np.row_stack([
left_bottom_bad_source[0] + self.ranx,
left_bottom_bad_source[1],
])
left_bottom_rt = np.row_stack([
left_bottom_bad_source[0] + self.ranx,
left_bottom_bad_source[1] + self.rany,
])
left_bottom_lt = np.row_stack([
left_bottom_bad_source[0],
left_bottom_bad_source[1] + self.rany,
])
left_bottom_reflected_source = np.column_stack([left_bottom_rb, left_bottom_rt, left_bottom_lt])
left_bottom_reflected_forces = forces[:,close_left_bottom] if forces is not None else None
left_bottom_reflected_dipstr = dipstr[:,close_left_bottom] if dipstr is not None else None
left_bottom_reflected_dipvec = dipvec[:,close_left_bottom] if dipvec is not None else None
# reflection 6: points close to the left and the top boundaries, and their reflections
left_top_bad_source = source[:,close_left_top]
left_top_rt = np.row_stack([
left_top_bad_source[0] + self.ranx,
left_top_bad_source[1],
])
left_top_rb = np.row_stack([
left_top_bad_source[0] + self.ranx,
left_top_bad_source[1] - self.rany,
])
left_top_lb = np.row_stack([
left_top_bad_source[0],
left_top_bad_source[1] - self.rany,
])
left_top_reflected_source = np.column_stack([left_top_rt, left_top_rb, left_top_lb])
left_top_reflected_forces = forces[:,close_left_top] if forces is not None else None
left_top_reflected_dipstr = dipstr[:,close_left_top] if dipstr is not None else None
left_top_reflected_dipvec = dipvec[:,close_left_top] if dipvec is not None else None
# reflection 7: points close to the right and the top boundaries
right_top_bad_source = source[:,close_right_top]
right_top_lt = np.row_stack([
right_top_bad_source[0] - self.ranx,
right_top_bad_source[1],
])
right_top_lb = np.row_stack([
right_top_bad_source[0] - self.ranx,
right_top_bad_source[1] - self.rany,
])
right_top_rb = np.row_stack([
right_top_bad_source[0],
right_top_bad_source[1] - self.rany,
])
right_top_reflected_source = np.column_stack([right_top_lt, right_top_lb, right_top_rb])
right_top_reflected_forces = forces[:,close_right_top] if forces is not None else None
right_top_reflected_dipstr = dipstr[:,close_right_top] if dipstr is not None else None
right_top_reflected_dipvec = dipvec[:,close_right_top] if dipvec is not None else None
# reflection 8: points close to the right and the bottom boundaries
right_bottom_bad_source = source[:,close_right_bottom]
right_bottom_lb = np.row_stack([
right_bottom_bad_source[0] - self.ranx,
right_bottom_bad_source[1],
])
right_bottom_lt = np.row_stack([
right_bottom_bad_source[0] - self.ranx,
right_bottom_bad_source[1] + self.rany,
])
right_bottom_rt = np.row_stack([
right_bottom_bad_source[0],
right_bottom_bad_source[1] + self.rany,
])
right_bottom_reflected_source = np.column_stack([right_bottom_lb, right_bottom_lt, right_bottom_rt])
right_bottom_reflected_forces = forces[:,close_right_bottom] if forces is not None else None
right_bottom_reflected_dipstr = dipstr[:,close_right_bottom] if dipstr is not None else None
right_bottom_reflected_dipvec = dipvec[:,close_right_bottom] if dipvec is not None else None
# add these into the "all left only" grouping
all_left_only_source = np.column_stack([ left_only_bad_source, right_only_reflected_source ])
all_left_only_forces = np.column_stack([ left_only_reflected_forces, right_only_reflected_forces ]) if forces is not None else None
all_left_only_dipstr = np.column_stack([ left_only_reflected_dipstr, right_only_reflected_dipstr ]) if dipstr is not None else None
all_left_only_dipvec = np.column_stack([ left_only_reflected_dipvec, right_only_reflected_dipvec ]) if dipvec is not None else None
# add these into the "all right only" grouping
all_right_only_source = np.column_stack([ right_only_bad_source, left_only_reflected_source ])
all_right_only_forces = np.column_stack([ right_only_reflected_forces, left_only_reflected_forces ]) if forces is not None else None
all_right_only_dipstr = np.column_stack([ right_only_reflected_dipstr, left_only_reflected_dipstr ]) if dipstr is not None else None
all_right_only_dipvec = np.column_stack([ right_only_reflected_dipvec, left_only_reflected_dipvec ]) if dipvec is not None else None
# add these into the "all bottom only" grouping
all_bottom_only_source = np.column_stack([ bottom_only_bad_source, top_only_reflected_source ])
all_bottom_only_forces = np.column_stack([ bottom_only_reflected_forces, top_only_reflected_forces ]) if forces is not None else None
all_bottom_only_dipstr = np.column_stack([ bottom_only_reflected_dipstr, top_only_reflected_dipstr ]) if dipstr is not None else None
all_bottom_only_dipvec = np.column_stack([ bottom_only_reflected_dipvec, top_only_reflected_dipvec ]) if dipvec is not None else None
# add these into the "all top only" grouping
all_top_only_source = np.column_stack([ top_only_bad_source, bottom_only_reflected_source ])
all_top_only_forces = np.column_stack([ top_only_reflected_forces, bottom_only_reflected_forces ]) if forces is not None else None
all_top_only_dipstr = np.column_stack([ top_only_reflected_dipstr, bottom_only_reflected_dipstr ]) if dipstr is not None else None
all_top_only_dipvec = np.column_stack([ top_only_reflected_dipvec, bottom_only_reflected_dipvec ]) if dipvec is not None else None
# add these into the "left bottom" grouping
all_left_bottom_source = np.column_stack([ left_bottom_bad_source, left_top_lb, right_top_lb, right_bottom_lb, ])
all_left_bottom_forces = np.column_stack([ left_bottom_reflected_forces, left_top_reflected_forces, right_top_reflected_forces, right_bottom_reflected_forces ]) if forces is not None else None
all_left_bottom_dipstr = np.column_stack([ left_bottom_reflected_dipstr, left_top_reflected_dipstr, right_top_reflected_dipstr, right_bottom_reflected_dipstr ]) if dipstr is not None else None
all_left_bottom_dipvec = np.column_stack([ left_bottom_reflected_dipvec, left_top_reflected_dipvec, right_top_reflected_dipvec, right_bottom_reflected_dipvec ]) if dipvec is not None else None
# add these into the "right bottom" grouping
all_right_bottom_source = np.column_stack([ right_bottom_bad_source, right_top_rb, left_top_rb, left_bottom_rb, ])
all_right_bottom_forces = np.column_stack([ right_bottom_reflected_forces, right_top_reflected_forces, left_top_reflected_forces, left_bottom_reflected_forces ]) if forces is not None else None
all_right_bottom_dipstr = np.column_stack([ right_bottom_reflected_dipstr, right_top_reflected_dipstr, left_top_reflected_dipstr, left_bottom_reflected_dipstr ]) if dipstr is not None else None
all_right_bottom_dipvec = np.column_stack([ right_bottom_reflected_dipvec, right_top_reflected_dipvec, left_top_reflected_dipvec, left_bottom_reflected_dipvec ]) if dipvec is not None else None
# add these into the "right top" grouping
all_right_top_source = np.column_stack([ right_top_bad_source, left_top_rt, left_bottom_rt, right_bottom_rt, ])
all_right_top_forces = np.column_stack([ right_top_reflected_forces, left_top_reflected_forces, left_bottom_reflected_forces, right_bottom_reflected_forces ]) if forces is not None else None
all_right_top_dipstr = np.column_stack([ right_top_reflected_dipstr, left_top_reflected_dipstr, left_bottom_reflected_dipstr, right_bottom_reflected_dipstr ]) if dipstr is not None else None
all_right_top_dipvec = np.column_stack([ right_top_reflected_dipvec, left_top_reflected_dipvec, left_bottom_reflected_dipvec, right_bottom_reflected_dipvec ]) if dipvec is not None else None
# add these into the "left top" grouping
all_left_top_source = np.column_stack([ left_top_bad_source, left_bottom_lt, right_bottom_lt, right_top_lt, ])
all_left_top_forces = np.column_stack([ left_top_reflected_forces, left_bottom_reflected_forces, right_bottom_reflected_forces, right_top_reflected_forces ]) if forces is not None else None
all_left_top_dipstr = np.column_stack([ left_top_reflected_dipstr, left_bottom_reflected_dipstr, right_bottom_reflected_dipstr, right_top_reflected_dipstr ]) if dipstr is not None else None
all_left_top_dipvec = np.column_stack([ left_top_reflected_dipvec, left_bottom_reflected_dipvec, right_bottom_reflected_dipvec, right_top_reflected_dipvec ]) if dipvec is not None else None
# sources / forces / dipstrs / dipvecs for direct portion of FMM
lbr_sr = left_bottom_reflected_source
rbr_sr = right_bottom_reflected_source
rtr_sr = right_top_reflected_source
ltr_sr = left_top_reflected_source
lbr_ch = np.tile(left_bottom_reflected_forces, (1,3)) if forces is not None else None
rbr_ch = np.tile(right_bottom_reflected_forces, (1,3)) if forces is not None else None
rtr_ch = np.tile(right_top_reflected_forces, (1,3)) if forces is not None else None
ltr_ch = np.tile(left_top_reflected_forces, (1,3)) if forces is not None else None
lbr_ds = np.tile(left_bottom_reflected_dipstr, (1,3)) if dipstr is not None else None
rbr_ds = np.tile(right_bottom_reflected_dipstr, (1,3)) if dipstr is not None else None
rtr_ds = np.tile(right_top_reflected_dipstr, (1,3)) if dipstr is not None else None
ltr_ds = np.tile(left_top_reflected_dipstr, (1,3)) if dipstr is not None else None
lbr_dv = np.tile(left_bottom_reflected_dipvec, (1,3)) | |
for the training data (combining X, Y, treatment).
dataMissing: boolean, optional (default = False)
An indicator for if data are missing or not.
Returns
-------
tree.results, tree.upliftScore :
The results in the leaf node.
'''
def classifyWithoutMissingData(observations, tree):
    '''
    Classify a single observation by walking the tree, assuming no
    missing feature values.

    Args
    ----
    observations : list
        Feature values of one observation, indexed by tree.col.
    tree : object
        Root node of the (sub)tree to evaluate.

    Returns
    -------
    tree.results, tree.upliftScore :
        The results in the leaf node reached by the observation.
    '''
    # Iterative descent: keep stepping into the matching branch until a
    # leaf (a node with non-None results) is reached.
    node = tree
    while node.results is None:
        feature_value = observations[node.col]
        if isinstance(feature_value, (int, float)):
            # Numeric split: true-branch when value >= threshold.
            go_true = feature_value >= node.value
        else:
            # Categorical split: true-branch on exact equality.
            go_true = feature_value == node.value
        node = node.trueBranch if go_true else node.falseBranch
    return node.results, node.upliftScore
def classifyWithMissingData(observations, tree):
    '''
    Classify a single observation by walking the tree, tolerating missing
    (None) feature values.

    When the split feature is missing, both branches are evaluated and
    their result dictionaries are blended, each side weighted by its share
    of the total observation count.

    Args
    ----
    observations : list
        Feature values of one observation; entries may be None.
    tree : object
        Root node of the (sub)tree to evaluate.

    Returns
    -------
    dict
        The results of the leaf node reached (or the weighted blend of
        several leaves when values were missing).
    '''
    if tree.results is not None:  # leaf node
        return tree.results

    feature_value = observations[tree.col]
    if feature_value is None:
        # Missing value: descend both branches and merge the outcomes.
        true_result = classifyWithMissingData(observations, tree.trueBranch)
        false_result = classifyWithMissingData(observations, tree.falseBranch)
        true_total = sum(true_result.values())
        false_total = sum(false_result.values())
        true_weight = float(true_total) / (true_total + false_total)
        false_weight = float(false_total) / (true_total + false_total)
        # defaultdict lets each key accumulate from either side.
        # Problem description: http://blog.ludovf.net/python-collections-defaultdict/
        merged = defaultdict(int)
        for key, count in true_result.items():
            merged[key] += count * true_weight
        for key, count in false_result.items():
            merged[key] += count * false_weight
        return dict(merged)

    # Value present: pick the branch exactly as the non-missing classifier does.
    if isinstance(feature_value, (int, float)):
        chosen = tree.trueBranch if feature_value >= tree.value else tree.falseBranch
    else:
        chosen = tree.trueBranch if feature_value == tree.value else tree.falseBranch
    return classifyWithMissingData(observations, chosen)
# function body
if dataMissing:
return classifyWithMissingData(observations, tree)
else:
return classifyWithoutMissingData(observations, tree)
def cat_group(dfx, kpix, n_group=10):
    '''
    Category Reduction for Categorical Variables.

    Keeps the ``n_group`` most frequent category values of column ``kpix``
    and relabels every other value as ``"Other"``. If the column already has
    at most ``n_group`` distinct values, it is returned unchanged.

    Note: when reduction happens, the column is modified in place in ``dfx``
    as well as returned.

    Args
    ----
    dfx : dataframe
        The inputs data dataframe.
    kpix : string
        The column of the feature.
    n_group : int, optional (default = 10)
        The number of top category values to be remained, other category
        values will be put into "Other".

    Returns
    -------
    The transformed categorical feature value array.
    '''
    if dfx[kpix].nunique() > n_group:
        # Mask of rows whose value is among the n_group most frequent ones.
        top = dfx[kpix].isin(dfx[kpix].value_counts().index[:n_group])
        dfx.loc[~top, kpix] = "Other"
    # Both the reduced and the untouched case return the same expression,
    # so the duplicated if/else return branches were collapsed.
    return dfx[kpix].values
def cat_transform(dfx, kpix, kpi1):
    '''
    Encoding string features.

    One-hot encodes column ``kpix`` and appends the resulting dummy columns
    (named ``<kpix>_<value>``) to the dataframe and to the feature list,
    removing the original feature name from the list.

    Args
    ----
    dfx : dataframe
        The inputs data dataframe.
    kpix : string
        The column of the feature.
    kpi1 : list
        The list of feature names.

    Returns
    -------
    dfx : DataFrame
        The updated dataframe containing the encoded data.
    kpi1 : list
        The updated feature names containing the new dummy feature names.
    '''
    dummies = pd.get_dummies(dfx[kpix].values)
    # Prefix each dummy column with the original feature name.
    dummies.columns = ['%s_%s' % (kpix, col) for col in dummies.columns]
    dfx = pd.concat([dfx, dummies], axis=1)
    # Track the new dummy names in the feature list (no duplicates) ...
    for col_name in dummies.columns:
        if col_name not in kpi1:
            kpi1.append(col_name)
    # ... and drop the now-encoded original feature.
    if kpix in kpi1:
        kpi1.remove(kpix)
    return dfx, kpi1
def cv_fold_index(n, i, k, random_seed=2018):
    '''
    Compute the row indices that fall into fold ``i`` of a random k-fold split.

    Each of the ``n`` rows is independently assigned to one of ``k`` folds
    at random, so fold sizes are only approximately equal. (The previous
    docstring was an unrelated copy-paste from ``cat_transform``.)

    Args
    ----
    n : int
        Total number of rows.
    i : int
        Index of the fold to select (0 <= i < k).
    k : int
        Number of folds.
    random_seed : int, optional (default = 2018)
        Seed for the fold assignment, making the split reproducible.

    Returns
    -------
    fold_i_index : ndarray
        Array of row indices assigned to fold ``i``.
    '''
    # Seeding here makes every call with the same arguments reproducible
    # (note: this mutates numpy's global RNG state).
    np.random.seed(random_seed)
    rlist = np.random.choice(a=range(k), size=n, replace=True)
    fold_i_index = np.where(rlist == i)[0]
    return fold_i_index
# Categorize continuous variable
def cat_continuous(x, granularity='Medium'):
    '''
    Categorize (bin) continuous variable based on percentile.

    Args
    ----
    x : list
        Feature values.
    granularity : string, optional, (default = 'Medium')
        Control the granularity of the bins, optional values are:
        'High' (5%-wide bins), 'Medium' (10%-wide bins),
        'Low' (4 coarse bins).

    Returns
    -------
    res : list
        List of percentile-bin labels, one per feature value.
    '''
    def _bin_labels(values, quantiles):
        '''Label each value by the first quantile threshold it falls under.'''
        thresholds = [np.percentile(values, q) for q in quantiles]
        labels = []
        for z in values:
            for q, t in zip(quantiles, thresholds):
                if z <= t:
                    labels.append('<= p%s (%s)' % (q, t))
                    break
            else:
                # Above every threshold.
                labels.append('> p%s (%s)' % (quantiles[-1], thresholds[-1]))
        return labels

    if granularity == 'High':
        # BUG FIX: the old code computed 20 percentiles (p5..p95, p99) but
        # then binned on indices 0-8 (i.e. p5..p45) while *labelling* them
        # p10..p90 — labels and thresholds disagreed and the fine grid was
        # unused. 'High' now genuinely bins at every 5th percentile with
        # labels that match the thresholds.
        res = _bin_labels(x, list(range(5, 100, 5)))
    elif granularity == 'Medium':
        # Decile bins: <= p10, <= p20, ..., <= p90, > p90.
        res = _bin_labels(x, list(range(10, 100, 10)))
    else:
        # 'Low': four coarse, human-readable buckets.
        lsp = [np.percentile(x, 15), np.percentile(x, 50), np.percentile(x, 85)]
        res = ['1-Very Low' if z < lsp[0] else
               '2-Low' if z < lsp[1] else
               '3-High' if z < lsp[2] else
               '4-Very High' for z in x]
    return res
def kpi_transform(dfx, kpi_combo, kpi_combo_new):
    '''
    Feature transformation from continuous feature to binned features for a
    list of features.

    String features are cardinality-reduced with ``cat_group``; numeric
    features are percentile-binned with ``cat_continuous`` ('Low'
    granularity when several features are transformed together, 'High'
    when there is only one).

    Args
    ----
    dfx : DataFrame
        DataFrame containing the features.
    kpi_combo : list of string
        List of feature names to be transformed.
    kpi_combo_new : list of string
        List of new feature names to be assigned to the transformed features.

    Returns
    -------
    dfx : DataFrame
        Updated DataFrame containing the new features.
    '''
    for j, original_name in enumerate(kpi_combo):
        new_name = kpi_combo_new[j]
        # type(...) == str (not isinstance) kept on purpose: numpy string
        # scalars subclass str and must keep taking the numeric path.
        if type(dfx[original_name].values[0]) == str:
            dfx[new_name] = dfx[original_name].values
            dfx[new_name] = cat_group(dfx=dfx, kpix=new_name)
        else:
            granularity = 'Low' if len(kpi_combo) > 1 else 'High'
            dfx[new_name] = cat_continuous(
                dfx[original_name].values, granularity=granularity
            )
    return dfx
# Uplift Random Forests
class UpliftRandomForestClassifier:
""" Uplift Random Forest for Classification Task.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the uplift random forest.
evaluationFunction : string
Choose from one of the models: 'KL', 'ED', 'Chi', 'CTS'.
max_features: int, optional (default=10)
The number of features to consider when looking for the best split.
random_state: int, optional (default=2019)
The seed used by the random number generator.
max_depth: int, optional (default=5)
The maximum depth of the tree.
min_samples_leaf: int, optional (default=100)
The minimum number of samples required to be split at a leaf node.
min_samples_treatment: int, optional (default=10)
The minimum number of samples required of the experiment group to be split at a leaf node.
n_reg: int, optional (default=10)
The regularization parameter defined in Rzepakowski et al. 2012, the
weight (in terms of sample size) of the parent node influence on the
child node, only effective for 'KL', 'ED', 'Chi', 'CTS' methods.
control_name: string
The name of the control group (other experiment groups will be regarded as treatment groups)
normalization: boolean, optional (default=True)
The normalization factor defined in Rzepakowski et al. 2012,
correcting for tests with large number of splits and imbalanced
treatment and control splits
Outputs
----------
df_res: pandas dataframe
A user-level results dataframe containing the estimated individual treatment effect.
"""
def __init__(self,
n_estimators=10,
max_features=10,
random_state=2019,
max_depth=5,
min_samples_leaf=100,
min_samples_treatment=10,
n_reg=10,
evaluationFunction=None,
control_name=None,
normalization=True):
"""
Initialize the UpliftRandomForestClassifier class.
"""
self.classes_ = {}
self.n_estimators = n_estimators
self.max_features = max_features
self.random_state = random_state
self.max_depth = max_depth
self.min_samples_leaf = min_samples_leaf
self.min_samples_treatment = min_samples_treatment
self.n_reg = | |
a callback "
"interface",
[self.implementor.location])
if not isinstance(implementee, IDLInterface):
raise WebIDLError("Right-hand side of 'implements' is not an "
"interface",
[self.implementee.location])
if implementee.isCallback():
raise WebIDLError("Right-hand side of 'implements' is a callback "
"interface",
[self.implementee.location])
implementor.addImplementedInterface(implementee)
    def validate(self):
        # Nothing to validate here; consistency of the implements statement
        # is checked elsewhere (presumably during finish()) — confirm.
        pass

    def addExtendedAttributes(self, attrs):
        # 'implements' statements do not accept extended attributes.
        assert len(attrs) == 0
class IDLExtendedAttribute(IDLObject):
    """
    A class to represent IDL extended attributes so we can give them locations
    """
    def __init__(self, location, tuple):
        IDLObject.__init__(self, location)
        # Raw parse tuple: (identifier[, value-or-args[, args]]).
        self._tuple = tuple

    def identifier(self):
        # The attribute name is always the first element.
        return self._tuple[0]

    def noArguments(self):
        # Bare [Attr]: nothing beyond the identifier.
        return 1 == len(self._tuple)

    def hasValue(self):
        # [Attr=Value]: a string sits in the second slot.
        return len(self._tuple) > 1 and isinstance(self._tuple[1], str)

    def value(self):
        assert(self.hasValue())
        return self._tuple[1]

    def hasArgs(self):
        # Either [Attr=Value(args)] (three slots) or [Attr(args)]
        # (two slots with a list in the second).
        if len(self._tuple) == 3:
            return True
        return len(self._tuple) == 2 and isinstance(self._tuple[1], list)

    def args(self):
        assert(self.hasArgs())
        # Our args are our last element
        return self._tuple[-1]

    def listValue(self):
        """
        Backdoor for storing random data in _extendedAttrDict
        """
        return list(self._tuple)[1:]
# Parser
class Tokenizer(object):
    """Lexer for WebIDL, built on PLY.

    NOTE: PLY takes each function rule's regex from its docstring and
    matches function rules in definition order, so neither the t_* method
    order nor their docstring patterns may be changed casually.
    """
    tokens = [
        "INTEGER",
        "FLOATLITERAL",
        "IDENTIFIER",
        "STRING",
        "WHITESPACE",
        "OTHER"
        ]

    # Float literals, including Infinity and NaN.
    def t_FLOATLITERAL(self, t):
        r'(-?(([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)([Ee][+-]?[0-9]+)?|[0-9]+[Ee][+-]?[0-9]+|Infinity))|NaN'
        t.value = float(t.value)
        return t

    # Decimal, octal (leading 0) and hex (0x/0X) integers.
    def t_INTEGER(self, t):
        r'-?(0([0-7]+|[Xx][0-9A-Fa-f]+)?|[1-9][0-9]*)'
        try:
            # Can't use int(), because that doesn't handle octal properly.
            t.value = parseInt(t.value)
        except Exception:
            # Narrowed from a bare except so that e.g. KeyboardInterrupt
            # is not swallowed and rewrapped.
            raise WebIDLError("Invalid integer literal",
                              [Location(lexer=self.lexer,
                                        lineno=self.lexer.lineno,
                                        lexpos=self.lexer.lexpos,
                                        filename=self._filename)])
        return t

    # Identifiers; keywords are re-typed through the keywords table.
    def t_IDENTIFIER(self, t):
        r'[A-Z_a-z][0-9A-Z_a-z]*'
        t.type = self.keywords.get(t.value, 'IDENTIFIER')
        return t

    # Double-quoted string literals (WebIDL has no escape sequences).
    def t_STRING(self, t):
        r'"[^"]*"'
        t.value = t.value[1:-1]
        return t

    # Whitespace and //- or /* */-style comments are discarded.
    def t_WHITESPACE(self, t):
        r'[\t\n\r ]+|[\t\n\r ]*((//[^\n]*|/\*.*?\*/)[\t\n\r ]*)+'
        pass

    def t_ELLIPSIS(self, t):
        r'\.\.\.'
        t.type = self.keywords.get(t.value)
        return t

    # Any other single character (punctuation); re-typed via keywords.
    def t_OTHER(self, t):
        r'[^\t\n\r 0-9A-Z_a-z]'
        t.type = self.keywords.get(t.value, 'OTHER')
        return t

    # Maps keyword / punctuation spellings to token type names.
    keywords = {
        "module": "MODULE",
        "interface": "INTERFACE",
        "partial": "PARTIAL",
        "dictionary": "DICTIONARY",
        "exception": "EXCEPTION",
        "enum": "ENUM",
        "callback": "CALLBACK",
        "typedef": "TYPEDEF",
        "implements": "IMPLEMENTS",
        "const": "CONST",
        "null": "NULL",
        "true": "TRUE",
        "false": "FALSE",
        "serializer": "SERIALIZER",
        "stringifier": "STRINGIFIER",
        "jsonifier": "JSONIFIER",
        "unrestricted": "UNRESTRICTED",
        "attribute": "ATTRIBUTE",
        "readonly": "READONLY",
        "inherit": "INHERIT",
        "static": "STATIC",
        "getter": "GETTER",
        "setter": "SETTER",
        "creator": "CREATOR",
        "deleter": "DELETER",
        "legacycaller": "LEGACYCALLER",
        # "optional" appeared twice in this literal; the duplicate entry
        # (identical value, no behavioral effect) was removed.
        "optional": "OPTIONAL",
        "...": "ELLIPSIS",
        "::": "SCOPE",
        "Date": "DATE",
        "DOMString": "DOMSTRING",
        "ByteString": "BYTESTRING",
        "any": "ANY",
        "boolean": "BOOLEAN",
        "byte": "BYTE",
        "double": "DOUBLE",
        "float": "FLOAT",
        "long": "LONG",
        "object": "OBJECT",
        "octet": "OCTET",
        "sequence": "SEQUENCE",
        "short": "SHORT",
        "unsigned": "UNSIGNED",
        "void": "VOID",
        ":": "COLON",
        ";": "SEMICOLON",
        "{": "LBRACE",
        "}": "RBRACE",
        "(": "LPAREN",
        ")": "RPAREN",
        "[": "LBRACKET",
        "]": "RBRACKET",
        "?": "QUESTIONMARK",
        ",": "COMMA",
        "=": "EQUALS",
        "<": "LT",
        ">": "GT",
        "ArrayBuffer": "ARRAYBUFFER",
        "or": "OR"
        }

    tokens.extend(keywords.values())

    def t_error(self, t):
        # BUG FIX: this used self.filename, which is never assigned
        # anywhere (every other rule uses self._filename), so a lex error
        # raised AttributeError instead of the intended WebIDLError.
        raise WebIDLError("Unrecognized Input",
                          [Location(lexer=self.lexer,
                                    lineno=self.lexer.lineno,
                                    lexpos=self.lexer.lexpos,
                                    filename=self._filename)])

    def __init__(self, outputdir, lexer=None):
        # Reuse an existing lexer if given; otherwise build one, writing
        # the generated lex table ('webidllex') into outputdir.
        if lexer:
            self.lexer = lexer
        else:
            self.lexer = lex.lex(object=self,
                                 outputdir=outputdir,
                                 lextab='webidllex',
                                 reflags=re.DOTALL)
class Parser(Tokenizer):
    def getLocation(self, p, i):
        # Build a Location for symbol i of production p, for error reporting.
        return Location(self.lexer, p.lineno(i), p.lexpos(i), self._filename)

    def globalScope(self):
        # The global scope that all parsed definitions are registered in.
        return self._globalScope
# The p_Foo functions here must match the WebIDL spec's grammar.
# It's acceptable to split things at '|' boundaries.
    def p_Definitions(self, p):
        """
        Definitions : ExtendedAttributeList Definition Definitions
        """
        # Prepend this definition (with its extended attributes attached)
        # to the list produced by the recursive tail.
        if p[2]:
            p[0] = [p[2]]
            p[2].addExtendedAttributes(p[1])
        else:
            assert not p[1]
            p[0] = []

        p[0].extend(p[3])

    def p_DefinitionsEmpty(self, p):
        """
        Definitions :
        """
        # End of the definitions list.
        p[0] = []

    def p_Definition(self, p):
        """
        Definition : CallbackOrInterface
                   | PartialInterface
                   | Dictionary
                   | Exception
                   | Enum
                   | Typedef
                   | ImplementsStatement
        """
        p[0] = p[1]
        assert p[1]  # We might not have implemented something ...

    def p_CallbackOrInterfaceCallback(self, p):
        """
        CallbackOrInterface : CALLBACK CallbackRestOrInterface
        """
        # A "callback interface" is an IDLInterface with the callback flag
        # set; a plain callback function passes through unchanged.
        if p[2].isInterface():
            assert isinstance(p[2], IDLInterface)
            p[2].setCallback(True)

        p[0] = p[2]

    def p_CallbackOrInterfaceInterface(self, p):
        """
        CallbackOrInterface : Interface
        """
        p[0] = p[1]

    def p_CallbackRestOrInterface(self, p):
        """
        CallbackRestOrInterface : CallbackRest
                                | Interface
        """
        assert p[1]
        p[0] = p[1]
    def p_Interface(self, p):
        """
        Interface : INTERFACE IDENTIFIER Inheritance LBRACE InterfaceMembers RBRACE SEMICOLON
        """
        location = self.getLocation(p, 1)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
        members = p[5]
        parent = p[3]

        # If the name was already declared (by a forward declaration or a
        # partial interface), upgrade the existing object in place instead
        # of creating a new one.
        try:
            if self.globalScope()._lookupIdentifier(identifier):
                p[0] = self.globalScope()._lookupIdentifier(identifier)
                if not isinstance(p[0], IDLInterface):
                    raise WebIDLError("Partial interface has the same name as "
                                      "non-interface object",
                                      [location, p[0].location])
                p[0].setNonPartial(location, parent, members)
                return
        except Exception, ex:
            if isinstance(ex, WebIDLError):
                raise ex
            # Any other lookup failure just means the name is new.
            pass

        p[0] = IDLInterface(location, self.globalScope(), identifier, parent,
                            members, isPartial=False)

    def p_InterfaceForwardDecl(self, p):
        """
        Interface : INTERFACE IDENTIFIER SEMICOLON
        """
        location = self.getLocation(p, 1)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])

        # A repeated forward declaration of the same external interface is
        # allowed; anything else with the same name is a collision.
        try:
            if self.globalScope()._lookupIdentifier(identifier):
                p[0] = self.globalScope()._lookupIdentifier(identifier)
                if not isinstance(p[0], IDLExternalInterface):
                    raise WebIDLError("Name collision between external "
                                      "interface declaration for identifier "
                                      "%s and %s" % (identifier.name, p[0]),
                                      [location, p[0].location])
                return
        except Exception, ex:
            if isinstance(ex, WebIDLError):
                raise ex
            pass

        p[0] = IDLExternalInterface(location, self.globalScope(), identifier)

    def p_PartialInterface(self, p):
        """
        PartialInterface : PARTIAL INTERFACE IDENTIFIER LBRACE InterfaceMembers RBRACE SEMICOLON
        """
        location = self.getLocation(p, 2)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3])
        members = p[5]

        try:
            if self.globalScope()._lookupIdentifier(identifier):
                p[0] = self.globalScope()._lookupIdentifier(identifier)
                if not isinstance(p[0], IDLInterface):
                    raise WebIDLError("Partial interface has the same name as "
                                      "non-interface object",
                                      [location, p[0].location])
                # Just throw our members into the existing IDLInterface. If we
                # have extended attributes, those will get added to it
                # automatically.
                p[0].members.extend(members)
                return
        except Exception, ex:
            if isinstance(ex, WebIDLError):
                raise ex
            pass

        p[0] = IDLInterface(location, self.globalScope(), identifier, None,
                            members, isPartial=True)
        pass
    def p_Inheritance(self, p):
        """
        Inheritance : COLON ScopedName
        """
        # The parent is resolved to a real interface in a later pass.
        p[0] = IDLIdentifierPlaceholder(self.getLocation(p, 2), p[2])

    def p_InheritanceEmpty(self, p):
        """
        Inheritance :
        """
        # No parent: p[0] is implicitly None.
        pass

    def p_InterfaceMembers(self, p):
        """
        InterfaceMembers : ExtendedAttributeList InterfaceMember InterfaceMembers
        """
        p[0] = [p[2]] if p[2] else []

        assert not p[1] or p[2]
        p[2].addExtendedAttributes(p[1])

        p[0].extend(p[3])

    def p_InterfaceMembersEmpty(self, p):
        """
        InterfaceMembers :
        """
        p[0] = []

    def p_InterfaceMember(self, p):
        """
        InterfaceMember : Const
                        | AttributeOrOperation
        """
        p[0] = p[1]

    def p_Dictionary(self, p):
        """
        Dictionary : DICTIONARY IDENTIFIER Inheritance LBRACE DictionaryMembers RBRACE SEMICOLON
        """
        location = self.getLocation(p, 1)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
        members = p[5]
        p[0] = IDLDictionary(location, self.globalScope(), identifier, p[3], members)

    def p_DictionaryMembers(self, p):
        """
        DictionaryMembers : ExtendedAttributeList DictionaryMember DictionaryMembers
                          |
        """
        if len(p) == 1:
            # We're at the end of the list
            p[0] = []
            return

        # Add our extended attributes
        p[2].addExtendedAttributes(p[1])
        p[0] = [p[2]]
        p[0].extend(p[3])

    def p_DictionaryMember(self, p):
        """
        DictionaryMember : Type IDENTIFIER DefaultValue SEMICOLON
        """
        # These quack a lot like optional arguments, so just treat them that way.
        t = p[1]
        assert isinstance(t, IDLType)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
        defaultValue = p[3]

        p[0] = IDLArgument(self.getLocation(p, 2), identifier, t, optional=True,
                           defaultValue=defaultValue, variadic=False,
                           dictionaryMember=True)

    def p_DefaultValue(self, p):
        """
        DefaultValue : EQUALS ConstValue
                     |
        """
        # None when no default was written.
        if len(p) > 1:
            p[0] = p[2]
        else:
            p[0] = None
    def p_Exception(self, p):
        """
        Exception : EXCEPTION IDENTIFIER Inheritance LBRACE ExceptionMembers RBRACE SEMICOLON
        """
        # Exceptions are parsed but not modeled.
        pass

    def p_Enum(self, p):
        """
        Enum : ENUM IDENTIFIER LBRACE EnumValueList RBRACE SEMICOLON
        """
        location = self.getLocation(p, 1)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])

        values = p[4]
        assert values
        p[0] = IDLEnum(location, self.globalScope(), identifier, values)

    def p_EnumValueList(self, p):
        """
        EnumValueList : STRING EnumValueListComma
        """
        p[0] = [p[1]]
        p[0].extend(p[2])

    def p_EnumValueListComma(self, p):
        """
        EnumValueListComma : COMMA EnumValueListString
        """
        p[0] = p[2]

    def p_EnumValueListCommaEmpty(self, p):
        """
        EnumValueListComma :
        """
        p[0] = []

    def p_EnumValueListString(self, p):
        """
        EnumValueListString : STRING EnumValueListComma
        """
        p[0] = [p[1]]
        p[0].extend(p[2])

    def p_EnumValueListStringEmpty(self, p):
        """
        EnumValueListString :
        """
        # Allows a trailing comma in the enum value list.
        p[0] = []

    def p_CallbackRest(self, p):
        """
        CallbackRest : IDENTIFIER EQUALS ReturnType LPAREN ArgumentList RPAREN SEMICOLON
        """
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
        p[0] = IDLCallbackType(self.getLocation(p, 1), self.globalScope(),
                               identifier, p[3], p[5])

    def p_ExceptionMembers(self, p):
        """
        ExceptionMembers : ExtendedAttributeList ExceptionMember ExceptionMembers
                         |
        """
        # Exception members are parsed but not modeled.
        pass

    def p_Typedef(self, p):
        """
        Typedef : TYPEDEF Type IDENTIFIER SEMICOLON
        """
        typedef = IDLTypedefType(self.getLocation(p, 1), p[2], p[3])
        # Register the new type name in the global scope immediately.
        typedef.resolve(self.globalScope())
        p[0] = typedef

    def p_ImplementsStatement(self, p):
        """
        ImplementsStatement : ScopedName IMPLEMENTS ScopedName SEMICOLON
        """
        assert(p[2] == "implements")
        # Both sides stay placeholders until identifier resolution runs.
        implementor = IDLIdentifierPlaceholder(self.getLocation(p, 1), p[1])
        implementee = IDLIdentifierPlaceholder(self.getLocation(p, 3), p[3])
        p[0] = IDLImplementsStatement(self.getLocation(p, 1), implementor,
                                      implementee)
    def p_Const(self, p):
        """
        Const : CONST ConstType IDENTIFIER EQUALS ConstValue SEMICOLON
        """
        location = self.getLocation(p, 1)
        type = p[2]
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3])
        value = p[5]
        p[0] = IDLConst(location, identifier, type, value)

    def p_ConstValueBoolean(self, p):
        """
        ConstValue : BooleanLiteral
        """
        location = self.getLocation(p, 1)
        booleanType = BuiltinTypes[IDLBuiltinType.Types.boolean]
        p[0] = IDLValue(location, booleanType, p[1])

    def p_ConstValueInteger(self, p):
        """
        ConstValue : INTEGER
        """
        location = self.getLocation(p, 1)

        # We don't know ahead of time what type the integer literal is.
        # Determine the smallest type it could possibly fit in and use that.
        integerType = matchIntegerValueToType(p[1])
        if integerType == None:
            raise WebIDLError("Integer literal out of range", [location])

        p[0] = IDLValue(location, integerType, p[1])
def p_ConstValueFloat(self, p):
"""
ConstValue : FLOATLITERAL
"""
location | |
# <gh_stars>10-100  (scraper metadata; kept as a comment so the module parses)
# Container node
class NodoArbol:
    """One node of a 2-3 tree.

    Attributes:
        Valores: three [key, value] slots; unused slots hold [-1, ""].
        Hijos: four child slots (None when absent).
        Anterior: parent node (None for the root).
    """

    def __init__(self, ValorI=None, AnteriorI=None):
        """Create a node, optionally seeded with an initial [key, value] pair
        and a parent node.

        BUG FIX: the old signature used a mutable default ([-1, ""]); a None
        sentinel keeps the same behavior without sharing state between calls.
        The three near-identical construction branches were also collapsed
        into one.
        """
        if ValorI is None:
            ValorI = [-1, ""]
        # Always start with 3 empty value slots and 4 empty child slots.
        self.Valores = [[-1, ""] for _ in range(3)]
        self.Hijos = [None, None, None, None]
        self.Anterior = AnteriorI
        # Store the initial value in slot 0 unless this is a brand-new empty
        # root (sentinel key -1 and no parent), matching the old branches.
        if ValorI[0] != -1 or AnteriorI is not None:
            self.Valores[0] = ValorI
class CRUD_Tuplas:
    def __init__(self):
        # The tree starts empty (no root node).
        self.Raiz = None
        # Column indexes that make up the primary key.
        self.PK = []
        # NOTE(review): purpose of self.values is not visible in this file —
        # confirm before relying on it.
        self.values = []
def dot2(self, Actual, Cadena):
#Creacion del Nodo
Dot = "Nodo" + Cadena + " [shape=plaintext\nlabel=<\n<table border='1' cellborder='1'>\n<tr>"
Last = 0
for i in range(2):
if Actual.Valores[i][0] != -1:
Last = i
#Toma de datos
Dot += "<td port='port_" + str(i) + "'></td><td>" + str(Actual.Valores[i][0]) + "</td>"
Dot += "<td port='port_" + str(Last+1) + "'></td>"
Dot += "</tr>\n</table>\n>];\n\n"
for i in range(3):
if Actual.Hijos[i] != None:
#Conexion con los hijos
Dot += "Nodo" + Cadena + ":port_" + str(i) + " -> Nodo" + Cadena + str(i) + ";\n"
Dot += self.dot2(Actual.Hijos[i], Cadena + str(i))
return Dot
def dot(self):
#Inicio del generador de grafico
Dot = "digraph G {\n"
if self.Raiz != None:
Dot += self.dot2(self.Raiz, "")
Dot += "\n}"
file = open("Diagrama-bloques.dot", "w")
file.write(Dot)
file.close()
#check_call(['dot', '-Tpng', 'Diagrama-bloques.dot', '-o', 'Diagrama-bloques.png'])
def Mostrar2(self, Actual):
print("[",end="")
for i in range(3):
if Actual.Hijos[i] != None:
self.Mostrar2(Actual.Hijos[i])
if Actual.Valores[i][0] != -1:
if i!=0:
print(",",end="")
print(Actual.Valores[i][0],end="")
if Actual.Hijos[3] != None:
self.Mostrar2(Actual.Hijos[2])
print("]",end="")
    def Mostrar(self):
        # Print the whole tree starting from the root, then a blank line.
        # NOTE(review): assumes the tree is non-empty (Raiz is not None) —
        # an empty tree would crash in Mostrar2.
        self.Mostrar2(self.Raiz)
        print("\n")
    def InsertarArriba(self, Actual):
        """Split an overfull node (3 values): push the middle value up into
        the parent and replace Actual with two new child nodes, recursing
        upward while the parent overflows."""
        # Take the parent of the node being split.
        Anterior = Actual.Anterior
        # If we are at the root, there is no parent.
        if Anterior == None:
            # Create a new parent holding the middle value of the current node.
            Anterior = NodoArbol(Actual.Valores[1])
            # The first child takes the smallest value of the current node.
            Hijo0 = NodoArbol(Actual.Valores[0], Anterior)
            # The current node's first two children move to Hijo0.
            Hijo0.Hijos[0] = Actual.Hijos[0]
            Hijo0.Hijos[1] = Actual.Hijos[1]
            # Re-parent the moved children (when present) to Hijo0.
            if Hijo0.Hijos[0] != None:
                Hijo0.Hijos[0].Anterior = Hijo0
            if Hijo0.Hijos[1] != None:
                Hijo0.Hijos[1].Anterior = Hijo0
            # Same procedure for Hijo1, which takes the largest value.
            Hijo1 = NodoArbol(Actual.Valores[2], Anterior)
            # Hijo1 receives children 2 and 3 of the current node.
            Hijo1.Hijos[0] = Actual.Hijos[2]
            Hijo1.Hijos[1] = Actual.Hijos[3]
            if Hijo1.Hijos[0] != None:
                Hijo1.Hijos[0].Anterior = Hijo1
            if Hijo1.Hijos[1] != None:
                Hijo1.Hijos[1].Anterior = Hijo1
            # The new parent adopts Hijo0 and Hijo1 as its children.
            Anterior.Hijos[0] = Hijo0
            Anterior.Hijos[1] = Hijo1
            # The newly created node becomes the root.
            self.Raiz = Anterior
            # This splits the whole node: the middle value moves up and the
            # extremes become two new nodes.
        else:
            # Take the parent's children and values.
            Auxiliar = Anterior.Hijos
            Aux = Anterior.Valores
            # Find which child slot Actual occupies.
            i = 0
            while Auxiliar[i] != Actual:
                i = i + 1
            j = 3
            # Shift children and values after slot i one position right to
            # make room for the promoted value.
            while (j > i):
                if j != 3:
                    Aux[j] = Aux[j - 1]
                Auxiliar[j] = Auxiliar[j - 1]
                j = j - 1
            # Promote the middle value into the freed slot.
            Aux[i] = Actual.Valores[1]
            # The first child takes the smallest value of the current node.
            Hijo0 = NodoArbol(Actual.Valores[0], Anterior)
            # The current node's first two children move to Hijo0.
            Hijo0.Hijos[0] = Actual.Hijos[0]
            Hijo0.Hijos[1] = Actual.Hijos[1]
            # Re-parent the moved children (when present) to Hijo0.
            if Hijo0.Hijos[0] != None:
                Hijo0.Hijos[0].Anterior = Hijo0
            if Hijo0.Hijos[1] != None:
                Hijo0.Hijos[1].Anterior = Hijo0
            # Same procedure for Hijo1.
            Hijo1 = NodoArbol(Actual.Valores[2], Anterior)
            # Hijo1 receives children 2 and 3 of the current node.
            Hijo1.Hijos[0] = Actual.Hijos[2]
            Hijo1.Hijos[1] = Actual.Hijos[3]
            if Hijo1.Hijos[0] != None:
                Hijo1.Hijos[0].Anterior = Hijo1
            if Hijo1.Hijos[1] != None:
                Hijo1.Hijos[1].Anterior = Hijo1
            # The parent gets its two new children in place of Actual.
            Anterior.Hijos[i] = Hijo0
            Anterior.Hijos[i + 1] = Hijo1
            # If the parent is now full, repeat the split one level up.
            if (Aux[2][0] != -1):
                self.InsertarArriba(Anterior)
    def InsertarEnNodo(self, Actual, Nuevo):
        """Insert the [key, value] pair Nuevo into leaf node Actual, keeping
        its values ordered, and split the node if it becomes full.

        NOTE(review): comparisons use str(Nuevo[0]) against the stored key
        directly; this assumes stored keys are strings — confirm.
        """
        # Take the node's value slots.
        Aux = Actual.Valores
        # Node is empty:
        if Aux[0][0] == -1:
            # Insert in position 0.
            Aux[0] = Nuevo
        # Node has one value:
        elif Aux[1][0] == -1:
            # Keep the pair ordered.
            if str(Nuevo[0]) < Aux[0][0]:
                # New key is smaller: shift and insert at the front.
                Aux[1] = Aux[0]
                Aux[0] = Nuevo
            else:
                # New key is larger: it takes position 1.
                Aux[1] = Nuevo
        # Node already has two values:
        else:
            # Insert in sorted position among the three.
            if str(Nuevo[0]) < Aux[0][0]:
                Aux[2] = Aux[1]
                Aux[1] = Aux[0]
                Aux[0] = Nuevo
            elif str(Nuevo[0]) < Aux[1][0]:
                Aux[2] = Aux[1]
                Aux[1] = Nuevo
            else:
                Aux[2] = Nuevo
            # Rebalance now that the node holds three values.
            self.InsertarArriba(Actual)
    def BuscarInsercion(self, Actual, Nuevo):
        """Descend from Actual to the leaf where Nuevo belongs, then insert
        it there via InsertarEnNodo."""
        # Take the current node's children.
        Auxiliar = Actual.Hijos
        # A node without a first child is a leaf:
        if Auxiliar[0] == None:
            # Insert directly into this node.
            self.InsertarEnNodo(Actual, Nuevo)
        else:
            # Take the current node's values.
            Aux = Actual.Valores
            # New key smaller than the smallest value:
            if str(Nuevo[0]) < Aux[0][0]:
                # Go to the left child.
                self.BuscarInsercion(Auxiliar[0], Nuevo)
            # Two values present and the new key is larger than the second:
            elif Aux[1][0] != -1 and Aux[1][0] < str(Nuevo[0]):
                # Insert into the right child.
                self.BuscarInsercion(Auxiliar[2], Nuevo)
            else:
                # Otherwise descend into the middle child.
                self.BuscarInsercion(Auxiliar[1], Nuevo)
    def Insertar(self, Nuevo):
        """Insert a row (list of column values) into the tree.

        A lookup key is computed from the primary-key column indexes in
        self.PK (joined with '$' for composite keys; the first column when
        no PK is set) and prepended to the row as element 0.

        Returns 0 on success, 4 when a composite primary key is duplicated.
        NOTE(review): the duplicate check is skipped when len(self.PK) == 1 —
        possibly an inconsistency; confirm intended behavior.
        """
        # Empty tree: the insert creates the root.
        if self.Raiz == None:
            arreglo = Nuevo
            auxiliar = ""
            if self.PK == []:
                # No primary key yet: use the first column as the key.
                arreglo = Nuevo
                arreglo.insert(0, Nuevo[0])
                self.Raiz = NodoArbol(arreglo)
            elif len(self.PK) == 1:
                # Single-column primary key.
                auxiliar = Nuevo[0]
                arreglo.insert(0, auxiliar)
                self.Raiz = NodoArbol(arreglo)
            else:
                # Composite key: concatenate PK columns separated by '$'.
                for n in self.PK:
                    if n == len(self.PK):
                        auxiliar += str(arreglo[n])
                    else:
                        auxiliar += str(arreglo[n]) + "$"
                arreglo.insert(0, auxiliar)
                self.Raiz = NodoArbol(arreglo)
        else:
            # Non-empty tree.
            if self.PK == []:
                arreglo = Nuevo
                arreglo.insert(0, Nuevo[0])
                # Reject duplicates of the computed key.
                Existe = self.BuscarN(arreglo[0])
                if Existe == None:
                    # Not present: find the leaf and insert.
                    self.BuscarInsercion(self.Raiz, arreglo)
                else:
                    print("Llave repetida")
            elif len(self.PK) == 1:
                arreglo = Nuevo
                arreglo.insert(0, Nuevo[0])
                self.BuscarInsercion(self.Raiz, arreglo)
            else:
                arreglo = Nuevo
                auxiliar = ""
                # Composite key: concatenate PK columns separated by '$'.
                for n in self.PK:
                    if n == len(self.PK):
                        auxiliar += str(arreglo[n])
                    else:
                        auxiliar += str(arreglo[n]) + "$"
                arreglo.insert(0, auxiliar)
                Existe = self.BuscarN(auxiliar)
                if Existe == None:
                    # Not present: find the leaf and insert.
                    self.BuscarInsercion(self.Raiz, arreglo)
                else:
                    return 4
        return 0
    def Buscar2(self, Clave, Actual):
        """Recursive lookup of Clave starting at node Actual.

        Returns the stored entry (the Valores slot) whose key equals
        str(Clave), or None when not found.
        NOTE(review): assumes stored keys are strings (compares against
        str(Clave)) — confirm.
        """
        # If the current node exists:
        if Actual != None:
            Aux = Actual.Valores
            # Key smaller than the first value:
            if str(Clave) < Aux[0][0]:
                # Search the left child.
                return self.Buscar2(Clave, Actual.Hijos[0])
            elif str(Clave) == Aux[0][0]:
                # Match on the first value: return it.
                return Aux[0]
            elif Aux[1][0] != -1 and str(Clave) > Aux[1][0]:
                # Second value exists and key is larger: search right child.
                return self.Buscar2(Clave, Actual.Hijos[2])
            elif Aux[1][0] != -1 and str(Clave) == Aux[1][0]:
                # Match on the second value: return it.
                return Aux[1]
            else:
                # Otherwise search the middle child.
                return self.Buscar2(Clave, Actual.Hijos[1])
        else:
            # Fell off the tree: not found.
            return None
def Buscar(self, Clave):
if self.Raiz == None:
#Si la raiz no existe se regresa none
return None
else:
#Si existe se busca la clave
return self.Buscar2(Clave, self.Raiz)
def BuscarN2(self, Clave, Actual):
#Si el nodo actual existe
if Actual != None:
Aux = Actual.Valores
#Si el valor nuevo es menor al valor 0
if str(Clave) < str(Aux[0][0]):
#Se busca en el hijo '
return self.BuscarN2(Clave, Actual.Hijos[0])
elif str(Clave) == str(Aux[0][0]):
#Si es igual al valor 0 | |
no previous LCD
lcid_cache = getattr(c, 'lcid_cache', '')
if lcid_cache != '' and path in lcid_cache:
return lcid_cache[path]
try:
log_iter = commit.repo.log(commit._id, path, id_only=True, limit=2)
next(log_iter)
rev = next(log_iter)
return commit.repo.rev_to_commit_id(rev)
except StopIteration:
return None
    @classmethod
    def get(cls, tree):
        '''Find or build the LastCommitDoc for the given tree.'''
        # Reuse the request-scoped model cache when one is present on `c`.
        cache = getattr(c, 'model_cache', '') or ModelCache()
        path = tree.path().strip('/')
        last_commit_id = cls._last_commit_id(tree.commit, path)
        lcd = cache.get(cls, {'path': path, 'commit_id': last_commit_id})
        if lcd is None:
            # Not cached: load the commit that last touched `path` and
            # build the record from that commit's view of the tree.
            commit = cache.get(Commit, {'_id': last_commit_id})
            commit.set_context(tree.repo)
            lcd = cls._build(commit.get_path(path))
        return lcd
    @classmethod
    def _build(cls, tree):
        '''
        Build the LCD record, presuming that this tree is where it was most
        recently changed.

        Returns the new (cached) LastCommitDoc instance for `tree`.
        '''
        model_cache = getattr(c, 'model_cache', '') or ModelCache()
        path = tree.path().strip('/')
        entries = []
        prev_lcd = None
        # The LCD of the previous commit that touched this path lets us
        # reuse entries for files unchanged since then.
        prev_lcd_cid = cls._prev_commit_id(tree.commit, path)
        if prev_lcd_cid:
            prev_lcd = model_cache.get(
                cls, {'path': path, 'commit_id': prev_lcd_cid})
        entries = {}
        # All direct children of this tree ...
        nodes = set(
            [node.name for node in chain(tree.tree_ids, tree.blob_ids, tree.other_ids)])
        # ... split into those touched by this commit and the rest.
        changed = set(
            [node for node in nodes if os.path.join(path, node) in tree.commit.changed_paths])
        unchanged = [os.path.join(path, node) for node in nodes - changed]
        if prev_lcd:
            # get unchanged entries from previously computed LCD
            entries = prev_lcd.by_name
        elif unchanged:
            # no previously computed LCD, so get unchanged entries from SCM
            # (but only ask for the ones that we know we need)
            entries = tree.commit.repo.last_commit_ids(tree.commit, unchanged)
            if entries is None:
                # something strange went wrong; still show the list of files
                # and possibly try again later
                entries = {}
            # paths are fully-qualified; shorten them back to just node names
            entries = {
                os.path.basename(path): commit_id for path, commit_id in six.iteritems(entries)}
        # update with the nodes changed in this tree's commit
        entries.update({node: tree.commit._id for node in changed})
        # convert to a list of dicts, since mongo doesn't handle arbitrary keys
        # well (i.e., . and $ not allowed)
        entries = [{'name': name, 'commit_id': value}
                   for name, value in six.iteritems(entries)]
        lcd = cls(
            commit_id=tree.commit._id,
            path=path,
            entries=entries,
        )
        # Cache under this commit so siblings built in the same request hit.
        model_cache.set(cls, {'path': path, 'commit_id': tree.commit._id}, lcd)
        return lcd
@LazyProperty
def by_name(self):
return {n.name: n.commit_id for n in self.entries}
class ModelCache(object):
    '''
    Cache model instances based on query params passed to get.  LRU cache.

    This does more caching than ming sessions (which only cache individual
    objects by _id).

    The added complexity here may be unnecessary premature optimization, but
    should be quite helpful when building up many models in order, like lcd
    _build for a series of several new commits.
    '''

    def __init__(self, max_instances=None, max_queries=None):
        '''
        By default, each model type can have 2000 instances and
        8000 queries.  You can override these for specific model
        types by passing in a dict() for either max_instances or
        max_queries keyed by the class(es) with the max values.
        Classes not in the dict() will use the default 2000/8000
        default.

        If you pass in a number instead of a dict, that value will
        be used as the max for all classes.
        '''
        max_instances_default = 2000
        max_queries_default = 8000
        if isinstance(max_instances, int):
            max_instances_default = max_instances
        if isinstance(max_queries, int):
            max_queries_default = max_queries
        # Per-class limits; defaultdict gives unknown classes the default.
        self._max_instances = defaultdict(lambda: max_instances_default)
        self._max_queries = defaultdict(lambda: max_queries_default)
        if hasattr(max_instances, 'items'):
            self._max_instances.update(max_instances)
        if hasattr(max_queries, 'items'):
            self._max_queries.update(max_queries)

        # keyed by query, holds _id (OrderedDict insertion order == LRU order)
        self._query_cache = defaultdict(OrderedDict)
        self._instance_cache = defaultdict(OrderedDict)  # keyed by _id

    def _normalize_query(self, query):
        # Queries arrive as dicts; turn them into a sorted tuple of items so
        # they are hashable and key-order independent.
        _query = query
        if not isinstance(_query, tuple):
            _query = tuple(sorted(list(_query.items()), key=lambda k: k[0]))
        return _query

    def _model_query(self, cls):
        # Support both ming ORM classes (``.query``) and mapper-style
        # classes (``.m``).
        if hasattr(cls, 'query'):
            return cls.query
        elif hasattr(cls, 'm'):
            return cls.m
        else:
            raise AttributeError(
                '%s has neither "query" nor "m" attribute' % cls)

    def get(self, cls, query):
        '''
        Return the instance of *cls* matching *query*, fetching from the
        backing store and caching it on a miss.  A cached ``None`` means the
        query is known to have no result.
        '''
        _query = self._normalize_query(query)
        self._touch(cls, _query)
        if _query not in self._query_cache[cls]:
            # Query never seen: hit the store and cache the result.
            val = self._model_query(cls).get(**query)
            self.set(cls, _query, val)
            return val
        _id = self._query_cache[cls][_query]
        if _id is None:
            # Cached negative result.
            return None
        if _id not in self._instance_cache[cls]:
            # Query known but the instance was evicted: re-fetch.
            val = self._model_query(cls).get(**query)
            self.set(cls, _query, val)
            return val
        return self._instance_cache[cls][_id]

    def set(self, cls, query, val):
        '''
        Cache *val* as the result of *query*; ``None`` is cached as a
        negative result.
        '''
        _query = self._normalize_query(query)
        if val is not None:
            # Resolve a stable cache id: a synthetic _model_cache_id, the
            # object's _id, or the id already recorded for this query.
            _id = getattr(val, '_model_cache_id',
                          getattr(val, '_id',
                                  self._query_cache[cls].get(_query,
                                                             None)))
            if _id is None:
                # Object has no _id; mint a synthetic one so it can be keyed.
                _id = val._model_cache_id = bson.ObjectId()
            self._query_cache[cls][_query] = _id
            self._instance_cache[cls][_id] = val
        else:
            self._query_cache[cls][_query] = None
        self._touch(cls, _query)
        self._check_sizes(cls)

    def _touch(self, cls, query):
        '''
        Keep track of insertion order, prevent duplicates,
        and expire from the cache in a FIFO manner.
        '''
        _query = self._normalize_query(query)
        if _query not in self._query_cache[cls]:
            return
        # Re-insert to move the query (and its instance) to the
        # most-recently-used end of each OrderedDict.
        _id = self._query_cache[cls].pop(_query)
        self._query_cache[cls][_query] = _id

        if _id not in self._instance_cache[cls]:
            return
        val = self._instance_cache[cls].pop(_id)
        self._instance_cache[cls][_id] = val

    def _check_sizes(self, cls):
        # Evict least-recently-used entries when either limit is exceeded,
        # flushing pending changes before dropping them.
        if self.num_queries(cls) > self._max_queries[cls]:
            _id = self._remove_least_recently_used(self._query_cache[cls])
            if _id in self._instance_cache[cls]:
                instance = self._instance_cache[cls][_id]
                self._try_flush(instance, expunge=False)
        if self.num_instances(cls) > self._max_instances[cls]:
            instance = self._remove_least_recently_used(
                self._instance_cache[cls])
            self._try_flush(instance, expunge=True)

    def _try_flush(self, instance, expunge=False):
        # Flush (and optionally expunge) via the instance's ming session,
        # if it has one.
        try:
            inst_session = session(instance)
        except AttributeError:
            inst_session = None
        if inst_session:
            inst_session.flush(instance)
            if expunge:
                inst_session.expunge(instance)

    def _remove_least_recently_used(self, cache):
        # last-used (most-recently-used) is last in cache, so take first
        key, val = cache.popitem(last=False)
        return val

    def num_queries(self, cls=None):
        # Count cached queries for one class, or across all classes.
        if cls is None:
            return sum([len(c) for c in self._query_cache.values()])
        else:
            return len(self._query_cache[cls])

    def num_instances(self, cls=None):
        # Count cached instances for one class, or across all classes.
        if cls is None:
            return sum([len(c) for c in self._instance_cache.values()])
        else:
            return len(self._instance_cache[cls])

    def instance_ids(self, cls):
        return list(self._instance_cache[cls].keys())

    def batch_load(self, cls, query, attrs=None):
        '''
        Load multiple results given a query.

        Optionally takes a list of attribute names to use
        as the cache key.  If not given, uses the keys of
        the given query.
        '''
        if attrs is None:
            attrs = list(query.keys())
        for result in self._model_query(cls).find(query):
            keys = {a: getattr(result, a) for a in attrs}
            self.set(cls, keys, result)
class GitLikeTree(object):
    '''
    A tree node similar to that which is used in git

    :var dict blobs: files at this level of the tree.  name => oid
    :var dict trees: subtrees (child dirs).  name => GitLikeTree
    '''

    def __init__(self):
        self.blobs = {}  # blobs[name] = oid
        self.trees = defaultdict(GitLikeTree)  # trees[name] = GitLikeTree()
        self._hex = None  # memoized hex() result

    def get_tree(self, path):
        '''Return the subtree at *path*, creating intermediate nodes on demand.'''
        path = h.really_unicode(path)
        if path.startswith('/'):
            path = path[1:]
        if not path:
            return self
        cur = self
        for part in path.split('/'):
            cur = cur.trees[part]
        return cur

    def get_blob(self, path):
        '''Return the oid of the blob at *path* (raises KeyError if absent).'''
        path = h.really_unicode(path)
        if path.startswith('/'):
            path = path[1:]
        path_parts = path.split('/')
        dirpath, last = path_parts[:-1], path_parts[-1]
        cur = self
        for part in dirpath:
            cur = cur.trees[part]
        return cur.blobs[last]

    def set_blob(self, path, oid):
        '''Record blob *oid* at *path*, creating intermediate trees on demand.'''
        path = h.really_unicode(path)
        if path.startswith('/'):
            path = path[1:]
        path_parts = path.split('/')
        dirpath, filename = path_parts[:-1], path_parts[-1]
        cur = self
        for part in dirpath:
            cur = cur.trees[part]
        cur.blobs[filename] = oid

    def hex(self):
        '''Compute a recursive sha1 hash on the tree'''
        # dependent on __repr__ below
        if self._hex is None:
            sha_obj = sha1(b'tree\n' + six.ensure_binary(repr(self)))
            self._hex = sha_obj.hexdigest()
        return self._hex

    def __repr__(self):
        # this can't change, is used in hex() above
        lines = ['t %s %s' % (t.hex(), h.really_unicode(name))
                 for name, t in six.iteritems(self.trees)]
        lines += ['b %s %s' % (oid, h.really_unicode(name))
                  for name, oid in six.iteritems(self.blobs)]
        return six.ensure_str('\n'.join(sorted(lines)))

    def __unicode__(self):
        return self.pretty_tree(recurse=False)

    def pretty_tree(self, indent=0, recurse=True, show_id=True):
        '''For debugging, show a nice tree representation'''
        # BUG FIX: the recursive call used to target a non-existent
        # ``unicode_full_tree`` method (AttributeError for any tree with
        # subtrees).  Recurse into pretty_tree itself, decoding its utf-8
        # bytes result so it can be joined with the surrounding str.
        lines = [' ' * indent + 't %s %s' %
                 (name, '\n' + t.pretty_tree(indent + 2, show_id=show_id).decode('utf-8')
                  if recurse else t.hex())
                 for name, t in sorted(six.iteritems(self.trees))]
        lines += [' ' * indent + 'b %s %s' % (name, oid if show_id else '')
                  for name, oid in sorted(six.iteritems(self.blobs))]
        output = h.really_unicode('\n'.join(lines)).encode('utf-8')
        return output
def topological_sort(graph):
    '''Return the topological sort of a graph.

    The graph is a dict with each entry representing
    a node (the key is the node ID) and its parent(s) (a
    set of node IDs).  Result is an iterator over the topo-sorted
    node IDs.

    The algorithm is based on one seen in
    http://en.wikipedia.org/wiki/Topological_sorting#CITEREFKahn1962

    :raises AssertionError: if the graph contains a cycle (detected when
        unprocessed nodes remain after all roots are exhausted)
    '''
    # FIX: operate on a private copy so the caller's graph dict (and its
    # parent sets) are no longer destructively consumed by the sort.
    remaining = {nid: set(parents) for nid, parents in graph.items()}
    # Index children, identify roots
    children = defaultdict(list)
    roots = []
    for nid, parents in list(remaining.items()):
        if not parents:
            remaining.pop(nid)
            roots.append(nid)
        for p_nid in parents:
            children[p_nid].append(nid)
    # Topo sort: repeatedly emit a root and release its children.
    while roots:
        n = roots.pop()
        yield n
        for child in children[n]:
            remaining[child].remove(n)
            if not remaining[child]:
                remaining.pop(child)
                roots.append(child)
    assert not remaining, 'Cycle detected'
def prefix_paths_union(a, b):
"""
Given two sets of paths, a and b, find the items from a that
are either in b or are parent directories of items in b.
"""
union | |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean CLI v1.0. Copyright 2021 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webbrowser
from typing import List, Tuple, Optional
import click
from lean.click import LeanCommand, ensure_options
from lean.components.api.api_client import APIClient
from lean.components.util.logger import Logger
from lean.container import container
from lean.models.api import (QCEmailNotificationMethod, QCNode, QCNotificationMethod, QCSMSNotificationMethod,
QCWebhookNotificationMethod, QCProject)
from lean.models.brokerages.cloud import all_cloud_brokerages, BinanceBrokerage
from lean.models.brokerages.cloud.base import CloudBrokerage
from lean.models.brokerages.cloud.bitfinex import BitfinexBrokerage
from lean.models.brokerages.cloud.coinbase_pro import CoinbaseProBrokerage
from lean.models.brokerages.cloud.interactive_brokers import InteractiveBrokersBrokerage
from lean.models.brokerages.cloud.oanda import OANDABrokerage
from lean.models.brokerages.cloud.paper_trading import PaperTradingBrokerage
from lean.models.brokerages.cloud.tradier import TradierBrokerage
from lean.models.logger import Option
def _log_notification_methods(methods: List[QCNotificationMethod]) -> None:
    """Logs a list of notification methods.

    :param methods: the notification methods to summarize, grouped by type
    """
    logger = container.logger()

    def _summary(method_type, attribute):
        # Collect the display attribute of every method of the given type.
        values = [getattr(m, attribute) for m in methods if isinstance(m, method_type)]
        return ", ".join(values) if values else "None"

    logger.info(f"Email notifications: {_summary(QCEmailNotificationMethod, 'address')}")
    logger.info(f"Webhook notifications: {_summary(QCWebhookNotificationMethod, 'address')}")
    logger.info(f"SMS notifications: {_summary(QCSMSNotificationMethod, 'phoneNumber')}")
def _prompt_notification_method() -> QCNotificationMethod:
    """Prompts the user to add a notification method.

    :return: the notification method configured by the user
    """
    logger = container.logger()
    choice = logger.prompt_list("Select a notification method", [Option(id="email", label="Email"),
                                                                 Option(id="webhook", label="Webhook"),
                                                                 Option(id="sms", label="SMS")])

    if choice == "email":
        return QCEmailNotificationMethod(address=click.prompt("Email address"),
                                         subject=click.prompt("Subject"))

    if choice == "webhook":
        url = click.prompt("URL")
        headers = {}
        # Keep collecting header key/value pairs until the user declines.
        while True:
            current = "None" if headers == {} else ", ".join(f"{key}={headers[key]}" for key in headers)
            logger.info(f"Headers: {current}")
            if not click.confirm("Do you want to add a header?", default=False):
                break
            key = click.prompt("Header key")
            value = click.prompt("Header value")
            headers[key] = value
        return QCWebhookNotificationMethod(address=url, headers=headers)

    # Remaining option is SMS.
    return QCSMSNotificationMethod(phoneNumber=click.prompt("Phone number"))
def _configure_brokerage(logger: Logger) -> CloudBrokerage:
    """Interactively configures the brokerage to use.

    :param logger: the logger to use
    :return: the cloud brokerage the user configured
    """
    options = [Option(id=brokerage, label=brokerage.get_name()) for brokerage in all_cloud_brokerages]
    selected = logger.prompt_list("Select a brokerage", options)
    return selected.build(logger)
def _configure_live_node(logger: Logger, api_client: APIClient, cloud_project: QCProject) -> QCNode:
    """Interactively configures the live node to use.

    :param logger: the logger to use
    :param api_client: the API client to make API requests with
    :param cloud_project: the cloud project the user wants to start live trading for
    :return: the live node the user wants to start live trading on
    :raises RuntimeError: if the organization has no idle live nodes
    """
    all_nodes = api_client.nodes.get_all(cloud_project.organizationId)
    available = [n for n in all_nodes.live if not n.busy]

    if not available:
        raise RuntimeError(
            f"You don't have any live nodes available, you can manage your nodes on https://www.quantconnect.com/organization/{cloud_project.organizationId}/resources")

    options = [Option(id=n, label=f"{n.name} - {n.description}") for n in available]
    return logger.prompt_list("Select a node", options)
def _configure_notifications(logger: Logger) -> Tuple[bool, bool, List[QCNotificationMethod]]:
    """Interactively configures how and when notifications should be sent.

    :param logger: the logger to use
    :return: whether notifications must be enabled for order events and insights, and the notification methods
    """
    logger.info(
        "You can optionally request for your strategy to send notifications when it generates an order or emits an insight")
    logger.info("You can use any combination of email notifications, webhook notifications and SMS notifications")

    order_events = click.confirm("Do you want to send notifications on order events?", default=False)
    insights = click.confirm("Do you want to send notifications on insights?", default=False)

    methods = []
    if order_events or insights:
        # At least one method is required; show the (empty) current state first.
        _log_notification_methods(methods)
        methods.append(_prompt_notification_method())

        keep_adding = True
        while keep_adding:
            _log_notification_methods(methods)
            keep_adding = click.confirm("Do you want to add another notification method?", default=False)
            if keep_adding:
                methods.append(_prompt_notification_method())

    return order_events, insights, methods
def _configure_auto_restart(logger: Logger) -> bool:
    """Interactively configures whether automatic algorithm restarting must be enabled.

    :param logger: the logger to use
    :return: whether automatic algorithm restarting must be enabled
    """
    for line in ("Automatic restarting uses best efforts to restart the algorithm if it fails due to a runtime error",
                 "This can help improve its resilience to temporary errors such as a brokerage API disconnection"):
        logger.info(line)
    return click.confirm("Do you want to enable automatic algorithm restarting?", default=True)
@click.command(cls=LeanCommand)
@click.argument("project", type=str)
@click.option("--brokerage",
type=click.Choice([b.get_name() for b in all_cloud_brokerages], case_sensitive=False),
help="The brokerage to use")
@click.option("--ib-user-name", type=str, help="Your Interactive Brokers username")
@click.option("--ib-account", type=str, help="Your Interactive Brokers account id")
@click.option("--ib-password", type=str, help="Your Interactive Brokers password")
@click.option("--ib-data-feed",
type=bool,
help="Whether the Interactive Brokers price data feed must be used instead of the QuantConnect price data feed")
@click.option("--tradier-account-id", type=str, help="Your Tradier account id")
@click.option("--tradier-access-token", type=str, help="Your Tradier access token")
@click.option("--tradier-environment",
type=click.Choice(["demo", "real"], case_sensitive=False),
help="The environment to run in, demo for the Developer Sandbox, real for live trading")
@click.option("--oanda-account-id", type=str, help="Your OANDA account id")
@click.option("--oanda-access-token", type=str, help="Your OANDA API token")
@click.option("--oanda-environment",
type=click.Choice(["demo", "real"], case_sensitive=False),
help="The environment to run in, demo for fxTrade Practice, real for fxTrade")
@click.option("--bitfinex-api-key", type=str, help="Your Bitfinex API key")
@click.option("--bitfinex-api-secret", type=str, help="Your Bitfinex API secret")
@click.option("--gdax-api-key", type=str, help="Your Coinbase Pro API key")
@click.option("--gdax-api-secret", type=str, help="Your Coinbase Pro API secret")
@click.option("--gdax-passphrase", type=str, help="Your Coinbase Pro API passphrase")
@click.option("--gdax-environment",
type=click.Choice(["paper", "live"], case_sensitive=False),
help="The environment to run in, paper for the sandbox, live for live trading")
@click.option("--binance-api-key", type=str, help="Your Binance API key")
@click.option("--binance-api-secret", type=str, help="Your Binance API secret")
@click.option("--binance-environment",
type=click.Choice(["demo", "real"], case_sensitive=False),
help="The environment to run in, demo for testnet, real for the production environment")
@click.option("--node", type=str, help="The name or id of the live node to run on")
@click.option("--auto-restart", type=bool, help="Whether automatic algorithm restarting must be enabled")
@click.option("--notify-order-events", type=bool, help="Whether notifications must be sent for order events")
@click.option("--notify-insights", type=bool, help="Whether notifications must be sent for emitted insights")
@click.option("--notify-emails",
type=str,
help="A comma-separated list of 'email:subject' pairs configuring email-notifications")
@click.option("--notify-webhooks",
type=str,
help="A comma-separated list of 'url:HEADER_1=VALUE_1:HEADER_2=VALUE_2:etc' pairs configuring webhook-notifications")
@click.option("--notify-sms", type=str, help="A comma-separated list of phone numbers configuring SMS-notifications")
@click.option("--push",
is_flag=True,
default=False,
help="Push local modifications to the cloud before starting live trading")
@click.option("--open", "open_browser",
is_flag=True,
default=False,
help="Automatically open the live results in the browser once the deployment starts")
def live(project: str,
brokerage: str,
ib_user_name: Optional[str],
ib_account: Optional[str],
ib_password: Optional[str],
ib_data_feed: Optional[bool],
tradier_account_id: Optional[str],
tradier_access_token: Optional[str],
tradier_environment: Optional[str],
oanda_account_id: Optional[str],
oanda_access_token: Optional[str],
oanda_environment: Optional[str],
bitfinex_api_key: Optional[str],
bitfinex_api_secret: Optional[str],
gdax_api_key: Optional[str],
gdax_api_secret: Optional[str],
gdax_passphrase: Optional[str],
gdax_environment: Optional[str],
binance_api_key: Optional[str],
binance_api_secret: Optional[str],
binance_environment: Optional[str],
node: str,
auto_restart: bool,
notify_order_events: Optional[bool],
notify_insights: Optional[bool],
notify_emails: Optional[str],
notify_webhooks: Optional[str],
notify_sms: Optional[str],
push: bool,
open_browser: bool) -> None:
"""Start live trading for a project in the cloud.
PROJECT must be the name or the id of the project to start live trading for.
By default an interactive wizard is shown letting you configure the deployment.
If --brokerage is given the command runs in non-interactive mode.
In this mode the CLI does not prompt for input or confirmation.
In non-interactive mode the options specific to the given brokerage are required,
as well as --node, --auto-restart, --notify-order-events and --notify-insights.
"""
logger = container.logger()
api_client = container.api_client()
cloud_project_manager = container.cloud_project_manager()
cloud_project = cloud_project_manager.get_cloud_project(project, push)
cloud_runner = container.cloud_runner()
finished_compile = cloud_runner.compile_project(cloud_project)
if brokerage is not None:
ensure_options(["brokerage", "node", "auto_restart", "notify_order_events", "notify_insights"])
brokerage_instance = None
if brokerage == PaperTradingBrokerage.get_name():
brokerage_instance = PaperTradingBrokerage()
elif brokerage == InteractiveBrokersBrokerage.get_name():
ensure_options(["ib_user_name", "ib_account", "ib_password", "ib_data_feed"])
brokerage_instance = InteractiveBrokersBrokerage(ib_user_name, ib_account, ib_password, ib_data_feed)
elif brokerage == TradierBrokerage.get_name():
ensure_options(["tradier_account_id", "tradier_access_token", "tradier_environment"])
brokerage_instance = TradierBrokerage(tradier_account_id, tradier_access_token, tradier_environment)
elif brokerage == OANDABrokerage.get_name():
ensure_options(["oanda_account_id", "oanda_access_token", "oanda_environment"])
brokerage_instance = OANDABrokerage(oanda_account_id, oanda_access_token, oanda_environment)
elif brokerage == BitfinexBrokerage.get_name():
ensure_options(["bitfinex_api_key", "bitfinex_api_secret"])
brokerage_instance = BitfinexBrokerage(bitfinex_api_key, bitfinex_api_secret)
elif brokerage == CoinbaseProBrokerage.get_name():
ensure_options(["gdax_api_key", "gdax_api_secret", "gdax_passphrase", "gdax_environment"])
brokerage_instance = CoinbaseProBrokerage(gdax_api_key, gdax_api_secret, gdax_passphrase, gdax_environment)
elif brokerage == BinanceBrokerage.get_name():
ensure_options(["binance_api_key", "binance_api_secret", "binance_environment"])
brokerage_instance = BinanceBrokerage(binance_api_key, binance_api_secret, binance_environment)
all_nodes = api_client.nodes.get_all(cloud_project.organizationId)
live_node = next((n for n in all_nodes.live if n.id == node or n.name == node), None)
if live_node is None:
raise RuntimeError(f"You have no live node with name or id '{node}'")
if live_node.busy:
raise RuntimeError(f"The live node named '{live_node.name}' is already in use by '{live_node.usedBy}'")
notify_methods = []
if notify_emails is not None:
for config in notify_emails.split(","):
address, subject = config.split(":")
notify_methods.append(QCEmailNotificationMethod(address=address, subject=subject))
if notify_webhooks is not None:
for config in notify_webhooks.split(","):
address, *headers = config.split(":")
headers = | |
#Botpic:https://upload.wikimedia.org/wikipedia/commons/thumb/b/b8/Red_Rose_Photography.jpg/800px-Red_Rose_Photography.jpg
#Botpic:https://commons.wikimedia.org/wiki/File:Red_Rose_Photography.jpg
#reference:https://www.youtube.com/watch?v=SPTfmiYiuok
import discord
import os
import requests
import json
import math, random
from replit import db
from keep_alive import keep_alive
import asyncpraw, asyncprawcore
#import commands
import time, asyncio, datetime
from discord.ext import tasks
from discord import Member
from discord.ext.commands import has_permissions, MissingPermissions
from prawcore import NotFound
import ffmpeg
from discord import FFmpegPCMAudio
from dotenv import load_dotenv
from youtube_search import YoutubeSearch
load_dotenv()
# Low-level client instance; the bot defined further below (commands.Bot)
# is what actually handles commands.
client = discord.Client()

# To cache every user, so on_remove_reaction is usable.
# Also enable the members intent from https://discord.com/developers/ in the bot section.
intents = discord.Intents.default()
intents.members = True

# NOTE(review): ``global`` at module scope is a no-op; these two statements
# have no effect.
global playing, stream
global currently_playing_message
def say_hello():
    """Print the current local time (simple debug heartbeat)."""
    timestamp = time.ctime()
    print(timestamp)
#---------- To keep the bot alive --------------------------
#1. keeping the bot alive
'''
#------------------- adding a background task -----------------
status = cycle(['with Python','JetHub'])
@bot.event
async def on_ready():
change_status.start()
print("Your bot is ready")
@tasks.loop(seconds=10)
async def change_status():
await bot.change_presence(activity=discord.Game(next(status)))
#--------------------------------------------------------------
3. Setup the Uptime Robot :
create an account on uptime robot.
After creating an account, go to the dashboard and click on Add new monitor (preview)
select monitor type Http(s) (preview)
then go to your project on repl.it and copy the URL from the top of the console and paste it in the URL section of the monitor (preview)
now set the monitoring interval to every 5 mins (so that it will ping the bot every 5 mins) and click on create monitor twice (preview)
That’s it…Now go to ur project on repl.it and hit the Run button
'''
class MySchedule:
    """Posts a message as an embed to a channel after a delay."""

    async def schedule_message(sth,
                               author='anonymous',
                               message='please provide a message',
                               id=863298114949218324,
                               seconds=0):
        """Wait ``seconds``, then send ``message`` as an embed to channel ``id``.

        :param sth: the instance (non-standard name for ``self``)
        :param author: display name of the scheduling user, or 'anonymous'
        :param message: text to post (becomes the embed field name)
        :param id: Discord channel id to post into
        :param seconds: delay before posting
        """
        print('received:')
        print(author, message, id, seconds)
        if author == 'anonymous':
            description = 'command: .anon your_message'
        else:
            author = author + ' <scheduled_message>'
            description = "command: .schedule time_in_seconds your_message"
        # FIX: time.sleep() blocked the entire event loop, freezing every
        # other bot command for the duration; asyncio.sleep yields control.
        await asyncio.sleep(seconds)
        print('slept %s seconds' % seconds)
        print('author : ', author)
        embed = discord.Embed(title=author, colour=discord.Color.blue())
        embed.add_field(
            name=message,
            value=description,
        )
        channel = bot.get_channel(id=id)
        await channel.send(embed=embed)
# Authenticated Reddit client (Async PRAW).  Secrets come from environment
# variables; NOTE(review): client_id and username are hard-coded — consider
# moving all credentials to the environment.
reddit = asyncpraw.Reddit(
    client_id="nnhGBCiBxSJysTobl6SLPQ",
    client_secret=os.environ['rd_client_secret'],
    password=os.environ['rd_pass'],
    user_agent="praw_test",
    username="Alternative-Ad-8849",
)
async def sub_exists(subreddit_name):
    """Return True if *subreddit_name* is an existing subreddit.

    Accepts bare names as well as 'r/...' or '/r/...' prefixed forms.
    Reddit redirects to reddit.com/search when a subreddit doesn't exist,
    which Async PRAW surfaces as ``asyncprawcore.Redirect``.
    """
    # Normalize 'r/foo' / '/r/foo' down to 'foo'.
    if subreddit_name.startswith(('/r/', 'r/')):
        subreddit_name = subreddit_name.split('r/')[-1]
    found = True
    try:
        # fetch=True forces a network request; Async PRAW is lazy by default.
        subreddit = await reddit.subreddit(subreddit_name, fetch=True)
    except asyncprawcore.Redirect:
        found = False
    return found
def get_nude():
    # NOTE(review): ``reddit`` is an Async PRAW client, so ``.subreddit()``
    # returns a coroutine here; this synchronous iteration style looks like
    # it would fail at runtime — confirm before relying on this helper.
    memes_submissions = reddit.subreddit('BustyPetite').hot()
    print('got memes')
    # Pick a random depth in the hot listing to vary results.
    post_to_pick = random.randint(1, 15)
    print('choosen random')
    for i in range(0, post_to_pick):
        print('for loop:{}'.format(i))
        # Advance through the listing, skipping stickied posts; only the
        # last submission reached is returned below.
        submission = next(x for x in memes_submissions if not x.stickied)
    return (submission.url)
def get_crazy(sub_reddit_name='memes'):
    # Generator yielding submission URLs from a random window of the
    # subreddit's hot listing.
    # NOTE(review): ``reddit`` is an Async PRAW client, so this synchronous
    # ``.hot()`` iteration looks like it would fail at runtime — confirm.
    memes_submissions = reddit.subreddit(sub_reddit_name).hot()
    #print('got memes')
    #post_to_pick = random.randint(1, 15)
    #print('choosen random')
    # Random window [start, end) deep in the listing.
    start = random.randint(100, 1000)
    end = random.randint(start, start + 100)
    print('start:{} end:{}'.format(start, end))
    for i in range(start, end):
        #print('for loop:{}'.format(i))
        # Skip stickied posts; yield one URL per iteration.
        submission = next(x for x in memes_submissions if not x.stickied)
        yield (submission.url)
def get_memes_crazy():
    # Generator yielding up to 50 non-stickied submission URLs from r/memes.
    # NOTE(review): same Async PRAW caveat as get_crazy — ``.hot()`` on an
    # async client returns a coroutine; synchronous iteration may fail.
    memes_submissions = reddit.subreddit('memes').hot()
    print('got memes')
    #post_to_pick = random.randint(1, 50)
    print('choosen random')
    for i in range(0, 50):  #post_to_pick):
        print('for loop:{}'.format(i))
        submission = next(x for x in memes_submissions if not x.stickied)
        yield (submission.url)
    #return submission
async def get_one(sub_reddit='memes'):
    """Fetch one random-rising submission from *sub_reddit* and wrap it in a
    Discord embed.

    Walks ``random_rising`` with a randomly chosen limit and keeps the last
    submission seen, then builds a red embed (title, url, selftext, image).
    """
    subreddit = await reddit.subreddit(sub_reddit)
    # Exhaust the listing; ``submission`` ends up holding the final item.
    async for submission in subreddit.random_rising(
            limit=random.randint(1, 150)):
        pass
    embed = discord.Embed(title=submission.title,
                          url=submission.url,
                          description=submission.selftext,
                          colour=discord.Color.red())
    embed.set_image(url=submission.url)
    return (embed)
from discord.ext import commands
bot = commands.Bot(command_prefix='.', help_command=None, intents=intents)
'''
class MyHelpCommand(commands.MinimalHelpCommand):
async def send_pages(self):
destination = self.get_destination()
e = discord.Embed(colour=discord.Color.blurple(), description='')
for page in self.paginator.pages:
e.description += page
await destination.send(embed=e)
bot.help_command = MyHelpCommand()'''
# My sample help command:
@bot.command(name='help',
             brief='`.help` for help',
             help='Plesae enter `.help` for help')
async def help(ctx, args=None):
    """Custom `.help` command: list all commands, or detail a single one.

    :param ctx: invocation context supplied by discord.py
    :param args: optional command name to show detailed help for
    """
    help_embed = discord.Embed(
        title="Encouragement Bot Help!",
        #url="https:ioee.herokuapp.com/",
        description=
        "Type `.help <command name>` for more details about each command. e.g. `.help joke`",
    )
    command_names_list = [x.name for x in bot.commands]

    # If there are no arguments, just list the commands:
    if not args:
        help_embed.add_field(
            name="List of supported commands:",
            value='value',
            #value="\n".join([str(i+1)+". "+x.name for i,x in enumerate(bot.commands)]),
            inline=False,
            #colour=discord.Color.blue()
        )
        # One field per registered command, showing its 'brief' text.
        for i in bot.commands:
            help_embed.add_field(
                name='***{}***'.format(i.name),
                #value='value'
                value='> {}\n\n\n'.format(bot.get_command(i.name).brief),
                inline=False,
                #colour=discord.Color.blue()
            )
        '''for i,command in enumerate(bot.commands):
            help_embed.add_field(
                name = command,
                value = bot.get_command(command),
                inline=True
            )'''
        help_embed.add_field(
            name="Details",
            value=
            "Type `.help <command name>` for more details about each command.",
            inline=False)

    # If the argument is a command, get the help text from that command:
    elif args in command_names_list:
        help_embed.add_field(name=args,
                             value=str(bot.get_command(args).brief) + ' \n' +
                             str(bot.get_command(args).help))

    # If someone is just trolling:
    else:
        help_embed.add_field(name="Nope.",
                             value="Don't think I got that command, boss!")

    await ctx.send(embed=help_embed)
# My sample help command:
@bot.command(name='share_info',
             brief='`.share_info` for share_info',
             help='Plesae enter `.share_info` for mero_share_info')
async def info(ctx, args=None):
    """Fetch MeroShare info from the web service and post it, unless it is
    identical to the channel's most recent message (avoids duplicate spam).

    :param ctx: invocation context supplied by discord.py
    :param args: unused
    """
    response = requests.get('http://ioee.herokuapp.com/meroshare/')
    response = response.text.strip()
    print(response)
    # FIX: prev_message was unbound when the history lookup raised, which
    # made the comparison below crash with NameError; default it first.
    prev_message = None
    try:
        previous_messages = await ctx.channel.history(limit=1).flatten()
        prev_message = previous_messages[0].content
        print('previous_message:')
        print(prev_message)
    except Exception:
        # Best effort: no history (empty channel / missing permissions)
        # simply means we treat the response as new.
        pass
    if (str(prev_message).strip() != response):
        print('not same messages:prev_message and rseponse')
        await ctx.send(response)
    else:
        print('same message as previous message, so not sending')
@bot.command(name='ping',
             brief=" short_help:to test if bot responding ",
             help='long_help: e.g. .ping')
async def ping(ctx, subreddit='jokes', no_of_posts=1, user='.'):
    """Liveness probe: reply 'pong ' in the invoking channel.

    The extra parameters are accepted but unused by the live code path.
    """
    await ctx.send('pong ')
    print('Ping-Pong is invoked: ', user, ctx)
@bot.command(name='embed',
             help='e.g.`.embed`',
             brief='embedding help')
async def embed(ctx):
    """Post a demo embed showing Discord text-formatting markup.

    :param ctx: invocation context supplied by discord.py
    """
    embed = discord.Embed(title="Text Formatting",
                          url="https://realdrewdata.medium.com/",
                          description="Here are some ways to format text",
                          colour=discord.Color.blue())
    embed.set_author(
        name="RealDrewData",
        url="https://twitter.com/RealDrewData",
        icon_url=
        "https://cdn-images-1.medium.com/fit/c/32/32/1*QVYjh50XJuOLQBeH_RZoGw.jpeg"
    )
    #embed.set_author(name=ctx.author.display_name, url="https://twitter.com/RealDrewData", icon_url=ctx.author.avatar_url)
    embed.set_thumbnail(url="https://i.imgur.com/axLm3p6.jpeg")
    # One field per markup style; backslashes keep the markers literal.
    embed.add_field(name="*Italics*",
                    value="Surround your text in asterisks (\*)",
                    inline=False)
    embed.add_field(name="**Bold**",
                    value="Surround your text in double asterisks (\*\*)",
                    inline=False)
    embed.add_field(name="__Underline__",
                    value="Surround your text in double underscores (\_\_)",
                    inline=False)
    embed.add_field(name="~~Strikethrough~~",
                    value="Surround your text in double tildes (\~\~)",
                    inline=False)
    embed.add_field(name="`Code Chunks`",
                    value="Surround your text in backticks (\`)",
                    inline=False)
    embed.add_field(name="Blockquotes",
                    value="> Start your text with a greater than symbol (\>)",
                    inline=False)
    embed.add_field(name="Secrets",
                    value="||Surround your text with double pipes (\|\|)||",
                    inline=False)
    embed.set_footer(text="Learn more here: realdrewdata.medium.com")
    await ctx.send(embed=embed)
@bot.command(name='schedule',
             brief='to schedule message to be sent in any group.',
             help='e.g. `.schedule 10 scheduled for ten seconds.`')
async def schedule(ctx, seconds: int = 3, *, message='Hello There'):
    """Delete the invoking message and re-post it in this channel after `seconds` seconds."""
    print('Seconds: ', seconds)
    msg = str(message)
    await ctx.message.delete()
    # keep only the username part of "name#discriminator"
    channel_id = ctx.channel.id
    author = str(ctx.message.author).split('#')[0]
    # FIX: the original built a five-slot dict and stored a single MySchedule
    # under a random key — the dict served no purpose. Create it directly.
    # (Also renamed the local `id`, which shadowed the builtin.)
    scheduler = MySchedule()
    await scheduler.schedule_message(author=author,
                                    message=msg,
                                    id=channel_id,
                                    seconds=int(seconds))
'''@bot.command()
async def schedule(ctx, message='Hello There', seconds = 3):
#print(ctx.channel.id)
m=str(message)
id = ctx.message.id
print('\n\n\n{}\n\n'.format(m))
author = str(ctx.message.author).split('#')[0]
await ctx.message.delete()
#id=ctx.channel.id
channel = bot.get_channel(id=id)
print(id)
print(channel)
#await channel.send('hi')
#await schedule_message(author, m, id, seconds = seconds)
#print(ctx.message)
#await ctx.message.delete(ctx.message)
#await channel.send('hi')
#await ctx.send('pong')
#print('Im invoked')'''
@bot.command(name='anon',
             brief='to send message anonymously',
             help='e.g. `.anon Guess who!`')
async def anon(ctx, *, message='please provide a message'):
    """Delete the caller's message and re-post it attributed to 'anonymous'."""
    msg = str(message)
    await ctx.message.delete()
    # FIX: dropped the pointless single-entry dict wrapper around MySchedule
    # and renamed the local `id`, which shadowed the builtin.
    channel_id = ctx.channel.id
    scheduler = MySchedule()
    await scheduler.schedule_message('anonymous', msg, channel_id)
    print('send')
    print(msg, channel_id)
@bot.command(name="echo",
             pass_context=True,
             brief='ehhoes/repeat the message deleting the user\'s message',
             help='e.g. `.echo I am echoed`')
async def echo(ctx, *, message='please provide a message'):
    """Repeat `message` in the channel, deleting the invoking message first (best effort)."""
    try:
        # removal may fail (e.g. missing permissions) — that's fine, still echo
        await ctx.message.delete()
    except:
        pass
    await ctx.send(message)
@echo.error
async def echo_error(ctx, error):
    """Error handler for `.echo`: report missing permissions to the invoking channel."""
    if isinstance(error, MissingPermissions):
        text = "Sorry {}, you do not have permissions to do that!".format(
            ctx.message.author)
        # FIX: bot.send_message() is the long-removed discord.py 0.x API and
        # raises AttributeError on the rewrite; the rest of this file uses the
        # 1.x+ context API, so send via ctx like every other command.
        await ctx.send(text)
@bot.command(name='unleash',
brief='unleahes the subreddit to c channel',
help='e.g.To unleash r/jokes `.unleash jokes`')
async def unleash(ctx, subreddit='none'):
if subreddit == 'none':
await ctx.send('Please enter the subreddit to be unleashed')
else:
print(ctx.channel.id)
#if "unleash" not in db.keys():db['unleash']={}
if await sub_exists(subreddit):
if str(ctx.channel.id) not in db['unleash']:
#i.e. channel doesn't exists in database
db['unleash'][str(ctx.channel.id)] = []
#db['unleash'][str(ctx.channel.id)].append(str(subreddit))
else:
#i.e. channel doesn't exists in database
if str(subreddit) not in db['unleash'][str(ctx.channel.id)]:
db['unleash'][str(ctx.channel.id)].append(str(subreddit))
await ctx.send('unleashing r/{} to {}'.format(subreddit, ctx.channel))
else:
await ctx.send('r/{} already unleashed to {}'.format(subreddit, ctx.channel))
else:
| |
<reponame>widgetti/react-ipywidgets<gh_stars>10-100
"""Write ipywidgets like React
ReactJS - ipywidgets relation:
* DOM nodes -- Widget
* Element -- Element
* Component -- function
"""
import copy
import logging
import sys
import threading
from dataclasses import dataclass, field
from inspect import isclass
from types import TracebackType
from typing import (
Any,
Callable,
ContextManager,
Dict,
Generic,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from warnings import warn
import ipywidgets as widgets
from typing_extensions import Literal
from . import _version
__version__ = _version.__version__
_last_rc = None # used for testing
local = threading.local()
T = TypeVar("T")
U = TypeVar("U")
W = TypeVar("W") # used for widgets
E = TypeVar("E") # used for elements
WidgetOrList = Union[widgets.Widget, List[widgets.Widget]]
EffectCleanupCallable = Callable[[], None]
EffectCallable = Callable[[], Optional[EffectCleanupCallable]]
ROOT_KEY = "ROOT::"
logger = logging.getLogger("react") # type: ignore
# this will show friendly stack traces
DEBUG = 0
# if True, will show the original stacktrace as cause
TRACEBACK_ORIGINAL = True
MIME_WIDGETS = "application/vnd.jupyter.widget-view+json"
widget_render_error_msg = (
"""Cannot show widget. You probably want to rerun the code cell above (<i>Click in the code cell, and press Shift+Enter <kbd>⇧</kbd>+<kbd>↩</kbd></i>)."""
)
mime_bundle_default = {"text/plain": "Cannot show ipywidgets in text", "text/html": widget_render_error_msg}
def element(cls, **kwargs):
    """Create an Element for the widget class `cls` (monkey-patched onto Widget as a classmethod just below)."""
    return ComponentWidget(cls)(**kwargs)
widgets.Widget.element = classmethod(element)
def join_key(parent_key, key):
    """Concatenate a parent key and a child key into a single path string."""
    return "{}{}".format(parent_key, key)
def pp(o):
    """Debug helper: pretty-print `o` using the optional third-party `prettyprinter` package.

    Imported lazily so the dependency is only required when actually used.
    """
    import prettyprinter
    prettyprinter.install_extras()
    prettyprinter.pprint(o, width=1)
def same_component(c1, c2):
    """Return True when the two components compare equal (delegates to Component.__eq__)."""
    return c1 == c2
class ComponentCreateError(RuntimeError):
    """Raised when rendering a component fails; carries a rich (pre-formatted) traceback."""

    def __init__(self, rich_traceback):
        self.rich_traceback = rich_traceback
        super().__init__(rich_traceback)
class Component:
    # Abstract base for anything callable that produces an Element:
    # ComponentWidget wraps a widget class, ComponentFunction wraps a render function.
    name: str

    def __call__(self, *args, **kwargs) -> Union[widgets.Widget, "Element"]:
        """Instantiate the component into an Element (overridden by subclasses)."""
        pass
class Element(Generic[W]):
    """Declarative description of a widget: a component plus the args/kwargs to create it with.

    Elements are cheap, value-like objects; the render context later turns
    them into real ipywidgets and reconciles prop changes on re-render.
    """

    # kwarg name under which children collected in a `with element:` block are stored
    child_prop_name = "children"
    # to make every unique on_value callback to a unique wrapper
    # so that we can remove the listeners (observe/unobserve need the same object)
    _callback_wrappers: Dict[Callable, Callable] = {}
    # _create_widget diffs the global widget registry before/after creation,
    # so widget creation must be serialized
    create_lock: ContextManager = threading.Lock()

    def __init__(self, component, *args, **kwargs):
        self.component = component
        self.mime_bundle = mime_bundle_default
        self._key: Optional[str] = None
        self.args = args
        self.kwargs = kwargs
        self.handlers = []  # (event_name, callback) pairs registered via .on()
        self._meta = {}
        self._current_context = None
        rc = _get_render_context(required=False)
        if rc:
            self._current_context = rc.context
        if rc is not None and rc.container_adders:
            # created inside a `with container_element:` block: register as its child
            rc.container_adders[-1].add(self)
        if DEBUG:
            # since we construct widgets or components from a different code path
            # we want to preserve the original call stack, by manually tracking frames
            try:
                assert False
            except AssertionError:
                self.traceback = cast(TracebackType, sys.exc_info()[2])
            assert self.traceback is not None
            assert self.traceback.tb_frame is not None
            assert self.traceback.tb_frame.f_back is not None
            frame_py = self.traceback.tb_frame.f_back.f_back
            assert frame_py is not None
            self.traceback = TracebackType(tb_frame=frame_py, tb_lasti=self.traceback.tb_lasti, tb_lineno=frame_py.f_lineno, tb_next=None)

    def key(self, value: str):
        """Returns the same element with a custom key set.

        This can help render performance. See documentation for details.
        """
        self._key = value
        return self

    def meta(self, **kwargs):
        """Add metadata to the created widget.

        This can be used to find a widget for testing.
        """
        self._meta = {**self._meta, **kwargs}
        return self

    def __repr__(self):
        def format_arg(value):
            # truncate long reprs so element reprs stay readable
            value_repr = repr(value)
            if len(value_repr) > 50:
                value_repr = value_repr[:10] + "..." + value_repr[-10:]
            return value_repr

        args = [format_arg(value) for value in self.args]

        def format_kwarg(key, value):
            # elide child elements, which would otherwise recurse into huge reprs
            if key == "children":
                if len(value) > 0:
                    contains_elements = any(isinstance(child, Element) for child in value)
                    if contains_elements:
                        return "children = ..."
            return f"{key} = {format_arg(value)}"

        kwargs = [format_kwarg(key, value) for key, value in self.kwargs.items()]
        args_formatted = ", ".join(args + kwargs)
        if isinstance(self.component, ComponentFunction):
            name = self.component.f.__name__
            return f"{name}({args_formatted})"
        if isinstance(self.component, ComponentWidget):
            modulename = self.component.widget.__module__
            # lets shorten e.g. ipyvuetify.generated.Label.Label to ipyvuetify.Label
            shorten = "ipywidgets ipyvuetify ipyvue".split()
            for prefix in shorten:
                if modulename.startswith(prefix):
                    modulename = prefix
                    # don't replace ipyvuetify with ipyvue
                    break
            name = modulename + "." + self.component.widget.__name__
            return f"{name}({args_formatted})"
        else:
            raise RuntimeError(f"No repr for {type(self)}")

    def on(self, name, callback):
        """Register an event handler; returns self so calls can be chained."""
        self.handlers.append((name, callback))
        return self

    def _ipython_display_(self, **kwargs):
        display(self, self.mime_bundle)

    def __enter__(self):
        """Open a child-collection block: elements created inside become children."""
        rc = _get_render_context()
        # FIX: honor child_prop_name (subclasses may override it) instead of a
        # hard-coded "children"; add_children below already uses the attribute.
        ca = ContainerAdder[T](self, self.child_prop_name)
        assert rc.context is self._current_context, f"Context change from {self._current_context} -> {rc.context}"
        assert rc.context is not None
        rc.container_adders.append(ca)
        return self

    def __exit__(self, *args, **kwargs):
        rc = _get_render_context()
        assert rc.context is self._current_context, f"Context change from {self._current_context} -> {rc.context}"
        assert rc.context is not None
        ca = rc.container_adders.pop()
        self.add_children(ca.collect())

    def add_children(self, children):
        """Append `children` to the children kwarg, preserving its container type (list or tuple)."""
        if self.child_prop_name not in self.kwargs:
            self.kwargs[self.child_prop_name] = []
        # generic way to add to a list or tuple
        container_prop_type = type(self.kwargs[self.child_prop_name])
        self.kwargs[self.child_prop_name] = self.kwargs[self.child_prop_name] + container_prop_type(children)

    def _get_widget_args(self):
        # trait names accepted by the wrapped widget class
        return self.component.widget.class_trait_names()

    def _split_kwargs(self, kwargs):
        # split into normal kwargs and events
        listeners = {}
        normal_kwargs = {}
        assert isinstance(self.component, ComponentWidget)
        args = self._get_widget_args()
        for name, value in kwargs.items():
            # an on_* kwarg that is not itself a widget trait is an event listener
            if name.startswith("on_") and name not in args:
                listeners[name] = value
            else:
                normal_kwargs[name] = value
        return normal_kwargs, listeners

    def _create_widget(self, kwargs):
        """Instantiate the real widget; returns (widget, ids of orphan widgets created as a side effect)."""
        # we can't use our own kwarg, since that contains elements, not widgets
        kwargs, listeners = self._split_kwargs(kwargs)
        assert isinstance(self.component, ComponentWidget)
        # Because we look before and after, we need a lock.
        # A different implementation might avoid this.
        with self.create_lock:
            before = set(widgets.Widget.widgets)
            try:
                widget = self.component.widget(**kwargs)
                if self._meta:
                    widget._react_meta = dict(self._meta)
            except Exception as e:
                # FIX: chain the original exception so the real cause is not lost
                raise RuntimeError(f"Could not create widget {self.component.widget} with {kwargs}") from e
            for name, callback in listeners.items():
                self._add_widget_event_listener(widget, name, callback)
            after = set(widgets.Widget.widgets)
            orphans = (after - before) - {widget.model_id}
        return widget, orphans

    def _update_widget(self, widget: widgets.Widget, el_prev: "Element", kwargs):
        """Mutate an existing widget so it reflects this element instead of `el_prev`."""
        assert isinstance(self.component, ComponentWidget)
        assert isinstance(el_prev.component, ComponentWidget)
        assert same_component(self.component, el_prev.component)
        args = self.component.widget.class_trait_names()
        with widget.hold_sync():
            # update values
            for name, value in kwargs.items():
                if name.startswith("on_") and name not in args:
                    self._update_widget_event_listener(widget, name, value, el_prev.kwargs.get(name))
                else:
                    self._update_widget_prop(widget, name, value)
            # if we previously gave an argument, but now we don't
            # we have to restore the default values, and remove listeners
            cls = widget.__class__
            traits = cls.class_traits()
            dropped_arguments = set(el_prev.kwargs) - set(self.kwargs)
            for name in dropped_arguments:
                if name.startswith("on_") and name not in args:
                    self._remove_widget_event_listener(widget, name, el_prev.kwargs[name])
                else:
                    value = traits[name].default()
                    self._update_widget_prop(widget, name, value)

    def _update_widget_prop(self, widget, name, value):
        setattr(widget, name, value)

    def _update_widget_event_listener(self, widget: widgets.Widget, name: str, callback: Callable, callback_prev: Optional[Callable]):
        # it's an event listener: detach the old wrapper when the callback changed
        if callback != callback_prev and callback_prev is not None:
            self._remove_widget_event_listener(widget, name, callback_prev)
        self._add_widget_event_listener(widget, name, callback)

    def _add_widget_event_listener(self, widget: widgets.Widget, name: str, callback: Callable):
        target_name = name[3:]  # strip the "on_" prefix to get the trait name

        def on_change(change):
            callback(change.new)

        # remember the wrapper so the listener can be unobserved later
        self._callback_wrappers[callback] = on_change
        widget.observe(on_change, target_name)

    def _remove_widget_event_listener(self, widget: widgets.Widget, name: str, callback: Callable):
        target_name = name[3:]
        on_change = self._callback_wrappers[callback]
        widget.unobserve(on_change, target_name)
# type-var bound to "function returning an Element"; lets @component preserve the wrapped signature
FuncT = TypeVar("FuncT", bound=Callable[..., Element])
def find_children(el):
    """Collect every Element reachable from `el`'s args and kwargs.

    Direct Element arguments are collected as-is; Elements found inside
    list/tuple/dict arguments are collected and recursed into.
    """
    children = set()
    if not isinstance(el.kwargs, dict):
        # FIX: the message lacked its f-prefix, so {el} was never interpolated
        raise RuntimeError(f"keyword arguments for {el} should be a dict, not {el.kwargs}")
    for arg in list(el.kwargs.values()) + list(el.args):
        if isinstance(arg, Element):
            children.add(arg)
        elif isinstance(arg, (tuple, list)):
            for child in arg:
                if isinstance(child, Element):
                    children.add(child)
                    children |= find_children(child)
        elif isinstance(arg, dict):
            for child in arg.values():
                if isinstance(child, Element):
                    children.add(child)
                    children |= find_children(child)
    return children
class ContainerAdder(Generic[W]):
    """Collects the elements created inside a `with element:` block.

    collect() returns only the top-level ones, i.e. those that are not
    already referenced as a child of another collected element.
    """

    def __init__(self, el: Element[W], prop_name: str):
        self.el = el
        self.prop_name = prop_name
        self.created: List[Element] = []

    def add(self, el):
        self.created.append(el)

    def collect(self):
        nested = set()
        for candidate in self.created:
            nested.update(find_children(candidate))
        # keep creation order, dropping anything that is someone else's child
        return [candidate for candidate in self.created if candidate not in nested]
class ComponentWidget(Component):
    """Component backed directly by an ipywidgets Widget class."""

    def __init__(self, widget: Type[widgets.Widget], mime_bundle=mime_bundle_default):
        self.mime_bundle = mime_bundle
        self.widget = widget
        self.name = widget.__name__

    def __eq__(self, rhs):
        if self is rhs:
            return True
        if not isinstance(rhs, ComponentWidget):
            return False
        return self.widget == rhs.widget

    # FIX: defining __eq__ alone implicitly sets __hash__ = None, making
    # instances unhashable; hash on the wrapped class, consistent with __eq__.
    def __hash__(self):
        return hash(self.widget)

    def __repr__(self):
        return f"Component[{self.widget!r}]"

    def __call__(self, *args, **kwargs):
        el: Element = Element(self, *args, **kwargs)
        # TODO: temporary, we cannot change the constructor
        # otherwise we need to generate the wrapper code again for all libraries
        el.mime_bundle = self.mime_bundle
        return el
class ComponentFunction(Component):
    """Component backed by a render function that returns an Element."""

    def __init__(self, f: Callable[[], Element], mime_bundle=mime_bundle_default):
        self.f = f
        self.name = self.f.__name__
        self.mime_bundle = mime_bundle

    def __eq__(self, rhs):
        if self is rhs:
            return True
        if not isinstance(rhs, ComponentFunction):
            return False
        return self.f == rhs.f

    # FIX: defining __eq__ alone implicitly sets __hash__ = None, making
    # instances unhashable; hash on the wrapped function, consistent with __eq__.
    def __hash__(self):
        return hash(self.f)

    def __repr__(self):
        return f"react.component({self.f.__module__}.{self.f.__name__})"

    def __call__(self, *args, **kwargs):
        el: Element = Element(self, *args, **kwargs)
        el.mime_bundle = self.mime_bundle
        return el
@overload
def component(obj: None = None, mime_bundle=...) -> Callable[[FuncT], FuncT]:
    ...


@overload
def component(obj: FuncT, mime_bundle=...) -> FuncT:
    ...


# it is actually this...
# def component(obj: Union[Type[widgets.Widget], FuncT]) -> Union[ComponentWidget, ComponentFunction[FuncT]]:
# but casting to FuncT gives much better type hints (e.g. argument types checks etc)
def component(obj: Optional[FuncT] = None, mime_bundle: Dict[str, Any] = mime_bundle_default):
    """Decorator turning a widget class or render function into a Component.

    Usable bare (``@component``) or with arguments (``@component(mime_bundle=...)``).
    """
    def wrapper(obj: FuncT) -> FuncT:
        # widget subclasses become ComponentWidget; plain functions become ComponentFunction
        if isclass(obj) and issubclass(obj, widgets.Widget):
            return cast(FuncT, ComponentWidget(widget=obj, mime_bundle=mime_bundle))
        else:
            return cast(FuncT, ComponentFunction(f=obj, mime_bundle=mime_bundle))
    if obj is not None:
        return wrapper(obj)
    else:
        return wrapper
def force_update():
    """Force the current render context to re-render (must be called during a render)."""
    rc = _get_render_context()
    rc.force_update()
def get_widget(el: Element):
"""Returns the real underlying widget, can only be used in use_effect"""
rc = _get_render_context()
if el not in | |
<reponame>SeanCondon/onos-config-demo<filename>ocbind/lacp/interfaces/interface/config/__init__.py
# -*- coding: utf-8 -*-
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-lacp - based on the path /lacp/interfaces/interface/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration data for each LACP aggregate interface
"""
__slots__ = ('_path_helper', '_extmethods', '__name','__interval','__lacp_mode','__system_id_mac','__system_priority',)
_yang_name = 'config'
_pybind_generated_by = 'container'
    def __init__(self, *args, **kwargs):
        # pyangbind-generated constructor: wires up the optional XPath helper
        # and builds one YANGDynClass-wrapped leaf per YANG node in this container.
        helper = kwargs.pop("path_helper", None)
        if helper is False:
            # caller explicitly disabled path registration
            self._path_helper = False
        elif helper is not None and isinstance(helper, xpathhelper.YANGPathHelper):
            self._path_helper = helper
        elif hasattr(self, "_parent"):
            # fall back to the parent's helper when embedded in a larger tree
            helper = getattr(self._parent, "_path_helper", False)
            self._path_helper = helper
        else:
            self._path_helper = False
        self._extmethods = False
        # leaf instantiation; types, defaults and restrictions come from the YANG model
        self.__name = YANGDynClass(base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['name'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=True)
        self.__interval = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FAST': {}, 'SLOW': {}},), default=six.text_type("SLOW"), is_leaf=True, yang_name="interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-period-type', is_config=True)
        self.__lacp_mode = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), default=six.text_type("ACTIVE"), is_leaf=True, yang_name="lacp-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=True)
        self.__system_id_mac = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}$'}), is_leaf=True, yang_name="system-id-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=True)
        self.__system_priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=True)
        load = kwargs.pop("load", None)
        if args:
            # copy-constructor style: populate this container from another object
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    # skip unchanged leaves so model defaults are preserved
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Absolute YANG path of this container: delegate to the parent when
        # attached to a tree, otherwise return the static schema path.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return ['lacp', 'interfaces', 'interface', 'config']
    # ------------------------------------------------------------------
    # pyangbind-generated leaf accessors. Each setter re-wraps the incoming
    # value in YANGDynClass so it is validated against the YANG model, and
    # each _unset_* restores the model default.
    # ------------------------------------------------------------------
    def _get_name(self):
        """
        Getter method for name, mapped from YANG variable /lacp/interfaces/interface/config/name (oc-if:base-interface-ref)
        YANG Description: Reference to the interface on which LACP should be
        configured. The type of the target interface must be
        ieee8023adLag
        """
        return self.__name

    def _set_name(self, v, load=False):
        """
        Setter method for name, mapped from YANG variable /lacp/interfaces/interface/config/name (oc-if:base-interface-ref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_name is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_name() directly.
        YANG Description: Reference to the interface on which LACP should be
        configured. The type of the target interface must be
        ieee8023adLag
        """
        # unwrap values that carry their own YANG type wrapper
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['name'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """name must be of a type compatible with oc-if:base-interface-ref""",
                'defined-type': "oc-if:base-interface-ref",
                'generated-type': """YANGDynClass(base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['name'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=True)""",
            })
        self.__name = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_name(self):
        self.__name = YANGDynClass(base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['name'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=True)

    def _get_interval(self):
        """
        Getter method for interval, mapped from YANG variable /lacp/interfaces/interface/config/interval (lacp-period-type)
        YANG Description: Set the period between LACP messages -- uses
        the lacp-period-type enumeration.
        """
        return self.__interval

    def _set_interval(self, v, load=False):
        """
        Setter method for interval, mapped from YANG variable /lacp/interfaces/interface/config/interval (lacp-period-type)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_interval is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_interval() directly.
        YANG Description: Set the period between LACP messages -- uses
        the lacp-period-type enumeration.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FAST': {}, 'SLOW': {}},), default=six.text_type("SLOW"), is_leaf=True, yang_name="interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-period-type', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """interval must be of a type compatible with lacp-period-type""",
                'defined-type': "openconfig-lacp:lacp-period-type",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FAST': {}, 'SLOW': {}},), default=six.text_type("SLOW"), is_leaf=True, yang_name="interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-period-type', is_config=True)""",
            })
        self.__interval = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_interval(self):
        self.__interval = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FAST': {}, 'SLOW': {}},), default=six.text_type("SLOW"), is_leaf=True, yang_name="interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-period-type', is_config=True)

    def _get_lacp_mode(self):
        """
        Getter method for lacp_mode, mapped from YANG variable /lacp/interfaces/interface/config/lacp_mode (lacp-activity-type)
        YANG Description: ACTIVE is to initiate the transmission of LACP packets.
        PASSIVE is to wait for peer to initiate the transmission of
        LACP packets.
        """
        return self.__lacp_mode

    def _set_lacp_mode(self, v, load=False):
        """
        Setter method for lacp_mode, mapped from YANG variable /lacp/interfaces/interface/config/lacp_mode (lacp-activity-type)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_lacp_mode is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_lacp_mode() directly.
        YANG Description: ACTIVE is to initiate the transmission of LACP packets.
        PASSIVE is to wait for peer to initiate the transmission of
        LACP packets.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), default=six.text_type("ACTIVE"), is_leaf=True, yang_name="lacp-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """lacp_mode must be of a type compatible with lacp-activity-type""",
                'defined-type': "openconfig-lacp:lacp-activity-type",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), default=six.text_type("ACTIVE"), is_leaf=True, yang_name="lacp-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=True)""",
            })
        self.__lacp_mode = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_lacp_mode(self):
        self.__lacp_mode = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), default=six.text_type("ACTIVE"), is_leaf=True, yang_name="lacp-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=True)

    def _get_system_id_mac(self):
        """
        Getter method for system_id_mac, mapped from YANG variable /lacp/interfaces/interface/config/system_id_mac (oc-yang:mac-address)
        YANG Description: The MAC address portion of the node's System ID. This is
        combined with the system priority to construct the 8-octet
        system-id
        """
        return self.__system_id_mac

    def _set_system_id_mac(self, v, load=False):
        """
        Setter method for system_id_mac, mapped from YANG variable /lacp/interfaces/interface/config/system_id_mac (oc-yang:mac-address)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_system_id_mac is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_system_id_mac() directly.
        YANG Description: The MAC address portion of the node's System ID. This is
        combined with the system priority to construct the 8-octet
        system-id
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}$'}), is_leaf=True, yang_name="system-id-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """system_id_mac must be of a type compatible with oc-yang:mac-address""",
                'defined-type': "oc-yang:mac-address",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}$'}), is_leaf=True, yang_name="system-id-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=True)""",
            })
        self.__system_id_mac = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_system_id_mac(self):
        self.__system_id_mac = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}$'}), is_leaf=True, yang_name="system-id-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=True)

    def _get_system_priority(self):
        """
        Getter method for system_priority, mapped from YANG variable /lacp/interfaces/interface/config/system_priority (uint16)
        YANG Description: Sytem priority used by the node on this LAG interface.
        Lower value is higher priority for determining which node
        is the controlling system.
        """
        return self.__system_priority

    def _set_system_priority(self, v, load=False):
        """
        Setter method for system_priority, mapped from YANG variable /lacp/interfaces/interface/config/system_priority (uint16)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_system_priority is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_system_priority() directly.
        YANG Description: Sytem priority used by the node on this LAG interface.
        Lower value is higher priority for determining which node
        is the controlling system.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """system_priority must be of a type compatible with uint16""",
                'defined-type': "uint16",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=True)""",
            })
        self.__system_priority = t
        if hasattr(self, '_set'):
            self._set()
def _unset_system_priority(self):
self.__system_priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), | |
<gh_stars>0
"""
Simple table class.
Note that this module depends only on the Python standard library.
You can "install" it just by dropping it into your working directory.
A SimpleTable is inherently (but not rigidly) rectangular.
You should create it from a *rectangular* (2d!) iterable of data.
A SimpleTable can be concatenated with another SimpleTable
or extended by another SimpleTable. ::
table1.extend_right(table2)
table1.extend(table2)
Note that although a SimpleTable allows only one column (the first) of
stubs at initialization, concatenation of tables allows you to produce
tables with interior stubs. (You can also assign the datatype 'stub'
to the cells in any column, or use ``insert_stubs``.)
A SimpleTable can be initialized with `datatypes`: a list of ints that
provide indexes into `data_fmts` and `data_aligns`. Each data cell is
assigned a datatype, which will control formatting. If you do not
specify the `datatypes` list, it will be set to ``range(ncols)`` where
`ncols` is the number of columns in the data. (I.e., cells in a
column have their own datatype.) This means that you can just specify
`data_fmts` without bothering to provide a `datatypes` list. If
``len(datatypes)<ncols`` then datatype assignment will cycle across a
row. E.g., if you provide 10 columns of data with ``datatypes=[0,1]``
then you will have 5 columns of datatype 0 and 5 columns of datatype
1, alternating. Corresponding to this specification, you should provide
a list of two ``data_fmts`` and a list of two ``data_aligns``.
Potential problems for Python 3
-------------------------------
- Calls ``next`` instead of ``__next__``.
The 2to3 tool should handle that no problem.
(We will switch to the `next` function if 2.5 support is ever dropped.)
- from __future__ import division, with_statement
- from itertools import izip as zip
- Let me know if you find other problems.
:contact: alan dot isaac at gmail dot com
:requires: Python 2.5.1+
:note: current version
:note: HTML data format currently specifies tags
:todo: support a bit more of http://www.oasis-open.org/specs/tr9503.html
:todo: add colspan support to Cell
:since: 2008-12-21
:change: 2010-05-02 eliminate newlines that came before and after table
"""
from __future__ import division, with_statement
try: #accommodate Python 3
from itertools import izip as zip
pass #JP: try to avoid empty try with 2to3
except ImportError:
pass
from itertools import cycle
from collections import defaultdict
import csv
def csv2st(csvfile, headers=False, stubs=False, title=None):
    """Return SimpleTable instance,
    created from the data in `csvfile`,
    which is in comma separated values format.
    The first row may contain headers: set headers=True.
    The first column may contain stubs: set stubs=True.
    Can also supply headers and stubs as tuples of strings.

    :param csvfile: path to the CSV file to read
    :param headers: True to take headers from the first row, or a tuple
        of header strings, or False for no headers
    :param stubs: True to take stubs from the first column, or a tuple
        of stub strings, or False for no stubs
    :param title: table title
        (NOTE(review): accepted but not forwarded to SimpleTable — confirm intent)
    :raise IOError: if the file has no data rows or rows of unequal length
    """
    rows = list()
    with open(csvfile, 'r') as fh:
        reader = csv.reader(fh)
        if headers is True:
            # use the builtin next(); the original ``reader.next()`` is
            # Python 2 only
            headers = next(reader)
        elif headers is False:
            headers = ()
        if stubs is True:
            stubs = list()
            for row in reader:
                if row:
                    stubs.append(row[0])
                    rows.append(row[1:])
        else:  # no stubs, or stubs provided
            for row in reader:
                if row:
                    rows.append(row)
        if stubs is False:
            stubs = ()
    if not rows:
        # guard: ``rows[0]`` below would raise a bare IndexError
        raise IOError('No data rows found in CSV file.')
    ncols = len(rows[0])
    if any(len(row) != ncols for row in rows):
        raise IOError('All rows of CSV file must have same length.')
    return SimpleTable(data=rows, headers=headers, stubs=stubs)
class SimpleTable(list):
"""Produce a simple ASCII, CSV, HTML, or LaTeX table from a
*rectangular* (2d!) array of data, not necessarily numerical.
Directly supports at most one header row,
which should be the length of data[0].
Directly supports at most one stubs column,
which must be the length of data.
(But see `insert_stubs` method.)
See globals `default_txt_fmt`, `default_csv_fmt`, `default_html_fmt`,
and `default_latex_fmt` for formatting options.
Sample uses::
mydata = [[11,12],[21,22]] # data MUST be 2-dimensional
myheaders = [ "Column 1", "Column 2" ]
mystubs = [ "Row 1", "Row 2" ]
tbl = sm.iolib.SimpleTable(mydata, myheaders, mystubs, title="Title")
print( tbl )
print( tbl.as_html() )
# set column specific data formatting
tbl = sm.iolib.SimpleTable(mydata, myheaders, mystubs,
fmt={'data_fmts':["%3.2f","%d"]})
print( tbl.as_csv() )
with open('./temp.tex','w') as fh:
fh.write( tbl.as_latex_tabular() )
"""
def __init__(self, data, headers=None, stubs=None, title='',
datatypes=None,
csv_fmt=None, txt_fmt=None, ltx_fmt=None, html_fmt=None,
celltype= None, rowtype=None,
**fmt_dict):
"""
Parameters
----------
data : list of lists or 2d array (not matrix!)
R rows by K columns of table elements
headers : list (or tuple) of str
sequence of K strings, one per header
stubs : list (or tuple) of str
sequence of R strings, one per stub
title : string
title of the table
datatypes : list of int
indexes to `data_fmts`
txt_fmt : dict
text formatting options
ltx_fmt : dict
latex formatting options
csv_fmt : dict
csv formatting options
hmtl_fmt : dict
hmtl formatting options
celltype : class
the cell class for the table (default: Cell)
rowtype : class
the row class for the table (default: Row)
fmt_dict : dict
general formatting options
"""
#self._raw_data = data
self.title = title
self._datatypes = datatypes or range(len(data[0]))
#start with default formatting
self._text_fmt = default_txt_fmt.copy()
self._latex_fmt = default_latex_fmt.copy()
self._csv_fmt = default_csv_fmt.copy()
self._html_fmt = default_html_fmt.copy()
#substitute any general user specified formatting
#:note: these will be overridden by output specific arguments
self._csv_fmt.update(fmt_dict)
self._text_fmt.update(fmt_dict)
self._latex_fmt.update(fmt_dict)
self._html_fmt.update(fmt_dict)
#substitute any output-type specific formatting
self._csv_fmt.update(csv_fmt or dict())
self._text_fmt.update(txt_fmt or dict())
self._latex_fmt.update(ltx_fmt or dict())
self._html_fmt.update(html_fmt or dict())
self.output_formats = dict(
text=self._text_fmt,
txt=self._text_fmt,
csv=self._csv_fmt,
htm=self._html_fmt,
html=self._html_fmt,
latex=self._latex_fmt,
ltx=self._latex_fmt)
self._Cell = celltype or Cell
self._Row = rowtype or Row
rows = self._data2rows(data) # a list of Row instances
list.__init__(self, rows)
self._add_headers_stubs(headers, stubs)
    def __str__(self):
        """Return the plain-text rendering of the table."""
        return self.as_text()
def _add_headers_stubs(self, headers, stubs):
"""Return None. Adds headers and stubs to table,
if these were provided at initialization.
Parameters
----------
headers : list of strings
K strings, where K is number of columns
stubs : list of strings
R strings, where R is number of non-header rows
:note: a header row does not receive a stub!
"""
_Cell = self._Cell
_Row = self._Row
if headers:
headers = [ _Cell(h,datatype='header') for h in headers ]
headers = _Row(headers, datatype='header')
headers.table = self
for cell in headers:
cell.row = headers
self.insert(0, headers)
if stubs:
self.insert_stubs(0, stubs)
def _data2rows(self, raw_data):
"""Return list of Row,
the raw data as rows of cells.
"""
_Cell = self._Cell
_Row = self._Row
rows = []
for datarow in raw_data:
dtypes = cycle(self._datatypes)
newrow = _Row([_Cell(datum) for datum in datarow])
newrow.table = self #row knows its SimpleTable
for cell in newrow:
cell.datatype = dtypes.next()
cell.row = newrow #a cell knows its row
rows.append(newrow)
return rows
    def pad(self, s, width, align):
        """DEPRECATED: just use the pad function"""
        # thin backwards-compatibility wrapper around the module-level pad()
        return pad(s, width, align)
def get_colwidths(self, output_format, **fmt_dict):
fmt = self.output_formats[output_format].copy()
fmt.update(fmt_dict)
ncols = max(len(row) for row in self)
request = fmt.get('colwidths')
if request is 0: #assume no extra space desired (e.g, CSV)
return [0] * ncols
elif request is None: #assume no extra space desired (e.g, CSV)
request = [0] * ncols
elif isinstance(request, int):
request = [request] * ncols
elif len(request) < ncols:
request = [request[i%len(request)] for i in range(ncols)]
min_widths = []
for col in zip(*self):
maxwidth = max(len(c.format(0,output_format,**fmt)) for c in col)
min_widths.append(maxwidth)
result = map(max, min_widths, request)
return result
def _get_fmt(self, output_format, **fmt_dict):
"""Return dict, the formatting options.
"""
#first get the default formatting
try:
fmt = self.output_formats[output_format].copy()
except KeyError:
raise ValueError('Unknown format: %s' % output_format)
#then, add formatting specific to this call
fmt.update(fmt_dict)
return fmt
def as_csv(self, **fmt_dict):
"""Return string, the table in CSV format.
Currently only supports comma separator."""
#fetch the format, which may just be default_csv_format
fmt = self._get_fmt('csv', **fmt_dict)
return self.as_text(**fmt)
def as_text(self, **fmt_dict):
"""Return string, the table as text."""
#fetch the text format, override with fmt_dict
fmt = self._get_fmt('txt', **fmt_dict)
#get rows formatted as strings
formatted_rows = [ row.as_string('text', **fmt) for row in self ]
rowlen = len(formatted_rows[-1]) #don't use header row
#place decoration above the table body, if desired
table_dec_above = fmt.get('table_dec_above','=')
if table_dec_above:
formatted_rows.insert(0, table_dec_above * rowlen)
#next place a title at the very top, if desired
#:note: user can include a newlines at end of title if desired
title = self.title
if title:
title = pad(self.title, rowlen, fmt.get('title_align','c'))
formatted_rows.insert(0, title)
#add decoration below the table, if desired
table_dec_below = fmt.get('table_dec_below','-')
if table_dec_below:
formatted_rows.append(table_dec_below * rowlen)
return '\n'.join(formatted_rows)
def as_html(self, **fmt_dict):
"""Return string.
This is the default formatter for HTML tables.
An HTML table formatter must accept as arguments
a table and a format dictionary.
"""
#fetch | |
which holds an array of target lengths. For example, use the following code to find all occurrences of the ASCII string "Test" in a file:
"""
matches_iter = _find_helper(params, ctxt, scope, stream, coord, interp)
matches = list(matches_iter)
types = interp.get_types()
res = types.TFindResults()
res.count = len(matches)
# python3 map doesn't return a list
starts = list(
map(lambda m: m.start() + FIND_MATCHES_START_OFFSET, matches)
)
res.start = starts
# python3 map doesn't return a list
sizes = list(map(lambda m: m.end() - m.start(), matches))
res.size = sizes
return res
"""Used to keep track of the current matches"""
# int64 FindFirst(
# <datatype> data,
# int matchcase=true,
# int wholeword=false,
# int method=0,
# double tolerance=0.0,
# int dir=1,
# int64 start=0,
# int64 size=0,
# int wildcardMatchLength=24 )
@native(name="FindFirst", ret=pfp.fields.Int64, send_interp=True)
def FindFirst(params, ctxt, scope, stream, coord, interp):
    """
    This function is identical to the FindAll function except that the
    return value is the position of the first occurrence of the target
    found. A negative number is returned if the value could not be found.
    """
    global FIND_MATCHES_ITER
    # (re)start the lazy match iterator; FindNext consumes the remainder
    FIND_MATCHES_ITER = _find_helper(
        params, ctxt, scope, stream, coord, interp
    )
    try:
        match = six.next(FIND_MATCHES_ITER)
    except StopIteration:
        # no occurrence at all
        return -1
    return match.start() + FIND_MATCHES_START_OFFSET
# int64 FindNext( int dir=1 )
@native(name="FindNext", ret=pfp.fields.Int64)
def FindNext(params, ctxt, scope, stream, coord):
    """
    This function returns the position of the next occurrence of the
    target value specified with the FindFirst function. If dir is 1, the
    find direction is down. If dir is 0, the find direction is up. The
    return value is the address of the found data, or -1 if the target
    is not found.
    """
    if FIND_MATCHES_ITER is None:
        # FindFirst was never called
        raise errors.InvalidState()

    direction = PYVAL(params[0]) if len(params) > 0 else 1
    if direction != 1:
        # TODO maybe instead of storing the iterator in FIND_MATCHES_ITER,
        # we should go ahead and find _all the matches in the file and store
        # them in a list, keeping track of the idx of the current match.
        #
        # This would be highly inefficient on large files though.
        raise NotImplementedError("Reverse searching is not yet implemented")

    try:
        match = six.next(FIND_MATCHES_ITER)
    except StopIteration:
        # iterator exhausted: no further occurrences
        return -1
    return match.start() + FIND_MATCHES_START_OFFSET
# TFindInFilesResults FindInFiles(
# <datatype> data,
# char dir[],
# char mask[],
# int subdirs=true,
# int openfiles=false,
# int matchcase=true,
# int wholeword=false,
# int method=0,
# double tolerance=0.0,
# int wildcardMatchLength=24 )
@native(name="FindInFiles", ret=pfp.fields.Void)
def FindInFiles(params, ctxt, scope, stream, coord):
    """
    Searches for a given set of data across multiple files. See the FindAll
    function for information on the data, matchcase, wholeword, method,
    wildcardMatchLength and tolerance arguments. The dir argument indicates
    the starting directory where the search will take place. mask indicates
    which file types to search and may contain the characters '*' and
    '?'. If subdirs is true, all subdirectories are recursively searched
    for the value as well. If openfiles is true, only the currently
    open files are searched. The return value is the TFindInFilesResults
    structure which contains a count variable indicate the number of files
    found plus an array of file variables. Each file variable contains
    a count variable indicating the number of matches, plus an array of
    start and size variables indicating the match position.
    """
    # Not supported by this interpreter yet; callers always get
    # NotImplementedError.
    raise NotImplementedError()
# TFindStringsResults FindStrings(
# int minStringLength,
# int type,
# int matchingCharTypes,
# wstring customChars="",
# int64 start=0,
# int64 size=0,
# int requireNull=false )
@native(name="FindStrings", ret=pfp.fields.Void)
def FindStrings(params, ctxt, scope, stream, coord):
    """
    Attempts to locate any strings within a binary file similar to the Find
    Strings dialog which is accessed by clicking 'Search > Find Strings'
    on the main menu. Specify the minimum length of each string in number
    of characters with the minStringLength parameter. The type option
    tells the algorithm to look for ASCII strings, UNICODE strings or
    both by using one of the following constants:

        FINDSTRING_ASCII
        FINDSTRING_UNICODE
        FINDSTRING_BOTH

    To specify which characters are considered as part of a string,
    use an OR bitmask ('|') of one or more of the following constants:

        FINDSTRING_LETTERS - the letters A..Z and a..z
        FINDSTRING_LETTERS_ALL - all international numbers including FINDSTRING_LETTERS
        FINDSTRING_NUMBERS - the numbers 0..9
        FINDSTRING_NUMBERS_ALL - all international numbers including FINDSTRING_NUMBERS
        FINDSTRING_SYMBOLS - symbols such as '#', '@', '!', etc. except for '_'
        FINDSTRING_UNDERSCORE - the character '_'
        FINDSTRING_SPACES - spaces or whitespace
        FINDSTRING_LINEFEEDS - line feed characters 0x0a, 0x0d
        FINDSTRING_CUSTOM - include any custom characters in the customChars string

    Note if the FINDSTRING_CUSTOM constant is included, any characters
    from customChars are considered as part of the string otherwise the
    customChars string is ignored. The start and size parameters indicate
    the range of the file to search and if size is zero, the file is
    searched starting from start to the end of the file. If requireNull
    is true, the strings must have a null (0) character after each string.
    The return value is a TFindStringsResults structure which contains a
    count variable with the number of strings found, a start array holding
    the starting position of each string, a size array holding the size in
    bytes of each string, and a type array which indicates FINDSTRING_ASCII
    if the string is an ASCII string or FINDSTRING_UNICODE if the string
    is a Unicode string.
    """
    # Not supported by this interpreter yet.
    raise NotImplementedError()
# int GetSectorSize()
@native(name="GetSectorSize", ret=pfp.fields.Int)
def GetSectorSize(params, ctxt, scope, stream, coord):
    """
    Returns the size in bytes of the sectors for this drive. If this
    file is not a drive, the current sector size is defined using the
    'View > Division Lines > Set Sector Size' menu option.
    """
    # Drive/sector semantics do not apply to this interpreter.
    raise NotImplementedError()
# int HexOperation(
# int operation,
# int64 start,
# int64 size,
# operand,
# step=0,
# int64 skip=0 )
@native(name="HexOperation", ret=pfp.fields.Int)
def HexOperation(params, ctxt, scope, stream, coord):
    """
    Perform any of the operations on hex data as available in the Hex
    Operations dialog. The operation parameter chooses which operation to
    perform and these operations are described in the Hex Operations dialog
    documentation. start and size indicate which range of bytes to operate
    on and if size is 0, the whole file is used. The operand indicates what
    value to use during the operation and the result is different depending
    upon which operation is used (see the Hex Operations dialog). operand
    can be any of the basic numeric or floating point types and the type
    of this parameter tells the function how to interpret the data. For
    example, if a 'ushort' is passed as an operand, the block of data is
    considered as an array of 'ushort' using the current endian. If step
    is non-zero, the operand is incremented by step after each operation
    and if skip is non-zero, skip number of bytes are skipped after each
    operation. This function returns the number of bytes modified if
    successful, or a negative number on error. The following constants
    can be used for the operation parameter:

        HEXOP_ASSIGN
        HEXOP_ADD
        HEXOP_SUBTRACT
        HEXOP_MULTIPLY
        HEXOP_DIVIDE
        HEXOP_NEGATE
        HEXOP_MODULUS
        HEXOP_SET_MINIMUM
        HEXOP_SET_MAXIMUM
        HEXOP_SWAP_BYTES
        HEXOP_BINARY_AND
        HEXOP_BINARY_OR
        HEXOP_BINARY_XOR
        HEXOP_BINARY_INVERT
        HEXOP_SHIFT_LEFT
        HEXOP_SHIFT_RIGHT
        HEXOP_SHIFT_BLOCK_LEFT
        HEXOP_SHIFT_BLOCK_RIGHT
        HEXOP_ROTATE_LEFT
        HEXOP_ROTATE_RIGHT

    For example, the following code would treat the bytes from address
    16 to 48 as an array of floats and add the value 3.0 to each float
    in the array:
    """
    # The docstring above previously read "is raise NotImplementedError()ed
    # as an operand" — a mangled copy of "is passed as an operand".
    # Not supported by this interpreter yet.
    raise NotImplementedError()
# int64 Histogram( int64 start, int64 size, int64 result[256] )
@native(name="Histogram", ret=pfp.fields.Int64)
def Histogram(params, ctxt, scope, stream, coord):
    """
    Counts the number of bytes of each value in the file from 0 up to
    255. The bytes are counting starting from address start and continuing
    for size bytes. The resulting counts are stored in the int64 array
    results. For example, result[0] would indicate the number of 0 bytes
    values found in the given range of data. The return value is the
    total number of bytes read.
    """
    # Not supported by this interpreter yet.
    raise NotImplementedError()
# int ImportFile( int type, char filename[], int wordaddresses=false, int defaultByteValue=-1 , coord)
@native(name="ImportFile", ret=pfp.fields.Int)
def ImportFile(params, ctxt, scope, stream, coord):
"""
Attempts to import the file specified by filename in one of the
supported import formats. The format is given by the type argument
and may be:
IMPORT_HEXTEXT
IMPORT_DECTEXT
| |
# Natural Language Toolkit: Classifier Utility Functions
#
# Copyright (C) 2001-2020 NLTK Project
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>> (minor additions)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Utility functions and classes for classifiers.
"""
import math
# from nltk.util import Deprecated
import nltk.classify.util # for accuracy & log_likelihood
from nltk.util import LazyMap
######################################################################
# { Helper Functions
######################################################################
# alternative name possibility: 'map_featurefunc()'?
# alternative name possibility: 'detect_features()'?
# alternative name possibility: 'map_featuredetect()'?
# or.. just have users use LazyMap directly?
def apply_features(feature_func, toks, labeled=None):
    """
    Lazily map ``feature_func`` over ``toks`` using the ``LazyMap``
    class, avoiding the memory overhead of materializing every
    featureset up front.  The reduction can be especially significant
    when the underlying token list is itself lazy (as with many corpus
    readers).

    With ``labeled=False`` the returned list-like object's values equal::

        [feature_func(tok) for tok in toks]

    With ``labeled=True`` each element of ``toks`` is a ``(tok, label)``
    pair and the values equal::

        [(feature_func(tok), label) for (tok, label) in toks]

    :param feature_func: maps a token to a featureset -- i.e., a dict
        mapping feature names to feature values.
    :param toks: the tokens to featurize; ``(token, label)`` pairs when
        labeled.
    :param labeled: whether ``toks`` contains labeled tokens; ``None``
        (the default) auto-detects from the type of the first element.
    """
    if labeled is None:
        # heuristic: a tuple/list first element means (tok, label) pairs
        labeled = toks and isinstance(toks[0], (tuple, list))
    if not labeled:
        return LazyMap(feature_func, toks)

    def featurize_labeled(pair):
        # featurize only the token; carry the label through unchanged
        return (feature_func(pair[0]), pair[1])

    return LazyMap(featurize_labeled, toks)
def attested_labels(tokens):
    """
    :return: A tuple of all distinct labels that are attested in the
        given list of classified tokens.
    :rtype: tuple of (immutable)
    :param tokens: The list of classified tokens from which to extract
        labels. A classified token has the form ``(token, label)``.
    :type tokens: list
    """
    seen = set()
    for _token, label in tokens:
        seen.add(label)
    return tuple(seen)
def log_likelihood(classifier, gold):
    """Return the log of the mean probability that `classifier` assigns
    to the gold label of each ``(featureset, label)`` pair in `gold`."""
    featuresets = [featureset for (featureset, _label) in gold]
    pdists = classifier.prob_classify_many(featuresets)
    probs = [pdist.prob(label)
             for (_featureset, label), pdist in zip(gold, pdists)]
    return math.log(sum(probs) / len(probs))
def accuracy(classifier, gold):
    """Return the fraction of ``(featureset, label)`` pairs in `gold`
    that `classifier` labels correctly (0 when `gold` is empty)."""
    predictions = classifier.classify_many([fs for (fs, _label) in gold])
    outcomes = [label == guess
                for (_fs, label), guess in zip(gold, predictions)]
    if not outcomes:
        return 0
    return sum(outcomes) / len(outcomes)
class CutoffChecker(object):
    """
    A helper class that implements cutoff checks based on number of
    iterations and log likelihood.

    Accuracy cutoffs are also implemented, but they're almost never
    a good idea to use.
    """

    def __init__(self, cutoffs):
        """
        :param cutoffs: dict of cutoff criteria.  Recognized keys:
            ``max_iter``, ``min_ll``, ``min_lldelta``, ``max_acc``,
            ``min_accdelta``.  The caller's dict is not modified.
        """
        self.cutoffs = cutoffs.copy()
        # Bug fix: normalize our private copy.  The original code copied
        # first and then normalized the *caller's* dict, so the stored
        # cutoffs never received the sign normalization (and the caller's
        # dict was mutated as a side effect).
        if "min_ll" in self.cutoffs:
            self.cutoffs["min_ll"] = -abs(self.cutoffs["min_ll"])
        if "min_lldelta" in self.cutoffs:
            self.cutoffs["min_lldelta"] = abs(self.cutoffs["min_lldelta"])
        self.ll = None    # log likelihood from the previous check
        self.acc = None   # accuracy from the previous check
        self.iter = 1     # iteration counter

    def check(self, classifier, train_toks):
        """Return True if any configured cutoff has been reached."""
        cutoffs = self.cutoffs
        self.iter += 1
        if "max_iter" in cutoffs and self.iter >= cutoffs["max_iter"]:
            return True  # iteration cutoff.

        new_ll = nltk.classify.util.log_likelihood(classifier, train_toks)
        if math.isnan(new_ll):
            return True

        if "min_ll" in cutoffs or "min_lldelta" in cutoffs:
            if "min_ll" in cutoffs and new_ll >= cutoffs["min_ll"]:
                return True  # log likelihood cutoff
            if (
                "min_lldelta" in cutoffs
                and self.ll
                and ((new_ll - self.ll) <= abs(cutoffs["min_lldelta"]))
            ):
                return True  # log likelihood delta cutoff
            self.ll = new_ll

        if "max_acc" in cutoffs or "min_accdelta" in cutoffs:
            # Bug fix: the original computed log_likelihood here (a
            # copy-paste of the block above); accuracy cutoffs must be
            # checked against accuracy.
            new_acc = nltk.classify.util.accuracy(classifier, train_toks)
            if "max_acc" in cutoffs and new_acc >= cutoffs["max_acc"]:
                return True  # accuracy cutoff
            if (
                "min_accdelta" in cutoffs
                and self.acc
                and ((new_acc - self.acc) <= abs(cutoffs["min_accdelta"]))
            ):
                return True  # accuracy delta cutoff
            self.acc = new_acc

        return False  # no cutoff reached.
######################################################################
# { Demos
######################################################################
def names_demo_features(name):
    """Feature extractor for the names demo: always-on bias feature,
    lowercased first/last letter, and per-letter count and membership
    features."""
    lowered = name.lower()
    features = {
        "alwayson": True,
        "startswith": name[0].lower(),
        "endswith": name[-1].lower(),
    }
    for letter in "abcdefghijklmnopqrstuvwxyz":
        features["count(%s)" % letter] = lowered.count(letter)
        features["has(%s)" % letter] = letter in lowered
    return features
def binary_names_demo_features(name):
    """Like :func:`names_demo_features`, with vowel indicators and
    per-letter startswith/endswith indicator features added (useful for
    binary classifiers)."""
    lowered = name.lower()
    first = name[0].lower()
    last = name[-1].lower()
    features = {
        "alwayson": True,
        "startswith(vowel)": first in "aeiouy",
        "endswith(vowel)": last in "aeiouy",
    }
    for letter in "abcdefghijklmnopqrstuvwxyz":
        features["count(%s)" % letter] = lowered.count(letter)
        features["has(%s)" % letter] = letter in lowered
        features["startswith(%s)" % letter] = letter == first
        features["endswith(%s)" % letter] = letter == last
    return features
def names_demo(trainer, features=names_demo_features):
    """Train and evaluate a gender classifier on the names corpus,
    printing accuracy and (when supported) log likelihood plus a few
    sample probability distributions.  Returns the trained classifier."""
    from nltk.corpus import names
    import random

    # Label every name in the corpus (males first, then females, so the
    # seeded shuffle below stays reproducible).
    labeled_names = [(n, "male") for n in names.words("male.txt")]
    labeled_names += [(n, "female") for n in names.words("female.txt")]

    # Deterministic train/test split.
    random.seed(123456)
    random.shuffle(labeled_names)
    train_set = labeled_names[:5000]
    test_set = labeled_names[5000:5500]

    print("Training classifier...")
    classifier = trainer([(features(n), g) for n, g in train_set])

    print("Testing classifier...")
    acc = accuracy(classifier, [(features(n), g) for n, g in test_set])
    print("Accuracy: %6.4f" % acc)

    # For classifiers that can find probabilities, show the log
    # likelihood and some sample probability distributions.
    try:
        pdists = classifier.prob_classify_many(
            [features(n) for n, _g in test_set]
        )
        logprobs = [pdist.logprob(gold)
                    for (_name, gold), pdist in zip(test_set, pdists)]
        print("Avg. log likelihood: %6.4f" % (sum(logprobs) / len(test_set)))
        print()
        print("Unseen Names P(Male) P(Female)\n" + "-" * 40)
        for (name, gender), pdist in list(zip(test_set, pdists))[:5]:
            if gender == "male":
                fmt = " %-15s *%6.4f %6.4f"
            else:
                fmt = " %-15s %6.4f *%6.4f"
            print(fmt % (name, pdist.prob("male"), pdist.prob("female")))
    except NotImplementedError:
        pass

    return classifier
def partial_names_demo(trainer, features=names_demo_features):
    """Train and evaluate a positive/unlabeled gender classifier on the
    names corpus: the trainer receives positively-labeled male names plus
    an unlabeled mix, and is evaluated on a held-out labeled test set.
    Returns the trained classifier."""
    from nltk.corpus import names
    import random

    male_names = names.words("male.txt")
    female_names = names.words("female.txt")

    random.seed(654321)
    random.shuffle(male_names)
    random.shuffle(female_names)

    # Create a list of male names to be used as positive-labeled examples for training
    positive = map(features, male_names[:2000])

    # Create a list of male and female names to be used as unlabeled examples
    unlabeled = map(features, male_names[2000:2500] + female_names[:500])

    # Create a test set with correctly-labeled male and female names
    test = [(name, True) for name in male_names[2500:2750]] + [
        (name, False) for name in female_names[500:750]
    ]
    random.shuffle(test)

    # Train up a classifier.
    print("Training classifier...")
    classifier = trainer(positive, unlabeled)

    # Run the classifier on the test data.
    print("Testing classifier...")
    acc = accuracy(classifier, [(features(n), m) for (n, m) in test])
    print("Accuracy: %6.4f" % acc)

    # For classifiers that can find probabilities, show the log
    # likelihood and some sample probability distributions.
    try:
        test_featuresets = [features(n) for (n, m) in test]
        pdists = classifier.prob_classify_many(test_featuresets)
        ll = [pdist.logprob(gold) for ((name, gold), pdist) in zip(test, pdists)]
        print("Avg. log likelihood: %6.4f" % (sum(ll) / len(test)))
        print()
        print("Unseen Names P(Male) P(Female)\n" + "-" * 40)
        # Bug fix: zip() returns a non-subscriptable iterator on Python 3;
        # materialize it before slicing (as names_demo already does).
        for ((name, is_male), pdist) in list(zip(test, pdists))[:5]:
            if is_male:
                fmt = " %-15s *%6.4f %6.4f"
            else:
                fmt = " %-15s %6.4f *%6.4f"
            print(fmt % (name, pdist.prob(True), pdist.prob(False)))
    except NotImplementedError:
        pass

    # Return the classifier
    return classifier
# Cache of (instance, sense) pairs per target word, so repeated wsd_demo
# calls do not re-read the senseval corpus.
_inst_cache = {}
def wsd_demo(trainer, word, features, n=1000):
    """Train and evaluate a word-sense classifier for `word` on (at most)
    `n` senseval instances, printing accuracy and (when supported) log
    likelihood.  Returns the trained classifier."""
    from nltk.corpus import senseval
    import random

    # Get the instances (cached across calls).
    print("Reading data...")
    global _inst_cache
    if word not in _inst_cache:
        _inst_cache[word] = [
            (inst, inst.senses[0]) for inst in senseval.instances(word)
        ]
    instances = _inst_cache[word][:]
    n = min(n, len(instances))
    senses = list(set(sense for (_inst, sense) in instances))
    print(" Senses: " + " ".join(senses))

    # Deterministic 80/20 train/test split of the first n instances.
    print("Splitting into test & train...")
    random.seed(123456)
    random.shuffle(instances)
    cut = int(0.8 * n)
    train = instances[:cut]
    test = instances[cut:n]

    print("Training classifier...")
    classifier = trainer([(features(inst), sense) for inst, sense in train])

    print("Testing classifier...")
    acc = accuracy(classifier, [(features(inst), sense) for inst, sense in test])
    print("Accuracy: %6.4f" % acc)

    # For classifiers that can find probabilities, show the log likelihood.
    try:
        pdists = classifier.prob_classify_many(
            [features(inst) for inst, _sense in test]
        )
        logprobs = [pdist.logprob(gold)
                    for (_inst, gold), pdist in zip(test, pdists)]
        print("Avg. log likelihood: %6.4f" % (sum(logprobs) / len(test)))
    except NotImplementedError:
        pass

    return classifier
def check_megam_config():
"""
Checks whether the MEGAM binary is configured.
"""
try:
_megam_bin
except NameError:
| |
to generate the
attribute path.
:param attribute: the name of the attribute.
:param missing: optional value to use when attribute value is None.
:param visitor: optional function responsible for obtaining the
attribute value from a node.
:return: a list of values of the required `attribute` of the
ancestor path of the given `node`.
"""
return [ visitor(c, attribute) or missing for c in node.ancestors() ] +\
[ visitor(node, attribute) or missing ]
def as_tree(self, visitor = None, children = None):
""" Recursively traverses each tree (starting from each root) in order
to generate a dictionary-based tree structure of the entire forest.
Each level of the forest/tree is a list of nodes, and each node
consists of a dictionary representation, where the entry
``children`` (by default) consists of a list of dictionary
representations of its children.
Optionally, a `visitor` callback can be used, which is responsible
for generating a dictionary representation of a given
:class:`CTENode`. By default, the :meth:`_default_node_visitor` is
used which generates a dictionary with the current node as well as
structural properties. See :meth:`_default_node_visitor` for the
appropriate signature of this callback.
Optionally, a `children` callback can be used, which is responsible
for determining which :class:`CTENode`s are children of each visited
:class:`CTENode`, resulting in a key (by default ``children``) and a
list of children :class:`CTENode` objects, which are then included
in the dictionary representation of the currently-visited node. See
:meth:`_default_node_children` for the appropriate signature of this
callback.
For each node visited, the :meth:`CTENode.as_tree` method is invoked
along with the optional `visitor` and `children` arguments. This
method, if not overridden, will delegate to :meth:`node_as_tree`,
which is responsible for invoking the :meth:`visitor` and
:meth:`children methods, as well as updating the dictionary
representation of the node with the representation of the children
nodes.
:param visitor: optional function responsible for generating the
dictionary representation of a node.
:param children: optional function responsible for generating a
children key and list for a node.
:return: a dictionary representation of the structure of the forest.
"""
return [root.as_tree(visitor = visitor, children = children) for \
root in self.roots()]
def node_as_tree(self, node,
visitor = lambda self, node: self._default_node_visitor(node),
children = lambda self, node, visitor, children: \
self._default_node_children(node, visitor, children)):
""" Visits a :class:`CTENode` `node` and delegates to the (optional)
`visitor` callback, as well as the (optional) `children` callback,
in order to generate a dictionary representation of the node along
with its children nodes.
:param node: the :class:`CTENode` for which to generate the
representation.
:param visitor: optional function responsible for generating the
dictionary representation of the node.
:param children: optional function responsible for generating a
children key and list for the node.
:return: a dictionary representation of the structure of the node
and its descendant tree.
"""
tree = visitor(self, node)
tree.update(children(self, node, visitor, children))
return tree
def _default_node_visitor(self, node):
""" Generates a dictionary representation of the given :class:`CTENode`
`node`, which consists of the node itself under the key ``node``, as
well as structural information under the keys ``depth``, ``path``,
``ordering``, ``leaf``, and ``branch``.
:param node: the :class:`CTENode` for which to generate the
representation.
:return: a dictionary representation of the structure of the node.
"""
return {
'depth' : getattr(node, node._cte_node_depth),
'path' : [str(c) for c in getattr(node, node._cte_node_path)],
'ordering' : getattr(node, node._cte_node_ordering),
'leaf' : node.is_leaf(),
'branch' : node.is_branch(),
'node' : node,
}
def _default_node_children(self, node, visitor, children):
""" Generates a key and list of children of the given :class:`CTENode`
`node`, intended to be used as an update to the dictionary
representation generated by the :meth:`node_as_tree` method. The key is
``children`` and the list consists of the children of the given node as
determined by the `children` callback.
Each child node is, in turn, visited through recursive calls to
:meth:`node_as_child`, and the `visitor` and `children` parameters are
passed along.
:param node: the :class:`CTENode` for which to generate the children
representation.
:param visitor: optional function responsible for generating the
dictionary representation of the node.
:param children: optional function responsible for generating a
children key and list for the node.
:return: a key and list representation of the structure of the children
of the given node.
"""
return { self.model._cte_node_children : [ self.node_as_tree(child,
visitor = visitor, children = children) for child in \
node.children.all() ] }
def drilldown(self, attributes, path):
""" Recursively descends the tree/forest (starting from each root node)
in order to find a :class:`CTENode` which corresponds to the given
`path`. The path is expected to be an iterable of tuples, called
path components, consisting of attribute values with which to filter
through the QuerySet API. The name of the attribute to which each
value corresponds is specified in `attributes`, which is expected
to conform to Django's QuerySet API for the filter semantics. Each
value in the path component tuple will be mapped to its
corresponding attribute name before being passed to the filter
method.
For example, if the node model features the integer field ``x``
and the boolean field ``y``, we can drill down in the following way:
drilldown(('x__gte', 'y'),[(35, True), (37, False)])
The simplest form of drilldown is to match with equality on a single
attribute, such as ``name``, as in the following example:
drilldown(('name',), [('path',), ('to',), ('my',), ('node',)])
Don't forget the trailing comma if specifying singleton tuples! If
you need very simple, one-attribute path components, it is suggested
you extend the manager with your own convenience method; the above
will, for instance, become:
def drilldown_by_name(self, path):
return self.drilldown(('name',),
[(component,) for component in path])
Failure to find the required node results in a :class:`DoesNotExist`
exception being raised.
An empty path will result in the first root node being returned (if
at least one root node exists).
"""
# empty path result in first root, if present
if len(path) == 0:
try:
return self.roots()[0]
except IndexError:
raise self.model.DoesNotExist
# bootstrap with the first component, then iterate through the remaining
# components in the path as long as each child node is found
component = path[0]
current = None
# mapping of attribute names with values, as per QuerySet filter
attrs = lambda component: dict(zip(attributes, component))
# find the root corresponding to the bootstrapped initial path component
try:
root = self.roots().filter(**attrs(component))[0]
except IndexError:
raise self.model.DoesNotExist
# proceed to drill down until path components are exhausted
current = root
for component in path[1:]:
try:
current = current.children.filter(**attrs(component))[0]
except IndexError:
raise self.model.DoesNotExist
return current
def prepare_delete(self, node, method, position = None, save = True):
""" Prepares a given :class:`CTENode` `node` for deletion, by executing
the required deletion semantics (Pharaoh, Grandmother, or Monarchy).
The `method` argument can be one of the valid
:const:`DELETE_METHODS` choices. If it is
:const:`DELETE_METHOD_NONE` or ``None``, then the default delete
method will be used (as specified from the optional
:attr:`_cte_node_delete_method`).
Under the :const:`DELETE_METHOD_GRANDMOTHER` and
:const:`DELETE_METHOD_MONARCHY` delete semantics, descendant nodes
may be moved; in this case the optional `position` can be a
``callable`` which is invoked prior to each move operation (see
:meth:`move` for details).
Furthermore, by default, after each move operation, sub-tree nodes
which were moved will be saved through a call to :meth:`Model.save`
unless `save` is ``False``.
This method delegates move operations to :meth:`move`.
:param node: the :class:`CTENode` to prepare for deletion.
:param method: optionally, a delete method to use.
:param position: optionally, a ``callable`` to invoke prior to each
move operation.
:param save: flag indicating whether to save after each move
operation, ``True`` by default.
"""
# If no delete method preference is specified, use attribute.
if method is None:
method = node._cte_node_delete_method
# If no preference specified, use default.
if method == self.DELETE_METHOD_NONE:
method = self.DEFAULT_DELETE_METHOD
# Delegate to appropriate method.
getattr(self, 'prepare_delete_%s' % method)(node, position, save)
def prepare_delete_pharaoh(self, node, position = None, save = True):
""" Prepares a given :class:`CTENode` `node` for deletion, by executing
the :const:`DELETE_METHOD_PHARAOH` semantics.
This method does not perform any sub-tree | |
_("Restaurants et services de restauration mobile")),
('5610', _("Restaurants et services de restauration mobile")),
('5610A', _("Restauration traditionnelle")),
('5610B', _("Cafétérias et autres libres-services")),
('5610C', _("Restauration de type rapide")),
('562', _("Traiteurs et autres services de restauration")),
('5621', _("Services des traiteurs ")),
('5621Z', _("Services des traiteurs ")),
('5629', _("Autres services de restauration ")),
('5629A', _("Restauration collective sous contrat")),
('5629B', _("Autres services de restauration n.c.a.")),
('563', _("Débits de boissons")),
('5630', _("Débits de boissons")),
('5630Z', _("Débits de boissons")),
('58', _("Édition")),
('581', _("Édition de livres et périodiques et autres activités d'édition ")),
('5811', _("Édition de livres")),
('5811Z', _("Édition de livres")),
('5812', _("Édition de répertoires et de fichiers d'adresses")),
('5812Z', _("Édition de répertoires et de fichiers d'adresses")),
('5813', _("Édition de journaux")),
('5813Z', _("Édition de journaux")),
('5814', _("Édition de revues et périodiques")),
('5814Z', _("Édition de revues et périodiques")),
('5819', _("Autres activités d'édition")),
('5819Z', _("Autres activités d'édition")),
('582', _("Édition de logiciels")),
('5821', _("Édition de jeux électroniques")),
('5821Z', _("Édition de jeux électroniques")),
('5829', _("Édition d'autres logiciels")),
('5829A', _("Édition de logiciels système et de réseau")),
('5829B', _("Edition de logiciels outils de développement et de langages")),
('5829C', _("Edition de logiciels applicatifs")),
('59', _("Production de films cinématographiques, de vidéo et de programmes de télévision ; enregistrement sonore et édition musicale")),
('591', _("Activités cinématographiques, vidéo et de télévision")),
('5911', _("Production de films cinématographiques, de vidéo et de programmes de télévision ")),
('5911A', _("Production de films et de programmes pour la télévision ")),
('5911B', _("Production de films institutionnels et publicitaires")),
('5911C', _("Production de films pour le cinéma")),
('5912', _("Post-production de films cinématographiques, de vidéo et de programmes de télévision")),
('5912Z', _("Post-production de films cinématographiques, de vidéo et de programmes de télévision")),
('5913', _("Distribution de films cinématographiques, de vidéo et de programmes de télévision ")),
('5913A', _("Distribution de films cinématographiques")),
('5913B', _("Edition et distribution vidéo")),
('5914', _("Projection de films cinématographiques")),
('5914Z', _("Projection de films cinématographiques")),
('592', _("Enregistrement sonore et édition musicale")),
('5920', _("Enregistrement sonore et édition musicale ")),
('5920Z', _("Enregistrement sonore et édition musicale ")),
('60', _("Programmation et diffusion")),
('601', _("Édition et diffusion de programmes radio")),
('6010', _("Édition et diffusion de programmes radio")),
('6010Z', _("Édition et diffusion de programmes radio")),
('602', _("Programmation de télévision et télédiffusion")),
('6020', _("Programmation de télévision et télédiffusion")),
('6020A', _("Edition de chaînes généralistes")),
('6020B', _("Edition de chaînes thématiques")),
('61', _("Télécommunications")),
('611', _("Télécommunications filaires")),
('6110', _("Télécommunications filaires")),
('6110Z', _("Télécommunications filaires")),
('612', _("Télécommunications sans fil ")),
('6120', _("Télécommunications sans fil ")),
('6120Z', _("Télécommunications sans fil ")),
('613', _("Télécommunications par satellite")),
('6130', _("Télécommunications par satellite")),
('6130Z', _("Télécommunications par satellite")),
('619', _("Autres activités de télécommunication")),
('6190', _("Autres activités de télécommunication ")),
('6190Z', _("Autres activités de télécommunication ")),
('62', _("Programmation, conseil et autres activités informatiques ")),
('620', _("Programmation, conseil et autres activités informatiques ")),
('6201', _("Programmation informatique")),
('6201Z', _("Programmation informatique")),
('6202', _("Conseil informatique ")),
('6202A', _("Conseil en systèmes et logiciels informatiques")),
('6202B', _("Tierce maintenance de systèmes et d’applications informatiques")),
('6203', _("Gestion d'installations informatiques")),
('6203Z', _("Gestion d'installations informatiques")),
('6209', _("Autres activités informatiques")),
('6209Z', _("Autres activités informatiques")),
('63', _("Services d'information")),
('631', _("Traitement de données, hébergement et activités connexes ; portails Internet")),
('6311', _("Traitement de données, hébergement et activités connexes")),
('6311Z', _("Traitement de données, hébergement et activités connexes")),
('6312', _("Portails Internet")),
('6312Z', _("Portails Internet")),
('639', _("Autres services d'information")),
('6391', _("Activités des agences de presse")),
('6391Z', _("Activités des agences de presse")),
('6399', _("Autres services d'information n.c.a.")),
('6399Z', _("Autres services d'information n.c.a.")),
('64', _("Activités des services financiers, hors assurance et caisses de retraite")),
('641', _("Intermédiation monétaire")),
('6411', _("Activités de banque centrale")),
('6411Z', _("Activités de banque centrale")),
('6419', _("Autres intermédiations monétaires")),
('6419Z', _("Autres intermédiations monétaires")),
('642', _("Activités des sociétés holding")),
('6420', _("Activités des sociétés holding")),
('6420Z', _("Activités des sociétés holding")),
('643', _("Fonds de placement et entités financières similaires")),
('6430', _("Fonds de placement et entités financières similaires")),
('6430Z', _("Fonds de placement et entités financières similaires")),
('649', _("Autres activités des services financiers, hors assurance et caisses de retraite")),
('6491', _("Crédit-bail ")),
('6491Z', _("Crédit-bail ")),
('6492', _("Autre distribution de crédit")),
('6492Z', _("Autre distribution de crédit")),
('6499', _("Autres activités des services financiers, hors assurance et caisses de retraite, n.c.a.")),
('6499Z', _("Autres activités des services financiers, hors assurance et caisses de retraite, n.c.a.")),
('65', _("Assurance")),
('651', _("Assurance")),
('6511', _("Assurance vie ")),
('6511Z', _("Assurance vie")),
('6512', _("Autres assurances")),
('6512Z', _("Autres assurances")),
('652', _("Réassurance")),
('6520', _("Réassurance")),
('6520Z', _("Réassurance")),
('653', _("Caisses de retraite")),
('6530', _("Caisses de retraite")),
('6530Z', _("Caisses de retraite")),
('66', _("Activités auxiliaires de services financiers et d'assurance ")),
('661', _("Activités auxiliaires de services financiers, hors assurance et caisses de retraite")),
('6611', _("Administration de marchés financiers")),
('6611Z', _("Administration de marchés financiers")),
('6612', _("Courtage de valeurs mobilières et de marchandises")),
('6612Z', _("Courtage de valeurs mobilières et de marchandises")),
('6619', _("Autres activités auxiliaires de services financiers, hors assurance et caisses de retraite")),
('6619A', _("Supports juridiques de gestion de patrimoine mobilier")),
('6619B', _("Autres activités auxiliaires de services financiers, hors assurance et caisses de retraite, n.c.a.")),
('662', _("Activités auxiliaires d'assurance et de caisses de retraite")),
('6621', _("Évaluation des risques et dommages")),
('6621Z', _("Évaluation des risques et dommages")),
('6622', _("Activités des agents et courtiers d'assurances")),
('6622Z', _("Activités des agents et courtiers d'assurances")),
('6629', _("Autres activités auxiliaires d'assurance et de caisses de retraite")),
('6629Z', _("Autres activités auxiliaires d'assurance et de caisses de retraite")),
('663', _("Gestion de fonds")),
('6630', _("Gestion de fonds")),
('6630Z', _("Gestion de fonds")),
('68', _("Activités immobilières")),
('681', _("Activités des marchands de biens immobiliers")),
('6810', _("Activités des marchands de biens immobiliers")),
('6810Z', _("Activités des marchands de biens immobiliers")),
('682', _("Location et exploitation de biens immobiliers propres ou loués")),
('6820', _("Location et exploitation de biens immobiliers propres ou loués ")),
('6820A', _("Location de logements")),
('6820B', _("Location de terrains et d'autres biens immobiliers")),
('683', _("Activités immobilières pour compte de tiers")),
('6831', _("Agences immobilières")),
('6831Z', _("Agences immobilières")),
('6832', _("Administration de biens immobiliers")),
('6832A', _("Administration d'immeubles et autres biens immobiliers")),
('6832B', _("Supports juridiques de gestion de patrimoine immobilier")),
('69', _("Activités juridiques et comptables")),
('691', _("Activités juridiques")),
('6910', _("Activités juridiques")),
('6910Z', _("Activités juridiques")),
('692', _("Activités comptables")),
('6920', _("Activités comptables")),
('6920Z', _("Activités comptables")),
('70', _("Activités des sièges sociaux ; conseil de gestion")),
('701', _("Activités des sièges sociaux")),
('7010', _("Activités des sièges sociaux")),
('7010Z', _("Activités des sièges sociaux")),
('702', _("Conseil de gestion")),
('7021', _("Conseil en relations publiques et communication")),
('7021Z', _("Conseil en relations publiques et communication")),
('7022', _("Conseil pour les affaires et autres conseils de gestion")),
('7022Z', _("Conseil pour les affaires et autres conseils de gestion")),
('71', _("Activités d'architecture et d'ingénierie ; activités de contrôle et analyses techniques")),
('711', _("Activités d'architecture et d'ingénierie")),
('7111', _("Activités d'architecture ")),
('7111Z', _("Activités d'architecture ")),
('7112', _("Activités d'ingénierie")),
('7112A', _("Activité des géomètres")),
('7112B', _("Ingénierie, études techniques")),
('712', _("Activités de contrôle et analyses techniques")),
('7120', _("Activités de contrôle et analyses techniques")),
('7120A', _("Contrôle technique automobile")),
('7120B', _("Analyses, essais et inspections techniques")),
('72', _("Recherche-développement scientifique")),
('721', _("Recherche-développement en sciences physiques et naturelles")),
('7211', _("Recherche-développement en biotechnologie")),
('7211Z', _("Recherche-développement en biotechnologie")),
('7219', _("Recherche-développement en autres sciences physiques et naturelles")),
('7219Z', _("Recherche-développement en autres sciences physiques et naturelles")),
('722', _("Recherche-développement en sciences humaines et sociales")),
('7220', _("Recherche-développement en sciences humaines et sociales")),
('7220Z', _("Recherche-développement en sciences humaines et sociales")),
('73', _("Publicité et études de marché")),
('731', _("Publicité")),
('7311', _("Activités des agences de publicité")),
('7311Z', _("Activités des agences de publicité")),
('7312', _("Régie publicitaire de médias")),
('7312Z', _("Régie publicitaire de médias")),
('732', _("Études de marché et sondages")),
('7320', _("Études de marché et sondages")),
('7320Z', _("Études de marché et sondages")),
('74', _("Autres activités spécialisées, scientifiques et techniques")),
('741', _("Activités spécialisées de design")),
('7410', _("Activités spécialisées de design")),
('7410Z', _("Activités spécialisées de design")),
('742', _("Activités photographiques")),
('7420', _("Activités photographiques")),
('7420Z', _("Activités photographiques")),
('743', _("Traduction et interprétation")),
('7430', _("Traduction et interprétation")),
('7430Z', _("Traduction et interprétation")),
('749', _("Autres activités spécialisées, scientifiques et techniques n.c.a.")),
('7490', _("Autres activités spécialisées, scientifiques et techniques n.c.a.")),
('7490A', _("Activité des économistes de la construction")),
('7490B', _("Activités spécialisées, scientifiques et techniques diverses")),
('75', _("Activités vétérinaires")),
('750', _("Activités vétérinaires")),
('7500', _("Activités vétérinaires")),
('7500Z', _("Activités vétérinaires")),
('77', _("Activités de location et location-bail")),
('771', _("Location et location-bail de véhicules automobiles")),
('7711', _("Location et location-bail de voitures et de véhicules automobiles légers")),
('7711A', _("Location de courte durée de voitures et de véhicules automobiles légers")),
('7711B', _("Location de longue durée de voitures et de véhicules automobiles légers")),
('7712', _("Location et location-bail de camions")),
('7712Z', _("Location et location-bail de camions")),
('772', _("Location et location-bail de biens personnels et domestiques")),
('7721', _("Location et location-bail d'articles de loisirs et de sport ")),
('7721Z', _("Location et location-bail d'articles de loisirs et de sport ")),
('7722', _("Location de vidéocassettes et disques vidéo")),
('7722Z', _("Location de vidéocassettes et disques vidéo")),
('7729', _("Location et location-bail d'autres biens personnels et domestiques")),
('7729Z', _("Location et location-bail d'autres biens personnels et domestiques")),
('773', _("Location et location-bail d'autres machines, équipements et biens")),
('7731', _("Location et location-bail de machines et équipements agricoles")),
('7731Z', _("Location et location-bail de machines et équipements agricoles")),
('7732', _("Location et location-bail de machines et équipements pour la construction")),
('7732Z', _("Location et location-bail de machines et équipements pour la construction")),
('7733', _("Location et location-bail de machines de bureau et de matériel informatique")),
('7733Z', _("Location et location-bail de machines de bureau et de matériel informatique")),
('7734', _("Location et location-bail de matériels de transport par eau")),
('7734Z', _("Location et location-bail de matériels de transport par eau")),
('7735', _("Location et location-bail de matériels de transport aérien")),
('7735Z', _("Location et location-bail de matériels de transport aérien")),
('7739', _("Location et location-bail d'autres machines, équipements et biens matériels n.c.a. ")),
('7739Z', _("Location et location-bail d'autres machines, équipements et | |
# <gh_stars>0
#!/usr/bin/env python
"""
MIT License
Copyright (c) 2020 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import base64
import copy
import sys
import urllib.parse
import urllib.request
import os
from pathlib import Path
from lxml import etree
from builtins import str
sys.path.append('./')
sys.path.append('./inkex')
sys.path.append('./scour')
sys.path.append('./ebooklib')
import re
import inkex
import scour.scour
import ebooklib
import larscwallin_inx_ebooklib_epub as inx_epub
class ExportToEpub(inkex.Effect):
    """Inkscape extension that exports the current SVG document as an EPUB.

    Visible root layers of the document are exported (see :meth:`effect`);
    the class-level ``svg_src_template`` and ``font_face_template`` strings
    below are the raw SVG/CSS skeletons into which layer content, defs,
    scripts, and font declarations are substituted.

    NOTE(review): behavior beyond this chunk (e.g. how resources and cover
    layers are packaged) is implemented further down in :meth:`effect` —
    confirm against the full file.
    """
svg_src_template = """<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg xmlns="http://www.w3.org/2000/svg" version="1.1" width="{{viewport.width}}" height="{{viewport.height}}" viewBox="0 0 {{document.width}} {{document.height}}" xml:space="preserve" preserveAspectRatio="xMinYMin">
<title>{{title}}</title>
<style id="font-declarations">
{{font-faces}}
</style>
{{defs}}
{{scripts}}
<metadata xmlns="http://www.w3.org/2000/svg" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:cc="http://creativecommons.org/ns#" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:svg="http://www.w3.org/2000/svg" xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" id="metadata5">
<rdf:RDF>
<dc:format>image/svg+xml</dc:format>
<dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/>
<dc:title>{{title}}</dc:title>
</rdf:RDF>
</metadata>
{{element.source}}
</svg>"""
font_face_template = """
@font-face {
font-family: {{font.family}};
src: url("{{font.url}}");
}
"""
def __init__(self):
inkex.Effect.__init__(self)
self.arg_parser.add_argument('--where', action='store',
type=str, dest='where', default='',
help='Where to save the EPUB.')
self.arg_parser.add_argument('--root_folder', action='store',
type=str, dest='root_folder', default='',
help='Project root folder path')
self.arg_parser.add_argument('--filename', action='store',
type=str, dest='filename', default='publication.epub',
help='File name including extension.')
self.arg_parser.add_argument('--resources_folder', action='store',
type=str, dest='resources_folder', default='',
help='Optional relative path to a folder containing additional '
'resources to be added to the EPUB. Could be audio, video etc.')
self.arg_parser.add_argument('--bottom_layer_as_cover', action='store',
type=inkex.Boolean, dest='bottom_layer_as_cover', default=False,
help='Use bottom layer as cover image?')
self.arg_parser.add_argument('--wrap_svg_in_html', action='store',
type=inkex.Boolean, dest='wrap_svg_in_html', default=False,
help='Save documents as HTML instead of SVG?')
def effect(self):
self.publication_title = "Publication Title"
self.publication_desc = ""
# The output path for the created EPUB
self.destination_path = self.options.where
# The project root folder
self.root_folder = self.options.root_folder
        # the resource folder is the location, relative to the project root folder, where all the images, fonts etc.
# are stored. If you want a file to be automatically added to the EPUB it needs to be put in this folder.
self.resources_folder = self.options.resources_folder
self.filename = self.options.filename
self.resource_items = []
self.bottom_layer_as_cover = self.options.bottom_layer_as_cover
self.wrap_svg_in_html = self.options.wrap_svg_in_html
self.svg_doc = self.document.xpath('//svg:svg', namespaces=inkex.NSS)[0]
self.svg_doc_width = float(self.svg.unittouu(self.svg_doc.get('width')))
self.svg_doc_height = float(self.svg.unittouu(self.svg_doc.get('height')))
self.svg_viewport_width = float(self.svg.unittouu(self.svg_doc.get('width')))
self.svg_viewport_height = float(self.svg.unittouu(self.svg_doc.get('height')))
self.svg_nav_doc = ebooklib.epub.EpubNav()
# We only care about the "root layers" that are visible. Sub-layers will be included.
self.visible_layers = self.document.xpath('/svg:svg/svg:g[not(contains(@style,"display:none"))]',
namespaces=inkex.NSS)
# Create a new EPUB instance
self.book = ebooklib.epub.EpubBook()
if self.visible_layers.__len__() > 0:
selected_layers = []
content_documents = []
# Get all defs elements. These are "injected" in each of the documents
defs = self.document.xpath('//svg:svg/svg:defs', namespaces=inkex.NSS)
defs_string = ''
# Get all script elements in the document root. These are "injected" in each of the documents.
# Script elements that are children of layers are unique to each document.
scripts = self.document.xpath('//svg:svg/svg:script', namespaces=inkex.NSS)
scripts_string = ''
# We get all text elements in order to later get all used font-families.
text_elements = self.document.xpath('//svg:text', namespaces=inkex.NSS)
font_families = {}
font_faces_string = ''
resource_folder_path = os.path.join(self.root_folder, self.resources_folder)
# Call add_resources to recursively add resources to the EPUB instance.
self.add_resources(resource_folder_path)
# Now let's go through the text elements and see which font families that are used.
for text in text_elements:
style = inkex.Style(text.get('style'))
if style['font-family']:
font = style['font-family']
if font not in font_families:
font_families[font] = font
# Time to loop through the script elements if there are any
if len(scripts) > 0:
for script in scripts:
xlink = script.get('xlink:href')
# If there is an xlink attribute it's an external script. External scripts are handled a
# bit differently than other resources. Instead of assuming that they are located in the
# specified resource folder they will be retrieved and put in the "scripts" folder in the
# root of the EPUB. I made this choice to make it easier to point to js on the web.
# Might change this later.
if xlink:
# Now we'll try to read the contents of the script file.
script_source = self.read_file(xlink)
if script_source is not None:
script_name = os.path.basename(xlink)
script_item = inx_epub.InxEpubItem(file_name=('scripts/' + script_name),
media_type='text/javascript', content=script_source)
self.book.add_item(script_item)
# Scripts should really use xlink:href but scour crashes if I use it :/
# src seems to work, but it does not validate
scripts_string += str(
'<script src="' + ('scripts/' + script_name) + '"></script>')
else:
# If there is no xlink we can assume that this is an embedded script and grab its text content.
script_source = script.text
if script_source:
scripts_string += str(
'<script id="' + script.get('id') + '">' + script_source + '</script>')
else:
# No scripts so we just add a self closing element
scripts_string = '<script />'
# If we found defs we loop through them and add them to string which will be inserted in every document.
# Note that we use Scour later for each doc to remove unused defs.
if len(defs) > 0:
for element in defs:
defs_string += str(etree.tostring(element, method='html', pretty_print=False), 'utf-8')
else:
defs_string = '<defs/>'
metadata = self.document.xpath('//svg:svg/svg:metadata/rdf:RDF/cc:Work', namespaces=inkex.NSS)
metadata_items = {
'title': '',
'date': '',
'creator': '',
'rights': '',
'publisher': '',
'description': '',
'contributor': '',
'language': ''
}
if len(metadata[0]) > 0:
for element in metadata[0]:
# Copy the node to make sure that the input document is not mutated when we remove namespaces
element_copy = copy.deepcopy(element)
# Me being lazy. Flatten any metadata item to only include text
element_copy_text = element_copy.xpath(".//text()")
element_copy_text = ''.join(element_copy_text).strip() if len(element_copy_text) > 0 else ''
self.remove_namespace(element_copy, 'http://purl.org/dc/elements/1.1/')
self.remove_namespace(element_copy, 'http://www.w3.org/2000/svg')
metadata_items[element_copy.tag] = element_copy_text
element_copy = None
else:
pass
if metadata_items['title'] != '':
self.publication_title = metadata_items['title']
self.book.set_title(self.publication_title)
if metadata_items['description'] != '':
self.publication_desc = metadata_items['description']
# Add collected metadata to the book
for term, val in metadata_items.items():
if val != '':
self.book.add_metadata('DC', term, val)
self.book.add_metadata(None, 'meta', 'pre-paginated', {'property': 'rendition:layout'})
self.book.add_metadata(None, 'meta', 'auto', {'property': 'rendition:orientation'})
# All visible layers will be saved as FXL docs in the EPUB. Let's loop through them!
for element in self.visible_layers:
# Save all images to the epub package
self.save_images_to_epub(element, self.book)
element_label = str(element.get(inkex.utils.addNS('label', 'inkscape'), ''))
element_id = element.get('id').replace(' ', '_')
if element_label != '':
element.set('label', element_label)
element.set('class', element_label)
else:
pass
element_source = etree.tostring(element, pretty_print=True)
if element_source != '':
# Wrap the node in an SVG doc
tpl_result = str.replace(self.svg_src_template, '{{defs}}', defs_string)
tpl_result = str.replace(tpl_result, '{{scripts}}', scripts_string)
tpl_result = str.replace(tpl_result, '{{title}}', element_label)
tpl_result = str.replace(tpl_result, '{{viewport.width}}', str(self.svg_viewport_width))
tpl_result = str.replace(tpl_result, '{{viewport.height}}', str(self.svg_viewport_height))
tpl_result = str.replace(tpl_result, '{{document.width}}', str(self.svg_doc_width))
tpl_result = str.replace(tpl_result, '{{document.height}}', str(self.svg_doc_height))
tpl_result = str.replace(tpl_result, '{{element.source}}', str(element_source, 'utf-8'))
for font in font_families:
font_family = str.replace(font, ' ', '+')
font_family = str.replace(font_family, "'", '')
tpl_result = str.replace(tpl_result, font, font_family)
resource_path = os.path.join(self.root_folder, self.resources_folder)
font_file_name = self.find_file_fuzzy(font_family, resource_path)
if font_file_name is not None:
font_path = self.get_relative_resource_path(font_file_name)
font_tpl_result = str.replace(self.font_face_template, '{{font.family}}', font_family)
font_tpl_result = str.replace(font_tpl_result, '{{font.url}}', font_path)
font_faces_string = font_faces_string + font_tpl_result
else:
inkex.utils.debug('Could not find matching font file ' + font_family + ' in location ' + resource_path)
tpl_result = str.replace(tpl_result, '{{font-faces}}', font_faces_string)
font_faces_string = ''
tpl_result = self.scour_doc(tpl_result)
                    # TODO: Add processing instruction to head of file
content_doc = etree.fromstring(tpl_result)
content_doc = etree.ElementTree(content_doc)
# If the result of the operation is valid, add the SVG source to the selected array
if tpl_result:
selected_layers.append({
'id': element_id,
'label': element_label,
'source': etree.tostring(content_doc, pretty_print=True),
'element': element
})
for layer in selected_layers:
# Cache these in local vars
content = layer['source']
label = layer['label'] or layer['id']
label = label.replace(' ', '_')
if content != '':
if self.wrap_svg_in_html:
doc = inx_epub.InxEpubHtml(uid=label, file_name=label + '.html', media_type='text/html',
content=content, width=self.svg_viewport_width, height=self.svg_viewport_height)
| |
numpy arrays)
# # (det_scores[i] has shape: (num_detections_i)) (num_detections_i <= num_guided_anchors_in_pc_i)
# (det_fs is a list of batch_size numpy arrays)
# # (det_fs[i] has shape: (num_detections_i))
# (det_bboxes is a list of batch_size numpy arrays)
# # (det_bboxes[i] has shape: (num_detections_i, 7))
# print (len(det_scores))
# print (det_scores[0].shape)
# print (len(det_bboxes))
# print (det_bboxes[0].shape)
# print (" ")
# print ("fs before refinement:")
# print (det_fs.detach().cpu().numpy())
# print ("bboxes before refinement:")
# print (det_bboxes[0])
# print ("%%%%%%%%%%%%%%%%%%%%%%")
# (end of "with torch.no_grad():"") ####################################
if self.test_cfg.extra.EBM_refine:
# (det_bboxes is a list of batch_size numpy arrays)
# # (det_bboxes[i] has shape: (num_detections_i, 7))
# (conv6 has shape: (batch_size, 256, 200, 176))
# (batch_size == 1)
# print (conv6.size())
bboxes = []
for i in range(len(det_bboxes)):
bboxes.append(torch.from_numpy(det_bboxes[i]).cuda())
# (bboxes is a list of batch_size tensors)
# # (bboxes[i] has shape: (num_detections_i, 7))
# print (len(bboxes))
# print (bboxes[0].size())
conv6.requires_grad = True
det_bboxes = []
for i in range(len(bboxes)):
# (conv6 has shape: (batch_size, 256, 200, 176))
# (bboxes[i] has shape: (num_detections_i, 7))
# print (conv6.size())
# print (bboxes[i].size())
bboxes_i = bboxes[i] # (shape: (num_detections_i, 7))
if bboxes_i.size(0) == 0:
det_bboxes.append(bboxes_i.detach().cpu().numpy())
continue
step_sizes = 0.0001*torch.ones(bboxes_i.size(0), 1).cuda() # (shape: (num_detections_i, 1))
print (self.test_cfg.extra.EBM_refine_steps)
for step in range(self.test_cfg.extra.EBM_refine_steps):
# print (step_sizes)
bboxes_init = bboxes_i.clone().detach() # (shape: (num_detections_i, 7))
bboxes_init.requires_grad = True
# print (bboxes_init[0])
# print (bboxes_init.size())
############################################################
############################################################
############################################################
############################################################
############################################################
(ys_pixel_xs, ys_pixel_ys) = self.gen_grid_fn(bboxes_init[:, [0, 1, 3, 4, 6]])
# (both have shape: (4, 7, num_detections_i))
# print (ys_pixel_xs.size())
# print (ys_pixel_ys.size())
ys_pixel_xs = ys_pixel_xs.permute(2, 0, 1).contiguous() # (shape: (num_detections_i, 4, 7))
ys_pixel_ys = ys_pixel_ys.permute(2, 0, 1).contiguous() # (shape: (num_detections_i, 4, 7))
# print (ys_pixel_xs.size())
# print (ys_pixel_ys.size())
ys_pixel_coords = torch.cat([ys_pixel_xs.unsqueeze(3), ys_pixel_ys.unsqueeze(3)], 3)
# (shape: (num_detections_i, 4, 7, 2))
# print (ys_pixel_coords.size())
#
conv6_i = conv6[i].unsqueeze(0) # (shape: (1, 256, 200, 176))
# print (conv6_i.size())
conv6_i_ys = conv6_i.expand(ys_pixel_coords.size(0), -1, -1, -1)
# (shape: (num_detections_i, 256, 200, 176))
# print (conv6_i_ys.size())
#
if conv6_i_ys.size(0) < 150:
ys_feature_maps = bilinear_interpolate_torch_gridsample(conv6_i_ys, ys_pixel_coords)
# (shape: (num_detections_i, 256, 4, 7))
# print (ys_feature_maps.size())
else:
num_iters = int(math.floor(conv6_i_ys.size(0)/150.0))
ys_feature_maps_list = []
for iter in range(num_iters):
ys_feature_maps_list.append(bilinear_interpolate_torch_gridsample(conv6_i_ys[(150*iter):(150*(iter+1))], ys_pixel_coords[(150*iter):(150*(iter+1))]))
ys_feature_maps_list.append(bilinear_interpolate_torch_gridsample(conv6_i_ys[(150*num_iters):], ys_pixel_coords[(150*num_iters):]))
ys_feature_maps = torch.cat(ys_feature_maps_list, 0)
# (shape: (num_detections_i, 256, 4, 7))
#
ys_features = ys_feature_maps.view(ys_feature_maps.size(0), -1)
# (shape: (num_detections_i, 7168)) (7168 = 256*4*7)
# print (ys_features.size())
#
features = F.relu(self.ebm_fc1(ys_features)) # (shape: (num_detections_i, 1024))
# print (features.size())
features = F.relu(self.ebm_fc2(features)) # (shape: (num_detections_i, 1024))
# print (features.size())
#
fs = self.ebm_fc3(features) # (shape: (num_detections_i, 1))
# print (fs.size())
fs = fs.squeeze(1) # (shape: (num_detections_i))
# print (fs.size())
# print (fs)
############################################################
############################################################
############################################################
############################################################
############################################################
# fs.backward(gradient = torch.ones_like(fs))
#
grad_bboxes_init = torch.autograd.grad(fs.sum(), bboxes_init, create_graph=True)[0]
# (shape: (num_detections_i, 7)) (same as bboxes_init)
# print (grad_bboxes_init.size())
# bboxes_refined = bboxes_init + 0.0001*bboxes_init.grad
#
bboxes_refined = bboxes_init + step_sizes*grad_bboxes_init
with torch.no_grad():
############################################################
############################################################
############################################################
############################################################
############################################################
(ys_pixel_xs, ys_pixel_ys) = self.gen_grid_fn(bboxes_refined[:, [0, 1, 3, 4, 6]])
# (both have shape: (4, 7, num_detections_i))
# print (ys_pixel_xs.size())
# print (ys_pixel_ys.size())
ys_pixel_xs = ys_pixel_xs.permute(2, 0, 1).contiguous() # (shape: (num_detections_i, 4, 7))
ys_pixel_ys = ys_pixel_ys.permute(2, 0, 1).contiguous() # (shape: (num_detections_i, 4, 7))
# print (ys_pixel_xs.size())
# print (ys_pixel_ys.size())
ys_pixel_coords = torch.cat([ys_pixel_xs.unsqueeze(3), ys_pixel_ys.unsqueeze(3)], 3)
# (shape: (num_detections_i, 4, 7, 2))
# print (ys_pixel_coords.size())
#
conv6_i = conv6[i].unsqueeze(0) # (shape: (1, 256, 200, 176))
# print (conv6_i.size())
conv6_i_ys = conv6_i.expand(ys_pixel_coords.size(0), -1, -1, -1)
# (shape: (num_detections_i, 256, 200, 176))
# print (conv6_i_ys.size())
#
if conv6_i_ys.size(0) < 150:
ys_feature_maps = bilinear_interpolate_torch_gridsample(conv6_i_ys, ys_pixel_coords)
# (shape: (num_detections_i, 256, 4, 7))
# print (ys_feature_maps.size())
else:
num_iters = int(math.floor(conv6_i_ys.size(0)/150.0))
ys_feature_maps_list = []
for iter in range(num_iters):
ys_feature_maps_list.append(bilinear_interpolate_torch_gridsample(conv6_i_ys[(150*iter):(150*(iter+1))], ys_pixel_coords[(150*iter):(150*(iter+1))]))
ys_feature_maps_list.append(bilinear_interpolate_torch_gridsample(conv6_i_ys[(150*num_iters):], ys_pixel_coords[(150*num_iters):]))
ys_feature_maps = torch.cat(ys_feature_maps_list, 0)
# (shape: (num_detections_i, 256, 4, 7))
#
ys_features = ys_feature_maps.view(ys_feature_maps.size(0), -1)
# (shape: (num_detections_i, 7168)) (7168 = 256*4*7)
# print (ys_features.size())
#
features = F.relu(self.ebm_fc1(ys_features)) # (shape: (num_detections_i, 1024))
# print (features.size())
features = F.relu(self.ebm_fc2(features)) # (shape: (num_detections_i, 1024))
# print (features.size())
#
new_fs = self.ebm_fc3(features) # (shape: (num_detections_i, 1))
# print (new_fs.size())
new_fs = new_fs.squeeze(1) # (shape: (num_detections_i))
# print (new_fs.size())
# print (new_fs)
############################################################
############################################################
############################################################
############################################################
############################################################
refinement_failed = (new_fs < fs) # (shape: (num_detections_i))
# print (refinement_failed)
# print (refinement_failed.size())
refinement_failed = refinement_failed.unsqueeze(1) # (shape: (num_detections_i, 1))
r_f = refinement_failed.float()
bboxes_i = r_f*bboxes_init + (1.0-r_f)*bboxes_refined
step_sizes = (1.0-r_f)*step_sizes + r_f*0.5*step_sizes
if step == self.test_cfg.extra.EBM_refine_steps - 1: # (in final step)
refinement_failed = (new_fs < fs) # (shape: (num_detections_i))
# print (refinement_failed)
# print (refinement_failed.size())
r_f = refinement_failed.float()
final_fs = r_f*fs + (1.0-r_f)*new_fs
# print ("###")
# print ("###")
# print ("###")
# print ("fs after refinement:")
# print (final_fs.detach().cpu().numpy())
# print ("***********************")
det_bboxes.append(bboxes_i.detach().cpu().numpy())
# print ("bboxes after refinement:")
# print (det_bboxes[0])
results = [kitti_bbox2results(*param) for param in zip(det_bboxes, det_scores, img_meta)]
print ("{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}")
return results
class SingleStageDetector20(BaseDetector, RPNTestMixin, BBoxTestMixin,
MaskTestMixin):
    def __init__(self,
                 backbone,
                 neck=None,
                 bbox_head=None,
                 extra_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 full_cfg=None):
        """Build the detector's sub-modules from their configuration dicts.

        Parameters
        ----------
        backbone : config
            Passed to ``builder.build_backbone``.
        neck : config, optional
            Passed to ``builder.build_neck``. Effectively required: a
            missing neck raises ``NotImplementedError``.
        bbox_head : config, optional
            RPN head config (``builder.build_single_stage_head``).
        extra_head : config, optional
            Refinement head config; it also supplies the grid offsets and
            feature-map stride used to build ``gen_grid_fn``.
        train_cfg, test_cfg : optional
            Training / testing configuration, stored as-is.
        pretrained : str, optional
            Checkpoint path forwarded to ``init_weights``.
        full_cfg : optional
            Global config. ``full_cfg.SA_SSD_fixed`` freezes the
            backbone/neck/heads; ``full_cfg.USE_EBM`` adds the EBM layers.
        """
        super(SingleStageDetector20, self).__init__()
        self.backbone = builder.build_backbone(backbone)
        if full_cfg is not None:
            if full_cfg.SA_SSD_fixed:
                # Freeze the pre-trained SA-SSD backbone so only the
                # newly-added parts are trained.
                for p in self.backbone.parameters():
                    p.requires_grad = False
        if neck is not None:
            self.neck = builder.build_neck(neck)
            if full_cfg is not None:
                if full_cfg.SA_SSD_fixed:
                    for p in self.neck.parameters():
                        p.requires_grad = False
        else:
            # A neck is mandatory for this detector.
            raise NotImplementedError
        if bbox_head is not None:
            self.rpn_head = builder.build_single_stage_head(bbox_head)
            if full_cfg is not None:
                if full_cfg.SA_SSD_fixed:
                    for p in self.rpn_head.parameters():
                        p.requires_grad = False
        if extra_head is not None:
            self.extra_head = builder.build_single_stage_head(extra_head)
            # Pre-bind the sampling-grid generator with the head's geometry so
            # callers only pass box parameters (x, y, w, l, theta).
            self.grid_offsets = self.extra_head.grid_offsets
            self.featmap_stride = self.extra_head.featmap_stride
            self.gen_grid_fn = partial(gen_sample_grid, grid_offsets=self.grid_offsets, spatial_scale=(1.0/self.featmap_stride))
            if full_cfg is not None:
                if full_cfg.SA_SSD_fixed:
                    for p in self.extra_head.parameters():
                        p.requires_grad = False
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.full_cfg = full_cfg
        # Number of samples used during EBM training — TODO confirm against caller.
        self.num_samples = 128
        # Per-dimension noise stds (module-level constant stds8).
        self.stds = stds8
        print (self.stds)
        self.init_weights(pretrained)
        if full_cfg is not None:
            if full_cfg.USE_EBM:
                # Energy-based model head: 7168 pooled features (presumably
                # 256*4*7 — see the shape comments in the test path) plus two
                # 16-dim embeddings from the z/h branches below.
                self.ebm_fc1 = make_fc(7168+16+16, 1024)
                self.ebm_fc2 = make_fc(1024, 1024)
                self.ebm_fc3 = nn.Linear(1024, 1)
                # Small init for the final energy layer; zero biases.
                nn.init.normal_(self.ebm_fc3.weight, std=0.001)
                for l in [self.ebm_fc3]:
                    nn.init.constant_(l.bias, 0)
                # Scalar-to-16-dim embedding branches (z and h inputs).
                self.z_fc1 = nn.Linear(1, 16)
                self.z_fc2 = nn.Linear(16, 16)
                self.h_fc1 = nn.Linear(1, 16)
                self.h_fc2 = nn.Linear(16, 16)
@property
def with_rpn(self):
return hasattr(self, 'rpn_head') and self.rpn_head is not None
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
def merge_second_batch(self, batch_args):
ret = {}
for key, elems in batch_args.items():
if key in [
'voxels', 'num_points',
]:
ret[key] = torch.cat(elems, dim=0)
elif key == 'coordinates':
coors = []
for i, coor in enumerate(elems):
coor_pad = F.pad(
coor, [1, 0, 0, 0],
mode='constant',
value=i)
coors.append(coor_pad)
ret[key] = torch.cat(coors, dim=0)
elif key in [
'img_meta', 'gt_labels', 'gt_bboxes',
]:
ret[key] = elems
else:
ret[key] = torch.stack(elems, dim=0)
return ret
def forward_train(self, img, img_meta, **kwargs):
# (img has shape: (batch_size, 3, 384, 1248))
# (img_meta is a list of batch_size elements, example element: {'img_shape': (375, 1242, 3), 'sample_idx': 3132, 'calib': <mmdet.datasets.kitti_utils.Calibration object at 0x7fc3c16ad898>})
# (kwargs is a dict containing the keys "anchors", "voxels", "coordinates", "num_points", "anchors_mask", "gt_labels", "gt_bboxes")
# # (kwargs["anchors"] etc is a list of batch_size tensors)
# print (img.size())
# print (len(img_meta))
batch_size = len(img_meta)
ret = self.merge_second_batch(kwargs)
# (ret["voxels"] has shape: (num_voxels, 4)) (num_voxels is different for different examples) (for batch_size = 2, num_voxels is typically 35000 - 45000)
# (ret["coordinates"] has shape: (num_voxels, 4))
# print (ret["voxels"].size())
# print (ret["coordinates"].size())
vx = self.backbone(ret['voxels'], ret['num_points'])
# (vx has shape: (num_voxels, 4)) (vx is just identical to ret["voxels"]? seems so)
# print (vx.size())
(x, conv6), point_misc = self.neck(vx, ret['coordinates'], batch_size)
# (x has shape: (batch_size, 256, 200, 176))
# (conv6 has shape: (batch_size, 256, 200, 176))
# (point_misc is a list of 3 tensors)
# print (x.size())
# print (conv6.size())
losses = dict()
if not self.full_cfg.SA_SSD_fixed:
aux_loss = self.neck.aux_loss(*point_misc, gt_bboxes=ret['gt_bboxes'])
losses.update(aux_loss)
# RPN forward and loss
if self.with_rpn:
rpn_outs = self.rpn_head(x)
# (rpn_outs is a list of 3 elements)
# (rpn_outs[0] has shape: (batch_size, 200, 176, 14)) (14 = 7*num_anchor_per_loc) (x, y, z, h, w, l, theta)
# (rpn_outs[1] has shape: (batch_size, 200, 176, 2)) (2 = 1*num_anchor_per_loc) (conf_score) (just one class (Car))
# (rpn_outs[2] has shape: (batch_size, 200, 176, 4)) (4 = 2*num_anchor_per_loc) (classification of heading directon (forward or backward))
# print (len(rpn_outs))
# | |
'Yongzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u6c38\u5dde\u5e02')},
'861399529':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')},
'861454768':{'en': 'TaiAn, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6cf0\u5b89\u5e02')},
'86145313':{'en': 'Tianjin', 'zh': u('\u5929\u6d25\u5e02')},
'861454766':{'en': 'Weifang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6f4d\u574a\u5e02')},
'861454767':{'en': 'Jining, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'861454764':{'en': 'Zaozhuang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u67a3\u5e84\u5e02')},
'861454765':{'en': 'Liaocheng, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')},
'861454762':{'en': 'Dongying, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u4e1c\u8425\u5e02')},
'861454763':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861454760':{'en': 'Heze, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u83cf\u6cfd\u5e02')},
'861454761':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861379508':{'en': 'Fuxin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u961c\u65b0\u5e02')},
'861379509':{'en': 'Fuxin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u961c\u65b0\u5e02')},
'861379502':{'en': 'Fuxin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u961c\u65b0\u5e02')},
'861379503':{'en': 'Chaoyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u671d\u9633\u5e02')},
'861379500':{'en': 'Fuxin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u961c\u65b0\u5e02')},
'861379501':{'en': 'Fuxin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u961c\u65b0\u5e02')},
'861379506':{'en': 'Panjin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u76d8\u9526\u5e02')},
'861379507':{'en': 'Fuxin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u961c\u65b0\u5e02')},
'861379504':{'en': 'Panjin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u76d8\u9526\u5e02')},
'861379505':{'en': 'Panjin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u76d8\u9526\u5e02')},
'861386548':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'861386549':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'861386544':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861386545':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861386546':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861386547':{'en': 'Xuancheng, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'861386540':{'en': 'Wuhu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u829c\u6e56\u5e02')},
'861386541':{'en': 'MaAnshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9a6c\u978d\u5c71\u5e02')},
'861386542':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861386543':{'en': 'LuAn, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861393469':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u4e34\u6c7e\u5e02')},
'861393468':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u4e34\u6c7e\u5e02')},
'861393465':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861393464':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861393467':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u4e34\u6c7e\u5e02')},
'861393466':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861393461':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861393460':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861393463':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861393462':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861454269':{'en': '<NAME>', 'zh': u('\u897f\u85cf\u62c9\u8428\u5e02')},
'861380627':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861380626':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861380625':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861380624':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861380623':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861380622':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861380621':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861380620':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'86139924':{'en': 'Shangluo, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5546\u6d1b\u5e02')},
'861458439':{'en': 'Pingdingshan, Henan', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')},
'861395708':{'en': 'Lishui, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u4e3d\u6c34\u5e02')},
'861395709':{'en': 'Lishui, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u4e3d\u6c34\u5e02')},
'861395706':{'en': 'Lishui, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u4e3d\u6c34\u5e02')},
'861395707':{'en': 'Lishui, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u4e3d\u6c34\u5e02')},
'861395704':{'en': 'Lishui, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u4e3d\u6c34\u5e02')},
'861395705':{'en': 'Lishui, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u4e3d\u6c34\u5e02')},
'861395702':{'en': 'Quzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u8862\u5dde\u5e02')},
'861395703':{'en': 'Quzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u8862\u5dde\u5e02')},
'861395700':{'en': 'Quzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u8862\u5dde\u5e02')},
'861395701':{'en': 'Quzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u8862\u5dde\u5e02')},
'861458432':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'86139368':{'en': 'Daqing, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5e86\u5e02')},
'86139369':{'en': 'Daqing, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5e86\u5e02')},
'861458433':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'86139364':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'86139365':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'86139366':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'86139367':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5e86\u5e02')},
'86139360':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'86139361':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'86139362':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'86139363':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861458431':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'861458436':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')},
'861458437':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')},
'861458434':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'861458435':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')},
'861391287':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861391286':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861391285':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861391284':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861391283':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861391282':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861380658':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'861380659':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'861380656':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'861380657':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'861380654':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861380655':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861380652':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861380653':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861380650':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861380651':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861384793':{'en': 'Hinggan, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5174\u5b89\u76df')},
'861384792':{'en': 'Xilin, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9521\u6797\u90ed\u52d2\u76df')},
'861384791':{'en': 'Xilin, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9521\u6797\u90ed\u52d2\u76df')},
'861384790':{'en': 'Xilin, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9521\u6797\u90ed\u52d2\u76df')},
'861384797':{'en': 'Ordos, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9102\u5c14\u591a\u65af\u5e02')},
'861384796':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861384795':{'en': 'Tongliao, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u901a\u8fbd\u5e02')},
'861384794':{'en': 'Hinggan, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5174\u5b89\u76df')},
'861390482':{'en': 'Qiqihar, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9f50\u9f50\u54c8\u5c14\u5e02')},
'861390483':{'en': 'Mudanjiang, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u7261\u4e39\u6c5f\u5e02')},
'861384799':{'en': 'Hinggan, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5174\u5b89\u76df')},
'861384798':{'en': 'Hinggan, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5174\u5b89\u76df')},
'861390486':{'en': 'Daqing, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5e86\u5e02')},
'861390487':{'en': 'Jixi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e21\u897f\u5e02')},
'861390484':{'en': 'Mudanjiang, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u7261\u4e39\u6c5f\u5e02')},
'861390485':{'en': 'Suihua, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u7ee5\u5316\u5e02')},
'861452633':{'en': 'Yancheng, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'86139588':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'86139589':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861454298':{'en': 'Liaoyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u8fbd\u9633\u5e02')},
'86145054':{'en': 'Harbin, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'86139580':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'86139581':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'86139582':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'86139583':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'86139584':{'en': 'Jinhua, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u91d1\u534e\u5e02')},
'86139585':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'86139586':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'86139587':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861396552':{'en': 'Huangshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9ec4\u5c71\u5e02')},
'861396553':{'en': 'Huangshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9ec4\u5c71\u5e02')},
'861396550':{'en': 'Huangshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9ec4\u5c71\u5e02')},
'861396551':{'en': 'Huangshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9ec4\u5c71\u5e02')},
'861396556':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861396557':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861396554':{'en': 'MaAnshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9a6c\u978d\u5c71\u5e02')},
'861396555':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861458586':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861458587':{'en': 'Weifang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6f4d\u574a\u5e02')},
'861396558':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861396559':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861458582':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861458583':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861458580':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861458581':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861390730':{'en': 'Yueyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u5cb3\u9633\u5e02')},
'861390731':{'en': 'Changsha, Hunan', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861390732':{'en': 'Xiangtan, Hunan', 'zh': u('\u6e56\u5357\u7701\u6e58\u6f6d\u5e02')},
'861390733':{'en': 'Zhuzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u682a\u6d32\u5e02')},
'861390734':{'en': 'Hengyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u8861\u9633\u5e02')},
'861390735':{'en': 'Chenzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u90f4\u5dde\u5e02')},
'861390736':{'en': 'Changde, Hunan', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'86138368':{'en': 'Daqing, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5e86\u5e02')},
'861390738':{'en': 'Loudi, Hunan', 'zh': u('\u6e56\u5357\u7701\u5a04\u5e95\u5e02')},
'86138366':{'en': 'Jiamusi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u4f73\u6728\u65af\u5e02')},
'86138365':{'en': 'Jixi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e21\u897f\u5e02')},
'86138364':{'en': 'Suihua, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u7ee5\u5316\u5e02')},
'86138363':{'en': 'Mudanjiang, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u7261\u4e39\u6c5f\u5e02')},
'86138362':{'en': 'Qiqihar, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9f50\u9f50\u54c8\u5c14\u5e02')},
'86138361':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'86138360':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861452638':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861454811':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861388681':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5341\u5830\u5e02')},
'861388680':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5341\u5830\u5e02')},
'861388683':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5341\u5830\u5e02')},
'861388682':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5341\u5830\u5e02')},
'861388685':{'en': 'Suizhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u968f\u5dde\u5e02')},
'861388684':{'en': 'Sh<NAME>', 'zh': u('\u6e56\u5317\u7701\u5341\u5830\u5e02')},
'861388687':{'en': 'Suizhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u968f\u5dde\u5e02')},
'861388686':{'en': 'Suizhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u968f\u5dde\u5e02')},
'861388689':{'en': 'Suizhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u968f\u5dde\u5e02')},
'861388688':{'en': 'Suizhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u968f\u5dde\u5e02')},
'861453804':{'en': 'Nanchong, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5357\u5145\u5e02')},
'861453805':{'en': 'Nanchong, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5357\u5145\u5e02')},
'861453802':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5fb7\u9633\u5e02')},
'861453803':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5357\u5145\u5e02')},
'861453800':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5fb7\u9633\u5e02')},
'861453801':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5fb7\u9633\u5e02')},
'861458328':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5409\u5b89\u5e02')},
'861458329':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5409\u5b89\u5e02')},
'861458322':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u4e5d\u6c5f\u5e02')},
'861458323':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u4e5d\u6c5f\u5e02')},
'861458320':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u4e5d\u6c5f\u5e02')},
'861458321':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u4e5d\u6c5f\u5e02')},
'861458326':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5409\u5b89\u5e02')},
'861458327':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5409\u5b89\u5e02')},
'861458324':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u4e5d\u6c5f\u5e02')},
'861458325':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5409\u5b89\u5e02')},
'86138989':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u846b\u82a6\u5c9b\u5e02')},
'86138988':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')},
'861388658':{'en': 'Jingzhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u5dde\u5e02')},
'86138981':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')},
'86138986':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')},
'86138984':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')},
'861453535':{'en': 'Dongying, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u4e1c\u8425\u5e02')},
'861392251':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861392250':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861392253':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861392252':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861392255':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861392254':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861392257':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861392256':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861392259':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861392258':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861379474':{'en': 'Heyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'861453682':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861454939':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861454938':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861454933':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861454932':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'861454931':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861454930':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861399522':{'en': 'Shizuishan, Ningxia', 'zh': u('\u5b81\u590f\u77f3\u5634\u5c71\u5e02')},
'861454936':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861454935':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861454934':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861390257':{'en': 'Zhanjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861390256':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861390255':{'en': 'Jiangmen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')},
'861390254':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861390253':{'en': 'Zhuhai, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u73e0\u6d77\u5e02')},
'861390252':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861390251':{'en': 'Maoming, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861390250':{'en': 'Zhanjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861390259':{'en': 'Zhongshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')},
'861390258':{'en': 'Jiangmen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')},
'861378901':{'en': 'Yueyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u5cb3\u9633\u5e02')},
'861378900':{'en': 'Yueyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u5cb3\u9633\u5e02')},
'861378903':{'en': 'Yueyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u5cb3\u9633\u5e02')},
'861378902':{'en': 'Yueyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u5cb3\u9633\u5e02')},
'861378905':{'en': 'Yueyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u5cb3\u9633\u5e02')},
'861378904':{'en': 'Yueyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u5cb3\u9633\u5e02')},
'861378907':{'en': 'Zhuzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u682a\u6d32\u5e02')},
'861378906':{'en': 'Zhuzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u682a\u6d32\u5e02')},
'861378909':{'en': 'Zhuzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u682a\u6d32\u5e02')},
'861378908':{'en': 'Zhuzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u682a\u6d32\u5e02')},
'861454492':{'en': 'En<NAME>', 'zh': u('\u6e56\u5317\u7701\u6069\u65bd\u571f\u5bb6\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861379479':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861390039':{'en': 'Liaoyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u8fbd\u9633\u5e02')},
'861390038':{'en': 'Liaoyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u8fbd\u9633\u5e02')},
'861390033':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861390032':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861390031':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861390030':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861390037':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u8fbd\u9633\u5e02')},
'861390036':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861390035':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861390034':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861452188':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u677e\u539f\u5e02')},
'861452189':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u677e\u539f\u5e02')},
'861452184':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'861452185':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5ef6\u8fb9\u671d\u9c9c\u65cf\u81ea\u6cbb\u5dde')},
'861452186':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5ef6\u8fb9\u671d\u9c9c\u65cf\u81ea\u6cbb\u5dde')},
'861452187':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u677e\u539f\u5e02')},
'861452180':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'861452181':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'861452182':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'861452183':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'861389875':{'en': 'Yingkou, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u8425\u53e3\u5e02')},
'861389874':{'en': 'Yingkou, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u8425\u53e3\u5e02')},
'861389877':{'en': 'Yingkou, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u8425\u53e3\u5e02')},
'861389876':{'en': 'Yingkou, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u8425\u53e3\u5e02')},
'861389871':{'en': 'Panjin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u76d8\u9526\u5e02')},
'861389870':{'en': 'Panjin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u76d8\u9526\u5e02')},
'861389873':{'en': 'Panjin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u76d8\u9526\u5e02')},
'861389872':{'en': 'Panjin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u76d8\u9526\u5e02')},
'861389879':{'en': 'Huludao, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u846b\u82a6\u5c9b\u5e02')},
'861389878':{'en': 'Huludao, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u846b\u82a6\u5c9b\u5e02')},
'861398644':{'en': 'Suizhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u968f\u5dde\u5e02')},
'861398645':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')},
'861398646':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')},
'861398647':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')},
'861398640':{'en': 'Ezhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u9102\u5dde\u5e02')},
'861398641':{'en': 'Ezhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u9102\u5dde\u5e02')},
'861398642':{'en': 'Ezhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u9102\u5dde\u5e02')},
'861398643':{'en': 'Suizhou, Hubei', | |
it if it does not exist'''
try:
with open(EVENTS_FILE) as f:
pass
except IOError:
with open(EVENTS_FILE, "w+") as f:
pass
with open(EVENTS_FILE, "r") as f:
lines = csv.reader(f, delimiter = ',')
ids, dates, names, statuses = [], [], [], []
try:
for line in lines:
repetitions = int(line[5])
# For recurring events, each repetition becomes an event in the memory:
for r in range(repetitions):
try:
ids.append(int(line[0]))
fr = line[6]
year = int(line[1]) + r*(fr=='y')
month = int(line[2]) + r*(fr=='m')
day = int(line[3]) + r*(fr=='d') + 7*r*(fr=='w')
event_date = calculate_recurring_events(year, month, day, fr)
dates.append(event_date)
names.append(line[4])
status = 'normal' if len(line) < 8 else line [7]
statuses.append(status)
except Exception:
pass
except Exception:
pass
events = {'id': ids, 'dates': dates, 'names': names, 'statuses': statuses}
return events
def add_event(stdscr, day, month, year, recurring):
    '''Ask the user for a new event and append it to the events file.

    Parameters:
        stdscr: curses window used for the prompts.
        day: pre-selected day of month, or None to ask the user for one.
        month, year: month currently displayed in the calendar.
        recurring: if True, additionally ask for a repetition count and
            frequency; otherwise the event occurs once (frequency "n").

    Nothing is written unless the date falls inside the displayed month,
    the title is non-empty, the repetition count is a non-negative integer
    and the frequency is one of d/w/m/y/n.
    '''
    y_max, x_max = stdscr.getmaxyx()
    if day is None:
        add_prompt = "Enter the date: "+str(year)+"/"+str(month)+"/"
        try:
            event_date = int(user_input(stdscr, add_prompt, 2))
        except ValueError:
            # Bug fix: a non-numeric date used to raise an uncaught
            # ValueError and crash the curses UI; cancel the dialog instead
            # (matching the try/except style of the sibling dialogs).
            return
    else:
        event_date = day
    # Only proceed if the chosen day exists in the displayed month:
    days_of_this_month = range(1, calendar.monthrange(year, month)[1]+1)
    if event_date in days_of_this_month:
        title_prompt = "Enter the title: "
        name = user_input(stdscr, title_prompt, x_max-len(title_prompt)-2)
        if recurring:
            rep_prompt = "How many times repeat the event: "
            repetitions = user_input(stdscr, rep_prompt, 3)
            freq_prompt = "Repeat the event every (d)ay, (w)eek, (m)onth or (y)ear?"
            prompt = freq_prompt + " "*abs(x_max - len(freq_prompt) - 1)
            stdscr.addstr(y_max-2, 0, prompt[:x_max-1], color_pair(8))
            frequency = stdscr.getkey()
        else:
            repetitions = 1
            frequency = "n"
        events = load_events()
        # New id is one past the largest existing id (ids may have gaps):
        event_id = 1 if not events['id'] else max(events['id']) + 1
        new_event = (str(event_id)+","+str(year)+","+str(month)+","+
                    str(event_date)+","+'"'+name+'"'+","+str(repetitions)+","+
                    str(frequency))
        try:
            valid = (len(name) > 0 and int(repetitions) >= 0
                     and frequency in ["d","w","m","y","n"])
        except ValueError:
            # Bug fix: a non-numeric repetition count used to crash the UI.
            valid = False
        if valid:
            with open(EVENTS_FILE, "a") as f:
                f.write(new_event+"\n")
def delete_event(stdscr, ids_this_month, names_this_month):
    '''Ask which of this month's events to delete and remove it from the file.

    Parameters:
        stdscr: curses window used for the prompts.
        ids_this_month: event ids shown this month, in 1-based menu order.
        names_this_month: event titles parallel to `ids_this_month`.

    The deletion is performed by rewriting the events file into a `.bak`
    dummy without the chosen event, then swapping the dummy in only when a
    matching line was actually removed.
    '''
    y_max, x_max = stdscr.getmaxyx()
    prompt_string = "Number of event to delete: "
    try:
        # Ask which event to delete:
        num = user_input(stdscr, prompt_string, 4)
        if int(num) in range(1, len(ids_this_month)+1):
            # Ask confirmation:
            event_id = ids_this_month[int(num)-1]
            event_name = names_this_month[int(num)-1]
            prompt_string = "Really delete "+event_name+"? (y/n)"
            confirmed = ask_confirmation(stdscr, prompt_string)
            # Delete the event if it was confirmed:
            if confirmed:
                original_file = EVENTS_FILE
                dummy_file = EVENTS_FILE + '.bak'
                line_deleted = False
                # Events are matched by the id prefix of each CSV line.
                with open(original_file, 'r') as read_obj, open(dummy_file, 'w') as write_obj:
                    for line in read_obj:
                        if not line.startswith(str(event_id)+','):
                            write_obj.write(line)
                        else:
                            line_deleted = True
                if line_deleted:
                    os.remove(original_file)
                    os.rename(dummy_file, original_file)
                else:
                    os.remove(dummy_file)
    except Exception:
        # Best-effort dialog: a malformed number or an I/O problem cancels
        # the deletion instead of crashing the curses UI.  Narrowed from a
        # bare `except:` so Ctrl-C / SystemExit still propagate.
        pass
def mark_event_as_important(stdscr, ids_this_month, names_this_month, month, year):
    '''Toggle the "important" status of a chosen event.

    Asks for the event's menu number, then rewrites the events file into a
    `.bak` dummy with the ",important" suffix of the matching line toggled
    (added if absent, stripped if present), and swaps the dummy in only
    when a line was actually edited.

    Parameters:
        stdscr: curses window used for the prompt.
        ids_this_month: event ids shown this month, in 1-based menu order.
        names_this_month: event titles parallel to `ids_this_month`.
        month, year: currently displayed month (kept for signature
            compatibility with the other event dialogs; not used here).
    '''
    y_max, x_max = stdscr.getmaxyx()
    prompt_string = "Mark as important event number: "
    num = user_input(stdscr, prompt_string, 4)
    # If provided number is correct, then change the status:
    try:
        if int(num) in range(1, len(ids_this_month)+1):
            event_id = ids_this_month[int(num)-1]
            # Here we work with a dummy file and replace the original in the last moment:
            original_file = EVENTS_FILE
            dummy_file = EVENTS_FILE + '.bak'
            # Bug fix: the original initialised `line_deleted` but tested
            # `line_edited`, so when no line matched the chosen id the
            # final check raised a NameError (silently swallowed) and the
            # `.bak` dummy file was left behind on disk.
            line_edited = False
            with open(original_file, 'r') as read_obj, open(dummy_file, 'w') as write_obj:
                for line in read_obj:
                    if not line.startswith(str(event_id)+','):
                        write_obj.write(line)
                    else:
                        # Toggle: strip a trailing ",important" or append one.
                        if "important" in line[-11:]:
                            line = re.sub(',important', '', line)
                        else:
                            line = line[:-1] + ',important\n'
                        write_obj.write(line)
                        line_edited = True
            if line_edited:
                os.remove(original_file)
                os.rename(dummy_file, original_file)
            else:
                os.remove(dummy_file)
    except Exception:
        # Best-effort dialog: malformed input or I/O errors cancel the
        # operation instead of crashing the curses UI.
        pass
def edit_event(stdscr, ids_this_month, names_this_month, month, year):
    '''Edit a chosen event by deleting it and appending a re-entered one.

    The user picks an event by its menu number, confirms, and then re-enters
    every field (date, title, repetitions, frequency) from scratch.  The
    re-created event gets a fresh id — it does not keep the old one.

    Parameters:
        stdscr: curses window used for the prompts.
        ids_this_month: event ids shown this month, in 1-based menu order.
        names_this_month: event titles parallel to `ids_this_month`.
        month, year: currently displayed month; the new date is asked
            within this month only.
    '''
    y_max, x_max = stdscr.getmaxyx()
    prompt_string = "Number of event to edit: "
    num = (user_input(stdscr, prompt_string, 4))
    event_chosen = False
    # If provided number is correct, then delete the event:
    try:
        if int(num) in range(1, len(ids_this_month)+1):
            recurring = True
            event_id = ids_this_month[int(num)-1]
            event_name = names_this_month[int(num)-1]
            prompt_string = "Really edit " + event_name + "? (y/n)"
            confirmed = ask_confirmation(stdscr, prompt_string)
            if confirmed:
                # Rewrite the events file into a .bak dummy without the
                # chosen event, then swap the dummy in only if a matching
                # line was actually removed.
                original_file = EVENTS_FILE
                dummy_file = EVENTS_FILE + '.bak'
                line_deleted = False
                with open(original_file, 'r') as read_obj, open(dummy_file, 'w') as write_obj:
                    for line in read_obj:
                        # Events are matched by the id prefix of each CSV line.
                        if not line.startswith(str(event_id)+','):
                            write_obj.write(line)
                        else:
                            line_deleted = True
                if line_deleted:
                    os.remove(original_file)
                    os.rename(dummy_file, original_file)
                else:
                    os.remove(dummy_file)
                event_chosen = True
    except:
        # NOTE(review): bare except silently cancels on any error
        # (malformed number, I/O failure) — matches the sibling dialogs.
        pass
    if event_chosen:
        # First, ask the date within this month:
        add_prompt = "Enter new date: " + str(year) + "/" + str(month) + "/"
        event_date = user_input(stdscr, add_prompt, 2)
        # If user's date is the number and is in this month, ask the title:
        try:
            days_this_month = range(1, calendar.monthrange(year, month)[1]+1)
            if int(event_date) in days_this_month:
                title_prompt = "Enter new title: "
                name = user_input(stdscr, title_prompt, x_max-len(title_prompt)-2)
                rep_prompt = "How many times repeat the event: "
                repetitions = user_input(stdscr, rep_prompt, 3)
                freq_prompt = "Repeat the event every (d)ay, (w)eek, (m)onth or (y)ear?"
                prompt = freq_prompt + " "*abs(x_max - len(freq_prompt) - 1)
                stdscr.addstr(y_max-2, 0, prompt[:x_max-1], color_pair(8))
                frequency = stdscr.getkey()
                events = load_events()
                # New id is one past the largest existing id:
                event_id = 1 if not events['id'] else max(events['id'])+1
                new_event = (str(event_id)+","+str(year)+","+str(month)+","+
                            event_date+","+'"'+name+'"'+","+str(repetitions)+","+str(frequency))
                # Append only if all re-entered fields validate:
                if len(name) > 0 and int(repetitions) >= 0 and frequency in ["d","w","m","y","n"]:
                    with open(EVENTS_FILE, "a") as f:
                        f.write(new_event+"\n")
        except:
            # NOTE(review): bare except — invalid input cancels the re-add;
            # at this point the original event is already deleted.
            pass
def next_month(month, year):
    '''Return the (month, year) pair advanced by one month'''
    if month < 12:
        return month + 1, year
    # December wraps to January of the following year.
    return 1, year + 1
def previous_month(month, year):
    '''Return the (month, year) pair moved back by one month'''
    if month > 1:
        return month - 1, year
    # January wraps to December of the previous year.
    return 12, year - 1
def next_day(day, month, year):
    '''Switch the daily view to the next day'''
    if day < calendar.monthrange(year, month)[1]:
        return day + 1, month, year
    # Last day of the month: roll over to day 1 of the following month.
    if month < 12:
        return 1, month + 1, year
    return 1, 1, year + 1
def previous_day(day, month, year):
    '''Switch the daily view to the previous day'''
    if day > 1:
        return day - 1, month, year
    # First of the month: step onto the last day of the previous month.
    month, year = (month - 1, year) if month > 1 else (12, year - 1)
    return calendar.monthrange(year, month)[1], month, year
def user_input(stdscr, prompt_string, answer_length):
    '''Show a prompt on the second-to-last line and return the typed answer'''
    y_max, x_max = stdscr.getmaxyx()
    prompt_row = y_max - 2
    # Pad with spaces so the prompt overwrites any leftover text on the row.
    padded_prompt = str(prompt_string) + " " * abs((x_max - len(prompt_string)) - 1)
    echo()
    curs_set(True)
    stdscr.addstr(prompt_row, 0, padded_prompt[:x_max - 1], color_pair(8))
    stdscr.refresh()
    # Renamed local: the original shadowed the function's own name.
    answer = stdscr.getstr(prompt_row, len(prompt_string), answer_length).decode(encoding="utf-8")
    curs_set(False)
    noecho()
    return answer
def user_input_for_tasks(stdscr, prompt_string, answer_length, task_number, subtask=False):
    '''Prompt for input on the line where the given task is displayed'''
    y_max, x_max = stdscr.getmaxyx()
    row = task_number - 1 + 2*SHOW_TITLE
    indent = 2*subtask  # subtasks are shifted two columns right
    padded_prompt = str(prompt_string) + " " * abs((x_max - len(prompt_string)) - 1)
    echo()
    curs_set(True)
    stdscr.addstr(row, indent, padded_prompt[:x_max - 1], color_pair(8))
    stdscr.refresh()
    answer = stdscr.getstr(row, len(prompt_string) + indent, answer_length).decode(encoding="utf-8")
    curs_set(False)
    noecho()
    return answer
def display_day_names(stdscr, x_max):
    '''Display weekday names across the top row, abbreviated on narrow screens

    stdscr: curses window to draw on.
    x_max: available screen width in characters.
    '''
    if not SHOW_DAY_NAMES:
        return
    num = 2 if x_max < 80 else 10
    x_cell = x_max // 7
    shift = START_WEEK_DAY - 1  # loop-invariant, hoisted out of the loop
    for i in range(7):
        day_number = i + shift - 7*((i + shift) > 6)
        name = calendar.day_name[day_number][:num].upper()
        color = 1 if day_number < 5 else 6  # weekday vs weekend color pair
        try:
            stdscr.addstr(1, i*x_cell, name, color_pair(color))
        except error:
            # addstr raises curses.error when drawing past the screen edge;
            # anything else should propagate instead of being swallowed by
            # the former bare except.
            pass
def display_icon(name, screen, selection_mode=False):
    '''Return the icon for an event whose name contains a configured keyword

    When icons are suppressed (selection mode, or DISPLAY_ICONS off), return
    a middle dot on the journal screen and an empty string elsewhere.
    '''
    if not selection_mode and DISPLAY_ICONS:
        icon = EVENT_ICON + " "
        lowered = name.lower()  # hoisted: was recomputed for every keyword
        for keyword, keyword_icon in ICONS.items():
            if keyword in lowered:
                # Deliberately no break: a later matching keyword wins,
                # exactly as before.
                icon = keyword_icon + " "
    elif screen == "journal":
        icon = "·"
    else:
        icon = ""
    return icon
def ask_confirmation(stdscr, prompt_string):
    '''Ask the user to confirm an action; return True when confirmed

    With ASK_CONFIRMATIONS disabled every action is auto-confirmed.
    '''
    confirmed = True
    if ASK_CONFIRMATIONS:
        y_max, x_max = stdscr.getmaxyx()
        halfdelay(255)  # half-delay mode: getkey gives up after 25.5 s
        prompt = prompt_string + " "*abs(x_max - len(prompt_string) - 1)
        stdscr.addstr(y_max-2, 0, prompt[:x_max-1], color_pair(9))
        # Direct boolean instead of the former `True if ... else False`.
        confirmed = stdscr.getkey() == "y"
    return confirmed
def initialize_colors(stdscr):
    '''Register every curses color pair used by the interface'''
    start_color()
    use_default_colors()
    # Pairs whose colors do not depend on any "minimal indicator" setting.
    fixed_pairs = {
        1: COLOR_DAY_NAMES,
        3: COLOR_HINTS,
        6: COLOR_WEEKEND_NAMES,
        7: COLOR_BIRTHDAYS,
        8: COLOR_PROMPTS,
        9: COLOR_CONFIRMATIONS,
        10: COLOR_TITLE,
        11: COLOR_TODO,
        12: COLOR_DONE,
        13: COLOR_IMPORTANT,
        14: COLOR_TIMER,
        15: COLOR_TIMER_PAUSED,
        16: COLOR_HOLIDAYS,
        17: COLOR_EVENTS,
        18: COLOR_TIME,
        19: COLOR_WEATHER,
    }
    for pair_number, foreground in fixed_pairs.items():
        init_pair(pair_number, foreground, COLOR_BACKGROUND)
    # "Minimal" indicators color the glyph itself; otherwise the whole cell
    # is filled by using the indicator color as the background.
    if MINIMAL_WEEKEND_INDICATOR:
        init_pair(2, COLOR_WEEKENDS, COLOR_BACKGROUND)
    else:
        init_pair(2, COLOR_BLACK, COLOR_WEEKENDS)
    if MINIMAL_TODAY_INDICATOR:
        init_pair(4, COLOR_TODAY, COLOR_BACKGROUND)
    else:
        init_pair(4, COLOR_BLACK, COLOR_TODAY)
    if MINIMAL_DAYS_INDICATOR:
        init_pair(5, COLOR_DAYS, COLOR_BACKGROUND)
    else:
        init_pair(5, COLOR_BLACK, COLOR_DAYS)
def display_weather(stdscr, weather, month_year_string):
'''Load the weather at launch and display weather widget.'''
if SHOW_WEATHER:
_, x_max = stdscr.getmaxyx()
max_load_time = 2 # Time to wait in seconds
# If weather is not | |
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from object_database.schema import ObjectFieldId, IndexId, FieldDefinition, ObjectId
from object_database.messages import ClientToServer, getHeartbeatInterval
from object_database.core_schema import core_schema
from object_database.view import View, Transaction, _cur_view, SerializedDatabaseValue
from object_database.reactor import Reactor
from object_database.identity import IdentityProducer
import object_database.keymapping as keymapping
from object_database._types import VersionedIdSet
from typed_python.SerializationContext import SerializationContext
from typed_python.Codebase import Codebase as TypedPythonCodebase
from typed_python import Alternative, Dict, OneOf, TupleOf
import bisect
import logging
import threading
import time
import traceback
from object_database.view import DisconnectedException
class Everything:
    """Sentinel marking subscription to everything in a slice.

    Used as the class object itself (compared with ``is Everything``),
    never instantiated.
    """
# Result alternatives for a pushed transaction: Success, RevisionConflict
# (carrying the conflicting key), or Disconnected.
TransactionResult = Alternative(
    "TransactionResult",
    Success={},
    RevisionConflict={'key': OneOf(str, ObjectFieldId, IndexId)},
    Disconnected={}
)
class VersionedBase:
    """Common behavior for version-stamped containers.

    Subclasses maintain ``self.version_numbers``, a list of transaction ids
    kept in ascending order (new versions are appended with ever larger ids,
    enforced by ``validVersionIncoming``), plus a payload retrievable via
    ``valueForVersion``.
    """
    def _best_version_offset_for(self, version):
        """Return the index of the newest version <= ``version``, or None.

        version_numbers is sorted ascending, so a binary search replaces the
        former linear scan from the end.
        """
        ix = bisect.bisect_right(self.version_numbers, version) - 1
        return ix if ix >= 0 else None
    def isEmpty(self):
        """True when no version has ever been recorded."""
        return not self.version_numbers
    def validVersionIncoming(self, version_read, transaction_id):
        """True if a transaction that read at ``version_read`` may commit
        on top of the current newest version."""
        if not self.version_numbers:
            return True
        top = self.version_numbers[-1]
        assert transaction_id > version_read
        return version_read >= top
    def hasVersionInfoNewerThan(self, tid):
        """True if any stored version is newer than ``tid``."""
        if not self.version_numbers:
            return False
        return tid < self.version_numbers[-1]
    def newestValue(self):
        """Return the payload of the most recent version (or the payload for
        the None version when nothing is stored)."""
        if self.version_numbers:
            return self.valueForVersion(self.version_numbers[-1])
        else:
            return self.valueForVersion(None)
class VersionedValue(VersionedBase):
    """All retained versions of a single value.

    ``version_numbers[i]`` is the transaction id at which ``values[i]``
    became current; the two lists grow in lockstep, newest last.
    """
    def __init__(self):
        self.version_numbers = []
        self.values = []
    def setVersionedValue(self, version_number, val):
        """Record ``val`` as the value as of ``version_number``."""
        self.version_numbers.append(version_number)
        self.values.append(val)
    def valueForVersion(self, version):
        """Return the value visible at ``version``, or None if none exists."""
        i = self._best_version_offset_for(version)
        if i is None:
            return None
        return self.values[i]
    def wantsGuaranteedLowestIdMoveForward(self):
        """True unless already collapsed to a single live (non-deleted) value.

        The short-circuit keeps ``values[0]`` safe: with != 1 entries
        (including zero) the second clause is never evaluated.
        """
        return len(self.version_numbers) != 1 or self.values[0].serializedByteRep is None
    def moveGuaranteedLowestIdForward(self, version_number):
        """Discard versions no reader can see any more.

        ``version_number`` is the lowest transaction id any outstanding
        reader can hold.  Returns True when the caller may drop the whole
        entry (empty, or a single deleted value); falls off the loop with an
        implicit None (falsy) when the entry must be kept.
        """
        if not self.values:
            return True
        while self.values and self.version_numbers[0] < version_number:
            if len(self.values) == 1:
                if self.values[0].serializedByteRep is None:
                    # this value was deleted and we can just remove this whole entry
                    return True
                else:
                    # Single live value: restamp it at the new floor
                    # (this also terminates the loop).
                    self.version_numbers[0] = version_number
            else:
                if self.version_numbers[1] <= version_number:
                    # The next version is also below the floor, so the
                    # oldest entry can never be read again: drop it.
                    self.values.pop(0)
                    self.version_numbers.pop(0)
                else:
                    # Oldest entry is still the visible one at the floor;
                    # restamp it (ascending order is preserved because
                    # version_numbers[1] > version_number).
                    self.version_numbers[0] = version_number
    def __repr__(self):
        return "VersionedValue(ids=%s)" % (self.version_numbers,)
class FrozenIdSet:
    """Read-only iterator view over an id set (e.g. a VersionedIdSet),
    pinned at a single transaction id."""
    def __init__(self, idSet, transactionId):
        self.idSet = idSet
        self.transactionId = transactionId
    def toSet(self):
        """Materialize the view as a plain python set."""
        return set(self)
    def __iter__(self):
        """Yield object ids in lookup order; empty when there is no backing set."""
        backing = self.idSet
        if backing is None:
            return
        tid = self.transactionId
        current = backing.lookupFirst(tid)
        while current >= 0:
            yield current
            current = backing.lookupNext(tid, current)
    def pickAny(self, toAvoid):
        """Return some member not in ``toAvoid``, or None if all are excluded."""
        return next((objId for objId in self if objId not in toAvoid), None)
class ManyVersionedObjects:
    """Track versioned values and index sets for many keys at once.

    Also reference-counts the version numbers outstanding views are pinned
    to, so ``cleanup`` knows which old versions may be discarded.
    """
    def __init__(self):
        # refcount of outstanding views per version number
        self._version_number_refcount = {}
        # smallest version number with a nonzero refcount (None when none)
        self._min_reffed_version_number = None
        # for each version number, the set of keys that are set with it
        self._version_number_objects = {}
        # for each key, a VersionedValue or VersionedIdSet
        self._versioned_objects = {}
    def keycount(self):
        """Number of keys holding any versioned data."""
        return len(self._versioned_objects)
    def versionIncref(self, version_number):
        """Pin ``version_number``: one more view is reading at it."""
        if version_number not in self._version_number_refcount:
            self._version_number_refcount[version_number] = 1
            if self._min_reffed_version_number is None:
                self._min_reffed_version_number = version_number
            else:
                self._min_reffed_version_number = min(version_number, self._min_reffed_version_number)
        else:
            self._version_number_refcount[version_number] += 1
    def versionDecref(self, version_number):
        """Release one pin on ``version_number``, maintaining the cached minimum."""
        assert version_number in self._version_number_refcount
        self._version_number_refcount[version_number] -= 1
        assert self._version_number_refcount[version_number] >= 0
        if self._version_number_refcount[version_number] == 0:
            del self._version_number_refcount[version_number]
            if version_number == self._min_reffed_version_number:
                if not self._version_number_refcount:
                    self._min_reffed_version_number = None
                else:
                    # recompute the minimum over the remaining pinned versions
                    self._min_reffed_version_number = min(self._version_number_refcount)
    def setForVersion(self, key, version_number):
        """Return a FrozenIdSet view of ``key`` pinned at ``version_number``."""
        if key in self._versioned_objects:
            return FrozenIdSet(self._versioned_objects[key], version_number)
        return FrozenIdSet(None, version_number)
    def hasDataForKey(self, key):
        """True if any versioned data is held for ``key``."""
        return key in self._versioned_objects
    def valueForVersion(self, key, version_number):
        """Value of ``key`` at ``version_number``; KeyError if key is unknown."""
        return self._versioned_objects[key].valueForVersion(version_number)
    def _object_has_version(self, key, version_number):
        # Remember that 'key' received data at 'version_number' so cleanup
        # revisits it once that version falls below the read floor.
        if version_number not in self._version_number_objects:
            self._version_number_objects[version_number] = set()
        self._version_number_objects[version_number].add(key)
    def setVersionedValue(self, key, version_number, serialized_val):
        """Store a new serialized value; return the previously-newest value."""
        self._object_has_version(key, version_number)
        if key not in self._versioned_objects:
            self._versioned_objects[key] = VersionedValue()
        versioned = self._versioned_objects[key]
        initialValue = versioned.newestValue()
        versioned.setVersionedValue(version_number, SerializedDatabaseValue(serialized_val, {}))
        return initialValue
    def setVersionedAddsAndRemoves(self, key, version_number, adds, removes):
        """Apply index-set adds/removes for ``key`` at ``version_number``."""
        self._object_has_version(key, version_number)
        if key not in self._versioned_objects:
            self._versioned_objects[key] = VersionedIdSet()
        if adds or removes:
            self._versioned_objects[key].addTransaction(version_number, adds, removes)
    def setVersionedTailValueStringified(self, key, serialized_val):
        """Install a baseline value at version -1 if ``key`` has no data yet."""
        if key not in self._versioned_objects:
            self._object_has_version(key, -1)
            self._versioned_objects[key] = VersionedValue()
            self._versioned_objects[key].setVersionedValue(-1, SerializedDatabaseValue(serialized_val, {}))
    def updateVersionedAdds(self, key, version_number, adds):
        """Add ids to the index set for ``key`` at ``version_number``.

        The original if/else here had byte-identical branches; collapsed
        into a single addTransaction call.
        """
        self._object_has_version(key, version_number)
        if key not in self._versioned_objects:
            self._versioned_objects[key] = VersionedIdSet()
        self._versioned_objects[key].addTransaction(version_number, adds, TupleOf(ObjectId)())
    def cleanup(self, curTransactionId):
        """Get rid of old objects we don't need to keep around and increase the min_transaction_id"""
        if self._min_reffed_version_number is not None:
            lowestId = min(self._min_reffed_version_number, curTransactionId)
        else:
            lowestId = curTransactionId
        if self._version_number_objects:
            while min(self._version_number_objects) < lowestId:
                toCollapse = min(self._version_number_objects)
                for key in self._version_number_objects[toCollapse]:
                    if key not in self._versioned_objects:
                        pass
                    elif self._versioned_objects[key].moveGuaranteedLowestIdForward(lowestId):
                        # Entry collapsed to nothing readable: drop it outright.
                        del self._versioned_objects[key]
                    else:
                        if self._versioned_objects[key].wantsGuaranteedLowestIdMoveForward():
                            # Still compressible later: re-register at the new floor.
                            self._object_has_version(key, lowestId)
                del self._version_number_objects[toCollapse]
class DatabaseConnection:
    def __init__(self, channel, connectionMetadata=None):
        """Set up client-side state over ``channel``.

        The last statements register ``self._onMessage`` as the channel's
        server-to-client handler, so messages may start arriving as soon as
        this constructor finishes.
        """
        self._channel = channel
        self._transaction_callbacks = {}
        self._connectionMetadata = connectionMetadata or {}
        # guards all mutable state below
        self._lock = threading.Lock()
        # transaction of what's in the KV store
        self._cur_transaction_num = 0
        # a datastructure that keeps track of all the different versions of the objects
        # we have mapped in.
        self._versioned_data = ManyVersionedObjects()
        # a map from lazy object id to (schema, typename)
        self._lazy_objects = {}
        self._lazy_object_read_blocks = {}
        self.initialized = threading.Event()
        self.disconnected = threading.Event()
        # for each schema name we've sent, an event that's triggered
        # when the server has acknowledged the schema and given us a definition
        self._schema_response_events = {}
        self._fields_to_field_ids = Dict(FieldDefinition, int)()
        self._field_id_to_field_def = Dict(int, FieldDefinition)()
        self.connectionObject = None
        # transaction handlers. These must be nonblocking since we call them under lock
        self._onTransactionHandlers = set()
        # flush guid -> Event, see flush()
        self._flushEvents = {}
        # Map: schema.name -> schema
        self._schemas = {}
        self._messages_received = 0
        self._pendingSubscriptions = {}
        # if we have object-level subscriptions to a particular type (e.g. not everything)
        # then, this is from (schema, typename) -> {object_id -> transaction_id} so that
        # we can tell when the subscription should become valid. Subscriptions are permanent
        # otherwise, if we're subscribed, it's 'Everything'
        self._schema_and_typename_to_subscription_set = {}
        # from (schema, typename, field_val) -> {'values', 'index_values', 'identities'}
        self._subscription_buildup = {}
        # from here on, server messages are dispatched to self._onMessage
        self._channel.setServerToClientHandler(self._onMessage)
        self._flushIx = 0
        self._largeSubscriptionHeartbeatDelay = 0
        self.serializationContext = TypedPythonCodebase.coreSerializationContext().withoutCompression()
        self._logger = logging.getLogger(__name__)
        self._auth_token = None
    def getConnectionMetadata(self):
        """Return any data provided to us by the underlying transport.
        Returns:
            A dictionary of extra metadata.
            If we are a TCP-based connection, this will have the members:
                'peername': the remote address to which the socket is connected,
                    result of socket.socket.getpeername() (None on error)
                'socket': socket.socket instance
                'sockname': the socket's own address, result of socket.socket.getsockname()
        """
        return self._connectionMetadata
    def registerOnTransactionHandler(self, handler):
        """Register ``handler`` to be called on transactions; it must be
        nonblocking (handlers are invoked under the lock)."""
        with self._lock:
            self._onTransactionHandlers.add(handler)
    def dropTransactionHandler(self, handler):
        """Stop calling ``handler``; no-op if it was never registered."""
        with self._lock:
            self._onTransactionHandlers.discard(handler)
    def setSerializationContext(self, context):
        """Install ``context`` (uncompressed) for value serialization.
        Returns self so calls can be chained."""
        assert isinstance(context, SerializationContext), context
        self.serializationContext = context.withoutCompression()
        return self
    def serializeFromModule(self, module):
        """Give the project root we want to serialize from."""
        self.setSerializationContext(
            TypedPythonCodebase.FromRootlevelModule(module).serializationContext
        )
    def _stopHeartbeating(self):
        """Stop the underlying channel's heartbeat."""
        self._channel._stopHeartbeating()
    def disconnect(self):
        """Mark ourselves disconnected and close the underlying channel."""
        self.disconnected.set()
        self._channel.close()
    def _noViewsOutstanding(self):
        """True when no view currently pins any version number."""
        with self._lock:
            return not self._versioned_data._version_number_refcount
    def authenticate(self, token):
        """Send the auth token to the server; may be called only once."""
        assert self._auth_token is None, "We already authenticated."
        self._auth_token = token
        self._channel.write(
            ClientToServer.Authenticate(token=token)
        )
    def addSchema(self, schema):
        """Freeze ``schema`` and, on the first call per schema name, send its
        definition to the server and wait for the acknowledgement event.

        Raises DisconnectedException if we disconnect while waiting.
        """
        schema.freeze()
        with self._lock:
            if schema.name not in self._schemas:
                self._schemas[schema.name] = schema
                schemaDesc = schema.toDefinition()
                self._channel.write(
                    ClientToServer.DefineSchema(
                        name=schema.name,
                        definition=schemaDesc
                    )
                )
                # NOTE(review): the response event is created *after* the
                # DefineSchema message is written, and the wait below happens
                # while still holding self._lock (unlike flush(), which waits
                # outside the lock) -- looks racy/deadlock-prone unless the
                # message handler neither needs the event immediately nor
                # takes self._lock; confirm against _onMessage.
                self._schema_response_events[schema.name] = threading.Event()
                e = self._schema_response_events[schema.name]
                e.wait()
                if self.disconnected.is_set():
                    raise DisconnectedException()
    def flush(self):
        """Make sure we know all transactions that have happened up to this point.

        Writes a Flush message with a fresh guid and blocks (outside the
        lock) on the matching event; raises DisconnectedException if the
        connection drops before or after the wait.
        """
        with self._lock:
            if self.disconnected.is_set():
                raise DisconnectedException()
            self._flushIx += 1
            ix = self._flushIx
            e = self._flushEvents[ix] = threading.Event()
            self._channel.write(ClientToServer.Flush(guid=ix))
        e.wait()
        if self.disconnected.is_set():
            raise DisconnectedException()
    def subscribeToObject(self, t):
        """Subscribe to a single object (see subscribeToObjects)."""
        self.subscribeToObjects([t])
    def subscribeToObjects(self, objects):
        """Subscribe to the given objects individually by identity.

        The final False in each tuple requests a non-lazy subscription
        (same tuple slot other callers fill via _lazinessForType).
        """
        for t in objects:
            self.addSchema(type(t).__schema__)
        self.subscribeMultiple([
            (type(t).__schema__.name, type(t).__qualname__,
            ("_identity", keymapping.index_value_to_hash(t._identity, self.serializationContext)),
            False)
            for t in objects
        ])
def _lazinessForType(self, typeObj, desiredLaziness):
if desiredLaziness is not None:
return desiredLaziness
if hasattr(typeObj, '__object_database_lazy_subscription__'):
return True
return False
    def subscribeToIndex(self, t, block=True, lazySubscription=None, **kwarg):
        """Subscribe to objects of type ``t`` whose indexed field values
        match the given keyword arguments."""
        self.addSchema(t.__schema__)
        toSubscribe = []
        for fieldname, fieldvalue in kwarg.items():
            toSubscribe.append((
                t.__schema__.name,
                t.__qualname__,
                (fieldname, keymapping.index_value_to_hash(fieldvalue, self.serializationContext)),
                self._lazinessForType(t, lazySubscription))
            )
        return self.subscribeMultiple(toSubscribe, block=block)
    def subscribeToType(self, t, block=True, lazySubscription=None):
        """Subscribe to every object of type ``t``; returns () immediately
        when a full subscription already exists."""
        self.addSchema(t.__schema__)
        if self._isTypeSubscribedAll(t):
            return ()
        return self.subscribeMultiple([(t.__schema__.name, t.__qualname__, None, self._lazinessForType(t, lazySubscription))], block)
    def subscribeToNone(self, t, block=True):
        """Mark ``t`` as subscribed (empty set) without requesting any
        objects from the server."""
        self.addSchema(t.__schema__)
        with self._lock:
            self._schema_and_typename_to_subscription_set.setdefault(
                (t.__schema__.name, t.__qualname__), set()
            )
        return ()
    def subscribeToSchema(self, *schemas, block=True, lazySubscription=None, excluding=()):
        """Subscribe to every type in the given schemas, skipping types in
        ``excluding`` and types already fully subscribed."""
        for s in schemas:
            self.addSchema(s)
        unsubscribedTypes = []
        for schema in schemas:
            for tname, t in schema._types.items():
                if not self._isTypeSubscribedAll(t) and t not in excluding:
                    unsubscribedTypes.append((schema.name, tname, None, self._lazinessForType(t, lazySubscription)))
        if unsubscribedTypes:
            return self.subscribeMultiple(unsubscribedTypes, block=block)
        return ()
    def isSubscribedToSchema(self, schema):
        """True if every type in ``schema`` has at least some subscription."""
        return all(self._isTypeSubscribed(t) for t in schema._types.values())
    def isSubscribedToType(self, t):
        """True if ``t`` has any subscription (full or per-object)."""
        return self._isTypeSubscribed(t)
    def _isTypeSubscribed(self, t):
        # Any entry at all (even an empty set) counts as subscribed.
        return (t.__schema__.name, t.__qualname__) in self._schema_and_typename_to_subscription_set
    def _isTypeSubscribedAll(self, t):
        # A full subscription is marked with the Everything sentinel class.
        return self._schema_and_typename_to_subscription_set.get((t.__schema__.name, t.__qualname__)) is Everything
def subscribeMultiple(self, subscriptionTuples, block=True):
with self._lock:
if self.disconnected.is_set():
raise DisconnectedException()
events = []
for tup in subscriptionTuples:
| |
accelerator.
FrameworkValueValuesEnum: Type of framework.
Fields:
acceleratorType: Type of device accelerator.
description: Description of the edge ml model. Max 8192 bytes.
framework: Type of framework.
inputTensors: List of input tensors.
inputTopics: List of input topics.
modelUri: Required. URI that points to ML model file of Google Cloud
Storage. Max 8192 bytes.
name: Required. Name of the ML model. It must be unique among the ML
models running on the same edge device. For example,
`projects/p1/locations/us-
central1/registries/registry0/devices/dev0/mlModels/m1`. The last token
of the name (For example `m1`) should be no more than 256 bytes, and
should start with a letter followed by up to letters, numbers, hyphens
or underscores, and cannot end with a hyphen or an underscore.
numTfliteThreads: Number of threads that are spawned by TF Lite.
outputTensors: List of output tensors.
outputTopics: List of output topics.
requestTimeout: Timeout for one request to this ML model. Default value is
1 minute if not specified or equals to 0.
samplingInfo: Sampling information for sampling and uploading ML inference
results.
updateTime: Output only. Last updated time of this ml model config. This
is assigned by Edge Manager API.
version: Output only. Version of this ML model config. This is assigned by
Edge Manager API.
"""
  class AcceleratorTypeValueValuesEnum(_messages.Enum):
    r"""Type of device accelerator.
    Values:
      ACCELERATOR_TYPE_UNSPECIFIED: Default value, used when its value
        unspecified.
      TPU: TPU, tensor processing unit.
      GPU: GPU, graphics processing unit.
    """
    # NOTE(review): appears to be apitools-generated; the numbers look like
    # proto enum values and should not be changed by hand -- confirm.
    ACCELERATOR_TYPE_UNSPECIFIED = 0
    TPU = 1
    GPU = 2
  class FrameworkValueValuesEnum(_messages.Enum):
    r"""Type of framework.
    Values:
      FRAMEWORK_UNSPECIFIED: Default value, used when its value unspecified.
      TFLITE: TFlite framework.
      SCIKIT_LEARN: Scikit-learn framework.
    """
    # NOTE(review): appears to be apitools-generated; the numbers look like
    # proto enum values and should not be changed by hand -- confirm.
    FRAMEWORK_UNSPECIFIED = 0
    TFLITE = 1
    SCIKIT_LEARN = 2
acceleratorType = _messages.EnumField('AcceleratorTypeValueValuesEnum', 1)
description = _messages.StringField(2)
framework = _messages.EnumField('FrameworkValueValuesEnum', 3)
inputTensors = _messages.MessageField('TensorInfo', 4, repeated=True)
inputTopics = _messages.MessageField('TopicInfo', 5, repeated=True)
modelUri = _messages.StringField(6)
name = _messages.StringField(7)
numTfliteThreads = _messages.IntegerField(8, variant=_messages.Variant.INT32)
outputTensors = _messages.MessageField('TensorInfo', 9, repeated=True)
outputTopics = _messages.MessageField('TopicInfo', 10, repeated=True)
requestTimeout = _messages.StringField(11)
samplingInfo = _messages.MessageField('MlSamplingInfo', 12)
updateTime = _messages.StringField(13)
version = _messages.IntegerField(14)
class MlModelState(_messages.Message):
  r"""Indicates the state of an ML model.
  Enums:
    StateValueValuesEnum: State of the ML model, For example, `RUNNING` or
      `ERROR`.
  Fields:
    mlModel: Name of the ML model. For example, `projects/p1/locations/us-
      central1/registries/registry0/devices/dev0/mlModels/m1`.
    modelUri: URI that points to ML model file in Google Cloud Storage. This
      value should be matched to the model_uri field of MlModel message.
    state: State of the ML model, For example, `RUNNING` or `ERROR`.
    version: Version number of deployed ML model config. This value should be
      matched to the version of MlModel of the latest deployed config.
  """
  # NOTE(review): apitools-style generated message; prefer regenerating from
  # the API spec over hand edits (field ordinals define the wire format).
  class StateValueValuesEnum(_messages.Enum):
    r"""State of the ML model, For example, `RUNNING` or `ERROR`.
    Values:
      JOB_STATE_UNSPECIFIED: Unspecified.
      RUNNING: Job is running.
      ERROR: An error occurred.
    """
    JOB_STATE_UNSPECIFIED = 0
    RUNNING = 1
    ERROR = 2
  mlModel = _messages.StringField(1)
  modelUri = _messages.StringField(2)
  state = _messages.EnumField('StateValueValuesEnum', 3)
  version = _messages.IntegerField(4)
class MlSamplingInfo(_messages.Message):
  r"""Data used to control retraining process of this MlModel.
  Messages:
    ClassPolicyValue: The policies to specify sampling strategy for each
      class.
  Fields:
    classPolicy: The policies to specify sampling strategy for each class.
    datasetName: Required. The dataset name used in AutoML. Can contain
      alphabets, digits and underscores. Max 1024 bytes.
    defaultPolicy: The default policy for all classes.
    keepSampleMax: The number of maximum samples on local storage.
      default=10000.
    labelFileUri: Required. The dataset label text file uri. Must start with
      'gs://' and can contain up to 8192 bytes.
    tag: Required. tag for sampled data used for uploading. Can contain
      alphabets, digits and underscores. Max 1024 bytes.
    targetUri: Required. The target uri where the sampled data will be
      located. Must start with 'gs:// and can contain up to 8192 bytes.
  """
  # NOTE(review): apitools-style generated message; prefer regenerating from
  # the API spec over hand edits (field ordinals define the wire format).
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ClassPolicyValue(_messages.Message):
    r"""The policies to specify sampling strategy for each class.
    Messages:
      AdditionalProperty: An additional property for a ClassPolicyValue
        object.
    Fields:
      additionalProperties: Additional properties of type ClassPolicyValue
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ClassPolicyValue object.
      Fields:
        key: Name of the additional property.
        value: A MlSamplingPolicy attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('MlSamplingPolicy', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  classPolicy = _messages.MessageField('ClassPolicyValue', 1)
  datasetName = _messages.StringField(2)
  defaultPolicy = _messages.MessageField('MlSamplingPolicy', 3)
  keepSampleMax = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  labelFileUri = _messages.StringField(5)
  tag = _messages.StringField(6)
  targetUri = _messages.StringField(7)
class MlSamplingPolicy(_messages.Message):
  r"""Data used to control the sampling process of the ML inference results.
  Fields:
    samplingRate: The default rate to sample a target data. default=0.1 for
      10% sampling.
    timeDelaySec: The time delay between sampling in sec. default=0 for 0 sec.
  """
  # NOTE(review): apitools-style generated message; prefer regenerating from
  # the API spec over hand edits (field ordinals define the wire format).
  samplingRate = _messages.FloatField(1, variant=_messages.Variant.FLOAT)
  timeDelaySec = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class PortBinding(_messages.Message):
  r"""Defines the publish information of the docker container.
  Enums:
    ProtocolValueValuesEnum: Protocol for communication. TCP will be used if
      not specified.
  Fields:
    containerPortEnd: Required. The ending number of container port mapping
      range. Should be between 1-65535, and equals to or larger than
      container_port_start.
    containerPortStart: Required. The starting number of container port
      mapping range. All the ports between container_port_start and
      container_port_end will be published to host. Should be between
      1-65535.
    hostIp: Should be ip address of host machine. For example, `127.0.0.1`. If
      specified, host_port_start and host_port_end also should be specified.
    hostPortEnd: The ending number of host port mapping range. Should be
      between 1-65535. If specified, should be equals to or larger than
      host_port_start. If not specified, host_port_start should not be
      specified.
    hostPortStart: The starting number of host port mapping range. Published
      container ports will be mapped to the host ports in this range. If not
      specified, host_port_end should not be specified, and container ports
      will be mapped to randomly selected host ports. Should be between
      1-65535.
    protocol: Protocol for communication. TCP will be used if not specified.
  """
  # NOTE(review): apitools-style generated message; prefer regenerating from
  # the API spec over hand edits (field ordinals define the wire format).
  class ProtocolValueValuesEnum(_messages.Enum):
    r"""Protocol for communication. TCP will be used if not specified.
    Values:
      PROTOCOL_UNSPECIFIED: Default value, used when its value unspecified.
      TCP: TCP. Edge runtime will use TCP if unspecified.
      UDP: UDP
      SCTP: SCTP
    """
    PROTOCOL_UNSPECIFIED = 0
    TCP = 1
    UDP = 2
    SCTP = 3
  containerPortEnd = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  containerPortStart = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  hostIp = _messages.StringField(3)
  hostPortEnd = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  hostPortStart = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  protocol = _messages.EnumField('ProtocolValueValuesEnum', 6)
class Rule(_messages.Message):
r"""Defines rules between topics.
Enums:
DestinationDomainValueValuesEnum: Required. The destination of the
messages to be republished. Cannot be unspecified.
OperationValueValuesEnum: Required. Indicates which action will be
applied. If FORWARD, the messages will be imported from cloud to edge or
exported from edge to cloud. If REWRITE, the messages will be
republished within the edge device with new topic name, that is defined
in `rewrite_topic_name`. Cannot be unspecified.
SourceDomainValueValuesEnum: Required. The source of the messages to be
rewritten. Cannot be unspecified.
Fields:
destinationDomain: Required. The destination of the messages to be
republished. Cannot be unspecified.
operation: Required. Indicates which action will be applied. If FORWARD,
the messages will be imported from cloud to edge or exported from edge
to cloud. If REWRITE, the messages will be republished within the edge
device with new topic name, that is defined in `rewrite_topic_name`.
Cannot be unspecified.
rewriteTopicName: The new topic name to be rewritten if the operation is
REWRITE. Will be ignored if operation is FORWARD. Max 1024 bytes.
sourceDomain: Required. The source of the messages to be rewritten. Cannot
be unspecified.
sourceFilter: Required. Indicates the topic filter of the messages to
apply this rule. Max 1024 bytes.
"""
class DestinationDomainValueValuesEnum(_messages.Enum):
r"""Required. The destination of the messages to be republished. Cannot be
unspecified.
Values:
DOMAIN_UNSPECIFIED: Default value, used when its value unspecified.
CLOUD: Cloud
EDGE: Edge
"""
DOMAIN_UNSPECIFIED = 0
CLOUD = 1
EDGE = 2
class OperationValueValuesEnum(_messages.Enum):
r"""Required. Indicates which action will be applied. If FORWARD, the
messages will be imported from cloud to edge or exported from edge to
cloud. If REWRITE, the messages will be republished within the edge device
with new topic name, that is defined in `rewrite_topic_name`. Cannot be
unspecified.
Values:
OPERATION_UNSPECIFIED: Default value, used when its value unspecified.
FORWARD: Forward the topic without changing the topic.
REWRITE: Rewrite the message to the specified destination topic.
"""
OPERATION_UNSPECIFIED = 0
FORWARD = 1
REWRITE | |
<filename>rk/rk.py
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
from errno import EACCES, ENOTDIR
from getpass import getuser
from json import dumps, load
from os import link, listdir, makedirs, remove, strerror
from os.path import dirname, exists, expanduser, isdir, isfile, join
from shutil import copyfile, rmtree
from subprocess import call
from sys import argv, exit
from configobj import ConfigObj
# Directory this module was loaded from; all config paths are relative to it.
module_location = dirname(__file__)
config_rk_abs_path = join(module_location, "config/rk.ini")
config = ConfigObj(config_rk_abs_path)
# NOTE(review): this dict shadows the stdlib 'argparse' module name (the file
# imports ArgumentParser directly, so there is no runtime clash).
argparse = {} # Strings for -h --help
messages = {} # Strings for output
def _load_line_pairs(abs_path, target):
    """Read a file of alternating key/value lines into the ``target`` dict.

    Lines come in pairs: a key line followed by its value line.  A trailing
    unpaired key line is ignored (the previous code raised IndexError).
    """
    with open(abs_path, 'r') as f:
        lines = f.read().splitlines()
    for key, value in zip(lines[0::2], lines[1::2]):
        target[key] = value

def create_dictionaries():
    """Create "argparse" and "messages" dictionaries

    Both config files share the same alternating key/value line format, so
    the duplicated parsing loops are collapsed into one helper.
    """
    for rel_path_key, target in (("config_argparse_rel_path", argparse),
                                 ("config_messages_rel_path", messages)):
        rel_path = config[rel_path_key]
        _load_line_pairs(join(module_location, rel_path), target)
def install_all(args):
    """Install all remote jupyter kernels from kernels dict

    Loads the kernels.json mapping, stores its kernel names (sorted) on
    ``args.kernel_names`` and delegates to install_kernel.
    """
    config_kernels_rel_path = config["config_kernels_rel_path"]
    config_kernels_abs_path = join(module_location, config_kernels_rel_path)
    # Load kernels.json file
    with open(config_kernels_abs_path, 'r') as f:
        kernels_dict = load(f)
    # sorted() over a dict iterates its keys; replaces the former
    # list-comprehension-over-.keys() plus separate .sort() call.
    args.kernel_names = sorted(kernels_dict)
    install_kernel(args)
def install_kernel(args):
    """Install remote jupyter kernel/kernels

    When ``args.kernel_names`` is None, installs the single template
    kernel described by the top-level config.  Otherwise installs every
    kernel named in ``args.kernel_names`` from the kernels config file.
    If a kernel directory already exists, the user is asked (y/yes/yep)
    whether to replace it; on "yes" the kernel is uninstalled and this
    function recurses to reinstall it.
    """
    def copy_logos(img_location, logo_name_srt, destination):
        """Copy logos

        Hard-links the 32px and 64px logos into *destination*, falling
        back to a plain file copy (hard links fail across filesystems).
        """
        for size in ["32", "64"]:
            logo_abs_path_str = join(join(module_location, img_location),
                                     logo_name_srt)
            logo_abs_path = logo_abs_path_str.format(size)
            logo_name = logo_name_srt.format(size)
            if exists(logo_abs_path) and isfile(logo_abs_path):
                try:
                    link(logo_abs_path, join(destination, logo_name))
                except Exception:
                    try:
                        copyfile(logo_abs_path, join(destination, logo_name))
                    except Exception as exception: # Python3 PermissionError
                        error_code = exception.errno
                        if error_code == EACCES: # 13
                            print(messages["_error_NoRoot"])
                            exit(1)
                        else:
                            print(messages["_error_Oops"] %
                                  strerror(error_code))
                            exit(1)
    def create_directory(directory_name, mode=0o777):
        """Recursive directory creation function
        os.chmod work only for last directory

        If a path component exists as a regular file (ENOTDIR), walks up
        the path removing the offending files, then retries creation.
        """
        try:
            makedirs(directory_name, mode)
        except Exception as exception:
            error_code = exception.errno
            if error_code == EACCES: # 13 (Python3 PermissionError)
                print(messages["_error_NoRoot"])
                exit(1)
            elif error_code == ENOTDIR: # 20 (Python3 NotADirectoryError)
                # Remove regular files blocking the directory path
                path = directory_name
                while path != '/':
                    if isfile(path):
                        try:
                            remove(path)
                        except Exception as exception: # Python3
                            # PermissionError
                            error_code = exception.errno
                            if error_code == EACCES: # 13
                                print(messages["_error_NoRoot"])
                                exit(1)
                            else:
                                print(messages["_error_Oops"] %
                                      strerror(error_code))
                                exit(1)
                    path = dirname(path)
                # Retry now that the blocking files are gone
                try:
                    makedirs(directory_name, mode)
                except Exception as exception: # Python3 PermissionError
                    error_code = exception.errno
                    if error_code == EACCES: # 13
                        print(messages["_error_NoRoot"])
                        exit(1)
                    else:
                        print(messages["_error_Oops"] % strerror(error_code))
                        exit(1)
            else:
                print(messages["_error_Oops"] % strerror(error_code))
                exit(1)
    def create_kernel_json_file(display_name, language, script, interpreter,
                                connection_file, remote_host, destination):
        """Create kernel.json file

        The "argv" order (script, interpreter, connection file, remote
        host) is the command line jupyter will execute for this kernel.
        """
        kernel_dict = {"argv": [], "display_name": display_name,
                       "language": language}
        kernel_dict["argv"].append(script)
        kernel_dict["argv"].append(interpreter)
        kernel_dict["argv"].append(connection_file)
        kernel_dict["argv"].append(remote_host)
        try:
            with open(join(destination, "kernel.json"), 'w') as f:
                f.write(dumps(kernel_dict, indent=1, sort_keys=True))
        except Exception as exception: # Python3 PermissionError
            error_code = exception.errno
            if error_code == EACCES: # 13
                print(messages["_error_NoRoot"])
                exit(1)
            else:
                print(messages["_error_Oops"] % strerror(error_code))
                exit(1)
    kernels_location = config["kernels_location"]
    if '~' in kernels_location:
        kernels_location = expanduser(kernels_location)
    img_location = config["img_location"]
    logo_name_srt = config["logo_name_srt"]
    script = config["script"]
    connection_file = config["connection_file"]
    config_kernels_rel_path = config["config_kernels_rel_path"]
    config_kernels_abs_path = join(module_location,
                                   config_kernels_rel_path)
    kernel_names = args.kernel_names
    if kernel_names == None:
        # Install template of remote kernel
        kernel_name = config["kernel_name"]
        display_name = config["display_name"]
        language = config["language"]
        interpreter = config["interpreter"]
        remote_host = config["remote_host"]
        kernel_abs_path = join(kernels_location, kernel_name)
        # A stale regular file at the kernel path is removed first
        if exists(kernel_abs_path) and isfile(kernel_abs_path):
            try:
                remove(kernel_abs_path)
            except Exception as exception: # Python3 PermissionError
                error_code = exception.errno
                if error_code == EACCES: # 13
                    print(messages["_error_NoRoot"])
                    exit(1)
                else:
                    print(messages["_error_Oops"] % strerror(error_code))
                    exit(1)
        if not exists(kernel_abs_path):
            # Create directory
            create_directory(kernel_abs_path, 0o755)
            # Copy logos
            copy_logos(img_location, logo_name_srt, kernel_abs_path)
            # Create kernel.json
            create_kernel_json_file(display_name, language, script,
                                    interpreter, connection_file,
                                    remote_host, kernel_abs_path)
            print(messages["_installed_template"])
        else:
            # Kernel directory already exists: ask before replacing it
            print(messages["_delete_template"])
            answer = raw_input()
            answer_lower = answer.lower()
            if ((answer_lower == 'y') or (answer_lower == 'yes') or
                (answer_lower == 'yep')):
                uninstall_kernel(args)
                install_kernel(args)
    else:
        # Install kernel/kernels
        # Load kernels.json file
        with open(config_kernels_abs_path, 'r') as f:
            kernels_dict = load(f)
        # Check kernel_names list/
        no_kernel_names = []
        for kernel_name in kernel_names:
            if kernel_name not in kernels_dict:
                no_kernel_names.append(kernel_name)
        if len(no_kernel_names) != 0:
            # Refuse the whole request if any requested name is unknown
            if len(no_kernel_names) == 1:
                print(messages["_error_NoKernel"] % no_kernel_names[0])
            else:
                print(messages["_error_NoKernels"] %
                      '\' \''.join(no_kernel_names))
            exit(1)
        # /Check kernel_names list
        for kernel_name in kernel_names:
            display_name = kernels_dict[kernel_name]["display_name"]
            language = kernels_dict[kernel_name]["language"]
            interpreter = kernels_dict[kernel_name]["interpreter"]
            remote_host = kernels_dict[kernel_name]["remote_host"]
            kernel_abs_path = join(kernels_location, kernel_name)
            # A stale regular file at the kernel path is removed first
            if exists(kernel_abs_path) and isfile(kernel_abs_path):
                try:
                    remove(kernel_abs_path)
                except Exception as exception: # Python3 PermissionError
                    error_code = exception.errno
                    if error_code == EACCES: # 13
                        print(messages["_error_NoRoot"])
                        exit(1)
                    else:
                        print(messages["_error_Oops"] % strerror(error_code))
                        exit(1)
            if not exists(kernel_abs_path):
                # Create directory
                create_directory(kernel_abs_path, 0o755)
                # Copy logos
                copy_logos(img_location, logo_name_srt, kernel_abs_path)
                # Create kernel.json
                create_kernel_json_file(display_name, language, script,
                                        interpreter, connection_file,
                                        remote_host, kernel_abs_path)
                print(messages["_installed"] % kernel_name)
            else:
                # Kernel directory already exists: ask before replacing it
                print(messages["_delete"] % kernel_name)
                answer = raw_input()
                answer_lower = answer.lower()
                if ((answer_lower == 'y') or (answer_lower == 'yes') or
                    (answer_lower == 'yep')):
                    args.kernel_names = [kernel_name]
                    uninstall_kernel(args)
                    install_kernel(args)
def main():
    """Entry point: build the config dictionaries, then dispatch to the
    sub-command handler selected on the command line."""
    create_dictionaries()
    parsed = parse_command_line_args()
    parsed.function_name(parsed)
def parse_command_line_args():
    """Build the "rk" argument parser and return the parsed namespace.

    Prints help and exits cleanly when invoked without any arguments.
    """
    # Create top parser
    parser = ArgumentParser(prog="rk", description=argparse["_parser"],
                            add_help=True)
    parser.add_argument("-v", "--version", action="version",
                        version="rk 0.3b1")
    # Create subparsers for the top parser
    subparsers = parser.add_subparsers(title=argparse["_subparsers"])

    def add_subcommand(name, message_key, handler, **extra_defaults):
        """Create one subparser; description and help share a message key."""
        subparser = subparsers.add_parser(name,
                                          description=argparse[message_key],
                                          help=argparse[message_key])
        subparser.set_defaults(function_name=handler, **extra_defaults)
        return subparser

    add_subcommand("list", "_parser_list", show_kernels_list)
    install_parser = add_subcommand("install", "_parser_install",
                                    install_kernel)
    install_parser.add_argument("kernel_names", action="store", nargs='+',
                                metavar="KERNEL_NAME")
    add_subcommand("install-template", "_parser_install_template",
                   install_kernel, kernel_names=None)
    add_subcommand("install-all", "_parser_install_all", install_all)
    uninstall_parser = add_subcommand("uninstall", "_parser_uninstall",
                                      uninstall_kernel)
    uninstall_parser.add_argument("kernel_names", action="store", nargs='+',
                                  metavar="KERNEL_NAME")
    add_subcommand("uninstall-template", "_parser_uninstall_template",
                   uninstall_kernel, kernel_names=None)
    add_subcommand("uninstall-all", "_parser_uninstall_all", uninstall_all)
    add_subcommand("ssh", "_parser_ssh", setup_ssh_auto_login)
    if len(argv) == 1:
        parser.print_help()
        exit(0)  # Clean exit without any errors/problems
    return parser.parse_args()
def setup_ssh_auto_login(args):
    """Setup SSH for auto login without a password

    Generates an RSA key pair when none of the standard private/public
    key pairs exist, then copies the public key to the remote host and
    loads the private key into ssh-agent.
    """
    keys_location = "~/.ssh"
    pri_key_paths = ["~/.ssh/id_dsa", "~/.ssh/id_ecdsa", "~/.ssh/id_ed25519",
                     "~/.ssh/id_rsa"]

    def regular_file(path):
        return exists(path) and isfile(path)

    # Look for an existing private/public key pair
    have_key_pair = False
    for pri_key_path in pri_key_paths:
        pri_key_abs_path = expanduser(pri_key_path)
        if (regular_file(pri_key_abs_path) and
                regular_file(pri_key_abs_path + ".pub")):
            have_key_pair = True
            break
    if not have_key_pair:
        keys_dir = expanduser(keys_location)
        if not exists(keys_dir):
            # Create keys dir
            makedirs(keys_dir)
        # Create a public and a private keys using the ssh-keygen command
        call("ssh-keygen -t rsa -b 4096 -N '' -f ~/.ssh/id_rsa", shell=True)
    # Ask about a remote machine
    print(messages["_ask_remote_host"])
    remote_username_at_remote_host = raw_input()
    if '@' in remote_username_at_remote_host:
        r_username, r_host = remote_username_at_remote_host.split('@')
        if getuser() == r_username:
            # Local username is the same as a remote username
            remote_username_at_remote_host = r_host
    # Copy a public key to a remote machine using the ssh-copy-id command
    call("ssh-copy-id %s" % remote_username_at_remote_host, shell=True)
    # Ensure ssh-agent is enabled
    call("eval \"$(ssh-agent -s)\"", shell=True)
    # Adds private key identities to the authentication agent
    call("ssh-add ~/.ssh/id_rsa", shell=True)
def show_kernels_list(args):
    """Print the name and display name of every configured remote kernel."""
    kernels_abs_path = join(module_location, config["config_kernels_rel_path"])
    # Load kernels.json file
    with open(kernels_abs_path, 'r') as f:
        kernels_dict = load(f)
    # Print kernels in sorted order
    for kernel in sorted(kernels_dict.keys()):
        print("%s (display name: \"%s\")" % (kernel,
              kernels_dict[kernel]["display_name"]))
def uninstall_all(args):
    """Uninstall all jupyter kernels from kernels location

    Removes every directory under the kernels location and reports the
    removed kernel names (zero / one / many message variants).
    """
    kernels_location = config["kernels_location"]
    if '~' in kernels_location:
        kernels_location = expanduser(kernels_location)
    removed_kernels = []
    for element in listdir(kernels_location):
        element_abs_path = join(kernels_location, element)
        if not isdir(element_abs_path):
            continue
        try:
            rmtree(element_abs_path)
        except Exception as exception:  # Python3 PermissionError
            error_code = exception.errno
            if error_code == EACCES:  # 13
                print(messages["_error_NoRoot"])
            else:
                print(messages["_error_Oops"] % strerror(error_code))
            exit(1)
        removed_kernels.append(element)
    removed_kernels.sort()
    if not removed_kernels:
        print(messages["_uninstalled_all_zero"])
    elif len(removed_kernels) == 1:
        print(messages["_uninstalled_all"] % removed_kernels[0])
    else:
        print(messages["_uninstalled_all_multiple"] %
              '\' \''.join(removed_kernels))
def uninstall_kernel(args):
"""Uninstall remote jupyter kernel/kernels"""
kernels_location = config["kernels_location"]
if '~' in kernels_location:
kernels_location = expanduser(kernels_location)
kernel_names = args.kernel_names
if kernel_names == None:
# Uninstall template of remote kernel
kernel_name = config["kernel_name"]
kernel_abs_path = join(kernels_location, kernel_name)
if exists(kernel_abs_path):
if isdir(kernel_abs_path):
try:
rmtree(kernel_abs_path)
except Exception as exception: # Python3 PermissionError
error_code = exception.errno
if error_code == EACCES: # 13
print(messages["_error_NoRoot"])
exit(1)
else:
print(messages["_error_Oops"] % strerror(error_code))
exit(1)
elif isfile(kernel_abs_path):
try:
remove(kernel_abs_path)
except Exception as exception: # | |
<reponame>aws-solutions/serverless-transit-network-orchestrator
# !/bin/python
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""State Machine Handler module"""
from os import environ
import inspect
from time import sleep
from datetime import datetime, timedelta
from state_machine.lib.organizations import Organizations
import json
from secrets import choice
from state_machine.utils.metrics import Metrics
from state_machine.lib.ec2 import EC2
from state_machine.lib.dynamodb import DDB
from state_machine.lib.ram import RAM
from state_machine.lib.sts import STS
from state_machine.lib.sns import SNS
import logging
from state_machine.lib.cloud_watch_logs import CloudWatchLogs
from state_machine.utils.helper import timestamp_message, current_time
from state_machine.utils.string_manipulation import convert_string_to_list
from state_machine.lib.assume_role_helper import AssumeRole
from state_machine.lib.exceptions import (
ResourceNotFoundException,
AttachmentCreationInProgressException,
AlreadyConfiguredException,
ResourceBusyException,
RouteTableNotFoundException,
)
CLASS_EVENT = " Class Event"  # suffix for the "<ClassName> Class Event" log line
EXECUTING = "Executing: "  # prefix for "Executing: <Class>/<method>" log lines
TGW_VPC_ERROR = "The TGW-VPC Attachment is not in 'available'"  # shared error text
class TransitGateway:
"""
This class contains functions to manage Transit Gateway related resources.
"""
    def __init__(self, event):
        """Cache the state-machine event and set up helpers.

        event : dict
            State machine payload; "account" and "region" identify the
            spoke account/region this execution operates on.
        """
        self.event = event
        self.logger = logging.getLogger(__name__)
        # Spoke identifiers come straight from the event payload
        self.spoke_account_id = self.event.get("account")
        self.spoke_region = self.event.get("region")
        self.assume_role = AssumeRole()
        self.logger.info(self.__class__.__name__ + CLASS_EVENT)
        self.logger.info(event)
    def _session(self, account_id):
        # instantiate EC2 sessions
        # Returns an EC2 wrapper built from credentials assumed in the
        # given (spoke) account.
        return EC2(
            credentials=self.assume_role(account_id),
        )
def _print(self, description, response):
self.logger.info(f"Printing {description}")
self.logger.info(response)
def _message(self, method, e):
return {
"FILE": __file__.split("/")[-1],
"CLASS": self.__class__.__name__,
"METHOD": method,
"EXCEPTION": str(e),
}
    def _create_tag(self, resource, key, message):
        """Tag *resource* in the spoke account with a timestamped status
        message under the tag key "STNOStatus-<key>".

        Exceptions are logged but deliberately swallowed: tagging is a
        best-effort audit trail and must not fail the state machine.
        """
        try:
            self.logger.info(
                EXECUTING
                + self.__class__.__name__
                + "/"
                + inspect.stack()[0][3]
            )
            ec2 = self._session(self.spoke_account_id)
            ec2.create_tags(
                resource, "STNOStatus-" + key, timestamp_message(message)
            )
        except Exception as e:
            message = self._message(inspect.stack()[0][3], e)
            self.logger.exception(message)
    def get_transit_gateway_vpc_attachment_state(self):
        """Refresh "AttachmentState" in the event from the attachment's
        current state and return the (mutated) event.

        When no attachment exists the state is forced to "deleted".
        """
        try:
            self.logger.info(
                EXECUTING
                + self.__class__.__name__
                + "/"
                + inspect.stack()[0][3]
            )
            # skip checking the TGW attachment status if it does not exist
            if self.event.get("TgwAttachmentExist").lower() == "yes":
                ec2 = self._session(self.spoke_account_id)
                response = ec2.get_transit_gateway_vpc_attachment_state(
                    self.event.get("TransitGatewayAttachmentId")
                )
                self._print("Transit Gateway Attachment State: ", response)
                # the list should always contain a single item
                self.event.update({"AttachmentState": response[0].get("State")})
                if (
                    response[0].get("State") == "pending"
                    or response[0].get("State") == "modifying"
                ):
                    # if the tgw attachment stage is modifying and multiple state machine executions are in progress
                    # sleeping for random number of seconds to avoid race condition failure.
                    _seconds = choice(range(5, 10))
                    sleep(_seconds)
            else:
                # set attachment state to deleted because it does not exist
                # and creation was skipped in the CRUD operation stage.
                # The attachment was previously deleted or was never created.
                self.logger.info("TGW Attachment does not exist.")
                self.event.update({"AttachmentState": "deleted"})
            return self.event
        except Exception as e:
            message = self._message(inspect.stack()[0][3], e)
            self.logger.exception(message)
            raise
    def describe_transit_gateway_vpc_attachments(self):
        """Find any live TGW-VPC attachment for the event's VPC and
        record its id and state in the event.

        Also sets "FoundExistingSubnetInAttachment" so later steps know
        whether the event's subnet is already part of the attachment.
        Returns the (mutated) event.
        """
        try:
            self.logger.info(
                EXECUTING
                + self.__class__.__name__
                + "/"
                + inspect.stack()[0][3]
            )
            ec2 = self._session(self.spoke_account_id)
            # only attachments in these states count as "existing"
            states = ["available", "pending", "modifying"]
            response = ec2.describe_transit_gateway_vpc_attachments(
                environ.get("TGW_ID"), self.event.get("VpcId"), states
            )
            self._print("Transit Gateway Attachment List", response)
            if response:
                self.event.update({"TgwAttachmentExist": "yes"})
                # check if the subnet is already in the TGW VPC Attachment
                for attachment in response:
                    if attachment.get("VpcId") == self.event.get("VpcId"):
                        # add TGW Attachment Id in the event for modifications in the state machine
                        self.event.update(
                            {
                                "TransitGatewayAttachmentId": attachment.get(
                                    "TransitGatewayAttachmentId"
                                )
                            }
                        )
                        self.event.update(
                            {"AttachmentState": attachment.get("State")}
                        )
                        # look for subnet id in existing attachment
                        if self.event.get("SubnetId") in attachment.get(
                            "SubnetIds"
                        ):
                            self._print(
                                "subnet found in existing attachment",
                                self.event.get("SubnetId"),
                            )
                            self.event.update(
                                {"FoundExistingSubnetInAttachment": "yes"}
                            )
                        else:
                            self._print(
                                "subnet list for existing TGW-VPC attachment",
                                attachment.get("SubnetIds"),
                            )
                            self.event.update(
                                {"FoundExistingSubnetInAttachment": "no"}
                            )
            else:
                self.event.update({"TgwAttachmentExist": "no"})
                self.event.update({"AttachmentState": "does-not-exist"})
            return self.event
        except Exception as e:
            message = self._message(inspect.stack()[0][3], e)
            self.logger.exception(message)
            raise
    def _create_tgw_attachment(self, ec2):
        """Create a TGW-VPC attachment for the event's VPC/subnet and
        record the new attachment id and state in the event.

        Raises AttachmentCreationInProgressException when a parallel
        execution is already creating the attachment, so the state
        machine can retry later.
        """
        try:
            self.logger.info(
                EXECUTING
                + self.__class__.__name__
                + "/"
                + inspect.stack()[0][3]
            )
            self.logger.info(
                "Creating TGW Attachment with Subnet ID: {}".format(
                    self.event.get("SubnetId")
                )
            )
            response = ec2.create_transit_gateway_vpc_attachment(
                environ.get("TGW_ID"),
                self.event.get("VpcId"),
                self.event.get("SubnetId"),
            )
            self._print("Create Transit Gateway Attachment Response", response)
            self.event.update(
                {
                    "AttachmentState": response.get(
                        "TransitGatewayVpcAttachment", {}
                    ).get("State")
                }
            )
            self.event.update(
                {
                    "TransitGatewayAttachmentId": response.get(
                        "TransitGatewayVpcAttachment", {}
                    ).get("TransitGatewayAttachmentId")
                }
            )
            self.event.update({"Action": "CreateTgwVpcAttachment"})
            self.event.update({"TgwAttachmentExist": "yes"})
        except Exception as e:
            # A boto ClientError carries its code in e.response; anything
            # without a .response attribute falls back to "".
            try:
                error_code = e.response["Error"]["Code"]
            except Exception:
                error_code = ""
            # If another step function execution is creating an attachment
            # in parallel, EC2 returns a DuplicateTransitGatewayAttachment
            # error. Raise a specific exception so the step function can
            # try again:
            if error_code == "DuplicateTransitGatewayAttachment":
                raise AttachmentCreationInProgressException(e)
            message = self._message(inspect.stack()[0][3], e)
            self.logger.exception(message)
            raise
    def _delete_tgw_attachment(self, ec2):
        """Delete the event's TGW-VPC attachment and tag the VPC to
        record that attachment, association and propagation are gone."""
        try:
            self.logger.info(
                EXECUTING
                + self.__class__.__name__
                + "/"
                + inspect.stack()[0][3]
            )
            # if this exception is thrown then it is safe to delete transit gateway attachment
            delete_response = ec2.delete_transit_gateway_vpc_attachment(
                self.event.get("TransitGatewayAttachmentId")
            )
            self._print(
                "Delete Transit Gateway Attachment Response", delete_response
            )
            self.event.update(
                {
                    "AttachmentState": delete_response.get(
                        "TransitGatewayVpcAttachment", {}
                    ).get("State")
                }
            )
            # during this step the associations and propagation are also removed.
            self._create_tag(
                self.event.get("VpcId"),
                "VPCAttachment",
                "VPC has been detached from the Transit Gateway",
            )
            self._create_tag(
                self.event.get("VpcId"),
                "VPCAssociation",
                "VPC has been dissociated with the Transit Gateway Routing Table/Domain",
            )
            self._create_tag(
                self.event.get("VpcId"),
                "VPCPropagation",
                "VPC RT propagation has been disabled from the "
                "Transit Gateway Routing Table/Domain",
            )
        except Exception as e:
            message = self._message(inspect.stack()[0][3], e)
            self.logger.exception(message)
            raise
    def _add_subnet_to_tgw_attachment(self, ec2):
        """Add the event's subnet to the existing TGW-VPC attachment.

        Raises ResourceBusyException when the attachment is in an
        incorrect state; auto-rejects the request when the subnet's AZ
        already has a subnet in the attachment.
        """
        try:
            self.logger.info(
                EXECUTING
                + self.__class__.__name__
                + "/"
                + inspect.stack()[0][3]
            )
            self.logger.info(
                "Add Subnet: {} to Tgw Attachment".format(
                    self.event.get("SubnetId")
                )
            )
            self.event.update({"Action": "AddSubnet"})
            response = ec2.add_subnet_to_tgw_attachment(
                self.event.get("TransitGatewayAttachmentId"),
                self.event.get("SubnetId"),
            )
            if response.get("Error") == "IncorrectState":
                raise ResourceBusyException
            elif response.get("Error") == "DuplicateSubnetsInSameZone":
                # only one subnet per AZ is allowed; mark as rejected
                self.event.update({"Status": "auto-rejected"})
                comment = "DuplicateSubnetsInSameZoneError: In a TGW VPC attchment, you can add only one subnet per Availability Zone."
                self.event.update({"Comment": comment})
                self._create_tag(self.event.get("SubnetId"), "Subnet", comment)
            else:
                self._print(
                    "Modify (Add Subnet) Transit Gateway Attachment Response",
                    response,
                )
                self.event.update(
                    {
                        "AttachmentState": response.get(
                            "TransitGatewayVpcAttachment", {}
                        ).get("State")
                    }
                )
                self._create_tag(
                    self.event.get("SubnetId"),
                    "Subnet",
                    "Subnet appended to the TGW attachment.",
                )
        except Exception as e:
            message = self._message(inspect.stack()[0][3], e)
            self.logger.exception(message)
            raise
    def _remove_subnet_from_tgw_attachment(self, ec2):
        """Remove the event's subnet from the TGW-VPC attachment,
        deleting the whole attachment when it is the last subnet."""
        try:
            self.logger.info(
                EXECUTING
                + self.__class__.__name__
                + "/"
                + inspect.stack()[0][3]
            )
            self.logger.info(
                "Remove Subnet: {} from Tgw Attachment".format(
                    self.event.get("SubnetId")
                )
            )
            self.event.update({"Action": "RemoveSubnet"})
            response = ec2.remove_subnet_from_tgw_attachment(
                self.event.get("TransitGatewayAttachmentId"),
                self.event.get("SubnetId"),
            )
            if response.get("Error") == "IncorrectState":
                raise ResourceBusyException
            # this exception is caught if the last subnet in the attachment is being deleted
            elif response.get("Error") == "InsufficientSubnetsException":
                self.logger.info(
                    "Insufficient Subnets when calling the ModifyTransitGatewayVpcAttachment operation, "
                    "This is the last subnet in the TGW-VPC Attachment. Deleting TGW Attachment..."
                )
                self.event.update({"Action": "DeleteTgwVpcAttachment"})
                self._delete_tgw_attachment(ec2)
            else:
                self._print(
                    "Modify (Remove Subnet) Transit Gateway Attachment Response",
                    response,
                )
                self.event.update(
                    {
                        "AttachmentState": response.get(
                            "TransitGatewayVpcAttachment", {}
                        ).get("State")
                    }
                )
        except Exception as e:
            message = self._message(inspect.stack()[0][3], e)
            self.logger.exception(message)
            raise
    def tgw_attachment_crud_operations(self):
        """Create/update/delete the TGW-VPC attachment based on the
        "TgwAttachmentExist" / "SubnetTagFound" /
        "FoundExistingSubnetInAttachment" flags gathered earlier.

        Returns the (mutated) event.
        """
        try:
            self.logger.info(
                EXECUTING
                + self.__class__.__name__
                + "/"
                + inspect.stack()[0][3]
            )
            ec2 = self._session(self.spoke_account_id)
            # create attachment if TGW Attachment does not exist and Subnet tag exists
            if (
                self.event.get("TgwAttachmentExist") == "no"
                and self.event.get("SubnetTagFound") == "yes"
            ):
                self._create_tgw_attachment(ec2)
                self._create_tag(
                    self.event.get("SubnetId"),
                    "Subnet",
                    "Subnet added to the TGW attachment.",
                )
                self._create_tag(
                    self.event.get("VpcId"),
                    "VPCAttachment",
                    "VPC has been attached to the Transit Gateway",
                )
            # update - add subnet to attachment
            # NOTE(review): after the create branch above,
            # "FoundExistingSubnetInAttachment" is normally unset, so
            # this second check is a no-op in that path — confirm.
            if (
                self.event.get("FoundExistingSubnetInAttachment") == "no"
                and self.event.get("SubnetTagFound") == "yes"
            ):
                self._add_subnet_to_tgw_attachment(ec2)
            # update - remove subnet from attachment
            # OR
            # delete - if only one subnet left in attachment
            elif (
                self.event.get("FoundExistingSubnetInAttachment") == "yes"
                and self.event.get("SubnetTagFound") == "no"
            ):
                self._remove_subnet_from_tgw_attachment(ec2)
                self._create_tag(
                    self.event.get("SubnetId"),
                    "Subnet",
                    "Subnet removed from the TGW attachment.",
                )
            else:
                self.logger.info("No action performed.")
            # find existing TGW route table association to support update action
            self._find_existing_tgw_rt_association(
                ec2, self.event.get("RouteTableList")
            )
            return self.event
        except Exception as e:
            message = self._message(inspect.stack()[0][3], e)
            self.logger.exception(message)
            raise
    def _extract_tgw_route_table_names(self):
        """Pull the association / propagation route-table names from the
        event, matching the tag keys configured via the ASSOCIATION_TAG
        and PROPAGATION_TAG environment variables.

        Returns (associate_with, propagate_to): a lower-cased string (or
        None) and a list of lower-cased strings (or None).
        """
        try:
            self.logger.info(
                EXECUTING
                + self.__class__.__name__
                + "/"
                + inspect.stack()[0][3]
            )
            # look for defined tag keys in the event
            associate_with, propagate_to = None, None
            for key, value in self.event.items():
                if (
                    key.lower().strip()
                    == environ.get("ASSOCIATION_TAG").lower().strip()
                ):
                    self.logger.info(
                        "Key matched {}:".format(
                            environ.get("ASSOCIATION_TAG").lower().strip()
                        )
                    )
                    self.logger.info("{} : {}".format(key, value))
                    associate_with = value.lower().strip()
                elif (
                    key.lower().strip()
                    == environ.get("PROPAGATION_TAG").lower().strip()
                ):
                    self.logger.info(
                        "Key matched {}:".format(
                            environ.get("PROPAGATION_TAG").lower().strip()
                        )
                    )
                    self.logger.info("{} : {}".format(key, value))
                    # propagation tag value is iterable: one entry per table
                    propagate_to = [x.lower().strip() for x in value]
            return associate_with, propagate_to
        except Exception as e:
            message = self._message(inspect.stack()[0][3], e)
            self.logger.exception(message)
            raise
def describe_transit_gateway_route_tables(self):
try:
self.logger.info(
EXECUTING
+ self.__class__.__name__
+ "/"
+ inspect.stack()[0][3]
)
ec2 = EC2()
# describe tgw route tables for the provided TGW ID
response = ec2.describe_transit_gateway_route_tables(
environ.get("TGW_ID")
)
self._print("Transit Gateway Route Tables", response)
# returns a tuple (string, list)
(
associate_with_table,
propagate_to_tables,
) = self._extract_tgw_route_table_names()
self.logger.info(
"Table Names in the association: {} | propagation: {}".format(
associate_with_table, propagate_to_tables
)
)
# extract route table ids
rtb_list = self._extract_route_table_ids(
associate_with_table, propagate_to_tables, response
)
self.event.update({"RouteTableList": rtb_list})
# find existing TGW route table association to support update action
# needed for 'Association changed?' choice
self._find_existing_tgw_rt_association(
ec2, self.event.get("RouteTableList")
)
# find existing TGW route table propagations
self.get_transit_gateway_attachment_propagations()
# set approval flag
self._set_approval_flag(response)
# set status based on | |
required. Alternatively where more response
headers are required setting ResponseDatesAsStrings to True will suppress
conversion of LastModified, Expires etc. to datetime, instead returning
them as strings, which can improve performance if datetime representation
of those fields is not required.
"""
"""
response = await self.api_request("GetObject", "GET", f"/{Bucket}/{Key}")
if response.status == 200:
obj = {}
if not MinimiseResponse:
headers = response.headers
# Map API header keys to SDK response dict keys using a table for speed.
for key, value in headers.items():
key = self.header_mapping.get(key.lower())
if not key:
pass
elif isinstance(key, str):
obj[key] = value
elif key[1] == int or key[1] == float:
obj[key[0]] = key[1](value)
elif key[1] == bool:
value = value.lower()
value = True if value == "true" else False
obj[key[0]] = value
elif key[1] == datetime:
if ResponseDatesAsStrings:
obj[key[0]] = value
else:
Oddly the LastModified and Date values returned as headers
in GetObject are in "%a, %d %b %Y %H:%M:%S %Z" format
whereas LastModified in ListObjectsV2 XML is actually in
rfc3339. Try the observed format and fall back to rfc3339
in case this is just a quirk with minio.
try:
time = datetime.strptime(value, "%a, %d %b %Y %H:%M:%S %Z")
except ValueError:
time = parse_rfc3339_datetime(value)
obj[key[0]] = time
obj["Body"] = StreamingBody(response)
return obj
"""
async def create_bucket(self, Bucket=None):
redis = await self.session()
if await redis.sismember("s3://", Bucket):
code = "BucketAlreadyOwnedByYou"
message = "Your previous request to create the named bucket succeeded and you already own it."
raise self.exceptions.from_code(code)(code, "CreateBucket", message)
await redis.sadd("s3://", Bucket) # Add the "Bucket"
async def delete_bucket(self, Bucket=None):
redis = await self.session()
if not await redis.sismember("s3://", Bucket):
code = "NoSuchBucket"
message = "The specified bucket does not exist."
raise self.exceptions.from_code(code)(code, "DeleteBucket", message)
bucket_has_objects = False
cursor, iterable = await redis.scan(cursor=0, match=f"{Bucket}/*")
for key in iterable:
bucket_has_objects = True
break
if bucket_has_objects:
code = "BucketNotEmpty"
message = "The bucket you tried to delete is not empty."
raise self.exceptions.from_code(code)(code, "DeleteBucket", message)
await redis.srem("s3://", Bucket) # Remove the "Bucket"
    async def list_objects(self, **kwargs):
        """
        Implements both ListObjects and ListObjectsV2.

        Keys are discovered via redis SCAN; the SCAN cursor doubles as
        the S3 ContinuationToken for pagination.
        """
        #api_call = "ListObjectsV2" if kwargs.get("list-type") == "2" else "ListObjects"
        bucket = kwargs.get("Bucket", "")
        max_keys = int(kwargs.get("MaxKeys", 1000))
        prefix = kwargs.get("Prefix", "")
        response = {"Name": bucket, "MaxKeys": max_keys, "Prefix": prefix}
        continuation_token = kwargs.get("ContinuationToken")
        if continuation_token:
            # Resume the scan where the previous page left off.
            response["ContinuationToken"] = continuation_token
            cursor = continuation_token
        else:
            cursor = 0
        redis = await self.session()
        # count is only a hint to redis; a page may hold fewer (or more) keys
        cursor, iterable = await redis.scan(
            cursor=cursor, count=max_keys, match=f"{bucket}/{prefix}*"
        )
        if cursor:
            # Non-zero cursor means more pages follow.
            response["IsTruncated"] = True
            response["NextContinuationToken"] = cursor
        else:
            response["IsTruncated"] = False
        """
        TODO how to get things like Size and LastModified metadata for objects
        without having to do a secondary look-up, as redis.scan() only recovers
        keys. One option is to cache the info client side and use server
        assisted client side caching to notify of any changes made to keys.
        """
        contents = []
        for key in iterable:
            # Strip the "<bucket>/" prefix to recover the S3 object key.
            contents.append({
                "Key": key.decode("utf-8")[len(bucket) + 1:],
                "StorageClass": "STANDARD"
            })
        response["Contents"] = contents
        response["KeyCount"] = len(contents)
        return response
async def list_objects_v2(self, **kwargs):
kwargs["list-type"] = "2"
return await self.list_objects(**kwargs)
async def delete_objects(self, **kwargs):
prefix = kwargs.get("Bucket", "")
# Get "Objects" and "Quiet" from "Delete" field of request.
delete = kwargs.get("Delete", {})
# From docs: "When you add this element, you must set its value to true."
quiet = True if "Quiet" in delete else False
objects = delete.get("Objects", [])
keys = []
for obj in objects:
key = prefix + "/" + obj.get("Key")
keys.append(key)
redis = await self.session()
await redis.delete(*keys)
"""
For now just assume successful deletion. Redis only returns a count of
the number of deleted keys so there's no (obvious) way to identify which
specific keys might have failed to delete.
"""
response = {}
if not quiet:
response["Deleted"] = objects
return response
#-------------------------------------------------------------------------------
def decode_token(token):
    """Decode an "opaque" base64 token string back into a dictionary."""
    raw = base64.b64decode(token.encode('utf-8'))
    return json.loads(raw.decode('utf-8'))
def encode_token(token):
    """Encode a dictionary into an "opaque" base64 token string."""
    payload = json.dumps(token).encode('utf-8')
    return base64.b64encode(payload).decode('utf-8')
def inject_token(dictionary, token):
    """Merge *token* into *dictionary* in place.

    Real values overwrite entries; None / 'None' values remove any
    existing entry for the same key instead.
    """
    for key, value in token.items():
        if value is None or value == 'None':
            dictionary.pop(key, None)
        else:
            dictionary[key] = value
class Paginator(object):
    """Factory for PageIterator objects bound to one paginated method."""

    def __init__(self, method, config):
        self.method = method
        self.config = config

    def paginate(self, **kwargs):
        """Create paginator object for an operation.

        Returns an iterable object; iterating over it yields a single
        page of the response at a time.
        """
        pagination_config = kwargs.pop("PaginationConfig", {})
        limit_key = self.config["limit_key"]
        max_items = pagination_config.get("MaxItems", None)
        if max_items:
            max_items = int(max_items)
        else:
            max_items = None
        starting_token = pagination_config.get("StartingToken", None)
        page_size = pagination_config.get("PageSize", None)
        if page_size and not limit_key:
            raise PaginationError("PageSize parameter is not supported for the " +
                                  "pagination interface for this operation.")
        if page_size:
            page_size = int(page_size)
        else:
            page_size = None
        return PageIterator(
            self.method,
            self.config["input_token"], self.config["output_token"],
            self.config["more_results"], self.config["result_key"],
            self.config.get('non_aggregate_keys', []), limit_key,
            # From PaginationConfig
            max_items, starting_token, page_size,
            kwargs)
class PageIterator():
    def __init__(self, method, input_token, output_token, more_results,
                 result_keys, non_aggregate_keys, limit_key, max_items,
                 starting_token, page_size, method_kwargs):
        """Async iterator that drives *method* one page at a time.

        NOTE(review): non_aggregate_keys is accepted but never stored —
        presumably unused in this port; confirm before removing.
        """
        self.method = method
        self.input_token = input_token  # e.g. ["Marker"] or ["ContinuationToken"]
        self.output_token = output_token  # e.g. ["NextMarker", "Contents[-1].Key"]
        self.more_results = more_results  # e.g. "IsTruncated"
        self.result_keys = result_keys  # e.g. ["Contents", "CommonPrefixes"]
        self.max_items = max_items  # overall cap on aggregated items (None = unlimited)
        self.limit_key = limit_key  # e.g. "MaxKeys"
        self.starting_token = starting_token  # opaque resume token (see decode_token)
        self.page_size = page_size  # per-request page size (None = service default)
        self.method_kwargs = method_kwargs  # kwargs forwarded to every page request
        self.resume_token = None  # set while iterating; lets callers resume later
    def __aiter__(self):
        # __anext__ is written as an async *generator* (its body
        # contains ``yield``), so calling it returns an async iterator;
        # returning that object here is what makes
        # ``async for page in iterator`` work.
        # NOTE(review): despite its name, __anext__ is not the usual
        # one-item protocol method here.
        return self.__anext__()
async def __anext__(self):
method_kwargs = self.method_kwargs
previous_next_token = None
next_token = {key: None for key in self.input_token}
# The number of items from result_key we've seen so far.
total_items = 0
first_request = True
primary_result_key = self.result_keys[0]
starting_truncation = 0
# If StartingToken has been specified inject it into method's kwargs.
if self.starting_token is not None:
# The starting token is a dict passed as a base64 encoded string.
next_token = decode_token(self.starting_token)
starting_truncation = next_token.pop("boto_truncate_amount", 0)
inject_token(method_kwargs, next_token)
if self.page_size is not None:
# Set limit_key parameter name (e.g. MaxKeys) to page size if set
method_kwargs[self.limit_key] = self.page_size
#print(method_kwargs)
#print()
while True:
# Actually invoke the paginated method e.g. list_objects_v2
response = await self.method(**method_kwargs)
if first_request:
# The first request is handled differently. We could
# possibly have a resume/starting token that tells us where
# to index into the retrieved page.
if self.starting_token is not None:
self._handle_first_request(response, primary_result_key, starting_truncation)
first_request = False
current_response = response.get(primary_result_key, [])
num_current_response = len(current_response)
#print(current_response)
#print(num_current_response)
truncate_amount = 0
if self.max_items is not None:
truncate_amount = total_items + num_current_response - self.max_items
if truncate_amount > 0:
self._truncate_response(response, primary_result_key,
truncate_amount, starting_truncation,
next_token)
yield response
break
else:
yield response
total_items += num_current_response
next_token = {}
# If more results (e.g. IsTruncated is True) create next token
if self.more_results and response.get(self.more_results, False):
next_token = {}
# Get e.g. NextContinuationToken value and set ContinuationToken
for output, input in zip(self.output_token, self.input_token):
#print(f"output: {output}")
#print(f"input: {input}")
if output == "NextMarker || Contents[-1].Key":
token = response.get("NextMarker", response.get("Contents"))
if isinstance(token, list):
token = token[-1].get("Key")
else:
token = response.get(output)
#print(f"token: {token}")
# Don't include empty strings as tokens, treat them as None.
if token:
next_token[input] = token
else:
next_token[input] = None
#print(f"next_token: {next_token}")
if all(t is None for t in next_token.values()):
break # If the next token has no values we're done iterating
if self.max_items is not None and total_items == self.max_items:
#print("On a page boundary")
# We're on a page boundary so we can set the current
# next token to be the resume token.
if "boto_truncate_amount" in value:
token_keys = sorted(self._input_token + ["boto_truncate_amount"])
else:
token_keys = sorted(self._input_token)
dict_keys = sorted(next_token.keys())
if token_keys == dict_keys:
self.resume_token = encode_token(next_token)
else:
raise ValueError("Bad starting token: {next_token}")
break
if previous_next_token is not None and previous_next_token == next_token:
raise PaginationError(
f"The same next token was received twice: {next_token}"
)
inject_token(method_kwargs, next_token)
previous_next_token = next_token
def _handle_first_request(self, response, primary_result_key,
starting_truncation):
#print("_handle_first_request")
#print(primary_result_key)
#print(response)
original = response.get(primary_result_key, []) # e.g. response["Contents"]
truncated = original[starting_truncation:]
response[primary_result_key] = truncated
# We also need to truncate any secondary result keys
# because they were not truncated in the previous last
# response.
for token in self.result_keys:
if token == primary_result_key:
continue
#print(token)
sample = | |
mypandas = mypandas.loc[(mypandas['date'] >= dico['when_beg']) & (mypandas['date'] <= dico['when_end'])]
mypandas[input_field[0]] = mypandas[input_field[0]].astype(float)
mypandas = mypandas[['date', 'clustername', input_field[0]]]
mypivot = pd.pivot_table(mypandas, index='date', columns='clustername', values=input_field[0])
source = ColumnDataSource(mypivot)
filter_data1 = mypivot[[uniqloc[0]]].rename(columns={uniqloc[0]: 'cases'})
src1 = ColumnDataSource(filter_data1)
filter_data2 = mypivot[[uniqloc[1]]].rename(columns={uniqloc[1]: 'cases'})
src2 = ColumnDataSource(filter_data2)
cases_custom = CocoDisplay.rollerJS()
hover_tool = HoverTool(tooltips=[('Cases', '@{cases}' + '{custom}'), ('date', '@date{%F}')],
formatters={'Cases': 'printf', '@{cases}': cases_custom, '@date': 'datetime'},
point_policy="snap_to_data") # ,PanTool())
panels = []
for axis_type in ax_type:
standardfig = self.standardfig(y_axis_type = axis_type,
x_axis_type = 'datetime', title = dico['titlebar'])
standardfig.yaxis[0].formatter = PrintfTickFormatter(format = "%4.2e")
if dico['title']:
standardfig.title.text = dico['title']
standardfig.add_tools(hover_tool)
def add_line(src, options, init, color):
s = Select(options = options, value = init)
r = standardfig.line(x = 'date', y = 'cases', source = src, line_width = 3, line_color = color)
li = LegendItem(label = init, renderers = [r])
s.js_on_change('value', CustomJS(args=dict(s0=source, s1=src, li=li),
code="""
var c = cb_obj.value;
var y = s0.data[c];
s1.data['cases'] = y;
li.label = {value: cb_obj.value};
s1.change.emit();
"""))
return s, li
s1, li1 = add_line(src1, uniqloc, uniqloc[0], self.scolors[0])
s2, li2 = add_line(src2, uniqloc, uniqloc[1], self.scolors[1])
standardfig.add_layout(Legend(items = [li1, li2]))
standardfig.legend.location = 'top_left'
layout = row(column(row(s1, s2), row(standardfig)))
panel = Panel(child = layout, title = axis_type)
panels.append(panel)
tabs = Tabs(tabs = panels)
label = dico['titlebar']
return tabs
    ###################### END Plots ##################
    ##################### BEGIN HISTOS/MAPS ##################
    def decohistomap(func):
        """
        Decorator function used for histogram and map.

        Wraps ``func`` (a histogram/map renderer) so that it receives
        normalised arguments: the variable to display, an optional Bokeh
        DateSlider, the option dict from standard_input(), and both the
        full and the date-filtered (geo)pandas frames.
        """
        def generic_hm(self, mypandas, input_field = None, cursor_date = False, maplabel = False, **kwargs):
            # Resolve dates / locations / displayed variable into ``dico``.
            mypandas, dico = self.standard_input(mypandas, input_field, **kwargs, plot_last_date=True)
            #if func.__name__ == 'pycoa_mapfolium' or func.__name__ == 'innerdecopycoageo' or :
            # Multi-location clusters are exploded to one row per location.
            if isinstance(mypandas['location'].iloc[0],list):
                mypandas['rolloverdisplay'] = mypandas['clustername']
                mypandas = mypandas.explode('location')
            else:
                mypandas['rolloverdisplay'] = mypandas['location']
            # Pick the variable to display: explicit argument first, then
            # the 'var_displayed'/'input_field' options, else column 2.
            if type(input_field) is None.__class__ and dico['which'] is None.__class__:
                input_field = mypandas.columns[2]
            else:
                if type(input_field) is None.__class__:
                    input_field = dico['var_displayed']
                else:
                    input_field = dico['input_field'][0]
            uniqloc = mypandas.clustername.unique()
            if func.__name__ != 'pycoa_mapfolium' and func.__name__ != 'innerdecopycoageo':
                mypandas = mypandas.drop_duplicates(["date", "codelocation","clustername"])
            geopdwd = mypandas
            geopdwd = geopdwd.sort_values(by = input_field, ascending=False)
            geopdwd = geopdwd.reset_index(drop = True)
            orientation = kwargs.get('orientation', 'horizontal')
            # Slider bounds: guarantee at least a one-day span.
            if dico['when_end'] <= geopdwd.date.min():
                started = geopdwd.date.min()
                ended = geopdwd.date.min() + dt.timedelta(days=1)
            else:
                started = geopdwd.date.min()
                ended = dico['when_end']
            date_slider = DateSlider(title = "Date: ", start = started, end = ended,
                                     value = ended, step=24 * 60 * 60 * 1000, orientation = orientation)
            # Keep only the slider's current date in the filtered frame.
            geopdwd_filter = geopdwd.copy()
            wanted_date = date_slider.value_as_datetime.date()
            geopdwd_filter = geopdwd_filter.loc[geopdwd_filter.date == wanted_date]
            geopdwd_filter = geopdwd_filter.reset_index(drop = True)
            # Map-like outputs additionally need a geometry column.
            if func.__name__ == 'pycoa_mapfolium' or func.__name__ == 'innerdecomap' or func.__name__ == 'innerdecopycoageo':
                if isinstance(mypandas.location.to_list()[0],list):
                    geom = self.location_geometry
                    geodic={loc:geom.loc[geom.location==loc]['geometry'].values[0] for loc in geopdwd_filter.location.unique()}
                    geopdwd_filter['geometry'] = geopdwd_filter['location'].map(geodic)
                else:
                    geopdwd_filter = pd.merge(geopdwd_filter, self.location_geometry, on='location')
                geopdwd_filter = gpd.GeoDataFrame(geopdwd_filter, geometry=geopdwd_filter.geometry, crs="EPSG:4326")
                dico['tile'] = CocoDisplay.get_tile(dico['tile'], func.__name__)
            # Histogram path: rebuild the frame cluster by cluster.
            # NOTE(review): ``all(perloc != 0)`` iterates the comparison
            # frame's *column labels*, which are all truthy strings, so
            # this condition appears to always be True — confirm intent.
            if func.__name__ == 'inner' or func.__name__ == 'pycoa_histo':
                pos = {}
                new = pd.DataFrame()#columns=geopdwd_filter.columns, dtype=lisgeopdwd_filter.dtypes)
                n = 0
                for i in uniqloc:
                    perloc = geopdwd_filter.loc[geopdwd_filter.clustername == i]
                    if all(perloc != 0):
                        pos = perloc.index[0]
                        if new.empty:
                            new = perloc
                        else:
                            new = new.append(perloc)
                        n += 1
                geopdwd_filter = new.reset_index(drop=True)
            # Pre-compute UTC timestamps used by date-slider callbacks.
            my_date = geopdwd.date.unique()
            dico_utc = {i: DateSlider(value = i ).value for i in my_date}
            geopdwd['date_utc'] = [dico_utc[i] for i in geopdwd.date]
            geopdwd_filter=geopdwd_filter.sort_values(by=[input_field], ascending=False)
            geopdwd_filter = geopdwd_filter.reset_index(drop=True)
            # Without a cursor request, pass date_slider=False downstream.
            if cursor_date is False:
                date_slider = False
            return func(self, input_field, date_slider, maplabel, dico, geopdwd, geopdwd_filter)
        return generic_hm
def pycoa_heatmap(self, pycoa_pandas):
"""Create a Bokeh heat map from a pandas input
location in the column is mandatory in the pandas structure
Keyword arguments
-----------------
pycoa_pandas : pandas considered
y_axis : location
x_axis : column name
The values are normalized to maximun observed in the current columns
"""
if 'location' not in pycoa_pandas.columns:
raise CoaKeyError('location column name is not present, this is mandatory')
pycoa_pandas = pycoa_pandas.set_index('location')
pycoa_pandas = pycoa_pandas.apply(lambda x: (x - x.min()) / (x.max() - x.min()))
pycoa_pandas.columns.name = 'data'
pycoa_pandas = pycoa_pandas.stack().rename("value").reset_index()
standardfig = self.standardfig(y_range = list(pycoa_pandas.location.unique()),
x_range = list(pycoa_pandas.data.unique()))
standardfig.xaxis.major_label_orientation = "vertical"
invViridis256 = Viridis256[::-1]
color_mapper = LinearColorMapper(palette = invViridis256, low = pycoa_pandas.value.min(),
high = pycoa_pandas.value.max(), nan_color = '#ffffff')
color_bar = ColorBar(color_mapper = color_mapper, label_standoff=4,
border_line_color = None, location=(0, 0), orientation = 'vertical', ticker = BasicTicker())
standardfig.add_layout(color_bar, 'right')
standardfig.rect(
y="location",
x="data",
width=1,
height=1,
source=ColumnDataSource(pycoa_pandas),
line_color=None,
fill_color=transform('value', color_mapper))
standardfig.add_tools(HoverTool(
tooltips=[('location', '@rolloverdisplay'), ('value', '@value')],
point_policy="snap_to_data"))
return standardfig
    @decohistomap
    def pycoa_histo(self, input_field, date_slider, maplabel, dico, geopdwd, geopdwd_filtered):
        """Create a Bokeh histogram (value distribution) from a pandas input.

        Keyword arguments
        -----------------
        input_field : variable from the pandas data. If the pandas is produced
            by the pycoa get_stat method then 'daily', 'weekly' and 'cumul'
            can also be used
        title : title for the figure, no title by default
        width_height : list of width and height of the histo, default [500,400]
        bins : number of bins of the histogram, default 50
        when : default None. Dates are given as dd/mm/yyyy; either a single
            end date or two dates separated by ':' defining the time cut.
        Note
        -----------------
        HoverTool is available; it returns the position of the middle of
        the bin and the value.
        """
        mypandas = geopdwd_filtered.rename(columns = {'cases': input_field})
        if 'location' in mypandas.columns:
            uniqloc = list(mypandas.clustername.unique())
            allval = mypandas.loc[mypandas.clustername.isin(uniqloc)][['clustername', input_field,'permanentdisplay']]
            min_val = allval[input_field].min()
            max_val = allval[input_field].max()
            # A single location can't fill a histogram: force two bins
            # anchored at zero.
            if len(uniqloc) == 1:
                dico['bins'] = 2
                min_val = 0.
            if dico['bins']:
                bins = dico['bins']
            else:
                bins = len(uniqloc)
            # bins+1 edges; delta uses bins-1 so the last *inner* edge
            # lands exactly on max_val.
            delta = (max_val - min_val ) / (bins-1)
            interval = [ min_val + (i-1)*delta for i in range(1,(bins+1)+1)]
            contributors = { i : [] for i in range(bins)}
            for i in range(len(allval)):
                # NOTE(review): bisect_left could in principle return
                # ``bins`` for a float rounding just above the last edge,
                # which would KeyError below — confirm with real inputs.
                rank = bisect.bisect_left(interval, allval.iloc[i][input_field])
                contributors[rank].append(allval.iloc[i]['clustername'])
            colors = itertools.cycle(self.lcolors)
            lcolors = [next(colors) for i in range(bins)]
            contributors = dict(sorted(contributors.items()))
            frame_histo = pd.DataFrame({
                'left': interval[:-1],
                'right':interval[1:],
                'middle_bin': [format((i+j)/2, ".1f") for i,j in zip(interval[:-1],interval[1:])],
                'top': [len(i) for i in list(contributors.values())],
                'contributors': [', '.join(i) for i in contributors.values() ],
                'colors': lcolors})
        #tooltips = """
        #<div style="width: 400px">
        #<b>Middle value:</b> @middle_bin <br>
        #<b>Contributors:</b> @contributors{safe} <br>
        #</div>
        #"""
        tooltips = """
        <b>Middle value:</b> @middle_bin <br>
        <b>Contributors:</b> @contributors{safe} <br>
        """
        hover_tool = HoverTool(tooltips = tooltips)
        panels = []
        bottom = 0
        # One tab per axis-scale combination; flags persist across loop
        # iterations and are only overwritten when a case matches.
        x_axis_type, y_axis_type, axis_type_title = 3 * ['linear']
        for axis_type in ["linear", "linlog", "loglin", "loglog"]:
            if axis_type == 'linlog':
                y_axis_type, axis_type_title = 'log', 'logy'
            if axis_type == 'loglin':
                x_axis_type, y_axis_type, axis_type_title = 'log', 'linear', 'logx'
            if axis_type == 'loglog':
                x_axis_type, y_axis_type = 'log', 'log'
                axis_type_title = 'loglog'
            standardfig = self.standardfig(x_axis_type=x_axis_type, y_axis_type=y_axis_type, title=dico['titlebar'])
            standardfig.yaxis[0].formatter = PrintfTickFormatter(format = "%4.2e")
            standardfig.xaxis[0].formatter = PrintfTickFormatter(format="%4.2e")
            standardfig.add_tools(hover_tool)
            standardfig.x_range = Range1d(1.05 * interval[0], 1.05 * interval[-1])
            standardfig.y_range = Range1d(0, 1.05 * frame_histo['top'].max())
            # Log scales cannot start at 0: clamp the first edge/bottom.
            if x_axis_type == "log":
                left = 0.8
                if frame_histo['left'][0] <= 0:
                    frame_histo.at[0, 'left'] = left
                else:
                    left = frame_histo['left'][0]
                standardfig.x_range = Range1d(left, 10 * interval[-1])
            if y_axis_type == "log":
                bottom = 0.0001
                standardfig.y_range = Range1d(0.001, 10 * frame_histo['top'].max())
            standardfig.quad(source=ColumnDataSource(frame_histo), top='top', bottom=bottom, left='left', \
                             right='right', fill_color='colors')
            panel = Panel(child=standardfig, title=axis_type_title)
            panels.append(panel)
        tabs = Tabs(tabs=panels)
        return tabs
def decohistopie(func):
def inner(self, input_field, date_slider, maplabel, dico, geopdwd, geopdwd_filtered):
"""
Decorator for
Horizontal histogram & Pie Chart
"""
title_fig = input_field
geopdwd['cases'] = geopdwd[input_field]
geopdwd_filtered['cases'] = geopdwd_filtered[input_field]
my_date = geopdwd.date.unique()
dico_utc = {i: DateSlider(value=i).value for i in my_date}
geopdwd['date_utc'] = [dico_utc[i] for i in geopdwd.date]
geopdwd = geopdwd.drop_duplicates(["date", "codelocation","clustername"])#for sumall avoid duplicate
geopdwd_filtered = geopdwd_filtered.sort_values(by='cases', ascending = False).reset_index()
locunique = geopdwd_filtered.clustername.unique()#geopdwd_filtered.location.unique()
geopdwd_filter = geopdwd_filtered.copy()
nmaxdisplayed = 18
if len(locunique) >= nmaxdisplayed :#and func.__name__ != 'pycoa_pie' :
if func.__name__ != 'pycoa_pie' :
geopdwd_filter = geopdwd_filter.loc[geopdwd_filter.clustername.isin(locunique[:nmaxdisplayed])]
else:
geopdwd_filter_first = geopdwd_filter.loc[geopdwd_filter.clustername.isin(locunique[:nmaxdisplayed-1])]
geopdwd_filter_other = geopdwd_filter.loc[geopdwd_filter.clustername.isin(locunique[nmaxdisplayed-1:])]
geopdwd_filter_other = geopdwd_filter_other.groupby('date').sum()
geopdwd_filter_other['location'] = 'others'
geopdwd_filter_other['clustername'] = 'others'
geopdwd_filter_other['codelocation'] = 'others'
geopdwd_filter_other['permanentdisplay'] = 'others'
geopdwd_filter_other['rolloverdisplay'] = 'others'
geopdwd_filter_other['colors'] = '#FFFFFF'
geopdwd_filter = geopdwd_filter_first
geopdwd_filter = geopdwd_filter.append(geopdwd_filter_other)
if func.__name__ == 'pycoa_horizonhisto' :
#geopdwd_filter['bottom'] = geopdwd_filter.index
geopdwd_filter['left'] = geopdwd_filter['cases']
geopdwd_filter['right'] = geopdwd_filter['cases']
geopdwd_filter['left'] = geopdwd_filter['left'].apply(lambda x: 0 if x > 0 else x)
geopdwd_filter['right'] = geopdwd_filter['right'].apply(lambda x: 0 if x < 0 else x)
bthick = 0.95
geopdwd_filter['top'] = [len(geopdwd_filter.index) + bthick / 2 - i for i in
geopdwd_filter.index.to_list()]
geopdwd_filter['bottom'] = [len(geopdwd_filter.index) - bthick / 2 - i for i in
geopdwd_filter.index.to_list()]
geopdwd_filter['horihistotexty'] = geopdwd_filter['bottom'] + bthick/2
geopdwd_filter['horihistotextx'] | |
#-*- coding: utf-8 -*-
import copy
import datetime
import functools
import json
import logging
import random
import re
from contextlib import nested
import celery
import httpretty
import mock # noqa
from django.utils import timezone
from django.db import IntegrityError
from mock import call
import pytest
from nose.tools import * # flake8: noqa
from scripts.stuck_registration_audit import find_failed_registrations
from framework.auth import Auth
from framework.celery_tasks import handlers
from website.archiver import (
ARCHIVER_INITIATED,
ARCHIVER_SUCCESS,
ARCHIVER_FAILURE,
ARCHIVER_NETWORK_ERROR,
ARCHIVER_SIZE_EXCEEDED,
NO_ARCHIVE_LIMIT,
)
from website.archiver import utils as archiver_utils
from website.archiver.tasks import ArchivedFileNotFound
from website.app import * # noqa
from website.archiver import listeners
from website.archiver.tasks import * # noqa
from osf.models.archive import ArchiveTarget, ArchiveJob
from website.archiver.decorators import fail_archive_on_error
from website import mails
from website import settings
from website.util import waterbutler_api_url_for
from website.util.sanitize import strip_html
from osf.models import MetaSchema
from addons.base.models import BaseStorageAddon
from osf_tests import factories
from tests.base import OsfTestCase, fake
from tests import utils as test_utils
from tests.utils import unique as _unique
# Chatty loggers that would otherwise flood the test output; silence
# them for the whole module run.
SILENT_LOGGERS = (
    'framework.celery_tasks.utils',
    'website.app',
    'website.archiver.tasks',
)
for logger_name in SILENT_LOGGERS:
    logging.getLogger(logger_name).setLevel(logging.CRITICAL)
# Factories yielding *unique* fake values so generated files never
# collide on hash or name within a single test run.
sha256_factory = _unique(fake.sha256)
name_factory = _unique(fake.ean13)
def file_factory(name=None, sha256=None):
    """Return a waterbutler-style file dict for use in fake file trees.

    ``name`` and ``sha256`` default to fresh unique fake values; ``size``
    is a random byte count in [4, 4000].
    """
    file_name = name if name else name_factory()
    return {
        'path': '/' + file_name,
        'name': file_name,
        'kind': 'file',
        'size': random.randint(4, 4000),
        'extra': {
            'hashes': {
                'sha256': sha256 if sha256 else sha256_factory()
            }
        },
    }
def folder_factory(depth, num_files, num_folders, path_above):
    """Recursively build a fake waterbutler folder dict.

    Parameters
    ----------
    depth : levels of sub-folders below this one (0 => leaf, no children)
    num_files : files created per non-leaf folder
    num_folders : sub-folders created per non-leaf folder
    path_above : parent folder path the new folder is joined onto

    BUG FIX: ``num_folders`` was previously ignored — exactly one
    sub-folder was created per level regardless of its value.  The
    conditional is also parenthesised to make the precedence explicit.
    """
    new_path = os.path.join(path_above.rstrip('/'), fake.word())
    return {
        'path': new_path,
        'kind': 'folder',
        'children': ([
            file_factory()
            for i in range(num_files)
        ] + [
            folder_factory(depth - 1, num_files, num_folders, new_path)
            for i in range(num_folders)
        ]) if depth > 0 else []
    }
def file_tree_factory(depth, num_files, num_folders):
    """Build a fake waterbutler file tree rooted at '/'.

    BUG FIX: honour ``num_folders`` (previously a single sub-folder was
    always created at the root level), matching folder_factory; the
    conditional is parenthesised to make the precedence explicit.
    """
    return {
        'path': '/',
        'kind': 'folder',
        'children': ([
            file_factory()
            for i in range(num_files)
        ] + [
            folder_factory(depth - 1, num_files, num_folders, '/')
            for i in range(num_folders)
        ]) if depth > 0 else []
    }
def select_files_from_tree(file_tree):
    """
    Select a file from every depth of a file_tree. This implementation relies on:
    - every folder has a subtree of equal depth (i.e. any folder selection is
    adequate to select a file from the maximum depth)
    The file_tree_factory fulfills this condition.

    Returns a dict mapping each chosen file's sha256 to its node dict.
    """
    selected = {}
    node = file_tree
    # Walk one folder chain downward, grabbing the first file per level.
    while node is not None:
        files = [child for child in node['children'] if child['kind'] == 'file']
        if files:
            chosen = files[0]
            selected[chosen['extra']['hashes']['sha256']] = chosen
        folders = [child for child in node['children'] if child['kind'] == 'folder']
        node = folders[0] if folders else None
    return selected
# Canonical two-level fixture tree used throughout these tests: one file
# at the root plus one folder containing a single file (sizes 128/256).
FILE_TREE = {
    'path': '/',
    'name': '',
    'kind': 'folder',
    'children': [
        {
            'path': '/1234567',
            'name': 'Afile.file',
            'kind': 'file',
            'size': '128',
        },
        {
            'path': '/qwerty',
            'name': 'A Folder',
            'kind': 'folder',
            'children': [
                {
                    'path': '/qwerty/asdfgh',
                    'name': 'coolphoto.png',
                    'kind': 'file',
                    'size': '256',
                }
            ],
        }
    ],
}
# The same fixture tree shaped the way the waterbutler API returns it:
# every node's fields nested under an 'attributes' key.
WB_FILE_TREE = {
    'attributes': {
        'path': '/',
        'name': '',
        'kind': 'folder',
        'children': [
            {
                'attributes': {
                    'path': '/1234567',
                    'name': 'Afile.file',
                    'kind': 'file',
                    'size': '128',
                }
            },
            {
                'attributes': {
                    'path': '/qwerty',
                    'name': 'A Folder',
                    'kind': 'folder',
                    'children': [
                        {
                            'attributes': {
                                'path': '/qwerty/asdfgh',
                                'name': 'coolphoto.png',
                                'kind': 'file',
                                'size': '256',
                            }
                        }
                    ],
                }
            }
        ],
    }
}
class MockAddon(object):
    """Minimal stand-in for a storage addon used by the archiver tests."""

    # Class-level flag/config shared by every instance.
    complete = True
    config = mock.MagicMock()

    def __init__(self, **kwargs):
        # Random id so individual instances are distinguishable.
        self._id = fake.md5()

    def _get_file_tree(self, user, version):
        """Always serve the canned FILE_TREE fixture."""
        return FILE_TREE

    def after_register(self, *args):
        """No registration side effects; mirrors the (node, message) shape."""
        return (None, None)

    @property
    def archive_folder_name(self):
        return 'Some Archive'

    def archive_errors(self):
        return False
# Shared singleton addon doubles keyed by their short names, plus the
# set of addon names currently "installed" on the mocked node.
mock_osfstorage = MockAddon()
mock_osfstorage.config.short_name = 'osfstorage'
mock_dropbox = MockAddon()
mock_dropbox.config.short_name = 'dropbox'
active_addons = {'osfstorage', 'dropbox'}
def _mock_get_addon(name, *args, **kwargs):
    """Return the fake addon registered under *name*, or None when the
    addon is inactive or unknown."""
    if name not in active_addons:
        return None
    known = {'dropbox': mock_dropbox, 'osfstorage': mock_osfstorage}
    return known.get(name)
def _mock_delete_addon(name, *args, **kwargs):
    """Forget *name* from the active addon set, ignoring unknown names.

    BUG FIX: ``active_addons`` is a set, whose ``remove`` raises
    KeyError — the previous ``except ValueError`` never caught it, so
    deleting a missing addon crashed.  ``discard`` is the no-raise form.
    """
    active_addons.discard(name)
def _mock_get_or_add(name, *args, **kwargs):
    # Register *name* as active, then hand back the matching fake addon
    # (None for names the doubles don't cover).
    active_addons.add(name)
    return _mock_get_addon(name)
def use_fake_addons(func):
    """Decorator: run *func* with AddonModelMixin's addon accessors
    patched to the in-memory fakes above (no DB, no real addons).

    NOTE: ``contextlib.nested`` is Python 2 only; under Python 3 this
    would need ``contextlib.ExitStack`` or a multi-manager ``with``.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        with nested(
            mock.patch('osf.models.mixins.AddonModelMixin.add_addon', mock.Mock(side_effect=_mock_get_or_add)),
            mock.patch('osf.models.mixins.AddonModelMixin.get_addon', mock.Mock(side_effect=_mock_get_addon)),
            mock.patch('osf.models.mixins.AddonModelMixin.delete_addon', mock.Mock(side_effect=_mock_delete_addon)),
            mock.patch('osf.models.mixins.AddonModelMixin.get_or_add_addon', mock.Mock(side_effect=_mock_get_or_add))
        ):
            ret = func(*args, **kwargs)
        return ret
    return wrapper
def generate_file_tree(nodes):
    """Build one fake 3x3x3 file tree per node.

    Returns a triple ``(file_trees, selected_files,
    selected_file_node_index)``: a tree per node id, the per-depth file
    selection across all trees, and a sha256 -> node id reverse index
    for those selections.
    """
    file_trees = {node._id: file_tree_factory(3, 3, 3) for node in nodes}
    selected_files = {}
    selected_file_node_index = {}
    for node in nodes:
        chosen = select_files_from_tree(file_trees[node._id])
        for sha256 in chosen:
            selected_file_node_index[sha256] = node._id
        selected_files.update(chosen)  # select files from each Node
    return file_trees, selected_files, selected_file_node_index
def generate_schema_from_data(data):
    """Derive a MetaSchema matching the shape of *data* and persist it.

    Every question becomes 'osf-upload' (has 'extra'), 'object' (dict
    value, recursed into as properties) or 'string'.  The schema is
    saved; on an IntegrityError the existing row is updated instead.
    """
    def from_property(id, prop):
        # Nested dict values become 'object' properties; anything
        # carrying an 'extra' payload is a file-upload field.
        if isinstance(prop.get('value'), dict):
            return {
                'id': id,
                'type': 'object',
                'properties': [
                    from_property(pid, sp)
                    for pid, sp in prop['value'].items()
                ]
            }
        else:
            return {
                'id': id,
                'type': 'osf-upload' if prop.get('extra') else 'string'
            }
    def from_question(qid, question):
        # BUG FIX: the body previously read ``q`` (the caller's list-
        # comprehension variable) instead of the ``question`` parameter;
        # that only worked through Python 2 comprehension scope leaking
        # and would NameError under Python 3.
        if question.get('extra'):
            return {
                'qid': qid,
                'type': 'osf-upload'
            }
        elif isinstance(question.get('value'), dict):
            return {
                'qid': qid,
                'type': 'object',
                'properties': [
                    from_property(id, value)
                    for id, value in question.get('value').items()
                ]
            }
        else:
            return {
                'qid': qid,
                'type': 'string'
            }
    _schema = {
        'name': 'Test',
        'version': 2,
        'config': {
            'hasFiles': True
        },
        'pages': [{
            'id': 'page1',
            'questions': [
                from_question(qid, q)
                for qid, q in data.items()
            ]
        }]
    }
    schema = MetaSchema(
        name=_schema['name'],
        schema_version=_schema['version'],
        schema=_schema
    )
    try:
        schema.save()
    except IntegrityError:
        # Unfortunately, we don't have db isolation between test cases for some
        # reason. Update the doc currently in the db rather than saving a new
        # one.
        schema = MetaSchema.objects.get(name=_schema['name'], schema_version=_schema['version'])
        schema.schema = _schema
        schema.save()
    return schema
def generate_metadata(file_trees, selected_files, node_index):
    """Fabricate registration metadata referencing *selected_files*.

    For every selected file two questions are produced — a direct
    osf-upload question and an object question embedding an upload —
    plus five unrelated string questions.
    """
    def _extra(sha256, selected_file):
        # Fresh list per question so entries are never aliased.
        return [{
            'sha256': sha256,
            'viewUrl': '/project/{0}/files/osfstorage{1}'.format(
                node_index[sha256],
                selected_file['path']
            ),
            'selectedFileName': selected_file['name'],
            'nodeId': node_index[sha256]
        }]
    data = {}
    data.update({
        ('q_' + sf['name']): {
            'value': fake.word(),
            'extra': _extra(sha256, sf)
        }
        for sha256, sf in selected_files.items()
    })
    data.update({
        ('q_' + sf['name'] + '_obj'): {
            'value': {
                name_factory(): {
                    'value': fake.word(),
                    'extra': _extra(sha256, sf)
                },
                name_factory(): {
                    'value': fake.word()
                }
            }
        }
        for sha256, sf in selected_files.items()
    })
    data.update({
        'q{}'.format(i): {
            'value': fake.word()
        }
        for i in range(5)
    })
    return data
class ArchiverTestCase(OsfTestCase):
    """Base case: a user, a source node and a registration with an
    archive job already initiated, ready for archiver task tests."""

    def setUp(self):
        super(ArchiverTestCase, self).setUp()
        # Give each test a fresh celery task queue.
        handlers.celery_before_request()
        self.user = factories.UserFactory()
        self.auth = Auth(user=self.user)
        self.src = factories.NodeFactory(creator=self.user)
        # send_signals=False so archiving is not kicked off automatically.
        self.dst = factories.RegistrationFactory(user=self.user, project=self.src, send_signals=False, archive=True)
        archiver_utils.before_archive(self.dst, self.user)
        self.archive_job = self.dst.archive_job
class TestStorageAddonBase(ArchiverTestCase):
    """Exercise BaseStorageAddon._get_file_tree against a mocked
    waterbutler API for every archivable storage addon."""

    tree_root = WB_FILE_TREE['attributes']['children']
    tree_child = tree_root[0]
    tree_grandchild = tree_root[1]['attributes']['children']
    tree_great_grandchild = tree_grandchild[0]
    URLS = ['/', '/1234567', '/qwerty', '/qwerty/asdfgh']
    def get_resp(self, url):
        # Map a waterbutler metadata URL onto the canned subtree it
        # should return.
        if '/qwerty/asdfgh' in url:
            return dict(data=self.tree_great_grandchild)
        if '/qwerty' in url:
            return dict(data=self.tree_grandchild)
        if '/1234567' in url:
            return dict(data=self.tree_child)
        return dict(data=self.tree_root)
    @httpretty.activate
    def _test__get_file_tree(self, addon_short_name):
        requests_made = []
        def callback(request, uri, headers):
            requests_made.append(uri)
            return (200, headers, json.dumps(self.get_resp(uri)))
        for path in self.URLS:
            url = waterbutler_api_url_for(
                self.src._id,
                addon_short_name,
                meta=True,
                path=path,
                user=self.user,
                view_only=True,
                _internal=True,
            )
            httpretty.register_uri(httpretty.GET,
                                   url,
                                   body=callback,
                                   # BUG FIX: was misspelled 'applcation/json'.
                                   content_type='application/json')
        addon = self.src.get_or_add_addon(addon_short_name, auth=self.auth)
        root = {
            'path': '/',
            'name': '',
            'kind': 'folder',
        }
        file_tree = addon._get_file_tree(root, self.user)
        assert_equal(FILE_TREE, file_tree)
        assert_equal(len(requests_made), 2)
        # Makes a request for folders ('/qwerty') but not files ('/1234567', '/qwerty/asdfgh')
        assert_true(any('/qwerty' in url for url in requests_made))
        assert_false(any('/1234567' in url for url in requests_made))
        assert_false(any('/qwerty/asdfgh' in url for url in requests_made))
    def _test_addon(self, addon_short_name):
        self._test__get_file_tree(addon_short_name)
    def test_addons(self):
        # Every archivable addon except wiki/forward must implement the
        # StorageAddonBase interface.
        for addon in [a for a in settings.ADDONS_ARCHIVABLE if a not in ['wiki', 'forward']]:
            self._test_addon(addon)
class TestArchiverTasks(ArchiverTestCase):
    @mock.patch('framework.celery_tasks.handlers.enqueue_task')
    @mock.patch('celery.chain')
    def test_archive(self, mock_chain, mock_enqueue):
        # archive() must mark the node as archiving and chain a group of
        # stat_addon tasks followed by archive_node.
        archive(job_pk=self.archive_job._id)
        targets = [self.src.get_addon(name) for name in settings.ADDONS_ARCHIVABLE]
        # Only complete storage addons are expected to be stat'ed.
        target_addons = [addon for addon in targets if (addon and addon.complete and isinstance(addon, BaseStorageAddon))]
        assert_true(self.dst.archiving)
        mock_chain.assert_called_with(
            [
                celery.group(
                    stat_addon.si(
                        addon_short_name=addon.config.short_name,
                        job_pk=self.archive_job._id,
                    ) for addon in target_addons
                ),
                archive_node.s(job_pk=self.archive_job._id)
            ]
        )
    def test_stat_addon(self):
        # stat_addon should total the sizes in the mocked tree
        # (128 + 256 bytes) for the targeted provider.
        with mock.patch.object(BaseStorageAddon, '_get_file_tree') as mock_file_tree:
            mock_file_tree.return_value = FILE_TREE
            res = stat_addon('osfstorage', self.archive_job._id)
        assert_equal(res.target_name, 'osfstorage')
        assert_equal(res.disk_usage, 128 + 256)
    @mock.patch('website.archiver.tasks.archive_addon.delay')
    def test_archive_node_pass(self, mock_archive_addon):
        # With a generous size limit, archive_node should fan out
        # archive_addon tasks for the stat'ed providers.
        settings.MAX_ARCHIVE_SIZE = 1024 ** 3
        with mock.patch.object(BaseStorageAddon, '_get_file_tree') as mock_file_tree:
            mock_file_tree.return_value = FILE_TREE
            results = [stat_addon(addon, self.archive_job._id) for addon in ['osfstorage']]
        with mock.patch.object(celery, 'group') as mock_group:
            archive_node(results, self.archive_job._id)
        archive_osfstorage_signature = archive_addon.si(
            'osfstorage',
            self.archive_job._id
        )
        # NOTE(review): ``mock_group.called_with(...)`` is not a real
        # Mock assertion — it auto-creates a child mock and is always
        # truthy, so this assert can never fail.  It should probably be
        # ``mock_group.assert_called_with(...)``; confirm the expected
        # call shape before changing it.
        assert(mock_group.called_with(archive_osfstorage_signature))
    @use_fake_addons
    def test_archive_node_fail(self):
        # Shrink the limit below the fixture's total size so archiving
        # must fail with ArchiverSizeExceeded.
        settings.MAX_ARCHIVE_SIZE = 100
        results = [stat_addon(addon, self.archive_job._id) for addon in ['osfstorage', 'dropbox']]
        with mock.patch('website.archiver.tasks.ArchiverTask.on_failure') as mock_fail:
            try:
                archive_node.apply(args=(results, self.archive_job._id))
            except:
                # The failure is inspected via the patched on_failure
                # hook below; the re-raised exception itself is noise.
                pass
        assert_true(isinstance(mock_fail.call_args[0][0], ArchiverSizeExceeded))
    @mock.patch('website.project.signals.archive_callback.send')
    @mock.patch('website.archiver.tasks.archive_addon.delay')
    def test_archive_node_does_not_archive_empty_addons(self, mock_archive_addon, mock_send):
        # An addon whose file tree has no children contributes nothing:
        # archive_addon must not be queued, but the archive callback
        # still fires so the job can complete.
        with mock.patch('osf.models.mixins.AddonModelMixin.get_addon') as mock_get_addon:
            mock_addon = MockAddon()
            def empty_file_tree(user, version):
                return {
                    'path': '/',
                    'kind': 'folder',
                    'name': 'Fake',
                    'children': []
                }
            setattr(mock_addon, '_get_file_tree', empty_file_tree)
            mock_get_addon.return_value = mock_addon
            results = [stat_addon(addon, self.archive_job._id) for addon in ['osfstorage']]
            archive_node(results, job_pk=self.archive_job._id)
            assert_false(mock_archive_addon.called)
        assert_true(mock_send.called)
    @use_fake_addons
    @mock.patch('website.archiver.tasks.archive_addon.delay')
    def test_archive_node_no_archive_size_limit(self, mock_archive_addon):
        # With the NO_ARCHIVE_LIMIT system tag on the initiator, the
        # MAX_ARCHIVE_SIZE cap must be ignored.
        settings.MAX_ARCHIVE_SIZE = 100
        self.archive_job.initiator.add_system_tag(NO_ARCHIVE_LIMIT)
        self.archive_job.initiator.save()
        with mock.patch.object(BaseStorageAddon, '_get_file_tree') as mock_file_tree:
            mock_file_tree.return_value = FILE_TREE
            results = [stat_addon(addon, self.archive_job._id) for addon in ['osfstorage', 'dropbox']]
        with mock.patch.object(celery, 'group') as mock_group:
            archive_node(results, self.archive_job._id)
        archive_dropbox_signature = archive_addon.si(
            'dropbox',
            self.archive_job._id
        )
        # NOTE(review): ``called_with`` is not a Mock assertion method —
        # this expression is always truthy, so the assert is vacuous.
        # It should likely be ``assert_called_with``; verify before fixing.
        assert(mock_group.called_with(archive_dropbox_signature))
    @mock.patch('website.archiver.tasks.make_copy_request.delay')
    def test_archive_addon(self, mock_make_copy_request):
        # archive_addon queues a waterbutler copy of the provider root
        # into the registration's archive provider and marks the
        # provider target INITIATED.
        archive_addon('osfstorage', self.archive_job._id)
        assert_equal(self.archive_job.get_target('osfstorage').status, ARCHIVER_INITIATED)
        cookie = self.user.get_or_create_cookie()
        # NOTE(review): ``called_with`` is not a Mock assertion method —
        # this assert is vacuous (always truthy); it should likely be
        # ``assert_called_with``.  Verify the expected payload first.
        assert(mock_make_copy_request.called_with(
            self.archive_job._id,
            settings.WATERBUTLER_URL + '/ops/copy',
            data=dict(
                source=dict(
                    cookie=cookie,
                    nid=self.src._id,
                    provider='osfstorage',
                    path='/',
                ),
                destination=dict(
                    cookie=cookie,
                    nid=self.dst._id,
                    provider=settings.ARCHIVE_PROVIDER,
                    path='/',
                ),
                rename='Archive of OSF Storage',
            )
        ))
| |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
    # Human-readable trigger description surfaced in the Komand UI.
    DESCRIPTION = "Monitors a list of groups for user membership changes"
class Input:
    # Keys of the trigger's input parameters (see the JSON schema below).
    GROUP_IDS = "group_ids"
    INTERVAL = "interval"
class Output:
    # Keys of the trigger's output payload (see the JSON schema below).
    USERS_ADDED_FROM_GROUPS = "users_added_from_groups"
    USERS_REMOVED_FROM_GROUPS = "users_removed_from_groups"
class UsersAddedRemovedFromGroupInput(komand.Input):
    """Input schema for the users-added/removed-from-group trigger."""

    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "group_ids": {
      "type": "array",
      "title": "Group ID's",
      "description": "A list of group ID's",
      "items": {
        "type": "string"
      },
      "order": 1
    },
    "interval": {
      "type": "integer",
      "title": "Interval",
      "description": "The time in seconds between checks for changes to the groups users",
      "default": 300,
      "order": 2
    }
  },
  "required": [
    "group_ids",
    "interval"
  ]
}
    """)

    def __init__(self):
        # BUG FIX: super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; name the class explicitly instead.
        super(UsersAddedRemovedFromGroupInput, self).__init__(self.schema)
class UsersAddedRemovedFromGroupOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"users_added_from_groups": {
"type": "array",
"title": "Additions",
"description": "Users added to a group since the last check",
"items": {
"$ref": "#/definitions/user_group"
},
"order": 1
},
"users_removed_from_groups": {
"type": "array",
"title": "Removals",
"description": "Users removed from a group since the last check",
"items": {
"$ref": "#/definitions/user_group"
},
"order": 2
}
},
"required": [
"users_added_from_groups",
"users_removed_from_groups"
],
"definitions": {
"_links": {
"type": "object",
"title": "_links",
"properties": {
"changePassword": {
"$ref": "#/definitions/changePassword",
"title": "ChangePassword",
"order": 1
},
"changeRecoveryQuestion": {
"$ref": "#/definitions/changePassword",
"title": "ChangeRecoveryQuestion",
"order": 2
},
"deactivate": {
"$ref": "#/definitions/changePassword",
"title": "Deactivate",
"order": 3
},
"expirePassword": {
"$ref": "#/definitions/changePassword",
"title": "ExpirePassword",
"order": 4
},
"forgotPassword": {
"$ref": "#/definitions/changePassword",
"title": "ForgotPassword",
"order": 5
},
"resetFactors": {
"$ref": "#/definitions/changePassword",
"title": "ResetFactors",
"order": 6
},
"resetPassword": {
"$ref": "#/definitions/changePassword",
"title": "ResetPassword",
"order": 7
}
},
"definitions": {
"changePassword": {
"type": "object",
"title": "changePassword",
"properties": {
"href": {
"type": "string",
"title": "Href",
"order": 1
}
}
}
}
},
"changePassword": {
"type": "object",
"title": "changePassword",
"properties": {
"href": {
"type": "string",
"title": "Href",
"order": 1
}
}
},
"credentials_input": {
"type": "object",
"title": "credentials_input",
"properties": {
"password": {
"$ref": "#/definitions/password",
"title": "Password",
"order": 1
},
"provider": {
"$ref": "#/definitions/provider",
"title": "Provider",
"order": 2
},
"recovery_question": {
"$ref": "#/definitions/recovery_question",
"title": "Recovery Question",
"order": 3
}
},
"definitions": {
"password": {
"type": "object",
"title": "password",
"properties": {
"value": {
"type": "string",
"title": "Value",
"description": "Password value e.g. <PASSWORD>",
"order": 1
}
}
},
"provider": {
"type": "object",
"title": "provider",
"properties": {
"name": {
"type": "string",
"title": "Name",
"description": "Provider name e.g. OKTA",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Provider type e.g. OKTA",
"order": 1
}
}
},
"recovery_question": {
"type": "object",
"title": "recovery_question",
"properties": {
"answer": {
"type": "string",
"title": "Answer",
"description": "Recovery answer e.g. <NAME>",
"order": 2
},
"question": {
"type": "string",
"title": "Question",
"description": "Recovery question e.g. Who's a major player in the cowboy scene?",
"order": 1
}
}
}
}
},
"password": {
"type": "object",
"title": "password",
"properties": {
"value": {
"type": "string",
"title": "Value",
"description": "Password value e.g. <PASSWORD>",
"order": 1
}
}
},
"profile": {
"type": "object",
"title": "profile",
"properties": {
"email": {
"type": "string",
"title": "Email",
"order": 1
},
"firstName": {
"type": "string",
"title": "FirstName",
"order": 3
},
"lastName": {
"type": "string",
"title": "LastName",
"order": 4
},
"login": {
"type": "string",
"title": "Login",
"order": 5
},
"mobilePhone": {
"type": "string",
"title": "MobilePhone",
"order": 6
},
"secondEmail": {
"type": "string",
"title": "SecondEmail",
"order": 2
}
}
},
"provider": {
"type": "object",
"title": "provider",
"properties": {
"name": {
"type": "string",
"title": "Name",
"description": "Provider name e.g. OKTA",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Provider type e.g. OKTA",
"order": 1
}
}
},
"recovery_question": {
"type": "object",
"title": "recovery_question",
"properties": {
"answer": {
"type": "string",
"title": "Answer",
"description": "Recovery answer e.g. <NAME>",
"order": 2
},
"question": {
"type": "string",
"title": "Question",
"description": "Recovery question e.g. Who's a major player in the cowboy scene?",
"order": 1
}
}
},
"user": {
"type": "object",
"title": "user",
"properties": {
"activated": {
"type": "string",
"title": "Activated",
"order": 4
},
"created": {
"type": "string",
"title": "Created",
"order": 3
},
"credentials": {
"$ref": "#/definitions/credentials_input",
"title": "Credentials",
"order": 10
},
"id": {
"type": "string",
"title": "Id",
"order": 1
},
"lastLogin": {
"type": "string",
"title": "LastLogin",
"order": 6
},
"lastUpdated": {
"type": "string",
"title": "LastUpdated",
"order": 7
},
"links": {
"$ref": "#/definitions/_links",
"title": "Links",
"order": 11
},
"passwordChanged": {
"type": "string",
"title": "PasswordChanged",
"order": 8
},
"profile": {
"$ref": "#/definitions/profile",
"title": "Profile",
"order": 9
},
"status": {
"type": "string",
"title": "Status",
"order": 2
},
"statusChanged": {
"type": "string",
"title": "StatusChanged",
"order": 5
}
},
"definitions": {
"_links": {
"type": "object",
"title": "_links",
"properties": {
"changePassword": {
"$ref": "#/definitions/changePassword",
"title": "ChangePassword",
"order": 1
},
"changeRecoveryQuestion": {
"$ref": "#/definitions/changePassword",
"title": "ChangeRecoveryQuestion",
"order": 2
},
"deactivate": {
"$ref": "#/definitions/changePassword",
"title": "Deactivate",
"order": 3
},
"expirePassword": {
"$ref": "#/definitions/changePassword",
"title": "ExpirePassword",
"order": 4
},
"forgotPassword": {
"$ref": "#/definitions/changePassword",
"title": "ForgotPassword",
"order": 5
},
"resetFactors": {
"$ref": "#/definitions/changePassword",
"title": "ResetFactors",
"order": 6
},
"resetPassword": {
"$ref": "#/definitions/changePassword",
"title": "ResetPassword",
"order": 7
}
},
"definitions": {
"changePassword": {
"type": "object",
"title": "changePassword",
"properties": {
"href": {
"type": "string",
"title": "Href",
"order": 1
}
}
}
}
},
"changePassword": {
"type": "object",
"title": "changePassword",
"properties": {
"href": {
"type": "string",
"title": "Href",
"order": 1
}
}
},
"credentials_input": {
"type": "object",
"title": "credentials_input",
"properties": {
"password": {
"$ref": "#/definitions/password",
"title": "Password",
"order": 1
},
"provider": {
"$ref": "#/definitions/provider",
"title": "Provider",
"order": 2
},
"recovery_question": {
"$ref": "#/definitions/recovery_question",
"title": "Recovery Question",
"order": 3
}
},
"definitions": {
"password": {
"type": "object",
"title": "password",
"properties": {
"value": {
"type": "string",
"title": "Value",
"description": "Password value e.g. <PASSWORD>",
"order": 1
}
}
},
"provider": {
"type": "object",
"title": "provider",
"properties": {
"name": {
"type": "string",
"title": "Name",
"description": "Provider name e.g. OKTA",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Provider type e.g. OKTA",
"order": 1
}
}
},
"recovery_question": {
"type": "object",
"title": "recovery_question",
"properties": {
"answer": {
"type": "string",
"title": "Answer",
"description": "Recovery answer e.g. <NAME>",
"order": 2
},
"question": {
"type": "string",
"title": "Question",
"description": "Recovery question e.g. Who's a major player in the cowboy scene?",
"order": 1
}
}
}
}
},
"password": {
"type": "object",
"title": "password",
"properties": {
"value": {
"type": "string",
"title": "Value",
"description": "Password value e.g. tlpWENT2m",
"order": 1
}
}
},
"profile": {
"type": "object",
"title": "profile",
"properties": {
"email": {
"type": "string",
"title": "Email",
"order": 1
},
"firstName": {
"type": "string",
"title": "FirstName",
"order": 3
},
"lastName": {
"type": "string",
"title": "LastName",
"order": 4
},
"login": {
"type": "string",
"title": "Login",
"order": 5
},
"mobilePhone": {
"type": "string",
"title": "MobilePhone",
"order": 6
},
"secondEmail": {
"type": "string",
"title": "SecondEmail",
"order": 2
}
}
},
"provider": {
"type": "object",
"title": "provider",
"properties": {
"name": {
"type": "string",
"title": "Name",
"description": "Provider name e.g. OKTA",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Provider type e.g. OKTA",
"order": 1
}
}
},
"recovery_question": {
"type": "object",
"title": "recovery_question",
"properties": {
"answer": {
"type": "string",
"title": "Answer",
"description": "Recovery answer e.g. <NAME>",
"order": 2
},
"question": {
"type": "string",
"title": "Question",
"description": "Recovery question e.g. Who's a major player in the cowboy scene?",
"order": 1
}
}
}
}
},
"user_group": {
"type": "object",
"title": "user_group",
"properties": {
"group_id": {
"type": "string",
"title": "Group Id",
"order": 2
},
"group_name": {
"type": "string",
"title": "Group Name",
"order": 1
},
"users": {
"type": "array",
"title": "Users",
"items": {
"$ref": "#/definitions/user"
},
"order": 3
}
},
"definitions": {
"_links": {
"type": "object",
"title": "_links",
"properties": {
"changePassword": {
"$ref": "#/definitions/changePassword",
"title": "ChangePassword",
"order": 1
},
"changeRecoveryQuestion": {
"$ref": "#/definitions/changePassword",
"title": "ChangeRecoveryQuestion",
"order": 2
},
"deactivate": {
"$ref": "#/definitions/changePassword",
"title": "Deactivate",
"order": 3
},
"expirePassword": {
"$ref": "#/definitions/changePassword",
"title": "ExpirePassword",
"order": 4
},
"forgotPassword": {
"$ref": "#/definitions/changePassword",
"title": "ForgotPassword",
"order": 5
},
"resetFactors": {
"$ref": "#/definitions/changePassword",
"title": "ResetFactors",
"order": 6
},
"resetPassword": {
"$ref": "#/definitions/changePassword",
"title": "ResetPassword",
"order": 7
}
},
"definitions": {
"changePassword": {
"type": "object",
"title": "changePassword",
"properties": {
| |
Reply")
except Exception:
logger.exception("An error occurred handling MHP Reply!")
self.issue_err(err=self.ERR_OTHER)
def _extract_mhp_reply(self, result):
"""
Extracts the MHP processing results from the passed data.
:param result: tuple containing generation results
:return:
"""
try:
r, mhp_seq, aid, proto_err = result
return r, mhp_seq, aid, proto_err
except Exception:
self.issue_err(err=self.ERR_OTHER)
raise LinkLayerException("Malformed MHP reply received: {}".format(result))
def _handle_mhp_err(self, result):
    """
    Handles errors from the MHP.

    Queue-mismatch and no-classical-connection errors are tracked specially:
    repeated mismatches beyond a tolerance window expire the affected queue
    items and notify the peer.  Whenever the reported MHP sequence number is
    at least the expected one, the request is expired and `self.expected_seq`
    is advanced past it.

    :param result: tuple
        Result data returned by MHP as (midpoint_outcome, mhp_seq, aid, proto_err)
    """
    # Unpack the results
    midpoint_outcome, mhp_seq, aid, proto_err = result
    # If the error occurred while program was running stop the program and free the resources
    self.clear_if_handling_emission(aid)
    # Process the error
    if proto_err == self.mhp.conn.ERR_QUEUE_MISMATCH or proto_err == self.mhp.conn.ERR_NO_CLASSICAL_OTHER:
        # Get our absolute queue id based on error
        if proto_err == self.mhp.conn.ERR_QUEUE_MISMATCH:
            # A mismatch carries both nodes' aids; pick ours by comparing node IDs
            aidA, aidB = aid
            local_aid, remote_aid = (aidA, aidB) if self.node.nodeID == self.mhp.conn.nodeA.nodeID else (aidB, aidA)
            # Increment mismatch counter if we received this before
            if aid == self._previous_mismatch:
                self._nr_of_mismatch += 1
            else:
                self._previous_mismatch = aid
                self._nr_of_mismatch = 1
            # Tolerate mismatches for roughly 1.5 full MHP cycles before expiring
            max_nr_mismatch = int(1.5 * self.scheduler.mhp_full_cycle / self.scheduler.mhp_cycle_period)
            if self._nr_of_mismatch > max_nr_mismatch:
                for a in aid:
                    qid, qseq = a
                    if self.dqp.contains_item(qid, qseq):
                        req = self.dqp.remove_item(qid, qseq).request
                        # XOR of master flags decides which node originated the request
                        if self.dqp.master ^ req.master_request:
                            originID = self.get_otherID()
                        else:
                            originID = self.node.nodeID
                        self.send_expire_notification(aid=a, createID=req.create_id, originID=originID,
                                                      old_seq=self.expected_seq, new_seq=self.expected_seq)
                        # NOTE(review): new_exp_mhp_seq=self.expected_seq - 1 looks inconsistent
                        # with the new_seq=self.expected_seq sent just above -- confirm intended.
                        self.issue_err(err=self.ERR_EXPIRE, create_id=req.create_id, origin_id=originID,
                                       old_exp_mhp_seq=self.expected_seq, new_exp_mhp_seq=self.expected_seq - 1)
                    else:
                        # No matching distributed-queue item; still notify the peer
                        self.send_expire_notification(aid=a, createID=None, originID=None,
                                                      old_seq=self.expected_seq, new_seq=self.expected_seq)
        else:
            local_aid = aid
        # If we still have the request issue and error
        if self.scheduler.has_request(local_aid):
            self.issue_err(err=proto_err)
        # Check if we may have lost a message
        if mhp_seq >= self.expected_seq:
            # Issue an expire for the request
            request = self.scheduler.get_request(local_aid)
            new_mhp_seq = mhp_seq + 1
            if request is not None:
                if self.dqp.master ^ request.master_request:
                    originID = self.get_otherID()
                else:
                    originID = self.node.nodeID
                createID = request.create_id
                self.send_expire_notification(aid=local_aid, createID=createID, originID=originID,
                                              old_seq=self.expected_seq, new_seq=new_mhp_seq)
                # Clear the request
                self.scheduler.clear_request(aid=local_aid)
                if request.measure_directly:
                    # Pop the earliest measurement results if it exists
                    try:
                        self.measurement_results[local_aid].pop(0)
                    except IndexError:
                        pass
                # self._remove_measurement_data(aid)
                # Alert higher layer protocols
                self.issue_err(err=self.ERR_EXPIRE, create_id=createID, origin_id=originID,
                               old_exp_mhp_seq=self.expected_seq, new_exp_mhp_seq=mhp_seq)
            # Update our expected seq, because error came back we should expect the subsequent seq
            self.expected_seq = new_mhp_seq
def _handle_generation_reply(self, r, mhp_seq, aid):
    """
    Handles a successful generation reply from the heralding midpoint.  If we are the
    entanglement request originator then we also correct the qubit locally if necessary.
    If the request says measure directly, the stored measurement outcome is corrected
    (bit-flipped where the heralded state is anti-correlated) and passed upward.

    :param r: int
        Outcome of the generation attempt
    :param mhp_seq: int
        MHP Sequence number corresponding to this outcome
    :param aid: tuple of (int, int)
        Absolute Queue ID corresponding to the request this generation attempt belongs to
    :return:
    """
    creq = self.scheduler.get_request(aid)
    if creq is None:
        logger.error("Request not found!")
        self.issue_err(err=self.ERR_OTHER)
        return
    # Get comm and storage qubit
    comm_q = self.scheduler.curr_gen.comm_q
    storage_q = self.scheduler.curr_gen.storage_q
    # Check if the corresponding request is measure directly
    if creq.measure_directly:
        # Grab the result and correct
        try:
            # Stored as (emission_cycle, outcome, basis) by _handle_measurement_outcome
            ecycle, m, basis = self.measurement_results[aid].pop(0)
            logger.debug("Removing measurement outcome {} in basis {} for aid {} (successful attempt)"
                         .format(m, basis, aid))
        except IndexError:
            logger.warning("Trying to grab a measurement result but there are no there")
            return
        # Flip this outcome in the case we need to apply a correction
        creator = not (self.dqp.master ^ creq.master_request)
        if creator:  # True if we're master and request was from master etc.
            # Measurements in computational basis are always anti-correlated for the entangled state
            if basis == 0:
                m ^= 1
            # Measurements in hadamard and Y basis are only anti-correlated when r == 2
            elif (basis == 1 or basis == 2) and r == 2:
                m ^= 1
        # Pass up the measurement info to higher layers
        self.corrected_measurements[aid].append((m, basis))
        self._return_ok(mhp_seq, aid)
    # We need to move state to memory qubit
    elif comm_q != storage_q:
        # Remember midpoint outcome and move target so the move-completion
        # handler can finish the OK once the program is done
        self.midpoint_outcome = r
        self.move_info = mhp_seq, aid, storage_q
        self._move_comm_to_storage(comm_q, storage_q)
    # Otherwise we're leaving the state in the communication qubit and just return the ok
    else:
        self.midpoint_outcome = r
        self._return_ok(mhp_seq=mhp_seq, aid=aid)
def _handle_photon_emission(self, evt):
    """
    Catches the event produced when MHP has emitted a photon to the midpoint.  The EGP then
    checks if the current request requires measurement of the qubit immediately and acts as
    such.  If the current request is a create-and-keep request the qubit stays in the
    communication position until the midpoint reply arrives.

    :param evt: obj `~netsquid.pydynaa.Event`
        The event that triggered this handler
    """
    # Get request resources
    comm_q = self.scheduler.curr_gen.comm_q
    logger.debug("Handling photon emission")
    if self.scheduler.is_handling_measure_directly():
        logger.debug("Beginning measurement of qubit for measure directly")
        # Set a flag to make sure we catch replies that occur during the measurement
        self.emission_handling_in_progress = self.EMIT_HANDLER_MD
        # Construct a quantum program
        prgm = QuantumProgram()
        q = prgm.get_qubit_indices(1)[0]
        # Select the basis based on the mhp cycle number (round-robin over Z/X/Y)
        possible_bases = [0, 1, 2]
        basis = possible_bases[self.scheduler.mhp_cycle_number % len(possible_bases)]
        if basis == 0:
            logger.debug("Measuring comm_q {} in Standard basis".format(comm_q))
        elif basis == 1:
            # Hadamard first so the Z-measurement below realizes an X-basis measurement
            logger.debug("Measuring comm_q {} in Hadamard basis".format(comm_q))
            prgm.apply(INSTR_H, q)
        else:
            # X-rotation by pi/2 so the Z-measurement below realizes a Y-basis measurement
            logger.debug("Measuring comm_q {} in Y basis".format(comm_q))
            prgm.apply(INSTR_ROT_X, q, angle=np.pi / 2)
        # Store the aid and basis for retrieval post measurement
        self.measurement_info.append((self.scheduler.curr_aid, basis, comm_q))
        prgm.apply(INSTR_MEASURE, q, output_key="m")
        # self.node.qmem.set_program_done_callback(self._handle_measurement_outcome, prgm=prgm)
        self._current_prgm = prgm
        self._current_prgm_name = self.OP_MEAS
        self.node.qmem.execute_program(prgm, qubit_mapping=[comm_q])
    # elif comm_q != storage_q:
    #     self._move_comm_to_storage(comm_q, storage_q)
    else:
        logger.debug("Entangled qubit will remain in comm_q until midpoint reply")
def _move_comm_to_storage(self, comm_q, storage_q):
    """
    Moves the state in the communication qubit to a specified storage qubit.
    Suspends the scheduler during this time so no new attempts start mid-move.

    :param comm_q: memory position holding the communication qubit (presumably an int index -- confirm)
    :param storage_q: memory position to move the state into
    :return:
    """
    logger.debug("Node {} : Moving comm_q {} to storage_q {}".format(self.node.name, comm_q, storage_q))
    # Sanity check: the storage position must already hold a qubit object to swap into
    if self.node.qmem._memory_positions[storage_q]._qubit is None:
        raise RuntimeError("No qubit before trying to swap")
    # Reset init info of this storage qubit
    self._next_init_cycle.pop(storage_q)
    # Construct a quantum program to correct and move
    prgm = QuantumProgram()
    qs = prgm.get_qubit_indices(2)
    qprgms.move_using_CXDirections(prgm, qs[0], qs[1])
    # Keep the scheduler idle for at most the worst-case move duration
    self.scheduler.suspend_generation(self.max_move_delay)
    # self.node.qmem.set_program_done_callback(self._handle_move_completion, prgm=prgm)
    self._current_prgm = prgm
    self._current_prgm_name = self.OP_MOVE
    self.node.qmem.execute_program(prgm, qubit_mapping=[comm_q, storage_q])
def handling_emission(self, aid):
    """
    Report whether a photon-emission handler is currently in flight for `aid`.

    :param aid: tuple (int, int)
        The absolute queue id to check if we are handling
    :return: bool
    """
    state = self.emission_handling_in_progress
    if state == self.EMIT_HANDLER_NONE:
        # Nothing is being handled at all
        return False
    if state == self.EMIT_HANDLER_CK:
        # Create-and-keep is no longer handled at emission time
        raise RuntimeError("Shouldn't be handling CK after emission now")
    # Measure-directly handler: compare against the aid of the pending measurement
    pending_aid = self.measurement_info[0][0]
    return pending_aid == aid
def clear_if_handling_emission(self, aid):
    """
    If a photon-emission handler is in flight for the given absolute queue id,
    resume the scheduler and drop the handler's bookkeeping.

    :param aid: tuple (int, int)
        The absolute queue id to check for
    :return:
    """
    if not self.handling_emission(aid):
        return
    # Allow scheduler to resume generation attempts
    self.scheduler.resume_generation()
    # Only create-and-keep handling keeps move metadata worth clearing;
    # measure-directly results are pruned elsewhere
    if self.emission_handling_in_progress == self.EMIT_HANDLER_CK:
        self.move_info = None
def _remove_old_measurement_results(self, aid):
cycle = self.scheduler.mhp_cycle_number
rtt_cycles = floor(self.mhp_service.get_midpoint_rtt_delay(self.node) / self.scheduler.mhp_cycle_period)
while self.measurement_results[aid]:
emission_cycle, basis, outcome = self.measurement_results[aid][0]
if emission_cycle >= cycle - rtt_cycles:
break
elif emission_cycle < cycle - rtt_cycles:
logger.warning("Failed to get expected reply, removing measurement result ")
self.measurement_results[aid].pop(0)
def _handle_measurement_outcome(self):
"""
Handles the measurement outcome from measureing the communication qubit
directly after the photon was emitted.
Calls back to complete MHP reply handling.
:return: None
"""
prgm = self._current_prgm
self._current_prgm_name = self.OP_NONE
self._current_prgm = None
outcome = prgm.output["m"][0]
# Saves measurement outcome
self.emission_handling_in_progress = self.EMIT_HANDLER_NONE
logger.debug("Measured {} on qubit".format(outcome))
# If the request did not time out during the measurement then store the result
try:
aid, basis, comm_q = self.measurement_info.pop(0)
except IndexError:
logger.error("No measurement info when handling measurement")
return
if self.scheduler.has_request(aid):
# Free the communication qubit
# comm_q = self.scheduler.curr_gen.comm_q
self.qmm.vacate_qubit(comm_q)
# Store the measurement result
ecycle = self.scheduler.mhp_cycle_number
self.measurement_results[aid].append((ecycle, outcome, basis))
logger.debug("Adding measurement outcome {} in basis {} for aid {}".format(outcome, basis, aid))
# If we received a reply for this attempt during measurement we can handle it immediately
if self.mhp_reply:
self.handle_reply_mhp(self.mhp_reply)
self.mhp_reply = None
elif self.scheduler.previous_request(aid):
logger.debug("Handling | |
# coding: utf-8
"""
BillForward REST API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class CouponbookApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """
    Create the API wrapper.

    :param api_client: optional pre-configured ApiClient; when omitted (or falsy)
        the shared client held by the global Configuration is used, creating it
        lazily on first use.
    """
    config = Configuration()
    chosen = api_client
    if not chosen:
        # Fall back to the configuration-wide client, creating it if needed
        if not config.api_client:
            config.api_client = ApiClient()
        chosen = config.api_client
    self.api_client = chosen
def create_coupon_book(self, coupon_book, **kwargs):
    """
    Create a coupon-book.
    {\"nickname\":\"Create a new coupon book\",\"request\":\"createCouponBookRequest.html\",\"response\":\"createCouponBookResponse.html\"}

    Synchronous by default; pass a `callback` keyword function to receive the
    response asynchronously (the request thread is returned instead of data).

    :param callback function: The callback function for asynchronous request. (optional)
    :param CouponBook coupon_book: The coupon-book object to be created. (required)
    :return: CouponBookPagedMetadata
        If the method is called asynchronously, returns the request thread.
    """
    # Public wrapper always wants just the payload, not the full HTTP response
    kwargs['_return_http_data_only'] = True
    # Both the sync and async (callback) paths delegate to the worker; its
    # return value (data, or the request thread when a callback is set) is
    # exactly what the caller should get.
    return self.create_coupon_book_with_http_info(coupon_book, **kwargs)
def create_coupon_book_with_http_info(self, coupon_book, **kwargs):
    """
    Create a coupon-book.
    {\"nickname\":\"Create a new coupon book\",\"request\":\"createCouponBookRequest.html\",\"response\":\"createCouponBookResponse.html\"}

    Issues the POST /coupon-books request.  Synchronous by default; pass a
    `callback` keyword function to receive the response asynchronously.

    :param callback function: The callback function for asynchronous request. (optional)
    :param CouponBook coupon_book: The coupon-book object to be created. (required)
    :return: CouponBookPagedMetadata
        If the method is called asynchronously, returns the request thread.
    """
    recognized = ['coupon_book', 'callback', '_return_http_data_only']
    # Reject any keyword this endpoint does not understand
    for key in kwargs:
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_coupon_book" % key
            )
    # verify the required parameter 'coupon_book' is set
    if coupon_book is None:
        raise ValueError("Missing the required parameter `coupon_book` when calling `create_coupon_book`")

    resource_path = '/coupon-books'.replace('{format}', 'json')

    header_params = {}
    # HTTP header `Accept` -- only sent when the client selects one
    accept = self.api_client.select_header_accept(['text/xml', 'application/xml', 'application/json'])
    if accept:
        header_params['Accept'] = accept
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    # No authentication required for this endpoint
    return self.api_client.call_api(resource_path, 'POST',
                                    {},  # path params
                                    {},  # query params
                                    header_params,
                                    body=coupon_book,
                                    post_params=[],
                                    files={},
                                    response_type='CouponBookPagedMetadata',
                                    auth_settings=[],
                                    callback=kwargs.get('callback'),
                                    _return_http_data_only=kwargs.get('_return_http_data_only'))
def delete_coupon_book(self, coupon_book_id, **kwargs):
    """
    Retire a coupon-book, specified by the coupon-book-ID parameter.
    {\"nickname\":\"Delete coupon book\",\"response\":\"deleteCouponBookByID.html\"}

    Synchronous by default; pass a `callback` keyword function to receive the
    response asynchronously (the request thread is returned instead of data).

    :param callback function: The callback function for asynchronous request. (optional)
    :param str coupon_book_id: ID of the coupon-book. (required)
    :param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
    :return: CouponBookPagedMetadata
        If the method is called asynchronously, returns the request thread.
    """
    # Public wrapper always wants just the payload, not the full HTTP response
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both return exactly what the worker returns
    return self.delete_coupon_book_with_http_info(coupon_book_id, **kwargs)
def delete_coupon_book_with_http_info(self, coupon_book_id, **kwargs):
    """
    Retire a coupon-book, specified by the coupon-book-ID parameter.
    {\"nickname\":\"Delete coupon book\",\"response\":\"deleteCouponBookByID.html\"}

    Issues the DELETE /coupon-books/{coupon-book-ID} request.  Synchronous by
    default; pass a `callback` keyword function for an asynchronous request.

    :param callback function: The callback function for asynchronous request. (optional)
    :param str coupon_book_id: ID of the coupon-book. (required)
    :param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
    :return: CouponBookPagedMetadata
        If the method is called asynchronously, returns the request thread.
    """
    recognized = ['coupon_book_id', 'organizations', 'callback', '_return_http_data_only']
    # Reject any keyword this endpoint does not understand
    for key in kwargs:
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_coupon_book" % key
            )
    # verify the required parameter 'coupon_book_id' is set
    if coupon_book_id is None:
        raise ValueError("Missing the required parameter `coupon_book_id` when calling `delete_coupon_book`")

    resource_path = '/coupon-books/{coupon-book-ID}'.replace('{format}', 'json')
    path_params = {'coupon-book-ID': coupon_book_id}

    query_params = {}
    if 'organizations' in kwargs:
        query_params['organizations'] = kwargs['organizations']

    header_params = {}
    # HTTP header `Accept` -- only sent when the client selects one
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(['text/plain'])

    # No authentication required for this endpoint
    return self.api_client.call_api(resource_path, 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=None,
                                    post_params=[],
                                    files={},
                                    response_type='CouponBookPagedMetadata',
                                    auth_settings=[],
                                    callback=kwargs.get('callback'),
                                    _return_http_data_only=kwargs.get('_return_http_data_only'))
def get_all_attachable_coupon_books(self, attachableness, has_code, **kwargs):
    """
    Returns a collection of attachable coupon-books.  An attachable coupon-book has at
    least one remaining use, and is not deleted.  By default 10 values are returned.
    Records are returned in natural order.
    {\"nickname\":\"Get all attachable coupon books\",\"response\":\"getCouponBookAllAttachable.html\"}

    Synchronous by default; pass a `callback` keyword function to receive the
    response asynchronously (the request thread is returned instead of data).

    :param callback function: The callback function for asynchronous request. (optional)
    :param bool attachableness: The attachableness of the coupon-book. (required)
    :param bool has_code: Whether the coupon-books have book codes or not. (required)
    :param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
    :param int offset: The offset from the first coupon-book to return.
    :param int records: The maximum number of coupon-books to return.
    :param str order_by: Specify a field used to order the result set.
    :param str order: The direction of any ordering, either ASC or DESC.
    :return: CouponBookPagedMetadata
        If the method is called asynchronously, returns the request thread.
    """
    # Public wrapper always wants just the payload, not the full HTTP response
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both return exactly what the worker returns
    return self.get_all_attachable_coupon_books_with_http_info(attachableness, has_code, **kwargs)
def get_all_attachable_coupon_books_with_http_info(self, attachableness, has_code, **kwargs):
"""
Returns a collection of attachable coupon-books. An attachable coupon-book has at least one remaining use, and is not deleted. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Get all attachable coupon books\",\"response\":\"getCouponBookAllAttachable.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_attachable_coupon_books_with_http_info(attachableness, has_code, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param bool attachableness: The attachableness of the coupon-book. (required)
:param bool has_code: Whether the coupon-books have book codes or not. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:param int offset: The offset from the first coupon-book to return.
:param int records: The maximum number of coupon-books to return.
:param str order_by: Specify a field used to order the result set.
:param str order: Ihe direction of any ordering, either ASC or DESC.
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['attachableness', 'has_code', 'organizations', 'offset', 'records', 'order_by', 'order']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_attachable_coupon_books" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'attachableness' is set
if ('attachableness' not in params) or (params['attachableness'] is None):
| |
#!/usr/bin/env python
"""Utility functions and classes for creating mixture models"""
########################################################################
# File: mixture_model.py
#
# Author: <NAME>
# History: 1/7/19 Created
########################################################################
import os
import sys
import numpy as np
import pandas as pd
from argparse import ArgumentParser
from sklearn.mixture import GaussianMixture
from timeit import default_timer as timer
from py3helpers.utils import load_json, create_dot_dict
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
from signalalign.hiddenMarkovModel import HmmModel, parse_assignment_file, parse_alignment_file
from signalalign.utils.sequenceTools import get_motif_kmers, find_modification_index_and_character
import tempfile
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from sklearn.datasets.samples_generator import make_blobs
def parse_args():
    """Parse command-line arguments; requires a --config/-c file path."""
    cli = ArgumentParser(description=__doc__)
    # required arguments
    cli.add_argument(
        '--config', '-c',
        required=True, action='store', dest='config',
        type=str, default=None, help="Path to config file")
    return cli.parse_args()
def get_nanopore_gauss_mixture(event_means, n_models):
    """Fit a Gaussian mixture with ``n_models`` components to event means.

    :param event_means: array of event means, shape (n_samples, 1)
    :param n_models: number of gaussians to fit
    :return: converged sklearn GaussianMixture model
    """
    mixture = GaussianMixture(n_models)
    mixture = mixture.fit(event_means)
    assert mixture.converged_, "Model has not converged"
    return mixture
def find_best_1d_gaussian_fit(x, max_n, aic=True):
    """Fit GaussianMixture models with 1..max_n components and pick the best.

    :param x: input data, shape (n_samples, n_features)
    :param max_n: maximum number of gaussians to try to fit (inclusive)
    :param aic: if True use AIC as the selection criterion, otherwise use BIC
    :return: the fitted GaussianMixture with the lowest criterion score
    """
    # include max_n itself: np.arange(1, max_n) stopped at max_n - 1, which
    # contradicted the documented "max number of gaussians to try to fit"
    component_counts = np.arange(1, max_n + 1)
    models = [GaussianMixture(n).fit(x) for n in component_counts]
    # use AIC or BIC for model selection (lower is better); scored into a
    # separate name so the `aic` flag parameter is no longer shadowed
    if aic:
        scores = [m.aic(x) for m in models]
    else:
        scores = [m.bic(x) for m in models]
    return models[np.argmin(scores)]
def get_mus_and_sigmas_1d(gaussian_model):
    """Extract (mu, sigma) for every component of a fitted 1D GaussianMixture.

    :param gaussian_model: an already converged GaussianMixture model
    :return: list of tuples with tup[0] = mu and tup[1] = sigma
    """
    assert gaussian_model.converged_, "Model has not converged"
    params = []
    for component, mu in enumerate(gaussian_model.means_):
        covariance = gaussian_model.covariances_[component]
        assert len(covariance) == 1, "This function only works for 1D gaussian mixture models"
        # sigma = sigma / gaussian_model.weights_[i]
        params.append((mu, np.sqrt(covariance[0])))
    return params
def closest_to_canonical(mixture_normals, canonical_mu):
    """Pop and return the normal distribution closest to ``canonical_mu``.

    :param mixture_normals: list of (mu, sigma) pairs; mutated in place
    :param canonical_mu: mean of the canonical model to compare against
    :return: (closest (mu, sigma), remaining list, distance to canonical_mu)
    """
    min_index = 0
    # start at +inf so the first component always seeds the search; the old
    # sentinel of 1000 silently returned the first component (not the
    # closest) whenever every distance exceeded 1000
    min_distance = float('inf')
    for i, normal in enumerate(mixture_normals):
        distance = abs(normal[0] - canonical_mu)
        if distance < min_distance:
            min_index = i
            min_distance = distance
    match = mixture_normals.pop(min_index)
    return match, mixture_normals, min_distance
def fit_model_to_kmer_dist(all_assignments, kmer, n_normals=2):
    """Fit a mixture model to the event-mean distribution of a single kmer.

    :param all_assignments: master table of assignments (must have columns
                            "kmer" and "level_mean")
    :param kmer: str that must be in the assignments table
    :param n_normals: number of normal gaussians to fit to the distribution
    :return: fitted GaussianMixture, or False when no alignments exist
    """
    samples = all_assignments.loc[all_assignments["kmer"] == kmer,
                                  "level_mean"].values.reshape(-1, 1)
    if len(samples) == 0:
        print("No alignments found for kmer: {}".format(kmer))
        return False
    return get_nanopore_gauss_mixture(samples, n_normals)
def generate_gaussian_mixture_model_for_motifs(model_h, assignments, all_kmer_pars, strand,
                                               output_dir, plot=False, name="", target_model=None, show=False):
    """Generate new hmm model using mixture model of assignment data for each required kmer given the set of motifs

    :param model_h: HmmModel
    :param strand: 't' for template or 'c' for complement
    :param plot: plot model data
    :param assignments: assignment DataFrame with "strand", "kmer" and "level_mean"
    :param all_kmer_pars: list of list of [canonical, modified] kmers
    :param output_dir: path to save figures, models and log file
    :param name: optional argument for naming the mixture model
    :param target_model: use for plotting expected distribution for modified kmer
    :param show: show plots interactively instead of saving them
    :return: DataFrame comparing canonical model/mixture and modified mixture
             parameters per kmer, sorted by distance (descending)
    """
    assert strand in ('t', 'c'), "Strand must be either 'c' or 't'. strand = {}".format(strand)
    assignments = assignments[assignments["strand"] == strand]
    canonical_mixture_components_comparison = []
    # BUG FIX: `name is not ""` compared identity with a str literal
    # (SyntaxWarning since Python 3.8, implementation-dependent result);
    # an equality check is what was intended
    if name != "":
        name += "_"
    output_model_path = os.path.join(output_dir, "{}_{}mixture_model.hmm".format(strand, name))
    for kmer_pair in all_kmer_pars:
        old_kmer = kmer_pair[0]
        new_kmer = kmer_pair[1]
        # fit a 2-component mixture to the canonical kmer's event means
        mixture_model = fit_model_to_kmer_dist(assignments, old_kmer, n_normals=2)
        if mixture_model:
            mixture_normals = get_mus_and_sigmas_1d(mixture_model)
            kmer_mean, kmer_sd = model_h.get_event_mean_gaussian_parameters(old_kmer)
            # the component closest to the canonical model keeps the canonical
            # label; the remaining component is assigned to the modified kmer
            match, other, distance = closest_to_canonical(mixture_normals, kmer_mean)
            # set parameters
            model_h.set_kmer_event_mean(new_kmer, other[0][0][0])
            model_h.set_kmer_event_sd(new_kmer, other[0][1][0])
            canonical_mixture_components_comparison.append(
                [old_kmer, kmer_mean, kmer_sd, match[0][0], match[1][0], other[0][0][0],
                 other[0][1][0], distance[0], strand])
            print(old_kmer, mixture_normals)
            if plot:
                # model_h.plot_kmer_distributions([old_kmer, new_kmer],
                #                                 alignment_file_data=assignments,
                #                                 savefig_dir=output_dir,
                #                                 name=strand)
                plot_output_dir = output_dir
                if show:
                    # a None save dir makes the plotting helper call plt.show()
                    plot_output_dir = None
                plot_mixture_model_distribution(old_kmer, new_kmer, kmer_mean, kmer_sd, match[0][0], match[1][0],
                                                other[0][0][0], other[0][1][0], strand, mixture_model=mixture_model,
                                                kmer_assignments=assignments,
                                                save_fig_dir=plot_output_dir,
                                                target_model=target_model)
    model_h.normalize(False, False)
    model_h.write(output_model_path)
    data = pd.DataFrame(canonical_mixture_components_comparison, columns=["kmer",
                                                                          "canonical_model_mean",
                                                                          "canonical_model_sd",
                                                                          "canonical_mixture_mean",
                                                                          "canonical_mixture_sd",
                                                                          "modified_mixture_mean",
                                                                          "modified_mixture_sd",
                                                                          "distance",
                                                                          "strand"])
    # largest canonical-vs-mixture distances first: those kmers are the ones
    # most worth manual review
    data.sort_values("distance", inplace=True, ascending=False)
    log_file = os.path.join(output_dir, "{}_distances.tsv".format(strand))
    data.to_csv(log_file, sep="\t", index=False)
    return data
def get_motif_kmer_pairs(motif_pair, k, alphabet="ATGC"):
    """Given a motif pair, create a list of all kmers which contain modification """
    pairs = []
    motif_kmers = get_motif_kmers(motif_pair, k, alphabet=alphabet)
    # only the characters are needed here; the modification index is
    # recomputed per kmer below
    _, old_char, new_char = find_modification_index_and_character(motif_pair[0], motif_pair[1])
    for new_kmer in motif_kmers:
        # reconstruct the canonical kmer by swapping the first occurrence of
        # the modified character back to its canonical base
        idx = new_kmer.find(new_char)
        pairs.append([new_kmer[0:idx] + old_char + new_kmer[idx + 1:], new_kmer])
    return pairs
def plot_mixture_model_distribution(canonical_kmer, modified_kmer, canonical_model_mean, canonical_model_sd,
                                    canonical_mixture_mean,
                                    canonical_mixture_sd, modified_mixture_mean, modified_mixture_sd,
                                    strand, mixture_model=None, target_model=None,
                                    kmer_assignments=None, save_fig_dir=None):
    """Plot normal distributions from mixture model and compare with original canonical model

    :param canonical_kmer: kmer to plot
    :param modified_kmer: modified kmer
    :param canonical_model_mean: canonical_model_mean
    :param canonical_model_sd: canonical_model_sd
    :param canonical_mixture_mean: canonical_mixture_mean
    :param canonical_mixture_sd: canonical_mixture_sd
    :param modified_mixture_mean: modified_mixture_mean
    :param modified_mixture_sd: modified_mixture_sd
    :param strand: template or complement ('t' or 'c'); only used in the
                   output file name
    :param mixture_model: an already fit GaussianMixture model
    :param target_model: model to compare the mixture to
    :param kmer_assignments: assignments with ("level_mean" and "kmer") named columns of DataFrame
    :param save_fig_dir: optional path to save figure; when falsy the figure
                         is shown interactively instead
    """
    fig = plt.figure(figsize=(12, 8))
    panel1 = plt.axes([0.1, 0.1, .6, .8])
    panel1.set_xlabel('pA')
    panel1.set_ylabel('Density')
    panel1.grid(color='black', linestyle='-', linewidth=1, alpha=0.5)
    panel1.set_title("Mixture Model Comparison: {}".format(canonical_kmer))
    # original canonical model
    x = np.linspace(canonical_model_mean - 4 * canonical_model_sd, canonical_model_mean + 4 * canonical_model_sd, 200)
    panel1.plot(x, norm.pdf(x, canonical_model_mean, canonical_model_sd), label="{} ONT model".format(canonical_kmer))
    # selected mixture model for canonical kmer
    x = np.linspace(canonical_mixture_mean - 4 * canonical_mixture_sd,
                    canonical_mixture_mean + 4 * canonical_mixture_sd, 200)
    panel1.plot(x, norm.pdf(x, canonical_mixture_mean, canonical_mixture_sd), label="{} mixture".format(canonical_kmer))
    # selected mixture model for modified kmer
    x = np.linspace(modified_mixture_mean - 4 * modified_mixture_sd,
                    modified_mixture_mean + 4 * modified_mixture_sd, 200)
    panel1.plot(x, norm.pdf(x, modified_mixture_mean, modified_mixture_sd), label="{} mixture".format(modified_kmer))
    # x-limits span all three curves at +/- 4 standard deviations
    x_min = min([canonical_mixture_mean - 4 * canonical_mixture_sd, modified_mixture_mean - 4 * modified_mixture_sd,
                 canonical_model_mean - 4 * canonical_model_sd])
    x_max = max([canonical_mixture_mean + 4 * canonical_mixture_sd, modified_mixture_mean + 4 * modified_mixture_sd,
                 canonical_model_mean + 4 * canonical_model_sd])
    panel1.set_xlim(x_min, x_max)
    if mixture_model is not None:
        # overlay the full mixture pdf and each weighted component pdf
        x = np.linspace(x_min, x_max, 1000).reshape(1000, 1)
        responsibilities = mixture_model.predict_proba(x)
        logprob = mixture_model.score_samples(x)
        pdf = np.exp(logprob)
        pdf_individual = responsibilities * pdf[:, np.newaxis]
        panel1.plot(x, pdf, '-k', label="mixture pdf")
        panel1.plot(x, pdf_individual, '--k', label="individual pdf")
    if kmer_assignments is not None:
        kmer_assignments = kmer_assignments.loc[kmer_assignments['kmer'] == canonical_kmer]
        kmer_data = kmer_assignments["level_mean"]
        # get event means and linspace in correct format
        x = np.asarray(kmer_data).reshape(len(kmer_data), 1)
        x_plot = np.linspace(x_min, x_max, 200)[:, np.newaxis]
        # get estimate for data
        if len(kmer_data) > 0:
            kde = KernelDensity(kernel="gaussian", bandwidth=0.5).fit(x)
            # estimate across the linspace
            log_dens = kde.score_samples(x_plot)
            panel1.plot(x_plot[:, 0], np.exp(log_dens), '-', label="Gaussian KDE Estimate")
            # rug plot: jitter the raw event means just below the x-axis
            panel1.plot(x[:, 0], -0.005 - 0.01 * np.random.random(x.shape[0]), '+k',
                        label="Event Means: {} points".format(len(kmer_data)))
    if target_model is not None:
        if target_model.has_hdp_model:
            # plot HDP predicted distribution
            kmer_id = target_model.get_kmer_index(modified_kmer)
            x = target_model.linspace
            hdp_y = target_model.all_posterior_pred[kmer_id]
            # skip plotting when the stored posterior does not line up with
            # the model's linspace
            if len(hdp_y) == len(x):
                panel1.plot(x, hdp_y, '-', label="{} HDP Distribution".format(modified_kmer))
        else:
            normal_mean, normal_sd = target_model.get_event_mean_gaussian_parameters(modified_kmer)
            x = np.linspace(normal_mean - 4 * normal_sd, normal_mean + 4 * normal_sd, 200)
            panel1.plot(x, norm.pdf(x, normal_mean, normal_sd), label="{} ONT Distribution".format(modified_kmer))
    panel1.legend(loc='upper right', fancybox=True, shadow=True)
    # option to save figure or just show it
    if save_fig_dir:
        out_name = "{}_{}_{}_{}.png".format(canonical_kmer, modified_kmer, strand, "mixture_model")
        out_path = os.path.join(save_fig_dir, out_name)
        plt.savefig(out_path)
    else:
        plt.show()
    plt.close(fig)
def main(config=None):
    """Fit per-kmer Gaussian mixture models to assignment data and write the
    resulting HMM models plus a distances log.

    :param config: optional already-loaded config dict; when None the config
                   path is taken from the command line (--config)
    """
    start = timer()
    if config is None:
        args = parse_args()
        # load model files
        assert os.path.exists(args.config), "Config file does not exist: {}".format(args.config)
        config = load_json(args.config)
    # wrap the config so its keys are attribute-accessible (args.model_path, ...)
    args = create_dot_dict(config)
    # get assignments and load model
    try:
        assignments = parse_assignment_file(args.assignments)
    except ValueError:
        # fall back to the alignment-file format when assignment parsing fails
        assignments = parse_alignment_file(args.assignments)
    model_h = HmmModel(args.model_path, rna=args.rna)
    target_model = None
    if args.target_hmm_model is not None:
        target_model = HmmModel(args.target_hmm_model, hdp_model_file=args.target_hdp_model, rna=args.rna)
    # generate kmers to match; a set of tuples deduplicates pairs shared by motifs
    all_kmer_pairs = set()
    for motif in args.motifs:
        all_kmer_pairs |= set(tuple(row) for row in get_motif_kmer_pairs(motif_pair=motif, k=model_h.kmer_length))
    data = generate_gaussian_mixture_model_for_motifs(model_h, assignments, all_kmer_pairs, args.strand,
                                                      args.output_dir, plot=args.plot, name="ccwgg",
                                                      target_model=target_model, show=args.show)
    # data = pd.read_csv(os.path.join(args.output_dir, "t_distances.tsv"), delimiter="\t")
    # data = data.ix[0]
    # plot_mixture_model_distribution(data["kmer"], data["canonical_model_mean"], data["canonical_model_sd"],
    #                                 data["canonical_mixture_mean"],
    #                                 data["canonical_mixture_sd"], data["modified_mixture_mean"],
    #                                 data["modified_mixture_sd"],
    #                                 data["strand"], kmer_assignments=assignments, save_fig_dir=None)
    stop = timer()
    print("Running Time = {} seconds".format(stop - start), file=sys.stderr)
##################################################
##################################################
##################################################
# t_base_model = "/Users/andrewbailey/data/ccwgg_new_em_trained_model/all_models/template_trained.hmm"
# c_base_model = "/Users/andrewbailey/data/ccwgg_new_em_trained_model/all_models/complement_trained.hmm"
# built_model = | |
299792458. # speed of light in m/s
ceq = spfit.spfitPrediction(fname = fegy)
ceq.read_egy()
ceq.read_cat()
self.egy_mhz = ceq.egy_content['egy']*c*100./1.e6
self.egy_qn = ceq.egy_content['qn']
self.cat_qn = ceq.cat_content['qn']
self.cat_mhz = ceq.cat_content['freq']
self.cat_logint = ceq.cat_content['lgint']
def allowedtrans(self, J = 2, ka = 0, kc = 2):
    '''
    Looks for allowed transitions from the input level.

    Parameters
    ----------
    J (int): J quantum number of requested level (default = 2)
    ka (int): ka quantum number of requested level (default = 0)
    kc (int): kc quantum number of requested level (default = 2)

    Returns
    -------
    atype : levels reachable by a-type transitions, rows [J, ka, kc, energy MHz]
    btype : levels reachable by b-type transitions, same row format
    ctype : levels reachable by c-type transitions, same row format

    Notes
    -----
    none
    '''
    # lazily load the .egy data if not yet read
    # NOTE(review): relies on self.readegy() existing on the class; the
    # visible code elsewhere calls read_egy() - confirm the method name
    if (len(self.egy_mhz)<3):
        self.readegy()
    Jto=J-1
    if (Jto<0):
        Jto=0
    atype = np.array([])
    btype = np.array([])
    ctype = np.array([])
    lvls = []
    # step back once so the loop's leading increment scans Jto from
    # max(J-1, 0) through J+1
    Jto = Jto-1
    while (Jto<J+1):
        Jto = Jto + 1
        for kato in np.arange (0,Jto+1):
            # candidate level with kc = J - ka
            kcto = Jto - kato
            if (kcto < Jto+1):
                # seed `lvls` with a flat row first, then vstack later rows
                if (len(lvls) > 1):
                    lvls = np.vstack((lvls,[Jto,kato,kcto]))
                if (len(lvls) < 1):
                    lvls = [Jto,kato,kcto]
            # partner level with kc = J + 1 - ka
            kcto = Jto + 1 - kato
            if (kcto < Jto+1):
                if (len(lvls) > 1):
                    lvls = np.vstack((lvls,[Jto,kato,kcto]))
                if (len(lvls) < 1):
                    lvls = [Jto,kato,kcto]
    for x in lvls:
        Jt = x[0]
        kat = x[1]
        kct = x[2]
        # look up this candidate level's energy in the .egy data
        # (fixed-width quantum-number fields: J=[0:3], ka=[3:7], kc=[7:10])
        for y in np.arange(len(self.egy_mhz)):
            Je = int(self.egy_qn[y][0:3])
            kae = int(self.egy_qn[y][3:7])
            kce = int(self.egy_qn[y][7:10])
            egt = 0.0
            if ((Je == Jt) and (kae == kat) and (kce == kct)):
                egt = self.egy_mhz[y]
                break
        # classify by the asymmetric-top selection rules (|dJ| <= 1):
        # a-type: dKa even / dKc odd; b-type: both odd; c-type: dKa odd / dKc even
        if ((abs(Jt-J)<2) and (np.mod(abs(kat-ka),2)==0) and (np.mod(abs(kct-kc),2)!=0)):
            if (len(atype) > 1):
                atype = np.vstack((atype,[Jt, kat, kct, egt]))
            if (len(atype) < 1):
                atype = [Jt,kat,kct, egt]
        elif ((abs(Jt-J)<2) and (np.mod(abs(kat-ka),2)!=0) and (np.mod(abs(kct-kc),2)!=0)):
            if (len(btype) > 1):
                btype = np.vstack((btype,[Jt, kat, kct, egt]))
            if (len(btype) < 1):
                btype = [Jt,kat,kct,egt]
        elif ((abs(Jt-J)<2) and (np.mod(abs(kat-ka),2)!=0) and (np.mod(abs(kct-kc),2)==0)):
            if (len(ctype) > 1):
                ctype = np.vstack((ctype,[Jt, kat, kct, egt]))
            if (len(ctype) < 1):
                ctype = [Jt,kat,kct,egt]
    return [atype,btype,ctype]
def checktranstype(self,Jl,kal,kcl,Ju,kau,kcu):
    '''
    Classify a transition as a-, b-, or c-type, or forbidden.

    Selection rules (asymmetric-top electric-dipole transitions, |dJ| <= 1):
    dKa even / dKc odd -> 'a'; dKa odd / dKc odd -> 'b';
    dKa odd / dKc even -> 'c'. Anything else returns 'f' (forbidden).
    '''
    if abs(Jl - Ju) >= 2:
        return 'f'
    ka_odd = np.mod(abs(kal - kau), 2) != 0
    kc_odd = np.mod(abs(kcu - kcl), 2) != 0
    if kc_odd:
        return 'b' if ka_odd else 'a'
    if ka_odd:
        return 'c'
    return 'f'
def getlevelenergy(self,J,ka,kc):
    '''
    Search for and return the energy (MHz) of the level (J, ka, kc) as
    listed in the .egy file. Returns None when the level is not found.
    '''
    # lazily load the energy file if it has not been read yet
    # NOTE(review): relies on self.readegy() existing on the class - confirm
    if len(self.egy_mhz) < 3:
        self.readegy()
    for idx in np.arange(len(self.egy_mhz)):
        qn = self.egy_qn[idx]
        # fixed-width quantum-number fields: J=[0:3], ka=[3:7], kc=[7:10];
        # the 0.5 tolerance makes integer comparison robust to float inputs
        if (abs(int(qn[0:3]) - J) < .5 and abs(int(qn[3:7]) - ka) < 0.5
                and abs(int(qn[7:10]) - kc) < 0.5):
            return self.egy_mhz[idx]
def searchintensity(self,quantumnumbers):
    '''
    Get the log intensity for a transition from the .cat file.

    quantumnumbers is assumed to have the format:
    [[Jlower, kalower, kclower, Jupper, kaupper, kcupper, transition energy]]

    Returns the log intensity of the first requested transition found in the
    catalog (matched in either direction), or -100. when none is present.
    '''
    # lazily load catalog data
    # NOTE(review): mirrors the original's self.readegy() call - confirm it
    # also populates the .cat arrays
    if len(self.cat_qn) < 3:
        self.readegy()
    for x in quantumnumbers:
        Jl, kal, kcl = int(x[0]), int(x[1]), int(x[2])
        Ju, kau, kcu = int(x[3]), int(x[4]), int(x[5])
        for y in np.arange(len(self.cat_qn)):
            # fixed-width catalog quantum numbers, two characters per field:
            # one state in [0:6], the other in [12:18]
            Ji = int(self.cat_qn[y][0:2])
            kai = int(self.cat_qn[y][2:4])
            kci = int(self.cat_qn[y][4:6])
            Jf = int(self.cat_qn[y][12:14])
            kaf = int(self.cat_qn[y][14:16])
            kcf = int(self.cat_qn[y][16:18])
            # accept the transition in either direction
            if Jl==Ji and kal==kai and kcl==kci and Ju==Jf and kau==kaf and kcu==kcf:
                return self.cat_logint[y]
            if Jl==Jf and kal==kaf and kcl==kcf and Ju==Ji and kau==kai and kcu==kci:
                return self.cat_logint[y]
    # BUG FIX: the original `elif (y==len(self.cat_qn))` was unreachable
    # (np.arange stops at len-1), so a missing transition returned None;
    # return the intended -100. sentinel instead
    return -100.
def searchlisdrv(self,drvt,tl,xl,xu,listenmax,listenmin,drivemax,drivemin):
    '''
    To Do
    - Add intensity information
    - Duplicate cycle checking and removing

    Checks if potential drive and listen transitions have the appropriate types
    and transition energies within the requested limits. It puts found cycles
    into the appropriate attribute.

    Please note: For three wave mixing cycle searches, it is expected that the
    transition type of the twist (xl -> xu) should differ from that of the lower
    twist level to the inputted rotational levels in tl (xl -> tl), and that the
    drive type transitions (drvt) should be of the third transition type. For example,
    if (xu -> xl) is a-type and the drvt = 'b', then all (xl -> tl) should be c-type.

    Inputs
    ------
    drvt : the desired type for the drive transition, a, b, or c (str)
    tl : array containing quantum numbers and energy of a rotational level.
         Note that tl can contain multiple rows.
         tl = [[J, ka, kc, level energy]]
    xl : array containing the quantum numbers of the lower level of the
         desired twist transition as well as the transition energy of
         the twist transition. xl is a single row array.
         xl = [transition energy, J, ka, kc]
    xu : array containing the quantum numbers of the upper level of the
         desired twist transition. xu is a single row array.
         xu = [J, ka, kc]
    listenmax : The *max and *min variables are scalars that set the transition
    listenmin   energy limits for searching for a cycle.
    drivemax
    drivemin

    Returns
    -------
    None (found cycles are appended to self.twists / self.listens / self.drives)
    '''
    twst = np.array([])
    lstn = np.array([])
    drve = np.array([])
    for y in tl:
        # listen-transition energy: candidate level minus lower twist level
        # (xl = [transition energy, J, ka, kc], hence indices 1..3)
        egl = y[3]-self.getlevelenergy(xl[1],xl[2],xl[3])
        if abs(egl) <= listenmax and abs(egl) >= listenmin:
            # the candidate -> upper-twist-level transition must be the
            # requested drive type to close the cycle
            drvtyp = self.checktranstype(y[0],y[1],y[2],xu[0],xu[1],xu[2])
            if drvtyp == drvt:
                # Check if transition energy is within limits
                egd = self.getlevelenergy(y[0],y[1],y[2])-self.getlevelenergy(xu[0],xu[1],xu[2])
                if (abs(egd) <= drivemax) and abs(egd) >= drivemin:
                    # Cycle found, get the intensities.
                    inttw = self.searchintensity(quantumnumbers = [[xl[1],xl[2],xl[3],xu[0],xu[1],xu[2],xl[0]]])
                    intls = self.searchintensity(quantumnumbers = [[xl[1],xl[2],xl[3],y[0],y[1],y[2],egl]])
                    intdr = self.searchintensity(quantumnumbers = [[y[0],y[1],y[2],xu[0],xu[1],xu[2],egd]])
                    # accumulate rows: seed flat lists first, vstack afterwards
                    if (len(twst)>1):
                        twst = np.vstack((twst,[xl[1],xl[2],xl[3],xu[0],xu[1],xu[2],xl[0],inttw]))
                        lstn = np.vstack((lstn,[xl[1],xl[2],xl[3],y[0],y[1],y[2],egl,intls]))
                        drve = np.vstack((drve,[y[0],y[1],y[2],xu[0],xu[1],xu[2],egd,intdr]))
                    if len(twst) < 2:
                        twst = [xl[1],xl[2],xl[3],xu[0],xu[1],xu[2],xl[0],inttw]
                        lstn = [xl[1],xl[2],xl[3],y[0],y[1],y[2],egl,intls]
                        drve = [y[0],y[1],y[2],xu[0],xu[1],xu[2],egd,intdr]
    # Put the found cycles, if any, into the appropriate attributes.
    if (len(self.twists)>1) and len(twst) > 2:
        self.twists = np.vstack((self.twists,twst))
        self.listens = np.vstack((self.listens,lstn))
        self.drives = np.vstack((self.drives,drve))
    elif (len(self.twists)<2) and len(twst) > 2:
        self.twists = twst
        self.listens = lstn
        self.drives = drve
def threewavesearch(self,rfmin=50.,rfmax=560.,drivemin=2000.,drivemax=6000.,
                    listenmin=2000.,listenmax=6560.,Jmin=0,Jmax=7):
    '''
    Looks for three-wave mixing cycles (twist / drive / listen transitions).

    Parameters
    ----------
    rfmin, rfmax : Minimum and maximum RF (twist) frequencies
    drivemin, drivemax : Minimum and Maximum drive frequencies.
    listenmin, listenmax : Minimum and Maximum listen frequencies.
    Jmin, Jmax : Minimum and Maximum J quantum numbers to consider in cycles.

    Returns
    -------
    none (candidate twists go to self.cantwists; found cycles are stored by
    searchlisdrv in self.twists / self.listens / self.drives)

    Notes
    -----
    none
    '''
    # First, check if the .egy file has been read.
    if (len(self.egy_mhz)<3):
        self.readegy()
    # remember the search window on the instance
    self.rfmin = rfmin
    self.rfmax = rfmax
    self.dmin = drivemin
    self.dmax = drivemax
    self.lmin = listenmin
    self.lmax = listenmax
    self.Jmin = Jmin
    self.Jmax = Jmax
    # Now search for possible twist transitions.
    twists = np.array([])
    for x in np.arange(len(self.cat_mhz)):
        # keep catalog lines inside the RF window with both J quantum
        # numbers inside [Jmin, Jmax] (fixed-width fields, 2 chars each)
        if ((self.cat_mhz[x] >= rfmin) and (self.cat_mhz[x] <= rfmax)
            and (int(self.cat_qn[x][0:2]) >= Jmin)
            and (int(self.cat_qn[x][12:14]) <= Jmax)):
            tegy = self.cat_mhz[x]
            tJi = int(self.cat_qn[x][0:2])
            tkai = int(self.cat_qn[x][2:4])
            tkci = int(self.cat_qn[x][4:6])
            tJf = int(self.cat_qn[x][12:14])
            tkaf = int(self.cat_qn[x][14:16])
            tkcf = int(self.cat_qn[x][16:18])
            # accumulate: first row as a flat list, later rows via vstack
            if (len(twists)>1):
                twists = np.vstack((twists,[tegy,tJi,tkai,tkci,tJf,tkaf,tkcf,self.cat_logint[x]]))
            elif (len(twists)<1):
                twists = [tegy,tJi,tkai,tkci,tJf,tkaf,tkcf,self.cat_logint[x]]
    self.cantwists = twists
    for x in twists:
        # check twist transition type
        trtyp = self.checktranstype(x[1],x[2],x[3],x[4],x[5],x[6])
        # Look for transitions from twist levels
        [atl,btl,ctl] = self.allowedtrans(x[1],x[2],x[3])
        [atu,btu,ctu] = self.allowedtrans(x[4],x[5],x[6])
        # merge the lower- and upper-level candidate lists per type
        atl = np.vstack((atl,atu))
        btl = np.vstack((btl,btu))
        ctl = np.vstack((ctl,ctu))
        # For a-type twists, give either the b or c-type allowed transitions and look
        # for c or b-type drives, respectively.
        if trtyp == 'a':
            # For b-type listens
            self.searchlisdrv('c',btl,x[0:4],x[4::1],listenmax,listenmin,drivemax,drivemin)
            # for c-type listens
            self.searchlisdrv('b',ctl,x[0:4],x[4::1],listenmax,listenmin,drivemax,drivemin)
        # For b-type twists, give either the a or c-type allowed transitions and look
        # for c or a-type drives, respectively.
        if trtyp == 'b':
            # For a-type listens
            self.searchlisdrv('c',atl,x[0:4],x[4::1],listenmax,listenmin,drivemax,drivemin)
            # for c-type listens
            self.searchlisdrv('a',ctl,x[0:4],x[4::1],listenmax,listenmin,drivemax,drivemin)
        # For c-type twists, give either the b or a-type allowed transitions and look
        # for a or b-type drives, respectively.
        if trtyp == 'c':
            # For b-type listens
            self.searchlisdrv('a',btl,x[0:4],x[4::1],listenmax,listenmin,drivemax,drivemin)
            # for c-type listens
            self.searchlisdrv('b',atl,x[0:4],x[4::1],listenmax,listenmin,drivemax,drivemin)
def plot(self,J1=1,ka1=0,kc1=1,J2=2,ka2=0,kc2=2,J3=2,ka3=1,kc3=1,cycle_num=1e9):
'''
Plot the cycle specified by the quantum numbers or the cycle_num param.
The quantum numbers are given as groups of 3 as Ji,kai,kci for the
i = 1, 2, and 3 levels. Alteratively, the parameter cycle_num may be given,
which corresponds to the index of the desired cycle contained in | |
<reponame>gonzalesMK/MetaHeuristic<filename>feature_selection/meta_base.py<gh_stars>10-100
from abc import ABCMeta
from warnings import warn
from itertools import compress
from random import sample
import random
import numpy as np
from sklearn.base import BaseEstimator, MetaEstimatorMixin, TransformerMixin, clone
from sklearn.model_selection import cross_val_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import make_scorer
from sklearn.utils.validation import check_array, check_is_fitted, column_or_1d
import six
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils import check_random_state
from sklearn.svm import SVC
from deap import base
from deap import tools
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_X_y
import copy
from timeit import time
from collections import Counter
class Fitness(base.Fitness):
    # DEAP fitness with two objectives: maximize the first value (CV score)
    # and very weakly minimize the second (weight -1e-5, the feature count).
    def __init__(self, weights=(1, -1e-5), values=(0, 0)):
        # NOTE(review): deap.base.Fitness normally expects `weights` as a
        # class attribute; setting it per-instance before the base __init__
        # appears deliberate here - confirm against the deap contract
        self.weights = weights
        super(Fitness, self).__init__(values)
class BaseMask(list, object):
    # A feature mask: a plain list of 0/1 flags carrying a DEAP-style fitness
    # so it can be used as an individual by the metaheuristics.
    def __init__(self, mask):
        self[:] = mask  # copy the mask values into this list instance
        self.fitness = Fitness((1, -1e-5), (0, 0))
class SelectorMixin(six.with_metaclass(ABCMeta, TransformerMixin)):
    """
    Transformer mixin that performs feature selection given a support mask

    This mixin provides a feature selector implementation with `transform`
    functionality given an implementation of `_get_best_mask` (the original
    docstring referred to a nonexistent `_get_best_mask_mask`).
    """

    @staticmethod
    def safe_mask(x, mask):
        """Return a mask which is safe to use on X.

        Parameters
        ----------
        x : {array-like, sparse matrix}
            Data on which to apply mask.
        mask : array
            Mask to be used on X; must have an integer or boolean dtype.

        Returns
        -------
        mask : ndarray

        Raises
        ------
        ValueError
            If the mask dtype is not integer/boolean or its length does not
            match the number of columns of ``x``.
        """
        mask = np.asarray(mask)
        # BUG FIX: np.bool was removed in NumPy 1.24; np.bool_ is the
        # supported spelling of what np.dtype(np.bool).type resolved to
        if (np.issubdtype(mask.dtype, np.unsignedinteger)
                or np.issubdtype(mask.dtype, np.signedinteger)
                or np.issubdtype(mask.dtype, np.bool_)):
            if x.shape[1] != len(mask):
                raise ValueError("X columns %d != mask length %d"
                                 % (x.shape[1], len(mask)))
        else:
            raise ValueError("Mask type is {} not allowed".format(mask.dtype))
        return mask

    def get_support(self, indices=False):
        """
        Get a mask, or integer index, of the features selected

        Parameters
        ----------
        indices : boolean (default False)
            If True, the return value will be an array of integers, rather
            than a boolean mask.

        Returns
        -------
        support : array
            An index that selects the retained features from a feature vector.
            If `indices` is False, this is a boolean array of shape
            [# input features], in which an element is True iff its
            corresponding feature is selected for retention. If `indices` is
            True, this is an integer array of shape [# output features] whose
            values are indices into the input feature vector.
        """
        mask = self._get_best_mask()
        if indices:
            # BUG FIX: the original cast these integer positions to a boolean
            # dtype, destroying the index information it had just computed
            return np.where(mask)[0]
        return np.asarray(mask, dtype=bool)

    def _get_best_mask(self):
        """
        Get the boolean mask indicating which features are selected

        Returns
        -------
        support : boolean array of shape [# input features]
            An element is True iff its corresponding feature is selected for
            retention.
        """
        check_is_fitted(self, 'best_')
        return np.asarray(self.best_[0][:], dtype=bool)

    def transform(self, X, mask=None):
        """Reduce X to the selected features.

        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            The input samples.
        mask : optional explicit mask; defaults to the fitted best mask.

        Returns
        -------
        X_r : array of shape [n_samples, n_selected_features]
            The input samples with only the selected features.
        """
        X = check_array(X, accept_sparse='csr')
        if mask is None:
            mask = self.get_support()
        if not mask.any():
            # deliberate best-effort: return an empty feature matrix with a
            # warning rather than crashing downstream pipelines
            warn("No features were selected: either the data is"
                 " too noisy or the selection test too strict.",
                 UserWarning)
            return np.empty(0).reshape((X.shape[0], 0))
        if len(mask) != X.shape[1]:
            raise ValueError("X has a different shape than during fitting.")
        return X[:, self.safe_mask(X, mask)]
class _BaseMetaHeuristic(BaseEstimator, SelectorMixin, MetaEstimatorMixin):
    def __init__(self, estimator=None, number_gen=20,
                 verbose=0, repeat=1, parallel=False,
                 make_logbook=False, random_state=None,
                 cv_metric_function=make_scorer(matthews_corrcoef),
                 ):
        # sklearn-style constructor: parameters are stored verbatim so
        # get_params/set_params keep working
        self.estimator = estimator            # wrapped classifier (cloned per evaluation)
        self.number_gen = number_gen          # number of generations to run
        self.verbose = verbose                # verbosity level
        self.repeat = repeat                  # number of independent repetitions
        self.parallel = parallel              # evaluate individuals in parallel
        self.make_logbook = make_logbook      # record per-generation statistics
        self.random_state = random_state      # seed for reproducibility
        self.cv_metric_function = cv_metric_function  # CV scorer (default MCC)
        # NOTE(review): seeding the *global* numpy RNG in __init__ is a side
        # effect sklearn estimators normally avoid - confirm this is intended
        np.random.seed(self.random_state)
def _gen_in(self):
"""
Generate a individual, DEAP function
"""
random_number = self._random_object.randint(1, self.n_features_ + 1)
zeros = (np.zeros([self.n_features_-random_number, ], dtype=int))
ones = np.ones([random_number, ], dtype=int)
return sample(list(np.concatenate((zeros, ones), axis=0)), self.n_features_)
def _evaluate(self, individual, X, y, cv=2):
"""
Evaluate method. Each individual is a mask of features.
Given one individual, train the estimator on the dataset and get the scores
Parameters
----------
individual: list [n_features]
The input individual to be evaluated
Return
----------
Score of the individual : turple( cross valid score, feature length score)
"""
# Select Features
features = list(compress(range(len(individual)), individual))
train = np.reshape([X[:, i] for i in features],
[len(features), len(X)]).T
if train.shape[1] == 0:
return 0, 1,
if individual[:] in self._all_solutions:
return self.all_solutions[individual[:]]
# Applying K-Fold Cross Validation
accuracies = cross_val_score(estimator=clone(self._estimator), X=train,
y=y, cv=cv,
scoring=self.cv_metric_function)
if hasattr(self, 'features_metric_function'):
if self.features_metric_function == None:
feature_score = sum(individual) / len(individual)
else:
feature_score = self.features_metric_function(individual)
else:
feature_score = sum(individual) / len(individual)
self._all_solutions[individual[:]] = (accuracies.mean(), feature_score)
return accuracies.mean(), feature_score
    def predict(self, X):
        # Predict class labels for X using the best feature mask found by fit().
        if not hasattr(self, "classes_"):
            raise ValueError('fit')  # estimator must be fitted first
        if self.normalize_:
            # NOTE(review): fit_transform re-fits the scaler on the *test*
            # data; predict should normally use self._sc_X.transform(X) with
            # the scaler fitted during fit() - confirm
            X = self._sc_X.fit_transform(X)
        X_ = self.transform(X)
        y_pred = self._estimator.predict(X_)
        # map integer-encoded predictions back to the original class labels
        return self.classes_.take(np.asarray(y_pred, dtype=np.intp))
# elif self.predict_with == 'all':
#
# predict_ = []
#
# for mask in self.mask_:
# self.estimator.fit(X=self.transform(self.X_, mask=mask), y=self.y_)
# X_ = self.transform(X, mask=mask)
# y_pred = self.estimator.predict(X_)
# predict_.append(self.classes_.take(np.asarray(y_pred, dtype=np.intp)))
# return np.asarray(predict_)
'''
@staticmethod # is different, but I don't think that we use it anyway
def score_func_to_gridsearch(estimator, X_test=None, y_test=None):
Function to be given as a scorer function to Grid Search Method.
It is going to transform the matrix os predicts generated by 'all' option
to an final accuracy score. Use a high value to CV
if not hasattr(estimator, 'fitnesses_'):
raise ValueError("Fit")
return sum([ i[0]-i[1] for i in estimator.fitnesses_]) / float(len(estimator.fitnesses_))
'''
    def _validate_targets(self, y):
        """Validate classification targets and encode the labels.

        Stores the sorted unique class labels in ``self.classes_`` and
        returns ``y`` re-encoded as indices into that array (as float64,
        C-contiguous).
        """
        y_ = column_or_1d(y, warn=True)
        check_classification_targets(y)
        cls, y = np.unique(y_, return_inverse=True)
        if len(cls) < 1:
            # Only reachable for an empty target vector.
            # print(y)
            raise ValueError("The number of classes has to be at least one;"
                             "got %d" % len(cls))
        self.classes_ = cls
        return np.asarray(y, dtype=np.float64, order='C')
def fit_transform(self, X, y, normalize=False, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, normalize, **fit_params).transform(X)
#@staticmethod
#def _get_accuracy(ind):
# return ind.fitness.values[0]
@staticmethod
def _get_features(ind):
return sum(ind)
def __getstate__(self):
self_dict = self.__dict__.copy()
if '_toolbox' in self_dict:
del self_dict['_toolbox']
if 'print_fnc' in self_dict:
del self_dict['print_fnc']
if 'stats' in self_dict:
del self_dict['stats']
return self_dict
    def __setstate__(self, state):
        """Restore pickled state; members dropped by __getstate__ (toolbox,
        print function, stats) are expected to be re-created before reuse."""
        self.__dict__.update(state)
def _make_stats(self):
stats_fit = tools.Statistics(key=lambda ind: ind.fitness.values[0])
stats_size = tools.Statistics(key=sum)
self.stats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
self.stats.register("avg", np.mean)
self.stats.register("std", np.std)
self.stats.register("min", np.min)
self.stats.register("max", np.max)
self.stats.register("25_percentile", np.percentile, q=25, interpolation='higher')
self.stats.register("50_percentile", np.percentile, q=50, interpolation='higher')
self.stats.register("75_percentile", np.percentile, q=75, interpolation='higher')
self.logbook = [tools.Logbook() for i in range(self.repeat)]
for i in range(self.repeat):
self.logbook[i].header = "gen", 'hallOfFame', "paretoFront", "time", "fitness", "size"
for i in range(self.repeat):
self.logbook[i].chapters["fitness"].header = self.stats.fields
self.logbook[i].chapters["size"].header = self.stats.fields
    def _set_dataset(self, X, y, normalize):
        """ Standard dataset pre-process:
                Using StandardScaler()
            Validating/setting the correct size of X and y
            Validating the number of classes of y

        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]
            Training set.
        y : numpy array of shape [n_samples]
            Target values.
        normalize : bool
            When True, standard-scale X; the fitted scaler is kept on
            self._sc_X so prediction-time data can be scaled consistently.

        Returns
        -------
        X : numpy array of shape [n_samples, n_features_new]
            Training set pre-processed
        y : numpy array of shape [n_samples]
            Training set pre-processed
        """
        if normalize:
            X = X.astype(np.float64, copy=False)
            self._sc_X = StandardScaler()
            X = np.asarray(X, dtype=np.float64)
            X = self._sc_X.fit_transform(X)
        # Remember the choice so predict()/transform() behave consistently.
        self.normalize_ = normalize
        y = self._validate_targets(y)
        X, y = check_X_y(X, y, dtype=np.float64,
                         order='C', accept_sparse='csr')
        self.n_features_ = X.shape[1]
        return X, y
    def _set_fit(self):
        # No-op in this base implementation; appears to be a hook for
        # subclasses to customize fitting -- confirm against subclasses.
        pass
def _make_generation_log(self, gen, repetition, pop, hof, pareto_front):
self.i_gen_pareto_.append(copy.deepcopy(pareto_front))
self.i_gen_hof_.append(copy.deepcopy(hof))
record = self.stats.compile(pop)
self.logbook[repetition].record(gen=gen, hallOfFame=copy.deepcopy(hof), paretoFront=copy.deepcopy(pareto_front), time=time.clock(), **record)
if self.verbose:
self._toolbox.print("********* Report {} ************* ".format(type(self).__name__), end='\n\n')
self._toolbox.print("\tmin\t25%\t50%\t75%\tmax")
self._toolbox.print("Score:\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}".format( record['fitness']['min'], record['fitness']['25_percentile'],record['fitness']['50_percentile'],record['fitness']['75_percentile'], record['fitness']['max']))
self._toolbox.print("Size:\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}".format( record['size']['min'], record['size']['25_percentile'],record['size']['50_percentile'],record['size']['75_percentile'], record['size']['max']), end='\n\n')
if( hasattr(self, "number_gen") and gen != 0 ):
eta = (time.clock() - self._initial_time) * self.number_gen / (gen+1)
self._toolbox.print("Gen: {}/{} Time Elapsed: {}min {}sec Estimated Time: {} min {} s".format(gen, self.number_gen,
np.floor((time.clock()-self._initial_time)/60),(time.clock()-self._initial_time)%60,
np.floor(eta/60), eta%60))
self._toolbox.print()
def _make_repetition_log(self, hof, pareto_front):
self.best_.update(hof)
self.best_pareto_front_.update(pareto_front)
if self.make_logbook:
self.pareto_front_.append( copy.deepcopy(pareto_front))
self.hof_.append(copy.deepcopy(hof))
self.gen_pareto_.append(self.i_gen_pareto_)
self.gen_hof_.append(self.i_gen_hof_)
self.i_gen_pareto_ = []
self.i_gen_hof_ = []
def _setup(self, X, y, normalize):
" Initialize the toolbox and statistical variables"
if(hasattr(self, '_all_solutions')):
del self._all_solutions
self._all_solutions = dict()
if(not hasattr(self, "_toolbox")):
self._toolbox = base.Toolbox()
if hasattr(self, "print_fnc"):
if (self.print_fnc == None):
self._toolbox.register("print", print)
else:
self._toolbox.register("print", self.print_fnc)
else:
self._toolbox.register("print", print)
if self.parallel:
from multiprocessing import Pool
self._toolbox.register("map", Pool().map)
else:
self._toolbox.register("map", map)
if self.make_logbook:
self._make_stats()
self.pareto_front_ = []
self.hof_ = []
self.gen_hof_ = []
self.gen_pareto_ = []
self.i_gen_hof_ = []
self.i_gen_pareto_ = []
if | |
# <reponame>yxzoro/pypy
"""The builtin str implementation"""
from rpython.rlib.objectmodel import (
compute_hash, compute_unique_id, import_from_mixin,
enforceargs)
from rpython.rlib.rstring import StringBuilder, UnicodeBuilder
from rpython.rlib.runicode import (
make_unicode_escape_function, str_decode_ascii, str_decode_utf_8,
unicode_encode_ascii, unicode_encode_utf_8, fast_str_decode_ascii,
unicode_encode_utf8_forbid_surrogates, SurrogateError)
from rpython.rlib import jit
from pypy.interpreter import unicodehelper
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef
from pypy.module.unicodedata import unicodedb
from pypy.objspace.std import newformat
from pypy.objspace.std.formatting import mod_format, FORMAT_UNICODE
from pypy.objspace.std.stringmethods import StringMethods
from pypy.objspace.std.util import IDTAG_SPECIAL, IDTAG_SHIFT
__all__ = ['W_UnicodeObject', 'encode_object', 'decode_object',
'unicode_from_object', 'unicode_to_decimal_w']
class W_UnicodeObject(W_Root):
import_from_mixin(StringMethods)
_immutable_fields_ = ['_value']
    @enforceargs(uni=unicode)
    def __init__(self, unistr):
        # Wraps an interpreter-level (RPython) unicode string.  _utf8 is a
        # lazily-filled cache of the UTF-8 encoding (see text_w).
        assert isinstance(unistr, unicode)
        self._value = unistr
        self._utf8 = None

    def __repr__(self):
        """representation for debugging purposes"""
        return "%s(%r)" % (self.__class__.__name__, self._value)

    def unwrap(self, space):
        # for testing
        return self._value

    def create_if_subclassed(self):
        # Return self directly unless this is an app-level subclass, in
        # which case a plain W_UnicodeObject copy of the value is made.
        if type(self) is W_UnicodeObject:
            return self
        return W_UnicodeObject(self._value)
    def is_w(self, space, w_other):
        # Implements the 'is' check for unicode objects.  Strings of
        # length <= 1 are treated as unique-ified, so equal short strings
        # compare as identical.
        if not isinstance(w_other, W_UnicodeObject):
            return False
        if self is w_other:
            return True
        if self.user_overridden_class or w_other.user_overridden_class:
            return False
        s1 = space.unicode_w(self)
        s2 = space.unicode_w(w_other)
        if len(s2) > 1:
            return s1 is s2
        else:  # strings of len <= 1 are unique-ified
            return s1 == s2

    def immutable_unique_id(self, space):
        # id() support consistent with is_w(): short strings get an id
        # derived from their single character (or 257 when empty), tagged
        # with IDTAG_SPECIAL so it cannot clash with ordinary object ids.
        if self.user_overridden_class:
            return None
        s = space.unicode_w(self)
        if len(s) > 1:
            uid = compute_unique_id(s)
        else:  # strings of len <= 1 are unique-ified
            if len(s) == 1:
                base = ~ord(s[0])  # negative base values
            else:
                base = 257  # empty unicode string: base value 257
            uid = (base << IDTAG_SHIFT) | IDTAG_SPECIAL
        return space.newint(uid)
    def unicode_w(self, space):
        # Raw interp-level unicode value.
        return self._value

    def text_w(self, space):
        # Return the UTF-8 encoding of the string, rejecting surrogates.
        # The encoding is memoized in self._utf8: conditional_call_elidable
        # only invokes g_encode_utf8 when the cache is still None.
        try:
            identifier = jit.conditional_call_elidable(
                self._utf8, g_encode_utf8, self._value)
        except SurrogateError as e:
            # Re-raise as an app-level UnicodeEncodeError with the usual
            # (encoding, object, start, end, reason) tuple.
            raise OperationError(space.w_UnicodeEncodeError,
                                 space.newtuple([space.newtext('utf-8'),
                                                 self,
                                                 space.newint(e.index-1),
                                                 space.newint(e.index),
                                                 space.newtext("surrogates not allowed")]))
        if not jit.isconstant(self):
            # NOTE(review): the cache write is skipped when self is a JIT
            # constant -- presumably to avoid mutating a green object in a
            # trace; confirm against rlib.jit documentation.
            self._utf8 = identifier
        return identifier
    def listview_unicode(self):
        # Fast path used by the object space; delegates to the module-level
        # _create_list_from_unicode helper (defined elsewhere in this file).
        return _create_list_from_unicode(self._value)

    def ord(self, space):
        # Implementation of ord(); only defined for length-1 strings.
        if len(self._value) != 1:
            raise oefmt(space.w_TypeError,
                        "ord() expected a character, but string of length %d "
                        "found", len(self._value))
        return space.newint(ord(self._value[0]))
    # -- Hooks parametrizing the shared implementation mixed in from
    # -- StringMethods (see import_from_mixin at the top of the class).

    def _new(self, value):
        return W_UnicodeObject(value)

    def _new_from_list(self, value):
        return W_UnicodeObject(u''.join(value))

    def _empty(self):
        return W_UnicodeObject.EMPTY

    def _len(self):
        return len(self._value)

    _val = unicode_w  # raw interp-level value accessor used by the mixin

    @staticmethod
    def _use_rstr_ops(space, w_other):
        # Always return true because we always need to copy the other
        # operand(s) before we can do comparisons
        return True

    @staticmethod
    def _op_val(space, w_other, allow_char=False):
        # Extract the unicode value of the other operand, or raise an
        # app-level TypeError for non-unicode operands.
        if isinstance(w_other, W_UnicodeObject):
            return w_other._value
        raise oefmt(space.w_TypeError,
                    "Can't convert '%T' object to str implicitly", w_other)

    def _chr(self, char):
        assert len(char) == 1
        return unicode(char)[0]

    _builder = UnicodeBuilder  # builder class used by the mixin

    def _generic_name(self):
        # Name used in error messages produced by the mixin.
        return "str"
    # Character-classification hooks for the StringMethods mixin: each
    # delegates to the unicodedb database for the single character 'ch'.

    def _isupper(self, ch):
        return unicodedb.isupper(ord(ch))

    def _islower(self, ch):
        return unicodedb.islower(ord(ch))

    def _isnumeric(self, ch):
        return unicodedb.isnumeric(ord(ch))

    def _istitle(self, ch):
        # Titlecase accepts both uppercase and explicit titlecase chars.
        return unicodedb.isupper(ord(ch)) or unicodedb.istitle(ord(ch))

    def _isspace(self, ch):
        return unicodedb.isspace(ord(ch))

    def _isalpha(self, ch):
        return unicodedb.isalpha(ord(ch))

    def _isalnum(self, ch):
        return unicodedb.isalnum(ord(ch))

    def _isdigit(self, ch):
        return unicodedb.isdigit(ord(ch))

    def _isdecimal(self, ch):
        return unicodedb.isdecimal(ord(ch))

    def _iscased(self, ch):
        return unicodedb.iscased(ord(ch))

    def _islinebreak(self, ch):
        return unicodedb.islinebreak(ord(ch))
    def _upper(self, ch):
        # Full (possibly multi-character) uppercase mapping.
        return u''.join([unichr(x) for x in
                         unicodedb.toupper_full(ord(ch))])

    def _lower_in_str(self, value, i):
        ch = value[i]
        if ord(ch) == 0x3A3:
            # Obscure special case: GREEK CAPITAL LETTER SIGMA lowercases
            # differently at the end of a word (final sigma).
            return self._handle_capital_sigma(value, i)
        return u''.join([unichr(x) for x in
                         unicodedb.tolower_full(ord(ch))])

    def _title(self, ch):
        # Full (possibly multi-character) titlecase mapping.
        return u''.join([unichr(x) for x in
                         unicodedb.totitle_full(ord(ch))])

    def _handle_capital_sigma(self, value, i):
        # U+03A3 is in the Final_Sigma context when, it is found like this:
        #\p{cased} \p{case-ignorable}* U+03A3 not(\p{case-ignorable}* \p{cased})
        # where \p{xxx} is a character with property xxx.
        # Scan left: the sigma must be preceded by a cased character,
        # skipping any case-ignorable ones.
        j = i - 1
        final_sigma = False
        while j >= 0:
            ch = value[j]
            if unicodedb.iscaseignorable(ord(ch)):
                j -= 1
                continue
            final_sigma = unicodedb.iscased(ord(ch))
            break
        if final_sigma:
            # Scan right: it must NOT be followed by a cased character.
            j = i + 1
            length = len(value)
            while j < length:
                ch = value[j]
                if unicodedb.iscaseignorable(ord(ch)):
                    j += 1
                    continue
                final_sigma = not unicodedb.iscased(ord(ch))
                break
        if final_sigma:
            return unichr(0x3C2)  # GREEK SMALL LETTER FINAL SIGMA
        else:
            return unichr(0x3C3)  # GREEK SMALL LETTER SIGMA

    def _newlist_unwrapped(self, space, lst):
        return space.newlist_unicode(lst)
    @staticmethod
    def descr_new(space, w_unicodetype, w_object=None, w_encoding=None,
                  w_errors=None):
        """Implementation of str.__new__ (unicode.__new__)."""
        encoding, errors = _get_encoding_and_errors(space, w_encoding,
                                                    w_errors)
        if w_object is None:
            w_value = W_UnicodeObject.EMPTY
        elif encoding is None and errors is None:
            # this is very quick if w_object is already a w_unicode
            w_value = unicode_from_object(space, w_object)
        else:
            if space.isinstance_w(w_object, space.w_unicode):
                raise oefmt(space.w_TypeError,
                            "decoding str is not supported")
            w_value = unicode_from_encoded_object(space, w_object,
                                                  encoding, errors)
        if space.is_w(w_unicodetype, space.w_unicode):
            return w_value

        # Subclass requested: allocate an instance of the subtype and copy
        # the computed value into it.
        assert isinstance(w_value, W_UnicodeObject)
        w_newobj = space.allocate_instance(W_UnicodeObject, w_unicodetype)
        W_UnicodeObject.__init__(w_newobj, w_value._value)
        return w_newobj
    @staticmethod
    def descr_maketrans(space, w_type, w_x, w_y=None, w_z=None):
        """Implementation of str.maketrans.

        With one argument, w_x must be a dict mapping characters (length-1
        strings) or ordinals to replacements.  With two, w_x and w_y are
        equal-length strings mapped pairwise.  The optional w_z lists
        characters to delete (mapped to None).
        """
        y = None if space.is_none(w_y) else space.unicode_w(w_y)
        z = None if space.is_none(w_z) else space.unicode_w(w_z)
        w_new = space.newdict()

        if y is not None:
            # x must be a string too, of equal length
            ylen = len(y)
            try:
                x = space.unicode_w(w_x)
            except OperationError as e:
                if not e.match(space, space.w_TypeError):
                    raise
                raise oefmt(space.w_TypeError,
                            "first maketrans argument must be a string if "
                            "there is a second argument")
            if len(x) != ylen:
                raise oefmt(space.w_ValueError,
                            "the first two maketrans arguments must have "
                            "equal length")
            # create entries for translating chars in x to those in y
            for i in range(len(x)):
                w_key = space.newint(ord(x[i]))
                w_value = space.newint(ord(y[i]))
                space.setitem(w_new, w_key, w_value)
            # create entries for deleting chars in z
            if z is not None:
                for i in range(len(z)):
                    w_key = space.newint(ord(z[i]))
                    space.setitem(w_new, w_key, space.w_None)
        else:
            # x must be a dict
            if not space.is_w(space.type(w_x), space.w_dict):
                raise oefmt(space.w_TypeError,
                            "if you give only one argument to maketrans it "
                            "must be a dict")
            # copy entries into the new dict, converting string keys to int keys
            w_iter = space.iter(space.call_method(w_x, "items"))
            while True:
                try:
                    w_item = space.next(w_iter)
                except OperationError as e:
                    if not e.match(space, space.w_StopIteration):
                        raise
                    break
                w_key, w_value = space.unpackiterable(w_item, 2)
                if space.isinstance_w(w_key, space.w_unicode):
                    # convert string keys to integer keys
                    key = space.unicode_w(w_key)
                    if len(key) != 1:
                        raise oefmt(space.w_ValueError,
                                    "string keys in translate table must be "
                                    "of length 1")
                    w_key = space.newint(ord(key[0]))
                else:
                    # just keep integer keys
                    try:
                        space.int_w(w_key)
                    except OperationError as e:
                        if not e.match(space, space.w_TypeError):
                            raise
                        raise oefmt(space.w_TypeError,
                                    "keys in translate table must be strings "
                                    "or integers")
                space.setitem(w_new, w_key, w_value)
        return w_new
    def descr_repr(self, space):
        # repr(): escape the value via the module-level _repr_function.
        chars = self._value
        size = len(chars)
        u = _repr_function(chars, size, "strict")
        return space.newunicode(u)

    def descr_str(self, space):
        if space.is_w(space.type(self), space.w_unicode):
            return self
        # Subtype -- return genuine unicode string with the same value.
        return space.newunicode(space.unicode_w(self))

    def descr_hash(self, space):
        x = compute_hash(self._value)
        x -= (x == -1)  # convert -1 to -2 without creating a bridge
        return space.newint(x)
    # Rich comparisons: a TypeError from _op_val (non-unicode operand)
    # surfaces as NotImplemented so the interpreter can try the reflected
    # operation on the other object.

    def descr_eq(self, space, w_other):
        try:
            res = self._val(space) == self._op_val(space, w_other)
        except OperationError as e:
            if e.match(space, space.w_TypeError):
                return space.w_NotImplemented
            raise
        return space.newbool(res)

    def descr_ne(self, space, w_other):
        try:
            res = self._val(space) != self._op_val(space, w_other)
        except OperationError as e:
            if e.match(space, space.w_TypeError):
                return space.w_NotImplemented
            raise
        return space.newbool(res)

    def descr_lt(self, space, w_other):
        try:
            res = self._val(space) < self._op_val(space, w_other)
        except OperationError as e:
            if e.match(space, space.w_TypeError):
                return space.w_NotImplemented
            raise
        return space.newbool(res)

    def descr_le(self, space, w_other):
        try:
            res = self._val(space) <= self._op_val(space, w_other)
        except OperationError as e:
            if e.match(space, space.w_TypeError):
                return space.w_NotImplemented
            raise
        return space.newbool(res)

    def descr_gt(self, space, w_other):
        try:
            res = self._val(space) > self._op_val(space, w_other)
        except OperationError as e:
            if e.match(space, space.w_TypeError):
                return space.w_NotImplemented
            raise
        return space.newbool(res)

    def descr_ge(self, space, w_other):
        try:
            res = self._val(space) >= self._op_val(space, w_other)
        except OperationError as e:
            if e.match(space, space.w_TypeError):
                return space.w_NotImplemented
            raise
        return space.newbool(res)
    def _parse_format_arg(self, space, w_kwds, __args__):
        # Copy keyword arguments into w_kwds, decoding the byte-string
        # keyword names as UTF-8; undecodable names are silently skipped.
        for i in range(len(__args__.keywords)):
            try:    # pff
                arg = __args__.keywords[i].decode('utf-8')
            except UnicodeDecodeError:
                continue   # uh, just skip that
            space.setitem(w_kwds, space.newunicode(arg),
                          __args__.keywords_w[i])

    def descr_format(self, space, __args__):
        # str.format(*args, **kwargs)
        w_kwds = space.newdict()
        if __args__.keywords:
            self._parse_format_arg(space, w_kwds, __args__)
        return newformat.format_method(space, self, __args__.arguments_w,
                                       w_kwds, True)

    def descr_format_map(self, space, w_mapping):
        # str.format_map(mapping): like format() but with an arbitrary mapping.
        return newformat.format_method(space, self, None, w_mapping, True)

    def descr__format__(self, space, w_format_spec):
        # str.__format__(spec): used by format() and f-strings.
        return newformat.run_formatter(space, w_format_spec, "format_string",
                                       self)

    def descr_mod(self, space, w_values):
        # str % values
        return mod_format(space, self, w_values, fmt_type=FORMAT_UNICODE)

    def descr_rmod(self, space, w_values):
        # values % str (reflected)
        return mod_format(space, w_values, self, fmt_type=FORMAT_UNICODE)
    def descr_translate(self, space, w_table):
        """str.translate(table): map each character through w_table.

        Table values may be None (delete the character), an int (new code
        point, must lie within range(sys.maxunicode+1)) or a string
        (replacement, possibly multi-character).  Characters missing from
        the table are passed through unchanged.
        """
        selfvalue = self._value
        w_sys = space.getbuiltinmodule('sys')
        maxunicode = space.int_w(space.getattr(w_sys,
                                               space.newtext("maxunicode")))
        result = []
        for unichar in selfvalue:
            try:
                w_newval = space.getitem(w_table, space.newint(ord(unichar)))
            except OperationError as e:
                if e.match(space, space.w_LookupError):
                    # Not in the table: keep the character as-is.
                    result.append(unichar)
                else:
                    raise
            else:
                if space.is_w(w_newval, space.w_None):
                    continue
                elif space.isinstance_w(w_newval, space.w_int):
                    newval = space.int_w(w_newval)
                    if newval < 0 or newval > maxunicode:
                        raise oefmt(space.w_ValueError,
                                    "character mapping must be in range(%s)",
                                    hex(maxunicode + 1))
                    result.append(unichr(newval))
                elif space.isinstance_w(w_newval, space.w_unicode):
                    result.append(space.unicode_w(w_newval))
                else:
                    raise oefmt(space.w_TypeError,
                                "character mapping must return integer, None "
                                "or str")
        return W_UnicodeObject(u''.join(result))
def descr_encode(self, space, w_encoding=None, w_errors=None):
encoding, errors = _get_encoding_and_errors(space, w_encoding,
| |
PackagesUploadNpm data:
:return: AlpinePackageUpload
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.packages_upload_npm_with_http_info(owner, repo, **kwargs)
else:
(data) = self.packages_upload_npm_with_http_info(owner, repo, **kwargs)
return data
    def packages_upload_npm_with_http_info(self, owner, repo, **kwargs):
        """
        Create a new npm package
        Create a new npm package
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.packages_upload_npm_with_http_info(owner, repo, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str owner: (required)
        :param str repo: (required)
        :param PackagesUploadNpm data:
        :return: AlpinePackageUpload
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Names accepted as keyword arguments (swagger-generated pattern).
        all_params = ['owner', 'repo', 'data']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() snapshot collects owner/repo/kwargs; kwargs entries are
        # validated against all_params and folded into params.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method packages_upload_npm" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'owner' is set
        if ('owner' not in params) or (params['owner'] is None):
            raise ValueError("Missing the required parameter `owner` when calling `packages_upload_npm`")
        # verify the required parameter 'repo' is set
        if ('repo' not in params) or (params['repo'] is None):
            raise ValueError("Missing the required parameter `repo` when calling `packages_upload_npm`")

        collection_formats = {}

        path_params = {}
        if 'owner' in params:
            path_params['owner'] = params['owner']
        if 'repo' in params:
            path_params['repo'] = params['repo']

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'data' in params:
            body_params = params['data']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['apikey']

        return self.api_client.call_api('/packages/{owner}/{repo}/upload/npm/', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='AlpinePackageUpload',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def packages_upload_nuget(self, owner, repo, **kwargs):
"""
Create a new NuGet package
Create a new NuGet package
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.packages_upload_nuget(owner, repo, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param PackagesUploadNuget data:
:return: AlpinePackageUpload
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.packages_upload_nuget_with_http_info(owner, repo, **kwargs)
else:
(data) = self.packages_upload_nuget_with_http_info(owner, repo, **kwargs)
return data
def packages_upload_nuget_with_http_info(self, owner, repo, **kwargs):
"""
Create a new NuGet package
Create a new NuGet package
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.packages_upload_nuget_with_http_info(owner, repo, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param PackagesUploadNuget data:
:return: AlpinePackageUpload
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['owner', 'repo', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method packages_upload_nuget" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'owner' is set
if ('owner' not in params) or (params['owner'] is None):
raise ValueError("Missing the required parameter `owner` when calling `packages_upload_nuget`")
# verify the required parameter 'repo' is set
if ('repo' not in params) or (params['repo'] is None):
raise ValueError("Missing the required parameter `repo` when calling `packages_upload_nuget`")
collection_formats = {}
path_params = {}
if 'owner' in params:
path_params['owner'] = params['owner']
if 'repo' in params:
path_params['repo'] = params['repo']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey']
return self.api_client.call_api('/packages/{owner}/{repo}/upload/nuget/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AlpinePackageUpload',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def packages_upload_p2(self, owner, repo, **kwargs):
"""
Create a new P2 package
Create a new P2 package
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.packages_upload_p2(owner, repo, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param PackagesUploadP2 data:
:return: AlpinePackageUpload
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.packages_upload_p2_with_http_info(owner, repo, **kwargs)
else:
(data) = self.packages_upload_p2_with_http_info(owner, repo, **kwargs)
return data
def packages_upload_p2_with_http_info(self, owner, repo, **kwargs):
"""
Create a new P2 package
Create a new P2 package
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.packages_upload_p2_with_http_info(owner, repo, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param PackagesUploadP2 data:
:return: AlpinePackageUpload
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['owner', 'repo', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method packages_upload_p2" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'owner' is set
if ('owner' not in params) or (params['owner'] is None):
raise ValueError("Missing the required parameter `owner` when calling `packages_upload_p2`")
# verify the required parameter 'repo' is set
if ('repo' not in params) or (params['repo'] is None):
raise ValueError("Missing the required parameter `repo` when calling `packages_upload_p2`")
collection_formats = {}
path_params = {}
if 'owner' in params:
path_params['owner'] = params['owner']
if 'repo' in params:
path_params['repo'] = params['repo']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey']
return self.api_client.call_api('/packages/{owner}/{repo}/upload/p2/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AlpinePackageUpload',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def packages_upload_python(self, owner, repo, **kwargs):
"""
Create a new Python package
Create a new Python package
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.packages_upload_python(owner, repo, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param PackagesUploadPython data:
:return: AlpinePackageUpload
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.packages_upload_python_with_http_info(owner, repo, **kwargs)
else:
(data) = self.packages_upload_python_with_http_info(owner, repo, **kwargs)
return data
def packages_upload_python_with_http_info(self, owner, repo, **kwargs):
"""
Create a new Python package
Create a new Python package
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.packages_upload_python_with_http_info(owner, repo, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param PackagesUploadPython data:
:return: AlpinePackageUpload
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['owner', 'repo', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method packages_upload_python" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'owner' is set
if ('owner' not in params) or (params['owner'] is None):
raise ValueError("Missing the required parameter `owner` when calling `packages_upload_python`")
# verify the required parameter | |
# <filename>src/api-server/venv/lib/python3.8/site-packages/aniso8601/tests/test_duration.py
# -*- coding: utf-8 -*-
# Copyright (c) 2021, <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import unittest
import aniso8601
from aniso8601.exceptions import ISOFormatError
from aniso8601.duration import (get_duration_resolution, parse_duration,
_parse_duration_prescribed,
_parse_duration_combined,
_parse_duration_prescribed_notime,
_parse_duration_prescribed_time,
_has_any_component)
from aniso8601.resolution import DurationResolution
from aniso8601.tests.compat import mock
class TestDurationParserFunctions(unittest.TestCase):
def test_get_duration_resolution_years(self):
self.assertEqual(get_duration_resolution('P1Y'), DurationResolution.Years)
self.assertEqual(get_duration_resolution('P1,5Y'), DurationResolution.Years)
self.assertEqual(get_duration_resolution('P1.5Y'), DurationResolution.Years)
def test_get_duration_resolution_months(self):
self.assertEqual(get_duration_resolution('P1Y2M'), DurationResolution.Months)
self.assertEqual(get_duration_resolution('P1M'), DurationResolution.Months)
self.assertEqual(get_duration_resolution('P1,5M'), DurationResolution.Months)
self.assertEqual(get_duration_resolution('P1.5M'), DurationResolution.Months)
def test_get_duration_resolution_weeks(self):
self.assertEqual(get_duration_resolution('P1W'), DurationResolution.Weeks)
self.assertEqual(get_duration_resolution('P1,5W'), DurationResolution.Weeks)
self.assertEqual(get_duration_resolution('P1.5W'), DurationResolution.Weeks)
def test_get_duration_resolution_days(self):
self.assertEqual(get_duration_resolution('P1Y2M3D'), DurationResolution.Days)
self.assertEqual(get_duration_resolution('P1Y2M3,5D'), DurationResolution.Days)
self.assertEqual(get_duration_resolution('P1Y2M3.5D'), DurationResolution.Days)
self.assertEqual(get_duration_resolution('P1D'), DurationResolution.Days)
self.assertEqual(get_duration_resolution('P1,5D'), DurationResolution.Days)
self.assertEqual(get_duration_resolution('P1.5D'), DurationResolution.Days)
def test_get_duration_resolution_hours(self):
self.assertEqual(get_duration_resolution('P1Y2M3DT4H'), DurationResolution.Hours)
self.assertEqual(get_duration_resolution('PT4H'), DurationResolution.Hours)
def test_get_duration_resolution_minutes(self):
self.assertEqual(get_duration_resolution('P1Y2M3DT4H5M'), DurationResolution.Minutes)
self.assertEqual(get_duration_resolution('PT4H5M'), DurationResolution.Minutes)
def test_get_duration_resolution_seconds(self):
self.assertEqual(get_duration_resolution('P1Y2M3DT4H54M6S'), DurationResolution.Seconds)
self.assertEqual(get_duration_resolution('P1Y2M3DT4H54M6,5S'), DurationResolution.Seconds)
self.assertEqual(get_duration_resolution('P1Y2M3DT4H54M6.5S'), DurationResolution.Seconds)
self.assertEqual(get_duration_resolution('PT4H54M6,5S'), DurationResolution.Seconds)
self.assertEqual(get_duration_resolution('PT4H54M6.5S'), DurationResolution.Seconds)
self.assertEqual(get_duration_resolution('PT0.0000001S'), DurationResolution.Seconds)
self.assertEqual(get_duration_resolution('PT2.0000048S'), DurationResolution.Seconds)
self.assertEqual(get_duration_resolution('P0003-06-04T12:30:05'), DurationResolution.Seconds)
self.assertEqual(get_duration_resolution('P0003-06-04T12:30:05.5'), DurationResolution.Seconds)
self.assertEqual(get_duration_resolution('P0001-02-03T14:43:59.9999997'), DurationResolution.Seconds)
    def test_parse_duration(self):
        #Each tuple pairs an ISO 8601 duration string with the keyword
        #arguments parse_duration() is expected to pass to the builder's
        #build_duration(); the builder is mocked so only the parsed
        #components are checked, not any duration arithmetic.
        testtuples = (('P1Y2M3DT4H54M6S', {'PnY': '1', 'PnM': '2',
                                           'PnW': None, 'PnD': '3',
                                           'TnH': '4', 'TnM': '54',
                                           'TnS': '6'}),
                      ('P1Y2M3DT4H54M6,5S', {'PnY': '1', 'PnM': '2',
                                             'PnW': None, 'PnD': '3',
                                             'TnH': '4', 'TnM': '54',
                                             'TnS': '6.5'}),
                      ('P1Y2M3DT4H54M6.5S', {'PnY': '1', 'PnM': '2',
                                             'PnW': None, 'PnD': '3',
                                             'TnH': '4', 'TnM': '54',
                                             'TnS': '6.5'}),
                      ('P1Y2M3D', {'PnY': '1', 'PnM': '2',
                                   'PnW': None, 'PnD': '3'}),
                      ('P1Y2M3,5D', {'PnY': '1', 'PnM': '2',
                                     'PnW': None, 'PnD': '3.5'}),
                      ('P1Y2M3.5D', {'PnY': '1', 'PnM': '2',
                                     'PnW': None, 'PnD': '3.5'}),
                      ('P1Y2M', {'PnY': '1', 'PnM': '2',
                                 'PnW': None, 'PnD': None}),
                      ('PT4H54M6,5S', {'PnY': None, 'PnM': None,
                                       'PnW': None, 'PnD': None,
                                       'TnH': '4', 'TnM': '54',
                                       'TnS': '6.5'}),
                      ('PT4H54M6.5S', {'PnY': None, 'PnM': None,
                                       'PnW': None, 'PnD': None,
                                       'TnH': '4', 'TnM': '54',
                                       'TnS': '6.5'}),
                      ('PT0.0000001S', {'PnY': None, 'PnM': None,
                                        'PnW': None, 'PnD': None,
                                        'TnH': None, 'TnM': None,
                                        'TnS': '0.0000001'}),
                      ('PT2.0000048S', {'PnY': None, 'PnM': None,
                                        'PnW': None, 'PnD': None,
                                        'TnH': None, 'TnM': None,
                                        'TnS': '2.0000048'}),
                      ('P1Y', {'PnY': '1', 'PnM': None,
                               'PnW': None, 'PnD': None}),
                      ('P1,5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
                                 'PnD': None}),
                      ('P1.5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
                                 'PnD': None}),
                      ('P1M', {'PnY': None, 'PnM': '1', 'PnW': None,
                               'PnD': None}),
                      ('P1,5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
                                 'PnD':None}),
                      ('P1.5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
                                 'PnD':None}),
                      ('P1W', {'PnY': None, 'PnM': None, 'PnW': '1',
                               'PnD': None}),
                      ('P1,5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
                                 'PnD': None}),
                      ('P1.5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
                                 'PnD': None}),
                      ('P1D', {'PnY': None, 'PnM': None, 'PnW': None,
                               'PnD': '1'}),
                      ('P1,5D', {'PnY': None, 'PnM': None, 'PnW': None,
                                 'PnD': '1.5'}),
                      ('P1.5D', {'PnY': None, 'PnM': None, 'PnW': None,
                                 'PnD': '1.5'}),
                      #Combined (datetime-like) format strings below
                      ('P0003-06-04T12:30:05', {'PnY': '0003', 'PnM': '06',
                                                'PnD': '04', 'TnH': '12',
                                                'TnM': '30', 'TnS': '05'}),
                      ('P0003-06-04T12:30:05.5', {'PnY': '0003', 'PnM': '06',
                                                  'PnD': '04', 'TnH': '12',
                                                  'TnM': '30', 'TnS': '05.5'}),
                      ('P0001-02-03T14:43:59.9999997', {'PnY': '0001',
                                                        'PnM': '02',
                                                        'PnD': '03',
                                                        'TnH': '14',
                                                        'TnM': '43',
                                                        'TnS':
                                                        '59.9999997'}))
        for testtuple in testtuples:
            #Patch the default builder so the parsed components can be
            #asserted via the build_duration() call arguments
            with mock.patch.object(aniso8601.duration.PythonTimeBuilder,
                                   'build_duration') as mockBuildDuration:
                mockBuildDuration.return_value = testtuple[1]
                result = parse_duration(testtuple[0])
                self.assertEqual(result, testtuple[1])
                mockBuildDuration.assert_called_once_with(**testtuple[1])
def test_parse_duration_mockbuilder(self):
mockBuilder = mock.Mock()
expectedargs = {'PnY': '1', 'PnM': '2', 'PnW': None, 'PnD': '3',
'TnH': '4', 'TnM': '54', 'TnS': '6'}
mockBuilder.build_duration.return_value = expectedargs
result = parse_duration('P1Y2M3DT4H54M6S', builder=mockBuilder)
self.assertEqual(result, expectedargs)
mockBuilder.build_duration.assert_called_once_with(**expectedargs)
def test_parse_duration_badtype(self):
testtuples = (None, 1, False, 1.234)
for testtuple in testtuples:
with self.assertRaises(ValueError):
parse_duration(testtuple, builder=None)
def test_parse_duration_nop(self):
with self.assertRaises(ISOFormatError):
#Duration must start with a P
parse_duration('1Y2M3DT4H54M6S', builder=None)
def test_parse_duration_weekcombination(self):
with self.assertRaises(ISOFormatError):
#Week designator cannot be combined with other time designators
#https://bitbucket.org/nielsenb/aniso8601/issues/2/week-designators-should-not-be-combinable
parse_duration('P1Y2W', builder=None)
def test_parse_duration_negative(self):
with self.assertRaises(ISOFormatError):
parse_duration('P-1Y', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P-2M', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P-3D', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P-T4H', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P-T54M', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P-T6S', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P-7W', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P-1Y2M3DT4H54M6S', builder=None)
def test_parse_duration_outoforder(self):
#Ensure durations are required to be in the correct order
#https://bitbucket.org/nielsenb/aniso8601/issues/7/durations-with-time-components-before-t
#https://bitbucket.org/nielsenb/aniso8601/issues/8/durations-with-components-in-wrong-order
with self.assertRaises(ISOFormatError):
parse_duration('P1S', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P1D1S', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P1H1M', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('1Y2M3D1SPT1M', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P1Y2M3D2MT1S', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P2M3D1ST1Y1M', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P1Y2M2MT3D1S', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P1D1Y1M', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('PT1S1H', builder=None)
def test_parse_duration_badstr(self):
testtuples = ('PPPPPPPPPPPPPPPPPPPPPPPPPPPP', 'PTT',
'PX7DDDTX8888UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU'
'UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU8888888888888888H$H',
'P1Y2M3X.4D', 'P1Y2M3.4XD', 'P1Y2M3DT4H5M6XS', 'PT4H5M6X.2S',
'bad', '')
for testtuple in testtuples:
with self.assertRaises(ISOFormatError):
parse_duration(testtuple, builder=None)
def test_parse_duration_prescribed(self):
testtuples = (('P1Y2M3DT4H54M6S', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3',
'TnH': '4', 'TnM': '54',
'TnS': '6'}),
('P1Y2M3DT4H54M6,5S', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3',
'TnH': '4', 'TnM': '54',
'TnS': '6.5'}),
('P1Y2M3DT4H54M6.5S', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3',
'TnH': '4', 'TnM': '54',
'TnS': '6.5'}),
('PT4H54M6,5S', {'PnY': None, 'PnM': None,
'PnW': None, 'PnD': None,
'TnH': '4', 'TnM': '54',
'TnS': '6.5'}),
('PT4H54M6.5S', {'PnY': None, 'PnM': None,
'PnW': None, 'PnD': None,
'TnH': '4', 'TnM': '54',
'TnS': '6.5'}),
('P1Y2M3D', {'PnY': '1', 'PnM': '2', ''
'PnW': None, 'PnD': '3'}),
('P1Y2M3,5D', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3.5'}),
('P1Y2M3.5D', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3.5'}),
('P1Y2M', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': None}),
('P1Y', {'PnY': '1', 'PnM': None,
'PnW': None, 'PnD': None}),
('P1,5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
'PnD': None}),
('P1.5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
'PnD': None}),
('P1M', {'PnY': None, 'PnM': '1', 'PnW': None,
'PnD': None}),
('P1,5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
'PnD':None}),
('P1.5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
'PnD':None}),
('P1W', {'PnY': None, 'PnM': None, 'PnW': '1',
'PnD': None}),
('P1,5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
'PnD': None}),
('P1.5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
'PnD': None}),
('P1D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1'}),
('P1,5D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1.5'}),
('P1.5D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1.5'}))
for testtuple in testtuples:
result = _parse_duration_prescribed(testtuple[0])
self.assertEqual(result, testtuple[1])
def test_parse_duration_prescribed_negative(self):
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P-1Y')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P-2M')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P-3D')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P-4W')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P-1Y2M3D')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P-T1H')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P-T2M')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P-T3S')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P-1Y2M3DT4H54M6S')
def test_parse_duration_prescribed_multiplefractions(self):
with self.assertRaises(ISOFormatError):
#Multiple fractions are not allowed
_parse_duration_prescribed('P1Y2M3DT4H5.1234M6.1234S')
def test_parse_duration_prescribed_middlefraction(self):
with self.assertRaises(ISOFormatError):
#Fraction only allowed on final component
_parse_duration_prescribed('P1Y2M3DT4H5.1234M6S')
def test_parse_duration_prescribed_suffixgarbage(self):
#Don't allow garbage after the duration
#https://bitbucket.org/nielsenb/aniso8601/issues/9/durations-with-trailing-garbage-are-parsed
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P1Dasdfasdf')
    def test_parse_duration_prescribed_notime(self):
        #Date-only prescribed durations: each tuple pairs an input string
        #with the component dict _parse_duration_prescribed_notime() should
        #return (no TnH/TnM/TnS keys, since there is no time part).
        testtuples = (('P1Y2M3D', {'PnY': '1', 'PnM': '2',
                                   'PnW': None, 'PnD': '3'}),
                      ('P1Y2M3,5D', {'PnY': '1', 'PnM': '2',
                                     'PnW': None, 'PnD': '3.5'}),
                      ('P1Y2M3.5D', {'PnY': '1', 'PnM': '2',
                                     'PnW': None, 'PnD': '3.5'}),
                      ('P1Y', {'PnY': '1', 'PnM': None,
                               'PnW': None, 'PnD': None}),
                      ('P1,5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
                                 'PnD': None}),
                      ('P1.5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
                                 'PnD': None}),
                      ('P1M', {'PnY': None, 'PnM': '1', 'PnW': None,
                               'PnD': None}),
                      ('P1,5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
                                 'PnD':None}),
                      ('P1.5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
                                 'PnD':None}),
                      ('P1W', {'PnY': None, 'PnM': None, 'PnW': '1',
                               'PnD': None}),
                      ('P1,5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
                                 'PnD': None}),
                      ('P1.5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
                                 'PnD': None}),
                      ('P1D', {'PnY': None, 'PnM': None, 'PnW': None,
                               'PnD': '1'}),
                      ('P1,5D', {'PnY': None, 'PnM': None, 'PnW': None,
                                 'PnD': '1.5'}),
                      ('P1.5D', {'PnY': None, 'PnM': None, 'PnW': None,
                                 'PnD': '1.5'}))
        for testtuple in testtuples:
            result = _parse_duration_prescribed_notime(testtuple[0])
            self.assertEqual(result, testtuple[1])
def test_parse_duration_prescribed_notime_timepart(self):
#Ensure no time part is allowed
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1S')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1D1S')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1H1M')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1Y2M3D4H')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1Y2M3D4H5S')
def test_parse_duration_prescribed_notime_outoforder(self):
#Ensure durations are required to be in the correct order
#https://bitbucket.org/nielsenb/aniso8601/issues/8/durations-with-components-in-wrong-order
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1H1M')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1D1Y1M')
def test_parse_duration_prescribed_notime_badstr(self):
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1S')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1D1S')
    def test_parse_duration_prescribed_time(self):
        #Prescribed durations WITH a time part: each tuple pairs an input
        #string with the full component dict (date keys default to None when
        #only the time half is present).
        testtuples = (('P1Y2M3DT4H54M6S', {'PnY': '1', 'PnM': '2',
                                           'PnW': None, 'PnD': '3',
                                           'TnH': '4', 'TnM': '54',
                                           'TnS': '6'}),
                      ('P1Y2M3DT4H54M6,5S', {'PnY': '1', 'PnM': '2',
                                             'PnW': None, 'PnD': '3',
                                             'TnH': '4', 'TnM': '54',
                                             'TnS': '6.5'}),
                      ('P1Y2M3DT4H54M6.5S', {'PnY': '1', 'PnM': '2',
                                             'PnW': None, 'PnD': '3',
                                             'TnH': '4', 'TnM': '54',
                                             'TnS': '6.5'}),
                      ('PT4H54M6,5S', {'PnY': None, 'PnM': None,
                                       'PnW': None, 'PnD': None,
                                       'TnH': '4', 'TnM': '54',
                                       'TnS': '6.5'}),
                      ('PT4H54M6.5S', {'PnY': None, 'PnM': None,
                                       'PnW': None, 'PnD': None,
                                       'TnH': '4', 'TnM': '54',
                                       'TnS': '6.5'}))
        for testtuple in testtuples:
            result = _parse_duration_prescribed_time(testtuple[0])
            self.assertEqual(result, testtuple[1])
def test_parse_duration_prescribed_time_timeindate(self):
#Don't allow time components in date half
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P1Y2M3D4HT54M6S')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P1Y2M3D6ST4H54M')
def test_parse_duration_prescribed_time_dateintime(self):
#Don't allow date components in time half
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P2M3DT1Y4H54M6S')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P1Y2MT3D4H54M6S')
def test_parse_duration_prescribed_time_outoforder(self):
#Ensure durations are required to be in the correct order
#https://bitbucket.org/nielsenb/aniso8601/issues/7/durations-with-time-components-before-t
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('1Y2M3D1SPT1M')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P1Y2M3D2MT1S')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P2M3D1ST1Y1M')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P1Y2M2MT3D1S')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('PT1S1H')
def test_parse_duration_prescribed_time_badstr(self):
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P1Y')
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P1Y1M')
def test_parse_duration_combined(self):
testtuples = (('P0003-06-04T12:30:05', {'PnY': '0003', 'PnM': '06',
'PnD': '04', 'TnH': '12',
'TnM': '30', 'TnS': '05'}),
('P0003-06-04T12:30:05,5', {'PnY': '0003', 'PnM': | |
home_team_data.pop("pitching_stats", None)
home_team_data["batting_stats"] = bat_stats_home
home_team_data["pitching_stats"] = pitch_stats_home
game_meta_info = self.boxscore.game_meta_info.as_dict()
game_meta_info.pop("__bbref_boxscore_meta__", None)
game_meta_info["game_time_hour"] = self.game_start_time.hour
game_meta_info["game_time_minute"] = self.game_start_time.minute
game_meta_info["game_date_time_str"] = self.game_start_time.strftime(DT_AWARE)
game_meta_info["umpires"] = self.boxscore.as_dict()["umpires"]
pitchfx_vs_bbref_audit = self.audit_pitchfx_vs_bbref_data(pitch_stats_away, pitch_stats_home)
updated_boxscore_dict = {
"bbref_game_id": self.bbref_game_id,
"boxscore_url": self.boxscore.boxscore_url,
"pitchfx_vs_bbref_audit": pitchfx_vs_bbref_audit,
"game_meta_info": game_meta_info,
"away_team_data": away_team_data,
"home_team_data": home_team_data,
"play_by_play_data": updated_innings_list,
"removed_pitchfx": self.all_removed_pfx,
"invalid_pitchfx": self.invalid_pitchfx,
"player_id_dict": self.player_id_dict,
}
return Result.Ok(updated_boxscore_dict)
def update_inning_with_combined_data(self, inning):
inning_events = [event for event in self.game_events_combined_data if event["inning_id"] == inning.inning_id]
inning_totals = {
"inning_total_runs": inning.inning_total_runs,
"inning_total_hits": inning.inning_total_hits,
"inning_total_errors": inning.inning_total_errors,
"inning_total_left_on_base": inning.inning_total_left_on_base,
"away_team_runs_after_inning": inning.away_team_runs_after_inning,
"home_team_runs_after_inning": inning.home_team_runs_after_inning,
}
return {
"inning_id": inning.inning_id,
"inning_label": inning.inning_label,
"begin_inning_summary": inning.begin_inning_summary,
"end_inning_summary": inning.end_inning_summary,
"inning_totals": inning_totals,
"inning_pitchfx_audit": self.generate_audit_report_for_events(inning_events),
"inning_events": inning_events,
}
    def generate_audit_report_for_events(self, game_events):
        """Aggregate per-at-bat PitchFX audit data for a set of game events.

        Each event dict must contain "at_bat_id", "is_complete_at_bat" and an
        "at_bat_pitchfx_audit" dict. Returns a summary dict with overall
        pitch counts, an error flag, per-category at-bat totals, and
        time-ordered at-bat id lists.
        """
        # sum the per-at-bat counts across all events
        pitch_count_bbref = sum(event["at_bat_pitchfx_audit"]["pitch_count_bbref"] for event in game_events)
        pitch_count_pitchfx = sum(event["at_bat_pitchfx_audit"]["pitch_count_pitchfx"] for event in game_events)
        patched_pitchfx_count = sum(event["at_bat_pitchfx_audit"]["patched_pitchfx_count"] for event in game_events)
        missing_pitchfx_count = sum(event["at_bat_pitchfx_audit"]["missing_pitchfx_count"] for event in game_events)
        removed_pitchfx_count = sum(event["at_bat_pitchfx_audit"]["removed_pitchfx_count"] for event in game_events)
        # a single at-bat error flags the entire event set
        pitchfx_error = any(event["at_bat_pitchfx_audit"]["pitchfx_error"] for event in game_events)
        # "complete" requires a complete at bat with no missing pitches and no errors
        at_bat_ids_pitchfx_complete = list(
            {
                event["at_bat_id"]
                for event in game_events
                if event["is_complete_at_bat"]
                and event["at_bat_pitchfx_audit"]["missing_pitchfx_count"] == 0
                and not event["at_bat_pitchfx_audit"]["pitchfx_error"]
            }
        )
        at_bat_ids_patched_pitchfx = list(
            {event["at_bat_id"] for event in game_events if event["at_bat_pitchfx_audit"]["patched_pitchfx_count"] > 0}
        )
        at_bat_ids_missing_pitchfx = list(
            {event["at_bat_id"] for event in game_events if event["at_bat_pitchfx_audit"]["missing_pitchfx_count"] > 0}
        )
        at_bat_ids_removed_pitchfx = list(
            {event["at_bat_id"] for event in game_events if event["at_bat_pitchfx_audit"]["removed_pitchfx_count"] > 0}
        )
        at_bat_ids_pitchfx_error = list(
            {event["at_bat_id"] for event in game_events if event["at_bat_pitchfx_audit"]["pitchfx_error"]}
        )
        # set de-duplication discards order; restore chronological ordering
        at_bat_ids_pitchfx_complete = self.order_at_bat_ids_by_time(at_bat_ids_pitchfx_complete)
        at_bat_ids_patched_pitchfx = self.order_at_bat_ids_by_time(at_bat_ids_patched_pitchfx)
        at_bat_ids_missing_pitchfx = self.order_at_bat_ids_by_time(at_bat_ids_missing_pitchfx)
        at_bat_ids_removed_pitchfx = self.order_at_bat_ids_by_time(at_bat_ids_removed_pitchfx)
        at_bat_ids_pitchfx_error = self.order_at_bat_ids_by_time(at_bat_ids_pitchfx_error)
        return {
            "pitchfx_error": pitchfx_error,
            "pitch_count_bbref": pitch_count_bbref,
            "pitch_count_pitchfx": pitch_count_pitchfx,
            "patched_pitchfx_count": patched_pitchfx_count,
            "missing_pitchfx_count": missing_pitchfx_count,
            "removed_pitchfx_count": removed_pitchfx_count,
            "total_at_bats_pitchfx_complete": len(at_bat_ids_pitchfx_complete),
            "total_at_bats_patched_pitchfx": len(at_bat_ids_patched_pitchfx),
            "total_at_bats_missing_pitchfx": len(at_bat_ids_missing_pitchfx),
            "total_at_bats_removed_pitchfx": len(at_bat_ids_removed_pitchfx),
            "total_at_bats_pitchfx_error": len(at_bat_ids_pitchfx_error),
            "at_bat_ids_pitchfx_complete": at_bat_ids_pitchfx_complete,
            "at_bat_ids_patched_pitchfx": at_bat_ids_patched_pitchfx,
            "at_bat_ids_missing_pitchfx": at_bat_ids_missing_pitchfx,
            "at_bat_ids_removed_pitchfx": at_bat_ids_removed_pitchfx,
            "at_bat_ids_pitchfx_error": at_bat_ids_pitchfx_error,
        }
    def update_all_pitch_stats(self):
        """Combine PitchFX logs with BBRef pitching stats for every pitcher.

        Returns a (away_team_pitching_stats, home_team_pitching_stats) tuple
        on success, or Result.Fail(...) when a PitchFX log has no matching
        boxscore entry. NOTE(review): the success path returns a bare tuple
        while the failure path returns a Result -- confirm callers handle
        both shapes.
        """
        pitch_stats_dict = {}
        # deepcopy so the boxscore's own stat lists are never mutated
        all_bbref_pitch_stats = deepcopy(self.boxscore.away_team_data.pitching_stats)
        all_bbref_pitch_stats.extend(deepcopy(self.boxscore.home_team_data.pitching_stats))
        for pitch_stats in all_bbref_pitch_stats:
            mlb_id = self.player_id_dict[pitch_stats.player_id_br]["mlb_id"]
            pitch_stats_dict[mlb_id] = pitch_stats
        updated_pitcher_ids = []
        updated_pitching_stats = defaultdict(list)
        for pfx_log in self.pitchfx_logs_for_game:
            # pop so that whatever remains afterwards is the set of pitchers
            # who appear in the boxscore but have no PitchFX log
            pitch_stats = pitch_stats_dict.pop(pfx_log.pitcher_id_mlb, None)
            if not pitch_stats:
                error = f"Error retrieving boxscore stats for pitch app: {pfx_log.pitch_app_id}"
                return Result.Fail(error)
            (team_id, updated_stats) = self.update_player_pitch_stats(pfx_log, pitch_stats)
            updated_pitching_stats[team_id].append(updated_stats)
            updated_pitcher_ids.append(pfx_log.pitcher_id_mlb)
        # handle the leftover pitchers with no PitchFX data
        for pitch_stats in pitch_stats_dict.values():
            (team_id, updated_stats) = self.update_player_pitch_stats_no_pfx(pitch_stats)
            updated_pitching_stats[team_id].append(updated_stats)
            pitcher_id_mlb = self.player_id_dict[pitch_stats.player_id_br].get("mlb_id")
            updated_pitcher_ids.append(pitcher_id_mlb)
        # a pitcher with ONLY invalid PitchFX (no boxscore entry at all) is
        # an unimplemented edge case -- fail loudly so a test can be written
        pitcher_ids_invalid_pfx = self.get_pitcher_ids_with_invalid_pfx()
        invalid_pitcher_ids = list(set(pitcher_ids_invalid_pfx) - set(updated_pitcher_ids))
        if invalid_pitcher_ids:
            raise NotImplementedError(
                "The code for this condition was removed, create a test case for "
                f"{self.bbref_game_id}, using these pitcher_ids {invalid_pitcher_ids}."
            )
        return (
            updated_pitching_stats[self.away_team_id_br],
            updated_pitching_stats[self.home_team_id_br],
        )
    def update_player_pitch_stats(self, pfx_log, pitch_stats):
        """Merge one pitcher's PitchFX log with their BBRef pitching stats.

        Returns a (team_id_bbref, updated_stats_dict) tuple, where the dict
        combines identifiers, per-inning/per-pitch-type counts, an audit of
        PitchFX vs. BBRef data, and the raw BBRef stats.
        """
        # serialize BBRef stats minus the identifiers that are surfaced
        # separately in updated_stats below
        bbref_data = pitch_stats.as_dict()
        bbref_data.pop("player_id_br", None)
        bbref_data.pop("player_team_id_br", None)
        bbref_data.pop("opponent_team_id_br", None)
        pitcher_events = [
            game_event
            for game_event in self.game_events_combined_data
            if game_event["pitcher_id_mlb"] == pfx_log.pitcher_id_mlb
        ]
        # PitchFX-derived batters-faced only counts complete at bats
        batters_faced_pfx = len([event for event in pitcher_events if event["is_complete_at_bat"]])
        audit_report = self.generate_audit_report_for_events(pitcher_events)
        pitch_count_by_inning = self.get_pitch_count_by_inning(pitcher_events)
        pitch_count_by_pitch_type = self.get_pitch_count_by_pitch_type(pitcher_events)
        invalid_pfx = self.get_invalid_pfx_data_for_pitcher(pfx_log.pitcher_id_mlb)
        # combine the event audit with the invalid-PitchFX summary
        pitch_app_pitchfx_audit = {
            "invalid_pitchfx": invalid_pfx["invalid_pitchfx"],
            "pitchfx_error": audit_report["pitchfx_error"],
            "pitch_count_bbref": audit_report["pitch_count_bbref"],
            "pitch_count_pitchfx": audit_report["pitch_count_pitchfx"],
            "batters_faced_bbref": pitch_stats.batters_faced,
            "batters_faced_pitchfx": batters_faced_pfx,
            "patched_pitchfx_count": audit_report["patched_pitchfx_count"],
            "missing_pitchfx_count": audit_report["missing_pitchfx_count"],
            "removed_pitchfx_count": audit_report["removed_pitchfx_count"],
            "invalid_pitchfx_count": invalid_pfx["invalid_pitchfx_count"],
            "total_at_bats_pitchfx_complete": audit_report["total_at_bats_pitchfx_complete"],
            "total_at_bats_patched_pitchfx": audit_report["total_at_bats_patched_pitchfx"],
            "total_at_bats_missing_pitchfx": audit_report["total_at_bats_missing_pitchfx"],
            "total_at_bats_removed_pitchfx": audit_report["total_at_bats_removed_pitchfx"],
            "total_at_bats_pitchfx_error": audit_report["total_at_bats_pitchfx_error"],
            "total_at_bats_invalid_pitchfx": invalid_pfx["total_at_bats_invalid_pitchfx"],
            "at_bat_ids_pitchfx_complete": audit_report["at_bat_ids_pitchfx_complete"],
            "at_bat_ids_patched_pitchfx": audit_report["at_bat_ids_patched_pitchfx"],
            "at_bat_ids_missing_pitchfx": audit_report["at_bat_ids_missing_pitchfx"],
            "at_bat_ids_removed_pitchfx": audit_report["at_bat_ids_removed_pitchfx"],
            "at_bat_ids_pitchfx_error": audit_report["at_bat_ids_pitchfx_error"],
            "at_bat_ids_invalid_pitchfx": invalid_pfx["at_bat_ids_invalid_pitchfx"],
        }
        updated_stats = {
            "pitcher_name": pfx_log.pitcher_name,
            "pitcher_id_mlb": pfx_log.pitcher_id_mlb,
            "pitcher_id_bbref": pitch_stats.player_id_br,
            "pitch_app_id": pfx_log.pitch_app_id,
            "pitcher_team_id_bb": pfx_log.pitcher_team_id_bb,
            "pitcher_team_id_bbref": pitch_stats.player_team_id_br,
            "opponent_team_id_bb": pfx_log.opponent_team_id_bb,
            "opponent_team_id_bbref": pitch_stats.opponent_team_id_br,
            "bb_game_id": pfx_log.bb_game_id,
            "bbref_game_id": pfx_log.bbref_game_id,
            "pitch_count_by_inning": pitch_count_by_inning,
            "pitch_count_by_pitch_type": pitch_count_by_pitch_type,
            "pitch_app_pitchfx_audit": pitch_app_pitchfx_audit,
            "bbref_data": bbref_data,
        }
        return (pitch_stats.player_team_id_br, updated_stats)
def get_pitch_count_by_inning(self, pitcher_events):
all_pfx = flatten_list2d([event["pitchfx"] for event in pitcher_events])
unordered = defaultdict(int)
for pfx in all_pfx:
unordered[pfx["inning"]] += 1
pitch_count_by_inning = OrderedDict()
for k in sorted(unordered.keys()):
pitch_count_by_inning[k] = unordered[k]
return pitch_count_by_inning
def get_pitch_count_by_pitch_type(self, pitcher_events):
all_pfx = flatten_list2d([event["pitchfx"] for event in pitcher_events])
pitch_count_unordered = defaultdict(int)
for pfx in all_pfx:
pitch_count_unordered[pfx["mlbam_pitch_name"]] += 1
pitch_count_ordered = OrderedDict()
ptype_tuples = [(pitch_type, count) for pitch_type, count in pitch_count_unordered.items()]
for t in sorted(ptype_tuples, key=lambda x: x[1], reverse=True):
pitch_count_ordered[t[0]] = t[1]
return pitch_count_ordered
def get_invalid_pfx_data_for_pitcher(self, pitcher_id_mlb):
invalid_pfx = {
"invalid_pitchfx": False,
"invalid_pitchfx_count": 0,
"total_at_bats_invalid_pitchfx": 0,
"at_bat_ids_invalid_pitchfx": [],
}
if not self.invalid_pitchfx:
return invalid_pfx
pfx_data = flatten_list2d(
[
at_bat_data["pitchfx"]
for invalid_pfx_at_bat_dict in self.invalid_pitchfx.values()
for at_bat_data in invalid_pfx_at_bat_dict.values()
if at_bat_data["pitcher_id_mlb"] == pitcher_id_mlb
]
)
if not pfx_data:
return invalid_pfx
at_bat_ids_invalid_pfx = list({pfx["at_bat_id"] for pfx in pfx_data})
at_bat_ids_invalid_pfx = self.order_at_bat_ids_by_park_sv_id(at_bat_ids_invalid_pfx)
return {
"invalid_pitchfx": True,
"invalid_pitchfx_count": len(pfx_data),
"total_at_bats_invalid_pitchfx": len(at_bat_ids_invalid_pfx),
"at_bat_ids_invalid_pitchfx": at_bat_ids_invalid_pfx,
}
def get_pitcher_ids_with_invalid_pfx(self):
pitcher_ids = {
at_bat_data["pitcher_id_mlb"]
for invalid_pfx_at_bat_dict in self.invalid_pitchfx.values()
for at_bat_data in invalid_pfx_at_bat_dict.values()
}
return list(pitcher_ids)
def order_at_bat_ids_by_park_sv_id(self, at_bat_ids):
park_sv_id_map = [
{
"at_bat_id": ab_id,
"park_sv_id": min(pfx.park_sv_id for pfx in self.all_pfx_data_for_game if pfx.at_bat_id == ab_id),
}
for ab_id in at_bat_ids
]
park_sv_id_map.sort(key=lambda x: x["park_sv_id"])
return [id_map["at_bat_id"] for id_map in park_sv_id_map]
    def update_player_pitch_stats_no_pfx(self, pitch_stats):
        """Build pitching-stat data for a pitcher with no PitchFX log.

        Every pitch from the BBRef stats is treated as missing PitchFX data.
        Returns a (team_id_bbref, updated_stats_dict) tuple.
        """
        bbref_id = pitch_stats.player_id_br
        mlb_id = self.player_id_dict[bbref_id].get("mlb_id", "")
        pitcher_events = [
            game_event for game_event in self.game_events_combined_data if game_event["pitcher_id_mlb"] == mlb_id
        ]
        # every at bat this pitcher appeared in is missing PitchFX data
        at_bat_ids_missing_pitchfx = list({event["at_bat_id"] for event in pitcher_events})
        at_bat_ids_missing_pitchfx = self.order_at_bat_ids_by_time(at_bat_ids_missing_pitchfx)
        # serialize BBRef stats minus the identifiers surfaced separately below
        bbref_data = pitch_stats.as_dict()
        bbref_data.pop("player_id_br", None)
        bbref_data.pop("player_team_id_br", None)
        bbref_data.pop("opponent_team_id_br", None)
        pitch_app_pitchfx_audit = {
            "invalid_pitchfx": False,
            "pitchfx_error": False,
            "pitch_count_bbref": pitch_stats.pitch_count,
            "pitch_count_pitchfx": 0,
            "batters_faced_bbref": pitch_stats.batters_faced,
            "batters_faced_pitchfx": 0,
            "patched_pitchfx_count": 0,
            # all BBRef pitches count as missing since there is no pfx log
            "missing_pitchfx_count": pitch_stats.pitch_count,
            "removed_pitchfx_count": 0,
            "invalid_pitchfx_count": 0,
            "total_at_bats_pitchfx_complete": 0,
            "total_at_bats_patched_pitchfx": 0,
            "total_at_bats_missing_pitchfx": len(at_bat_ids_missing_pitchfx),
            "total_at_bats_removed_pitchfx": 0,
            "total_at_bats_pitchfx_error": 0,
            "total_at_bats_invalid_pitchfx": 0,
            "at_bat_ids_pitchfx_complete": [],
            "at_bat_ids_patched_pitchfx": [],
            "at_bat_ids_missing_pitchfx": at_bat_ids_missing_pitchfx,
            "at_bat_ids_removed_pitchfx": [],
            "at_bat_ids_pitchfx_error": [],
            "at_bat_ids_invalid_pitchfx": [],
        }
        updated_stats = {
            "pitcher_name": self.player_id_dict[bbref_id].get("name", ""),
            "pitcher_id_mlb": mlb_id,
            "pitcher_id_bbref": bbref_id,
            "pitch_app_id": f"{self.bbref_game_id}_{mlb_id}",
            "pitcher_team_id_bb": get_brooks_team_id(pitch_stats.player_team_id_br),
            "pitcher_team_id_bbref": pitch_stats.player_team_id_br,
            "opponent_team_id_bb": get_brooks_team_id(pitch_stats.opponent_team_id_br),
            "opponent_team_id_bbref": pitch_stats.opponent_team_id_br,
            "bb_game_id": self.boxscore.bb_game_id,
            "bbref_game_id": self.bbref_game_id,
            # NOTE(review): these are empty lists here but OrderedDicts in
            # update_player_pitch_stats -- confirm consumers accept both
            "pitch_count_by_inning": [],
            "pitch_count_by_pitch_type": [],
            "pitch_app_pitchfx_audit": pitch_app_pitchfx_audit,
            "bbref_data": bbref_data,
        }
        return (pitch_stats.player_team_id_br, updated_stats)
def update_all_bat_stats(self):
all_bbref_bat_stats = self.boxscore.away_team_data.batting_stats
all_bbref_bat_stats.extend(self.boxscore.home_team_data.batting_stats)
updated_batting_stats = defaultdict(list)
for bat_stats in all_bbref_bat_stats:
bbref_id = bat_stats.player_id_br
mlb_id = self.player_id_dict[bbref_id]["mlb_id"]
(team_id, updated_stats) = self.update_player_bat_stats(bbref_id, mlb_id, bat_stats)
updated_batting_stats[team_id].append(updated_stats)
return (
updated_batting_stats[self.away_team_id_br],
updated_batting_stats[self.home_team_id_br],
)
def update_player_bat_stats(self, bbref_id, mlb_id, bat_stats):
bbref_data = bat_stats.as_dict()
bbref_data.pop("player_id_br", None)
batter_team_id_bbref = bbref_data.pop("player_team_id_br", None)
opponent_team_id_bbref = bbref_data.pop("opponent_team_id_br", None)
batter_events = [
game_event for game_event in self.game_events_combined_data if game_event["batter_id_mlb"] == mlb_id
]
all_at_bat_ids = [event["at_bat_id"] for event in batter_events]
incomplete_at_bat_ids = [event["at_bat_id"] for event in batter_events if not event["is_complete_at_bat"]]
updated_stats = {
"batter_name": self.player_id_dict[bbref_id]["name"],
"batter_id_mlb": mlb_id,
"batter_id_bbref": bbref_id,
"batter_team_id_bb": get_brooks_team_id(batter_team_id_bbref),
"batter_team_id_bbref": batter_team_id_bbref,
"opponent_team_id_bb": get_brooks_team_id(batter_team_id_bbref),
"opponent_team_id_bbref": opponent_team_id_bbref,
"total_pbp_events": len(all_at_bat_ids),
"total_incomplete_at_bats": len(incomplete_at_bat_ids),
"total_plate_appearances": bbref_data["plate_appearances"],
"at_bat_ids": all_at_bat_ids,
"incomplete_at_bat_ids": incomplete_at_bat_ids,
"bbref_data": bbref_data,
}
return (batter_team_id_bbref, updated_stats)
def audit_pitchfx_vs_bbref_data(self, away_team_pitching_stats, home_team_pitching_stats):
batters_faced_bbref_home = sum(
pitch_stats["pitch_app_pitchfx_audit"]["batters_faced_bbref"] for pitch_stats in home_team_pitching_stats
)
batters_faced_bbref_away = sum(
pitch_stats["pitch_app_pitchfx_audit"]["batters_faced_bbref"] for pitch_stats in away_team_pitching_stats
)
batters_faced_bbref = batters_faced_bbref_home + batters_faced_bbref_away
batters_faced_pitchfx_home = sum(
pitch_stats["pitch_app_pitchfx_audit"]["batters_faced_pitchfx"] for pitch_stats in home_team_pitching_stats
)
batters_faced_pitchfx_away = sum(
pitch_stats["pitch_app_pitchfx_audit"]["batters_faced_pitchfx"] for pitch_stats in away_team_pitching_stats
)
batters_faced_pitchfx = batters_faced_pitchfx_home + batters_faced_pitchfx_away
pitch_count_bbref_stats_table_home = sum(
pitch_stats["bbref_data"]["pitch_count"] for pitch_stats in home_team_pitching_stats
)
pitch_count_bbref_stats_table_away = sum(
pitch_stats["bbref_data"]["pitch_count"] for pitch_stats in away_team_pitching_stats
)
pitch_count_bbref_stats_table = pitch_count_bbref_stats_table_home + pitch_count_bbref_stats_table_away
audit_report = self.generate_audit_report_for_events(self.game_events_combined_data)
at_bat_ids_invalid_pfx = [
at_bat_ids
for invalid_pfx_at_bat_dict in self.invalid_pitchfx.values()
for at_bat_ids in invalid_pfx_at_bat_dict.keys()
]
at_bat_ids_invalid_pfx = self.order_at_bat_ids_by_park_sv_id(at_bat_ids_invalid_pfx)
total_pitches_invalid_pfx = sum(
len(at_bat_data["pitchfx"])
for invalid_pfx_at_bat_dict in self.invalid_pitchfx.values()
for at_bat_data in invalid_pfx_at_bat_dict.values()
)
invalid_pfx = {
"invalid_pitchfx": bool(self.invalid_pitchfx),
"invalid_pitchfx_count": total_pitches_invalid_pfx,
"total_at_bats_invalid_pitchfx": len(at_bat_ids_invalid_pfx),
"at_bat_ids_invalid_pitchfx": at_bat_ids_invalid_pfx,
}
return {
"invalid_pitchfx": invalid_pfx["invalid_pitchfx"],
"pitchfx_error": audit_report["pitchfx_error"],
"pitch_count_bbref_stats_table": pitch_count_bbref_stats_table,
"pitch_count_bbref": audit_report["pitch_count_bbref"],
"pitch_count_pitchfx": audit_report["pitch_count_pitchfx"],
"batters_faced_bbref": batters_faced_bbref,
"batters_faced_pitchfx": batters_faced_pitchfx,
"patched_pitchfx_count": audit_report["patched_pitchfx_count"],
"missing_pitchfx_count": audit_report["missing_pitchfx_count"],
"removed_pitchfx_count": audit_report["removed_pitchfx_count"],
"invalid_pitchfx_count": invalid_pfx["invalid_pitchfx_count"],
"total_at_bats_pitchfx_complete": audit_report["total_at_bats_pitchfx_complete"],
"total_at_bats_patched_pitchfx": audit_report["total_at_bats_patched_pitchfx"],
"total_at_bats_missing_pitchfx": audit_report["total_at_bats_missing_pitchfx"],
"total_at_bats_removed_pitchfx": audit_report["total_at_bats_removed_pitchfx"],
"total_at_bats_pitchfx_error": audit_report["total_at_bats_pitchfx_error"],
"total_at_bats_invalid_pitchfx": invalid_pfx["total_at_bats_invalid_pitchfx"],
"at_bat_ids_pitchfx_complete": audit_report["at_bat_ids_pitchfx_complete"],
"at_bat_ids_patched_pitchfx": audit_report["at_bat_ids_patched_pitchfx"],
"at_bat_ids_missing_pitchfx": audit_report["at_bat_ids_missing_pitchfx"],
"at_bat_ids_removed_pitchfx": audit_report["at_bat_ids_removed_pitchfx"],
"at_bat_ids_pitchfx_error": audit_report["at_bat_ids_pitchfx_error"],
"at_bat_ids_invalid_pitchfx": invalid_pfx["at_bat_ids_invalid_pitchfx"],
}
def gather_scraped_data_failed(self, error):
self.gather_scraped_data_success = False
self.update_db = False
self.write_json = False
self.error_messages.append(error)
result = Result.Fail("")
result.value = {
"gather_scraped_data_success": self.gather_scraped_data_success,
"error": "\n".join(self.error_messages),
}
return result
def combined_data_failed(self, error):
self.combined_data_success = False
self.write_json = False
self.error_messages.append(error)
result = Result.Fail("")
result.value = {
"gather_scraped_data_success": self.gather_scraped_data_success,
"combined_data_success": self.combined_data_success,
"error": "\n".join(self.error_messages),
}
return result
def update_game_status(self, result):
if self.update_db:
self.game_status.combined_data_success = 1 if result.success else 0
self.game_status.combined_data_fail = 0 if result.success else 1
self.db_session.commit()
return result
def save_combined_data(self, combined_data):
self.combined_data_success = True
self.combined_data = combined_data
if self.write_json:
return self.scraped_data.save_combined_game_data(combined_data)
return Result.Ok()
def save_combined_data_failed(self, error):
self.save_combined_data_success = False
self.error_messages.append(error)
result = Result.Fail("")
result.value = {
"gather_scraped_data_success": self.gather_scraped_data_success,
"combined_data_success": self.combined_data_success,
"save_combined_data_success": self.save_combined_data_success,
"error": self.error_messages,
}
return result
def check_update_db(self, value):
self.save_combined_data_success = True
if self.update_db:
return Result.Ok()
result = Result.Fail("")
result.value = {
"gather_scraped_data_success": self.gather_scraped_data_success,
"combined_data_success": self.combined_data_success,
"save_combined_data_success": self.save_combined_data_success,
"boxscore": self.combined_data,
}
return result
def update_pitch_app_status(self):
result = update_pitch_apps_with_combined_data(self.db_session, self.combined_data)
if result.failure:
self.error_messages.append(result.error)
result.value = {
"gather_scraped_data_success": self.gather_scraped_data_success,
"combined_data_success": self.combined_data_success,
"save_combined_data_success": self.save_combined_data_success,
"boxscore": self.combined_data,
"update_pitch_apps_success": False,
"error": self.error_messages,
}
return result
results = {
"gather_scraped_data_success": self.gather_scraped_data_success,
"combined_data_success": self.combined_data_success,
"save_combined_data_success": self.save_combined_data_success,
"boxscore": self.combined_data,
"update_pitch_apps_success": True,
"results": result.value,
}
| |
import csv
import logging
from apps.projects.models import ProjectBudgetLine
from bluebottle.payments.models import OrderPayment
import os
from registration.models import RegistrationProfile
from django.utils import timezone
from django.conf import settings
from apps.cowry_docdata.models import payment_method_mapping
from bluebottle.donations.models import Donation
from apps.recurring_donations.models import MonthlyDonor
from apps.vouchers.models import Voucher, VoucherStatuses
from apps.organizations.models import Organization, OrganizationMember
from bluebottle.fundraisers.models import FundRaiser
from apps.tasks.models import Task, TaskMember
from apps.projects.models import Project
from apps.members.models import Member
# Module-level logger shared by all of the Salesforce CSV export functions below.
logger = logging.getLogger('bluebottle.salesforce')
def generate_organizations_csv_file(path, loglevel):
    """Export all Organization objects to BLUE2SFDC_Organizations.csv.

    Writes one row per organization in the Salesforce bulk-import column
    layout to a CSV file under ``path``.  Per-row failures are logged and
    counted so one bad record does not abort the export.

    Parameters:
        path: Directory the CSV file is written into.
        loglevel: Logging level applied to the module logger.

    Returns:
        Tuple ``(success_count, error_count)``.
    """
    logger.setLevel(loglevel)
    error_count = 0
    success_count = 0
    filename = 'BLUE2SFDC_Organizations.csv'
    # Binary mode plus explicit .encode("utf-8") per field is the Python 2
    # csv-module convention.
    with open(os.path.join(path, filename), 'wb') as csv_outfile:
        csvwriter = csv.writer(csv_outfile, quoting=csv.QUOTE_MINIMAL)
        # Header row: Salesforce field API names, in row order.
        csvwriter.writerow(["Organization_External_Id__c",
                            "Name",
                            "BillingStreet",
                            "BillingCity",
                            "BillingState",
                            "BillingCountry",
                            "BillingPostalCode",
                            "E_mail_address__c",
                            "Phone",
                            "Website",
                            "Twitter__c",
                            "Facebook__c",
                            "Skype__c",
                            "Tags__c",
                            "Bank_account_name__c",
                            "Bank_account_address__c",
                            "Bank_account_postalcode__c",
                            "Bank_account_city__c",
                            "Bank_account_country__c",
                            "Bank_account_IBAN__c",
                            "Bank_SWIFT__c",
                            "Bank_account_number__c",
                            "Bank_bankname__c",
                            "Bank_address__c",
                            "Bank_postalcode__c",
                            "Bank_city__c",
                            "Bank_country__c",
                            "Organization_created_date__c",
                            "Deleted__c"])
        organizations = Organization.objects.all()
        logger.info("Exporting {0} Organization objects to {1}".format(organizations.count(), filename))
        for organization in organizations:
            try:
                # NOTE(review): raises TypeError if either address line is
                # None; such rows are skipped by the except below — confirm
                # that is intended.
                billing_street = organization.address_line1 + " " + organization.address_line2
                # Country FKs are optional; fall back to empty strings.
                if organization.country:
                    billing_country = organization.country.name
                else:
                    billing_country = ''
                if organization.account_bank_country:
                    bank_country = organization.account_bank_country.name
                else:
                    bank_country = ''
                if organization.account_holder_country:
                    bank_account_country = organization.account_holder_country.name
                else:
                    bank_account_country = ''
                # Tags are folded into one comma-separated string (reverse
                # iteration order, with a trailing ", ").
                tags = ""
                for tag in organization.tags.all():
                    tags = str(tag) + ", " + tags
                # Salesforce datetime format: ISO 8601 with a fixed .000Z suffix.
                deleted = ""
                if organization.deleted:
                    deleted = organization.deleted.strftime("%Y-%m-%dT%H:%M:%S.000Z")
                csvwriter.writerow([organization.id,
                                    organization.name.encode("utf-8"),
                                    billing_street.encode("utf-8"),
                                    organization.city[:40].encode("utf-8"),
                                    organization.state.encode("utf-8"),
                                    billing_country.encode("utf-8"),
                                    organization.postal_code.encode("utf-8"),
                                    organization.email.encode("utf-8"),
                                    organization.phone_number.encode("utf-8"),
                                    organization.website.encode("utf-8"),
                                    organization.twitter.encode("utf-8"),
                                    organization.facebook.encode("utf-8"),
                                    organization.skype.encode("utf-8"),
                                    tags.encode("utf-8"),
                                    organization.account_holder_name.encode("utf-8"),
                                    organization.account_holder_address.encode("utf-8"),
                                    organization.account_holder_postal_code.encode("utf-8"),
                                    organization.account_holder_city.encode("utf-8"),
                                    bank_account_country.encode("utf-8"),
                                    organization.account_iban.encode("utf-8"),
                                    organization.account_bic.encode("utf-8"),
                                    organization.account_number.encode("utf-8"),
                                    organization.account_bank_name.encode("utf-8"),
                                    organization.account_bank_address.encode("utf-8"),
                                    organization.account_bank_postal_code.encode("utf-8"),
                                    organization.account_bank_city.encode("utf-8"),
                                    bank_country.encode("utf-8"),
                                    organization.created.date().strftime("%Y-%m-%dT%H:%M:%S.000Z"),
                                    deleted])
                success_count += 1
            except Exception as e:
                # Log and continue with the next organization.
                error_count += 1
                logger.error("Error while saving organization id {0}: ".format(organization.id) + str(e))
    return success_count, error_count
def generate_users_csv_file(path, loglevel):
    """Export all Member objects to BLUE2SFDC_Users.csv.

    Writes one contact row per member in the Salesforce bulk-import column
    layout to a CSV file under ``path``.  Per-row failures are logged and
    counted so one bad record does not abort the export.

    Parameters:
        path: Directory the CSV file is written into.
        loglevel: Logging level applied to the module logger.

    Returns:
        Tuple ``(success_count, error_count)``.
    """
    logger.setLevel(loglevel)
    error_count = 0
    success_count = 0
    filename = 'BLUE2SFDC_Users.csv'
    # Binary mode plus explicit .encode("utf-8") per field is the Python 2
    # csv-module convention.
    with open(os.path.join(path, filename), 'wb') as csv_outfile:
        csvwriter = csv.writer(csv_outfile, quoting=csv.QUOTE_ALL)
        # Header row: Salesforce field API names, in row order.
        csvwriter.writerow(["Contact_External_Id__c",
                            "Category1__c",
                            "FirstName",
                            "LastName",
                            "Gender__c",
                            "Username__c",
                            "Active__c",
                            "Deleted__c",
                            "Member_since__c",
                            "Location__c",
                            "Birthdate",
                            "Email",
                            "Website__c",
                            "Picture_Location__c",
                            "Tags__c",
                            "MailingCity",
                            "MailingStreet",
                            "MailingCountry",
                            "MailingPostalCode",
                            "MailingState",
                            "Receive_newsletter__c",
                            "Primary_language__c",
                            "Available_to_share_time_and_knowledge__c",
                            "Available_to_donate__c",
                            "Availability__c",
                            "Facebook__c",
                            "Twitter__c",
                            "Skype__c",
                            "Has_Activated_Account__c",
                            "Date_Joined__c",
                            "Date_Last_Login__c",
                            "Account_number__c",
                            "Account_holder__c",
                            "Account_city__c",
                            "Account_IBAN__c",
                            "Account_Active_Recurring_Debit__c",
                            "Phone"])
        users = Member.objects.all()
        logger.info("Exporting {0} User objects to {1}".format(users.count(), filename))
        for user in users:
            try:
                # Mailing address fields are optional; default to empty strings.
                if user.address:
                    mailing_city = user.address.city
                    mailing_street = user.address.line1 + ' ' + user.address.line2
                    if user.address.country:
                        mailing_country = user.address.country.name
                    else:
                        mailing_country = ''
                    mailing_postal_code = user.address.postal_code
                    mailing_state = user.address.state
                else:
                    mailing_city = ''
                    mailing_street = ''
                    mailing_country = ''
                    mailing_postal_code = ''
                    mailing_state = ''
                # Salesforce requires a non-empty LastName; use a placeholder.
                if user.last_name.strip():
                    last_name = user.last_name
                else:
                    last_name = "1%MEMBER"
                gender = ""
                if user.gender == "male":
                    gender = Member.Gender.values['male'].title()
                elif user.gender == "female":
                    gender = Member.Gender.values['female'].title()
                # Salesforce datetime format: ISO 8601 with a fixed .000Z suffix.
                date_deleted = ""
                if user.deleted:
                    date_deleted = user.deleted.date().strftime("%Y-%m-%dT%H:%M:%S.000Z")
                birth_date = ""
                if user.birthdate:
                    birth_date = user.birthdate.strftime("%Y-%m-%dT%H:%M:%S.000Z")
                # Tags are folded into one comma-separated string (reverse
                # iteration order, with a trailing ", ").
                tags = ""
                for tag in user.tags.all():
                    tags = str(tag) + ", " + tags
                date_joined = ""
                member_since = ""
                if user.date_joined:
                    member_since = user.date_joined.date().strftime("%Y-%m-%dT%H:%M:%S.000Z")
                    date_joined = user.date_joined.strftime("%Y-%m-%dT%H:%M:%S.000Z")
                last_login = ""
                if user.last_login:
                    last_login = user.last_login.strftime("%Y-%m-%dT%H:%M:%S.000Z")
                # Activation: prefer the registration profile; when missing,
                # infer from is_active and whether the user ever logged in
                # after joining.
                has_activated = False
                try:
                    rp = RegistrationProfile.objects.get(id=user.id)
                    if rp.activation_key == RegistrationProfile.ACTIVATED:
                        has_activated = True
                except RegistrationProfile.DoesNotExist:
                    if not user.is_active and user.date_joined == user.last_login:
                        has_activated = False
                    else:
                        has_activated = True
                # Bank details come from the monthly-donor record, if any.
                try:
                    monthly_donor = MonthlyDonor.objects.get(user=user)
                    bank_account_city = monthly_donor.city
                    bank_account_holder = monthly_donor.name
                    bank_account_number = ''
                    bank_account_iban = monthly_donor.iban
                    bank_account_active = monthly_donor.active
                except MonthlyDonor.DoesNotExist:
                    bank_account_city = ''
                    bank_account_holder = ''
                    bank_account_number = ''
                    bank_account_iban = ''
                    bank_account_active = False
                availability = user.available_time
                csvwriter.writerow([user.id,
                                    Member.UserType.values[user.user_type].title(),
                                    user.first_name.encode("utf-8"),
                                    last_name.encode("utf-8"),
                                    gender,
                                    user.username.encode("utf-8"),
                                    int(user.is_active),
                                    date_deleted,
                                    member_since,
                                    user.location.encode("utf-8"),
                                    birth_date,
                                    user.email.encode("utf-8"),
                                    user.website.encode("utf-8"),
                                    user.picture,
                                    tags.encode("utf-8"),
                                    mailing_city.encode("utf-8"),
                                    mailing_street.encode("utf-8"),
                                    mailing_country.encode("utf-8"),
                                    mailing_postal_code.encode("utf-8"),
                                    mailing_state.encode("utf-8"),
                                    int(user.newsletter),
                                    user.primary_language.encode("utf-8"),
                                    int(user.share_time_knowledge),
                                    int(user.share_money),
                                    availability,
                                    user.facebook.encode("utf-8"),
                                    user.twitter.encode("utf-8"),
                                    user.skypename.encode("utf-8"),
                                    int(has_activated),
                                    date_joined,
                                    last_login,
                                    bank_account_number,
                                    bank_account_holder.encode("utf-8"),
                                    bank_account_city.encode("utf-8"),
                                    bank_account_iban.encode("utf-8"),
                                    int(bank_account_active),
                                    user.phone_number.encode("utf-8")])
                success_count += 1
            except Exception as e:
                # Log and continue with the next user.
                error_count += 1
                logger.error("Error while saving user id {0}: ".format(user.id) + str(e))
    return success_count, error_count
def generate_projects_csv_file(path, loglevel):
    """Export all Project objects to BLUE2SFDC_Projects.csv.

    Writes one row per project in the Salesforce bulk-import column layout
    to a CSV file under ``path``.

    Parameters:
        path: Directory the CSV file is written into.
        loglevel: Logging level applied to the module logger.

    Returns:
        Tuple ``(success_count, error_count)``.
    """
    logger.setLevel(loglevel)
    error_count = 0
    success_count = 0
    filename = 'BLUE2SFDC_Projects.csv'
    with open(os.path.join(path, filename), 'wb') as csv_outfile:
        csvwriter = csv.writer(csv_outfile, quoting=csv.QUOTE_ALL)
        # Header row: Salesforce field API names, in row order.
        csvwriter.writerow(["Project_External_ID__c",
                            "Project_name__c",
                            "NumerOfPeopleReachedDirect__c",
                            "VideoURL__c",
                            "Date_project_deadline__c",
                            "Is_Campaign__c",
                            "Amount_requested__c",
                            "Amount_at_the_moment__c",
                            "Amount_still_needed__c",
                            "Allow_Overfunding__c",
                            "Date_plan_submitted",
                            "Date_Started__c",
                            "Date_Ended__c",
                            "Date_Funded__c",
                            "Picture_Location__c",
                            "Project_Owner__c",
                            "Organization__c",
                            "Country_in_which_the_project_is_located__c",
                            "Theme__c",
                            "Status_project__c",
                            "Project_created_date__c",
                            "Project_updated_date__c",
                            "Tags__c",
                            "Partner_Organization__c",
                            "Slug__c",
                            "Region__c",
                            "Sub_region__c",
                            "Donation_total__c",
                            "Donation_oo_total__c",
                            "Supporter_count__c",
                            "Supporter_oo_count__c"])
        projects = Project.objects.all()
        logger.info("Exporting {0} Project objects to {1}".format(projects.count(), filename))
        for project in projects:
            # BUG FIX: the loop body was not wrapped in try/except, so
            # error_count was dead code and one bad project aborted the whole
            # export — inconsistent with every other exporter in this module.
            try:
                # Optional FK lookups fall back to empty strings.
                country = ''
                region = ''
                sub_region = ''
                if project.country:
                    country = project.country.name.encode("utf-8")
                    region = project.country.subregion.region.name.encode("utf-8")
                    sub_region = project.country.subregion.name.encode("utf-8")
                status = ''
                if project.status:
                    status = project.status.name.encode("utf-8")
                organization_id = ''
                if project.organization:
                    organization_id = project.organization.id
                video_url = ''
                if project.video_url:
                    video_url = project.video_url
                tags = ""
                # Salesforce datetime format: ISO 8601 with a fixed .000Z suffix.
                deadline = ""
                date_submitted = ""
                date_started = ""
                date_ended = ""
                date_funded = ""
                theme = ""
                if project.deadline:
                    deadline = project.deadline.date().strftime("%Y-%m-%dT%H:%M:%S.000Z")
                if project.date_submitted:
                    date_submitted = project.date_submitted.date().strftime("%Y-%m-%dT%H:%M:%S.000Z")
                if project.campaign_started:
                    date_started = project.campaign_started.date().strftime("%Y-%m-%dT%H:%M:%S.000Z")
                if project.campaign_ended:
                    date_ended = project.campaign_ended.date().strftime("%Y-%m-%dT%H:%M:%S.000Z")
                if project.campaign_funded:
                    date_funded = project.campaign_funded.date().strftime("%Y-%m-%dT%H:%M:%S.000Z")
                # Tags are folded into one comma-separated string (reverse
                # iteration order, with a trailing ", ").
                for tag in project.tags.all():
                    tags = str(tag) + ", " + tags
                if project.theme:
                    theme = project.theme.name
                partner_organization_name = "-"
                if project.partner_organization:
                    partner_organization_name = project.partner_organization.name
                csvwriter.writerow([project.id,
                                    project.title.encode("utf-8"),
                                    project.reach,
                                    video_url.encode("utf-8"),
                                    deadline,
                                    int(project.is_campaign),
                                    "%01.2f" % (project.amount_asked or 0),
                                    "%01.2f" % (project.amount_donated or 0),
                                    "%01.2f" % (project.amount_needed or 0),
                                    int(project.allow_overfunding),
                                    date_submitted,
                                    date_started,
                                    date_ended,
                                    date_funded,
                                    project.image,
                                    project.owner.id,
                                    organization_id,
                                    country,
                                    theme,
                                    status,
                                    project.created.strftime("%Y-%m-%dT%H:%M:%S.000Z"),
                                    project.updated.strftime("%Y-%m-%dT%H:%M:%S.000Z"),
                                    tags[:255],
                                    partner_organization_name.encode("utf-8"),
                                    project.slug,
                                    region,
                                    sub_region,
                                    # NOTE(review): both donation totals use the
                                    # same status filter — confirm the "oo"
                                    # column is intentionally identical.
                                    "%01.2f" % ((project.get_money_total(['paid', 'pending'])) / 100),
                                    "%01.2f" % ((project.get_money_total(['paid', 'pending'])) / 100),
                                    project.supporters_count(),
                                    project.supporters_count(True)])
                success_count += 1
            except Exception as e:
                # Log and continue with the next project.
                error_count += 1
                logger.error("Error while saving project id {0}: ".format(project.id) + str(e))
    return success_count, error_count
def generate_projectbudgetlines_csv_file(path, loglevel):
    """Export all ProjectBudgetLine objects to BLUE2SFDC_Projectbudgetlines.csv.

    Parameters:
        path: Directory the CSV file is written into.
        loglevel: Logging level applied to the module logger.

    Returns:
        Tuple ``(success_count, error_count)``.
    """
    logger.setLevel(loglevel)
    success_count, error_count = 0, 0
    filename = 'BLUE2SFDC_Projectbudgetlines.csv'
    with open(os.path.join(path, filename), 'wb') as csv_outfile:
        writer = csv.writer(csv_outfile, quoting=csv.QUOTE_ALL)
        writer.writerow(["Project_Budget_External_ID__c", "Project__c", "Costs__c", "Description__c"])
        budget_lines = ProjectBudgetLine.objects.all()
        logger.info("Exporting {0} ProjectBudgetLine objects to {1}".format(budget_lines.count(), filename))
        for line in budget_lines:
            try:
                # Amount is stored in cents; exported as a decimal string.
                row = [line.id,
                       line.project.id,
                       '%01.2f' % (float(line.amount) / 100),
                       line.description.encode("utf-8")]
                writer.writerow(row)
                success_count += 1
            except Exception as e:
                # Log and continue with the next budget line.
                error_count += 1
                logger.error("Error while saving projectbudgetline id {0}: ".format(line.id) + str(e))
    return success_count, error_count
def generate_donations_csv_file(path, loglevel):
    """Export all Donation objects to BLUE2SFDC_Donations.csv.

    Writes one opportunity row per donation in the Salesforce bulk-import
    column layout to a CSV file under ``path``.  Per-row failures are logged
    and counted so one bad record does not abort the export.

    Parameters:
        path: Directory the CSV file is written into.
        loglevel: Logging level applied to the module logger.

    Returns:
        Tuple ``(success_count, error_count)``.
    """
    logger.setLevel(loglevel)
    error_count = 0
    success_count = 0
    filename = 'BLUE2SFDC_Donations.csv'
    with open(os.path.join(path, filename), 'wb') as csv_outfile:
        csvwriter = csv.writer(csv_outfile, quoting=csv.QUOTE_ALL)
        # Header row: Salesforce field API names, in row order.
        csvwriter.writerow(["Donation_External_ID__c",
                            "Donor__c",
                            "Project__c",
                            "Amount",
                            "CloseDate",
                            "Name",
                            "StageName",
                            "Type",
                            "Donation_created_date__c",
                            "Donation_updated_date__c",
                            "Donation_ready_date__c",
                            "Payment_method__c",
                            "RecordTypeId",
                            "Fundraiser__c"])
        donations = Donation.objects.all()
        # PERF FIX: count() issues a COUNT query; it was previously called
        # inside the loop's debug log, one extra query per donation.
        donation_total = donations.count()
        logger.info("Exporting {0} Donation objects to {1}".format(donation_total, filename))
        t = 0
        for donation in donations:
            t += 1
            logger.debug("writing donation {0}/{1}: {2}".format(t, donation_total, donation.id))
            try:
                # Optional FK lookups fall back to empty strings.
                donor_id = ''
                if donation.user:
                    donor_id = donation.user.id
                project_id = ''
                if donation.project:
                    project_id = donation.project.id
                fundraiser_id = ''
                if donation.fundraiser:
                    fundraiser_id = donation.fundraiser.id
                if donation.user and donation.user.get_full_name() != '':
                    name = donation.user.get_full_name()
                else:
                    name = "Anonymous"
                donation_ready = ''
                if donation.completed:
                    donation_ready = donation.completed.strftime("%Y-%m-%dT%H:%M:%S.000Z")
                # Get the payment method from the associated order / payment
                payment_method = payment_method_mapping['']  # Maps to Unknown for DocData.
                if donation.order:
                    lp = OrderPayment.get_latest_by_order(donation.order)
                    if lp and lp.payment_method in payment_method_mapping:
                        payment_method = payment_method_mapping[lp.payment_method]
                csvwriter.writerow([donation.id,
                                    donor_id,
                                    project_id,
                                    '%01.2f' % (float(donation.amount)),
                                    donation.created.date().strftime("%Y-%m-%dT%H:%M:%S.000Z"),
                                    name.encode("utf-8"),
                                    donation.order.get_status_display(),
                                    donation.order.order_type,
                                    donation.created.strftime("%Y-%m-%dT%H:%M:%S.000Z"),
                                    donation.updated.strftime("%Y-%m-%dT%H:%M:%S.000Z"),
                                    donation_ready,
                                    payment_method.encode("utf-8"),
                                    '012A0000000ZK6FIAW',
                                    fundraiser_id])
                success_count += 1
            except Exception as e:
                # Log and continue with the next donation.
                error_count += 1
                logger.error("Error while saving donation id {0}: ".format(donation.id) + str(e))
    return success_count, error_count
def generate_vouchers_csv_file(path, loglevel):
    """Export all Voucher objects to a date-stamped BLUE2SFDC_Vouchers CSV.

    Parameters:
        path: Directory the CSV file is written into.
        loglevel: Logging level applied to the module logger.

    Returns:
        Tuple ``(success_count, error_count)``.
    """
    logger.setLevel(loglevel)
    success_count, error_count = 0, 0
    today = timezone.localtime(timezone.now()).strftime('%Y%m%d')
    filename = 'BLUE2SFDC_Vouchers_{0}.csv'.format(today)
    with open(os.path.join(path, filename), 'wb') as csv_outfile:
        writer = csv.writer(csv_outfile, quoting=csv.QUOTE_ALL)
        writer.writerow(["Voucher_External_ID__c", "Purchaser__c", "Amount", "CloseDate", "Name", "Description",
                         "StageName", "RecordTypeId"])
        vouchers = Voucher.objects.all()
        logger.info("Exporting {0} Voucher objects to {1}".format(vouchers.count(), filename))
        for voucher in vouchers:
            try:
                sender = voucher.sender
                # Fall back to the placeholder name for anonymous senders.
                name = sender.get_full_name() if sender and sender.get_full_name() != '' else "1%MEMBER"
                # Amount is stored in cents; exported as a decimal string.
                writer.writerow([voucher.id,
                                 sender.id,
                                 '%01.2f' % (float(voucher.amount) / 100),
                                 voucher.created.date(),
                                 name.encode("utf-8"),
                                 voucher.message.encode("utf-8"),
                                 VoucherStatuses.values[voucher.status].title(),
                                 '012A0000000BxfHIAS'])
                success_count += 1
            except Exception as e:
                # Log and continue with the next voucher.
                error_count += 1
                logger.error("Error while saving voucher id {0}: ".format(voucher.id) + str(e))
    return success_count, error_count
def generate_tasks_csv_file(path, loglevel):
    """Export all Task objects to BLUE2SFDC_Tasks.csv.

    Writes one row per task in the Salesforce bulk-import column layout to a
    CSV file under ``path``.  Per-row failures are logged and counted so one
    bad record does not abort the export.

    Parameters:
        path: Directory the CSV file is written into.
        loglevel: Logging level applied to the module logger.

    Returns:
        Tuple ``(success_count, error_count)``.
    """
    logger.setLevel(loglevel)
    error_count = 0
    success_count = 0
    filename = 'BLUE2SFDC_Tasks.csv'
    with open(os.path.join(path, filename), 'wb') as csv_outfile:
        csvwriter = csv.writer(csv_outfile, quoting=csv.QUOTE_ALL)
        # Header row: Salesforce field API names, in row order.
        csvwriter.writerow(["Task_External_ID__c",
                            "Project__c",
                            "Deadline__c",
                            "Location_of_the_task__c",
                            "Task_expertise__c",
                            "Task_status__c",
                            "Title__c",
                            "Task_created_date__c",
                            "Tags__c",
                            "Effort__c",
                            "People_Needed__c",
                            "Author__c",
                            "Date_realized__c"])
        tasks = Task.objects.all()
        logger.info("Exporting {0} Task objects to {1}".format(tasks.count(), filename))
        for task in tasks:
            # BUG FIX: the per-task field preparation previously ran outside
            # the try block, so an exception there aborted the whole export —
            # defeating the per-row error handling every other exporter uses.
            try:
                # Tags are folded into one comma-separated string (reverse
                # iteration order, with a trailing ", ").
                tags = ""
                for tag in task.tags.all():
                    tags = str(tag) + ", " + tags
                skill = ''
                if task.skill:
                    skill = task.skill.name.encode("utf-8")
                author = ''
                if task.author:
                    author = task.author.id
                date_realized = ''
                if task.status == 'realized' and task.date_status_change:
                    date_realized = task.date_status_change.strftime("%Y-%m-%dT%H:%M:%S.000Z")
                csvwriter.writerow([task.id,
                                    task.project.id,
                                    task.deadline.strftime("%Y-%m-%dT%H:%M:%S.000Z"),
                                    task.location.encode("utf-8"),
                                    skill,
                                    task.status.encode("utf-8"),
                                    task.title.encode("utf-8"),
                                    task.created.strftime("%Y-%m-%dT%H:%M:%S.000Z"),
                                    tags,
                                    task.time_needed.encode("utf-8"),
                                    task.people_needed,
                                    author,
                                    date_realized])
                success_count += 1
            except Exception as e:
                # Log and continue with the next task.
                error_count += 1
                logger.error("Error while saving task id {0}: ".format(task.id) + str(e))
    return success_count, error_count
def generate_taskmembers_csv_file(path, loglevel):
logger.setLevel(loglevel)
error_count = 0
success_count = 0
filename = 'BLUE2SFDC_Taskmembers.csv'
with open(os.path.join(path, filename), 'wb') as csv_outfile:
| |
= response.text #Получаем HTML код страницы
if "строения" in html: #Получаем информацию о строениях
soup = BS(html, "html.parser")
elems = soup.findAll("a", class_="lwhite")
for elem in elems:
elem = elem.text
if "из" in elem:
if "Академия" in elem:
data["academy_level"] = elem.split("(")[1].split("из")[0].replace(" ", "")
elif "Архивы" in elem:
data["archives_level"] = elem.split("(")[1].split("из")[0].replace(" ", "")
elif "лавка" in elem:
data["magical_shop_level"] = elem.split("(")[1].split("из")[0].replace(" ", "")
elif "зал" in elem:
data["trophy_room_level"] = elem.split("(")[1].split("из")[0].replace(" ", "")
elif "Оружейная" in elem:
data["armory_level"] = elem.split("(")[1].split("из")[0].replace(" ", "")
elif "Обелиск" in elem:
data["obelisk_of_valor_level"] = elem.split("(")[1].split("из")[0].replace(" ", "")
elif "Дом" in elem:
data["house_of_herolds_level"] = elem.split("(")[1].split("из")[0].replace(" ", "")
elif "совет" in elem:
data["military_council_level"] = elem.split("(")[1].split("из")[0].replace(" ", "")
return data
def chest(self): #Узнать содержимое сумки
data = {} #Заготовка на вывод
item = {} #Нужная вещь
response = self.session.get(self.url+"chest", headers=self.headers) #Посылаем GET запрос
html = response.text #Получаем HTML код страницы
soup = BS(html, "html.parser")
elems = soup.findAll("div", class_="wr8") #Находим нужные нам элементы
j = 0 #Нужная вещь x2
for elem in elems:
j += 1
elem = elem.text.replace("\t", "").split("\n") #Обработка элемента
item_ = [] #Нужная вещь x3
for i in elem:
if i != "":
item_.append(i)
item["item"] = item_[0]
item["rarity"] = item_[1].split("[")[0].replace(" ", "")
item["level"] = item_[1].split("[")[1].split("/")[0]
item["level_max"] = item_[1].split("/")[1].split("]")[0]
data[j] = item
item = {}
return data
def gear(self, id: str = None): #Узнать снаряжения игрока
data = {} #Заготовка на вывод
if id == None: #Узнаем у себя если id не указан
response = self.session.get(self.url+"gear", headers=self.headers) #Посылаем GET запрос
else: #Узнаем снаряжение у другого игрока
response = self.session.get(self.url+"view_gear?player_id="+str(id), headers=self.headers) #Посылаем GET запрос
html = response.text #Получаем HTML код страницы
if "Вы кликаете слишком быстро" in html: #Проверка на нужную страницу
time.sleep(2)
self.gear(id)
if "Снаряжение" in html: #Проверка на нужную страницу x2
names = []
rarities = []
levels = []
levels_max = []
runes = [""] * 10
gems = [""] * 10
differences = [""] * 10
sharpenings = [""] * 10
output = {}
i = 0
soup = BS(html, "html.parser")
elems = soup.findAll("div", class_="wr8") #Получаем все элементы
for elem in elems:
i += 1
for link in elem.findAll("a"):
if link.text != "" and link.text != "В сумку":
names.append(link.text) #Находим название
for span in elem.findAll("span"):
if span.has_attr("class") and len(span["class"][0]) == 2:
rarities.append(span.text.split('[')[0][:-1]) #Находим редкость
levels.append(span.text.split("[")[1].split("/")[0]) #Находим текущий уровень
levels_max.append(span.text.split("/")[1].split("]")[0]) #Находим максимальный уровень
if span.has_attr("class") and len(span["class"][0]) != 2:
runes[i-1] = span.text #Проверка на наличие руны
if span.has_attr("class") and span["class"][0] == "win":
differences[i-1] = span.text.replace("\n", "").replace("\t", "").replace(" ", "") #Отличие от нашей вещи (работает только на других пользователях и только если у них есть вещь лучше вашей)
for gem in elem.findAll("div", class_="mt5"):
img = gem.find("img")
if img != None and img.has_attr("class") and len(img["class"]) == 2:
gems[i-1] = gem.text.replace("\n", "").replace("\t", "").replace(" ", "") #Проверка на наличие самоцвета
link = gem.find("a")
if link != None and link["href"].startswith("/view"):
res = gem.text.replace("\n", "").replace("\t", "").replace(" ", "").split("+")[-1] #Находим заточку
if res.isdigit():
sharpenings[i-1] = res
for i in range(0, len(names)):
output["name"] = names[i]
output["rarity"] = rarities[i]
output["level"] = levels[i]
output["level_max"] = levels_max[i]
output["rune"] = runes[i]
output["gem"] = gems[i]
output["difference"] = differences[i]
output["sharpening"] = sharpenings[i]
data[str(i+1)] = output
output = {}
return data
def schedule(self): #Узнать расписание битв
data = {} #Заготовка на вывод
response = self.session.get(self.url+"schedule", headers=self.headers) #Посылаем GET запрос
html = response.text #Получаем HTML код страницы
if "Вы кликаете слишком быстро" in html: #Проверка на нужную страницу
time.sleep(2)
self.schedule()
if "Расписание" in html: #Проверка на нужную страницу x2
soup = BS(html, "html.parser")
data["current_time"] = ":".join(soup.find("div", class_="lblue").text.replace("\t", "").replace("\n", "").replace(" ", "").split(":")[1:]).strip()[:-3] #Находим текущее время
times = soup.findAll("li", class_="mb10")
for time_ in times: #Находим оставшееся время до битв
time_ = time_.text.replace("\t", "").replace("\n", "").strip()
if "Турнир героев" in time_:
data["tournament"] = time_.split("через")[1].replace("ч", "h").replace("м", "m").replace("д", "d").strip()
elif "Вторжение" in time_:
data["invasion"] = time_.split("через")[1].replace("ч", "h").replace("м", "m").replace("д", "d").strip()
elif "Турнир кланов" in time_:
data["clanTourney"] = time_.split("через")[1].replace("ч", "h").replace("м", "m").replace("д", "d").strip()
elif "Войны кланов" in time_:
data["army"] = time_.split("через")[1].replace("ч", "h").replace("м", "m").replace("д", "d").strip()
elif "Остров сокровищ" in time_:
data["clanSurvival"] = time_.split("через")[1].replace("ч", "h").replace("м", "m").replace("д", "d").strip()
elif "Осада башен" in time_:
data["tower"] = time_.split("через")[1].replace("ч", "h").replace("м", "m").replace("д", "d").strip()
elif "Битва за престол" in time_:
data["throne"] = time_.split("через")[1].replace("ч", "h").replace("м", "m").replace("д", "d").strip()
return data
def coliseum(self): #Узнать информацию о колизее
data = {} #Заготовка на вывод
response = self.session.get(self.url+"pvp", headers=self.headers) #Посылаем GET запрос
html = response.text #Получаем HTML код страницы
if "Вы кликаете слишком быстро" in html: #Проверка на нужную страницу
time.sleep(2)
self.coliseum()
if "Ваш рейтинг" in html: #Проверка на нужную страницу x2
soup = BS(html, "html.parser")
elem = soup.find("div", class_="wr8") #Находим информацию
txts = []
for i in elem.text.replace("\t", "").split("\n"):
if i != "":
txts.append(i.strip())
data["rating"] = txts[0].split(":")[1] #Находим рейтинг
data["league"] = txts[1][1:] #Находим лигу
data["season_end"] = " ".join(txts[2].split(" ")[3:]).replace("ч", "h").replace("м", "m").replace("д", "d") #Находим время, оставшееся до конца сезона
return data
def chat(self, id: str = "0", pages: int = 1): #Получить сообщения из чата
data = {} #Заготовка на вывод
message = {} #Нужная вещь
i = 0 #Нужная вещь x2
for page in range(1, pages+1):
response = self.session.get(self.url+"chat?id="+str(id)+"?page="+str(page), headers=self.headers) #Посылаем GET запрос
html = response.text #Получаем HTML код страницы
if "Вы кликаете слишком быстро" in html: #Проверка на нужную страницу
time.sleep(2)
self.chat()
if "Отправить" in html: #Проверка на нужную страницу x2
soup = BS(html, "html.parser")
elems = soup.findAll("div", class_="mb5") #Получаем все элементы с сообщениями
for elem in elems: #Проходимся по каждому из них
if len(elem["class"]) == 1:
i += 1
elem = elem.text
message["nickname"] = elem.split("(»):")[0].strip() #Узнаем никнейм
message["text"] = elem.split("(»):")[1].strip() #Узнаем текст
data[str(i)] = message
message = {}
return data
def amulet(self, id: str = None): #Узнать информацию об амулете
data = {} #Заготовка на вывод
if id == None: #Если узнаем у себя
response = self.session.get(self.url+"amulet", headers=self.headers) #Посылаем GET запрос
else: #Если узнаем у человека с указанным ID
response = self.session.get(self.url+"view_amulet?player_id="+str(id), headers=self.headers) #Посылаем GET запрос
html = response.text #Получаем HTML код страницы
if "Вы кликаете слишком быстро" in html: #Проверка на нужную страницу
time.sleep(2)
self.amulet(id)
if "Амулет" in html: #Проверка на нужную страницу
soup = BS(html, "html.parser")
elem = soup.find("div", class_="wr8")
if id != None:
try:
data["difference"] = elem.find("div", class_="win").text.split(":")[1].strip() #Находим различие, если амулет чужой
except:
pass
data["level"] = elem.find("span", class_="lblue").text.split(":")[1].split("из")[0].strip() #Находим качество амулета
a = [] #Нужная вещь
for i in elem.text.replace("\t", "").split("\n"): #Обработка данных
if i != "":
a.append(i)
a = a[2:] #Обработка данных x2
data["parametrs_bonus"] = a[0].split("к")[0].strip() #Находим бонус к параметрам
data["exp_bonus"] = a[1].split("к")[0].strip() #Находим бонус к опыту
data["silver_bonus"] = a[2].split("к")[0].strip() #Находим бонус к серебру
if id == None: #Если парсим свой амулет
btn = soup.find("span", class_="ur")
data["upgrade_cost"] = btn.text.split("за")[1].strip() #Находим стоимость улучшения
img = btn.find("img")
if "silver" in img["src"]: #Находим валюту
data["upgrade_currency"] = "silver"
elif "gold" in img["src"]:
data["upgrade_currency"] = "gold"
return data
def abilities(self, id: str = None): #Узнать умения игрока
data = {} #Заготовка на вывод
obj = {} #Нужная вещь
if id == None: #Если узнаем у себя
response = self.session.get(self.url+"ability", headers=self.headers) #Посылаем GET запрос
else: #Или если узнаем у игрока с данным ID
response = self.session.get(self.url+"view_abilities?player_id="+str(id), headers=self.headers) #Посылаем GET запрос
html = response.text #Получаем HTML код страницы
if "Вы кликаете слишком быстро" in html: #Проверка на нужную страницу
time.sleep(2)
self.abilities(id)
if "Умения" in html: #Проверка на нужную страницу x2
soup = BS(html, "html.parser")
elems = soup.findAll("div", class_="wr8") #Находим все элементы
a = [] #Нужная вещь x2
for elem in elems:
for i in elem.text.replace("\t", "").split("\n"):
if i != "" and i != "Активное" and i != " ":
a.append(i)
if "Уворот" in a[0]: #Отдельная обработка для уворота
obj["percents"] = "".join(a[0].split(" ")[1]).strip() #Находим шанс/кулдаун/прибавку
else:
obj["percents"] = "".join(a[0].split(" ")[-1]).strip()
obj["bonus"] = a[1].split(":")[1].strip() #Находим бонус
obj["level"] = a[2].split(":")[1].split("из")[0].strip() #Находим уровень прокачки
names = {"Ярость": "rage", "Пробивание": "punch", "Круговой": "round_hit", "Блок": "block", "Защита": "protect", "Парирование": "parry", "Уворот": "dodge", "Лечение": "health", "Уклонение": "evasion"}
data[names[a[0].split(" ")[0].strip()]] = obj
obj = {}
a = []
return data
def trophies(self, id: str = None): #Узнать трофеи игрока
data = {"trophies": []} #Заготовка на вывод
if id | |
each pair
input/output.
speed_range : array
Array with the frequencies.
velc_resp : array
Array with the velocity response for each node for each pair
input/output.
accl_resp : array
Array with the acceleration response for each node for each pair
input/output.
Examples
--------
>>> rotor = rotor_example()
>>> speed = np.linspace(0, 1000, 101)
>>> response = rotor.run_freq_response(speed_range=speed)
Return the response amplitude
>>> abs(response.freq_resp) # doctest: +ELLIPSIS
array([[[1.00000000e-06, 1.00261725e-06, 1.01076952e-06, ...
Return the response phase
>>> np.angle(response.freq_resp) # doctest: +ELLIPSIS
array([[[...
Using clustered points option.
Set `cluster_points=True` and choose how many modes the method must search and
how many points to add just before and after each critical speed.
>>> response = rotor.run_freq_response(cluster_points=True, num_points=5)
>>> response.speed_range.shape
(61,)
Plotting frequency response function:
>>> fig = response.plot(inp=13, out=13)
To plot velocity and acceleration responses, you must change amplitude_units
from "[length]/[force]" units to "[speed]/[force]" or "[acceleration]/[force]"
respectively
Plotting velocity response
>>> fig = response.plot(inp=13, out=13, amplitude_units="m/s/N")
Plotting acceleration response
>>> fig = response.plot(inp=13, out=13, amplitude_units="m/s**2/N")
"""
if speed_range is None:
if not cluster_points:
modal = self.run_modal(0)
speed_range = np.linspace(0, max(modal.evalues.imag) * 1.5, 1000)
else:
speed_range = self._clustering_points(
num_modes, num_points, modes, rtol
)
self._check_frequency_array(speed_range)
freq_resp = np.empty((self.ndof, self.ndof, len(speed_range)), dtype=np.complex)
velc_resp = np.empty((self.ndof, self.ndof, len(speed_range)), dtype=np.complex)
accl_resp = np.empty((self.ndof, self.ndof, len(speed_range)), dtype=np.complex)
for i, speed in enumerate(speed_range):
H = self.transfer_matrix(speed=speed, modes=modes)
freq_resp[..., i] = H
velc_resp[..., i] = 1j * speed * H
accl_resp[..., i] = -(speed ** 2) * H
results = FrequencyResponseResults(
freq_resp=freq_resp,
velc_resp=velc_resp,
accl_resp=accl_resp,
speed_range=speed_range,
number_dof=self.number_dof,
)
return results
def forced_response(
self,
force=None,
speed_range=None,
modes=None,
cluster_points=False,
num_modes=12,
num_points=10,
rtol=0.005,
unbalance=None,
):
"""Forced response for a mdof system.
This method returns the unbalanced response for a mdof system
given magnitude and phase of the unbalance, the node where it's
applied and a frequency range.
Available plotting methods:
.plot()
.plot_magnitude()
.plot_phase()
.plot_polar_bode()
.plot_deflected_shape()
.plot_bending_moment()
.plot_deflected_shape_3d()
.plot_deflected_shape_2d()
Parameters
----------
force : list, array
Unbalance force in each degree of freedom for each value in omega
speed_range : list, array
Array with the desired range of frequencies
modes : list, optional
Modes that will be used to calculate the frequency response
(all modes will be used if a list is not given).
unbalance : array, optional
Array with the unbalance data (node, magnitude and phase) to be plotted
with deflected shape. This argument is set only if running an unbalance
response analysis.
Default is None.
cluster_points : bool, optional
boolean to activate the automatic frequency spacing method. If True, the
method uses _clustering_points() to create an speed_range.
Default is False
num_points : int, optional
The number of points generated per critical speed.
The method set the same number of points for slightly less and slightly
higher than the natural circular frequency. It means there'll be num_points
greater and num_points smaller than a given critical speed.
num_points may be between 2 and 12. Anything above this range defaults
to 10 and anything below this range defaults to 4.
The default is 10.
num_modes
The number of eigenvalues and eigenvectors to be calculated using ARPACK.
It also defines the range for the output array, since the method generates
points only for the critical speed calculated by run_critical_speed().
Default is 12.
rtol : float, optional
Tolerance (relative) for termination. Applied to scipy.optimize.newton to
calculate the approximated critical speeds.
Default is 0.005 (0.5%).
Returns
-------
forced_resp : object
An instance of ForcedResponseResult class, which is used to post-process
results. Attributes stored:
forced_resp : array
Array with the forced response for each node for each frequency.
speed_range : array
Array with the frequencies.
velc_resp : array
Array with the velocity response for each node for each frequency.
accl_resp : array
Array with the acceleration response for each node for each frequency.
Examples
--------
>>> rotor = rotor_example()
>>> speed = np.linspace(0, 1000, 101)
>>> force = rotor._unbalance_force(3, 10.0, 0.0, speed)
>>> resp = rotor.forced_response(force=force, speed_range=speed)
>>> abs(resp.forced_resp) # doctest: +ELLIPSIS
array([[0.00000000e+00, 5.06073311e-04, 2.10044826e-03, ...
Using clustered points option.
Set `cluster_points=True` and choose how many modes the method must search and
how many points to add just before and after each critical speed.
>>> response = rotor.forced_response(
... force=force, cluster_points=True, num_modes=12, num_points=5
... )
>>> response.speed_range.shape
(61,)
"""
if speed_range is None:
if cluster_points:
speed_range = self._clustering_points(
num_modes, num_points, modes, rtol
)
freq_resp = self.run_freq_response(
speed_range, modes, cluster_points, num_modes, num_points, rtol
)
forced_resp = np.zeros(
(self.ndof, len(freq_resp.speed_range)), dtype=np.complex
)
velc_resp = np.zeros((self.ndof, len(freq_resp.speed_range)), dtype=np.complex)
accl_resp = np.zeros((self.ndof, len(freq_resp.speed_range)), dtype=np.complex)
for i in range(len(freq_resp.speed_range)):
forced_resp[:, i] = freq_resp.freq_resp[..., i] @ force[..., i]
velc_resp[:, i] = freq_resp.velc_resp[..., i] @ force[..., i]
accl_resp[:, i] = freq_resp.accl_resp[..., i] @ force[..., i]
forced_resp = ForcedResponseResults(
rotor=self,
forced_resp=forced_resp,
velc_resp=velc_resp,
accl_resp=accl_resp,
speed_range=speed_range,
unbalance=unbalance,
)
return forced_resp
def _unbalance_force(self, node, magnitude, phase, omega):
"""Calculate unbalance forces.
This is an auxiliary function the calculate unbalance forces. It takes the
force magnitude and phase and generate an array with complex values of forces
on each degree degree of freedom of the given node.
Parameters
----------
node : int
Node where the unbalance is applied.
magnitude : float
Unbalance magnitude (kg.m)
phase : float
Unbalance phase (rad)
omega : list, float
Array with the desired range of frequencies
Returns
-------
F0 : list
Unbalance force in each degree of freedom for each value in omega
Examples
--------
>>> rotor = rotor_example()
>>> speed = np.linspace(0, 1000, 101)
>>> rotor._unbalance_force(3, 10.0, 0.0, speed)[12] # doctest: +ELLIPSIS
array([0.000e+00+0.j, 1.000e+03+0.j, 4.000e+03+0.j, ...
"""
F0 = np.zeros((self.ndof, len(omega)), dtype=np.complex128)
b0 = np.zeros((self.number_dof), dtype=np.complex128)
b0[0] = magnitude * np.exp(1j * phase)
b0[1] = -1j * magnitude * np.exp(1j * phase)
# b0[2] 1j*(Id - Ip)*beta*np.exp(1j*gamma)
n0 = self.number_dof * node
n1 = n0 + self.number_dof
for i, w in enumerate(omega):
F0[n0:n1, i] += w ** 2 * b0
return F0
@check_units
def run_unbalance_response(
self,
node,
unbalance_magnitude,
unbalance_phase,
frequency=None,
modes=None,
cluster_points=False,
num_modes=12,
num_points=10,
rtol=0.005,
):
"""Unbalanced response for a mdof system.
This method returns the unbalanced response for a mdof system
given magnitide and phase of the unbalance, the node where it's
applied and a frequency range.
Available plotting methods:
.plot()
.plot_magnitude()
.plot_phase()
.plot_polar_bode()
.plot_deflected_shape()
.plot_bending_moment()
.plot_deflected_shape_3d()
.plot_deflected_shape_2d()
Parameters
----------
node : list, int
Node where the unbalance is applied.
unbalance_magnitude : list, float, pint.Quantity
Unbalance magnitude (kg.m).
unbalance_phase : list, float, pint.Quantity
Unbalance phase (rad).
frequency : list, float, pint.Quantity
Array with the desired range of frequencies (rad/s).
modes : list, optional
Modes that will be used to calculate the frequency response
(all modes will be used if a list is not given).
cluster_points : bool, optional
boolean to activate the automatic frequency spacing method. If True, the
method uses _clustering_points() to create an speed_range.
Default is False
num_points : int, optional
The number of points generated per critical speed.
The method set the same number of points for slightly less and slightly
higher than the natural circular frequency. It means there'll be num_points
greater and num_points smaller than a given critical speed.
num_points may be between 2 and 12. Anything above this range defaults
to 10 and anything below this range defaults to 4.
The default is 10.
num_modes
The number of eigenvalues and eigenvectors to be calculated using ARPACK.
It also defines the range for the output array, since the method generates
points only for the critical speed calculated by run_critical_speed().
Default is 12.
rtol : float, optional
Tolerance (relative) for termination. Applied to scipy.optimize.newton to
calculate the approximated critical speeds.
Default is 0.005 (0.5%).
Returns
-------
forced_response : object
An instance of ForcedResponseResult class, which is used to post-process
results. Attributes stored:
forced_resp : array
Array with the forced response for each node for each frequency.
speed_range : array
Array with the frequencies.
velc_resp : array
Array with the velocity response for each node for each frequency.
accl_resp : array
Array with the acceleration response for | |
<filename>2 Evaluating Passes/Christian Gilson - Evaluating Passes Assignment.py<gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
# ## Evaluating Passes Assignemnt
# In[1]:
# standard imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import json
import os
# stats packages
import statsmodels.api as sm
import statsmodels.formula.api as smf
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.calibration import calibration_curve
# plotting
from mplsoccer.pitch import Pitch
# to deal with the unicode characters of players names / team names in Wyscout
import codecs
pd.options.mode.chained_assignment = None
# # Helper Functions
# In[2]:
def show_event_breakdown(df_events, dic_tags):
    """
    Produces a full breakdown of the events, subevents, and the tags for the Wyscout dataset
    Use this to look at the various tags attributed to the event taxonomy
    """
    # one row per (event, sub-event): distinct event count plus all tag lists
    grouped = (
        df_events.groupby(['eventName', 'subEventName'])
        .agg({'id': 'nunique', 'tags': lambda tag_col: list(tag_col)})
        .reset_index()
        .rename(columns={'id': 'numSubEvents', 'tags': 'tagList'})
    )

    # histogram of human-readable tag names per sub-event
    grouped['tagHist'] = grouped.tagList.apply(
        lambda tag_lists: Counter(dic_tags[tag] for tags in tag_lists for tag in tags)
    )

    # flatten: one output row per (sub-event, tag) pair
    records = {}
    for idx, row in grouped.iterrows():
        for tag_name, tag_freq in row['tagHist'].items():
            records[f'{idx}-{tag_name}'] = [
                row['eventName'], row['subEventName'], row['numSubEvents'], tag_name, tag_freq,
            ]

    return (
        pd.DataFrame.from_dict(
            records, orient='index',
            columns=['eventName', 'subEventName', 'numSubEvents', 'tagKey', 'tagFrequency'])
        .sort_values(['eventName', 'numSubEvents', 'tagFrequency'], ascending=[True, False, False])
        .reset_index(drop=True)
    )
# In[3]:
def home_and_away(df):
    """
    Picks out the home and away teamIds and their scores
    (teamsData maps teamId -> dict with at least 'side' and 'score')
    """
    teams_data = df['teamsData']
    for team_key in teams_data:
        team_info = teams_data[team_key]
        side = team_info.get('side')
        if side == 'home':
            home_id, home_score = team_key, team_info.get('score')
        elif side == 'away':
            away_id, away_score = team_key, team_info.get('score')
    # note: raises NameError if either side is missing from teamsData
    df['homeTeamId'], df['homeScore'] = home_id, home_score
    df['awayTeamId'], df['awayScore'] = away_id, away_score
    return df
# In[4]:
def possession_indicator(df):
    """
    Function that identifies which team is in possession of the ball.
    Returns the teamId of the team in possession, or NaN for events where
    possession is undefined (fouls, interruptions, offsides).
    The winner of a duel is deemed in possession of the ball.
    """
    # team identifiers
    teamId = df['teamId']
    homeTeamId = df['homeTeamId']
    awayTeamId = df['awayTeamId']
    # the opponent is whichever of the two match teams did not record the event
    otherTeamId = list({homeTeamId, awayTeamId} - {teamId})[0]
    eventName = df['eventName']
    successFlag = df['successFlag']
    # assigning possession teamId
    if eventName in ['Pass', 'Free Kick', 'Others on the ball', 'Shot', 'Save attempt', 'Goalkeeper leaving line']:
        possessionTeamId = teamId
    elif eventName == 'Duel':
        possessionTeamId = teamId if successFlag == 1 else otherTeamId
    else:
        # bug fix: np.NaN alias was removed in NumPy 2.0; np.nan is the
        # canonical (and backward-compatible) spelling
        possessionTeamId = np.nan
    return possessionTeamId
# In[5]:
def strong_foot_flag(df):
    """
    Compare foot of pass with footedness of player
    Provides flag = 1 if pass played with strong foot of the player
    """
    tags = df['tags']
    foot = df['foot']
    # which foot struck the ball, decoded from the Wyscout tag ids
    if 401 in tags:
        struck = 'L'
    elif 402 in tags:
        struck = 'R'
    elif 403 in tags:
        struck = 'H'  # head/body
    else:
        struck = 'N'  # not tagged
    # strong foot: striking foot matches preference, or the player is two-footed ('B')
    matching_feet = {'L': ('L', 'B'), 'R': ('R', 'B')}
    return 1 if foot in matching_feet.get(struck, ()) else 0
def weak_foot_flag(df):
    """
    Compare foot of pass with footedness of player
    Provides flag = 1 if pass played with weak foot of the player
    """
    tags = df['tags']
    foot = df['foot']
    # which foot struck the ball, decoded from the Wyscout tag ids
    if 401 in tags:
        struck = 'L'
    elif 402 in tags:
        struck = 'R'
    elif 403 in tags:
        struck = 'H'  # head/body
    else:
        struck = 'N'  # not tagged
    # weak foot: ball struck with the opposite of the player's preferred foot
    if struck == 'L' and foot == 'R':
        return 1
    if struck == 'R' and foot == 'L':
        return 1
    return 0
# ---
#
# # Data Loader Functions
#
# * Players
# * Teams
# * Tags
# * Matches
# * Formations
# * Events
# In[6]:
def get_players(player_file):
    """
    Returns dataframe of players loaded from the Wyscout players JSON file
    (names are unicode-unescaped; nationality comes from passportArea)
    """
    with open(player_file) as fh:
        players_data = json.load(fh)
    rows = []
    for p in players_data:
        rows.append([
            p.get('wyId'),
            codecs.unicode_escape_decode(p.get('shortName'))[0],
            p.get('foot'),
            p.get('height'),
            p.get('weight'),
            p.get('birthDate'),
            p.get('passportArea').get('name'),
            p.get('role').get('name'),
            p.get('role').get('code3'),
        ])
    player_cols = ['playerId', 'shortName', 'foot', 'height', 'weight', 'birthDate', 'birthCountry', 'role', 'roleCode']
    return pd.DataFrame(rows, columns=player_cols)
def get_teams(team_file):
    """
    Returns dataframe of teams loaded from the Wyscout teams JSON file
    (team names are unicode-unescaped)
    """
    with open(team_file) as fh:
        teams_data = json.load(fh)
    rows = [
        [t.get('wyId'),
         codecs.unicode_escape_decode(t.get('name'))[0],
         codecs.unicode_escape_decode(t.get('officialName'))[0],
         t.get('type'),
         t.get('area').get('name')]
        for t in teams_data
    ]
    return pd.DataFrame(rows, columns=['teamId', 'teamName', 'officialTeamName', 'teamType', 'teamArea'])
# Wyscout event tag id -> human-readable label.
# Used to translate the numeric tag lists attached to each event into names
# (see show_event_breakdown and the foot-flag helpers above).
dic_tags = {
    101: 'Goal',
    102: 'Own goal',
    301: 'Assist',
    302: 'Key pass',
    1901: 'Counter attack',
    401: 'Left foot',
    402: 'Right foot',
    403: 'Head/body',
    1101: 'Direct',
    1102: 'Indirect',
    2001: 'Dangerous ball lost',
    2101: 'Blocked',
    801: 'High',
    802: 'Low',
    1401: 'Interception',
    1501: 'Clearance',
    201: 'Opportunity',
    1301: 'Feint',
    1302: 'Missed ball',
    501: 'Free space right',
    502: 'Free space left',
    503: 'Take on left',
    504: 'Take on right',
    1601: 'Sliding tackle',
    601: 'Anticipated',
    602: 'Anticipation',
    1701: 'Red card',
    1702: 'Yellow card',
    1703: 'Second yellow card',
    1201: 'Position: Goal low center',
    1202: 'Position: Goal low right',
    1203: 'Position: Goal center',
    1204: 'Position: Goal center left',
    1205: 'Position: Goal low left',
    1206: 'Position: Goal center right',
    1207: 'Position: Goal high center',
    1208: 'Position: Goal high left',
    1209: 'Position: Goal high right',
    1210: 'Position: Out low right',
    1211: 'Position: Out center left',
    1212: 'Position: Out low left',
    1213: 'Position: Out center right',
    1214: 'Position: Out high center',
    1215: 'Position: Out high left',
    1216: 'Position: Out high right',
    1217: 'Position: Post low right',
    1218: 'Position: Post center left',
    1219: 'Position: Post low left',
    1220: 'Position: Post center right',
    1221: 'Position: Post high center',
    1222: 'Position: Post high left',
    1223: 'Position: Post high right',
    901: 'Through',
    1001: 'Fairplay',
    701: 'Lost',
    702: 'Neutral',
    703: 'Won',
    1801: 'Accurate',
    1802: 'Not accurate'
}
def get_matches(match_repo):
    """
    Return dataframe of matches built from every JSON file in *match_repo*.

    Each file is loaded, tagged with its competition (from the filename),
    restricted to the common column set, concatenated, and augmented with
    home/away team ids and scores via home_and_away().
    """
    match_files = os.listdir(match_repo)
    lst_df_matches = []
    # note, this does not include groupName
    match_cols = ["status","roundId","gameweek","teamsData","seasonId","dateutc","winner","venue","wyId","label","date","referees","duration","competitionId","source"]
    for match_file in match_files:
        print(f'Processing {match_file}...')
        # bug fix: join against match_repo instead of the hard-coded 'matches/'
        # directory, so the function works for any repository path it listed
        with open(os.path.join(match_repo, match_file)) as f:
            data = json.load(f)
        df = pd.DataFrame(data)
        # adding some file source metadata
        df['source'] = match_file.replace('matches_','').replace('.json','')
        # dealing with the groupName column that's only in the international competitions
        df = df[match_cols]
        lst_df_matches.append(df)
    # concatenating match files
    df_matches = pd.concat(lst_df_matches, ignore_index=True)
    # applying home and away transformations using helper functions
    df_matches = df_matches.apply(home_and_away, axis=1)
    # and changing the wyId to matchId
    df_matches = df_matches.rename(columns={'wyId':'matchId'})
    # and filtering columns (may want to change this later)
    match_cols_final = ["source","competitionId","seasonId","roundId","gameweek","matchId","teamsData","dateutc","date","homeTeamId","homeScore","awayTeamId","awayScore","duration","winner","venue","label"]
    df_matches = df_matches[match_cols_final]
    return df_matches
def get_formations(df_matches):
    """
    Returns dataframe of formations within a match for all matches
    Adapted from https://github.com/CleKraus/soccer_analytics

    One row per (matchId, teamId, playerId) with columns: lineup flag,
    substituteIn/substituteOut flags, minuteStart, minuteEnd, minutesPlayed.
    Matches are clamped to 90 minutes (extra time is ignored).
    """
    lst_formations = list()
    for idx, match in df_matches.iterrows():
        matchId = match['matchId']
        # loop through the two teams
        for team in [0, 1]:
            # NOTE: rebinding the loop variable `team` from index to the team dict
            team = match['teamsData'][list(match['teamsData'])[team]]
            teamId = team['teamId']
            # get all players that started on the bench
            player_bench = [player['playerId'] for player in team['formation']['bench']]
            df_bench = pd.DataFrame()
            df_bench['playerId'] = player_bench
            df_bench['lineup'] = 0
            # get all players that were in the lineup
            player_lineup = [
                player['playerId'] for player in team['formation']['lineup']
            ]
            df_lineup = pd.DataFrame()
            df_lineup['playerId'] = player_lineup
            df_lineup['lineup'] = 1
            # in case there were no substitutions in the match
            # (Wyscout encodes "no substitutions" as the literal string 'null')
            if team['formation']['substitutions'] == 'null':
                player_in = []
                player_out = []
                sub_minute = []
            # if there were substitutions
            else:
                player_in = [
                    sub['playerIn'] for sub in team['formation']['substitutions']
                ]
                player_out = [
                    sub['playerOut'] for sub in team['formation']['substitutions']
                ]
                sub_minute = [
                    sub['minute'] for sub in team['formation']['substitutions']
                ]
            # build a data frame who and when was substituted in
            df_player_in = pd.DataFrame()
            df_player_in['playerId'] = player_in
            df_player_in['substituteIn'] = sub_minute
            # build a data frame who and when was substituted out
            df_player_out = pd.DataFrame()
            df_player_out['playerId'] = player_out
            df_player_out['substituteOut'] = sub_minute
            # get the formation by concatenating lineup and bench players
            df_formation = pd.concat([df_lineup, df_bench], axis=0)
            df_formation['matchId'] = matchId
            df_formation['teamId'] = teamId
            # add information about substitutions (left-merge keeps every player;
            # non-substituted players get NaN minutes here)
            df_formation = pd.merge(df_formation, df_player_in, how='left')
            df_formation = pd.merge(df_formation, df_player_out, how='left')
            lst_formations.append(df_formation)
    df_formations = pd.concat(lst_formations)
    # get the minute the player started and the minute the player ended the match
    df_formations['minuteStart'] = np.where(
        df_formations['substituteIn'].isnull(), 0, df_formations['substituteIn']
    )
    df_formations['minuteEnd'] = np.where(
        df_formations['substituteOut'].isnull(), 90, df_formations['substituteOut']
    )
    # make sure the match always lasts 90 minutes
    df_formations['minuteStart'] = np.minimum(df_formations['minuteStart'], 90)
    df_formations['minuteEnd'] = np.minimum(df_formations['minuteEnd'], 90)
    # set minuteEnd to 0 in case the player was not in the lineup and did not get substituted in
    df_formations['minuteEnd'] = np.where(
        (df_formations['lineup'] == 0) & (df_formations['substituteIn'].isnull()),
        0,
        df_formations['minuteEnd'],
    )
    # compute the minutes played
    df_formations['minutesPlayed'] = (
        df_formations['minuteEnd'] - df_formations['minuteStart']
    )
    # use a binary flag of substitution rather than a minute and NaNs
    df_formations['substituteIn'] = 1 * (df_formations['substituteIn'].notnull())
    df_formations['substituteOut'] = 1 * (df_formations['substituteOut'].notnull())
    return df_formations
def get_events(event_repo, leagueSelectionFlag = 0, leagueSelection = 'England'):
"""
Returns dataframe of events
"""
events_files = os.listdir(event_repo)
lst_df_events = []
if leagueSelectionFlag == 1:
events_files = [i | |
from __future__ import absolute_import, division, print_function
import cStringIO
import sys, os, re
from six.moves import zip
def check_bin_format(bin):
  """Validate that *bin* is a (d_max, d_min) pair of float-parseable values.

  Raises RuntimeError (wrapping the original ValueError message) otherwise.
  """
  try :
    d_max = float(bin[0])
    d_min = float(bin[1])
  except ValueError as e :
    raise RuntimeError("%s\nOffending values: %s, %s"%(str(e),bin[0],bin[1]))
def float_or_none(n):
  """Return float(n), passing None through unchanged (for 'NULL' stats)."""
  return None if n is None else float(n)
def percent_to_float(value):
  """Convert a percentage string such as "97.5%" to its float value (97.5).

  The trailing percent sign is required (asserted).
  """
  assert value.endswith("%")
  # bug fix: use a raw string -- "\%" is an invalid escape sequence
  # (DeprecationWarning, later SyntaxWarning/SyntaxError on modern Python)
  return float(re.sub(r"%$", "", value))
class experiment_info(object):
  """Base/marker class for experiment metadata.

  Mirrors the integration/scaling info API: extract_all_stats() returns an
  object whose attributes are the statistics (here, the instance itself).
  """
  def extract_all_stats(self):
    # the instance already exposes its stats as plain attributes
    return self
class integration_info(object):
  """Beam/detector geometry parsed from an integration program's log.

  program_name is e.g. "MOSFLM" or "HKL-2000"; wavelength, distance and
  twotheta stay None until the corresponding setter is called.
  """
  def __init__(self, program_name="NULL"):
    self.program_name = program_name
    # populated by the setters below while the log is parsed
    self.wavelength = None
    self.distance = None
    self.twotheta = None
  def set_wavelength(self, wavelength):
    """Store the beam wavelength, coerced to float."""
    self.wavelength = float(wavelength)
  def set_distance(self, distance):
    """Store the crystal-to-detector distance, coerced to float."""
    self.distance = float(distance)
  def set_2theta(self, twotheta):
    """Store the detector swing angle (kept as given, no coercion)."""
    self.twotheta = twotheta
  def extract_all_stats(self):
    """Return an object exposing the stats as attributes (the instance)."""
    return self
class scaling_info(object):
  """Merging statistics from a scaling program, overall and per resolution bin.

  Overall values live in stats_overall (name -> value); per-bin values live in
  binned_stats (name -> list, appended in shell order).  bins is the list of
  (d_max, d_min) string pairs; d_max/d_min describe the full range.
  """
  def __init__(self, program_name="NULL"):
    self.program_name = program_name
    self.stats_overall = {}
    self.binned_stats = {}
    self.bins = None
    self.d_max = None
    self.d_min = None
    self.n_refl = None
    self.n_refl_all = None
  def set_bins(self, bins):
    """Record the resolution shells; derives d_max/d_min if not yet set."""
    for shell in bins :
      check_bin_format(shell)
    self.bins = bins
    if self.d_max is None :
      # overall range spans the first shell's low end to the last shell's high end
      self.set_d_max_min(float(bins[0][0]), float(bins[-1][1]))
  def set_n_refl(self, n_refl, n_refl_all):
    """Store unique and total observation counts."""
    self.n_refl = n_refl
    self.n_refl_all = n_refl_all
  def add_bin_stat(self, bin, stat_name, value):
    """Append *value* for *stat_name* in shell *bin* (shell is validated)."""
    check_bin_format(bin)
    self.binned_stats.setdefault(stat_name, []).append(value)
  def add_overall_stat(self, stat_name, value):
    """Store a whole-dataset statistic."""
    self.stats_overall[stat_name] = value
  def set_d_max_min(self, d_max, d_min):
    """Set the overall resolution range."""
    self.d_max = d_max
    self.d_min = d_min
  def extract_all_stats(self):
    """Return overall stats as a libtbx group_args (missing values -> None)."""
    from libtbx import group_args
    overall = self.stats_overall.get
    res_range = (float(self.bins[0][0]), float(self.bins[-1][1]))
    return group_args(d_max_min=res_range,
      n_refl=self.n_refl,
      n_refl_all=self.n_refl_all,
      completeness=float_or_none(overall("completeness", None)),
      multiplicity=float_or_none(overall("multiplicity", None)),
      r_sym=float_or_none(overall("r_merge", None)),
      r_meas=None, # TODO?
      i_over_sigma=float_or_none(overall("i/sigma", None)))
  def extract_outer_shell_stats(self):
    """Return highest-resolution-shell stats as a libtbx group_args."""
    from libtbx import group_args
    def last_bin(stat_name):
      # final entry of each per-bin list corresponds to the outer shell
      return self.binned_stats.get(stat_name, [None])[-1]
    outer = self.bins[-1]
    return group_args(d_max_min=(float(outer[0]), float(outer[1])),
      n_refl=None, # FIXME
      n_refl_all=None,
      completeness=float_or_none(last_bin("completeness")),
      multiplicity=float_or_none(last_bin("multiplicity")),
      r_sym=float_or_none(last_bin("r_merge")),
      r_meas=None, # TODO?
      i_over_sigma=float_or_none(last_bin("i/sigma")))
class all_none(object):
  """Placeholder whose every attribute lookup resolves to None.

  Substituted for a missing processing step so formatting code can read any
  statistic attribute without guarding.
  """
  def __getattr__(self, name):
    return None
class empty_info(object):
  """Placeholder for missing scaling info.

  Both extractors return an all_none object, so every statistic reads as None.
  """
  def extract_all_stats(self):
    return all_none()
  def extract_outer_shell_stats(self):
    return all_none()
class processing_info(object):
  """Bundle of per-step data-processing metadata with REMARK 200 formatting.

  Holds experiment, integration and scaling info objects (any of which may be
  None); format_remark_200() renders the collected statistics as the PDB
  REMARK 200 block.
  """
  def __init__(self, experiment, integration, scaling):
    self.experiment = experiment    # experiment_info or None
    self.integration = integration  # integration_info or None
    self.scaling = scaling          # scaling_info or None
  def get_experiment_info(self):
    """Return the experiment info, or an all-None placeholder."""
    if (self.experiment is not None):
      return self.experiment
    return all_none() #empty_info()
  def get_integration_info(self):
    """Return the integration info, or an all-None placeholder."""
    if (self.integration is not None):
      return self.integration
    return all_none() #empty_info()
  def get_scaling_info(self):
    """Return the scaling info, or an empty_info placeholder (which supports
    both extract_all_stats and extract_outer_shell_stats)."""
    if (self.scaling is not None):
      return self.scaling
    return empty_info()
  def format_remark_200(self):
    """Render all collected statistics as a REMARK 200 block (one string)."""
    from libtbx.str_utils import format_value
    from libtbx.test_utils import approx_equal
    # NOTE: shadows the builtin format() for the duration of this method
    def format(obj, attr, fs="%.4f"):
      # getattr default handles both a None stats object and a missing attr
      value = getattr(obj, attr, None)
      return format_value(fs, value, replace_none_with="NULL").strip()
    e = None
    if self.experiment is not None :
      e = self.experiment.extract_all_stats()
    i = None
    if self.integration is not None :
      i = self.integration.extract_all_stats()
    s = None
    if self.scaling is not None :
      s = self.scaling.extract_all_stats()
    lines = []
    lines.append("")
    lines.append("EXPERIMENTAL DETAILS")
    lines.append(" EXPERIMENT TYPE                : X-RAY DIFFRACTION")
    lines.append(" DATE OF DATA COLLECTION        : NULL")
    lines.append(" TEMPERATURE           (KELVIN) : NULL")
    lines.append(" PH                             : NULL")
    lines.append(" NUMBER OF CRYSTALS USED        : NULL")
    lines.append("")
    # TODO
    # prefer the experiment's wavelength; fall back to the integration log's
    wavelength = getattr(e, "wavelength", None)
    if (wavelength is None):
      wavelength = getattr(i, "wavelength", None)
    synchrotron = "NULL"
    if (wavelength is not None):
      # approx_equal prints a diagnostic on mismatch; capture it so nothing
      # leaks to stdout.  NOTE(review): cStringIO is Python-2-only -- a py3
      # port would need io.StringIO here and at the top-level import.
      out = cStringIO.StringIO()
      # presumably testing against the Cu/Mo K-alpha home-source wavelengths;
      # anything else is assumed synchrotron radiation -- TODO confirm
      if (not approx_equal(wavelength, 1.5418, eps=0.01, out=out) and
          not approx_equal(wavelength, 0.7107, eps=0.01, out=out)):
        synchrotron = "Y"
      else :
        synchrotron = "N"
      wl = "%.4f" % wavelength
    else :
      wl = "NULL"
    lines.append(" SYNCHROTRON              (Y/N) : %s" % synchrotron)
    lines.append(" RADIATION SOURCE               : NULL")
    lines.append(" BEAMLINE                       : NULL")
    lines.append(" X-RAY GENERATOR MODEL          : NULL")
    lines.append(" MONOCHROMATIC OR LAUE    (M/L) : M")
    lines.append(" WAVELENGTH OR RANGE        (A) : %s" % wl)
    lines.append(" MONOCHROMATOR                  : NULL")
    lines.append(" OPTICS                         : NULL")
    lines.append("")
    int_software = getattr(self.integration, "program_name", "NULL")
    lines.append(" DETECTOR TYPE                  : NULL")
    lines.append(" DETECTOR MANUFACTURER          : NULL")
    lines.append(" INTENSITY-INTEGRATION SOFTWARE : %s" % int_software)
    scale_software = getattr(self.scaling, "program_name", "NULL")
    lines.append(" DATA SCALING SOFTWARE          : %s" % scale_software)
    lines.append("")
    lines.append("OVERALL.")
    comp_overall = format(s, "completeness", "%.1f")
    mult_overall = format(s, "multiplicity", "%.1f")
    rmerg_overall = format(s, "r_sym", "%.5f")
    s2n_overall = format(s, "i_over_sigma", "%.4f")
    lines.append(" COMPLETENESS FOR RANGE     (%%) : %s" % comp_overall)
    lines.append(" DATA REDUNDANCY                : %s" % mult_overall)
    lines.append(" R MERGE                    (I) : %s" % rmerg_overall)
    lines.append(" R SYM                      (I) : NULL")
    lines.append(" <I/SIGMA(I)> FOR THE DATA SET  : %s" % s2n_overall)
    lines.append("")
    lines.append("IN THE HIGHEST RESOLUTION SHELL.")
    shell = None
    if self.scaling is not None :
      shell = self.scaling.extract_outer_shell_stats()
    (_d_max, _d_min) = getattr(shell, "d_max_min", (None, None))
    d_max = format_value("%.2f", _d_max, replace_none_with="NULL").strip()
    d_min = format_value("%.2f", _d_min, replace_none_with="NULL").strip()
    comp_lastbin = format(shell, "completeness", "%.1f")
    mult_lastbin = format(shell, "multiplicity", "%.1f")
    rmerg_lastbin = format(shell, "r_sym", "%.5f")
    s2n_lastbin = format(shell, "i_over_sigma", "%.4f")
    lines.append(" HIGHEST RESOLUTION SHELL, RANGE HIGH (A) : %s" % d_min)
    lines.append(" HIGHEST RESOLUTION SHELL, RANGE LOW  (A) : %s" % d_max)
    lines.append(" COMPLETENESS FOR SHELL     (%%) : %s" % comp_lastbin)
    lines.append(" DATA REDUNDANCY IN SHELL       : %s" % mult_lastbin)
    lines.append(" R MERGE FOR SHELL          (I) : %s" % rmerg_lastbin)
    lines.append(" R SYM FOR SHELL            (I) : NULL")
    lines.append(" <I/SIGMA(I)> FOR SHELL         : %s" % s2n_lastbin)
    lines.append("")
    # prefix every line with the REMARK 200 record tag
    remark_lines = [ "REMARK 200 %s" % line for line in lines ]
    return "\n".join(remark_lines)
#-----------------------------------------------------------------------
# PARSERS
#
def parse_denzo(lines):
  """Scan Denzo/HKL-2000 log lines for wavelength and detector distance.

  Returns an integration_info labelled "HKL-2000"; fields not found in the
  log stay None.
  """
  info = integration_info("HKL-2000")
  for raw in lines :
    stripped = raw.strip()
    if stripped.startswith("Wavelength "):
      # take the first whitespace-separated token that parses as a number
      for token in stripped.split():
        try :
          value = float(token)
        except ValueError :
          continue
        info.set_wavelength(value)
        break
    elif stripped.startswith("Detector to crystal distance"):
      # value is the fifth token: "Detector to crystal distance <mm> ..."
      info.set_distance(float(stripped.split()[4]))
  return info
def parse_mosflm(lines):
  """Scan MOSFLM log lines for wavelength, detector distance and swing angle.

  Returns an integration_info labelled "MOSFLM"; fields not found stay None.

  Bug fix: the "Detector Parameters" loop previously ended with an
  unconditional break, so only the header line itself was ever examined and
  the distance/swing-angle values were never parsed.  The loop now scans
  forward (bounded to 100 lines) and is guarded against running past the end
  of the input.
  """
  info = integration_info("MOSFLM")
  for i, line in enumerate(lines):
    line = line.strip()
    if line.startswith("Beam Parameters"):
      j = i
      while (j < len(lines)):
        line = lines[j].strip()
        if line.startswith("Wavelength"):
          wavelength = float(line.split()[1])
          info.set_wavelength(wavelength)
          break
        j += 1
    elif line.startswith("Detector Parameters"):
      j = i
      # scan the detector-parameters section (bounded, and clamped to input)
      while (j < (i + 100)) and (j < len(lines)):
        line = lines[j].strip()
        if line.startswith("Crystal to detector distance"):
          fields = line.split()
          distance = float(fields[-2])
          info.set_distance(distance)
        elif line.startswith("Detector swing angle"):
          fields = line.split()
          twotheta = float(fields[-2])
          info.set_twotheta(twotheta)
        j += 1
  return info
def parse_scalepack(lines):
    """Extract merging statistics from a scalepack log.

    Walks the per-shell summary tables and records redundancy,
    completeness, <I/sigma>, and R-merge, both per resolution bin and
    overall, into a scaling_info object.
    """
    n_lines = len(lines)
    mode = 0  # NOTE(review): never read below -- looks vestigial; confirm.
    # NOTE(review): program label "SCALA" inside a scalepack parser looks
    # like a copy/paste slip from parse_scala -- confirm intended.
    info = scaling_info("SCALA")
    def is_table_end(fields):
        # Every shell table closes with a summary row beginning "All hkl".
        return (fields[0] == "All" and fields[1] == "hkl")
    n_refl_all = None
    n_refl = None
    for i, line in enumerate(lines):
        if ("intensities and R-factors by batch number" in line):
            # Total observation count: scan forward for the "All films" row.
            j = i + 3
            while j < n_lines :
                line2 = lines[j].strip()
                if line2.startswith("All films"):
                    n_refl_all = int(line2.split()[2])
                    break
                j+= 1
        elif "Summary of observation redundancies by shells" in line :
            # Resolution bin boundaries (d_max, d_min) come from the first
            # two columns of the redundancy table.
            bins = []
            j = i + 3
            while (j < (i+100)):
                line2 = lines[j]
                fields = line2.strip().split()
                if is_table_end(fields):
                    n_refl = int(fields[-1])
                    info.set_n_refl(n_refl, n_refl_all)
                    break
                else :
                    bin_d_max_min = (fields[0], fields[1])
                    bins.append(bin_d_max_min)
                j += 1
            assert (len(bins) > 0)
            info.set_bins(bins)
        elif "Average Redundancy Per Shell" in line :
            j = i + 3
            while (j < (i+100)):
                line2 = lines[j]
                fields = line2.strip().split()
                if is_table_end(fields):
                    info.add_overall_stat("multiplicity", fields[-1])
                    break
                else :
                    bin = (fields[0], fields[1])
                    info.add_bin_stat(bin, "multiplicity", fields[-1])
                j += 1
        elif "I/Sigma in resolution shells:" in line :
            # NOTE(review): this table is headed "I/Sigma" but its last
            # column is stored under "completeness" (in scalepack logs this
            # table lists percentages of reflections per I/sigma cutoff) --
            # confirm the stat name is intended.
            j = i + 3
            while (j < (i+100)):
                line2 = lines[j]
                fields = line2.strip().split()
                if is_table_end(fields):
                    info.add_overall_stat("completeness", fields[-1])
                    break
                else :
                    bin = (fields[0], fields[1])
                    info.add_bin_stat(bin, "completeness", fields[-1])
                j += 1
        elif "Summary of reflections intensities and R-factors by shells" in line :
            # First skip ahead to the table header row (first field
            # "limit"), then read per-bin rows until the "All reflections"
            # summary row.
            j = i
            while (j < (i+100)):
                line2 = lines[j]
                fields = line2.strip().split()
                j += 1
                if (len(fields) > 0 and fields[0] == "limit"):
                    break
            while (j < (i+200)):
                line2 = lines[j]
                fields = line2.strip().split()
                i_mean = float(fields[2])
                sig_i_mean = float(fields[3])
                r_merge = fields[-2] # XXX -1 (linear) or -2 (square) ???
                if (fields[0] == "All" and fields[1] == "reflections"):
                    info.add_overall_stat("i/sigma", "%.2f" % (i_mean / sig_i_mean))
                    info.add_overall_stat("r_merge", r_merge)
                    break
                else :
                    bin = (fields[0], fields[1])
                    info.add_bin_stat(bin, "i/sigma", "%.2f" % (i_mean / sig_i_mean))
                    info.add_bin_stat(bin, "r_merge", r_merge)
                j += 1
    return info
def parse_scala(lines):
from iotbx import data_plots
info = scaling_info("SCALA")
tables = data_plots.import_ccp4i_logfile(log_lines=lines)
d_max = None
for i, line in enumerate(lines):
if ("Summary data for " in line):
if (lines[i+1].startswith("</p>")) or ("<br" in line):
continue
j = i
n_refl = None
n_refl_all = None
while (j < len(lines)):
line = lines[j].strip()
if line.startswith("Low resolution limit"):
d_max = float(line.split()[3])
elif line.startswith("Rmerge") and (not "bin" in line):
info.add_overall_stat("r_merge", float(line.split()[1]))
elif line.startswith("Total number of observations"):
n_refl_all = float(line.split()[4])
elif line.startswith("Total number unique"):
n_refl = float(line.split()[3])
info.set_n_refl(n_refl, n_refl_all)
elif (line.startswith("Mean((I)/sd(I))") or
line.startswith("Mean(I)/sd(I)")):
info.add_overall_stat("i/sigma", float(line.split()[1]))
elif line.startswith("Completeness"):
info.add_overall_stat("completeness", | |
#Imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import glob
import random
# import imgaug
# from imgaug import augmenters as iaa
from PIL import Image
from tqdm import tqdm
import matplotlib.pyplot as plt
import openslide
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
# from tensorflow.keras.models import Model
# from tensorflow.keras.layers import Input, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, concatenate, Concatenate, UpSampling2D, Activation
# from tensorflow.keras.losses import categorical_crossentropy
# from tensorflow.keras.applications.densenet import DenseNet121
# from tensorflow.keras.optimizers import Adam
# from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard
# from tensorflow.keras import metrics
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms # noqa
import sklearn.metrics
import io
import itertools
from six.moves import range
import time
import cv2
from skimage.color import rgb2hsv
from skimage.filters import threshold_otsu
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.getcwd())))
from models.seg_models import unet_densenet121, get_inception_resnet_v2_unet_softmax
# Random Seeds
np.random.seed(0)
random.seed(0)
tf.set_random_seed(0)
# import tifffile
# import skimage.io as io
import pandas as pd
import json
# Image Helper Functions
def imsave(*args, **kwargs):
    """
    Concatenate the images given in args and saves them as a single image in the specified output destination.
    Images should be numpy arrays and have same dimensions along the 0 axis.
    imsave(im1,im2,out="sample.png")

    Keyword arguments:
        out: output path; when empty/omitted the PIL image is returned.

    Returns the PIL image (no `out` given), or 0 on invalid input/failure.
    """
    args_list = list(args)
    for i in range(len(args_list)):
        # Reject anything that is not a numpy array.
        if not isinstance(args_list[i], np.ndarray):
            print("Not a numpy array")
            return 0
        # Promote grayscale (H, W) images to 3 channels so they can be
        # concatenated with RGB images.
        if len(args_list[i].shape) == 2:
            args_list[i] = np.dstack([args_list[i]]*3)
        # Heuristic: a max of exactly 1 suggests a [0, 1] float mask;
        # rescale it to the [0, 255] display range.
        if args_list[i].max() == 1:
            args_list[i] = args_list[i]*255
    out_destination = kwargs.get("out",'')
    try:
        concatenated_arr = np.concatenate(args_list,axis=1)
        im = Image.fromarray(np.uint8(concatenated_arr))
    except Exception as e:
        # Report the failure and bail out.  (The previous version dropped
        # into ipdb.set_trace() here, which hangs any non-interactive run.)
        print(e)
        return 0
    if out_destination:
        print("Saving to %s" % out_destination)
        im.save(out_destination)
    else:
        return im
def imshow(*args, **kwargs):
    """Display one or more images side by side in a single figure.

    Usage:
        imshow(img1, title="myPlot")
        imshow(img1, img2, title=['title1', 'title2'])
        imshow(img1, img2, cmap='hot')
        imshow(img1, img2, cmap=['gray', 'Blues'])
    """
    cmap = kwargs.get('cmap', 'gray')
    title = kwargs.get('title', '')
    axis_off = kwargs.get('axis_off', '')
    if len(args) == 0:
        raise ValueError("No images given to imshow")
    if len(args) == 1:
        plt.title(title)
        plt.imshow(args[0], interpolation='none')
    else:
        count = len(args)
        # Broadcast a single cmap/title string to every subplot.
        if type(cmap) == str:
            cmap = [cmap] * count
        if type(title) == str:
            title = [title] * count
        plt.figure(figsize=(count * 5, 10))
        for idx, image in enumerate(args):
            plt.subplot(1, count, idx + 1)
            plt.title(title[idx])
            plt.imshow(image, cmap[idx])
            if axis_off:
                plt.axis('off')
    plt.show()
def normalize_minmax(data):
    """
    Min-max normalize `data` to the [0, 1] range.

    Returns an array of zeros (same shape/dtype as `data`) when the input
    is constant, avoiding a divide-by-zero.
    """
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    _min = float(np.min(data))
    _max = float(np.max(data))
    if (_max - _min) != 0:
        img = (data - _min) / (_max - _min)
    else:
        img = np.zeros_like(data)
    return img
# Functions
def BinMorphoProcessMask(mask):
    """
    Clean up a binary tissue mask with morphological operations:
    closing (fills small holes), opening (removes speckle), then a
    dilation that grows the tissue region outward.
    """
    closed = cv2.morphologyEx(np.array(mask), cv2.MORPH_CLOSE,
                              np.ones((20, 20), dtype=np.uint8))
    opened = cv2.morphologyEx(np.array(closed), cv2.MORPH_OPEN,
                              np.ones((5, 5), dtype=np.uint8))
    dilated = cv2.dilate(opened, np.ones((20, 20), dtype=np.uint8),
                         iterations=1)
    return dilated
def get_bbox(cont_img, rgb_image=None):
    """Return the bounding boxes of all external contours in a binary image.

    :param cont_img: single-channel binary image to find contours in
    :param rgb_image: optional RGB image; when given, a copy is returned
        with contours drawn in blue and bounding boxes in green
    :returns: (bounding_boxes, rgb_contour) where bounding_boxes is a list
        of (x, y, width, height) tuples from cv2.boundingRect and
        rgb_contour is the annotated copy, or None when no image was given
    """
    contours, _ = cv2.findContours(cont_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    rgb_contour = None
    bounding_boxes = [cv2.boundingRect(c) for c in contours]
    if rgb_image is not None:
        rgb_contour = rgb_image.copy()
        line_color = (0, 0, 255)  # blue color code
        cv2.drawContours(rgb_contour, contours, -1, line_color, 2)
        # Draw the boxes only when an image was supplied.  Previously this
        # loop also ran with rgb_contour=None, which cv2.rectangle rejects,
        # breaking the mask-only callers (e.g. get_all_bbox_masks).
        for x, y, w, h in bounding_boxes:
            rgb_contour = cv2.rectangle(rgb_contour, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return bounding_boxes, rgb_contour
def get_all_bbox_masks(mask, stride_factor):
    """
    Return a mask that is 1 inside every contour bounding box, each box
    padded by `stride_factor` pixels and clamped to the mask bounds.
    """
    bbox_mask = np.zeros_like(mask)
    boxes, _ = get_bbox(mask)
    y_size, x_size = bbox_mask.shape
    for x, y, w, h in boxes:
        # Pad the box, then clamp it to the mask extents.
        x_lo = max(x - stride_factor, 0)
        y_lo = max(y - stride_factor, 0)
        x_hi = x + w + stride_factor
        y_hi = y + h + stride_factor
        if x_hi > x_size:
            x_hi = x_size - 1
        if y_hi > y_size:
            y_hi = y_size - 1
        bbox_mask[y_lo:y_hi, x_lo:x_hi] = 1
    return bbox_mask
def get_all_bbox_masks_with_stride(mask, stride_factor):
    """
    Like get_all_bbox_masks, but marks only every `stride_factor`-th
    row/column inside each padded bounding box (a sparse sampling grid).
    """
    bbox_mask = np.zeros_like(mask)
    boxes, _ = get_bbox(mask)
    y_size, x_size = bbox_mask.shape
    for x, y, w, h in boxes:
        # Pad the box, then clamp it to the mask extents.
        x_lo = max(x - stride_factor, 0)
        y_lo = max(y - stride_factor, 0)
        x_hi = x + w + stride_factor
        y_hi = y + h + stride_factor
        if x_hi > x_size:
            x_hi = x_size - 1
        if y_hi > y_size:
            y_hi = y_size - 1
        bbox_mask[y_lo:y_hi:stride_factor, x_lo:x_hi:stride_factor] = 1
    return bbox_mask
def find_largest_bbox(mask, stride_factor):
    """
    Return a mask that is 1 inside the single bounding box enclosing all
    pixels equal to 1 in `mask`, padded by `stride_factor` pixels and
    clamped to the mask bounds.
    """
    # np.where on a 2-D array yields (row, col) index arrays; `x` indexes
    # rows here, so the row count is x's limit.  (The original assignment
    # was swapped, which mis-clamped non-square masks.)
    x_size, y_size = mask.shape
    x, y = np.where(mask == 1)
    bbox_mask = np.zeros_like(mask)
    x_min = np.min(x) - stride_factor
    x_max = np.max(x) + stride_factor
    y_min = np.min(y) - stride_factor
    y_max = np.max(y) + stride_factor
    if x_min < 0:
        x_min = 0
    if y_min < 0:
        y_min = 0
    if x_max > x_size:
        x_max = x_size - 1
    # Bug fix: this previously tested `y_min > y_size`, so y_max was never
    # clamped.
    if y_max > y_size:
        y_max = y_size - 1
    bbox_mask[x_min:x_max, y_min:y_max] = 1
    return bbox_mask
def TissueMaskGeneration(slide_obj, level, RGB_min=50):
    """Generate a boolean tissue mask from a WSI at the given pyramid level.

    A pixel counts as tissue when it is saturated in HSV (Otsu threshold
    on S), is not background-bright in all three RGB channels
    (per-channel Otsu), and exceeds RGB_min in every channel.
    """
    region = slide_obj.read_region((0, 0), level, slide_obj.level_dimensions[level])
    img_RGB = np.transpose(region.convert('RGB'), axes=[1, 0, 2])
    img_HSV = rgb2hsv(img_RGB)
    # Background = bright in R AND G AND B (each channel Otsu-thresholded).
    bright = [img_RGB[:, :, c] > threshold_otsu(img_RGB[:, :, c]) for c in range(3)]
    tissue_RGB = np.logical_not(bright[0] & bright[1] & bright[2])
    # Saturated pixels (tissue is colored; glass is gray/white).
    tissue_S = img_HSV[:, :, 1] > threshold_otsu(img_HSV[:, :, 1])
    # Discard near-black pixels (pen marks, borders).
    above_min = ((img_RGB[:, :, 0] > RGB_min) &
                 (img_RGB[:, :, 1] > RGB_min) &
                 (img_RGB[:, :, 2] > RGB_min))
    return tissue_S & tissue_RGB & above_min
def TissueMaskGenerationPatch(patchRGB):
    '''
    Return a boolean tissue mask for an RGB patch using the fixed
    per-channel thresholds set by PAIP: a pixel is tissue when
    R < 235 or G < 210 or B < 235.
    '''
    thresholds = np.array([235, 210, 235])
    below = patchRGB[:, :, :3] < thresholds
    return below.any(axis=-1)
def TissueMaskGeneration_BIN(slide_obj, level):
    """Binary tissue mask from HSV saturation: every pixel with S > 0 maps
    to 255, everything else to 0.

    Note: the slide is read as RGB but converted with COLOR_BGR2HSV,
    matching the behavior of the original implementation.
    """
    rgb = np.array(slide_obj.read_region((0, 0), level,
                                         slide_obj.level_dimensions[level]).convert('RGB'))
    rgb = np.transpose(rgb, axes=[1, 0, 2])
    saturation = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)[:, :, 1]
    _, tissue_mask = cv2.threshold(saturation, 0, 255, cv2.THRESH_BINARY)
    return np.array(tissue_mask)
def TissueMaskGeneration_BIN_OTSU(slide_obj, level):
    """Binary tissue mask from HSV saturation using an Otsu-selected
    threshold (rather than the fixed 0 of TissueMaskGeneration_BIN).

    Note: the slide is read as RGB but converted with COLOR_BGR2HSV,
    matching the behavior of the original implementation.
    """
    rgb = np.array(slide_obj.read_region((0, 0), level,
                                         slide_obj.level_dimensions[level]).convert('RGB'))
    rgb = np.transpose(rgb, axes=[1, 0, 2])
    saturation = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)[:, :, 1]
    _, tissue_mask = cv2.threshold(saturation, 0, 255,
                                   cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return np.array(tissue_mask)
def labelthreshold(image, threshold=0.5):
    """Binarize `image`: 1 where value >= threshold, else 0, as uint8."""
    return (image >= threshold).astype(np.uint8)
def calc_jacc_score(x, y, smoothing=1):
    """Smoothed Jaccard score between two masks.

    Values > 0 are treated as 1; non-positive values are left unchanged
    (preserving the original np.place semantics).

    :returns: (sum(x*y) + smoothing) / (|x OR y| + smoothing)
    """
    # Work on copies: the previous version ran np.place on the caller's
    # arrays, silently binarizing them in place as a side effect.
    x = np.array(x)
    y = np.array(y)
    for var in [x, y]:
        np.place(var, var > 0, 1)
    numerator = np.sum(x * y)
    denominator = np.sum(np.logical_or(x, y))
    return (numerator + smoothing) / (denominator + smoothing)
def get_tumor_fraction(mask_image):
    """Fraction of nonzero pixels in `mask_image`."""
    total_pixels = mask_image.size
    return np.count_nonzero(mask_image) / total_pixels
# In[3]:
# DataLoader Implementation
class WSIStridedPatchDataset(Dataset):
"""
Data producer that generate all the square grids, e.g. 3x3, of patches,
from a WSI and its tissue mask, and their corresponding indices with
respect to the tissue mask
"""
    def __init__(self, wsi_path, mask_path, label_path=None, image_size=256,
                 normalize=True, flip='NONE', rotate='NONE',
                 level=5, sampling_stride=16, roi_masking=True):
        """
        Initialize the data producer.

        Arguments:
            wsi_path: string, path to WSI file
            mask_path: string, path to mask file in numpy format OR None
            label_path: string, path to ground-truth label mask in tif
                format, or None (in case of a normal WSI or at test time)
            image_size: int, size of the square patch extracted at each
                grid position, e.g. 256
            normalize: bool, if normalize the [0, 255] pixel values to [-1, 1],
                mostly False for debuging purpose
            flip: string, 'NONE' or 'FLIP_LEFT_RIGHT' indicating the flip type
            rotate: string, 'NONE' or 'ROTATE_90' or 'ROTATE_180' or
                'ROTATE_270', indicating the rotate type
            level: WSI pyramid level at which the tissue mask is extracted
            sampling_stride: number of pixels to skip in the tissue mask;
                effectively the overlap fraction when patches are extracted
                during inference.
                stride=1 -> consecutive pixels are utilized
                stride=image_size/pow(2, level) -> non-overlapping patches
            roi_masking: True: multiplies the strided WSI with the tissue
                mask to eliminate white space;
                False: runs inference over the entire WSI
        """
        # Stash configuration; all heavy lifting (opening the slide,
        # building the tissue mask) happens in _preprocess().
        self._wsi_path = wsi_path
        self._mask_path = mask_path
        self._label_path = label_path
        self._image_size = image_size
        self._normalize = normalize
        self._flip = flip
        self._rotate = rotate
        self._level = level
        self._sampling_stride = sampling_stride
        self._roi_masking = roi_masking
        self._preprocess()
def _preprocess(self):
self._slide = openslide.OpenSlide(self._wsi_path)
if self._label_path is not None:
self._label_slide = openslide.OpenSlide(self._label_path)
X_slide, Y_slide = self._slide.level_dimensions[0]
print("Image dimensions: (%d,%d)" %(X_slide,Y_slide))
factor = self._sampling_stride
if self._mask_path is not None:
mask_file_name = os.path.basename(self._mask_path)
if mask_file_name.endswith('.tiff'):
mask_obj = openslide.OpenSlide(self._mask_path)
self._mask = np.array(mask_obj.read_region((0, 0),
self._level,
mask_obj.level_dimensions[self._level]).convert('L')).T
np.place(self._mask,self._mask>0,255)
else:
# Generate tissue mask on the fly
self._mask = TissueMaskGeneration(self._slide, self._level)
# morphological operations ensure the holes are filled | |
adaptive stepsize.
#
# For now, we use a heuristic that catches very bad gradients, but is not
# perfectly accurate.
type_eps = {'float64': 1e-7,
'float32': 3e-4,
numpy.dtype('float64'): 1e-7,
numpy.dtype('float32'): 3e-4}
    def __init__(self, f, pt, eps=None, out_type=None):
        """Return the gradient of f at pt.

        :param f: a differentiable function such that f(*pt) is a scalar
        :param pt: an ndarray, a list of ndarrays or tuple of ndarrays
        :param eps: the stepsize for the finite differencing. None means
            input dtype-dependent. See `type_eps`.
        :param out_type: dtype of output, if complex (i.e. 'complex32' or
            'complex64')

        This function computes the gradient by a one-sided finite
        differences of a fixed step size (eps).
        It is assumed that f(...) will return a scalar.
        It is assumed that all f's inputs are numpy.ndarray objects.

        NOTE: Python 2-era code (`xrange`, `__builtin__`).
        """
        def prod(inputs):
            # Product of an iterable -- used to turn a shape into a size.
            rval = 1
            for i in inputs:
                rval *= i
            return rval
        # Accept a bare ndarray as well as a list/tuple of them; remember
        # whether the result must be unpacked again at the end.
        packed_pt = False
        if not isinstance(pt, (list, tuple)):
            pt = [pt]
            packed_pt = True
        apt = [numpy.array(p) for p in pt]
        shapes = [p.shape for p in apt]
        dtypes = [str(p.dtype) for p in apt]
        # TODO: remove this eventually (why was this here in the first place ?)
        # In the case of CSM, the arguments are a mixture of floats and
        # integers...
        # if not dtypes == [dtypes[0]] * len(apt):
        # raise TypeError('All function arguments must have same dtype')
        total_size = __builtin__.sum(prod(sh) for sh in shapes)
        # Work in the most precise dtype among the inputs (the one with the
        # smallest finite-difference eps).
        working_dtype = __builtin__.min((self.type_eps[dt], dt)
                                        for dt in dtypes)[1]
        # create un-initialized memory
        x = numpy.ndarray((total_size,), dtype=working_dtype)
        if (not out_type is None) and (out_type.startswith('complex')):
            gx = numpy.ndarray((total_size,), dtype=out_type)
        else:
            gx = numpy.ndarray((total_size,), dtype=working_dtype)
        if eps is None:
            # Largest per-dtype step among the inputs: safe for the least
            # precise dtype present.
            eps = __builtin__.max(self.type_eps[dt] for dt in dtypes)
        # set up aliases so that apt[i] is backed by memory in x
        # and self.gf is backed by memory in gx
        cur_pos = 0
        self.gf = []
        for i, p in enumerate(apt):
            p_size = prod(p.shape)
            # set up alias
            apt[i] = x[cur_pos: cur_pos + p_size].reshape(p.shape)
            self.gf.append(gx[cur_pos: cur_pos + p_size].reshape(p.shape))
            # initialize with p's value
            apt[i][...] = p
            cur_pos += p_size
        f_x = f(*[p.copy() for p in apt])
        # now iterate over the elements of x, and call f on apt.
        # Because apt[i] are views into x, writing the perturbed values
        # into x updates the arguments that f sees.
        x_copy = x.copy()
        for i in xrange(total_size):
            x[:] = x_copy
            x[i] += eps
            f_eps = f(*apt)
            # TODO: remove this when it is clear that the next
            # replacemement does not pose problems of its own. It was replaced
            # for its inability to handle complex variables.
            # gx[i] = numpy.asarray((f_eps - f_x) / eps)
            gx[i] = ((f_eps - f_x) / eps)
        # Un-wrap the single-input case back to a bare ndarray.
        if packed_pt:
            self.gf = self.gf[0]
@staticmethod
def abs_rel_err(a, b):
"""Return absolute and relative error between a and b.
The relative error is a small number when a and b are close, relative
to how big they are.
Formulas used:
abs_err = abs(a - b)
rel_err = abs_err / max(abs(a) + abs(b), 1e-8)
The denominator is clipped at 1e-8 to avoid dividing by 0 when a and b
are both close to 0.
The tuple (abs_err, rel_err) is returned
"""
abs_err = abs(a - b)
rel_err = abs_err / numpy.maximum(abs(a) + abs(b), 1e-8)
return (abs_err, rel_err)
def abs_rel_errors(self, g_pt):
"""Return the abs and rel error of gradient estimate `g_pt`
`g_pt` must be a list of ndarrays of the same length as self.gf,
otherwise a ValueError is raised.
Corresponding ndarrays in `g_pt` and `self.gf` must have the same
shape or ValueError is raised.
"""
if len(g_pt) != len(self.gf):
raise ValueError(
'argument has wrong number of elements',
len(g_pt))
errs = []
for i, (a, b) in enumerate(zip(g_pt, self.gf)):
if a.shape != b.shape:
raise ValueError(
'argument element %i has wrong shape %s' % (
i, str((a.shape, b.shape))))
errs.append(numeric_grad.abs_rel_err(a, b))
return errs
def max_err(self, g_pt, abs_tol, rel_tol):
"""Find the biggest error between g_pt and self.gf.
What is measured is the violation of relative and absolute errors,
wrt the provided tolerances (abs_tol, rel_tol).
A value > 1 means both tolerances are exceeded.
Return the argmax of min(abs_err / abs_tol, rel_err / rel_tol) over
g_pt, as well as abs_err and rel_err at this point.
"""
pos = []
errs = []
abs_errs = []
rel_errs = []
abs_rel_errs = self.abs_rel_errors(g_pt)
for abs_err, rel_err in abs_rel_errs:
if not numpy.all(numpy.isfinite(abs_err)):
raise ValueError('abs_err not finite', repr(abs_err))
if not numpy.all(numpy.isfinite(rel_err)):
raise ValueError('rel_err not finite', repr(rel_err))
scaled_err = numpy.minimum(abs_err / abs_tol, rel_err / rel_tol)
max_i = scaled_err.argmax()
pos.append(max_i)
errs.append(scaled_err.flatten()[max_i])
abs_errs.append(abs_err.flatten()[max_i])
rel_errs.append(rel_err.flatten()[max_i])
# max over the arrays in g_pt
max_arg = numpy.argmax(errs)
max_pos = pos[max_arg]
return (max_arg, pos[max_arg], abs_errs[max_arg], rel_errs[max_arg])
def verify_grad(fun, pt, n_tests=2, rng=None, eps=None, out_type=None, abs_tol=None,
rel_tol=None, mode=None, cast_to_output_type=False):
""" Test a gradient by Finite Difference Method. Raise error on failure.
Example:
>>> verify_grad(theano.tensor.tanh,
(numpy.asarray([[2,3,4], [-1, 3.3, 9.9]]),),
rng=numpy.random)
Raises an Exception if the difference between the analytic gradient and
numerical gradient (computed through the Finite Difference Method) of a
random projection of the fun's output to a scalar exceeds the given
tolerance.
:param fun: a Python function that takes Theano variables as inputs,
and returns a Theano variable. For instance, an Op instance with
a single output.
:param pt: the list of numpy.ndarrays to use as input values.
These arrays must be either float32 or float64 arrays.
:param n_tests: number of times to run the test
:param rng: random number generator used to sample u, we test gradient
of sum(u * fun) at pt
:param eps: stepsize used in the Finite Difference Method (Default
None is type-dependent)
:param out_type: dtype of output, if complex (i.e. 'complex32' or
'complex64')
:param abs_tol: absolute tolerance used as threshold for gradient
comparison
:param rel_tol: relative tolerance used as threshold for gradient
comparison
:note: WARNING to unit-test writers: if `op` is a function that builds
a graph, try to make it a SMALL graph. Often verify grad is run
in debug mode, which can be very slow if it has to verify a lot of
intermediate computations.
:note: This op does not support multiple outputs. In tests/test_scan.py
there is an experimental verify_grad that covers that case as well
by using random projections.
"""
from theano import compile, shared
import theano.tensor
from theano.tensor import as_tensor_variable, cast, TensorType
assert isinstance(pt, (list, tuple))
pt = [numpy.array(p) for p in pt]
for i, p in enumerate(pt):
if p.dtype not in ('float32', 'float64'):
raise TypeError(('verify_grad can work only with floating point '
'inputs, but input %i has dtype "%s".') % (i, p.dtype))
_type_tol = dict( # relative error tolerances for different types
float32=1e-2,
float64=1e-4)
if abs_tol is None:
abs_tol = __builtin__.max(_type_tol[str(p.dtype)] for p in pt)
if rel_tol is None:
rel_tol = __builtin__.max(_type_tol[str(p.dtype)] for p in pt)
if rng is None:
raise TypeError(('rng should be a valid instance of '
'numpy.random.RandomState. You may '
'want to use theano.tests.unittest'
'_tools.verify_grad instead of '
'theano.gradient.verify_grad.'))
# We allow input downcast in function, because numeric_grad works in the
# most precise dtype used among the inputs, so we may need to cast some.
def function(inputs, output):
if mode is None:
f = compile.function(inputs, output, accept_inplace=True,
allow_input_downcast=True, on_unused_input='ignore')
else:
f = compile.function(inputs, output, accept_inplace=True,
allow_input_downcast=True, mode=mode,
on_unused_input='ignore')
return f
tensor_pt = [TensorType(
as_tensor_variable(p).dtype,
as_tensor_variable(p).broadcastable)(name='input %i' % i)
for i, p in enumerate(pt)]
#fun can be either a function or an actual Op instance
o_output = fun(*tensor_pt)
if isinstance(o_output, list):
raise NotImplementedError(('cant (yet) autotest gradient of fun '
'with multiple outputs'))
# we could make loop over outputs making random projections R for each,
# but this doesn't handle the case where not all the outputs are
# differentiable... so I leave this as TODO for now -JB.
o_fn = function(tensor_pt, o_output)
o_fn_out = o_fn(*[p.copy() for p in pt])
if isinstance(o_fn_out, tuple) or isinstance(o_fn_out, list):
raise TypeError('It seems like you are trying to use verify_grad '
'on an op or a function which outputs a list: there should'
' be a single (array-like) output instead')
# | |
# <gh_stars>0
import os
import torch
import copy
import time
import random
from arguments import get_args
import numpy as np
import cma
import json
import deepspeed
from sklearn.metrics import f1_score
from tokenization_enc_dec import EncDecTokenizer
from fastNLP import cache_results, DataSet
from tester import Tester
import mpu
from utils import print_rank_0, save_rank_0
from utils import setup_model_and_optimizer, set_random_seed, initialize_distributed, get_model, get_checkpoint_iteration
from utils import hinge_loss
from samplers import DistributedBatchSampler, RandomSampler
from dataloader import ChnSentiCorpLoader, THUCNewsLoader, AFQMCLoader
from metrics import ChnSentiCorpMetric, THUCNewsMetric, AFQMCMetric
from bayes_opt import BayesianOptimization
# NOTE(review): cuDNN disabled globally -- presumably for determinism or
# compatibility with the model-parallel checkpoint; confirm.
torch.backends.cudnn.enabled = False
# Arguments.
args = get_args()
# Black-box prompt-tuning hyperparameters (parsed in arguments.py).
task_name = args.task_name
n_prompt_tokens = args.n_prompt_tokens
intrinsic_dim = args.intrinsic_dim
k_shot = args.k_shot
batch_size = args.batch_size
budget = args.budget
alg = args.alg
random_proj = args.random_proj
loss_type = args.loss_type
print_every = args.print_every
eval_every = args.eval_every
cat_or_add = args.cat_or_add
bound = args.bound
# Pytorch distributed.
initialize_distributed(args)
device = torch.cuda.current_device()
mpu.model_parallel_cuda_manual_seed(args.seed)
# Random seeds for reproducibility.
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# 'cat' mode prepends a pre-trained prompt embedding; 'add' starts without one.
if cat_or_add == 'add':
    init_prompt_path = None
else:
    init_prompt_path = './nli_base_prompt.pt'
# Number of output classes per supported task.
if task_name in ['chnsenticorp', 'afqmc']:
    num_labels = 2
elif task_name in ['thuc_news']:
    num_labels = 10
else:
    raise ValueError(f'Task name `{task_name}` not supported.')
# The results directory name encodes the full hyperparameter configuration.
save_path = f'results/{task_name}_results/D_{n_prompt_tokens * 1024}_d_{intrinsic_dim}_data_{k_shot * num_labels}_{alg}_range_{bound}_loss_{loss_type}_budget_{budget}_seed_{args.seed}_{cat_or_add}_{random_proj}'
print(f'Results will be saved in {save_path}')
# Skip configurations whose results already exist on disk.
if os.path.exists(save_path):
    print('Experiment already run.')
    exit()
class LMForwardAPI:
    def __init__(self, n_prompt_tokens=50, save_path='./results', loss_type='hinge', init_prompt_path=None):
        """Wrap the frozen LM behind a derivative-free-optimization API.

        Candidate solutions live in a low-dimensional space of size
        `intrinsic_dim` (module-level global) and are projected up to the
        prompt-embedding space by a fixed random linear map.

        Arguments:
            n_prompt_tokens: number of soft-prompt tokens
            save_path: directory for results (created if needed)
            loss_type: 'hinge', 'ce' or 'perf'
            init_prompt_path: optional pre-trained prompt embedding p_0,
                only used when the module-level `cat_or_add` is 'cat'
        """
        self.tokenizer = EncDecTokenizer(os.path.join(args.tokenizer_path, 'vocab.txt'))
        self.model = self._get_model()
        self.model.eval()
        self.concat_prompt = cat_or_add == 'cat'
        if self.concat_prompt:
            if init_prompt_path is not None:
                print(f'Initialize prompt embedding from {init_prompt_path}')
                self.init_prompt = torch.load(init_prompt_path).weight.cpu().reshape(-1)
            else:
                print('Initial prompt embedding not found. Initialize to zero embedding.')
                self.init_prompt = torch.zeros(n_prompt_tokens * 4096)
            print(f'Shape of initial prompt embedding: {self.init_prompt.shape}')
        else:
            self.init_prompt = None
        # Fixed random projection A: intrinsic_dim -> prompt-embedding dim.
        self.linear = torch.nn.Linear(intrinsic_dim, n_prompt_tokens * 4096, bias=False)
        if random_proj == 'normal':
            # Scale the projection so its outputs roughly match the value
            # range of a sample of the model's word embeddings.
            embedding = self.model.module.module.module.word_embeds.weight.data.clone().cpu()
            embedding = embedding[1000: 2000]
            print(embedding)
            emb_range = float(torch.max(torch.abs(embedding.max()), torch.abs(embedding.min())).detach().numpy())
            std = emb_range / (np.sqrt(9 * intrinsic_dim - emb_range * emb_range))
            print('Range of embedding: {}'.format(emb_range))
            print('Std for the random projection: {}'.format(std))
            for p in self.linear.parameters():
                torch.nn.init.normal_(p, 0.0, std)
        # Bookkeeping for the optimization loop.
        self.best_train_loss = float('inf')
        self.best_dev_loss = float('inf')
        self.best_prompt = None
        self.num_call = 0
        self.save_path = save_path
        self.print_every = print_every
        self.eval_every = eval_every
        self.loss_type = loss_type
        # Task-specific metric (all use accuracy as the reported key).
        if task_name == 'chnsenticorp':
            self.metric = ChnSentiCorpMetric(target='label', pred='logits', tokenizer=self.tokenizer)
            self.metric_key = 'acc'
            self.metric_name = 'ChnSentiCorpMetric'
        elif task_name == 'thuc_news':
            self.metric = THUCNewsMetric(target='label', pred='logits', tokenizer=self.tokenizer)
            self.metric_key = 'acc'
            self.metric_name = 'THUCNewsMetric'
        elif task_name == 'afqmc':
            self.metric = AFQMCMetric(target='label', pred='logits', tokenizer=self.tokenizer)
            self.metric_key = 'acc'
            self.metric_name = 'AFQMCMetric'
        else:
            raise NotImplementedError
        if save_path is not None:
            os.makedirs(save_path, exist_ok=True)
        self.margin = self.metric.margin
        # NOTE(review): `reduce` is a deprecated boolean flag on
        # CrossEntropyLoss; 'sum' here is truthy but does NOT select sum
        # reduction (that would be reduction='sum').  Confirm intent.
        self.ce_loss = torch.nn.CrossEntropyLoss(reduce='sum')
    @staticmethod
    def _get_model():
        """Build the DeepSpeed-wrapped model-parallel LM and load its
        checkpoint (weights, iteration counter and, optionally, RNG
        states), returning it in eval mode.
        """
        with open(args.deepspeed_config, "r") as f:
            ds_config = json.load(f)
        ds_config["gradient_accumulation_steps"] = args.gradient_accumulation_steps
        ds_config["train_micro_batch_size_per_gpu"] = args.batch_size
        # Model initialization
        # NOTE(review): `tokenizer` is not defined in this (static) scope
        # nor at module level -- as written this raises NameError; probably
        # the EncDecTokenizer instance should be constructed or passed in.
        model = get_model(args, tokenizer.vocab_size, None)
        model, _, _, _ = deepspeed.initialize(
            model=model,
            args=args,
            mpu=mpu,
            dist_init_required=False,
            config_params=ds_config
        )
        iteration, release, success = get_checkpoint_iteration(args)
        checkpoint_name, sd = model.load_checkpoint(
            args.load,
            iteration,
            load_module_strict=False,
            load_optimizer_states=args.load_oprimizer_states, # dont fix oprimizer
            load_lr_scheduler_states=args.load_lr_scheduler_states
        )
        # Recover the training iteration from the checkpoint metadata.
        if args.finetune or release:
            iteration = 0
        else:
            try:
                iteration = sd['iteration']
            except KeyError:
                try: # Backward compatible with older checkpoints
                    iteration = sd['total_iters']
                except KeyError:
                    print_rank_0('A metadata file exists but Unable to load iteration '
                                 ' from checkpoint {}, exiting'.format(checkpoint_name))
                    exit()
        # rng states.
        if not release and not args.finetune and not args.no_load_rng:
            try:
                random.setstate(sd['random_rng_state'])
                np.random.set_state(sd['np_rng_state'])
                torch.set_rng_state(sd['torch_rng_state'])
                torch.cuda.set_rng_state(sd['cuda_rng_state'])
                mpu.get_cuda_rng_tracker().set_states(sd['rng_tracker_states'])
            except KeyError:
                print_rank_0('Unable to load optimizer from checkpoint {}, exiting. '
                             'Specify --no-load-optim or --finetune to prevent '
                             'attempting to load the optimizer '
                             'state.'.format(checkpoint_name))
                exit()
        torch.distributed.barrier()
        if mpu.get_data_parallel_rank() == 0:
            print_rank_0(' successfully loaded {}'.format(checkpoint_name))
        args.iteration = iteration
        model.eval()
        return model
def calc_metric(self, logits, target):
label_map = self.metric.label_map
converted_target = target.clone()
for key, val in label_map.items():
converted_target[target == key] = val
interest_index = list(label_map.keys())
logits = logits[:, interest_index]
pred = logits.argmax(dim=-1)
if self.metric_key == 'acc':
perf = (pred == converted_target).sum() / len(target)
elif self.metric_key == 'f1':
perf = f1_score(converted_target.detach().cpu().numpy().tolist(),
pred.detach().cpu().numpy().tolist())
else:
raise KeyError(f'[Metric] Only support [acc, f1], got {self.metric_key} instead.')
if self.loss_type == 'hinge':
loss = hinge_loss(logits, converted_target, margin=self.margin, reduce='sum').item() / len(target)
elif self.loss_type == 'ce':
loss = self.ce_loss(logits, converted_target).item() / len(target)
elif self.loss_type == 'perf':
loss = -1 * perf
else:
raise KeyError(f'[Loss] Only support [hinge, ce, perf], got {self.loss_type} instead.')
return loss, perf
def eval(self, prompt_embedding=None, test_data=None):
    """Black-box-tuning objective: score a candidate prompt embedding.

    Projects the low-dimensional candidate(s) z through ``self.linear``
    (z -> Az, optionally + p_0), installs the result in the wrapped model,
    then either scores ``test_data`` with a Tester (returns test metric) or
    computes the loss on the module-level ``train_data`` (returns that
    loss), tracking best train/dev results and appending progress files
    under ``self.save_path`` as side effects.

    NOTE(review): relies on module-level globals (train_data, dev_data,
    batch_size, device, n_prompt_tokens, DataSet, Tester, mpu, ...) —
    confirm they are initialized before the first call.
    """
    self.num_call += 1
    print_rank_0(f'# call {self.num_call}')
    if prompt_embedding is None:
        # Fall back to the best prompt found so far (used at test time).
        prompt_embedding = self.best_prompt
    if test_data is None:
        bsz = len(dev_data['enc_input_ids'])  # batch size of dev data is the original batch size of training data
    else:
        bsz = batch_size  # for test data
    # Keep the raw (pre-projection) candidate so it can be saved as best_prompt.
    tmp_prompt = copy.deepcopy(prompt_embedding)  # list or numpy.ndarray
    if isinstance(prompt_embedding, list):  # multiple queries (one candidate per worker)
        pe_list = []
        for pe in prompt_embedding:
            z = torch.tensor(pe).type(torch.float32)  # z
            z = self.linear(z)  # Az
            if self.init_prompt is not None:
                z = z + self.init_prompt  # Az + p_0
            # One prompt per training example: repeat across the batch.
            pe_list.append(z.reshape(n_prompt_tokens, -1).repeat(bsz, 1, 1))
        prompt_embedding = torch.cat(pe_list)  # num_workers*bsz x prompt_len x dim
        assert len(prompt_embedding) == len(train_data['enc_input_ids'])
    elif isinstance(prompt_embedding, np.ndarray):  # single query or None
        prompt_embedding = torch.tensor(prompt_embedding).type(torch.float32)  # z
        prompt_embedding = self.linear(prompt_embedding)  # Az
        if self.init_prompt is not None:
            prompt_embedding = prompt_embedding + self.init_prompt  # Az + p_0
        prompt_embedding = prompt_embedding.reshape(n_prompt_tokens, -1).repeat(bsz, 1, 1)
    else:
        raise ValueError(
            f'[Prompt Embedding] Only support [list, numpy.ndarray], got `{type(prompt_embedding)}` instead.'
        )
    prompt_embedding = prompt_embedding.to(device).half()
    # Triple .module: unwraps stacked distributed/fp16 wrappers — TODO confirm wrapper depth.
    self.model.module.module.module.set_prompt_embedding(prompt_embedding)
    if isinstance(test_data, DataSet):
        # --- test path: score the single installed prompt on the test set ---
        if prompt_embedding.shape[0] > bsz:
            raise ValueError('Provide a single prompt embedding for testing.')
        test_tester = Tester(data=test_data, model=self.model, metrics=self.metric, batch_size=batch_size,
                             num_workers=4, device=device)
        results = test_tester.test()
        test_acc = results[self.metric_name][self.metric_key]
        # fitlog.add_best_metric(test_acc, name='test_acc')
        return test_acc
    else:
        # --- train path: forward the whole few-shot training batch at once ---
        for k, v in train_data.items():
            train_data[k] = v.to(device)
        with torch.no_grad():
            logits = self.model(
                enc_input_ids=train_data['enc_input_ids'],
                enc_attention_mask=train_data['enc_attention_mask'],
                dec_input_ids=train_data['dec_input_ids'],
                dec_attention_mask=train_data['dec_attention_mask'],
                cross_attention_mask=train_data['cross_attention_mask']
            )['lm_logits']
        # Gather vocab-parallel logit shards from all model-parallel ranks,
        # concatenate along the vocab dim, keep decoder position 1
        # (presumably the answer token — verify against the data pipeline).
        logits_list = [torch.zeros_like(logits) for _ in range(mpu.get_model_parallel_world_size())]
        torch.distributed.all_gather(logits_list, logits, mpu.get_model_parallel_group())
        logits = torch.cat(logits_list, dim=-1)[:, 1, :]
        loss, perf = self.calc_metric(logits, train_data['label'])
        # fitlog.add_loss(loss, name=self.loss_type, step=self.num_call)
        # fitlog.add_metric(perf, name='train_acc', step=self.num_call)
        if loss < self.best_train_loss:
            self.best_train_loss = loss
            # fitlog.add_best_metric(self.best_train_perf, name='train_acc')
            # Train loss improved: re-check the dev set and keep the raw
            # candidate as best_prompt if the dev loss improves too.
            for k, v in dev_data.items():
                dev_data[k] = v.to(device)
            with torch.no_grad():
                logits = self.model(
                    enc_input_ids=dev_data['enc_input_ids'],
                    enc_attention_mask=dev_data['enc_attention_mask'],
                    dec_input_ids=dev_data['dec_input_ids'],
                    dec_attention_mask=dev_data['dec_attention_mask'],
                    cross_attention_mask=dev_data['cross_attention_mask']
                )['lm_logits']
            logits_list = [torch.zeros_like(logits) for _ in range(mpu.get_model_parallel_world_size())]
            torch.distributed.all_gather(logits_list, logits, mpu.get_model_parallel_group())
            logits = torch.cat(logits_list, dim=-1)[:, 1, :]
            dev_loss, dev_perf = self.calc_metric(logits, dev_data['label'])
            # fitlog.add_metric(dev_perf, name='dev_acc', step=self.num_call)
            if dev_loss <= self.best_dev_loss:
                self.best_dev_loss = dev_loss
                # fitlog.add_best_metric(self.best_dev_perf, name='dev_acc')
                self.best_prompt = copy.deepcopy(tmp_prompt)
        if torch.distributed.get_rank() == 0:
            if self.save_path is not None:
                # Append one TSV progress line per API call (rank 0 only).
                with open(os.path.join(self.save_path, 'train_acc.txt'), 'a') as fout:
                    fout.write(f'{self.num_call}\t{loss}\t{perf}\n')
        if self.num_call % self.print_every == 0:
            print_rank_0(
                '[# API Calls {}] loss: {}. Current perf: {}. Best loss so far: {}'.format(
                    self.num_call,
                    round(float(loss), 4),
                    round(float(perf), 4),
                    round(float(self.best_train_loss), 4)))
        if self.num_call % self.eval_every == 0:
            # Periodic full dev evaluation, independent of the train-loss check.
            for k, v in dev_data.items():
                dev_data[k] = v.to(device)
            with torch.no_grad():
                logits = self.model(
                    enc_input_ids=dev_data['enc_input_ids'],
                    enc_attention_mask=dev_data['enc_attention_mask'],
                    dec_input_ids=dev_data['dec_input_ids'],
                    dec_attention_mask=dev_data['dec_attention_mask'],
                    cross_attention_mask=dev_data['cross_attention_mask']
                )['lm_logits']
            logits_list = [torch.zeros_like(logits) for _ in range(mpu.get_model_parallel_world_size())]
            torch.distributed.all_gather(logits_list, logits, mpu.get_model_parallel_group())
            logits = torch.cat(logits_list, dim=-1)[:, 1, :]
            dev_loss, dev_perf = self.calc_metric(logits, dev_data['label'])
            # fitlog.add_metric(dev_perf, name='dev_acc', step=self.num_call)
            if dev_loss <= self.best_dev_loss:
                self.best_dev_loss = dev_loss
                # fitlog.add_best_metric(self.best_dev_perf, name='dev_acc')
                self.best_prompt = copy.deepcopy(tmp_prompt)
            if torch.distributed.get_rank() == 0:
                if self.save_path is not None:
                    with open(os.path.join(self.save_path, 'dev_loss.txt'), 'a') as fout:
                        fout.write(f'{self.num_call}\t{dev_loss}\t{dev_perf}\n')
            print_rank_0('Dev loss: {}. Dev perf: {}. Best dev loss: {}'.format(
                round(float(dev_loss), 4),
                round(float(dev_perf), 4),
                round(float(self.best_dev_loss), 4)))
        print_rank_0('********* Done *********')
        print_rank_0(f'train loss = {loss}')
        return loss
# Tokenizer for the enc-dec model; vocab file lives next to the checkpoint.
tokenizer = EncDecTokenizer(os.path.join(args.tokenizer_path, 'vocab.txt'))
# Cache file keyed by task, prompt length and seed so each configuration
# gets its own preprocessed data cache.
cache_fn = f"caches/data_{task_name}_{n_prompt_tokens}_{args.seed}.pt"
# Task name -> loader class (presumably fastNLP-style loaders — confirm).
DataLoader = {
    'chnsenticorp': ChnSentiCorpLoader,
    'thuc_news': THUCNewsLoader,
    'afqmc': AFQMCLoader
}
# The decorator caches the returned bundle in `cache_fn`; with
# _refresh=False later runs reload from the cache instead of re-tokenizing
# (fastNLP `cache_results` semantics — TODO confirm).
@cache_results(cache_fn, _refresh=False)
def get_data(task_name, tokenizer):
    """Load the data bundle for `task_name`, reserving prompt token slots."""
    if task_name in ['chnsenticorp', 'thuc_news']:
        splits = ['train', 'test']
    else:  # for datasets without test set, we use dev set
        splits = ['train', 'validation']
    # if args.cat_or_add == 'cat':
    #     data_bundle = DataLoader[task_name](tokenizer=tokenizer, n_prompt_tokens=0).my_load(splits)
    # else:
    data_bundle = DataLoader[task_name](tokenizer=tokenizer, n_prompt_tokens=n_prompt_tokens).my_load(splits)
    return data_bundle
def construct_true_few_shot_data(train_data, k_shot):
    """Carve a true k-shot train/dev split out of `train_data`.

    Examples are visited in a random order; for each label the first
    `k_shot` examples go to the new training set and the next `k_shot`
    to the new dev set.  Examples with negative labels are dropped.
    """
    few_train = DataSet()
    few_dev = DataSet()
    train_counts = {}
    dev_counts = {}
    order = [_ for _ in range(len(train_data))]
    np.random.shuffle(order)
    for idx in order:
        label = train_data[idx]['label']
        if label < 0:
            # Negative label marks an invalid/unlabeled example.
            continue
        train_counts.setdefault(label, 0)
        dev_counts.setdefault(label, 0)
        if train_counts[label] < k_shot:
            few_train.append(train_data[idx])
            train_counts[label] += 1
        elif dev_counts[label] < k_shot:
            few_dev.append(train_data[idx])
            dev_counts[label] += 1
    field_names = ("enc_input_ids", "enc_attention_mask", 'cross_attention_mask',
                   "dec_input_ids", "dec_attention_mask", "label")
    for split in (few_train, few_dev):
        split.set_input(*field_names)
        split.set_target("label")
    return few_train, few_dev
# Load the (cached) data bundle and pick the evaluation split.
data_bundle = get_data(task_name=task_name, tokenizer=tokenizer)
if task_name in ['chnsenticorp', 'thuc_news']:
    train_data, test_data = data_bundle.get_dataset('train'), data_bundle.get_dataset('test')
else:
    # Tasks without a test split: evaluate on the validation split instead.
    train_data, test_data = data_bundle.get_dataset('train'), data_bundle.get_dataset('validation')
# Carve a true k-shot train/dev split out of the training data.
train_data, dev_data = construct_true_few_shot_data(train_data, k_shot)
if torch.distributed.get_rank() == 0:
    print('# of train data: {}'.format(len(train_data)))
    print('Example:')
    print(train_data[0])
    print('\n# of dev data: {}'.format(len(dev_data)))
    print('Example:')
    print(dev_data[0])
    print('\n# of test data: {}'.format(len(test_data)))
    print('Example:')
    print(test_data[0])
# Materialize the DataSet fields as plain tensors so the whole few-shot
# batch can be forwarded at once; masks gain a broadcast dim via unsqueeze(1).
train_data = {
    'enc_input_ids': torch.tensor(train_data['enc_input_ids'].get(list(range(len(train_data))))),
    'enc_attention_mask': train_data['enc_attention_mask'].get(list(range(len(train_data)))).unsqueeze(1),
    'cross_attention_mask': train_data['cross_attention_mask'].get(list(range(len(train_data)))).unsqueeze(1),
    'dec_input_ids': train_data['dec_input_ids'].get(list(range(len(train_data)))),
    'dec_attention_mask': train_data['dec_attention_mask'].get(list(range(len(train_data)))).unsqueeze(1),
    'label': torch.tensor(train_data['label'].get(list(range(len(train_data))))),
}
dev_data = {
    'enc_input_ids': torch.tensor(dev_data['enc_input_ids'].get(list(range(len(dev_data))))),
    'enc_attention_mask': dev_data['enc_attention_mask'].get(list(range(len(dev_data)))).unsqueeze(1),
    'cross_attention_mask': dev_data['cross_attention_mask'].get(list(range(len(dev_data)))).unsqueeze(1),
    'dec_input_ids': dev_data['dec_input_ids'].get(list(range(len(dev_data)))),
    'dec_attention_mask': dev_data['dec_attention_mask'].get(list(range(len(dev_data)))).unsqueeze(1),
    'label': torch.tensor(dev_data['label'].get(list(range(len(dev_data))))),
}
# Objective wrapper queried by the black-box optimizer.
model_forward_api = LMForwardAPI(
    n_prompt_tokens=n_prompt_tokens,
    save_path=save_path,
    loss_type=loss_type,
    init_prompt_path=init_prompt_path
)
if | |
#!/bin/python
import sys
import numpy as np
class TITAN:
    '''Titan orbit functions and parametes.

    Reads a NAIF ephemeris dump, extracts the orbital milestones
    (equinoxes, solstices, perihelion, aphelion) and converts between
    calendar dates and solar longitude (Ls).
    '''
    def __init__(self):
        '''Init default parameters (precomputed with NAIF Space Kernels).'''
        self.Tday = 15.945  # Earth days / 1 Saturn orbit
        # Default orbit parameters calculated with NAIF Space Kernels
        self.date = [np.datetime64('1980-01-02'), np.datetime64('2032-12-31')]
        self.obl = 26.730882944988142          # obliquity (deg)
        self.orbit = np.timedelta64(10751, 'D')  # one Titan/Saturn year
        self.eq_v = [np.datetime64('1980-02-22'), np.datetime64('2009-07-30')]
        self.sol_s = [np.datetime64('1987-11-25'), np.datetime64('2017-05-14')]
        self.eq_a = [np.datetime64('1995-11-07'), np.datetime64('2025-04-24')]
        self.sol_w = [np.datetime64('2002-10-23'), np.datetime64('2032-03-26')]
        # NOTE(bugfix): the perihelion/aphelion defaults were swapped.
        # Saturn's perihelion (closest, ~9.0 AU) fell in 2003-07 / 2032-11,
        # its aphelion (farthest, ~10.07 AU) in 1988-08 / 2018-04.
        self.pers = [np.datetime64('2003-07-21'), np.datetime64('2032-11-21')]
        self.aphs = [np.datetime64('1988-08-31'), np.datetime64('2018-04-07')]
        self.r_v = 9.443302157356690   # Sun distance at vernal equinox (AU)
        self.r_s = 10.030529604959204  # ... at summer solstice
        self.r_a = 9.587968538637037   # ... at autumnal equinox
        self.r_w = 9.031185737728954   # ... at winter solstice
        self.r_per = 9.0077428463331   # perihelion distance (AU)
        self.r_aph = 10.0728723166560  # aphelion distance (AU)
        self.r_peri = self.r_per       # backward-compatible alias
        self.peris = self.pers         # backward-compatible alias
        # Ls fit: 360*(date - eq_v)/orbit = Ls + A*sin(2*pi*(Ls-C)/360) + B
        self.A = 6.1664830805512354
        self.B = 6.0482745790986066
        self.C = 101.03535416292833

    def __repr__(self):
        return 'Titan orbit functions and parametes'

    def read(self, fname='NAIF-Titan.dat', usecols=(0, 5, 6, 7), skiprows=21):
        '''Read an orbital input file.

        Parameters
        -----------
        fname : str
            Input file location
        usecols : tuple
            List of columns containing:
                - T : date ('YYYY-MM-DD' format)
                - Z : elevation of the subsolar point (km)
                - R : sub-point radius (km)
                - D : distance to the Sun (km)
        skiprows : int
            Number of header lines to skip.

        Init
        -----
        date : numpy.array( numpy.datetime64[D] )
            Date of the calculated position of the planet.
        lat : numpy.array
            Sub solar point latitude (degN).
        dist : numpy.array
            Distance to the Sun (U.A.)
        '''
        ua = 149597870.7  # 1 Astronomical unit (km)
        T, Z, R, D = np.loadtxt(fname, unpack=True, dtype=(str),
                                skiprows=skiprows, usecols=usecols)
        self.date = T.astype(dtype='datetime64[D]')
        # Sub-solar latitude from the sub-point elevation/radius ratio.
        self.lat = np.degrees(np.arcsin(np.double(Z) / np.double(R)))
        self.dist = np.double(D) / ua

    def setObliquity(self):
        '''Set the planet obliquity = Solar latitude maximal extension.'''
        self.obl = np.max(np.abs(self.lat))

    def setEquinox(self):
        '''Search for the equinox dates and the orbit duration.'''
        eqs = []
        d_eq = self.date[0]
        l_eq = 90.
        # Candidate equinoxes: |sub-solar latitude| near zero.
        cond = (np.abs(self.lat) < .05)
        for l, d in zip(np.abs(self.lat[cond]), self.date[cond]):
            if l < l_eq:
                # Still descending towards the local minimum.
                l_eq = l
                d_eq = d
            elif (d - d_eq) > np.timedelta64(20, 'D'):
                # Gap of more than 20 days: previous minimum was an equinox.
                l_eq = 90
                eqs.append(d_eq)
        if d_eq != self.date[-1]:
            eqs.append(d_eq)
        # Split equinoxes (Vernal/Autumnal) by the sign of the first crossing.
        if self.lat[cond][0] < 0:
            self.eq_v = eqs[::2]   # Vernal equinox
            self.eq_a = eqs[1::2]  # Autumnal equinox
        else:
            self.eq_v = eqs[1::2]  # Vernal equinox
            self.eq_a = eqs[::2]   # Autumnal equinox
        self.r_v = self.dist[self.date == self.eq_v[0]][0]
        self.r_a = self.dist[self.date == self.eq_a[0]][0]
        # Orbit duration = time between two consecutive vernal equinoxes.
        if len(self.eq_v) < 2:
            raise ValueError('You need at least 2 vernal equinox to estimate the orbit duration')
        self.orbit = self.eq_v[1] - self.eq_v[0]

    def setSolstice(self):
        '''Search for the solstice dates.'''
        sols = []
        d_sol = self.date[0]
        l_sol = 0.
        # Candidate solstices: |latitude| within 0.1% of the obliquity.
        cond = (np.abs(self.lat) > .999 * self.obl)
        for l, d in zip(np.abs(self.lat[cond]), self.date[cond]):
            if l > l_sol:
                l_sol = l
                d_sol = d
            elif (d - d_sol) > self.orbit / 4:
                # More than a quarter orbit later: previous max was a solstice.
                l_sol = 0
                sols.append(d_sol)
        if d_sol != self.date[-1]:
            sols.append(d_sol)
        # Split solstices: the one between vernal and autumnal equinox is summer.
        if self.eq_v[0] < sols[0] and sols[0] < self.eq_a[0]:
            self.sol_s = sols[::2]   # Summer solstice
            self.sol_w = sols[1::2]  # Winter solstice
        else:
            self.sol_s = sols[1::2]  # Summer solstice
            self.sol_w = sols[::2]   # Winter solstice
        self.r_s = self.dist[self.date == self.sol_s[0]][0]
        self.r_w = self.dist[self.date == self.sol_w[0]][0]

    def setPerihelion(self):
        '''Search for the perihelion positions (closest distance to the Sun).'''
        # BUGFIX: previously stored only as r_peri/peris while __init__ and
        # dump() read r_per/pers, so dump() printed stale defaults.
        self.r_per = np.min(self.dist)
        self.r_peri = self.r_per  # keep old attribute name working
        pers = []
        d_peri = self.date[0]
        l_peri = 1.01 * self.r_per
        cond = (self.dist < 1.01 * self.r_per)
        for l, d in zip(self.dist[cond], self.date[cond]):
            if l < l_peri:
                l_peri = l
                d_peri = d
            elif (d - d_peri) > self.orbit / 4:
                l_peri = 1.01 * self.r_per
                pers.append(d_peri)
        if d_peri != self.date[-1]:
            pers.append(d_peri)
        self.pers = pers   # read by dump()
        self.peris = pers  # keep old attribute name working

    def setAphelion(self):
        '''Search for the aphelion positions (farthest distance to the Sun).'''
        self.r_aph = np.max(self.dist)
        aphs = []
        d_aph = self.date[0]
        l_aph = .99 * self.r_aph
        cond = (self.dist > .99 * self.r_aph)
        for l, d in zip(self.dist[cond], self.date[cond]):
            if l > l_aph:
                l_aph = l
                d_aph = d
            elif (d - d_aph) > self.orbit / 4:
                l_aph = .99 * self.r_aph
                aphs.append(d_aph)
        if d_aph != self.date[-1]:
            aphs.append(d_aph)
        self.aphs = aphs

    def fitLs(self, plot=False):
        '''Fit the Solar longitude correction parameters A, B, C.'''
        import scipy.optimize as sp
        Ls = np.array([0, 90, 180, 270])
        date = np.array([self.eq_v[0], self.sol_s[0], self.eq_a[0], self.sol_w[0]])
        # Residual between the linear (circular-orbit) Ls and the true Ls.
        dLs = 360. * (date - self.eq_v[0]).astype(float) / self.orbit.astype(float) - Ls
        param_bounds = ([-360, -360, 0], [360, 360, 180])

        def f(t, A, B, C):
            return A * np.sin(2 * np.pi * (t - C) / 360.) + B

        popt, _ = sp.curve_fit(f, Ls, dLs, bounds=param_bounds)
        self.A, self.B, self.C = popt
        if plot:
            import matplotlib.pyplot as plt
            x = np.linspace(0, 360, 100)
            plt.figure()
            plt.plot(Ls, dLs, 'ro')
            plt.plot(x, f(x, *popt), 'b-')
            plt.show()

    def reload(self):
        '''Reload all with default parameters.'''
        self.read()
        self.setObliquity()
        self.setEquinox()
        self.setSolstice()
        self.setAphelion()
        self.setPerihelion()
        self.fitLs()
        self.dump()

    def dump(self):
        '''Dump all the orbital parameters.'''
        orbit_y = self.orbit.astype('timedelta64[Y]').astype(int)
        orbit_m = self.orbit.astype('timedelta64[M]').astype(int) - 12 * orbit_y
        orbit_d = self.orbit.astype(int) - np.timedelta64(orbit_y, 'Y').astype('timedelta64[D]').astype(int) - np.timedelta64(orbit_m, 'M').astype('timedelta64[D]').astype(int)
        spring = self.sol_s[0] - self.eq_v[0]
        summer = self.eq_a[0] - self.sol_s[0]
        autumn = self.sol_w[0] - self.eq_a[0]
        winter = self.eq_v[1] - self.sol_w[0]
        # Single-argument print(...) works under both Python 2 and 3.
        print('Date coverage: %s | %s' % (self.date[0], self.date[-1]))
        print('Orbit : %s (Earth) = %i years %i months %i days | %s (Titan)' % (self.orbit, orbit_y, orbit_m, orbit_d, self.orbit / self.Tday))
        print('Equinox (V): %s ' % (' | '.join(str(eq) for eq in self.eq_v)))
        print('Solstice (S): %s ' % (' | '.join(str(sol) for sol in self.sol_s)))
        print('Equinox (A): %s ' % (' | '.join(str(eq) for eq in self.eq_a)))
        print('Solstice (W): %s ' % (' | '.join(str(sol) for sol in self.sol_w)))
        print('Obliquity : %.2f deg' % self.obl)
        print('N Spring : %s (Earth) | %s (Titan) | Ls = 0 | R = %.2f UA' % (spring, spring / self.Tday, self.r_v))
        print('N Summer : %s (Earth) | %s (Titan) | Ls = 90 | R = %.2f UA' % (summer, summer / self.Tday, self.r_s))
        print('N Autumn : %s (Earth) | %s (Titan) | Ls = 180 | R = %.2f UA' % (autumn, autumn / self.Tday, self.r_a))
        print('N Winter : %s (Earth) | %s (Titan) | Ls = 270 | R = %.2f UA' % (winter, winter / self.Tday, self.r_w))
        print('Perihelion : %s | %.2f UA' % (' | '.join(str(per) for per in self.pers), self.r_per))
        print('Aphelion : %s | %.2f UA' % (' | '.join(str(aph) for aph in self.aphs), self.r_aph))
        print('360*(Date - Eq_V)/orbit = Ls + A * sin[2.PI/360 * (Ls - C)] + B')
        print('with: A = %.2f | B = %.2f | C = %.2f' % (self.A, self.B, self.C))

    def date2ls(self, date, eps=1.e-7, imax=25):
        r'''Calculate the solar longitude corresponding to a date.

        Parameters
        -----------
        date : str, numpy.datetime64
            Input date (YYYY-MM-DD or YYYY/MM/DD)
        eps : float, optional
            Precision of the convergence
        imax : int, optional
            Maximum number of iterations to reach convergence; a
            ValueError is raised otherwise.

        Note
        -----
        The value of Ls is the solution of a transcendental equation which
        is numerically solved with the Newton method:
        $$L_s^0 = 360 \cdot \frac{\text{Date} - \text{Eq}^V}{\text{Orbit}} - B\\
        L_s^{n+1} = L_s^n - \frac{
        L_s^n - L_s^0 + A\cdot\sin\left(2\pi\cdot \frac{L_s^n - C}{360} \right)
        }{
        1 + A\cdot\frac{2\pi}{360}\cos\left(2\pi\cdot \frac{L_s^n - C}{360} \right)
        }$$

        Return
        -------
        Ls : real
            Solar longitude corresponding to the input date
        '''
        if type(date) == str:
            date = np.datetime64(date.replace('/', '-'), 'D')
        Ls_0 = ((360. * (date - self.eq_v[0]).astype(int)) / self.orbit.astype(float) - self.B) % 360
        Ls = Ls_0
        for ii in range(imax):
            # Newton step.  BUGFIX: the derivative of sin is cos — the
            # denominator previously used np.sin, contradicting the
            # docstring formula and slowing/breaking convergence.
            dLs = - (Ls - Ls_0 + self.A * np.sin(2 * np.pi * (Ls - self.C) / 360.)) \
                / (1 + self.A * 2 * np.pi / 360. * np.cos(2 * np.pi * (Ls - self.C) / 360.))
            Ls = Ls + dLs
            # BUGFIX: was `np.abs(dLs.all())`, which tests the truthiness of
            # the elements, not the step size; and the old `ii >= imax`
            # check was unreachable (range(imax) ends at imax-1).
            if np.all(np.abs(dLs) < eps):
                break
        else:
            raise ValueError('Max number of iteration reach without getting convergence.')
        return Ls % 360

    def ls2date(self, Ls, Ty=0):
        '''Calculate the date corresponding to a solar longitude.

        Parameters
        -----------
        Ls : real
            Input solar longitude
        Ty : int, optional
            Number of Titan years after 1980-02-22 (Vernal Equinox before Voyager 1)

        Return
        -------
        date : numpy.datetime64
            Date corresponding to the input solar longitude
        '''
        date = np.round(self.orbit.astype(int) / 360. * (Ls + self.A * np.sin(2 * np.pi * (Ls - self.C) / 360.) + self.B + 360 * Ty))
        return self.eq_v[0] + np.timedelta64(int(date), 'D')
if __name__ == '__main__':
titan = TITAN()
if len(sys.argv) > 1:
for arg in sys.argv:
if | |
a = self.RPythonAnnotator()
t = a.translator
s = a.build_types(f, [])
assert s.knowntype == int
graph = tgraphof(t, A.__del__.im_func)
assert graph.startblock in a.annotated
def test_annotate_type(self):
    # type() of an instance (merged with type(None) from the first call)
    # must be annotated as SomeType.
    class A:
        pass
    x = [A(), A()]
    def witness(t):
        return type(t)
    def get(i):
        return x[i]
    def f(i):
        witness(None)
        return witness(get(i))
    a = self.RPythonAnnotator()
    s = a.build_types(f, [int])
    assert isinstance(s, annmodel.SomeType)

def test_annotate_iter_empty_container(self):
    # Iterating empty prebuilt containers contributes nothing, so the
    # accumulator must be annotated as the constant 0.
    def f():
        n = 0
        d = {}
        for x in []: n += x
        for y in d: n += y
        for z in d.iterkeys(): n += z
        for s in d.itervalues(): n += s
        for t, u in d.items(): n += t * u
        for t, u in d.iteritems(): n += t * u
        return n
    a = self.RPythonAnnotator()
    s = a.build_types(f, [])
    assert s.is_constant()
    assert s.const == 0
def test_mixin(self):
    # A _mixin_ method is duplicated into each inheriting class, so the
    # int and str uses do not get merged into one over-general signature.
    class Mixin(object):
        _mixin_ = True
        def m(self, v):
            return v
    class Base(object):
        pass
    class A(Base, Mixin):
        pass
    class B(Base, Mixin):
        pass
    class C(B):
        pass
    def f():
        a = A()
        v0 = a.m(2)
        b = B()
        v1 = b.m('x')
        c = C()
        v2 = c.m('y')
        return v0, v1, v2
    a = self.RPythonAnnotator()
    s = a.build_types(f, [])
    assert isinstance(s.items[0], annmodel.SomeInteger)
    assert isinstance(s.items[1], annmodel.SomeChar)
    assert isinstance(s.items[2], annmodel.SomeChar)

def test_mixin_staticmethod(self):
    # Same duplication behavior as above, but through a staticmethod.
    class Mixin(object):
        _mixin_ = True
        @staticmethod
        def m(v):
            return v
    class Base(object):
        pass
    class A(Base, Mixin):
        pass
    class B(Base, Mixin):
        pass
    class C(B):
        pass
    def f():
        a = A()
        v0 = a.m(2)
        b = B()
        v1 = b.m('x')
        c = C()
        v2 = c.m('y')
        return v0, v1, v2
    a = self.RPythonAnnotator()
    s = a.build_types(f, [])
    assert isinstance(s.items[0], annmodel.SomeInteger)
    assert isinstance(s.items[1], annmodel.SomeChar)
    assert isinstance(s.items[2], annmodel.SomeChar)

def test_mixin_first(self):
    # When the mixin comes first in the MRO its method wins (returns 4).
    class Mixin(object):
        _mixin_ = True
        def foo(self): return 4
    class Base(object):
        def foo(self): return 5
    class Concrete(Mixin, Base):
        pass
    def f():
        return Concrete().foo()
    assert f() == 4
    a = self.RPythonAnnotator()
    s = a.build_types(f, [])
    assert s.const == 4

def test_mixin_last(self):
    # When the base comes first in the MRO the base method wins (returns 5).
    class Mixin(object):
        _mixin_ = True
        def foo(self): return 4
    class Base(object):
        def foo(self): return 5
    class Concrete(Base, Mixin):
        pass
    def f():
        return Concrete().foo()
    assert f() == 5
    a = self.RPythonAnnotator()
    s = a.build_types(f, [])
    assert s.const == 5

def test_mixin_concrete(self):
    # A method defined on the concrete class shadows the mixin's.
    class Mixin(object):
        _mixin_ = True
        def foo(self): return 4
    class Concrete(Mixin):
        def foo(self): return 5
    def f():
        return Concrete().foo()
    assert f() == 5
    a = self.RPythonAnnotator()
    s = a.build_types(f, [])
    assert s.const == 5
def test_multiple_mixins_mro(self):
    # an obscure situation, but it occurred in module/micronumpy/types.py:
    # a diamond of mixins must be resolved following the MRO (B wins).
    class A(object):
        _mixin_ = True
        def foo(self): return 1
    class B(A):
        _mixin_ = True
        def foo(self): return 2
    class C(A):
        _mixin_ = True
    class D(B, C):
        _mixin_ = True
    class Concrete(D):
        pass
    def f():
        return Concrete().foo()
    assert f() == 2
    a = self.RPythonAnnotator()
    s = a.build_types(f, [])
    assert s.const == 2

def test_multiple_mixins_mro_2(self):
    # C inherits foo only through A; B overrides it and comes later in
    # the bases, yet the MRO still picks B's foo (returns 2).
    class A(object):
        _mixin_ = True
        def foo(self): return 1
    class B(A):
        _mixin_ = True
        def foo(self): return 2
    class C(A):
        _mixin_ = True
    class Concrete(C, B):
        pass
    def f():
        return Concrete().foo()
    assert f() == 2
    a = self.RPythonAnnotator()
    s = a.build_types(f, [])
    assert s.const == 2

def test_cannot_use_directly_mixin(self):
    # A _mixin_ class must never be instantiated or isinstance-checked
    # directly; both must raise AnnotatorError.
    class A(object):
        _mixin_ = True
    #
    def f():
        return A()
    a = self.RPythonAnnotator()
    py.test.raises(AnnotatorError, a.build_types, f, [])
    #
    class B(object):
        pass
    x = B()
    def g():
        return isinstance(x, A)
    py.test.raises(AnnotatorError, a.build_types, g, [])
def test_import_from_mixin(self):
    # objectmodel.import_from_mixin copies M.f into each class, so the
    # int and str attribute types stay separate.
    class M(object):
        def f(self):
            return self.a
    class I(object):
        objectmodel.import_from_mixin(M)
        def __init__(self, i):
            self.a = i
    class S(object):
        objectmodel.import_from_mixin(M)
        def __init__(self, s):
            self.a = s
    def f(n):
        return (I(n).f(), S("a" * n).f())
    assert f(3) == (3, "aaa")
    a = self.RPythonAnnotator()
    s = a.build_types(f, [int])
    assert isinstance(s.items[0], annmodel.SomeInteger)
    assert isinstance(s.items[1], annmodel.SomeString)

def test___class___attribute(self):
    # Reading __class__ yields a PBC of class descriptions; x may be any
    # of the 4 seen classes while y is exactly B.
    class Base(object): pass
    class A(Base): pass
    class B(Base): pass
    class C(A): pass
    def seelater():
        C()
    def f(n):
        if n == 1:
            x = A()
        else:
            x = B()
        y = B()
        result = x.__class__, y.__class__
        seelater()
        return result
    a = self.RPythonAnnotator()
    s = a.build_types(f, [int])
    assert isinstance(s.items[0], annmodel.SomePBC)
    assert len(s.items[0].descriptions) == 4
    assert isinstance(s.items[1], annmodel.SomePBC)
    assert len(s.items[1].descriptions) == 1
def test_slots(self):
    # check that the annotator ignores slots instead of being
    # confused by them showing up as 'member' objects in the class
    class A(object):
        __slots__ = ('a', 'b')
    def f(x):
        a = A()
        a.b = x
        return a.b
    a = self.RPythonAnnotator()
    s = a.build_types(f, [int])
    assert s.knowntype == int

def test_slots_reads(self):
    # Reading an attribute that only a subclass defines, through a
    # variable that may be the slotted base, must raise NoSuchAttrError.
    class A(object):
        __slots__ = ()
    class B(A):
        def __init__(self, x):
            self.x = x
    def f(x):
        if x:
            a = A()
        else:
            a = B(x)
        return a.x # should explode here
    a = self.RPythonAnnotator()
    with py.test.raises(NoSuchAttrError) as excinfo:
        a.build_types(f, [int])
    # this should explode on reading the attribute 'a.x', but it can
    # sometimes explode on 'self.x = x', which does not make much sense.
    # But it looks hard to fix in general: we don't know yet during 'a.x'
    # if the attribute x will be read-only or read-write.

def test_unboxed_value(self):
    # An UnboxedValue subclass exposes its single unboxed attribute;
    # reading it must annotate as int.
    class A(object):
        __slots__ = ()
    class C(A, objectmodel.UnboxedValue):
        __slots__ = unboxedattrname = 'smallint'
    def f(n):
        return C(n).smallint
    a = self.RPythonAnnotator()
    s = a.build_types(f, [int])
    assert s.knowntype == int
def test_annotate_bool(self):
    # Every arithmetic/unary operation on a bool argument must promote
    # the annotation to int.
    def f(x):
        return ~x
    a = self.RPythonAnnotator()
    s = a.build_types(f, [bool])
    assert s.knowntype == int

    def f(x):
        return -x
    a = self.RPythonAnnotator()
    s = a.build_types(f, [bool])
    assert s.knowntype == int

    def f(x):
        return +x
    a = self.RPythonAnnotator()
    s = a.build_types(f, [bool])
    assert s.knowntype == int

    def f(x):
        return abs(x)
    a = self.RPythonAnnotator()
    s = a.build_types(f, [bool])
    assert s.knowntype == int

    def f(x):
        return int(x)
    a = self.RPythonAnnotator()
    s = a.build_types(f, [bool])
    assert s.knowntype == int

    # Mixed bool/int addition promotes to int in either argument order.
    def f(x, y):
        return x + y
    a = self.RPythonAnnotator()
    s = a.build_types(f, [bool, int])
    assert s.knowntype == int
    a = self.RPythonAnnotator()
    s = a.build_types(f, [int, bool])
    assert s.knowntype == int

def test_annotate_rarith(self):
    # rarithmetic integer types must keep their exact knowntype and
    # signedness whether seen as a prebuilt constant, a fresh instance,
    # or a plain argument.
    inttypes = [int, r_uint, r_longlong, r_ulonglong]
    for inttype in inttypes:
        c = inttype()
        def f():
            return c
        a = self.RPythonAnnotator()
        s = a.build_types(f, [])
        assert isinstance(s, annmodel.SomeInteger)
        assert s.knowntype == inttype
        assert s.unsigned == (inttype(-1) > 0)

    for inttype in inttypes:
        def f():
            return inttype(0)
        a = self.RPythonAnnotator()
        s = a.build_types(f, [])
        assert isinstance(s, annmodel.SomeInteger)
        assert s.knowntype == inttype
        assert s.unsigned == (inttype(-1) > 0)

    for inttype in inttypes:
        def f(x):
            return x
        a = self.RPythonAnnotator()
        s = a.build_types(f, [inttype])
        assert isinstance(s, annmodel.SomeInteger)
        assert s.knowntype == inttype
        assert s.unsigned == (inttype(-1) > 0)

def test_annotate_rshift(self):
    # Right-shifting a non-negative int stays non-negative.
    def f(x):
        return x >> 2
    a = self.RPythonAnnotator()
    s = a.build_types(f, [annmodel.SomeInteger(nonneg=True)])
    assert isinstance(s, annmodel.SomeInteger)
    assert s.nonneg
def test_prebuilt_mutables(self):
    # Two equal-but-distinct prebuilt dicts/lists must not be merged into
    # one constant; after mutation their reads must be non-constant.
    class A:
        pass
    class B:
        pass
    a1 = A()
    a2 = A()
    a1.d = {} # this tests confusion between the two '{}', which
    a2.d = {} # compare equal
    a1.l = []
    a2.l = []
    b = B()
    b.d1 = a1.d
    b.d2 = a2.d
    b.l1 = a1.l
    b.l2 = a2.l
    def dmutate(d):
        d[123] = 321
    def lmutate(l):
        l.append(42)
    def readout(d, l):
        return len(d) + len(l)
    def f():
        dmutate(b.d1)
        dmutate(b.d2)
        dmutate(a1.d)
        dmutate(a2.d)
        lmutate(b.l1)
        lmutate(b.l2)
        lmutate(a1.l)
        lmutate(a2.l)
        return readout(a1.d, a1.l) + readout(a2.d, a2.l)
    a = self.RPythonAnnotator()
    a.build_types(f, [])
    v1, v2 = graphof(a, readout).getargs()
    assert not a.binding(v1).is_constant()
    assert not a.binding(v2).is_constant()

def test_prebuilt_mutables_dont_use_eq(self):
    # test that __eq__ is not called during annotation, at least
    # when we know that the classes differ anyway
    class Base(object):
        def __eq__(self, other):
            if self is other:
                return True
            raise ValueError
        def __hash__(self):
            return 42
    class A(Base):
        pass
    class B(Base):
        pass
    a1 = A()
    a2 = B()
    a1.x = 5
    a2.x = 6
    def f():
        return a1.x + a2.x
    a = self.RPythonAnnotator()
    s = a.build_types(f, [])
    assert s.knowntype == int
def test_chr_out_of_bounds(self):
    # chr() on a possibly-out-of-range int fed back through ord() must
    # still annotate as a single char.
    def g(n, max):
        if n < max:
            return chr(n)
        else:
            return '?'
    def fun(max):
        v = g(1000, max)
        return g(ord(v), max)
    a = self.RPythonAnnotator()
    s = a.build_types(fun, [int])
    assert isinstance(s, annmodel.SomeChar)

def test_range_nonneg(self):
    # A value coming out of range(n) is known to be non-negative.
    def fun(n, k):
        for i in range(n):
            if k == 17:
                return i
        return 0
    a = self.RPythonAnnotator()
    s = a.build_types(fun, [int, int])
    assert isinstance(s, annmodel.SomeInteger)
    assert s.nonneg
def test_range_nonneg_variablestep(self):
def get_step(n):
if n == 1:
return 2
else:
return 3
def fun(n, k):
| |
<gh_stars>0
"""
File: my_drawing
Name: <NAME>
----------------------
TODO:
"""
from campy.graphics.gobjects import GOval, GRect, GLine, GPolygon, GArc, GLabel
from campy.graphics.gwindow import GWindow
# Global drawing surface shared by all drawing helpers in this file.
window = GWindow(width=600, height=800, title='We are coming in.')
def main():
    """
    the idea is use the slogan in dora-av-men and the poster in the newest movie of doraemon to generate my picture.

    Drawing order matters: later calls paint on top of earlier ones
    (background first, slogan text last).
    """
    sun()
    doraemon()
    nobita()
    wall()
    dinosaur()
    slogan()
def dinosaur():
    """Draw the green dinosaur: body, head, then the layered eye ovals."""
    # (width, height, x, y, fill color, outline color) for each oval,
    # in back-to-front paint order.
    oval_specs = (
        (250, 400, 25, 540, 'green', 'green'),            # body
        (230, 300, 100, 600, 'green', 'green'),           # head
        (30, 50, 200, 585, 'lightgreen', 'lightgreen'),   # eye white
        (15, 15, 212, 590, 'black', 'black'),             # pupil
        (5, 5, 222, 590, 'white', 'black'),               # eye highlight
    )
    for width, height, x, y, fill, outline in oval_specs:
        oval = GOval(width, height, x=x, y=y)
        oval.filled = True
        oval.fill_color = fill
        oval.color = outline
        window.add(oval)
def slogan():
    """Render the vertical slogan characters, one white GLabel each."""
    # (character, x, y) in the original paint order: right column pair,
    # far-right column, then the left column.
    label_specs = (
        ('討', 450, 680),
        ('厭', 450, 730),
        ('死', 500, 700),
        ('相', 500, 750),
        ('嚇', 550, 430),
        ('到', 550, 480),
        ('人', 550, 530),
        ('家', 550, 580),
        ('了', 550, 630),
        ('‧', 550, 680),
        ('‧', 550, 730),
        ('‧', 550, 780),
        ('我', 100, 100),
        ('們', 100, 150),
        ('進', 100, 200),
        ('來', 100, 250),
        ('囉', 100, 300),
        ('‧', 100, 350),
        ('‧', 100, 400),
        ('‧', 100, 450),
    )
    for text, x, y in label_specs:
        label = GLabel(text, x=x, y=y)
        label.font = 'courier-20'
        label.color = 'white'
        window.add(label)
def wall():
    """Add the three wall polygons (left, right backdrop, right crack)."""
    for wall_piece in (picture_wall(), picture_wall1(), picture_wall2()):
        window.add(wall_piece)
def picture_wall2():
    """Build the gray crack outline polygon on the right wall."""
    vertices = (
        (300, 0), (350, 75), (440, 100), (410, 180), (440, 220),
        (460, 280), (470, 340), (463, 400), (473, 430), (453, 470),
        (432, 520), (420, 550), (310, 635), (335, 635), (443, 550),
        (455, 520), (473, 470), (495, 430), (482, 400), (492, 340),
        (480, 280), (464, 220), (435, 180), (450, 100), (375, 75),
        (300, 0),
    )
    crack = GPolygon()
    for vertex in vertices:
        crack.add_vertex(vertex)
    crack.filled = True
    crack.fill_color = 'gray'
    crack.color = 'gray'
    return crack
def picture_wall1():
    """Return the dark-grey polygon forming the right half of the backdrop."""
    right_half = GPolygon()
    for point in [(600, 0), (300, 0), (350, 75), (440, 100),
                  (410, 180), (440, 220), (460, 280), (470, 340),
                  (463, 400), (473, 430), (453, 470), (432, 520),
                  (420, 550), (310, 635), (300, 900), (600, 900)]:
        right_half.add_vertex(point)
    right_half.filled = True
    right_half.fill_color = 'darkgrey'
    right_half.color = 'darkgrey'
    return right_half
def picture_wall():
    """Return the dark-grey polygon forming the left half of the backdrop."""
    left_half = GPolygon()
    for point in [(0, 0), (300, 0), (250, 75), (180, 100),
                  (160, 180), (170, 220), (140, 280), (230, 340),
                  (220, 400), (200, 440), (200, 500), (220, 550),
                  (310, 635), (300, 900), (0, 900)]:
        left_half.add_vertex(point)
    left_half.filled = True
    left_half.fill_color = 'darkgrey'
    left_half.color = 'grey'  # NOTE: outline is 'grey', not 'darkgrey'
    return left_half
def sun():
    """Add the light-yellow backdrop produced by sun_color() to the window."""
    window.add(sun_color())
def sun_color():
    """Return a light-yellow rectangle covering the whole 600x900 canvas.

    Despite the name, it spans the entire window and serves as the
    scene background.
    """
    backdrop = GRect(600, 900, x=0, y=0)
    backdrop.filled = True
    backdrop.color = 'lightyellow'
    backdrop.fill_color = 'lightyellow'
    return backdrop
def nobita():
    """Assemble Nobita, adding each part to the window back-to-front."""
    for build_part in (nobita_body, nobita_face, nobita_hair,
                       nobita_hair1, nobita_eye1, nobita_eye2,
                       nobita_eye3, nobita_eye4, nobita_eye5,
                       nobita_eye6, nobita_glass, nobita_nose,
                       nobita_mouth, nobita_hand, nobita_handcloth):
        window.add(build_part())
def nobita_hand():
    """Return the skin-colored quadrilateral for Nobita's outstretched arm."""
    arm = GPolygon()
    for point in [(200, 475), (310, 555), (310, 635), (200, 555)]:
        arm.add_vertex(point)
    arm.filled = True
    arm.color = 'antiquewhite'
    arm.fill_color = 'antiquewhite'
    return arm
def nobita_handcloth():
    """Return the yellow quadrilateral of sleeve covering the arm."""
    sleeve = GPolygon()
    for point in [(310, 555), (385, 505), (385, 585), (310, 635)]:
        sleeve.add_vertex(point)
    sleeve.filled = True
    sleeve.color = 'yellow'
    sleeve.fill_color = 'yellow'
    return sleeve
def nobita_body():
    """Return the tall yellow oval used as Nobita's torso."""
    torso = GOval(200, 700, x=315, y=480)
    torso.filled = True
    torso.color = 'yellow'
    torso.fill_color = 'yellow'
    return torso
def nobita_mouth():
    """Return a thin red oval with black outline for the mouth."""
    lips = GOval(50, 10, x=385, y=465)
    lips.filled = True
    lips.color = 'black'
    lips.fill_color = 'red'
    return lips
def nobita_nose():
    """Return a small open arc for the nose (outline only)."""
    nose_arc = GArc(15, 15, 45, 270, x=395, y=415)
    nose_arc.filled = False
    nose_arc.fill_color = 'antiquewhite'  # has no effect while filled is False
    nose_arc.color = 'black'
    return nose_arc
def nobita_glass():
    """Return the short horizontal line segment for the glasses."""
    return GLine(480, 390, 498, 390)
def nobita_eye5():
    """Return the small ivory highlight dot on the left pupil."""
    dot = GOval(5, 5, x=392, y=385)
    dot.filled = True
    dot.color = 'black'
    dot.fill_color = 'ivory'
    return dot
def nobita_eye6():
    """Return the small ivory highlight dot on the right pupil."""
    dot = GOval(5, 5, x=412, y=385)
    dot.filled = True
    dot.color = 'black'
    dot.fill_color = 'ivory'
    return dot
def nobita_eye4():
    """Return the black oval pupil at x=390 (left eye)."""
    pupil = GOval(10, 20, x=390, y=385)
    pupil.filled = True
    pupil.color = 'black'
    pupil.fill_color = 'black'
    return pupil
def nobita_eye3():
    """Return the black oval pupil at x=410 (right eye)."""
    pupil = GOval(10, 20, x=410, y=385)
    pupil.filled = True
    pupil.color = 'black'
    pupil.fill_color = 'black'
    return pupil
def nobita_eye2():
    """Return the large whitesmoke oval for the right eye/lens."""
    eyeball = GOval(75, 80, x=405, y=350)
    eyeball.filled = True
    eyeball.color = 'black'
    eyeball.fill_color = 'whitesmoke'
    return eyeball
def nobita_eye1():
    """Return the large whitesmoke oval for the left eye/lens."""
    eyeball = GOval(75, 80, x=330, y=350)
    eyeball.filled = True
    eyeball.color = 'black'
    eyeball.fill_color = 'whitesmoke'
    return eyeball
def nobita_hair1():
    """Return a skin-colored arc layered over the hair arc to shape the hairline."""
    hairline = GArc(150, 180, 0, 180, x=345, y=310)
    hairline.filled = True
    hairline.color = 'antiquewhite'
    hairline.fill_color = 'antiquewhite'
    return hairline
def nobita_face():
    """Return the skin-colored oval for Nobita's face (black outline)."""
    head = GOval(180, 210, x=323, y=300)
    head.filled = True
    head.color = 'black'
    head.fill_color = 'antiquewhite'
    return head
def nobita_hair():
    """Return the black filled arc forming Nobita's hair (default outline color)."""
    hair_arc = GArc(165, 200, 0, 180, x=325, y=290)
    hair_arc.filled = True
    hair_arc.fill_color = 'black'
    return hair_arc
def doraemon():
    """Assemble Doraemon, adding each part to the window back-to-front."""
    for build_part in (doraemon_hand, doraemon_body, doraemon_bodywhite,
                       doraemon_ring, doraemon_blue_head,
                       doraemon_white_head, doraemon_nose,
                       doraemon_eye1, doraemon_eye2, doraemon_eye3,
                       doraemon_eye4, doraemon_eye5, doraemon_eye6,
                       doraemon_mouth1, doraemon_smile,
                       doraemon_beard1, doraemon_beard2, doraemon_beard3,
                       doraemon_beard4, doraemon_beard5, doraemon_beard6,
                       doraemon_finger, doraemon_bag, doraemon_bell):
        window.add(build_part())
def doraemon_bell():
    """Return the small gold circle for the collar bell."""
    chime = GOval(30, 30, x=245, y=425)
    chime.filled = True
    chime.color = 'gold'
    chime.fill_color = 'gold'
    return chime
def doraemon_bodywhite():
    """Return the ivory arc drawn over the blue body (belly patch)."""
    belly = GArc(160, 750, 180, 180, x=175, y=200)
    belly.filled = True
    belly.color = 'ivory'
    belly.fill_color = 'ivory'
    return belly
def doraemon_bag():
    """Return the ivory half-arc with black outline (pocket on the belly)."""
    pouch = GArc(100, 200, 180, 180, x=210, y=425)
    pouch.filled = True
    pouch.color = 'black'
    pouch.fill_color = 'ivory'
    return pouch
def doraemon_finger():
    """Return the ivory circle at the end of the arm (the round hand)."""
    fist = GOval(50, 50, x=375, y=390)
    fist.filled = True
    fist.color = 'black'
    fist.fill_color = 'ivory'
    return fist
def doraemon_hand():
    """Return the blue rectangle for the arm (default outline color)."""
    arm = GRect(180, 30, x=225, y=400)
    arm.filled = True
    arm.fill_color = 'blue'
    return arm
def doraemon_ring():
    """Return the red half-arc for the collar (default outline color)."""
    collar = GArc(200, 180, 180, 180, x=150, y=340)
    collar.filled = True
    collar.fill_color = 'red'
    return collar
def doraemon_body():
    """Return the tall blue oval with black outline for the body."""
    torso = GOval(200, 700, x=150, y=180)
    torso.filled = True
    torso.color = 'black'
    torso.fill_color = 'blue'
    return torso
def doraemon_beard6():
    """Return the lower-right whisker line."""
    return GLine(300, 330, 365, 355)
def doraemon_beard5():
    """Return the lower-left whisker line."""
    return GLine(220, 330, 160, 355)
def doraemon_beard4():
    """Return the middle-right whisker line (horizontal)."""
    return GLine(300, 300, 365, 300)
def doraemon_beard3():
    """Return the middle-left whisker line (horizontal)."""
    return GLine(160, 300, 220, 300)
def doraemon_beard2():
    """Return the upper-left whisker line."""
    return GLine(160, 255, 220, 275)
def doraemon_beard1():
    """Return the upper-right whisker line."""
    return GLine(300, 275, 365, 255)
def doraemon_smile():
    """Return the unfilled lower half-arc forming the smile."""
    return GArc(180, 100, 180, 180, x=160, y=325)
def doraemon_mouth1():
    """Return the vertical line from nose down to the smile."""
    return GLine(260, 275, 260, 365)
def doraemon_eye6():
    """Return the small ivory highlight dot on the left pupil."""
    dot = GOval(5, 5, x=240, y=190)
    dot.filled = True
    dot.color = 'black'
    dot.fill_color = 'ivory'
    return dot
def doraemon_eye5():
    """Return the small ivory highlight dot on the right pupil."""
    dot = GOval(5, 5, x=275, y=190)
    dot.filled = True
    dot.color = 'black'
    dot.fill_color = 'ivory'
    return dot
def doraemon_eye4():
    """Return the black oval pupil at x=270 (right eye)."""
    pupil = GOval(15, 30, x=270, y=190)
    pupil.filled = True
    pupil.color = 'black'
    pupil.fill_color = 'black'
    return pupil
def doraemon_eye3():
    """Return the black oval pupil at x=235 (left eye)."""
    pupil = GOval(15, 30, x=235, y=190)
    pupil.filled = True
    pupil.color = 'black'
    pupil.fill_color = 'black'
    return pupil
def doraemon_eye2():
eye_2 = GOval(50, 75, x=260, | |
# File: oahapi/ratechecker/management/commands/load_daily_data.py
import os
import sys
import re
import zipfile
import StringIO
import contextlib
from datetime import datetime
from csv import reader
from decimal import Decimal
from operator import itemgetter
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import warnings
import _mysql_exceptions
from django.core.management.base import BaseCommand
from django.utils import timezone
from django.db import connection
from django.db.utils import OperationalError, IntegrityError
from ratechecker.models import Product, Adjustment, Region, Rate
from ratechecker.views import RateCheckerParameters, rate_query
# Daily archives are named YYYYMMDD.zip. Raw string avoids the
# invalid-escape-sequence warning for "\d" and "\." on modern Python.
ARCHIVE_PATTERN = r'^\d{8}\.zip$'
class OaHException(Exception):
    """Error raised by the daily rate-data loader."""
class Command(BaseCommand):
args = "<directory_path>"
help = """ Loads daily interest rate data from a zip archive with CSV files. """
messages = []
status = 1 # 1 = FAILURE, 0 = SUCCESS
test_scenarios = {
'1': {'maxfico': 640, 'lock': 60, 'rate_structure': 'Fixed', 'price': 150000,
'loan_amount': 150000, 'arm_type': '', 'io': '', 'institution': 'BOFA',
'loan_type': 'VA', 'ltv': 100, 'property_type': '', 'state': 'AK', 'minfico': 640,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'2': {'maxfico': 690, 'lock': 30, 'rate_structure': 'Fixed', 'price': 210526,
'loan_amount': 200000, 'arm_type': '', 'io': '', 'institution': 'COMPASS',
'loan_type': 'VA', 'ltv': 95, 'property_type': '', 'state': 'AL', 'minfico': 690,
'loan_term': 15, 'loan_purpose': 'REFI'},
'3': {'maxfico': 710, 'lock': 45, 'rate_structure': 'ARM', 'price': 83333,
'loan_amount': 75000, 'arm_type': '3-1', 'io': '', 'institution': 'PNC',
'loan_type': 'CONF', 'ltv': 90, 'property_type': '', 'state': 'AR', 'minfico': 710,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'4': {'maxfico': 745, 'lock': 60, 'rate_structure': 'ARM', 'price': 444444,
'loan_amount': 400000, 'arm_type': '3-1', 'io': 1.0, 'institution': 'USBANK',
'loan_type': 'CONF', 'ltv': 90, 'property_type': '', 'state': 'AZ', 'minfico': 745,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'5': {'maxfico': 760, 'lock': 30, 'rate_structure': 'Fixed', 'price': 1125000,
'loan_amount': 900000, 'arm_type': '', 'io': '', 'institution': 'UNBK',
'loan_type': 'JUMBO', 'ltv': 80, 'property_type': '', 'state': 'CA', 'minfico': 760,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'6': {'maxfico': 740, 'lock': 45, 'rate_structure': 'Fixed', 'price': 411764,
'loan_amount': 350000, 'arm_type': '', 'io': '', 'institution': 'PATELCO',
'loan_type': 'CONF', 'ltv': 85, 'property_type': '', 'state': 'CA', 'minfico': 740,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'7': {'maxfico': 780, 'lock': 60, 'rate_structure': 'Fixed', 'price': 2000000,
'loan_amount': 1500000, 'arm_type': '', 'io': '', 'institution': 'FIRSTBANK',
'loan_type': 'JUMBO', 'ltv': 75, 'property_type': '', 'state': 'CO', 'minfico': 780,
'loan_term': 30, 'loan_purpose': 'REFI'},
'8': {'maxfico': 715, 'lock': 30, 'rate_structure': 'ARM', 'price': 617647,
'loan_amount': 525000, 'arm_type': '7-1', 'io': 1.0, 'institution': 'RBS',
'loan_type': 'AGENCY', 'ltv': 85, 'property_type': '', 'state': 'CT', 'minfico': 715,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'9': {'maxfico': 660, 'lock': 45, 'rate_structure': 'Fixed', 'price': 631578,
'loan_amount': 600000, 'arm_type': '', 'io': '', 'institution': 'QUICKEN',
'loan_type': 'FHA-HB', 'ltv': 95, 'property_type': '', 'state': 'DC', 'minfico': 660,
'loan_term': 15, 'loan_purpose': 'PURCH'},
'10': {'maxfico': 690, 'lock': 60, 'rate_structure': 'Fixed', 'price': 233333,
'loan_amount': 175000, 'arm_type': '', 'io': '', 'institution': 'CAPITAL ONE',
'loan_type': 'CONF', 'ltv': 75, 'property_type': '', 'state': 'DE', 'minfico': 690,
'loan_term': 15, 'loan_purpose': 'REFI'},
'11': {'maxfico': 780, 'lock': 30, 'rate_structure': 'Fixed', 'price': 305555,
'loan_amount': 275000, 'arm_type': '', 'io': '', 'institution': 'KINECTA',
'loan_type': 'CONF', 'ltv': 90, 'property_type': '', 'state': 'FL', 'minfico': 780,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'12': {'maxfico': 800, 'lock': 45, 'rate_structure': 'ARM', 'price': 1000000,
'loan_amount': 700000, 'arm_type': '3-1', 'io': '', 'institution': 'FIRST TECH FCU',
'loan_type': 'JUMBO', 'ltv': 70, 'property_type': '', 'state': 'GA', 'minfico': 800,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'13': {'maxfico': 720, 'lock': 60, 'rate_structure': 'Fixed', 'price': 823529,
'loan_amount': 700000, 'arm_type': '', 'io': '', 'institution': 'PNC',
'loan_type': 'AGENCY', 'ltv': 85, 'property_type': '', 'state': 'HI', 'minfico': 720,
'loan_term': 30, 'loan_purpose': 'REFI'},
'14': {'maxfico': 680, 'lock': 30, 'rate_structure': 'ARM', 'price': 222222,
'loan_amount': 200000, 'arm_type': '5-1', 'io': '', 'institution': '53RD',
'loan_type': 'CONF', 'ltv': 90, 'property_type': '', 'state': 'IA', 'minfico': 680,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'15': {'maxfico': 650, 'lock': 45, 'rate_structure': 'Fixed', 'price': 105263,
'loan_amount': 100000, 'arm_type': '', 'io': '', 'institution': 'ZIONS',
'loan_type': 'FHA', 'ltv': 95, 'property_type': '', 'state': 'ID', 'minfico': 650,
'loan_term': 30, 'loan_purpose': 'REFI'},
'16': {'maxfico': 690, 'lock': 60, 'rate_structure': 'Fixed', 'price': 750000,
'loan_amount': 600000, 'arm_type': '', 'io': '', 'institution': 'ASTORIA',
'loan_type': 'AGENCY', 'ltv': 80, 'property_type': 'CONDO', 'state': 'IL', 'minfico': 690,
'loan_term': 30, 'loan_purpose': 'REFI'},
'17': {'maxfico': 710, 'lock': 30, 'rate_structure': 'Fixed', 'price': 157894,
'loan_amount': 150000, 'arm_type': '', 'io': '', 'institution': 'CITI',
'loan_type': 'FHA', 'ltv': 95, 'property_type': '', 'state': 'IN', 'minfico': 710,
'loan_term': 15, 'loan_purpose': 'PURCH'},
'18': {'maxfico': 760, 'lock': 45, 'rate_structure': 'Fixed', 'price': 277777,
'loan_amount': 250000, 'arm_type': '', 'io': '', 'institution': 'BECU',
'loan_type': 'CONF', 'ltv': 90, 'property_type': '', 'state': 'KS', 'minfico': 760,
'loan_term': 30, 'loan_purpose': 'REFI'},
'19': {'maxfico': 715, 'lock': 60, 'rate_structure': 'Fixed', 'price': 200000,
'loan_amount': 150000, 'arm_type': '', 'io': '', 'institution': '53RD',
'loan_type': 'VA', 'ltv': 75, 'property_type': '', 'state': 'KY', 'minfico': 715,
'loan_term': 30, 'loan_purpose': 'REFI'},
'20': {'maxfico': 695, 'lock': 30, 'rate_structure': 'ARM', 'price': 107526,
'loan_amount': 100000, 'arm_type': '5-1', 'io': '', 'institution': 'REGIONS',
'loan_type': 'CONF', 'ltv': 93, 'property_type': '', 'state': 'LA', 'minfico': 695,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'21': {'maxfico': 765, 'lock': 45, 'rate_structure': 'ARM', 'price': 533333,
'loan_amount': 400000, 'arm_type': '10-1', 'io': '', 'institution': 'WEBSTER',
'loan_type': 'CONF', 'ltv': 75, 'property_type': 'CONDO', 'state': 'MA',
'minfico': 765, 'loan_term': 30, 'loan_purpose': 'REFI'},
'22': {'maxfico': 780, 'lock': 60, 'rate_structure': 'ARM', 'price': 687500,
'loan_amount': 550000, 'arm_type': '7-1', 'io': '', 'institution': 'TDBANK',
'loan_type': 'AGENCY', 'ltv': 80, 'property_type': '', 'state': 'MD', 'minfico': 780,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'23': {'maxfico': 750, 'lock': 30, 'rate_structure': 'Fixed', 'price': 171428,
'loan_amount': 120000, 'arm_type': '', 'io': '', 'institution': 'TDBANK',
'loan_type': 'CONF', 'ltv': 70, 'property_type': '', 'state': 'ME', 'minfico': 750,
'loan_term': 30, 'loan_purpose': 'REFI'},
'24': {'maxfico': 680, 'lock': 45, 'rate_structure': 'Fixed', 'price': 225000,
'loan_amount': 180000, 'arm_type': '', 'io': '', 'institution': 'HSBC',
'loan_type': 'FHA', 'ltv': 80, 'property_type': '', 'state': 'MI', 'minfico': 680,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'25': {'maxfico': 790, 'lock': 60, 'rate_structure': 'ARM', 'price': 470588,
'loan_amount': 400000, 'arm_type': '10-1', 'io': '',
'institution': 'CHSCHWAB', 'loan_type': 'CONF', 'ltv': 85,
'property_type': '', 'state': 'MN', 'minfico': 790, 'loan_term': 30,
'loan_purpose': 'REFI'},
'26': {'maxfico': 710, 'lock': 30, 'rate_structure': 'Fixed', 'price': 166666,
'loan_amount': 150000, 'arm_type': '', 'io': '', 'institution': 'HARRIS',
'loan_type': 'FHA', 'ltv': 90, 'property_type': '', 'state': 'MO', 'minfico': 710,
'loan_term': 15, 'loan_purpose': 'PURCH'},
'27': {'maxfico': 705, 'lock': 45, 'rate_structure': 'Fixed', 'price': 100000,
'loan_amount': 80000, 'arm_type': '', 'io': '', 'institution': 'PNC',
'loan_type': 'CONF', 'ltv': 80, 'property_type': '', 'state': 'MS', 'minfico': 705,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'28': {'maxfico': 740, 'lock': 60, 'rate_structure': 'ARM', 'price': 470588,
'loan_amount': 400000, 'arm_type': '5-1', 'io': '', 'institution': 'STATEFARM',
'loan_type': 'CONF', 'ltv': 85, 'property_type': '', 'state': 'MT', 'minfico': 740,
'loan_term': 30, 'loan_purpose': 'REFI'},
'29': {'maxfico': 690, 'lock': 30, 'rate_structure': 'Fixed', 'price': 368421,
'loan_amount': 350000, 'arm_type': '', 'io': '', 'institution': 'JPM',
'loan_type': 'FHA-HB', 'ltv': 95, 'property_type': 'CONDO', 'state': 'NC',
'minfico': 690, 'loan_term': 30, 'loan_purpose': 'PURCH'},
'30': {'maxfico': 705, 'lock': 45, 'rate_structure': 'Fixed', 'price': 125625,
'loan_amount': 100500, 'arm_type': '', 'io': '', 'institution': 'STATEFARM',
'loan_type': 'CONF', 'ltv': 80, 'property_type': '', 'state': 'ND', 'minfico': 705,
'loan_term': 30, 'loan_purpose': 'REFI'},
'31': {'maxfico': 710, 'lock': 60, 'rate_structure': 'Fixed', 'price': 259067.35751295337,
'loan_amount': 250000, 'arm_type': '', 'io': '', 'institution': 'BOFA',
'loan_type': 'FHA', 'ltv': 96.5, 'property_type': '', 'state': 'NE', 'minfico': 710,
'loan_term': 15, 'loan_purpose': 'REFI'},
'32': {'maxfico': 740, 'lock': 30, 'rate_structure': 'Fixed', 'price': 421052,
'loan_amount': 400000, 'arm_type': '', 'io': '', 'institution': 'SANTANDER',
'loan_type': 'FHA-HB', 'ltv': 95, 'property_type': '', 'state': 'NH', 'minfico': 740,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'33': {'maxfico': 680, 'lock': 45, 'rate_structure': 'Fixed', 'price': 450000,
'loan_amount': 450000, 'arm_type': '', 'io': '', 'institution': 'CITI',
'loan_type': 'VA-HB', 'ltv': 100, 'property_type': '', 'state': 'NJ', 'minfico': 680,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'34': {'maxfico': 720, 'lock': 60, 'rate_structure': 'ARM', 'price': 294117,
'loan_amount': 250000, 'arm_type': '3-1', 'io': '', 'institution': 'STATEFARM',
'loan_type': 'CONF', 'ltv': 85, 'property_type': '', 'state': 'NM', 'minfico': 720,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'35': {'maxfico': 745, 'lock': 30, 'rate_structure': 'ARM', 'price': 502409,
'loan_amount': 417000, 'arm_type': '7-1', 'io': '', 'institution': 'ZIONS',
'loan_type': 'CONF', 'ltv': 83, 'property_type': '', 'state': 'NV', 'minfico': 745,
'loan_term': 30, 'loan_purpose': 'REFI'},
'36': {'maxfico': 695, 'lock': 45, 'rate_structure': 'Fixed', 'price': 500000,
'loan_amount': 400000, 'arm_type': '', 'io': '', 'institution': 'WELLS',
'loan_type': 'CONF', 'ltv': 80, 'property_type': 'COOP', 'state': 'NY', 'minfico': 695,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'37': {'maxfico': 710, 'lock': 60, 'rate_structure': 'Fixed', 'price': 204081.63265306121,
'loan_amount': 200000, 'arm_type': '', 'io': '', 'institution': 'HUNTINGTON',
'loan_type': 'VA', 'ltv': 98.0, 'property_type': '', 'state': 'OH', 'minfico': 710,
'loan_term': 15, 'loan_purpose': 'PURCH'},
'38': {'maxfico': 650, 'lock': 30, 'rate_structure': 'Fixed', 'price': 55555,
'loan_amount': 50000, 'arm_type': '', 'io': '', 'institution': 'JPM',
'loan_type': 'FHA', 'ltv': 90, 'property_type': '', 'state': 'OK', 'minfico': 650,
'loan_term': 30, 'loan_purpose': 'PURCH'},
'39': {'maxfico': 730, 'lock': 45, 'rate_structure': | |
assert round(calc_attr.alpha_rad_outer_rt, 5) == 5.0
assert round(calc_attr.alpha_comb_outer_rt, 1) == 25.0
# window
assert round(calc_attr.ua_value_win, 16) == 32.87895310796074
assert round(calc_attr.area_win, 1) == 18.0
assert round(calc_attr.r_conv_inner_win, 19) == 0.032679738562091505
assert round(calc_attr.r_rad_inner_win, 4) == 0.0111
assert round(calc_attr.r_comb_inner_win, 19) == 0.008291873963515755
assert round(calc_attr.r_conv_outer_win, 5) == 0.00278
assert round(calc_attr.r_rad_outer_win, 4) == 0.0111
assert round(calc_attr.r_comb_outer_win, 4) == 0.0022
assert round(calc_attr.alpha_conv_inner_win, 1) == 1.7
assert round(calc_attr.alpha_comb_outer_win, 1) == 25.0
assert round(calc_attr.alpha_conv_outer_win, 1) == 20.0
assert round(calc_attr.weighted_g_value, 3) == 0.789
def test_calc_chain_matrix_four(self):
    """test of calc_chain_matrix

    Builds a FourElement model for the last thermal zone and checks the
    lumped R/C pair returned by _calc_parallel_connection for the
    outer-wall+window group and the inner-wall group.
    """
    from teaser.logic.buildingobjects.calculation.four_element import \
        FourElement
    therm_zone = prj.buildings[-1].thermal_zones[-1]
    calc_attr = FourElement(therm_zone, merge_windows=False, t_bt=5)
    # every element needs its equivalent resistance and UA value before
    # the parallel connection can be evaluated
    helplist = therm_zone.outer_walls + therm_zone.rooftops + \
        therm_zone.ground_floors + therm_zone.inner_walls + \
        therm_zone.ceilings + therm_zone.floors + therm_zone.windows
    for element in helplist:
        element.calc_equivalent_res()
        element.calc_ua_value()
    # angular frequency (rad/s) for a 5-day period, matching t_bt=5
    omega = (2 * math.pi / 86400 / 5)
    helplist_outer_walls = therm_zone.outer_walls + therm_zone.windows
    r1_ow, c1_ow = calc_attr._calc_parallel_connection(
        element_list=helplist_outer_walls,
        omega=omega)
    assert round(r1_ow, 14) == 0.00688468914141
    assert round(c1_ow, 5) == 533938.62338
    helplist_inner_walls = therm_zone.inner_walls +\
        therm_zone.ceilings + therm_zone.floors
    r1_iw, c1_iw = calc_attr._calc_parallel_connection(
        element_list=helplist_inner_walls,
        omega=omega)
    assert round(r1_iw, 13) == 0.0097195611408
    assert round(c1_iw, 6) == 319983.518743
def test_calc_weightfactor_one(self):
    """test of calc_weightfactor (one-element model)

    Checks outer-wall, window and ground weight factors, first with
    merged windows (IBPSA), then with separate windows (AixLib).

    Bug fix: the final window-factor check used to compare the return
    values of ``list.sort()`` (always ``None == None``, so it could
    never fail); it now sorts in place and compares the lists, like
    every other assertion in this module.
    """
    prj.set_default()
    helptest.building_test2(prj)
    prj.buildings[-1].calc_building_parameter(number_of_elements=1,
                                              merge_windows=True,
                                              used_library='IBPSA')
    calc_attr = prj.buildings[-1].thermal_zones[-1].model_attr
    weightfactors_test_list = [
        0,
        0.024530650180761254,
        0.03434291025306576,
        0.024530650180761254,
        0.03434291025306576,
        0.3407000330729792]
    # compare order-insensitively via sorted lists
    calc_attr.weightfactor_ow.sort()
    weightfactors_test_list.sort()
    assert calc_attr.weightfactor_ow == \
        weightfactors_test_list
    weightfactors_test_list = [
        0.08674342795625017,
        0.0,
        0.0,
        0.0,
        0.054214642472656345,
        0.054214642472656345]
    calc_attr.weightfactor_win.sort()
    weightfactors_test_list.sort()
    assert calc_attr.weightfactor_win == \
        weightfactors_test_list
    assert calc_attr.weightfactor_ground == \
        0.34638013315780397
    # reset the stored factors before recomputing with other settings
    prj.buildings[-1].thermal_zones[-1].weightfactor_ow = []
    prj.buildings[-1].thermal_zones[-1].weightfactor_win = []
    prj.buildings[-1].calc_building_parameter(number_of_elements=1,
                                              merge_windows=False,
                                              used_library='AixLib')
    calc_attr = prj.buildings[-1].thermal_zones[-1].model_attr
    weightfactors_test_list = [
        0.03047939672771178,
        0.423320678280269,
        0.03047939672771178,
        0.0,
        0.04267115541879649,
        0.04267115541879649]
    calc_attr.weightfactor_ow.sort()
    weightfactors_test_list.sort()
    assert calc_attr.weightfactor_ow == \
        weightfactors_test_list
    weightfactors_test_list = [
        0.44444444444444453,
        0.0,
        0.0,
        0.0,
        0.2777777777777778,
        0.2777777777777778]
    # fixed: was "a.sort() == b.sort()", which compares None to None
    calc_attr.weightfactor_win.sort()
    weightfactors_test_list.sort()
    assert calc_attr.weightfactor_win == \
        weightfactors_test_list
    assert calc_attr.weightfactor_ground == \
        0.4303782174267145
def test_calc_weightfactor_two(self):
    """test of calc_weightfactor

    Two-element model: checks outer-wall, window and ground weight
    factors with merged windows (IBPSA) and separate windows (AixLib).
    """
    # fresh project with the shared test building
    prj.set_default()
    helptest.building_test2(prj)
    prj.buildings[-1].calc_building_parameter(number_of_elements=2,
                                              merge_windows=True,
                                              used_library='IBPSA')
    calc_attr = prj.buildings[-1].thermal_zones[-1].model_attr
    weightfactors_test_list = [
        0.0,
        0.024530650180761254,
        0.03434291025306576,
        0.024530650180761254,
        0.03434291025306576,
        0.3407000330729792]
    # compare order-insensitively via sorted lists
    calc_attr.weightfactor_ow.sort()
    weightfactors_test_list.sort()
    assert calc_attr.weightfactor_ow == \
        weightfactors_test_list
    weightfactors_test_list = [
        0.0,
        0.0,
        0.054214642472656345,
        0.08674342795625017,
        0.054214642472656345,
        0.0]
    calc_attr.weightfactor_win.sort()
    weightfactors_test_list.sort()
    assert calc_attr.weightfactor_win ==\
        weightfactors_test_list
    assert calc_attr.weightfactor_ground == \
        0.34638013315780397
    # reset the stored factors before recomputing with other settings
    prj.buildings[-1].thermal_zones[-1].weightfactor_ow = []
    prj.buildings[-1].thermal_zones[-1].weightfactor_win = []
    prj.buildings[-1].calc_building_parameter(number_of_elements=2,
                                              merge_windows=False,
                                              used_library='AixLib')
    calc_attr = prj.buildings[-1].thermal_zones[-1].model_attr
    weightfactors_test_list = [
        0.0,
        0.03047939672771178,
        0.04267115541879649,
        0.03047939672771178,
        0.04267115541879649,
        0.423320678280269]
    calc_attr.weightfactor_ow.sort()
    weightfactors_test_list.sort()
    assert calc_attr.weightfactor_ow ==\
        weightfactors_test_list
    weightfactors_test_list = [
        0.0,
        0.0,
        0.27777777777777778,
        0.44444444444444453,
        0.27777777777777778,
        0.0]
    calc_attr.weightfactor_win.sort()
    weightfactors_test_list.sort()
    assert calc_attr.weightfactor_win ==\
        weightfactors_test_list
    assert calc_attr.weightfactor_ground == \
        0.4303782174267145
def test_calc_weightfactor_three(self):
    """test of calc_weightfactor

    Three-element model: outer-wall and window weight factors with
    merged windows (IBPSA) and separate windows (AixLib); the ground
    share is expected to be 0 in this configuration.
    """
    # fresh project with the shared test building
    prj.set_default()
    helptest.building_test2(prj)
    prj.buildings[-1].calc_building_parameter(number_of_elements=3,
                                              merge_windows=True,
                                              used_library='IBPSA')
    calc_attr = prj.buildings[-1].thermal_zones[-1].model_attr
    weightfactors_test_list = [
        0.03753045374718346,
        0.5212510365068732,
        0.05254263524605685,
        0.03753045374718346,
        0.05254263524605685]
    # compare order-insensitively via sorted lists
    calc_attr.weightfactor_ow.sort()
    weightfactors_test_list.sort()
    assert calc_attr.weightfactor_ow == \
        weightfactors_test_list
    weightfactors_test_list = [
        0.13271234911406493,
        0.0,
        0.08294521819629057,
        0.0,
        0.08294521819629057]
    calc_attr.weightfactor_win.sort()
    weightfactors_test_list.sort()
    assert calc_attr.weightfactor_win ==\
        weightfactors_test_list
    assert calc_attr.weightfactor_ground == \
        0
    # reset the stored factors before recomputing with other settings
    prj.buildings[-1].thermal_zones[-1].weightfactor_ow = []
    prj.buildings[-1].thermal_zones[-1].weightfactor_win = []
    prj.buildings[-1].calc_building_parameter(number_of_elements=3,
                                              merge_windows=False,
                                              used_library='AixLib')
    calc_attr = prj.buildings[-1].thermal_zones[-1].model_attr
    weightfactors_test_list = [
        0.05350813058801943,
        0.7431609731775066,
        0.07491138282322722,
        0.05350813058801943,
        0.07491138282322722]
    calc_attr.weightfactor_ow.sort()
    weightfactors_test_list.sort()
    assert calc_attr.weightfactor_ow ==\
        weightfactors_test_list
    weightfactors_test_list = [
        0.44444444444444453,
        0.0,
        0.2777777777777778,
        0.0,
        0.2777777777777778]
    calc_attr.weightfactor_win.sort()
    weightfactors_test_list.sort()
    assert calc_attr.weightfactor_win ==\
        weightfactors_test_list
    assert calc_attr.weightfactor_ground == \
        0
def test_calc_weightfactor_four(self):
    """test of calc_weightfactor

    Four-element model: outer-wall, window, ground and rooftop weight
    factors with merged windows (IBPSA) and separate windows (AixLib).
    """
    # fresh project with the shared test building
    prj.set_default()
    helptest.building_test2(prj)
    prj.buildings[-1].calc_building_parameter(number_of_elements=4,
                                              merge_windows=True,
                                              used_library='IBPSA')
    calc_attr = prj.buildings[-1].thermal_zones[-1].model_attr
    weightfactors_test_list = [
        0.07839276240589141, 0.10974986736824797, 0.07839276240589141,
        0.10974986736824797]
    # compare order-insensitively via sorted lists
    calc_attr.weightfactor_ow.sort()
    weightfactors_test_list.sort()
    assert calc_attr.weightfactor_ow == \
        weightfactors_test_list
    weightfactors_test_list = [
        0.27720655131187616, 0.17325409456992255, 0.0, 0.17325409456992255]
    calc_attr.weightfactor_win.sort()
    weightfactors_test_list.sort()
    assert calc_attr.weightfactor_win ==\
        weightfactors_test_list
    assert calc_attr.weightfactor_ground == \
        0
    assert calc_attr.weightfactor_rt == \
        [1]
    # reset the stored factors before recomputing with other settings
    prj.buildings[-1].thermal_zones[-1].weightfactor_ow = []
    prj.buildings[-1].thermal_zones[-1].weightfactor_win = []
    prj.buildings[-1].calc_building_parameter(number_of_elements=4,
                                              merge_windows=False,
                                              used_library='AixLib')
    calc_attr = prj.buildings[-1].thermal_zones[-1].model_attr
    weightfactors_test_list = [
        0.20833333333333331, 0.29166666666666663, 0.20833333333333331,
        0.29166666666666663]
    calc_attr.weightfactor_ow.sort()
    weightfactors_test_list.sort()
    assert calc_attr.weightfactor_ow ==\
        weightfactors_test_list
    weightfactors_test_list = [
        0.44444444444444453, 0.2777777777777778, 0.0, 0.2777777777777778]
    calc_attr.weightfactor_win.sort()
    weightfactors_test_list.sort()
    assert calc_attr.weightfactor_win ==\
        weightfactors_test_list
    assert calc_attr.weightfactor_ground == \
        0
    assert calc_attr.weightfactor_rt == \
        [1]
def test_calc_one_element(self):
    """test of calc_zone_parameters with one element

    (the original docstring said "calc_two_element" — copy/paste)
    Checks outer-wall lumped parameters, first with merged windows,
    then with windows kept as a separate resistance.
    """
    prj.set_default()
    helptest.building_test2(prj)
    therm_zone = prj.buildings[-1].thermal_zones[-1]
    therm_zone.calc_zone_parameters(
        number_of_elements=1,
        merge_windows=True)
    zone_attr = therm_zone.model_attr
    assert round(zone_attr.area_ow, 1) == 328.0
    assert round(zone_attr.ua_value_ow, 16) == 135.5818558809656
    assert round(zone_attr.r_conv_inner_ow, 16) == 0.0016512549537649
    assert round(zone_attr.r_rad_inner_ow, 16) == 0.000609756097561
    assert round(zone_attr.r_conv_outer_ow, 9) == 0.000265957
    assert round(zone_attr.alpha_conv_inner_ow, 5) == 1.84634
    assert round(zone_attr.alpha_rad_inner_ow, 1) == 5.0
    assert round(zone_attr.r1_ow, 15) == 0.000772773294534
    assert round(zone_attr.c1_ow, 5) == 3648580.59312
    assert round(zone_attr.r_rest_ow, 14) == 0.00461875570532
    # same zone, recomputed with windows as their own element
    therm_zone = prj.buildings[-1].thermal_zones[-1]
    therm_zone.calc_zone_parameters(
        number_of_elements=1,
        merge_windows=False)
    zone_attr = therm_zone.model_attr
    assert round(zone_attr.area_ow, 1) == 328.0
    assert round(zone_attr.ua_value_ow, 16) == 135.5818558809656
    assert round(zone_attr.r_conv_inner_ow, 16) == 0.0016512549537649
    assert round(zone_attr.r_rad_inner_ow, 16) == 0.000609756097561
    assert round(zone_attr.r_conv_outer_ow, 9) == 0.000265957
    assert round(zone_attr.alpha_conv_inner_ow, 5) == 1.84634
    assert round(zone_attr.alpha_rad_inner_ow, 1) == 5.0
    assert round(zone_attr.r1_win, 13) == 0.0199004975124
    assert round(zone_attr.r1_ow, 15) == 0.001007515484109
    assert round(zone_attr.c1_ow, 5) == 3648580.59312
    assert round(zone_attr.r_rest_ow, 14) == 0.00585224061345
def test_calc_two_element(self):
    """test of calc_two_element

    Checks outer-wall and inner-wall lumped parameters for the
    two-element model, with merged and with separate windows.
    """
    prj.set_default()
    helptest.building_test2(prj)
    therm_zone = prj.buildings[-1].thermal_zones[-1]
    therm_zone.calc_zone_parameters(
        number_of_elements=2,
        merge_windows=True)
    zone_attr = therm_zone.model_attr
    assert round(zone_attr.area_ow, 1) == 328.0
    assert round(zone_attr.ua_value_ow, 16) == 135.5818558809656
    assert round(zone_attr.r_conv_inner_ow, 16) == 0.0016512549537649
    assert round(zone_attr.r_rad_inner_ow, 16) == 0.000609756097561
    assert round(zone_attr.r_conv_outer_ow, 9) == 0.000265957
    assert round(zone_attr.alpha_conv_inner_ow, 5) == 1.84634
    assert round(zone_attr.alpha_rad_inner_ow, 1) == 5.0
    assert round(zone_attr.r1_ow, 15) == 0.000772773294534
    assert round(zone_attr.c1_ow, 5) == 3648580.59312
    assert round(zone_attr.r1_iw, 15) == 0.009719561140816
    assert round(zone_attr.c1_iw, 5) == 319983.51874
    assert round(zone_attr.r_rest_ow, 14) == 0.00461875570532
    # same zone, recomputed with windows as their own element
    therm_zone = prj.buildings[-1].thermal_zones[-1]
    therm_zone.calc_zone_parameters(
        number_of_elements=2,
        merge_windows=False)
    zone_attr = therm_zone.model_attr
    assert round(zone_attr.area_ow, 1) == 328.0
    assert round(zone_attr.ua_value_ow, 16) == 135.5818558809656
    assert round(zone_attr.r_conv_inner_ow, 16) == 0.0016512549537649
    assert round(zone_attr.r_rad_inner_ow, 16) == 0.000609756097561
    assert round(zone_attr.r_conv_outer_ow, 9) == 0.000265957
    assert round(zone_attr.alpha_conv_inner_ow, 5) == 1.84634
    assert round(zone_attr.alpha_rad_inner_ow, 1) == 5.0
    assert round(zone_attr.r1_win, 13) == 0.0199004975124
    assert round(zone_attr.r1_ow, 15) == 0.001007515484109
    assert round(zone_attr.c1_ow, 5) == 3648580.59312
    assert round(zone_attr.r1_iw, 15) == 0.009719561140816
    assert round(zone_attr.r_rest_ow, 14) == 0.00585224061345
    def test_calc_three_element(self):
        """Regression test of the three-element zone parameter calculation.

        Checks outer-wall (ow) and ground-floor (gf) model attributes of
        test building 2, once with windows merged into the outer walls
        and once with windows as a separate element.
        """
        prj.set_default()
        helptest.building_test2(prj)
        therm_zone = prj.buildings[-1].thermal_zones[-1]
        # First run: windows merged into the outer-wall element.
        therm_zone.calc_zone_parameters(
            number_of_elements=3,
            merge_windows=True)
        zone_attr = therm_zone.model_attr
        assert round(zone_attr.area_ow, 1) == 188.0
        assert round(zone_attr.ua_value_ow, 16) == 77.23037843150993
        assert round(zone_attr.r_conv_inner_ow, 16) == 0.0027203482045702
        assert round(zone_attr.r_rad_inner_ow, 16) == 0.001063829787234
        assert round(zone_attr.r_conv_outer_ow, 9) == 0.000265957
        assert round(zone_attr.alpha_conv_inner_ow, 5) == 1.95532
        assert round(zone_attr.alpha_rad_inner_ow, 1) == 5.0
        assert round(zone_attr.r1_ow, 14) == 0.00114890338306
        assert round(zone_attr.c1_ow, 5) == 2091259.60825
        assert round(zone_attr.r1_iw, 15) == 0.009719561140816
        assert round(zone_attr.c1_iw, 5) == 319983.51874
        assert round(zone_attr.r_rest_ow, 11) == 0.00702003101
        assert round(zone_attr.area_gf, 1) == 140.0
        assert round(zone_attr.ua_value_gf, 16) == 58.351477449455686
        assert round(zone_attr.r_conv_inner_gf, 16) == 0.0042016806722689
        assert round(zone_attr.r_rad_inner_gf, 16) == 0.0014285714285714
        assert round(zone_attr.alpha_conv_inner_gf, 5) == 1.7
        assert round(zone_attr.alpha_rad_inner_gf, 1) == 5.0
        assert round(zone_attr.r1_gf, 14) == 0.00236046484848
        assert round(zone_attr.c1_gf, 5) == 1557320.98487
        assert round(zone_attr.r_rest_gf, 13) == 0.0137109637229
        # Second run: windows kept as a separate element; only the
        # outer-wall chain (r1_win, r1_ow, r_rest_ow) changes.
        therm_zone = prj.buildings[-1].thermal_zones[-1]
        therm_zone.calc_zone_parameters(
            number_of_elements=3,
            merge_windows=False)
        zone_attr = therm_zone.model_attr
        assert round(zone_attr.area_ow, 1) == 188.0
        assert round(zone_attr.ua_value_ow, 16) == 77.23037843150993
        assert round(zone_attr.r_conv_inner_ow, 16) == 0.0027203482045702
        assert round(zone_attr.r_rad_inner_ow, 16) == 0.001063829787234
        assert round(zone_attr.r_conv_outer_ow, 9) == 0.000265957
        assert round(zone_attr.alpha_conv_inner_ow, 5) == 1.95532
        assert round(zone_attr.alpha_rad_inner_ow, 1) == 5.0
        assert round(zone_attr.r1_win, 13) == 0.0199004975124
        assert round(zone_attr.r1_ow, 13) == 0.0017577929723
        assert round(zone_attr.c1_ow, 5) == 2091259.60825
        assert round(zone_attr.r1_iw, 15) == 0.009719561140816
        assert round(zone_attr.c1_iw, 5) == 319983.51874
        assert round(zone_attr.r_rest_ow, 13) == 0.0102102921341
        assert round(zone_attr.area_gf, 1) == 140.0
        assert round(zone_attr.ua_value_gf, 16) == 58.351477449455686
        assert round(zone_attr.r_conv_inner_gf, 16) == 0.0042016806722689
        assert round(zone_attr.r_rad_inner_gf, 16) == 0.0014285714285714
        assert round(zone_attr.alpha_conv_inner_gf, 5) == 1.7
        assert round(zone_attr.alpha_rad_inner_gf, 1) == 5.0
        assert round(zone_attr.r1_gf, 14) == 0.00236046484848
        assert round(zone_attr.c1_gf, 5) == 1557320.98487
        assert round(zone_attr.r_rest_gf, 13) == 0.0137109637229
def test_calc_four_element(self):
"""test of calc_two_element"""
prj.set_default()
helptest.building_test2(prj)
therm_zone = prj.buildings[-1].thermal_zones[-1]
therm_zone.calc_zone_parameters(
number_of_elements=4,
merge_windows=True)
zone_attr = therm_zone.model_attr
assert round(zone_attr.area_ow, 1) == 48.0
assert round(zone_attr.ua_value_ow, 16) == 19.83577523748189
assert round(zone_attr.r_conv_inner_ow, 16) == 0.007716049382716
assert round(zone_attr.r_rad_inner_ow, 16) == 0.0041666666666667
assert round(zone_attr.r_conv_outer_ow, 9) == 0.001041667
assert round(zone_attr.alpha_conv_inner_ow, 5) == 2.7
assert round(zone_attr.alpha_rad_inner_ow, 1) == 5.0
assert round(zone_attr.r1_ow, 14) == 0.00223838915931
assert round(zone_attr.c1_ow, 5) == 533938.62338
assert round(zone_attr.r1_iw, 14) == 0.00971956114082
assert round(zone_attr.c1_iw, 5) == 319983.51874
assert round(zone_attr.r_rest_ow, 13) == 0.0138583242416
assert round(zone_attr.area_gf, 1) == 140.0
assert round(zone_attr.ua_value_gf, 16) == 58.351477449455686
assert round(zone_attr.r_conv_inner_gf, 16) == 0.0042016806722689
assert round(zone_attr.r_rad_inner_gf, 16) == 0.0014285714285714
assert round(zone_attr.alpha_conv_inner_gf, 5) == 1.7
assert round(zone_attr.alpha_rad_inner_gf, 1) == 5.0
assert round(zone_attr.r1_gf, 14) == 0.00236046484848
assert round(zone_attr.c1_gf, 5) == 1557320.98487
assert round(zone_attr.r_rest_gf, 13) == 0.0137109637229
assert round(zone_attr.area_rt, 1) == 140.0
assert round(zone_attr.ua_value_rt, 16) == 57.394603194028036
assert round(zone_attr.r_conv_inner_rt, 16) == 0.0042016806722689
assert round(zone_attr.r_rad_inner_rt, 16) == 0.0014285714285714
assert round(zone_attr.r_conv_outer_rt, 9) == 0.000357143
assert round(zone_attr.alpha_conv_inner_rt, 5) == 1.7
assert round(zone_attr.alpha_rad_inner_rt, 1) == 5.0
assert round(zone_attr.r1_rt, 14) == 0.00236046484848
assert round(zone_attr.c1_rt, 5) == 1557320.98487
assert round(zone_attr.r_rest_rt, 13) == 0.0137109637229
therm_zone = prj.buildings[-1].thermal_zones[-1]
therm_zone.calc_zone_parameters(
number_of_elements=4,
merge_windows=False)
zone_attr = therm_zone.model_attr
assert round(zone_attr.area_ow, 1) == 48.0
assert round(zone_attr.ua_value_ow, 16) == 19.83577523748189
assert round(zone_attr.r_conv_inner_ow, 16) == 0.007716049382716
assert round(zone_attr.r_rad_inner_ow, 16) == 0.0041666666666667
assert round(zone_attr.r_conv_outer_ow, 9) == 0.001041667
assert round(zone_attr.alpha_conv_inner_ow, 5) == 2.7
assert round(zone_attr.alpha_rad_inner_ow, 1) == 5.0
assert round(zone_attr.r1_win, | |
#!/usr/bin/python
def param_gui(self):
param_gui = [
self.K1, self.P1, self.e1, self.om1, self.ma1, self.incl1, self.Omega1,
self.K2, self.P2, self.e2, self.om2, self.ma2, self.incl2, self.Omega2,
self.K3, self.P3, self.e3, self.om3, self.ma3, self.incl3, self.Omega3,
self.K4, self.P4, self.e4, self.om4, self.ma4, self.incl4, self.Omega4,
self.K5, self.P5, self.e5, self.om5, self.ma5, self.incl5, self.Omega5,
self.K6, self.P6, self.e6, self.om6, self.ma6, self.incl6, self.Omega6,
self.K7, self.P7, self.e7, self.om7, self.ma7, self.incl7, self.Omega7,
self.K8, self.P8, self.e8, self.om8, self.ma8, self.incl8, self.Omega8,
self.K9, self.P9, self.e9, self.om9, self.ma9, self.incl9, self.Omega9,
]
return param_gui
def param_errors_gui(self):
param_errors_gui = [self.err_K1,self.err_P1,self.err_e1,self.err_om1,self.err_ma1, self.err_i1, self.err_Om1,
self.err_K2,self.err_P2,self.err_e2,self.err_om2,self.err_ma2, self.err_i2, self.err_Om2,
self.err_K3,self.err_P3,self.err_e3,self.err_om3,self.err_ma3, self.err_i3, self.err_Om3,
self.err_K4,self.err_P4,self.err_e4,self.err_om4,self.err_ma4, self.err_i4, self.err_Om4,
self.err_K5,self.err_P5,self.err_e5,self.err_om5,self.err_ma5, self.err_i5, self.err_Om5,
self.err_K6,self.err_P6,self.err_e6,self.err_om6,self.err_ma6, self.err_i6, self.err_Om6,
self.err_K7,self.err_P7,self.err_e7,self.err_om7,self.err_ma7, self.err_i7, self.err_Om7,
self.err_K8,self.err_P8,self.err_e8,self.err_om8,self.err_ma8, self.err_i8, self.err_Om8,
self.err_K9,self.err_P9,self.err_e9,self.err_om9,self.err_ma9, self.err_i9, self.err_Om9,
]
return param_errors_gui
def use_param_gui(self):
use_param_gui = [self.use_K1, self.use_P1, self.use_e1, self.use_om1, self.use_ma1, self.use_incl1, self.use_Omega1,
self.use_K2, self.use_P2, self.use_e2, self.use_om2, self.use_ma2, self.use_incl2, self.use_Omega2,
self.use_K3, self.use_P3, self.use_e3, self.use_om3, self.use_ma3, self.use_incl3, self.use_Omega3,
self.use_K4, self.use_P4, self.use_e4, self.use_om4, self.use_ma4, self.use_incl4, self.use_Omega4,
self.use_K5, self.use_P5, self.use_e5, self.use_om5, self.use_ma5, self.use_incl5, self.use_Omega5,
self.use_K6, self.use_P6, self.use_e6, self.use_om6, self.use_ma6, self.use_incl6, self.use_Omega6,
self.use_K7, self.use_P7, self.use_e7, self.use_om7, self.use_ma7, self.use_incl7, self.use_Omega7,
self.use_K8, self.use_P8, self.use_e8, self.use_om8, self.use_ma8, self.use_incl8, self.use_Omega8,
self.use_K9, self.use_P9, self.use_e9, self.use_om9, self.use_ma9, self.use_incl9, self.use_Omega9,
]
return use_param_gui
###########################################################################
def param_gui_wd(self):
param_gui_wd = [
self.om_dot_1, self.om_dot_2, self.om_dot_3,
self.om_dot_4, self.om_dot_5, self.om_dot_6,
self.om_dot_7, self.om_dot_8, self.om_dot_9
]
return param_gui_wd
def use_param_gui_wd(self):
use_param_gui_wd = [
self.use_om_dot_1, self.use_om_dot_2, self.use_om_dot_3,
self.use_om_dot_4, self.use_om_dot_5, self.use_om_dot_6,
self.use_om_dot_7, self.use_om_dot_8, self.use_om_dot_9
]
return use_param_gui_wd
def param_errors_gui_wd(self):
param_errors_gui_wd = [
self.err_om_dot_1,self.err_om_dot_2,self.err_om_dot_3,
self.err_om_dot_4,self.err_om_dot_5,self.err_om_dot_6,
self.err_om_dot_7,self.err_om_dot_8,self.err_om_dot_9,
]
return param_errors_gui_wd
###########################################################################
def param_gui_tr(self):
param_gui_tr = [
self.t0_1, self.pl_rad_1, self.a_sol_1,
self.t0_2, self.pl_rad_2, self.a_sol_2,
self.t0_3, self.pl_rad_3, self.a_sol_3,
self.t0_4, self.pl_rad_4, self.a_sol_4,
self.t0_5, self.pl_rad_5, self.a_sol_5,
self.t0_6, self.pl_rad_6, self.a_sol_6,
self.t0_7, self.pl_rad_7, self.a_sol_7,
self.t0_8, self.pl_rad_8, self.a_sol_8,
self.t0_9, self.pl_rad_9, self.a_sol_9,
]
return param_gui_tr
def use_param_gui_tr(self):
use_param_gui_tr = [self.use_t0_1, self.use_pl_rad_1, self.use_a_sol_1,
self.use_t0_2, self.use_pl_rad_2, self.use_a_sol_2,
self.use_t0_3, self.use_pl_rad_3, self.use_a_sol_3,
self.use_t0_4, self.use_pl_rad_4, self.use_a_sol_4,
self.use_t0_5, self.use_pl_rad_5, self.use_a_sol_5,
self.use_t0_6, self.use_pl_rad_6, self.use_a_sol_6,
self.use_t0_7, self.use_pl_rad_7, self.use_a_sol_7,
self.use_t0_8, self.use_pl_rad_8, self.use_a_sol_8,
self.use_t0_9, self.use_pl_rad_9, self.use_a_sol_9,
]
return use_param_gui_tr
def err_t0(self):
err_t0 = [self.err_t0_1,self.err_t0_2,self.err_t0_3,
self.err_t0_4,self.err_t0_5,self.err_t0_6,
self.err_t0_7,self.err_t0_8,self.err_t0_9,
]
return err_t0
def err_pl_rad(self):
err_pl_rad = [self.err_pl_rad_1,self.err_pl_rad_2,self.err_pl_rad_3,
self.err_pl_rad_4,self.err_pl_rad_5,self.err_pl_rad_6,
self.err_pl_rad_7,self.err_pl_rad_8,self.err_pl_rad_9,
]
return err_pl_rad
def err_a_sol(self):
err_a_sol = [self.err_a_sol_1,self.err_a_sol_2,self.err_a_sol_3,
self.err_a_sol_4,self.err_a_sol_5,self.err_a_sol_6,
self.err_a_sol_7,self.err_a_sol_8,self.err_a_sol_9,
]
return err_a_sol
###########################################################################
def rvs_data_gui(self):
rvs_data_gui = [
self.Data1,self.Data2,self.Data3,self.Data4,self.Data5,
self.Data6,self.Data7,self.Data8,self.Data9,self.Data10
]
return rvs_data_gui
def rvs_data_jitter_gui(self):
rvs_data_jitter_gui = [
self.jitter_Data1,self.jitter_Data2,self.jitter_Data3,self.jitter_Data4,self.jitter_Data5,
self.jitter_Data6,self.jitter_Data7,self.jitter_Data8,self.jitter_Data9,self.jitter_Data10
]
return rvs_data_jitter_gui
def use_data_offset_gui(self):
use_data_offset_gui = [self.use_offset_Data1,self.use_offset_Data2,self.use_offset_Data3,self.use_offset_Data4,
self.use_offset_Data5,self.use_offset_Data6,self.use_offset_Data7,self.use_offset_Data8,
self.use_offset_Data9,self.use_offset_Data10]
return use_data_offset_gui
def use_data_jitter_gui(self):
use_data_jitter_gui = [self.use_jitter_Data1,self.use_jitter_Data2,self.use_jitter_Data3,self.use_jitter_Data4,self.use_jitter_Data5,
self.use_jitter_Data6,self.use_jitter_Data7,self.use_jitter_Data8,self.use_jitter_Data9,self.use_jitter_Data10]
return use_data_jitter_gui
def data_errors_gui(self):
data_errors_gui = [
self.err_Data1,self.err_Data2,self.err_Data3,self.err_Data4,self.err_Data5,
self.err_Data6,self.err_Data7,self.err_Data8,self.err_Data9,self.err_Data10
]
return data_errors_gui
def data_errors_jitter_gui(self):
data_errors_jitter_gui = [
self.err_jitter_Data1,self.err_jitter_Data2,self.err_jitter_Data3,self.err_jitter_Data4,self.err_jitter_Data5,
self.err_jitter_Data6,self.err_jitter_Data7,self.err_jitter_Data8,self.err_jitter_Data9,self.err_jitter_Data10
]
return data_errors_jitter_gui
def tra_data_gui(self):
tra_data_gui = [
self.trans_Data1,self.trans_Data2,self.trans_Data3,self.trans_Data4,self.trans_Data5,
self.trans_Data6,self.trans_Data7,self.trans_Data8,self.trans_Data9,self.trans_Data10
]
return tra_data_gui
def use_tra_data_offset_gui(self):
use_tra_data_offset_gui = [
self.use_offset_trans_Data1,self.use_offset_trans_Data2,self.use_offset_trans_Data3,self.use_offset_trans_Data4,
self.use_offset_trans_Data5,self.use_offset_trans_Data6,self.use_offset_trans_Data7,self.use_offset_trans_Data8,
self.use_offset_trans_Data9,self.use_offset_trans_Data10
]
return use_tra_data_offset_gui
def tra_data_errors_gui(self):
tra_data_errors_gui = [
self.err_trans_Data1,self.err_trans_Data2,self.err_trans_Data3,self.err_trans_Data4,self.err_trans_Data5,
self.err_trans_Data6,self.err_trans_Data7,self.err_trans_Data8,self.err_trans_Data9,self.err_trans_Data10
]
return tra_data_errors_gui
def tra_data_jitter_gui(self):
tra_data_jitter_gui = [
self.jitter_trans_Data1,self.jitter_trans_Data2,self.jitter_trans_Data3,self.jitter_trans_Data4,self.jitter_trans_Data5,
self.jitter_trans_Data6,self.jitter_trans_Data7,self.jitter_trans_Data8,self.jitter_trans_Data9,self.jitter_trans_Data10
]
return tra_data_jitter_gui
def use_tra_data_jitter_gui(self):
use_tra_data_jitter_gui = [
self.use_jitter_trans_Data1,self.use_jitter_trans_Data2,self.use_jitter_trans_Data3,self.use_jitter_trans_Data4,
self.use_jitter_trans_Data5,self.use_jitter_trans_Data6,self.use_jitter_trans_Data7,self.use_jitter_trans_Data8,
self.use_jitter_trans_Data9,self.use_jitter_trans_Data10
]
return use_tra_data_jitter_gui
def tra_data_errors_jitter_gui(self):
tra_data_errors_jitter_gui = [
self.err_jitter_trans_Data1,self.err_jitter_trans_Data2,self.err_jitter_trans_Data3,self.err_jitter_trans_Data4,
self.err_jitter_trans_Data5,self.err_jitter_trans_Data6,self.err_jitter_trans_Data7,self.err_jitter_trans_Data8,
self.err_jitter_trans_Data9,self.err_jitter_trans_Data10
]
return tra_data_errors_jitter_gui
def tra_data_lin_trend_gui(self):
tra_data_lin_trend_gui = [
self.tra_lin_trend_1,self.tra_lin_trend_2,self.tra_lin_trend_3,self.tra_lin_trend_4,self.tra_lin_trend_5,
self.tra_lin_trend_6,self.tra_lin_trend_7,self.tra_lin_trend_8,self.tra_lin_trend_9,self.tra_lin_trend_10
]
return tra_data_lin_trend_gui
def use_tra_data_lin_trend_gui(self):
use_tra_data_lin_trend_gui = [
self.use_tra_lin_trend_1,self.use_tra_lin_trend_2,self.use_tra_lin_trend_3,self.use_tra_lin_trend_4,
self.use_tra_lin_trend_5,self.use_tra_lin_trend_6,self.use_tra_lin_trend_7,self.use_tra_lin_trend_8,
self.use_tra_lin_trend_9,self.use_tra_lin_trend_10
]
return use_tra_data_lin_trend_gui
def err_tra_data_lin_trend_gui(self):
err_tra_data_lin_trend_gui = [
self.err_tra_lin_trend_1,self.err_tra_lin_trend_2,self.err_tra_lin_trend_3,self.err_tra_lin_trend_4,self.err_tra_lin_trend_5,
self.err_tra_lin_trend_6,self.err_tra_lin_trend_7,self.err_tra_lin_trend_8,self.err_tra_lin_trend_9,self.err_tra_lin_trend_10
]
return err_tra_data_lin_trend_gui
def tra_data_quad_trend_gui(self):
tra_data_quad_trend_gui = [
self.tra_quad_trend_1,self.tra_quad_trend_2,self.tra_quad_trend_3,self.tra_quad_trend_4,self.tra_quad_trend_5,
self.tra_quad_trend_6,self.tra_quad_trend_7,self.tra_quad_trend_8,self.tra_quad_trend_9,self.tra_quad_trend_10
]
return tra_data_quad_trend_gui
def use_tra_data_quad_trend_gui(self):
use_tra_data_quad_trend_gui = [
self.use_tra_quad_trend_1,self.use_tra_quad_trend_2,self.use_tra_quad_trend_3,self.use_tra_quad_trend_4,
self.use_tra_quad_trend_5,self.use_tra_quad_trend_6,self.use_tra_quad_trend_7,self.use_tra_quad_trend_8,
self.use_tra_quad_trend_9,self.use_tra_quad_trend_10
]
return use_tra_data_quad_trend_gui
def err_tra_data_quad_trend_gui(self):
err_tra_data_quad_trend_gui = [
self.err_tra_quad_trend_1,self.err_tra_quad_trend_2,self.err_tra_quad_trend_3,self.err_tra_quad_trend_4,self.err_tra_quad_trend_5,
self.err_tra_quad_trend_6,self.err_tra_quad_trend_7,self.err_tra_quad_trend_8,self.err_tra_quad_trend_9,self.err_tra_quad_trend_10
]
return err_tra_data_quad_trend_gui
def param_bounds_gui(self):
param_bounds_gui = [
[self.K_min_1,self.K_max_1],[self.P_min_1,self.P_max_1], [self.e_min_1,self.e_max_1],[self.om_min_1,self.om_max_1], [self.ma_min_1,self.ma_max_1],[self.incl_min_1,self.incl_max_1], [self.Omega_min_1,self.Omega_max_1],[self.t0_min_1,self.t0_max_1],[self.pl_rad_min_1,self.pl_rad_max_1],[self.a_sol_min_1,self.a_sol_max_1],
[self.K_min_2,self.K_max_2],[self.P_min_2,self.P_max_2], [self.e_min_2,self.e_max_2],[self.om_min_2,self.om_max_2], [self.ma_min_2,self.ma_max_2],[self.incl_min_2,self.incl_max_2], [self.Omega_min_2,self.Omega_max_2],[self.t0_min_2,self.t0_max_2],[self.pl_rad_min_2,self.pl_rad_max_2],[self.a_sol_min_2,self.a_sol_max_2],
[self.K_min_3,self.K_max_3],[self.P_min_3,self.P_max_3], [self.e_min_3,self.e_max_3],[self.om_min_3,self.om_max_3], [self.ma_min_3,self.ma_max_3],[self.incl_min_3,self.incl_max_3], [self.Omega_min_3,self.Omega_max_3],[self.t0_min_3,self.t0_max_3],[self.pl_rad_min_3,self.pl_rad_max_3],[self.a_sol_min_3,self.a_sol_max_3],
[self.K_min_4,self.K_max_4],[self.P_min_4,self.P_max_4], [self.e_min_4,self.e_max_4],[self.om_min_4,self.om_max_4], [self.ma_min_4,self.ma_max_4],[self.incl_min_4,self.incl_max_4], [self.Omega_min_4,self.Omega_max_4],[self.t0_min_4,self.t0_max_4],[self.pl_rad_min_4,self.pl_rad_max_4],[self.a_sol_min_4,self.a_sol_max_4],
[self.K_min_5,self.K_max_5],[self.P_min_5,self.P_max_5], [self.e_min_5,self.e_max_5],[self.om_min_5,self.om_max_5], [self.ma_min_5,self.ma_max_5],[self.incl_min_5,self.incl_max_5], [self.Omega_min_5,self.Omega_max_5],[self.t0_min_5,self.t0_max_5],[self.pl_rad_min_5,self.pl_rad_max_5],[self.a_sol_min_5,self.a_sol_max_5],
[self.K_min_6,self.K_max_6],[self.P_min_6,self.P_max_6], [self.e_min_6,self.e_max_6],[self.om_min_6,self.om_max_6], [self.ma_min_6,self.ma_max_6],[self.incl_min_6,self.incl_max_6], [self.Omega_min_6,self.Omega_max_6],[self.t0_min_6,self.t0_max_6],[self.pl_rad_min_6,self.pl_rad_max_6],[self.a_sol_min_6,self.a_sol_max_6],
[self.K_min_7,self.K_max_7],[self.P_min_7,self.P_max_7], [self.e_min_7,self.e_max_7],[self.om_min_7,self.om_max_7], [self.ma_min_7,self.ma_max_7],[self.incl_min_7,self.incl_max_7], [self.Omega_min_7,self.Omega_max_7],[self.t0_min_7,self.t0_max_7],[self.pl_rad_min_7,self.pl_rad_max_7],[self.a_sol_min_7,self.a_sol_max_7],
[self.K_min_8,self.K_max_8],[self.P_min_8,self.P_max_8], [self.e_min_8,self.e_max_8],[self.om_min_8,self.om_max_8], [self.ma_min_8,self.ma_max_8],[self.incl_min_8,self.incl_max_8], [self.Omega_min_8,self.Omega_max_8],[self.t0_min_8,self.t0_max_8],[self.pl_rad_min_8,self.pl_rad_max_8],[self.a_sol_min_8,self.a_sol_max_8],
[self.K_min_9,self.K_max_9],[self.P_min_9,self.P_max_9], [self.e_min_9,self.e_max_9],[self.om_min_9,self.om_max_9], [self.ma_min_9,self.ma_max_9],[self.incl_min_9,self.incl_max_9], [self.Omega_min_9,self.Omega_max_9],[self.t0_min_9,self.t0_max_9],[self.pl_rad_min_9,self.pl_rad_max_9],[self.a_sol_min_9,self.a_sol_max_9]
]
return param_bounds_gui
def offset_bounds_gui(self):
offset_bounds_gui = [
[self.Data1_min,self.Data1_max], [self.Data2_min,self.Data2_max], [self.Data3_min,self.Data3_max], [self.Data4_min,self.Data4_max], [self.Data5_min,self.Data5_max],
[self.Data6_min,self.Data6_max], [self.Data7_min,self.Data7_max], [self.Data8_min,self.Data8_max], [self.Data9_min,self.Data9_max], [self.Data10_min,self.Data10_max]
]
return offset_bounds_gui
def jitter_bounds_gui(self):
jitter_bounds_gui = [
[self.jitter1_min,self.jitter1_max], [self.jitter2_min,self.jitter2_max], [self.jitter3_min,self.jitter3_max], [self.jitter4_min,self.jitter4_max], [self.jitter5_min,self.jitter5_max],
[self.jitter6_min,self.jitter6_max], [self.jitter7_min,self.jitter7_max], [self.jitter8_min,self.jitter8_max], [self.jitter9_min,self.jitter9_max], [self.jitter10_min,self.Data10_max]
]
return jitter_bounds_gui
def offset_bounds_gui_tra(self):
offset_bounds_gui_tra = [
[self.tra_Data_min_1,self.tra_Data_max_1], [self.tra_Data_min_2,self.tra_Data_max_2], [self.tra_Data_min_3,self.tra_Data_max_3],
[self.tra_Data_min_4,self.tra_Data_max_4], [self.tra_Data_min_5,self.tra_Data_max_5], [self.tra_Data_min_6,self.tra_Data_max_6],
[self.tra_Data_min_7,self.tra_Data_max_7], [self.tra_Data_min_8,self.tra_Data_max_8], [self.tra_Data_min_9,self.tra_Data_max_9],
[self.tra_Data_min_10,self.tra_Data_max_10]
]
return offset_bounds_gui_tra
def jitter_bounds_gui_tra(self):
jitter_bounds_gui_tra = [
[self.tra_jitter_min_1,self.tra_jitter_max_1],[self.tra_jitter_min_2,self.tra_jitter_max_2],
[self.tra_jitter_min_3,self.tra_jitter_max_3],[self.tra_jitter_min_4,self.tra_jitter_max_4],
[self.tra_jitter_min_5,self.tra_jitter_max_5],[self.tra_jitter_min_6,self.tra_jitter_max_6],
[self.tra_jitter_min_7,self.tra_jitter_max_7],[self.tra_jitter_min_8,self.tra_jitter_max_8],
[self.tra_jitter_min_9,self.tra_jitter_max_9],[self.tra_jitter_min_10,self.tra_jitter_max_10]
]
return jitter_bounds_gui_tra
################### OmDot ########################
def om_dot_bounds_gui(self):
om_dot_bounds_gui = [
[self.omega_dot_min_1,self.omega_dot_max_1], [self.omega_dot_min_2,self.omega_dot_max_2],
[self.omega_dot_min_3,self.omega_dot_max_3], [self.omega_dot_min_4,self.omega_dot_max_4],
[self.omega_dot_min_5,self.omega_dot_max_5], [self.omega_dot_min_6,self.omega_dot_max_6],
[self.omega_dot_min_7,self.omega_dot_max_7], [self.omega_dot_min_8,self.omega_dot_max_8],
[self.omega_dot_min_9,self.omega_dot_max_9]
]
return om_dot_bounds_gui
################### Tra. reg. ########################
def data_tra_reg_group(self):
data_tra_reg_group = [
[self.tra_reg_bjd_1,self.tra_reg_airmass_1],[self.tra_reg_bjd_2,self.tra_reg_airmass_2],
[self.tra_reg_bjd_3,self.tra_reg_airmass_3],[self.tra_reg_bjd_4,self.tra_reg_airmass_4],
[self.tra_reg_bjd_5,self.tra_reg_airmass_5],[self.tra_reg_bjd_6,self.tra_reg_airmass_6],
[self.tra_reg_bjd_7,self.tra_reg_airmass_7],[self.tra_reg_bjd_8,self.tra_reg_airmass_8],
[self.tra_reg_bjd_9,self.tra_reg_airmass_9],[self.tra_reg_bjd_10,self.tra_reg_airmass_10]
]
return data_tra_reg_group
################### LD ########################
def data_ld_group(self):
data_ld_group = [
self.LD_group_1,self.LD_group_2,self.LD_group_3,self.LD_group_4,self.LD_group_5,
self.LD_group_6,self.LD_group_7,self.LD_group_8,self.LD_group_9,self.LD_group_10
]
return data_ld_group
def use_uni_ld_models(self):
use_uni_ld_models = [
self.use_uniform_ld_1,self.use_uniform_ld_2,self.use_uniform_ld_3,self.use_uniform_ld_4,self.use_uniform_ld_5,
self.use_uniform_ld_6,self.use_uniform_ld_7,self.use_uniform_ld_8,self.use_uniform_ld_9,self.use_uniform_ld_10
]
return use_uni_ld_models
def use_lin_ld_models(self):
use_lin_ld_models = [
self.use_linear_ld_1,self.use_linear_ld_2,self.use_linear_ld_3,self.use_linear_ld_4,self.use_linear_ld_5,
self.use_linear_ld_6,self.use_linear_ld_7,self.use_linear_ld_8,self.use_linear_ld_9,self.use_linear_ld_10
]
return use_lin_ld_models
def use_quad_ld_models(self):
use_quad_ld_models =[
self.use_quadratic_ld_1,self.use_quadratic_ld_2,self.use_quadratic_ld_3,self.use_quadratic_ld_4,self.use_quadratic_ld_5,
self.use_quadratic_ld_6,self.use_quadratic_ld_7,self.use_quadratic_ld_8,self.use_quadratic_ld_9,self.use_quadratic_ld_10
]
return use_quad_ld_models
def use_nonlin_ld_models(self):
use_nonlin_ld_models = [
self.use_nonlinear_ld_1,self.use_nonlinear_ld_2,self.use_nonlinear_ld_3,self.use_nonlinear_ld_4,self.use_nonlinear_ld_5,
self.use_nonlinear_ld_6,self.use_nonlinear_ld_7,self.use_nonlinear_ld_8,self.use_nonlinear_ld_9,self.use_nonlinear_ld_10
]
return use_nonlin_ld_models
def lin_u(self):
lin_u = [self.u1_linear_1,self.u1_linear_2,self.u1_linear_3,self.u1_linear_4,self.u1_linear_5,
self.u1_linear_6,self.u1_linear_7,self.u1_linear_8,self.u1_linear_9,self.u1_linear_10
]
return lin_u
def use_lin_u(self):
use_lin_u = [
self.use_u1_linear_1,self.use_u1_linear_2,self.use_u1_linear_3,self.use_u1_linear_4,self.use_u1_linear_5,
self.use_u1_linear_6,self.use_u1_linear_7,self.use_u1_linear_8,self.use_u1_linear_9,self.use_u1_linear_10
]
return use_lin_u
def err_lin_u(self):
err_lin_u = [self.err_u1_linear_1,self.err_u1_linear_2,self.err_u1_linear_3,self.err_u1_linear_4,self.err_u1_linear_5,
self.err_u1_linear_6,self.err_u1_linear_7,self.err_u1_linear_8,self.err_u1_linear_9,self.err_u1_linear_10
]
return err_lin_u
def quad_u1(self):
quad_u1 = [
self.u1_quadratic_1,self.u1_quadratic_2,self.u1_quadratic_3,self.u1_quadratic_4,self.u1_quadratic_5,
self.u1_quadratic_6,self.u1_quadratic_7,self.u1_quadratic_8,self.u1_quadratic_9,self.u1_quadratic_10
]
return quad_u1
def use_quad_u1(self):
use_quad_u1 = [
self.use_u1_quadratic_1,self.use_u1_quadratic_2,self.use_u1_quadratic_3,self.use_u1_quadratic_4,self.use_u1_quadratic_5,
self.use_u1_quadratic_6,self.use_u1_quadratic_7,self.use_u1_quadratic_8,self.use_u1_quadratic_9,self.use_u1_quadratic_10
]
return use_quad_u1
def err_quad_u1(self):
err_quad_u1 = [
self.err_u1_quadratic_1,self.err_u1_quadratic_2,self.err_u1_quadratic_3,self.err_u1_quadratic_4,self.err_u1_quadratic_5,
self.err_u1_quadratic_6,self.err_u1_quadratic_7,self.err_u1_quadratic_8,self.err_u1_quadratic_9,self.err_u1_quadratic_10
]
return err_quad_u1
def quad_u2(self):
quad_u2 = [
self.u2_quadratic_1,self.u2_quadratic_2,self.u2_quadratic_3,self.u2_quadratic_4,self.u2_quadratic_5,
self.u2_quadratic_6,self.u2_quadratic_7,self.u2_quadratic_8,self.u2_quadratic_9,self.u2_quadratic_10
]
return quad_u2
def use_quad_u2(self):
use_quad_u2 = [
self.use_u2_quadratic_1,self.use_u2_quadratic_2,self.use_u2_quadratic_3,self.use_u2_quadratic_4,self.use_u2_quadratic_5,
self.use_u2_quadratic_6,self.use_u2_quadratic_7,self.use_u2_quadratic_8,self.use_u2_quadratic_9,self.use_u2_quadratic_10
]
return use_quad_u2
def err_quad_u2(self):
err_quad_u2 = [
self.err_u2_quadratic_1,self.err_u2_quadratic_2,self.err_u2_quadratic_3,self.err_u2_quadratic_4,self.err_u2_quadratic_5,
self.err_u2_quadratic_6,self.err_u2_quadratic_7,self.err_u2_quadratic_8,self.err_u2_quadratic_9,self.err_u2_quadratic_10
]
return err_quad_u2
def nonlin_u1(self):
nonlin_u1 = [
self.u1_nonlin_1,self.u1_nonlin_2,self.u1_nonlin_3,self.u1_nonlin_4,self.u1_nonlin_5,
self.u1_nonlin_6,self.u1_nonlin_7,self.u1_nonlin_8,self.u1_nonlin_9,self.u1_nonlin_10
]
return nonlin_u1
def use_nonlin_u1(self):
use_nonlin_u1 = [
self.use_u1_nonlin_1,self.use_u1_nonlin_2,self.use_u1_nonlin_3,self.use_u1_nonlin_4,self.use_u1_nonlin_5,
self.use_u1_nonlin_6,self.use_u1_nonlin_7,self.use_u1_nonlin_8,self.use_u1_nonlin_9,self.use_u1_nonlin_10
]
return use_nonlin_u1
def err_nonlin_u1(self):
err_nonlin_u1 = [
self.err_u1_nonlin_1,self.err_u1_nonlin_2,self.err_u1_nonlin_3,self.err_u1_nonlin_4,self.err_u1_nonlin_5,
self.err_u1_nonlin_6,self.err_u1_nonlin_7,self.err_u1_nonlin_8,self.err_u1_nonlin_9,self.err_u1_nonlin_10
]
return err_nonlin_u1
def nonlin_u2(self):
nonlin_u2 = [
self.u2_nonlin_1,self.u2_nonlin_2,self.u2_nonlin_3,self.u2_nonlin_4,self.u2_nonlin_5,
self.u2_nonlin_6,self.u2_nonlin_7,self.u2_nonlin_8,self.u2_nonlin_9,self.u2_nonlin_10
]
return nonlin_u2
def use_nonlin_u2(self):
use_nonlin_u2 = [
self.use_u2_nonlin_1,self.use_u2_nonlin_2,self.use_u2_nonlin_3,self.use_u2_nonlin_4,self.use_u2_nonlin_5,
self.use_u2_nonlin_6,self.use_u2_nonlin_7,self.use_u2_nonlin_8,self.use_u2_nonlin_9,self.use_u2_nonlin_10
]
return use_nonlin_u2
def err_nonlin_u2(self):
err_nonlin_u2 = [
self.err_u2_nonlin_1,self.err_u2_nonlin_2,self.err_u2_nonlin_3,self.err_u2_nonlin_4,self.err_u2_nonlin_5,
self.err_u2_nonlin_6,self.err_u2_nonlin_7,self.err_u2_nonlin_8,self.err_u2_nonlin_9,self.err_u2_nonlin_10
]
return err_nonlin_u2
def nonlin_u3(self):
nonlin_u3 = [
self.u3_nonlin_1,self.u3_nonlin_2,self.u3_nonlin_3,self.u3_nonlin_4,self.u3_nonlin_5,
self.u3_nonlin_6,self.u3_nonlin_7,self.u3_nonlin_8,self.u3_nonlin_9,self.u3_nonlin_10
]
return nonlin_u3
def use_nonlin_u3(self):
use_nonlin_u3 = [
self.use_u3_nonlin_1,self.use_u3_nonlin_2,self.use_u3_nonlin_3,self.use_u3_nonlin_4,self.use_u3_nonlin_5,
self.use_u3_nonlin_6,self.use_u3_nonlin_7,self.use_u3_nonlin_8,self.use_u3_nonlin_9,self.use_u3_nonlin_10
]
return use_nonlin_u3
def err_nonlin_u3(self):
err_nonlin_u3 = [
self.err_u3_nonlin_1,self.err_u3_nonlin_2,self.err_u3_nonlin_3,self.err_u3_nonlin_4,self.err_u3_nonlin_5,
self.err_u3_nonlin_6,self.err_u3_nonlin_7,self.err_u3_nonlin_8,self.err_u3_nonlin_9,self.err_u3_nonlin_10
]
return err_nonlin_u3
def nonlin_u4(self):
nonlin_u4 = [
self.u4_nonlin_1,self.u4_nonlin_2,self.u4_nonlin_3,self.u4_nonlin_4,self.u4_nonlin_5,
self.u4_nonlin_6,self.u4_nonlin_7,self.u4_nonlin_8,self.u4_nonlin_9,self.u4_nonlin_10
]
return nonlin_u4
def use_nonlin_u4(self):
use_nonlin_u4 = [
self.use_u4_nonlin_1,self.use_u4_nonlin_2,self.use_u4_nonlin_3,self.use_u4_nonlin_4,self.use_u4_nonlin_5,
self.use_u4_nonlin_6,self.use_u4_nonlin_7,self.use_u4_nonlin_8,self.use_u4_nonlin_9,self.use_u4_nonlin_10
]
return use_nonlin_u4
def err_nonlin_u4(self):
err_nonlin_u4 = [
self.err_u4_nonlin_1,self.err_u4_nonlin_2,self.err_u4_nonlin_3,self.err_u4_nonlin_4,self.err_u4_nonlin_5,
self.err_u4_nonlin_6,self.err_u4_nonlin_7,self.err_u4_nonlin_8,self.err_u4_nonlin_9,self.err_u4_nonlin_10
]
return err_nonlin_u4
def ld_u1_bounds_gui(self):
ld_u1_bounds_gui = [
[self.u1_min_1,self.u1_max_1],[self.u1_min_2,self.u1_max_2],[self.u1_min_3,self.u1_max_3],
[self.u1_min_4,self.u1_max_4],[self.u1_min_5,self.u1_max_5],[self.u1_min_6,self.u1_max_6],
[self.u1_min_7,self.u1_max_7],[self.u1_min_8,self.u1_max_8],[self.u1_min_9,self.u1_max_9],
[self.u1_min_10,self.u1_max_10]
]
return ld_u1_bounds_gui
def ld_u2_bounds_gui(self):
ld_u2_bounds_gui = [
[self.u2_min_1,self.u2_max_1],[self.u2_min_2,self.u2_max_2],[self.u2_min_3,self.u2_max_3],
[self.u2_min_4,self.u2_max_4],[self.u2_min_5,self.u2_max_5],[self.u2_min_6,self.u2_max_6],
[self.u2_min_7,self.u2_max_7],[self.u2_min_8,self.u2_max_8],[self.u2_min_9,self.u2_max_9],
[self.u2_min_10,self.u2_max_10]
]
return ld_u2_bounds_gui
def ld_u3_bounds_gui(self):
ld_u3_bounds_gui = [
[self.u3_min_1,self.u3_max_1],[self.u3_min_2,self.u3_max_2],[self.u3_min_3,self.u3_max_3],
[self.u3_min_4,self.u3_max_4],[self.u3_min_5,self.u3_max_5],[self.u3_min_6,self.u3_max_6],
[self.u3_min_7,self.u3_max_7],[self.u3_min_8,self.u3_max_8],[self.u3_min_9,self.u3_max_9],
[self.u3_min_10,self.u3_max_10]
]
return ld_u3_bounds_gui
def ld_u4_bounds_gui(self):
ld_u4_bounds_gui = [
[self.u4_min_1,self.u4_max_1],[self.u4_min_2,self.u4_max_2],[self.u4_min_3,self.u4_max_3],
[self.u4_min_4,self.u4_max_4],[self.u4_min_5,self.u4_max_5],[self.u4_min_6,self.u4_max_6],
[self.u4_min_7,self.u4_max_7],[self.u4_min_8,self.u4_max_8],[self.u4_min_9,self.u4_max_9],
[self.u4_min_10,self.u4_max_10]
]
return ld_u4_bounds_gui
################# Bounds (Flat Prior) ################
def tra_lintr_bounds_gui(self):
tra_lintr_bounds_gui = [
[self.tra_lin_trend_bound_min_1,self.tra_lin_trend_bound_max_1],
[self.tra_lin_trend_bound_min_2,self.tra_lin_trend_bound_max_2],
[self.tra_lin_trend_bound_min_3,self.tra_lin_trend_bound_max_3],
[self.tra_lin_trend_bound_min_4,self.tra_lin_trend_bound_max_4],
[self.tra_lin_trend_bound_min_5,self.tra_lin_trend_bound_max_5],
[self.tra_lin_trend_bound_min_6,self.tra_lin_trend_bound_max_6],
[self.tra_lin_trend_bound_min_7,self.tra_lin_trend_bound_max_7],
[self.tra_lin_trend_bound_min_8,self.tra_lin_trend_bound_max_8],
[self.tra_lin_trend_bound_min_9,self.tra_lin_trend_bound_max_9],
[self.tra_lin_trend_bound_min_10,self.tra_lin_trend_bound_max_10]
]
return tra_lintr_bounds_gui
def tra_quadtr_bounds_gui(self):
tra_quadtr_bounds_gui = [
[self.tra_quad_trend_bound_min_1,self.tra_quad_trend_bound_max_1],
[self.tra_quad_trend_bound_min_2,self.tra_quad_trend_bound_max_2],
[self.tra_quad_trend_bound_min_3,self.tra_quad_trend_bound_max_3],
[self.tra_quad_trend_bound_min_4,self.tra_quad_trend_bound_max_4],
[self.tra_quad_trend_bound_min_5,self.tra_quad_trend_bound_max_5],
[self.tra_quad_trend_bound_min_6,self.tra_quad_trend_bound_max_6],
[self.tra_quad_trend_bound_min_7,self.tra_quad_trend_bound_max_7],
[self.tra_quad_trend_bound_min_8,self.tra_quad_trend_bound_max_8],
[self.tra_quad_trend_bound_min_9,self.tra_quad_trend_bound_max_9],
[self.tra_quad_trend_bound_min_10,self.tra_quad_trend_bound_max_10]
]
return tra_quadtr_bounds_gui
################# Normal Prior ################
def param_nr_priors_gui(self):
param_nr_priors_gui = [
[self.K_mean_1,self.K_sigma_1,self.use_K_norm_pr_1],[self.P_mean_1,self.P_sigma_1,self.use_P_norm_pr_1], [self.e_mean_1,self.e_sigma_1,self.use_e_norm_pr_1],[self.om_mean_1,self.om_sigma_1,self.use_om_norm_pr_1], [self.ma_mean_1,self.ma_sigma_1,self.use_ma_norm_pr_1],[self.incl_mean_1,self.incl_sigma_1,self.use_incl_norm_pr_1], [self.Omega_mean_1,self.Omega_sigma_1, self.use_Omega_norm_pr_1],[self.t0_mean_1,self.t0_sigma_1, self.use_t0_norm_pr_1],[self.pl_rad_mean_1,self.pl_rad_sigma_1,self.use_pl_rad_norm_pr_1],[self.a_sol_mean_1,self.a_sol_sigma_1,self.use_a_sol_norm_pr_1],
[self.K_mean_2,self.K_sigma_2,self.use_K_norm_pr_2],[self.P_mean_2,self.P_sigma_2,self.use_P_norm_pr_2], [self.e_mean_2,self.e_sigma_2,self.use_e_norm_pr_2],[self.om_mean_2,self.om_sigma_2,self.use_om_norm_pr_2], [self.ma_mean_2,self.ma_sigma_2,self.use_ma_norm_pr_2],[self.incl_mean_2,self.incl_sigma_2,self.use_incl_norm_pr_2], [self.Omega_mean_2,self.Omega_sigma_2, self.use_Omega_norm_pr_2],[self.t0_mean_2,self.t0_sigma_2, self.use_t0_norm_pr_2],[self.pl_rad_mean_2,self.pl_rad_sigma_2,self.use_pl_rad_norm_pr_2],[self.a_sol_mean_2,self.a_sol_sigma_2,self.use_a_sol_norm_pr_2],
[self.K_mean_3,self.K_sigma_3,self.use_K_norm_pr_3],[self.P_mean_3,self.P_sigma_3,self.use_P_norm_pr_3], [self.e_mean_3,self.e_sigma_3,self.use_e_norm_pr_3],[self.om_mean_3,self.om_sigma_3,self.use_om_norm_pr_3], [self.ma_mean_3,self.ma_sigma_3,self.use_ma_norm_pr_3],[self.incl_mean_3,self.incl_sigma_3,self.use_incl_norm_pr_3], [self.Omega_mean_3,self.Omega_sigma_3, self.use_Omega_norm_pr_3],[self.t0_mean_3,self.t0_sigma_3, self.use_t0_norm_pr_3],[self.pl_rad_mean_3,self.pl_rad_sigma_3,self.use_pl_rad_norm_pr_3],[self.a_sol_mean_3,self.a_sol_sigma_3,self.use_a_sol_norm_pr_3],
[self.K_mean_4,self.K_sigma_4,self.use_K_norm_pr_4],[self.P_mean_4,self.P_sigma_4,self.use_P_norm_pr_4], [self.e_mean_4,self.e_sigma_4,self.use_e_norm_pr_4],[self.om_mean_4,self.om_sigma_4,self.use_om_norm_pr_4], [self.ma_mean_4,self.ma_sigma_4,self.use_ma_norm_pr_4],[self.incl_mean_4,self.incl_sigma_4,self.use_incl_norm_pr_4], [self.Omega_mean_4,self.Omega_sigma_4, self.use_Omega_norm_pr_4],[self.t0_mean_4,self.t0_sigma_4, self.use_t0_norm_pr_4],[self.pl_rad_mean_4,self.pl_rad_sigma_4,self.use_pl_rad_norm_pr_4],[self.a_sol_mean_4,self.a_sol_sigma_4,self.use_a_sol_norm_pr_4],
[self.K_mean_5,self.K_sigma_5,self.use_K_norm_pr_5],[self.P_mean_5,self.P_sigma_5,self.use_P_norm_pr_5], [self.e_mean_5,self.e_sigma_5,self.use_e_norm_pr_5],[self.om_mean_5,self.om_sigma_5,self.use_om_norm_pr_5], [self.ma_mean_5,self.ma_sigma_5,self.use_ma_norm_pr_5],[self.incl_mean_5,self.incl_sigma_5,self.use_incl_norm_pr_5], [self.Omega_mean_5,self.Omega_sigma_5, self.use_Omega_norm_pr_5],[self.t0_mean_5,self.t0_sigma_5, self.use_t0_norm_pr_5],[self.pl_rad_mean_5,self.pl_rad_sigma_5,self.use_pl_rad_norm_pr_5],[self.a_sol_mean_5,self.a_sol_sigma_5,self.use_a_sol_norm_pr_5],
[self.K_mean_6,self.K_sigma_6,self.use_K_norm_pr_6],[self.P_mean_6,self.P_sigma_6,self.use_P_norm_pr_6], [self.e_mean_6,self.e_sigma_6,self.use_e_norm_pr_6],[self.om_mean_6,self.om_sigma_6,self.use_om_norm_pr_6], [self.ma_mean_6,self.ma_sigma_6,self.use_ma_norm_pr_6],[self.incl_mean_6,self.incl_sigma_6,self.use_incl_norm_pr_6], [self.Omega_mean_6,self.Omega_sigma_6, self.use_Omega_norm_pr_6],[self.t0_mean_6,self.t0_sigma_6, self.use_t0_norm_pr_6],[self.pl_rad_mean_6,self.pl_rad_sigma_6,self.use_pl_rad_norm_pr_6],[self.a_sol_mean_6,self.a_sol_sigma_6,self.use_a_sol_norm_pr_6],
[self.K_mean_7,self.K_sigma_7,self.use_K_norm_pr_7],[self.P_mean_7,self.P_sigma_7,self.use_P_norm_pr_7], [self.e_mean_7,self.e_sigma_7,self.use_e_norm_pr_7],[self.om_mean_7,self.om_sigma_7,self.use_om_norm_pr_7], [self.ma_mean_7,self.ma_sigma_7,self.use_ma_norm_pr_7],[self.incl_mean_7,self.incl_sigma_7,self.use_incl_norm_pr_7], [self.Omega_mean_7,self.Omega_sigma_7, self.use_Omega_norm_pr_7],[self.t0_mean_7,self.t0_sigma_7, self.use_t0_norm_pr_7],[self.pl_rad_mean_7,self.pl_rad_sigma_7,self.use_pl_rad_norm_pr_7],[self.a_sol_mean_7,self.a_sol_sigma_7,self.use_a_sol_norm_pr_7],
[self.K_mean_8,self.K_sigma_8,self.use_K_norm_pr_8],[self.P_mean_8,self.P_sigma_8,self.use_P_norm_pr_8], [self.e_mean_8,self.e_sigma_8,self.use_e_norm_pr_8],[self.om_mean_8,self.om_sigma_8,self.use_om_norm_pr_8], [self.ma_mean_8,self.ma_sigma_8,self.use_ma_norm_pr_8],[self.incl_mean_8,self.incl_sigma_8,self.use_incl_norm_pr_8], [self.Omega_mean_8,self.Omega_sigma_8, self.use_Omega_norm_pr_8],[self.t0_mean_8,self.t0_sigma_8, self.use_t0_norm_pr_8],[self.pl_rad_mean_8,self.pl_rad_sigma_8,self.use_pl_rad_norm_pr_8],[self.a_sol_mean_8,self.a_sol_sigma_8,self.use_a_sol_norm_pr_8],
[self.K_mean_9,self.K_sigma_9,self.use_K_norm_pr_9],[self.P_mean_9,self.P_sigma_9,self.use_P_norm_pr_9], [self.e_mean_9,self.e_sigma_9,self.use_e_norm_pr_9],[self.om_mean_9,self.om_sigma_9,self.use_om_norm_pr_9], [self.ma_mean_9,self.ma_sigma_9,self.use_ma_norm_pr_9],[self.incl_mean_9,self.incl_sigma_9,self.use_incl_norm_pr_9], [self.Omega_mean_9,self.Omega_sigma_9, self.use_Omega_norm_pr_9],[self.t0_mean_9,self.t0_sigma_9, self.use_t0_norm_pr_9],[self.pl_rad_mean_9,self.pl_rad_sigma_9,self.use_pl_rad_norm_pr_9],[self.a_sol_mean_9,self.a_sol_sigma_9,self.use_a_sol_norm_pr_9],
]
return param_nr_priors_gui
def offset_nr_priors_gui(self):
offset_nr_priors_gui = [
[self.RV_Data_mean_1,self.RV_Data_sigma_1,self.use_rvoff_nr_1],
[self.RV_Data_mean_2,self.RV_Data_sigma_2,self.use_rvoff_nr_2],
[self.RV_Data_mean_3,self.RV_Data_sigma_3,self.use_rvoff_nr_3],
[self.RV_Data_mean_4,self.RV_Data_sigma_4,self.use_rvoff_nr_4],
[self.RV_Data_mean_5,self.RV_Data_sigma_5,self.use_rvoff_nr_5],
[self.RV_Data_mean_6,self.RV_Data_sigma_6,self.use_rvoff_nr_6],
[self.RV_Data_mean_7,self.RV_Data_sigma_7,self.use_rvoff_nr_7],
[self.RV_Data_mean_8,self.RV_Data_sigma_8,self.use_rvoff_nr_8],
[self.RV_Data_mean_9,self.RV_Data_sigma_9,self.use_rvoff_nr_9],
[self.RV_Data_mean_10,self.RV_Data_sigma_10,self.use_rvoff_nr_10]
]
return offset_nr_priors_gui
def jitter_nr_priors_gui(self):
jitter_nr_priors_gui = [
[self.RV_jitter_mean_1,self.RV_jitter_sigma_1,self.use_rvjitt_nr_1],
[self.RV_jitter_mean_2,self.RV_jitter_sigma_2,self.use_rvjitt_nr_2],
[self.RV_jitter_mean_3,self.RV_jitter_sigma_3,self.use_rvjitt_nr_3],
[self.RV_jitter_mean_4,self.RV_jitter_sigma_4,self.use_rvjitt_nr_4],
[self.RV_jitter_mean_5,self.RV_jitter_sigma_5,self.use_rvjitt_nr_5],
[self.RV_jitter_mean_6,self.RV_jitter_sigma_6,self.use_rvjitt_nr_6],
[self.RV_jitter_mean_7,self.RV_jitter_sigma_7,self.use_rvjitt_nr_7],
[self.RV_jitter_mean_8,self.RV_jitter_sigma_8,self.use_rvjitt_nr_8],
[self.RV_jitter_mean_9,self.RV_jitter_sigma_9,self.use_rvjitt_nr_9],
[self.RV_jitter_mean_10,self.RV_jitter_sigma_10,self.use_rvjitt_nr_10]
]
return jitter_nr_priors_gui
def offset_nr_priors_gui_tra(self):
offset_nr_priors_gui_tra = [
[self.tra_Data_mean_1,self.tra_Data_sigma_1,self.use_traoff_nr_1],
[self.tra_Data_mean_2,self.tra_Data_sigma_2,self.use_traoff_nr_2],
[self.tra_Data_mean_3,self.tra_Data_sigma_3,self.use_traoff_nr_3],
[self.tra_Data_mean_4,self.tra_Data_sigma_4,self.use_traoff_nr_4],
[self.tra_Data_mean_5,self.tra_Data_sigma_5,self.use_traoff_nr_5],
[self.tra_Data_mean_6,self.tra_Data_sigma_6,self.use_traoff_nr_6],
[self.tra_Data_mean_7,self.tra_Data_sigma_7,self.use_traoff_nr_7],
[self.tra_Data_mean_8,self.tra_Data_sigma_8,self.use_traoff_nr_8],
[self.tra_Data_mean_9,self.tra_Data_sigma_9,self.use_traoff_nr_9],
[self.tra_Data_mean_10,self.tra_Data_sigma_10,self.use_traoff_nr_10]
]
return offset_nr_priors_gui_tra
def jitter_nr_priors_gui_tra(self):
jitter_nr_priors_gui_tra = [
[self.tra_jitter_mean_1,self.tra_jitter_sigma_1,self.use_trajitt_nr_1],
[self.tra_jitter_mean_2,self.tra_jitter_sigma_2,self.use_trajitt_nr_2],
[self.tra_jitter_mean_3,self.tra_jitter_sigma_3,self.use_trajitt_nr_3],
[self.tra_jitter_mean_4,self.tra_jitter_sigma_4,self.use_trajitt_nr_4],
[self.tra_jitter_mean_5,self.tra_jitter_sigma_5,self.use_trajitt_nr_5],
[self.tra_jitter_mean_6,self.tra_jitter_sigma_6,self.use_trajitt_nr_6],
[self.tra_jitter_mean_7,self.tra_jitter_sigma_7,self.use_trajitt_nr_7],
[self.tra_jitter_mean_8,self.tra_jitter_sigma_8,self.use_trajitt_nr_8],
[self.tra_jitter_mean_9,self.tra_jitter_sigma_9,self.use_trajitt_nr_9],
[self.tra_jitter_mean_10,self.tra_jitter_sigma_10,self.use_trajitt_nr_10]
]
return jitter_nr_priors_gui_tra
def tra_lin_trend_nr_priors_gui(self):
tra_lin_trend_nr_priors_gui = [
[self.tra_lin_trend_mean_1,self.tra_lin_trend_sigma_1,self.use_tra_lin_mean_nr_1],
[self.tra_lin_trend_mean_2,self.tra_lin_trend_sigma_2,self.use_tra_lin_mean_nr_2],
[self.tra_lin_trend_mean_3,self.tra_lin_trend_sigma_3,self.use_tra_lin_mean_nr_3],
[self.tra_lin_trend_mean_4,self.tra_lin_trend_sigma_4,self.use_tra_lin_mean_nr_4],
[self.tra_lin_trend_mean_5,self.tra_lin_trend_sigma_5,self.use_tra_lin_mean_nr_5],
[self.tra_lin_trend_mean_6,self.tra_lin_trend_sigma_6,self.use_tra_lin_mean_nr_6],
[self.tra_lin_trend_mean_7,self.tra_lin_trend_sigma_7,self.use_tra_lin_mean_nr_7],
[self.tra_lin_trend_mean_8,self.tra_lin_trend_sigma_8,self.use_tra_lin_mean_nr_8],
[self.tra_lin_trend_mean_9,self.tra_lin_trend_sigma_9,self.use_tra_lin_mean_nr_9],
[self.tra_lin_trend_mean_10,self.tra_lin_trend_sigma_10,self.use_tra_lin_mean_nr_10]
]
return tra_lin_trend_nr_priors_gui
def tra_quad_trend_nr_priors_gui(self):
tra_quad_trend_nr_priors_gui = [
[self.tra_quad_trend_mean_1,self.tra_quad_trend_sigma_1,self.use_tra_quad_mean_nr_1],
[self.tra_quad_trend_mean_2,self.tra_quad_trend_sigma_2,self.use_tra_quad_mean_nr_2],
[self.tra_quad_trend_mean_3,self.tra_quad_trend_sigma_3,self.use_tra_quad_mean_nr_3],
[self.tra_quad_trend_mean_4,self.tra_quad_trend_sigma_4,self.use_tra_quad_mean_nr_4],
[self.tra_quad_trend_mean_5,self.tra_quad_trend_sigma_5,self.use_tra_quad_mean_nr_5],
[self.tra_quad_trend_mean_6,self.tra_quad_trend_sigma_6,self.use_tra_quad_mean_nr_6],
[self.tra_quad_trend_mean_7,self.tra_quad_trend_sigma_7,self.use_tra_quad_mean_nr_7],
[self.tra_quad_trend_mean_8,self.tra_quad_trend_sigma_8,self.use_tra_quad_mean_nr_8],
[self.tra_quad_trend_mean_9,self.tra_quad_trend_sigma_9,self.use_tra_quad_mean_nr_9],
[self.tra_quad_trend_mean_10,self.tra_quad_trend_sigma_10,self.use_tra_quad_mean_nr_10]
]
return tra_quad_trend_nr_priors_gui
################# Jeff Prior ################
def param_jeff_priors_gui(self):
param_jeff_priors_gui = [
[self.K_jeff_alpha_1,self.K_jeff_beta_1,self.use_K_jeff_pr_1],[self.P_jeff_alpha_1,self.P_jeff_beta_1,self.use_P_jeff_pr_1], [self.e_jeff_alpha_1,self.e_jeff_beta_1,self.use_e_jeff_pr_1],[self.om_jeff_alpha_1,self.om_jeff_beta_1,self.use_om_jeff_pr_1], [self.ma_jeff_alpha_1,self.ma_jeff_beta_1,self.use_ma_jeff_pr_1],[self.incl_jeff_alpha_1,self.incl_jeff_beta_1,self.use_incl_jeff_pr_1], [self.Omega_jeff_alpha_1,self.Omega_jeff_beta_1, self.use_Omega_jeff_pr_1],[self.t0_jeff_alpha_1,self.t0_jeff_beta_1, self.use_t0_jeff_pr_1],[self.pl_rad_jeff_alpha_1,self.pl_rad_jeff_beta_1,self.use_pl_rad_jeff_pr_1],[self.a_sol_jeff_alpha_1,self.a_sol_jeff_beta_1,self.use_a_sol_jeff_pr_1],
[self.K_jeff_alpha_2,self.K_jeff_beta_2,self.use_K_jeff_pr_2],[self.P_jeff_alpha_2,self.P_jeff_beta_2,self.use_P_jeff_pr_2], [self.e_jeff_alpha_2,self.e_jeff_beta_2,self.use_e_jeff_pr_2],[self.om_jeff_alpha_2,self.om_jeff_beta_2,self.use_om_jeff_pr_2], [self.ma_jeff_alpha_2,self.ma_jeff_beta_2,self.use_ma_jeff_pr_2],[self.incl_jeff_alpha_2,self.incl_jeff_beta_2,self.use_incl_jeff_pr_2], [self.Omega_jeff_alpha_2,self.Omega_jeff_beta_2, self.use_Omega_jeff_pr_2],[self.t0_jeff_alpha_2,self.t0_jeff_beta_2, self.use_t0_jeff_pr_2],[self.pl_rad_jeff_alpha_2,self.pl_rad_jeff_beta_2,self.use_pl_rad_jeff_pr_2],[self.a_sol_jeff_alpha_2,self.a_sol_jeff_beta_2,self.use_a_sol_jeff_pr_2],
[self.K_jeff_alpha_3,self.K_jeff_beta_3,self.use_K_jeff_pr_3],[self.P_jeff_alpha_3,self.P_jeff_beta_3,self.use_P_jeff_pr_3], | |
# <gh_stars>1-10  (stray dataset artifact; commented out so the module parses)
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
from scipy import stats
import lightgbm as lgb
from datetime import datetime
from alpha_vantage.timeseries import TimeSeries
from alpha_vantage.foreignexchange import ForeignExchange
import os.path
import time
def halflife(yp):
    """Mean-reversion half-life of series *yp* via an AR(1)/OU regression.

    Regresses the one-step differences of *yp* on the lagged level and
    converts the fitted slope into a half-life: -ln(2) / slope.
    Assumes *yp* is a pandas Series (uses ``.shift()``).
    """
    diffs = (yp - yp.shift())[1:]
    lagged = yp[:-1]
    slope = stats.linregress(lagged, diffs)[0]
    return -np.log(2) / slope
def wwma(values, n):
    """Wilder-style smoothed moving average: an EMA with alpha = 1/n (no adjust)."""
    smoothing = 1.0 / n
    return values.ewm(alpha=smoothing, adjust=False).mean()
def countdown(t):
    """Console countdown timer: print mm:ss in place once per second for *t* seconds.

    Prints a final flushed "00:00" when the countdown reaches zero.
    """
    remaining = t
    while remaining:
        print('{:02d}:{:02d}'.format(*divmod(remaining, 60)), end='\r')
        time.sleep(1)
        remaining -= 1
        if remaining == 0:
            print('{:02d}:{:02d}'.format(*divmod(remaining, 60)), end='\r', flush=True)
    return
def atr(df, symbol, n=14):
    """Average True Range for one *symbol* of a (date, symbol)-indexed OHLC frame.

    True range per bar is max(|high-low|, |high-prev_close|, |low-prev_close|);
    it is smoothed with a Wilder-style EMA (alpha = 1/n, adjust=False).

    Parameters
    ----------
    df : pandas.DataFrame with a MultiIndex containing a 'symbol' level and
        'high'/'low'/'close' columns.
    symbol : str
        Symbol to select from the index.
    n : int
        Smoothing window (default 14, the conventional ATR period).

    Fix vs. original: the tr0/tr1/tr2 components were assigned as columns onto
    a boolean-mask slice of *df*, which triggers pandas' SettingWithCopyWarning
    and mutates a temporary copy as a side effect. They are now computed as
    standalone Series; the result is numerically identical.
    """
    df_symbol = df.loc[df.index.get_level_values('symbol') == symbol]
    high = df_symbol['high']
    low = df_symbol['low']
    close = df_symbol['close']
    prev_close = close.shift(1)
    # row-wise max skips the NaNs produced by shift() on the first bar,
    # matching the original DataFrame.max(axis=1) behaviour.
    tr = pd.concat([(high - low).abs(),
                    (high - prev_close).abs(),
                    (low - prev_close).abs()], axis=1).max(axis=1)
    # Wilder smoothing, inlined from wwma() so the function is self-contained.
    return tr.ewm(alpha=1 / n, adjust=False).mean()
def momentum_score(ts):
    """Annualized exponential-regression momentum, weighted by the fit's R^2.

    Fits log(ts) against its integer positions, annualizes the slope over
    252 trading days as a percentage, and multiplies by R^2 so noisy fits
    score lower. NaNs in log(ts) are masked out before the regression.
    """
    positions = np.arange(len(ts))
    log_ts = np.log(ts)
    valid = ~np.isnan(positions) & ~np.isnan(log_ts)
    slope, _, rvalue, _, _ = stats.linregress(positions[valid], log_ts[valid])
    annualized_slope = (np.power(np.exp(slope), 252) - 1) * 100
    return annualized_slope * (rvalue ** 2)
# def momentum(closes):
# returns = np.log(closes)
# x = np.arange(len(returns))
# mask = ~np.isnan(x) & ~np.isnan(returns)
# slope, _, rvalue, _, _ = stats.linregress(x[mask], returns[mask])
# return ((1 + slope) ** 252) * (rvalue ** 2) # annualize slope and multiply by R^2
def mmi(closes):
    """Market Meaningfulness Index-style ratio over *closes*.

    Counts steps that rise while above the median or fall while below it,
    and returns that count divided by the number of steps (len-1).
    """
    med = np.median(closes)
    hits = 0
    for i in range(1, len(closes)):
        above_and_up = closes[i] > med and closes[i] > closes[i - 1]
        below_and_down = closes[i] < med and closes[i] < closes[i - 1]
        if above_and_up or below_and_down:
            hits += 1
    return hits / (len(closes) - 1)
def save_csv(df, path, filename):
    """Write *df* to ``path + filename`` with second-resolution date formatting."""
    target = path + filename
    df.to_csv(target, date_format='%Y-%m-%d %H:%M:%S')
def load_csv(path, filename):
    """Load a price CSV and re-index it by its (date, symbol) columns."""
    frame = pd.read_csv(path + filename, parse_dates=True)
    frame['date'] = frame['date'].astype('datetime64[ns]')
    return frame.set_index(['date', 'symbol'])
def load_FX_csv(path, filename):
    """Load an FX-rate CSV and re-index it by its 'date' column only."""
    frame = pd.read_csv(path + filename, parse_dates=True)
    frame['date'] = frame['date'].astype('datetime64[ns]')
    return frame.set_index('date')
def load_portfo_csv(path, filename):
    """Load the saved portfolio table, restoring dtypes and dropping the stray index column.

    Expects the file to contain an unnamed leading index column (written by
    ``DataFrame.to_csv`` with the default index), exposed by pandas as
    'Unnamed: 0', plus 'symbol', 'date' and 'date_close' columns.
    """
    portfolio = pd.read_csv(path + filename, parse_dates=True)
    portfolio['symbol'] = portfolio['symbol'].astype('str')
    for col in ('date', 'date_close'):
        portfolio[col] = portfolio[col].astype('datetime64[ns]')
    portfolio.drop(columns=['Unnamed: 0'], inplace=True)
    return portfolio
def load_log_csv(path, filename):
    """Load the trade log CSV, restoring datetimes and enforcing column order."""
    log = pd.read_csv(path + filename, parse_dates=True)
    for col in ('01) date_buy', '02) date_sell'):
        log[col] = log[col].astype('datetime64[ns]')
    cols = ['01) date_buy', '02) date_sell', '03) symbol', '04) position',
            '05) price', '06) amount', '07) BOT/SLD']
    return log[cols]
def get_symbols(symbols, key, outputsize='compact', adjusted=False, skipped_symbols=[]):
    """Download daily OHLC(V) bars from Alpha Vantage for several symbols.

    Parameters
    ----------
    symbols : iterable of str
        Ticker symbols to fetch.
    key : str
        Alpha Vantage API key.
    outputsize : str
        'compact' or 'full', passed straight to the Alpha Vantage client.
    adjusted : bool
        If True, use the adjusted-daily endpoint (extra columns).
    skipped_symbols : list of str
        Symbols to skip entirely.
        NOTE(review): mutable default argument — harmless here only because
        it is never mutated inside the function.

    Returns
    -------
    pandas.DataFrame
        All downloaded bars stacked, indexed by (date, symbol) and sorted.
    """
    ts = TimeSeries(key, output_format='pandas')
    out = pd.DataFrame()
    # Select the endpoint and its expected column layout once, up front.
    if adjusted == True:
        func = ts.get_daily_adjusted
        cols = ['open','high','low','close', 'adjusted_close', 'volume', 'dividend_amount', 'split_coefficient']
    else:
        func = ts.get_daily
        cols = ['open','high','low','close','volume']
    for symbol in symbols:
        if symbol in skipped_symbols:
            print ('Skipping {} as instructed.'.format(symbol))
            continue
        else:
            print('Trying to download ', symbol)
        # Retry loop: Alpha Vantage reports both "unknown symbol" and
        # "rate limited" as ValueError, distinguishable only by message text.
        while True:
            try:
                df, meta = func(symbol=symbol, outputsize=outputsize)
            except ValueError as e:
                print('*')
                print('* Valueerror from Alpha Vantage: ', e)
                if 'Invalid API call' in str(e):
                    # Unknown symbol: give up on this one and move on.
                    print('Symbol {} not available on Alpha Vantage. Skippping it.'.format(symbol))
                    break
                elif 'Thank' in str(e):
                    # Rate limited: wait a minute, then retry the same symbol.
                    print('API call frequency exceeded as advised by Alpha Vantage. Wait for a minute and try again.')
                    countdown(60)
                    print()
                # NOTE(review): any other ValueError falls through and retries forever.
            else:
                df.columns = cols
                df['symbol'] = symbol # add a new column which contains the symbol so we can keep multiple symbols in the same dataframe
                df.reset_index(level=0, inplace=True)
                df = df.set_index(['date','symbol'])
                out = pd.concat([out,df],axis=0) #stacks on top of previously collected data
                break
    return out.sort_index()
def get_symbols_intraday(symbols, key, outputsize='full'):
    """Download 1-minute intraday bars for *symbols* into one (date, symbol)-indexed frame."""
    ts = TimeSeries(key, output_format='pandas')
    out = pd.DataFrame()
    for symbol in symbols:
        df, meta = ts.get_intraday(symbol=symbol, interval='1min', outputsize=outputsize)
        # Normalize column names, tag each row with its symbol, and stack.
        df.columns = ['open', 'high', 'low', 'close', 'volume']
        df['symbol'] = symbol
        df.reset_index(level=0, inplace=True)
        df = df.set_index(['date', 'symbol'])
        out = pd.concat([out, df], axis=0)
    return out.sort_index()
def get_FX_symbols_intraday(symbols, key, outputsize='full'):
    """Download 1-minute FX bars from Alpha Vantage for currency pairs.

    *symbols* entries are expected to look like 'EUR/USD' (or any 7-char
    form where chars 0-2 are the base and chars 4+ the quote currency).
    Returns a DataFrame indexed by (date, symbol) with the symbol stored
    as 'EUR.USD', sorted by index.
    """
    fe = ForeignExchange(key, output_format='pandas')
    out = pd.DataFrame()
    for symbol in symbols:
        print('Trying to download ', symbol)
        # Retry loop: the Alpha Vantage client raises ValueError for both
        # unknown pairs and rate limiting; the message text disambiguates.
        while True:
            try:
                df, meta = fe.get_currency_exchange_intraday(from_symbol=symbol[0:3], to_symbol=symbol[4:], interval='1min', outputsize=outputsize)
            except ValueError as e:
                print('*')
                print('* Valueerror from Alpha Vantage: ', e)
                if 'Invalid API call' in str(e):
                    # Unknown pair: give up on this one and move on.
                    print('Symbol {} not available on Alpha Vantage. Skippping it.'.format(symbol))
                    break
                elif 'Thank' in str(e):
                    # Rate limited: wait a minute, then retry the same pair.
                    print('API call frequency exceeded as advised by Alpha Vantage. Wait for a minute and try again.')
                    countdown(60)
                    print()
                # NOTE(review): any other ValueError retries forever.
            else:
                df.columns = ['open','high','low','close']
                # store 'EUR.USD'-style symbol so it is filesystem/index friendly
                df['symbol'] = symbol[0:3]+'.'+ symbol[4:]
                df.reset_index(level=0, inplace=True)
                df = df.set_index(['date','symbol'])
                out = pd.concat([out,df],axis=0) #stacks on top of previously collected data
                break
    return out.sort_index()
def get_FX_symbols_daily(symbols, key, outputsize='full'):
    """Download daily FX bars from Alpha Vantage for currency pairs.

    Same contract as get_FX_symbols_intraday() but hitting the daily
    endpoint: *symbols* entries like 'EUR/USD'; returns a DataFrame
    indexed by (date, symbol) with 'EUR.USD'-style symbols, sorted.
    """
    fe = ForeignExchange(key, output_format='pandas')
    out = pd.DataFrame()
    for symbol in symbols:
        print('Trying to download ', symbol)
        # Retry loop: ValueError covers both unknown pairs and rate limiting.
        while True:
            try:
                df, meta = fe.get_currency_exchange_daily(from_symbol=symbol[0:3], to_symbol=symbol[4:], outputsize=outputsize)
            except ValueError as e:
                print('*')
                print('* Valueerror from Alpha Vantage: ', e)
                if 'Invalid API call' in str(e):
                    # Unknown pair: give up on this one and move on.
                    print('Symbol {} not available on Alpha Vantage. Skippping it.'.format(symbol))
                    break
                elif 'Thank' in str(e):
                    # Rate limited: wait a minute, then retry the same pair.
                    print('API call frequency exceeded as advised by Alpha Vantage. Wait for a minute and try again.')
                    countdown(60)
                    print()
                # NOTE(review): any other ValueError retries forever.
            else:
                df.columns = ['open','high','low','close']
                df['symbol'] = symbol[0:3]+'.'+ symbol[4:]
                df.reset_index(level=0, inplace=True)
                df = df.set_index(['date','symbol'])
                out = pd.concat([out,df],axis=0) #stacks on top of previously collected data
                break
    return out.sort_index()
def date_query (df, begin, end):
    """Rows of *df* whose 'date' index level lies within [begin, end] (inclusive)."""
    dates = df.index.get_level_values('date')
    in_range = (dates >= begin) & (dates <= end)
    return df[in_range]
#binary switch - log return > 90% quantile will be 1, otherwise 0
def switch_upper(ts, upper_threshold):
    """Binary indicator over *ts*: 1 where value >= upper_threshold, else 0.

    Returns the same container type as *ts* (works on anything that supports
    ``.copy()`` and integer indexing, e.g. lists or pandas Series).
    """
    flags = ts.copy()
    for k in range(len(ts)):
        flags[k] = 1 if ts[k] >= upper_threshold else 0
    return flags
#binary switch - log return < 10% quantile will be 1, otherwise 0
def switch_lower(ts, lower_threshold):
    """Binary indicator over *ts*: 1 where value <= lower_threshold, else 0.

    Mirror image of switch_upper(); same container-type contract.
    """
    flags = ts.copy()
    for k in range(len(ts)):
        flags[k] = 1 if ts[k] <= lower_threshold else 0
    return flags
from sklearn.metrics import f1_score, precision_score
def lgb_f1_score(y_hat, data):
    """LightGBM eval metric: F1 on rounded predictions (higher-is-better)."""
    y_true = data.get_label()
    hard_labels = np.round(y_hat)  # sklearn's f1 expects labels, not probabilities
    return 'f1', f1_score(y_true, hard_labels), True
def lgb_precision_score(y_hat, data):
    """LightGBM eval metric: precision on rounded predictions (higher-is-better)."""
    y_true = data.get_label()
    hard_labels = np.round(y_hat)  # sklearn's precision expects labels, not probabilities
    return 'precision_score', precision_score(y_true, hard_labels), True
def to_days(days):
    """Convert a day count (int/float/str) into a pandas Timedelta."""
    return pd.Timedelta('%s days' % days)
def class_switch_binary(y_valid, y_pred, prob_threshold):
    """Threshold probabilities into hard 0/1 labels aligned with y_valid's index.

    Returns a Series named 'pred' carrying y_valid's index, with 1 where the
    corresponding probability in *y_pred* is strictly above *prob_threshold*.
    """
    cutoff = float(prob_threshold)
    labels = [1 if prob > cutoff else 0 for prob in y_pred]
    result_df = y_valid.copy().to_frame()
    result_df['pred'] = labels
    return result_df['pred']
# def train_valid_test_split(df, start_date, start_date_valid, start_date_test, end_date_test):
# X_y_train = df[start_date : start_date_valid - pd.Timedelta('1 day')]
# X_y_valid = df[start_date_valid: start_date_test - pd.Timedelta('1 day')]
# X_y_test = df[start_date_test: end_date_test]
# return X_y_train, X_y_valid, X_y_test
def train_valid_test_split(df, start_date, start_date_valid, end_date_valid, start_date_test, end_date_test):
    """Chronological three-way split of a date-indexed frame.

    Train covers [start_date, start_date_valid]; valid and test each start
    the day AFTER their boundary date (inclusive label-based slicing).
    """
    one_day = pd.Timedelta('1 day')
    train = df[start_date:start_date_valid]
    valid = df[start_date_valid + one_day:end_date_valid]
    test = df[start_date_test + one_day:end_date_test]
    return train, valid, test
def train_valid_split(df, start_date, start_date_valid, end_date_valid):
    """Chronological two-way split: train up to start_date_valid (inclusive),
    valid from the following day through end_date_valid (inclusive)."""
    train = df[start_date:start_date_valid]
    valid = df[start_date_valid + pd.Timedelta('1 day'):end_date_valid]
    return train, valid
def add_target_upper(X_y_train, X_y_valid, X_y_test, q_upper, target_col, return_col):
    """Add a binary target: 1 where return_col >= the train q_upper quantile.

    The threshold is estimated on the training split only and then applied
    to all three splits (no look-ahead). Frames are mutated in place and
    also returned.
    """
    threshold = X_y_train[return_col].quantile(q=q_upper)
    for split in (X_y_train, X_y_valid, X_y_test):
        split[target_col] = switch_upper(split[return_col], threshold)
    return X_y_train, X_y_valid, X_y_test
def add_target_upper_notest(X_y_train, X_y_valid, q_upper, target_col, return_col):
    """Two-split variant of add_target_upper(): train-quantile threshold,
    applied to train and valid only; prints the threshold for inspection."""
    upper_threshold = X_y_train[return_col].quantile(q=q_upper)
    print("upper_threshold: ", upper_threshold)
    for split in (X_y_train, X_y_valid):
        split[target_col] = switch_upper(split[return_col], upper_threshold)
    return X_y_train, X_y_valid
def add_target_lower_notest(X_y_train, X_y_valid, q_lower, target_col, return_col):
    """Two-split variant of add_target_lower(): train-quantile threshold,
    applied to train and valid only; prints the threshold for inspection."""
    lower_threshold = X_y_train[return_col].quantile(q=q_lower)
    print("lower_threshold: ", lower_threshold)
    for split in (X_y_train, X_y_valid):
        split[target_col] = switch_lower(split[return_col], lower_threshold)
    return X_y_train, X_y_valid
def add_target_lower(X_y_train, X_y_valid, X_y_test, q_lower, target_col, return_col):
    """Add a binary target: 1 where return_col <= the train q_lower quantile.

    Threshold estimated on the training split only, applied to all three
    splits (no look-ahead). Frames are mutated in place and also returned.
    """
    threshold = X_y_train[return_col].quantile(q=q_lower)
    for split in (X_y_train, X_y_valid, X_y_test):
        split[target_col] = switch_lower(split[return_col], threshold)
    return X_y_train, X_y_valid, X_y_test
def downsample(X_y_train, target_col, test_ratio, random_seed):
    """Randomly downsample the negative class (target == 0) to rebalance training data.

    Keeps every positive row plus a *test_ratio* fraction of negatives
    (selected reproducibly via random_seed); result is index-sorted.
    """
    positives = X_y_train.loc[X_y_train[target_col] == 1]
    negatives = X_y_train.loc[X_y_train[target_col] == 0]
    _, negatives_kept = train_test_split(
        negatives, test_size=test_ratio, random_state=random_seed)
    return pd.concat([positives, negatives_kept]).sort_index()
def downsample_3class(X_y_train, target_col, random_seed):
    """Downsample a 3-class (-1/0/1) training frame so every class matches the rarest.

    Keeps all rows of the least-frequent class and draws an equal-sized
    random sample (reproducible via *random_seed*) from each of the other
    two classes; the result is index-sorted.

    Fixes vs. original: the per-class counts were held in a local named
    ``tuple`` (shadowing the builtin) and ``tuple.index(min(tuple))`` was
    computed twice; counts now live in a clearly named list and the rarest
    class is located once. Behaviour is otherwise unchanged.
    NOTE(review): as in the original, a tie for the rarest class makes
    test_ratio == 1.0, which train_test_split rejects.
    """
    class_list = [1, 0, -1]
    counts = [len(X_y_train.loc[X_y_train[target_col] == cls]) for cls in class_list]
    rarest_pos = counts.index(min(counts))
    lowest_n_class = class_list.pop(rarest_pos)
    df_keep = X_y_train.loc[X_y_train[target_col] == lowest_n_class]
    X_y_train_resampled = df_keep.copy()
    for class_label in class_list:
        df_to_downsample = X_y_train.loc[X_y_train[target_col] == class_label]
        # fraction that yields a sample the same size as the rarest class
        test_ratio = len(df_keep) / len(df_to_downsample)
        _, df_downsampled = train_test_split(
            df_to_downsample, test_size=test_ratio, random_state=random_seed)
        X_y_train_resampled = pd.concat([X_y_train_resampled, df_downsampled])
    return X_y_train_resampled.sort_index()
def downsample_positive(X_y_train, target_col, test_ratio, random_seed):
    """Randomly downsample the positive class (target == 1); mirror of downsample().

    Keeps every negative row plus a *test_ratio* fraction of positives
    (selected reproducibly via random_seed); result is index-sorted.
    """
    positives = X_y_train.loc[X_y_train[target_col] == 1]
    negatives = X_y_train.loc[X_y_train[target_col] == 0]
    _, positives_kept = train_test_split(
        positives, test_size=test_ratio, random_state=random_seed)
    return pd.concat([negatives, positives_kept]).sort_index()
def feature_target_split(df, features_cols, target_col):
    """Split *df* into a feature matrix (features_cols) and a target vector (target_col)."""
    return df[features_cols], df[target_col]
def knn_train(X_train, y_train, X_valid, y_valid, X_valid_close, p_range, leaf_size_range, n_neighbors_range, return_col_actual, prob_threshold = 0.7, sign = 1):
max_total_gain = float("-inf")
max_auc = float("-inf")
#max_precision_total_gain = float("-inf")
for p in p_range:
| |
# <reponame>ru-gh/Password-Manager  (stray dataset artifact; commented out so the module parses)
import sqlite3
import pathlib
from PyQt5 import QtCore, QtGui, QtWidgets
# Module state: 's' is initialised here and presumably rebound to the live
# session/connection elsewhere in the program — TODO confirm against later code.
s = None
accounts = pathlib.Path("Accounts.db")
user = pathlib.Path("User.db")
# First-run bootstrap: (path, schema-selector) pairs.
# Selector 0 -> create the User.db schema, anything else -> Accounts.db.
lam = [[user, 0], [accounts, 1]]
for file in lam:
    print(file[0])
    if file[0].exists():
        # Database file already present; nothing to create.
        print("File exist")
    elif file[1] == 0:
        # Create the user/security database (login hash + optional TOTP secret).
        print("check")
        print("Creating User.db")
        conn = sqlite3.connect('User.db')
        c = conn.cursor()
        c.execute("""CREATE TABLE security(ID INTEGER PRIMARY KEY,User string NOT NULL,Hash string NOT NULL,Topt string)""")
        conn.commit()
        conn.close()
    else:
        # Create the stored-accounts database; security_ID links rows back
        # to the owning user in User.db's security table.
        print("Creating Accounts.db")
        conn = sqlite3.connect('Accounts.db')
        c = conn.cursor()
        c.execute(
            """CREATE TABLE accounts(ID INTEGER PRIMARY KEY,Account string NOT NULL,User string NOT NULL,Hash string
            NOT NULL,Date string NOT NULL,security_ID INTEGER)""")
        conn.commit()
        conn.close()
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(881, 553)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
self.tableWidget.setGeometry(QtCore.QRect(11, 11, 581, 344))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(4)
self.tableWidget.setRowCount(4)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(0, 1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(0, 2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(0, 3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(1, 1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(1, 2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(1, 3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(2, 0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(2, 1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(2, 2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(2, 3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(3, 0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(3, 1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(3, 2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(3, 3, item)
self.grp_box_pgen = QtWidgets.QGroupBox(self.centralwidget)
self.grp_box_pgen.setGeometry(QtCore.QRect(10, 370, 426, 119))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.grp_box_pgen.sizePolicy().hasHeightForWidth())
self.grp_box_pgen.setSizePolicy(sizePolicy)
self.grp_box_pgen.setMinimumSize(QtCore.QSize(0, 111))
self.grp_box_pgen.setAutoFillBackground(False)
self.grp_box_pgen.setFlat(False)
self.grp_box_pgen.setObjectName("grp_box_pgen")
self.gridLayout_2 = QtWidgets.QGridLayout(self.grp_box_pgen)
self.gridLayout_2.setObjectName("gridLayout_2")
self.lbl_slen_pgen = QtWidgets.QLabel(self.grp_box_pgen)
self.lbl_slen_pgen.setObjectName("lbl_slen_pgen")
self.gridLayout_2.addWidget(self.lbl_slen_pgen, 0, 0, 1, 1)
self.tbox_slen_pgen = QtWidgets.QLineEdit(self.grp_box_pgen)
self.tbox_slen_pgen.setMinimumSize(QtCore.QSize(0, 20))
self.tbox_slen_pgen.setFrame(True)
self.tbox_slen_pgen.setObjectName("tbox_slen_pgen")
self.gridLayout_2.addWidget(self.tbox_slen_pgen, 0, 1, 1, 2)
self.btn_genpass = QtWidgets.QPushButton(self.grp_box_pgen)
self.btn_genpass.setMinimumSize(QtCore.QSize(100, 23))
self.btn_genpass.setObjectName("btn_genpass")
self.gridLayout_2.addWidget(self.btn_genpass, 0, 3, 1, 1)
self.lbl_genpass_pgen = QtWidgets.QLabel(self.grp_box_pgen)
self.lbl_genpass_pgen.setObjectName("lbl_genpass_pgen")
self.gridLayout_2.addWidget(self.lbl_genpass_pgen, 1, 0, 1, 1)
self.btn_cpy_pass = QtWidgets.QPushButton(self.grp_box_pgen)
self.btn_cpy_pass.setMinimumSize(QtCore.QSize(80, 23))
self.btn_cpy_pass.setObjectName("btn_cpy_pass")
self.gridLayout_2.addWidget(self.btn_cpy_pass, 2, 1, 1, 1)
self.lbl_warn_pgen = QtWidgets.QLabel(self.grp_box_pgen)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.lbl_warn_pgen.setPalette(palette)
self.lbl_warn_pgen.setFrameShadow(QtWidgets.QFrame.Plain)
self.lbl_warn_pgen.setTextFormat(QtCore.Qt.PlainText)
self.lbl_warn_pgen.setObjectName("lbl_warn_pgen")
self.gridLayout_2.addWidget(self.lbl_warn_pgen, 2, 2, 1, 2)
self.tbox_genpass_pgen = QtWidgets.QLineEdit(self.grp_box_pgen)
self.tbox_genpass_pgen.setMinimumSize(QtCore.QSize(242, 20))
self.tbox_genpass_pgen.setStyleSheet("")
self.tbox_genpass_pgen.setFrame(True)
self.tbox_genpass_pgen.setObjectName("tbox_genpass_pgen")
self.gridLayout_2.addWidget(self.tbox_genpass_pgen, 1, 1, 1, 3)
self.lbl_showpass_pgen = QtWidgets.QLabel(self.grp_box_pgen)
self.lbl_showpass_pgen.setEnabled(True)
self.lbl_showpass_pgen.setGeometry(QtCore.QRect(336, 52, 20, 20))
self.lbl_showpass_pgen.setStyleSheet("background-color: transparent;")
self.lbl_showpass_pgen.setText("")
self.lbl_showpass_pgen.setPixmap(QtGui.QPixmap("../../Mega/Downloads/showpass.png"))
self.lbl_showpass_pgen.setScaledContents(True)
self.lbl_showpass_pgen.setObjectName("lbl_showpass_pgen")
self.tbox_genpass_pgen.raise_()
self.lbl_genpass_pgen.raise_()
self.lbl_warn_pgen.raise_()
self.btn_genpass.raise_()
self.lbl_slen_pgen.raise_()
self.tbox_slen_pgen.raise_()
self.btn_cpy_pass.raise_()
self.lbl_showpass_pgen.raise_()
self.btn_logout = QtWidgets.QPushButton(self.centralwidget)
self.btn_logout.setGeometry(QtCore.QRect(460, 370, 126, 23))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_logout.sizePolicy().hasHeightForWidth())
self.btn_logout.setSizePolicy(sizePolicy)
self.btn_logout.setMinimumSize(QtCore.QSize(0, 23))
self.btn_logout.setMaximumSize(QtCore.QSize(126, 16777215))
self.btn_logout.setObjectName("btn_logout")
self.tab_login = QtWidgets.QTabWidget(self.centralwidget)
self.tab_login.setGeometry(QtCore.QRect(600, 10, 265, 341))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tab_login.sizePolicy().hasHeightForWidth())
self.tab_login.setSizePolicy(sizePolicy)
self.tab_login.setMinimumSize(QtCore.QSize(0, 215))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
self.tab_login.setPalette(palette)
self.tab_login.setAutoFillBackground(False)
self.tab_login.setObjectName("tab_login")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.widget_login = QtWidgets.QWidget(self.tab)
self.widget_login.setGeometry(QtCore.QRect(3, 3, 261, 111))
self.widget_login.setObjectName("widget_login")
self.lbl_user_login = QtWidgets.QLabel(self.widget_login)
self.lbl_user_login.setGeometry(QtCore.QRect(9, 9, 48, 16))
self.lbl_user_login.setObjectName("lbl_user_login")
self.tbox_user_login = QtWidgets.QLineEdit(self.widget_login)
self.tbox_user_login.setGeometry(QtCore.QRect(63, 9, 184, 20))
self.tbox_user_login.setFrame(True)
self.tbox_user_login.setObjectName("tbox_user_login")
self.lbl_pass_login = QtWidgets.QLabel(self.widget_login)
self.lbl_pass_login.setGeometry(QtCore.QRect(9, 35, 46, 16))
self.lbl_pass_login.setObjectName("lbl_pass_login")
self.tbox_pass_login = QtWidgets.QLineEdit(self.widget_login)
self.tbox_pass_login.setGeometry(QtCore.QRect(63, 35, 184, 20))
self.tbox_pass_login.setFrame(True)
self.tbox_pass_login.setObjectName("tbox_pass_login")
self.btn_login = QtWidgets.QPushButton(self.widget_login)
self.btn_login.setGeometry(QtCore.QRect(10, 64, 241, 23))
self.btn_login.setMinimumSize(QtCore.QSize(0, 23))
self.btn_login.setObjectName("btn_login")
self.lbl_warn_login = QtWidgets.QLabel(self.widget_login)
self.lbl_warn_login.setGeometry(QtCore.QRect(10, 87, 241, 16))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lbl_warn_login.sizePolicy().hasHeightForWidth())
self.lbl_warn_login.setSizePolicy(sizePolicy)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.lbl_warn_login.setPalette(palette)
self.lbl_warn_login.setTextFormat(QtCore.Qt.PlainText)
self.lbl_warn_login.setObjectName("lbl_warn_login")
self.lbl_showpass_login = QtWidgets.QLabel(self.widget_login)
self.lbl_showpass_login.setEnabled(True)
self.lbl_showpass_login.setGeometry(QtCore.QRect(224, 35, 21, 20))
self.lbl_showpass_login.setStyleSheet("background-color: transparent;")
self.lbl_showpass_login.setText("")
self.lbl_showpass_login.setPixmap(QtGui.QPixmap("../../Mega/Downloads/showpass.png"))
self.lbl_showpass_login.setScaledContents(True)
self.lbl_showpass_login.setObjectName("lbl_showpass_login")
self.tab_login.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.widget_signup = QtWidgets.QWidget(self.tab_2)
self.widget_signup.setGeometry(QtCore.QRect(3, 3, 261, 311))
self.widget_signup.setObjectName("widget_signup")
self.lbl_user_signup = QtWidgets.QLabel(self.widget_signup)
self.lbl_user_signup.setGeometry(QtCore.QRect(9, 9, 48, 16))
self.lbl_user_signup.setObjectName("lbl_user_signup")
self.tbox_user_signup = QtWidgets.QLineEdit(self.widget_signup)
self.tbox_user_signup.setGeometry(QtCore.QRect(76, 9, 171, 20))
self.tbox_user_signup.setInputMask("")
self.tbox_user_signup.setFrame(True)
self.tbox_user_signup.setObjectName("tbox_user_signup")
self.lbl_pass_signup = QtWidgets.QLabel(self.widget_signup)
self.lbl_pass_signup.setGeometry(QtCore.QRect(9, 35, 46, 16))
self.lbl_pass_signup.setObjectName("lbl_pass_signup")
self.tbox_pass_signup = QtWidgets.QLineEdit(self.widget_signup)
self.tbox_pass_signup.setGeometry(QtCore.QRect(76, 35, 171, 20))
self.tbox_pass_signup.setInputMask("")
self.tbox_pass_signup.setFrame(True)
self.tbox_pass_signup.setObjectName("tbox_pass_signup")
self.lbl_repass_signup = QtWidgets.QLabel(self.widget_signup)
self.lbl_repass_signup.setGeometry(QtCore.QRect(9, 61, 61, 16))
self.lbl_repass_signup.setObjectName("lbl_repass_signup")
self.tbox_repass_signup = QtWidgets.QLineEdit(self.widget_signup)
self.tbox_repass_signup.setGeometry(QtCore.QRect(76, 61, 171, 20))
self.tbox_repass_signup.setFrame(True)
self.tbox_repass_signup.setObjectName("tbox_repass_signup")
self.chkbox_2fa_signup = QtWidgets.QCheckBox(self.widget_signup)
self.chkbox_2fa_signup.setGeometry(QtCore.QRect(76, 86, 100, 17))
self.chkbox_2fa_signup.setLayoutDirection(QtCore.Qt.LeftToRight)
self.chkbox_2fa_signup.setChecked(True)
self.chkbox_2fa_signup.setTristate(False)
self.chkbox_2fa_signup.setObjectName("chkbox_2fa_signup")
self.btn_signup = QtWidgets.QPushButton(self.widget_signup)
self.btn_signup.setGeometry(QtCore.QRect(9, 264, 241, 23))
self.btn_signup.setCheckable(False)
self.btn_signup.setFlat(False)
self.btn_signup.setObjectName("btn_signup")
self.lbl_warn_signup = QtWidgets.QLabel(self.widget_signup)
self.lbl_warn_signup.setGeometry(QtCore.QRect(8, 287, 241, 20))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.lbl_warn_signup.setPalette(palette)
self.lbl_warn_signup.setTextFormat(QtCore.Qt.PlainText)
self.lbl_warn_signup.setObjectName("lbl_warn_signup")
self.lbl_qr_signup = QtWidgets.QLabel(self.widget_signup)
self.lbl_qr_signup.setGeometry(QtCore.QRect(55, 107, 150, 150))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lbl_qr_signup.sizePolicy().hasHeightForWidth())
self.lbl_qr_signup.setSizePolicy(sizePolicy)
self.lbl_qr_signup.setMinimumSize(QtCore.QSize(0, 0))
# ---- file boundary (extraction artifact) ----
0.00635083268962935*m.x834 + m.x2334 == 0)
m.c1335 = Constraint(expr= - m.x825 - 0.11270166537926*m.x830 - 0.00635083268962935*m.x835 + m.x2335 == 0)
m.c1336 = Constraint(expr= - m.x836 - 0.5*m.x841 - 0.125*m.x846 + m.x2336 == 0)
m.c1337 = Constraint(expr= - m.x837 - 0.5*m.x842 - 0.125*m.x847 + m.x2337 == 0)
m.c1338 = Constraint(expr= - m.x838 - 0.5*m.x843 - 0.125*m.x848 + m.x2338 == 0)
m.c1339 = Constraint(expr= - m.x839 - 0.5*m.x844 - 0.125*m.x849 + m.x2339 == 0)
m.c1340 = Constraint(expr= - m.x840 - 0.5*m.x845 - 0.125*m.x850 + m.x2340 == 0)
m.c1341 = Constraint(expr= - m.x836 - 0.88729833462074*m.x841 - 0.393649167310369*m.x846 + m.x2341 == 0)
m.c1342 = Constraint(expr= - m.x837 - 0.88729833462074*m.x842 - 0.393649167310369*m.x847 + m.x2342 == 0)
m.c1343 = Constraint(expr= - m.x838 - 0.88729833462074*m.x843 - 0.393649167310369*m.x848 + m.x2343 == 0)
m.c1344 = Constraint(expr= - m.x839 - 0.88729833462074*m.x844 - 0.393649167310369*m.x849 + m.x2344 == 0)
m.c1345 = Constraint(expr= - m.x840 - 0.88729833462074*m.x845 - 0.393649167310369*m.x850 + m.x2345 == 0)
m.c1346 = Constraint(expr= - m.x836 - 0.11270166537926*m.x841 - 0.00635083268962935*m.x846 + m.x2346 == 0)
m.c1347 = Constraint(expr= - m.x837 - 0.11270166537926*m.x842 - 0.00635083268962935*m.x847 + m.x2347 == 0)
m.c1348 = Constraint(expr= - m.x838 - 0.11270166537926*m.x843 - 0.00635083268962935*m.x848 + m.x2348 == 0)
m.c1349 = Constraint(expr= - m.x839 - 0.11270166537926*m.x844 - 0.00635083268962935*m.x849 + m.x2349 == 0)
m.c1350 = Constraint(expr= - m.x840 - 0.11270166537926*m.x845 - 0.00635083268962935*m.x850 + m.x2350 == 0)
m.c1351 = Constraint(expr= - m.x851 - 0.5*m.x856 - 0.125*m.x861 + m.x2351 == 0)
m.c1352 = Constraint(expr= - m.x852 - 0.5*m.x857 - 0.125*m.x862 + m.x2352 == 0)
m.c1353 = Constraint(expr= - m.x853 - 0.5*m.x858 - 0.125*m.x863 + m.x2353 == 0)
m.c1354 = Constraint(expr= - m.x854 - 0.5*m.x859 - 0.125*m.x864 + m.x2354 == 0)
m.c1355 = Constraint(expr= - m.x855 - 0.5*m.x860 - 0.125*m.x865 + m.x2355 == 0)
m.c1356 = Constraint(expr= - m.x851 - 0.88729833462074*m.x856 - 0.393649167310369*m.x861 + m.x2356 == 0)
m.c1357 = Constraint(expr= - m.x852 - 0.88729833462074*m.x857 - 0.393649167310369*m.x862 + m.x2357 == 0)
m.c1358 = Constraint(expr= - m.x853 - 0.88729833462074*m.x858 - 0.393649167310369*m.x863 + m.x2358 == 0)
m.c1359 = Constraint(expr= - m.x854 - 0.88729833462074*m.x859 - 0.393649167310369*m.x864 + m.x2359 == 0)
m.c1360 = Constraint(expr= - m.x855 - 0.88729833462074*m.x860 - 0.393649167310369*m.x865 + m.x2360 == 0)
m.c1361 = Constraint(expr= - m.x851 - 0.11270166537926*m.x856 - 0.00635083268962935*m.x861 + m.x2361 == 0)
m.c1362 = Constraint(expr= - m.x852 - 0.11270166537926*m.x857 - 0.00635083268962935*m.x862 + m.x2362 == 0)
m.c1363 = Constraint(expr= - m.x853 - 0.11270166537926*m.x858 - 0.00635083268962935*m.x863 + m.x2363 == 0)
m.c1364 = Constraint(expr= - m.x854 - 0.11270166537926*m.x859 - 0.00635083268962935*m.x864 + m.x2364 == 0)
m.c1365 = Constraint(expr= - m.x855 - 0.11270166537926*m.x860 - 0.00635083268962935*m.x865 + m.x2365 == 0)
m.c1366 = Constraint(expr= - m.x866 - 0.5*m.x871 - 0.125*m.x876 + m.x2366 == 0)
m.c1367 = Constraint(expr= - m.x867 - 0.5*m.x872 - 0.125*m.x877 + m.x2367 == 0)
m.c1368 = Constraint(expr= - m.x868 - 0.5*m.x873 - 0.125*m.x878 + m.x2368 == 0)
m.c1369 = Constraint(expr= - m.x869 - 0.5*m.x874 - 0.125*m.x879 + m.x2369 == 0)
m.c1370 = Constraint(expr= - m.x870 - 0.5*m.x875 - 0.125*m.x880 + m.x2370 == 0)
m.c1371 = Constraint(expr= - m.x866 - 0.88729833462074*m.x871 - 0.393649167310369*m.x876 + m.x2371 == 0)
m.c1372 = Constraint(expr= - m.x867 - 0.88729833462074*m.x872 - 0.393649167310369*m.x877 + m.x2372 == 0)
m.c1373 = Constraint(expr= - m.x868 - 0.88729833462074*m.x873 - 0.393649167310369*m.x878 + m.x2373 == 0)
m.c1374 = Constraint(expr= - m.x869 - 0.88729833462074*m.x874 - 0.393649167310369*m.x879 + m.x2374 == 0)
m.c1375 = Constraint(expr= - m.x870 - 0.88729833462074*m.x875 - 0.393649167310369*m.x880 + m.x2375 == 0)
m.c1376 = Constraint(expr= - m.x866 - 0.11270166537926*m.x871 - 0.00635083268962935*m.x876 + m.x2376 == 0)
m.c1377 = Constraint(expr= - m.x867 - 0.11270166537926*m.x872 - 0.00635083268962935*m.x877 + m.x2377 == 0)
m.c1378 = Constraint(expr= - m.x868 - 0.11270166537926*m.x873 - 0.00635083268962935*m.x878 + m.x2378 == 0)
m.c1379 = Constraint(expr= - m.x869 - 0.11270166537926*m.x874 - 0.00635083268962935*m.x879 + m.x2379 == 0)
m.c1380 = Constraint(expr= - m.x870 - 0.11270166537926*m.x875 - 0.00635083268962935*m.x880 + m.x2380 == 0)
m.c1381 = Constraint(expr= - m.x881 - 0.5*m.x886 - 0.125*m.x891 + m.x2381 == 0)
m.c1382 = Constraint(expr= - m.x882 - 0.5*m.x887 - 0.125*m.x892 + m.x2382 == 0)
m.c1383 = Constraint(expr= - m.x883 - 0.5*m.x888 - 0.125*m.x893 + m.x2383 == 0)
m.c1384 = Constraint(expr= - m.x884 - 0.5*m.x889 - 0.125*m.x894 + m.x2384 == 0)
m.c1385 = Constraint(expr= - m.x885 - 0.5*m.x890 - 0.125*m.x895 + m.x2385 == 0)
m.c1386 = Constraint(expr= - m.x881 - 0.88729833462074*m.x886 - 0.393649167310369*m.x891 + m.x2386 == 0)
m.c1387 = Constraint(expr= - m.x882 - 0.88729833462074*m.x887 - 0.393649167310369*m.x892 + m.x2387 == 0)
m.c1388 = Constraint(expr= - m.x883 - 0.88729833462074*m.x888 - 0.393649167310369*m.x893 + m.x2388 == 0)
m.c1389 = Constraint(expr= - m.x884 - 0.88729833462074*m.x889 - 0.393649167310369*m.x894 + m.x2389 == 0)
m.c1390 = Constraint(expr= - m.x885 - 0.88729833462074*m.x890 - 0.393649167310369*m.x895 + m.x2390 == 0)
m.c1391 = Constraint(expr= - m.x881 - 0.11270166537926*m.x886 - 0.00635083268962935*m.x891 + m.x2391 == 0)
m.c1392 = Constraint(expr= - m.x882 - 0.11270166537926*m.x887 - 0.00635083268962935*m.x892 + m.x2392 == 0)
m.c1393 = Constraint(expr= - m.x883 - 0.11270166537926*m.x888 - 0.00635083268962935*m.x893 + m.x2393 == 0)
m.c1394 = Constraint(expr= - m.x884 - 0.11270166537926*m.x889 - 0.00635083268962935*m.x894 + m.x2394 == 0)
m.c1395 = Constraint(expr= - m.x885 - 0.11270166537926*m.x890 - 0.00635083268962935*m.x895 + m.x2395 == 0)
m.c1396 = Constraint(expr= - m.x896 - 0.5*m.x901 - 0.125*m.x906 + m.x2396 == 0)
m.c1397 = Constraint(expr= - m.x897 - 0.5*m.x902 - 0.125*m.x907 + m.x2397 == 0)
m.c1398 = Constraint(expr= - m.x898 - 0.5*m.x903 - 0.125*m.x908 + m.x2398 == 0)
m.c1399 = Constraint(expr= - m.x899 - 0.5*m.x904 - 0.125*m.x909 + m.x2399 == 0)
m.c1400 = Constraint(expr= - m.x900 - 0.5*m.x905 - 0.125*m.x910 + m.x2400 == 0)
m.c1401 = Constraint(expr= - m.x896 - 0.88729833462074*m.x901 - 0.393649167310369*m.x906 + m.x2401 == 0)
m.c1402 = Constraint(expr= - m.x897 - 0.88729833462074*m.x902 - 0.393649167310369*m.x907 + m.x2402 == 0)
m.c1403 = Constraint(expr= - m.x898 - 0.88729833462074*m.x903 - 0.393649167310369*m.x908 + m.x2403 == 0)
m.c1404 = Constraint(expr= - m.x899 - 0.88729833462074*m.x904 - 0.393649167310369*m.x909 + m.x2404 == 0)
m.c1405 = Constraint(expr= - m.x900 - 0.88729833462074*m.x905 - 0.393649167310369*m.x910 + m.x2405 == 0)
m.c1406 = Constraint(expr= - m.x896 - 0.11270166537926*m.x901 - 0.00635083268962935*m.x906 + m.x2406 == 0)
m.c1407 = Constraint(expr= - m.x897 - 0.11270166537926*m.x902 - 0.00635083268962935*m.x907 + m.x2407 == 0)
m.c1408 = Constraint(expr= - m.x898 - 0.11270166537926*m.x903 - 0.00635083268962935*m.x908 + m.x2408 == 0)
m.c1409 = Constraint(expr= - m.x899 - 0.11270166537926*m.x904 - 0.00635083268962935*m.x909 + m.x2409 == 0)
m.c1410 = Constraint(expr= - m.x900 - 0.11270166537926*m.x905 - 0.00635083268962935*m.x910 + m.x2410 == 0)
m.c1411 = Constraint(expr= - m.x911 - 0.5*m.x916 - 0.125*m.x921 + m.x2411 == 0)
m.c1412 = Constraint(expr= - m.x912 - 0.5*m.x917 - 0.125*m.x922 + m.x2412 == 0)
m.c1413 = Constraint(expr= - m.x913 - 0.5*m.x918 - 0.125*m.x923 + m.x2413 == 0)
m.c1414 = Constraint(expr= - m.x914 - 0.5*m.x919 - 0.125*m.x924 + m.x2414 == 0)
m.c1415 = Constraint(expr= - m.x915 - 0.5*m.x920 - 0.125*m.x925 + m.x2415 == 0)
m.c1416 = Constraint(expr= - m.x911 - 0.88729833462074*m.x916 - 0.393649167310369*m.x921 + m.x2416 == 0)
m.c1417 = Constraint(expr= - m.x912 - 0.88729833462074*m.x917 - 0.393649167310369*m.x922 + m.x2417 == 0)
m.c1418 = Constraint(expr= - m.x913 - 0.88729833462074*m.x918 - 0.393649167310369*m.x923 + m.x2418 == 0)
m.c1419 = Constraint(expr= - m.x914 - 0.88729833462074*m.x919 - 0.393649167310369*m.x924 + m.x2419 == 0)
m.c1420 = Constraint(expr= - m.x915 - 0.88729833462074*m.x920 - 0.393649167310369*m.x925 + m.x2420 == 0)
m.c1421 = Constraint(expr= - m.x911 - 0.11270166537926*m.x916 - 0.00635083268962935*m.x921 + m.x2421 == 0)
m.c1422 = Constraint(expr= - m.x912 - 0.11270166537926*m.x917 - 0.00635083268962935*m.x922 + m.x2422 == 0)
m.c1423 = Constraint(expr= - m.x913 - 0.11270166537926*m.x918 - 0.00635083268962935*m.x923 + m.x2423 == 0)
m.c1424 = Constraint(expr= - m.x914 - 0.11270166537926*m.x919 - 0.00635083268962935*m.x924 + m.x2424 == 0)
m.c1425 = Constraint(expr= - m.x915 - 0.11270166537926*m.x920 - 0.00635083268962935*m.x925 + m.x2425 == 0)
m.c1426 = Constraint(expr= - m.x926 - 0.5*m.x931 - 0.125*m.x936 + m.x2426 == 0)
m.c1427 = Constraint(expr= - m.x927 - 0.5*m.x932 - 0.125*m.x937 + m.x2427 == 0)
m.c1428 = Constraint(expr= - m.x928 - 0.5*m.x933 - 0.125*m.x938 + m.x2428 == 0)
m.c1429 = Constraint(expr= - m.x929 - 0.5*m.x934 - 0.125*m.x939 + m.x2429 == 0)
m.c1430 = Constraint(expr= - m.x930 - 0.5*m.x935 - 0.125*m.x940 + m.x2430 == 0)
m.c1431 = Constraint(expr= - m.x926 - 0.88729833462074*m.x931 - 0.393649167310369*m.x936 + m.x2431 == 0)
m.c1432 = Constraint(expr= - m.x927 - 0.88729833462074*m.x932 - 0.393649167310369*m.x937 + m.x2432 == 0)
m.c1433 = Constraint(expr= - m.x928 - 0.88729833462074*m.x933 - 0.393649167310369*m.x938 + m.x2433 == 0)
m.c1434 = Constraint(expr= - m.x929 - 0.88729833462074*m.x934 - 0.393649167310369*m.x939 + m.x2434 == 0)
m.c1435 = Constraint(expr= - m.x930 - 0.88729833462074*m.x935 - 0.393649167310369*m.x940 + m.x2435 == 0)
m.c1436 = Constraint(expr= - m.x926 - 0.11270166537926*m.x931 - 0.00635083268962935*m.x936 + m.x2436 == 0)
m.c1437 = Constraint(expr= - m.x927 - 0.11270166537926*m.x932 - 0.00635083268962935*m.x937 + m.x2437 == 0)
m.c1438 = Constraint(expr= - m.x928 - 0.11270166537926*m.x933 - 0.00635083268962935*m.x938 + m.x2438 == 0)
m.c1439 = Constraint(expr= - m.x929 - 0.11270166537926*m.x934 - 0.00635083268962935*m.x939 + m.x2439 == 0)
m.c1440 = Constraint(expr= - m.x930 - 0.11270166537926*m.x935 - 0.00635083268962935*m.x940 + m.x2440 == 0)
m.c1441 = Constraint(expr= - | |
# GitHub stars: 0
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import re
from datetime import datetime
from django.db import models
from django.core.urlresolvers import reverse
from rapidsms.webui.managers import *
from patterns.models import Pattern
from locations.models import *
# TODO: remove this. it's a slightly weird version
# of ReporterGroup, which can't be nested. i'm
# not sure how it happened in the first place.
class Role(models.Model):
    """Basic representation of a role that someone can have. For example,
    'supervisor' or 'data entry clerk'. Incoming tokens are matched
    against the role's joined regex patterns."""
    name = models.CharField(max_length=160)
    code = models.CharField(max_length=20, blank=True, null=True,
                            help_text="Abbreviation")
    # FIX: null=True is meaningless on a ManyToManyField (Django ignores
    # it and emits a warning); blank=True alone keeps the field optional
    # in forms/admin. Dropping it changes nothing at the database level.
    patterns = models.ManyToManyField(Pattern, blank=True)

    def match(self, token):
        """Return a truthy re.match object if *token* matches this role's
        combined pattern (case-insensitive); otherwise None/falsy (also
        falsy when no patterns are defined, since self.regex is empty)."""
        return self.regex and re.match(self.regex, token, re.IGNORECASE)

    @property
    def regex(self):
        # convenience accessor for joining patterns
        return Pattern.join(self.patterns)

    def __unicode__(self):
        return self.name
class ReporterGroup(models.Model):
    """A named, optionally nested (via *parent*) group of reporters."""
    title = models.CharField(max_length=30, unique=True)
    parent = models.ForeignKey("self", related_name="children", null=True, blank=True)
    description = models.TextField(blank=True)
    objects = RecursiveManager()

    class Meta:
        verbose_name = "Group"

    def __unicode__(self):
        return self.title

    # TODO: rename to something that indicates
    # that it's a counter, not a queryset
    def members(self):
        """Return the NUMBER of reporters in this group (an int, not a
        queryset -- see TODO above)."""
        # FIX: the previous .all().count() added a redundant queryset
        # clone; .count() alone issues the same single COUNT query.
        return self.reporters.count()
class Reporter(models.Model):
    """This model represents a KNOWN person, that can be identified via
    their alias and/or connection(s). Unlike the RapidSMS Person class,
    it should not be used to represent unknown reporters, since that
    could lead to multiple objects for the same "person". Usually, this
    model should be created through the WebUI, in advance of the reporter
    using the system - but there are always exceptions to these rules..."""
    alias = models.CharField(max_length=20, unique=True)
    first_name = models.CharField(max_length=30, blank=True)
    last_name = models.CharField(max_length=30, blank=True)
    groups = models.ManyToManyField(ReporterGroup, related_name="reporters", blank=True)

    # here are some fields that don't belong here
    location = models.ForeignKey(Location, related_name="reporters", null=True, blank=True)
    role = models.ForeignKey(Role, related_name="reporters", null=True, blank=True)

    # FIX: a duplicate __unicode__ used to live here, returning
    # self.connection().identity. It was dead code (shadowed by the
    # __unicode__ defined below) and would have raised AttributeError
    # whenever connection() returned None, so it has been removed.

    # the language that this reporter prefers to
    # receive their messages in, as a w3c language tag
    #
    # the spec: http://www.w3.org/International/articles/language-tags/Overview.en.php
    # reference: http://www.iana.org/assignments/language-subtag-registry
    #
    # to summarize:
    #   english = en
    #   amharic = am
    #   chichewa = ny
    #   klingon  = tlh
    #
    language = models.CharField(max_length=10, blank=True)

    # although it's impossible to enforce, if a user registers
    # themself (via the app.py backend), this flag should be set to
    # indicate that they probably shouldn't be trusted.
    # NOTE(review): no default= is declared; verify how this project's
    # Django version handles an unset BooleanField before adding one.
    registered_self = models.BooleanField()

    class Meta:
        ordering = ["last_name", "first_name"]

        # define a permission for this app to use the @permission_required
        # decorator in reporter's views
        # in the admin's auth section, we have a group called 'manager' whose
        # users have this permission -- and are able to see this section
        permissions = (
            ("can_view", "Can view"),
        )

    def full_name(self):
        """Return "first last", stripped so a missing part leaves no
        stray whitespace."""
        return ("%s %s" % (
            self.first_name,
            self.last_name)).strip()

    def __unicode__(self):
        return self.full_name()

    def __repr__(self):
        return "%s (%s)" % (
            self.full_name(),
            self.alias)

    def __json__(self):
        """Return a JSON-serializable summary of this reporter."""
        return {
            "pk": self.pk,
            "alias": self.alias,
            "first_name": self.first_name,
            "last_name": self.last_name,
            "str": unicode(self) }

    @classmethod
    def exists(klass, reporter, connection):
        """Checks if a reporter has already been entered into the system.
        Returns True only when a PersistantConnection matching
        *connection* exists AND is already linked to a Reporter whose
        name/location/role match *reporter*."""
        try:
            # look for a connection and reporter object matching what
            # was passed in, and if they are already linked then this
            # reporter already exists
            existing_conn = PersistantConnection.objects.get(
                backend=connection.backend, identity=connection.identity)

            # this currently checks first and last name, location and role.
            # we may want to make this more lax
            filters = {"first_name": reporter.first_name,
                       "last_name": reporter.last_name,
                       "location": reporter.location,
                       "role": reporter.role}

            existing_reps = Reporter.objects.filter(**filters)
            for existing_rep in existing_reps:
                if existing_rep == existing_conn.reporter:
                    return True
            return False

        except PersistantConnection.DoesNotExist:
            # if we couldn't find a connection then they
            # don't exist
            return False

    @classmethod
    def parse_name(klass, flat_name):
        """Given a single string, this function returns a three-string
        tuple containing a suggested alias, first name, and last name,
        via some quite crude pattern matching."""

        patterns = [
            # try a few common name formats.
            # this is crappy but sufficient
            r"([a-z]+)",                         # Adam
            r"([a-z]+)\s+([a-z]+)",              # First Last
            r"([a-z]+)\s+[a-z]+\.?\s+([a-z]+)",  # First M. Last
            r"([a-z]+)\s+([a-z]+\-[a-z]+)"       # First Last-Hyphenated
        ]

        def unique(base):
            """Checks an alias for uniqueness; if it is already taken, alter it
            (by appending incrementing digits) until an available alias is found.
            FIX: the parameter was renamed from 'str', which shadowed the builtin."""
            n = 1
            alias = base.lower()

            # keep on looping until an alias becomes available.
            # --
            # WARNING: this isn't going to work at high volumes, since the alias
            # that we return might be taken before we have time to do anything
            # with it! This logic should probably be moved to the
            # initializer, to make the find/grab alias loop atomic
            while klass.objects.filter(alias__iexact=alias).count():
                alias = "%s%d" % (base.lower(), n)
                n += 1

            return alias

        # try each pattern, returning as
        # soon as we find something that fits
        for pat in patterns:
            m = re.match("^%s$" % pat, flat_name, re.IGNORECASE)

            if m is not None:
                g = m.groups()

                # return single names as-is;
                # they might already be aliases
                if len(g) == 1:
                    alias = unique(g[0].lower())
                    return (alias, g[0], "")

                else:
                    # return only the letters from
                    # the first and last names
                    alias = unique(g[0][0] + re.sub(r"[^a-zA-Z]", "", g[1]))
                    return (alias.lower(), g[0], g[1])

        # we have no idea what is going on,
        # so just return the whole thing
        alias = unique(re.sub(r"[^a-zA-Z]", "", flat_name))
        return (alias.lower(), flat_name, "")

    def connection(self):
        """Returns the connection object last used by this Reporter.
        The field is (probably) updated by app.py when receiving
        a message, so depends on _incoming_ messages only."""

        # TODO: add a "preferred" flag to connection, which then
        # overrides the last_seen connection as the default, here
        try:
            return self.connections.latest("last_seen")

        # if no connections exist for this reporter (how
        # did that happen?!), then just return None...
        except PersistantConnection.DoesNotExist:
            return None

    def last_seen(self):
        """Returns the Python datetime that this Reporter was last seen,
        on any Connection. Before displaying in the WebUI, the output
        should be run through the XXX filter, to make it prettier."""

        # comprehend a list of datetimes that this
        # reporter was last seen on each connection,
        # excluding those that have never seen them
        timedates = [
            c.last_seen
            for c in self.connections.all()
            if c.last_seen is not None]

        # return the latest, or None if they've
        # never been seen on ANY connection
        return max(timedates) if timedates else None
class PersistantBackend(models.Model):
    """This class exists to provide a primary key for each
    named RapidSMS backend, which can be linked from the
    other modules. We can't use a char field with OPTIONS
    (in models which wish to link to a backend), since the
    available backends (and their orders) may change after
    deployment; hence, something persistant is needed."""
    slug = models.CharField(max_length=30, unique=True)
    title = models.CharField(max_length=30)

    class Meta:
        verbose_name = "Backend"

    def __unicode__(self):
        return self.slug

    @classmethod
    def from_message(klass, msg):
        """Fetch a PersistantBackend object from the data buried in a rapidsms.message.Message
        object. In time, this should be moved to the message object itself, since persistance
        should be fairly ubiquitous; but right now, that would couple the framework to this
        individual app. So you can use this for now.
        (Note: the original docstring opened with four quote characters -- a typo, fixed.)"""
        be_slug = msg.connection.backend.slug
        # .get() raises PersistantBackend.DoesNotExist if the slug is unknown
        return klass.objects.get(slug=be_slug)
class PersistantConnection(models.Model):
    """This class is a persistant version of the RapidSMS Connection
    class, to keep track of the various channels of communication
    that Reporters use to interact with RapidSMS (as a backend +
    identity pair, like rapidsms.connection.Connection). When a
    Reporter is seen communicating via a new backend, or is expected
    to do so in future, a PersistantConnection should be created,
    so they can be recognized by their backend + identity pair."""
    backend = models.ForeignKey(PersistantBackend, related_name="connections")
    # the address on that backend -- presumably a phone number or
    # similar; confirm against the backends in use
    identity = models.CharField(max_length=30)
    reporter = models.ForeignKey(Reporter, related_name="connections", blank=True, null=True)
    # per Reporter.connection's docstring, this is (probably) updated by
    # app.py on incoming messages; Reporter.connection() sorts by it
    last_seen = models.DateTimeField(blank=True, null=True)

    class Meta:
        verbose_name = "Connection"
        # at most one row per (backend, identity) pair
        unique_together = ("backend", "identity")

    def __unicode__(self):
        return "%s:%s" % (
            self.backend,
            self.identity)

    def __json__(self):
        # JSON-serializable summary. NOTE(review): "reporter" holds the
        # model instance itself (not a pk), so the serializer consuming
        # this must know how to handle it.
        return {
            "pk": self.pk,
            "identity": self.identity,
            "reporter": self.reporter,
            "str": unicode(self) }
@classmethod
def from_message(klass, msg):
obj, | |
video_mapping = {'Algebra': [(u'Simple Equations', u'9Ek61w1LxSc', u'simple-equations'),
(u'Equations 2', u'XoEn1LfVoTo', u'equations-2'),
(u'Equations 3', u'f15zA0PhSek', u'equations-3'),
(u'Algebra Linear Equations 4',
u'9IUEk9fn2Vs',
u'algebra--linear-equations-4'),
(u'Algebra Solving Inequalities',
u'VgDe_D8ojxw',
u'algebra--solving-inequalities'),
(u'Algebra graphing lines 1',
u'2UrcUfBizyw',
u'algebra--graphing-lines-1'),
(u'Algebra Slope and Y-intercept intuition',
u'Nhn-anmubYU',
u'algebra--slope-and-y-intercept-intuition'),
(u'Algebra Slope', u'hXP1Gv9IMBo', u'algebra--slope'),
(u'Algebra Slope 2', u'Kk9IDameJXk', u'algebra--slope-2'),
(u'Algebra Slope 3', u'8XffLj2zvf4', u'algebra--slope-3'),
(u'Algebra Equation of a line',
u'gvwKv6F69F0',
u'algebra--equation-of-a-line'),
(u'Slope and Y-intercept Intuition',
u'8sz1IPjBRS8',
u'slope-and-y-intercept-intuition'),
(u'Averages', u'9VZsMY15xeU', u'averages'),
(u'Integer sums', u'W254ewkkMck', u'integer-sums'),
(u'Taking percentages', u'_SpE4hQ8D_o', u'taking-percentages'),
(u'Growing by a percentage',
u'X2jVap1YgwI',
u'growing-by-a-percentage'),
(u'Another Percent Word Problem',
u'd1oNF88SAgg',
u'another-percent-word-problem'),
(u'More percent problems',
u'4oeoIOan_h4',
u'more-percent-problems'),
(u'systems of equations',
u'nok99JOhcjo',
u'systems-of-equations'),
(u'Introduction to Ratios (new HD version)',
u'HpdMJaKaXXc',
u'introduction-to-ratios--new-hd-version'),
(u'Ratio problem with basic algebra (new HD)',
u'ITVQrzDSekU',
u'ratio-problem-with-basic-algebra--new-hd'),
(u'More advanced ratio problem--with Algebra (HD version)',
u'ItA_hhRtUuw',
u'more-advanced-ratio-problem--with-algebra--hd-version'),
(u'Alternate Solution to Ratio Problem (HD Version)',
u'-xyTz0WZ1W4',
u'alternate-solution-to-ratio-problem--hd-version'),
(u'Introduction to Ratios',
u'UsPmg_Ne1po',
u'introduction-to-ratios'),
(u'Advanced ratio problems',
u'PASSD2OcU0c',
u'advanced-ratio-problems'),
(u'Age word problems 1', u'bAUT_Pux73w', u'age-word-problems-1'),
(u'Age word problems 2', u'pPqPj8CAPvI', u'age-word-problems-2'),
(u'Age word problems 3', u'DplUpe3oyWo', u'age-word-problems-3'),
(u'Level 1 multiplying expressions',
u'Sc0e6xrRJYY',
u'level-1-multiplying-expressions'),
(u'Solving a quadratic by factoring',
u'N30tN9158Kc',
u'solving-a-quadratic-by-factoring'),
(u'i and Imaginary numbers',
u'rDLDGQMKT3M',
u'i-and-imaginary-numbers'),
(u'Complex Numbers (part 1)',
u'kpywdu1afas',
u'complex-numbers--part-1'),
(u'Complex Numbers (part 2)',
u'bPqB9a1uk_8',
u'complex-numbers--part-2'),
(u'Introduction to the quadratic equation',
u'IWigvJcCAJ0',
u'introduction-to-the-quadratic-equation'),
(u'Quadratic Equation part 2',
u'y19jYxzY8Y8',
u'quadratic-equation-part-2'),
(u'Completing the square',
u'gzm-uhj06q8',
u'completing-the-square'),
(u'Quadratic Formula (proof)',
u'mDmRYfma9C0',
u'quadratic-formula--proof'),
(u'Quadratic Inequalities',
u'ZNtzWpU80-0',
u'quadratic-inequalities'),
(u'Quadratic Inequalities (Visual Explanation)',
u'xdiBjypYFRQ',
u'quadratic-inequalities--visual-explanation'),
(u'Introduction to functions',
u'VhokQhjl5t0',
u'introduction-to-functions'),
(u'Functions Part 2', u'XEblO51pF5I', u'functions-part-2'),
(u'Functions (Part III)',
u'5fcRSie63Hs',
u'functions--part-iii'),
(u'Functions (part 4)', u'rbt51hXmzig', u'functions--part-4'),
(u'Domain of a function',
u'U-k5N1WPk4g',
u'domain-of-a-function'),
(u'Proof log a + log b = log ab',
u'FP2arCfAfBY',
u'proof--log-a---log-b---log-ab'),
(u'Proof A(log B) = log (B^A). log A - log B = log (A/B)',
u'yEAxG_D1HDw',
u'proof--a-log-b----log--b-a---log-a---log-b---log--a-b'),
(u'Proof log_a (B) = (log_x (B))/(log_x (A))',
u'cKOtT4WnZb4',
u'proof--log-a--b-----log-x--b----log-x--a'),
(u'Algebraic Long Division',
u'4u8_AMacu-Y',
u'algebraic-long-division'),
(u'Introduction to Conic Sections',
u'0A7RR0oy2ho',
u'introduction-to-conic-sections'),
(u'Conic Sections Intro to Circles',
u'6r1GQCxyMKI',
u'conic-sections--intro-to-circles'),
(u'Conic Sections Intro to Ellipses',
u'lvAYFUIEpFI',
u'conic-sections--intro-to-ellipses'),
(u'Conic Sections Intro to Hyperbolas',
u'pzSyOTkAsY4',
u'conic-sections--intro-to-hyperbolas'),
(u'Conic Sections Hyperbolas 2',
u'hl58vTCqVIY',
u'conic-sections--hyperbolas-2'),
(u'Conic Sections Hyperbolas 3',
u'lGQw-W1PxBE',
u'conic-sections--hyperbolas-3'),
(u'Identifying Conics 1',
u'LSJuu4Qm2qQ',
u'identifying-conics-1'),
(u'Identifying Conics 2',
u'cvA4VN1dpuY',
u'identifying-conics-2'),
(u'Conic Identification 3',
u'Dru0RHgfp2g',
u'conic-identification-3'),
(u'Foci of an Ellipse', u'QR2vxfwiHAU', u'foci-of-an-ellipse'),
(u'Foci of a Hyperbola', u'S0Fd2Tg2v7M', u'foci-of-a-hyperbola'),
(u'Proof Hyperbola Foci',
u'HPRFmu7JsKU',
u'proof--hyperbola-foci'),
(u'Partial Fraction Expansion 1',
u'S-XKGBesRzk',
u'partial-fraction-expansion-1'),
(u'Partial Fraction Expansion 2',
u'6FrPLJY0rqM',
u'partial-fraction-expansion-2'),
(u'Partial Fraction Expansion 3',
u'A52fEdPn9lg',
u'partial-fraction-expansion-3'),
(u'Parabola Focus and Directrix 1',
u'ZJf9shWlMz0',
u'parabola-focus-and-directrix-1'),
(u'Focus and Directrix of a Parabola 2',
u'1Jm9rREA-uA',
u'focus-and-directrix-of-a-parabola-2'),
(u'Two Passing Bicycles Word Problem',
u'Nx4Xk5S4HQA',
u'two-passing-bicycles-word-problem'),
(u'Passed Bike Word Problem',
u'P03ljxjy8Nw',
u'passed-bike-word-problem'),
(u'Passing Trains', u'qLLjD-g_9ms', u'passing-trains'),
(u'Overtaking Word Problem',
u'LYWUHaWwY0c',
u'overtaking-word-problem'),
(u'Early Train Word Problem',
u'uA3m8vbd2wc',
u'early-train-word-problem'),
(u'Officer on Horseback',
u'IPJAi5zWu9U',
u'officer-on-horseback'),
(u'Rational Inequalities',
u'ZjeMdXV0QMg',
u'rational-inequalities'),
(u'Rational Inequalities 2',
u'2RnS3fSHVV8',
u'rational-inequalities-2'),
(u'Interesting Polynomial Coefficient Problem',
u'hypi8QPsFEk',
u'interesting-polynomial-coefficient-problem'),
(u'Geometric series sum to figure out mortgage payments',
u'i05-okb1EJg',
u'geometric-series-sum-to-figure-out-mortgage-payments'),
(u'Introduction to Function Inverses',
u'W84lObmOp8M',
u'introduction-to-function-inverses'),
(u'Function Inverse Example 1',
u'wSiamij_i_k',
u'function-inverse-example-1'),
(u'Function Inverses Example 2',
u'aeyFb2eVH1c',
u'function-inverses-example-2'),
(u'Function Inverses Example 3',
u'Bq9cq9FZuNM',
u'function-inverses-example-3')],
'AlgebraIWorkedExamples': [(u'Solving Equations 1',
u'p5e5mf_G3FI',
u'solving-equations-1'),
(u'Solving Equations 2',
u'1c5HY3z4k8M',
u'solving-equations-2'),
(u'Solving Equations with the Distributive Property',
u'YZBStgZGyDY',
u'solving-equations-with-the-distributive-property'),
(u'Solving equations with the distributive property 2',
u'PL9UYj2awDc',
u'solving-equations-with-the-distributive-property-2'),
(u'Algebraic Word Problem',
u'cNlwi6lUCEM',
u'algebraic-word-problem'),
(u'Sum of Consecutive Odd Integers',
u'd8De3xcVmnw',
u'sum-of-consecutive-odd-integers'),
(u'Example of Solving for a Variable',
u'Aig1hkq3OsU',
u'example-of-solving-for-a-variable'),
(u'Solving for a Variable 2',
u'BR5yFOt0zao',
u'solving-for-a-variable-2'),
(u'Absolute Value 1',
u'NvGTCzAfvr0',
u'absolute-value-1'),
(u'Absolute Value and Number Lines',
u'frBJEYvyd-8',
u'absolute-value-and-number-lines'),
(u'Absolute Value Equations 1',
u'GwjiR2_7A7Y',
u'absolute-value-equations-1'),
(u'Absolute Value Equation Example 2',
u'jaizi_1IB5c',
u'absolute-value-equation-example-2'),
(u'Patterns in Sequences 1',
u'Zj-a_9cd5jc',
u'patterns-in-sequences-1'),
(u'Patterns in Sequences 2',
u'zIcxrhyJs6M',
u'patterns-in-sequences-2'),
(u'Equations of Sequence Patterns',
u'_3BnyEr5fG4',
u'equations-of-sequence-patterns'),
(u'Finding the 100th Term in a Sequence',
u'JtsyP0tnVRY',
u'finding-the-100th-term-in-a-sequence'),
(u'Functional Relationships 1',
u'5cK86VKoBPw',
u'functional-relationships-1'),
(u'Testing if a relationship is a function',
u'3SO1BQQ9_1E',
u'testing-if-a-relationship-is-a-function'),
(u'Domain and Range 1',
u'C6F33Ir-sY4',
u'domain-and-range-1'),
(u'Domain and Range 2',
u'0lY4PcCYoyE',
u'domain-and-range-2'),
(u'Direct Variation 1',
u'rSadG6EtJmY',
u'direct-variation-1'),
(u'Basic Rate Problem',
u'7WXijfIZh64',
u'basic-rate-problem'),
(u'Basic Linear Function',
u'YB1XuQ1Pc5s',
u'basic-linear-function'),
(u'Exploring linear relationships',
u'qPx7i1jwXX4',
u'exploring-linear-relationships'),
(u'Recognizing Linear Functions',
u'AZroE4fJqtQ',
u'recognizing-linear-functions'),
(u'Exploring nonlinear relationships',
u'D-0E9weT_t0',
u'exploring-nonlinear-relationships'),
(u'Slope of a line',
u'R948Tsyq4vA',
u'slope-of-a-line'),
(u'Slope Example',
u'jTCZfMMcHBo',
u'slope-example'),
(u'X and Y intercepts',
u'xGmef7lFc5w',
u'x--and-y-intercepts'),
(u'X and Y intercepts 2',
u'405boztgZig',
u'x-and-y-intercepts-2'),
(u'Graphing a line in slope intercept form',
u'uk7gS3cZVp4',
u'graphing-a-line-in-slope-intercept-form'),
(u'Equation of a line',
u'-BuoPowT86M',
u'equation-of-a-line'),
(u'Converting to slope-intercept form',
u'V6Xynlqc_tc',
u'converting-to-slope-intercept-form'),
(u'Point-slope and standard form',
u'-6Fu2T_RSGM',
u'point-slope-and-standard-form'),
(u'Parallel Lines',
u'T-aCweuimis',
u'parallel-lines'),
(u'Parallel Lines 2',
u'lOWXZFP8Vuc',
u'parallel-lines-2'),
(u'Parallel lines 3',
u'y5yNi08cr6I',
u'parallel-lines-3'),
(u'Perpendicular Lines',
u'0671cRNjeKI',
u'perpendicular-lines'),
(u'Perpendicular lines 2',
u'TsEhZRT16LU',
u'perpendicular-lines-2'),
(u'Interpreting Inequalities',
u'wo7DSaPP8hQ',
u'interpreting-inequalities'),
(u'Solving Inequalities',
u'y7QLay8wrW8',
u'solving-inequalities'),
(u'Inequalities',
u'FZ2APP6-grU',
u'inequalities'),
(u'Absolute value inequalities',
u'TvUCe6Bomy4',
u'absolute-value-inequalities'),
(u'Absolute Inequalities 2',
u'x5EJG_rAtkY',
u'absolute-inequalities-2'),
(u'Absolute value inequalities 3',
u'y9MGpOGQVqQ',
u'absolute-value-inequalities-3'),
(u'Writing and using inequalities',
u'RHe9X2HDEjA',
u'writing-and-using-inequalities'),
(u'Writing and using inequalities 2',
u'cCMpin3Te4s',
u'writing-and-using-inequalities-2'),
(u'Writing and using inequalities 3',
u'pbLiN8D9gAk',
u'writing-and-using-inequalities-3'),
(u'Solving and graphing linear inequalities in two variables',
u'EoCeL4SPIcA',
u'solving-and-graphing-linear-inequalities-in-two-variables'),
(u'Graphing linear inequalities in two variables',
u'lxTQrsUip9g',
u'graphing-linear-inequalities-in-two-variables'),
(u'Graphing linear inequalities in two variables',
u'YBYu5aZPLeg',
u'graphing-linear-inequalities-in-two-variables'),
(u'Solving systems by graphing',
u'1CiBuN6qraY',
u'solving-systems-by-graphing'),
(u'Solving systems by graphing 2',
u'BNHLzEv6Mjg',
u'solving-systems-by-graphing-2'),
(u'Solving systems by graphing 3',
u'F5Nb6cIRZLU',
u'solving-systems-by-graphing-3'),
(u'Solving systems by substitution 1',
u'HJV_HY0Sh0s',
u'solving-systems-by-substitution-1'),
(u'Solving systems by substitution 2',
u'wB3QCk0MGuw',
u'solving-systems-by-substitution-2'),
(u'Solving systems by substitution 3',
u'0BgUKHTW37E',
u'solving-systems-by-substitution-3'),
(u'Solving systems by elimination',
u'z1hz8-Kri1E',
u'solving-systems-by-elimination'),
(u'Solving systems by elimination 2',
u'Y6JsEja15Vk',
u'solving-systems-by-elimination-2'),
(u'Solving systems by elimination 3',
u'Dqp6xOeR3Ls',
u'solving-systems-by-elimination-3'),
(u'Systems and rate problems',
u'at4T4n4JYNc',
u'systems-and-rate-problems'),
(u'Systems and rate problems 2',
u'H5w55UbIZTw',
u'systems-and-rate-problems-2'),
(u'Systems and rate problems 3',
u'hTBQUibtnBk',
u'systems-and-rate-problems-3'),
(u'Mixture problems 1',
u'5Dzdrb8MKBg',
u'mixture-problems-1'),
(u'Mixture problems 2',
u'OBVGQt1Eeug',
u'mixture-problems-2'),
(u'Mixture problems 3',
u'JVlfQEhzLMM',
u'mixture-problems-3'),
(u'Graphing systems of inequalities',
u'TqsRlc02rtc',
u'graphing-systems-of-inequalities'),
(u'Graphing systems of inequalities 2',
u'YjT3QYfoy4Q',
u'graphing-systems-of-inequalities-2'),
(u'Graphing systems of inequalities 3',
u'UUwmo5qaTuI',
u'graphing-systems-of-inequalities-3'),
(u'Exponent Rules 1',
u'kSYJxGqOcjA',
u'exponent-rules-1'),
(u'Exponent Rules 2',
u'6ND4nKwyCEc',
u'exponent-rules-2'),
(u'Exponent Rules 3',
u'AbmQNC-iE84',
u'exponent-rules-3'),
(u'Scientific notation 1',
u'kSx873lOgIc',
u'scientific-notation-1'),
(u'Scientific notation 2',
u'vDfd0bj3mo0',
u'scientific-notation-2'),
(u'Scientific notation 3',
u'XJBwJjP2_hM',
u'scientific-notation-3'),
(u'Simplifying Expressions with Exponents',
u'tdO4UOLW9d8',
u'simplifying-expressions-with-exponents'),
(u'Simplifying Expressions with Exponents 2',
u'xnLcxdM8OD8',
u'simplifying-expressions-with-exponents-2'),
(u'Simplifying Expressions with Exponents 3',
u'zwUnRQbWPJU',
u'simplifying-expressions-with-exponents-3'),
(u'Pythagorean Theorem 1',
u'JVrkLIcA2qw',
u'pythagorean-theorem-1'),
(u'Pythagorean Theorem 2',
u'O64YFlX1_aI',
u'pythagorean-theorem-2'),
(u'Pythagorean Theorem 3',
u'T0IOrRETWhI',
u'pythagorean-theorem-3'),
(u'Simplifying Radical Expressions1',
u'drhoIgAhlQM',
u'simplifying-radical-expressions1'),
(u'Simplifying Radical Expressions 2',
u'F5iMsjwFLX8',
u'simplifying-radical-expressions-2'),
(u'Simplifying Radical Expressions 3',
u's9i2aVKyxTY',
u'simplifying-radical-expressions-3'),
(u'Solving Radical Equations 1',
u'y4C81qAa3pY',
u'solving-radical-equations-1'),
(u'Solving Radical Equations 2',
u'b6WtwQddAcY',
u'solving-radical-equations-2'),
(u'Solving Radical Equations 3',
u'g6nGcnVB8BM',
u'solving-radical-equations-3'),
(u'Applying Radical Equations 1',
u'U3JxFFdKCKM',
u'applying-radical-equations-1'),
(u'Applying Radical Equations 2',
u'YghRP8S2K-4',
u'applying-radical-equations-2'),
(u'Applying Radical Equations 3',
u'npUtXLjTnxg',
u'applying-radical-equations-3'),
(u'Fractional Exponent Expressions 1',
u'xjCnmPvcmOo',
u'fractional-exponent-expressions-1'),
(u'Fractional Exponent Expressions 2',
u'4F6cFLnAAFc',
u'fractional-exponent-expressions-2'),
(u'Fractional Exponent Expressions 3',
u'Ay0B6Kh9Y3Q',
u'fractional-exponent-expressions-3'),
(u'Multiplying and Dividing Monomials 1',
u'p_61XhXdlxI',
u'multiplying-and-dividing--monomials-1'),
(u'Multiplying and Dividing Monomials 2',
u'Rtje3kbC9lM',
u'multiplying-and-dividing-monomials-2'),
(u'Multiplying and Dividing Monomials 3',
u'abuhkDhowyc',
u'multiplying-and-dividing-monomials-3'),
(u'Polynomials1',
u'EvvxBdNIUeQ',
u'polynomials1'),
(u'Polynomials 2',
u'IGs7IB48Fvg',
u'polynomials-2'),
(u'Adding and Subtracting Polynomials 1',
u'ahdKdxsTj8E',
u'adding-and-subtracting-polynomials-1'),
(u'Adding and Subtracting Polynomials 2',
u'ZGl2ExHwdak',
u'adding-and-subtracting-polynomials-2'),
(u'Adding and Subtracting Polynomials 3',
u'DMyhUb1pZT0',
u'adding-and-subtracting-polynomials-3'),
(u'Multiplying Polynomials1',
u'HB48COey2O8',
u'multiplying-polynomials1'),
(u'Multiplying Polynomials 2',
u'J1HAY8E3gms',
u'multiplying-polynomials-2'),
(u'Multiplying Polynomials 3',
u'bamcYQDzVTw',
u'multiplying-polynomials-3'),
(u'Special Products of Polynomials 1',
u'4GcNzvILqtM',
u'special-products-of-polynomials-1'),
(u'Special Products of Polynomials 2',
u'4fQeHtSdw80',
u'special-products-of-polynomials-2'),
(u'U08_L2_T4_we3 Special Products of Polynomials 3',
u'jYfWFy0yxB4',
u'u08-l2-t4-we3-special-products-of-polynomials-3'),
(u'Bay Area CBS Station with Salman Khan',
u'PhHWmehThg8',
u'bay-area-cbs-station-with-salman-khan'),
(u'Factoring and the Distributive Property',
u'auQU-9KNG74',
u'factoring-and-the-distributive-property'),
(u'Factoring and the Distributive Property 2',
u'499MvHFrqUU',
u'factoring-and-the-distributive-property-2'),
(u'Factoring and the Distributive Property 3',
u'MZl6Mna0leQ',
u'factoring-and-the-distributive-property-3'),
(u'Factoring Trinomials by Grouping 1',
u'HXIj16mjfgk',
u'factoring-trinomials-by-grouping-1'),
(u'Factoring Trinomials by Grouping 2',
u'QQhkEGp-YQQ',
u'factoring-trinomials-by-grouping-2'),
(u'U09_L1_T2_we3 Factoring Trinomials by Grouping 3',
u'fVIZmOQBS5M',
u'u09-l1-t2-we3-factoring-trinomials-by-grouping-3'),
(u'Factoring Trinomials by Grouping 4',
u'u1SAo2GiX8A',
u'factoring-trinomials-by-grouping-4'),
(u'Factoring Trinomials by Grouping 5',
u'R-rhSQzFJL0',
u'factoring-trinomials-by-grouping-5'),
(u'Factoring Trinomials by Grouping 6',
u'd-2Lcp0QKfI',
u'factoring-trinomials-by-grouping-6'),
(u'U09_L2_T1_we1 Factoring Special Products 1',
u'XuwldEyWjH0',
u'u09-l2-t1-we1-factoring-special-products-1'),
(u'Factoring Special Products 2',
u'o-ZbdYVGehI',
u'factoring-special-products-2'),
(u'U09_L2_T1_we3 Factoring Special Products 3',
u'YahJQvY396o',
u'u09-l2-t1-we3-factoring-special-products-3'),
(u'Solving Quadratic Equations by Factoring.avi',
u'2ZzuZvz33X0',
u'solving-quadratic-equations-by-factoring-avi'),
(u'Solving Quadratic Equations by Factoring 2.avi',
u'04RpkdaNzr8',
u'solving-quadratic-equations-by-factoring-2-avi'),
(u'Solving Quadratic Equations by Factoring 3.avi',
u'vl9o9XEfXtw',
u'solving-quadratic-equations-by-factoring-3-avi'),
(u'Quadratic Functions 1',
u'CuPgmA7ytWA',
u'quadratic-functions-1'),
(u'Quadratic Functions 2',
u'dfoXtodyiIA',
u'quadratic-functions-2'),
(u'Quadratic Functions 3',
u'TgKBc3Igx1I',
u'quadratic-functions-3'),
(u'Completing the Square 1',
u'VvuuRpJbbHE',
u'completing-the-square-1'),
(u'Completing the Square 2',
u'6agzj3A9IgA',
u'completing-the-square-2'),
(u'Completing the Square 3',
u'02h9yhc7ruc',
u'completing-the-square-3'),
(u'Completing the Square | |
type: str
# type
self.type = type # type: str
# upload_id
self.upload_id = upload_id # type: str
def validate(self):
    """Validate field formats and delegate to each nested part-info object."""
    domain = self.domain_id
    if domain is not None:
        self.validate_max_length(domain, 'domain_id', 50)
        self.validate_pattern(domain, 'domain_id', '[a-z0-9]{1,50}')
    if self.drive_id is not None:
        self.validate_pattern(self.drive_id, 'drive_id', '[0-9]+')
    # validate each part entry, skipping falsy placeholders
    for part in (self.part_info_list or []):
        if part:
            part.validate()
    if self.share_id is not None:
        self.validate_pattern(self.share_id, 'share_id', '[0-9]+')
def to_map(self):
    """Serialize the set (non-None) fields into a plain dict.

    Note: 'part_info_list' is always present in the result (possibly
    empty), unlike the other keys, which are omitted when None.
    """
    result = {}
    for name in ('domain_id', 'drive_id', 'file_path'):
        value = getattr(self, name)
        if value is not None:
            result[name] = value
    # part_info_list is always emitted, even when unset
    result['part_info_list'] = []
    if self.part_info_list is not None:
        for part in self.part_info_list:
            result['part_info_list'].append(part.to_map() if part else None)
    for name in ('share_id', 'type', 'upload_id'):
        value = getattr(self, name)
        if value is not None:
            result[name] = value
    return result
def from_map(self, map={}):
    """Populate fields from *map* (skipping absent/None entries) and return self.

    'part_info_list' entries are deserialized into UploadPartInfo objects.
    """
    for name in ('domain_id', 'drive_id', 'file_path'):
        value = map.get(name)
        if value is not None:
            setattr(self, name, value)
    self.part_info_list = []
    if map.get('part_info_list') is not None:
        for item in map.get('part_info_list'):
            # UploadPartInfo.from_map returns the populated instance
            self.part_info_list.append(UploadPartInfo().from_map(item))
    for name in ('share_id', 'type', 'upload_id'):
        value = map.get(name)
        if value is not None:
            setattr(self, name, value)
    return self
class HostingDeleteFileRequest(TeaModel):
    """
    Request body for deleting a hosted file (path-based API).
    """
    def __init__(self, headers=None, drive_id=None, file_path=None, permanently=None, share_id=None):
        self.headers = headers  # type: Dict[str, str]
        # drive_id
        self.drive_id = drive_id  # type: str
        # file_path
        self.file_path = file_path  # type: str
        # permanently
        # type: false
        self.permanently = permanently  # type: bool
        # share_id
        self.share_id = share_id  # type: str

    def validate(self):
        """Check field formats: drive_id numeric; file_path required and
        at most 1000 chars; share_id alphanumeric/dash."""
        if self.drive_id is not None:
            self.validate_pattern(self.drive_id, 'drive_id', '[0-9]+')
        self.validate_required(self.file_path, 'file_path')
        if self.file_path is not None:
            self.validate_max_length(self.file_path, 'file_path', 1000)
        if self.share_id is not None:
            self.validate_pattern(self.share_id, 'share_id', '[0-9a-zA-Z-]+')

    def to_map(self):
        """Serialize the set (non-None) fields into a plain dict."""
        result = {}
        for name in ('headers', 'drive_id', 'file_path', 'permanently', 'share_id'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, map={}):
        """Populate fields from *map* (skipping absent/None entries) and return self."""
        for name in ('headers', 'drive_id', 'file_path', 'permanently', 'share_id'):
            value = map.get(name)
            if value is not None:
                setattr(self, name, value)
        return self
class HostingDeleteFileResponse(TeaModel):
    """
    Response of a delete-file operation.
    """
    def __init__(self, async_task_id=None, domain_id=None, drive_id=None, file_path=None, share_id=None):
        # async_task_id
        self.async_task_id = async_task_id  # type: str
        # domain_id
        self.domain_id = domain_id  # type: str
        # drive_id
        self.drive_id = drive_id  # type: str
        # file_path
        self.file_path = file_path  # type: str
        # share_id
        self.share_id = share_id  # type: str

    def validate(self):
        """Check id formats when the ids are present."""
        for name, pattern in (('domain_id', '[a-z0-9A-Z]+'),
                              ('drive_id', '[0-9]+'),
                              ('share_id', '[a-z0-9A-Z]+')):
            value = getattr(self, name)
            if value is not None:
                self.validate_pattern(value, name, pattern)

    def to_map(self):
        """Serialize the set (non-None) fields into a plain dict."""
        result = {}
        for name in ('async_task_id', 'domain_id', 'drive_id', 'file_path', 'share_id'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, map={}):
        """Populate fields from *map* (skipping absent/None entries) and return self."""
        for name in ('async_task_id', 'domain_id', 'drive_id', 'file_path', 'share_id'):
            value = map.get(name)
            if value is not None:
                setattr(self, name, value)
        return self
class HostingDeleteFilesResponse(TeaModel):
    """
    Response of a batch delete-files operation.
    """
    def __init__(self, deleted_file_id_list=None, domain_id=None, drive_id=None, share_id=None):
        # deleted_file_id_list
        self.deleted_file_id_list = deleted_file_id_list  # type: List[str]
        # domain_id
        self.domain_id = domain_id  # type: str
        # drive_id
        self.drive_id = drive_id  # type: str
        # share_id
        self.share_id = share_id  # type: str

    def validate(self):
        """Check id formats when the ids are present."""
        for name, pattern in (('domain_id', '[a-z0-9A-Z]+'),
                              ('drive_id', '[0-9]+'),
                              ('share_id', '[0-9]+')):
            value = getattr(self, name)
            if value is not None:
                self.validate_pattern(value, name, pattern)

    def to_map(self):
        """Serialize the set (non-None) fields into a plain dict."""
        result = {}
        for name in ('deleted_file_id_list', 'domain_id', 'drive_id', 'share_id'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, map={}):
        """Populate fields from *map* (skipping absent/None entries) and return self."""
        for name in ('deleted_file_id_list', 'domain_id', 'drive_id', 'share_id'):
            value = map.get(name)
            if value is not None:
                setattr(self, name, value)
        return self
class HostingGetDownloadUrlRequest(TeaModel):
    """
    Request body for fetching a file download URL.
    """
    def __init__(self, headers=None, drive_id=None, expire_sec=None, file_name=None, file_path=None, share_id=None):
        # extra request headers
        self.headers = headers  # type: Dict[str, str]
        # drive_id
        self.drive_id = drive_id  # type: str
        # expire_sec
        self.expire_sec = expire_sec  # type: int
        # file_name
        self.file_name = file_name  # type: str
        # file_path
        self.file_path = file_path  # type: str
        # share_id
        self.share_id = share_id  # type: str

    def validate(self):
        if self.drive_id is not None:
            self.validate_pattern(self.drive_id, 'drive_id', '[0-9]+')
        if self.expire_sec is not None:
            # expiry must stay within [10, 14400] seconds
            self.validate_maximum(self.expire_sec, 'expire_sec', 14400)
            self.validate_minimum(self.expire_sec, 'expire_sec', 10)
        # file_path is mandatory and capped at 1000 characters
        self.validate_required(self.file_path, 'file_path')
        if self.file_path is not None:
            self.validate_max_length(self.file_path, 'file_path', 1000)
        if self.share_id is not None:
            self.validate_pattern(self.share_id, 'share_id', '[0-9a-zA-Z-]+')

    def to_map(self):
        # Serialize only the fields that are set.
        result = {}
        for name in ('headers', 'drive_id', 'expire_sec', 'file_name', 'file_path', 'share_id'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, map={}):
        # Copy every present key from the mapping onto this instance.
        for name in ('headers', 'drive_id', 'expire_sec', 'file_name', 'file_path', 'share_id'):
            value = map.get(name)
            if value is not None:
                setattr(self, name, value)
        return self
class HostingGetDownloadUrlResponse(TeaModel):
    """
    Response carrying a file download URL.
    """
    def __init__(self, expiration=None, method=None, url=None):
        # expiration
        self.expiration = expiration  # type: str
        # method
        self.method = method  # type: str
        # url
        self.url = url  # type: str

    def validate(self):
        # No constraints on this model.
        pass

    def to_map(self):
        # Serialize only the fields that are set.
        result = {}
        for name in ('expiration', 'method', 'url'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, map={}):
        # Copy every present key from the mapping onto this instance.
        for name in ('expiration', 'method', 'url'):
            value = map.get(name)
            if value is not None:
                setattr(self, name, value)
        return self
class HostingGetFileRequest(TeaModel):
    """
    Request for fetching file metadata.
    """
    def __init__(self, headers=None, drive_id=None, file_path=None, image_thumbnail_process=None,
                 image_url_process=None, share_id=None, url_expire_sec=None):
        # extra request headers
        self.headers = headers  # type: Dict[str, str]
        # drive_id
        self.drive_id = drive_id  # type: str
        # file_path
        self.file_path = file_path  # type: str
        # image_thumbnail_process
        self.image_thumbnail_process = image_thumbnail_process  # type: str
        # image_url_process
        self.image_url_process = image_url_process  # type: str
        # share_id
        self.share_id = share_id  # type: str
        # url_expire_sec
        self.url_expire_sec = url_expire_sec  # type: int

    def validate(self):
        if self.drive_id is not None:
            self.validate_pattern(self.drive_id, 'drive_id', '[0-9]+')
        # file_path is mandatory and capped at 1000 characters
        self.validate_required(self.file_path, 'file_path')
        if self.file_path is not None:
            self.validate_max_length(self.file_path, 'file_path', 1000)
        if self.share_id is not None:
            self.validate_pattern(self.share_id, 'share_id', '[0-9a-zA-Z-]+')
        if self.url_expire_sec is not None:
            # URL expiry must stay within [10, 14400] seconds
            self.validate_maximum(self.url_expire_sec, 'url_expire_sec', 14400)
            self.validate_minimum(self.url_expire_sec, 'url_expire_sec', 10)

    def to_map(self):
        # Serialize only the fields that are set.
        result = {}
        for name in ('headers', 'drive_id', 'file_path', 'image_thumbnail_process',
                     'image_url_process', 'share_id', 'url_expire_sec'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, map={}):
        # Copy every present key from the mapping onto this instance.
        for name in ('headers', 'drive_id', 'file_path', 'image_thumbnail_process',
                     'image_url_process', 'share_id', 'url_expire_sec'):
            value = map.get(name)
            if value is not None:
                setattr(self, name, value)
        return self
class HostingGetFileResponse(TeaModel):
"""
获取文件元数据response
"""
def __init__(self, content_hash=None, content_hash_name=None, content_type=None, crc_64hash=None,
created_at=None, description=None, domain_id=None, download_url=None, drive_id=None, file_extension=None,
file_path=None, name=None, parent_file_path=None, share_id=None, size=None, status=None, thumbnail=None,
trashed_at=None, type=None, updated_at=None, upload_id=None, url=None):
# Content Hash
self.content_hash = content_hash # type: str
# content_hash_name
self.content_hash_name = content_hash_name # type: str
# content_type
self.content_type = content_type # type: str
# crc64_hash
self.crc_64hash | |
= []
self.blockEditListSizer = blockEditListSizer = wx.BoxSizer(wx.VERTICAL)
for idx in range(0, 3):
p = rompanel.SpritePanel(blockWindow, wx.ID_ANY, 24, 24, self.palette, scale=2, bg=None, xpad=4, ypad=4, func=self.OnClickBlockEditPanel)
p.index = idx
self.blockEditPanels.append(p)
t = wx.StaticText(blockWindow, -1, `idx`)
self.blockEditText.append(t)
blockEditListSizer.Add(t, 0, wx.ALIGN_CENTER)
blockEditListSizer.Add(p, 0, wx.ALIGN_CENTER | wx.BOTTOM, 5)
#print "\n".join(dir(blockEditScroll))
self.blockEditListSelSizer = blockEditListSelSizer = wx.BoxSizer(wx.VERTICAL)
blockEditListSelSizer.Add(self.blockEditPosText, 0, wx.ALIGN_CENTER)
blockEditListSelSizer.Add(self.blockEditSlider, 1, wx.ALIGN_CENTER)
blockEditListSelSizer.Add(self.blockEditMaxText, 0, wx.ALIGN_CENTER)
blockEditListSizer.AddSizer(self.blockEditAddButton, 0, wx.ALL | wx.ALIGN_CENTER)
blockEditListSizer.AddSizer(self.blockEditDelButton, 0, wx.ALL | wx.ALIGN_CENTER)
sbs2blockListBS.AddSizer(blockEditListSelSizer, 0, wx.EXPAND | wx.RIGHT | wx.LEFT, 5)
sbs2blockListBS.AddSizer(blockEditListSizer, 1, wx.EXPAND | wx.RIGHT, 5)
# --------------------------------
# -----
#layoutWindowSizer = wx.BoxSizer(wx.HORIZONTAL)
#layoutWindow.SetSizer(layoutWindowSizer)
#self.mapEditScroll = mapEditScroll = wx.lib.scrolledpanel.ScrolledPanel(mapWindow, -1, size=(100,200))
#mapEditScroll.SetupScrolling(scroll_x = False)
#mapEditScroll.SetScrollRate(0,77)
self.blockListSlider = wx.Slider(layoutWindow, wx.ID_ANY, 0, 0, 15, style=wx.SL_HORIZONTAL)
self.blockListPosText = wx.StaticText(layoutWindow, -1, "0")
self.blockListMaxText = wx.StaticText(layoutWindow, -1, "0")
self.blockListSlider.context = "list"
self.blockListPanels = []
self.blockListText = []
self.blockListSizer = blockListSizer = wx.FlexGridSizer(7,8)
self.blockListLeftText = wx.StaticText(layoutWindow, -1, "L (0)")
self.blockListRightText = wx.StaticText(layoutWindow, -1, "R (0)")
self.blockListLeftPanel = rompanel.SpritePanel(layoutWindow, wx.ID_ANY, 24, 24, self.palette, scale=1.5, bg=17, func=self.OnClickBlockSelPanel)
self.blockListRightPanel = rompanel.SpritePanel(layoutWindow, wx.ID_ANY, 24, 24, self.palette, scale=1.5, bg=18, func=self.OnClickBlockSelPanel)
self.blockListOverText = wx.StaticText(layoutWindow, -1, "Block 000")
self.blockListOverText.SetFont(self.GetTopLevelParent().editFont)
self.blockListSliderSizer = blockListSliderSizer = wx.BoxSizer(wx.HORIZONTAL)
blockListSliderSizer.Add(self.blockListPosText, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
blockListSliderSizer.Add(self.blockListSlider, 1)
blockListSliderSizer.Add(self.blockListMaxText, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT)
blockListSliderSizer.Add(self.blockListOverText, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT | wx.LEFT, 10)
numC = blockListSizer.GetCols()
numR = blockListSizer.GetRows()
for idx in range(0, numC*numR, numC):
#for c in range(0, numC):
# t = wx.StaticText(layoutWindow, -1, `idx+c`)
# self.blockListText.append(t)
# blockListSizer.Add(t, 0, wx.ALIGN_CENTER)
for c in range(0, numC):
p = rompanel.SpritePanel(layoutWindow, wx.ID_ANY, 24, 24, self.palette, scale=1.5, bg=16, func=self.OnClickBlockListPanel, edit=True, grid=24)
p.index = idx+c
self.blockListPanels.append(p)
blockListSizer.Add(p, 0, wx.ALIGN_CENTER)
#print "\n".join(dir(mapEditScroll))
layoutWindowSizer2 = wx.StaticBoxSizer(wx.StaticBox(layoutWindow, -1, "Block List"), wx.HORIZONTAL)
layoutBlockListSizer = wx.BoxSizer(wx.VERTICAL)
layoutBlockListSizer.AddSizer(blockListSizer, 1, wx.EXPAND)
layoutBlockListSizer.AddSizer(blockListSliderSizer, 0, wx.EXPAND | wx.ALIGN_CENTER)
#layoutWindowSizer2.AddSizer(blockListSelSizer, 0, wx.EXPAND | wx.RIGHT | wx.LEFT, 5)
layoutWindowSizer2.AddSizer(layoutBlockListSizer, 0, wx.ALL, 5)
#layoutWindowSizer.Layout()
# -
layoutWindowSizer = wx.BoxSizer(wx.VERTICAL)
layoutWindowSubSizer = wx.BoxSizer(wx.VERTICAL)
layoutWindow.SetSizer(layoutWindowSizer)
layoutInterBlockSizer = wx.StaticBoxSizer(wx.StaticBox(layoutWindow, -1, "Selected Blocks"), wx.VERTICAL)
layoutInterObsSizer = wx.StaticBoxSizer(wx.StaticBox(layoutWindow, -1, "Movement Data"), wx.VERTICAL)
layoutInterEventSizer = wx.StaticBoxSizer(wx.StaticBox(layoutWindow, -1, "Event Data"), wx.VERTICAL)
layoutInterObsGrid = wx.GridBagSizer()
layoutInterEventGrid = wx.GridBagSizer()
radios = [None] * 12
texts = ["Obstructed", "* Stairs", "Warp", "Trigger",
"Table/Desk", "Chest", "Barrel", "Vase",
"Searchable", "Perm Copy", "Temp Copy", "Undo Copy"]
self.interBlockRadio = wx.RadioButton(layoutWindow, wx.ID_ANY, "Graphical Block", style=wx.RB_GROUP)
self.interBlockRadio.context = 0x03ff
radios[0] = self.interObsRadio = wx.RadioButton(layoutWindow, wx.ID_ANY, "")
radios[1] = self.interStairsRadio = wx.RadioButton(layoutWindow, wx.ID_ANY, "")
radios[2] = self.interWarpRadio = wx.RadioButton(layoutWindow, wx.ID_ANY, "")
radios[3] = self.interTriggerRadio = wx.RadioButton(layoutWindow, wx.ID_ANY, "")
radios[4] = self.interTableRadio = wx.RadioButton(layoutWindow, wx.ID_ANY, "")
radios[5] = self.interChestRadio = wx.RadioButton(layoutWindow, wx.ID_ANY, "")
radios[6] = self.interBarrelRadio = wx.RadioButton(layoutWindow, wx.ID_ANY, "")
radios[7] = self.interVaseRadio = wx.RadioButton(layoutWindow, wx.ID_ANY, "")
radios[8] = self.interSearchRadio = wx.RadioButton(layoutWindow, wx.ID_ANY, "")
radios[9] = self.interCopyRadio = wx.RadioButton(layoutWindow, wx.ID_ANY, "")
radios[10] = self.interShowRadio = wx.RadioButton(layoutWindow, wx.ID_ANY, "")
radios[11] = self.interHideRadio = wx.RadioButton(layoutWindow, wx.ID_ANY, "")
masks = [0xc000, 0x4000, 0x1000, 0x1400, 0x2800, 0x1800, 0x3000, 0x2c00, 0x1c00, 0x0400, 0x0800, 0x0c00]
layoutInterObsGrid.SetVGap(1)
layoutInterEventGrid.SetVGap(1)
layoutInterObsGrid.AddGrowableCol(1, 1)
layoutInterEventGrid.AddGrowableCol(1, 1)
#layoutInterObsGrid.SetFlexibleDirection(wx.HORIZONTAL)
for i in range(len(masks)):
mask = masks[i]
radios[i].SetLabel(texts[i])
radios[i].context = mask
if i%2:
radios[i].SetLayoutDirection(wx.Layout_RightToLeft)
if i < 2:
sizer = layoutInterObsGrid
ofs = 0
else:
sizer = layoutInterEventGrid
ofs = 2
flag = [wx.ALIGN_LEFT, wx.ALIGN_RIGHT][i%2]
flag2 = [wx.RIGHT, wx.LEFT][i%2]
p = rompanel.SpritePanel(layoutWindow, wx.ID_ANY, 24, 24, self.palette, scale=1, bg=None, func=None, draw=self.drawMapData)
p.special = mask
sizer.Add(p, flag=flag2 | wx.ALIGN_CENTER_VERTICAL, border=5, pos=(i / 2 * 2 - ofs, (i%2) * 2), span=(2,1))
sizer.Add(radios[i], flag=wx.ALIGN_CENTER_VERTICAL | flag, pos=(i - ofs, 1))
layoutInterObsSizer.AddSizer(layoutInterObsGrid, 0)#1, wx.EXPAND)
layoutInterEventSizer.AddSizer(layoutInterEventGrid, 0)#1, wx.EXPAND)
self.blockListSelSizer = blockListSelSizer = wx.FlexGridSizer(2,2)
blockListSelSizer.Add(self.blockListLeftText, 1, wx.ALIGN_CENTER)
blockListSelSizer.Add(self.blockListRightText, 1, wx.ALIGN_CENTER)
blockListSelSizer.Add(self.blockListLeftPanel, 1, wx.ALIGN_CENTER | wx.LEFT | wx.RIGHT, 10)
blockListSelSizer.Add(self.blockListRightPanel, 1, wx.ALIGN_CENTER | wx.LEFT | wx.RIGHT, 10)
layoutInterBlockSizer.Add(self.interBlockRadio, 0, wx.ALIGN_CENTER | wx.BOTTOM, 5)
layoutInterBlockSizer.AddSizer(blockListSelSizer, 1, wx.ALIGN_CENTER)
layoutWindowSubSizer.AddSizer(layoutInterBlockSizer, 0, wx.ALL ^ wx.BOTTOM | wx.EXPAND | wx.ALIGN_CENTER, 5)
layoutWindowSubSizer.AddSizer(layoutInterObsSizer, 0, wx.LEFT | wx.RIGHT | wx.EXPAND | wx.ALIGN_CENTER, 5)
layoutWindowSubSizer.AddSizer(layoutInterEventSizer, 0, wx.LEFT | wx.RIGHT | wx.EXPAND | wx.ALIGN_CENTER, 5)
layoutWndSizer.AddSizer(layoutWindowSubSizer, 0, wx.LEFT, 10)
layoutWndSizer.AddSizer(layoutWindowSizer2, 0, wx.ALL, 5)
#layoutSizer.Layout()
#layoutInterObsGrid.RecalcSizes()
#print "\n".join(dir(layoutInterObsGrid))
#print `layoutInterObsGrid.GetRowHeights()`
#raw_input()
# --------------------------------
sbs2eventSizer = wx.BoxSizer(wx.HORIZONTAL)
sbs2eventCol1Sizer = wx.BoxSizer(wx.VERTICAL)
#eventSetupBS = wx.StaticBoxSizer(wx.StaticBox(eventWindow, -1, "Select setup, if applicable."), wx.VERTICAL)
#eventSetupPropSizer = wx.FlexGridSizer(2,2)
#self.eventConfigNameText = wx.StaticText(eventWindow, -1, "Name:")
#self.eventConfigFlagCheck = wx.CheckBox(eventWindow, wx.ID_ANY, " Flag:")
#self.eventConfigNameCtrl = wx.TextCtrl(eventWindow, wx.ID_ANY)
#self.eventConfigFlagCtrl = wx.SpinCtrl(eventWindow, wx.ID_ANY, min=0, max=2047, size=(55,20))
#eventSetupPropSizer.Add(self.eventConfigNameText, 0, wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 5)
#eventSetupPropSizer.Add(self.eventConfigNameCtrl, 0, wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, 5)
#eventSetupPropSizer.Add(self.eventConfigFlagCheck, 0, wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 5)
#eventSetupPropSizer.Add(self.eventConfigFlagCtrl, 0, wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, 5)
#eventSetupBS.Add(eventSetupPropSizer, 0, wx.ALIGN_CENTER | wx.TOP | wx.BOTTOM, 5)
# ----
self.eventTypeBS = eventTypeBS = wx.StaticBoxSizer(wx.StaticBox(eventWindow, -1, "Event Type"), wx.VERTICAL)
eventTypeRow1Sizer = wx.BoxSizer(wx.HORIZONTAL)
self.eventTypeList = wx.ComboBox(eventWindow, wx.ID_ANY, size=(160,-1))
self.eventTypeList.AppendItems(
[
"Warps",
"Block Copies",
"Obtainable Items",
"NPCs",
"Scene Triggers",
"Books, Signs, Etc.",
])
self.eventTypeList.SetSelection(0)
self.eventConfigBS = eventConfigBS = wx.StaticBoxSizer(wx.StaticBox(eventWindow, -1, "Configuration (if applicable)"), wx.VERTICAL)
self.eventConfigList = wx.ListBox(eventWindow, wx.ID_ANY, size=(160,58))
self.eventConfigAddButton = wx.Button(eventWindow, wx.ID_ANY, "Add", size=(50,20))
self.eventConfigCopyButton = wx.Button(eventWindow, wx.ID_ANY, "Copy", size=(50,20))
self.eventConfigDelButton = wx.Button(eventWindow, wx.ID_ANY, "Delete", size=(50,20))
eventConfigButtonSizer = wx.BoxSizer(wx.HORIZONTAL)
eventConfigButtonSizer.Add(self.eventConfigAddButton, 0)
eventConfigButtonSizer.Add(self.eventConfigCopyButton, 0)
eventConfigButtonSizer.Add(self.eventConfigDelButton, 0)
self.eventBS = eventBS = wx.StaticBoxSizer(wx.StaticBox(eventWindow, -1, "Event"), wx.VERTICAL)
self.eventList = wx.ListBox(eventWindow, wx.ID_ANY, size=(160,82))
#self.eventSelectCtrl = wx.SpinCtrl(eventWindow, wx.ID_ANY, min=0, max=20, size=(45,20))
eventTypeRow1Sizer.Add(self.eventTypeList, 1, wx.RIGHT | wx.LEFT, 5)
#ventTypeRow1Sizer.Add(self.eventSelectCtrl, 0)
eventConfigBS.Add(self.eventConfigList, 0, wx.ALIGN_CENTER | wx.LEFT | wx.RIGHT, 5)
eventConfigBS.AddSizer(eventConfigButtonSizer, 0, wx.ALIGN_CENTER)
self.eventAddButton = wx.Button(eventWindow, wx.ID_ANY, "Add", size=(50,20))
self.eventCopyButton = wx.Button(eventWindow, wx.ID_ANY, "Copy", size=(50,20))
self.eventDelButton = wx.Button(eventWindow, wx.ID_ANY, "Delete", size=(50,20))
eventListButtonSizer = wx.BoxSizer(wx.HORIZONTAL)
eventListButtonSizer.Add(self.eventAddButton, 0)
eventListButtonSizer.Add(self.eventCopyButton, 0)
eventListButtonSizer.Add(self.eventDelButton, 0)
eventTypeBS.Add(eventTypeRow1Sizer, 0, wx.EXPAND | wx.TOP | wx.BOTTOM, 5)
#eventConfigBS.AddSizer(eventConfigSizer, 0, wx.EXPAND | wx.BOTTOM | wx.ALIGN_CENTER, 5)
eventBS.Add(self.eventList, 0, wx.ALIGN_CENTER | wx.LEFT | wx.RIGHT, 5)
eventBS.AddSizer(eventListButtonSizer, 0, wx.ALIGN_CENTER)
self.eventPropBox = eventPropBox = wx.StaticBox(eventWindow, -1, "Event Properties")
self.eventPropBS = eventPropBS = wx.StaticBoxSizer(eventPropBox, wx.VERTICAL)
eventNameText = wx.StaticText(eventWindow, -1, "Name:")
self.eventNameCtrl = wx.TextCtrl(eventWindow, wx.ID_ANY, size=(240,-1))
eventPropNameSizer = wx.BoxSizer(wx.HORIZONTAL)
eventPropNameSizer.Add(eventNameText, 0, wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, 5)
eventPropNameSizer.Add(self.eventNameCtrl, 1)
eventPropBS.AddSizer(eventPropNameSizer, 0, wx.LEFT | wx.RIGHT | wx.EXPAND | wx.BOTTOM, 5)
# ----
self.eventPropWarp = wx.Panel(eventWindow, -1)
eventPropWarpSizer = wx.BoxSizer(wx.VERTICAL)
warpFromCoordGrid = wx.FlexGridSizer(2,2)
self.eventPropWarpXCheck = wx.CheckBox(self.eventPropWarp, wx.ID_ANY, " Trigger X:")
self.eventPropWarpYCheck = wx.CheckBox(self.eventPropWarp, wx.ID_ANY, " Trigger Y:")
self.eventPropWarpXCtrl = wx.SpinCtrl(self.eventPropWarp, wx.ID_ANY, min=0, max=64, size=(45,20))
self.eventPropWarpYCtrl = wx.SpinCtrl(self.eventPropWarp, wx.ID_ANY, min=0, max=64, size=(45,20))
warpFromCoordGrid.Add(self.eventPropWarpXCheck, 0, wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 5)
warpFromCoordGrid.Add(self.eventPropWarpXCtrl, 0, wx.ALIGN_CENTER_VERTICAL, 5)
warpFromCoordGrid.Add(self.eventPropWarpYCheck, 0, wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 5)
warpFromCoordGrid.Add(self.eventPropWarpYCtrl, 0, wx.ALIGN_CENTER_VERTICAL, 5)
warpDestMapSizer = wx.BoxSizer(wx.VERTICAL)
self.eventPropWarpChangeCheck = wx.CheckBox(self.eventPropWarp, wx.ID_ANY, " Change map to:")
self.eventPropWarpMapList = wx.ComboBox(self.eventPropWarp, wx.ID_ANY, size=(200,-1))
self.eventPropWarpMapList.AppendItems([s.name for s in self.rom.data["maps"]])
self.eventPropWarpMapList.SetSelection(0)
warpDestMapSizer.Add(self.eventPropWarpChangeCheck, 0, wx.LEFT | wx.EXPAND, 0)
warpDestMapSizer.AddSpacer(3)
warpDestMapSizer.Add(self.eventPropWarpMapList, 0, wx.LEFT | wx.EXPAND, 20)
warpToCoordSizer = wx.BoxSizer(wx.HORIZONTAL)
warpToCoordGrid = wx.FlexGridSizer(2,2)
self.eventPropWarpDestXCheck = wx.CheckBox(self.eventPropWarp, wx.ID_ANY, " New X: ")
self.eventPropWarpDestYCheck = wx.CheckBox(self.eventPropWarp, wx.ID_ANY, " New Y: ")
self.eventPropWarpDestXCtrl = wx.SpinCtrl(self.eventPropWarp, wx.ID_ANY, min=0, max=64, size=(45,20))
self.eventPropWarpDestYCtrl = wx.SpinCtrl(self.eventPropWarp, wx.ID_ANY, min=0, max=64, size=(45,20))
warpToCoordGrid.Add(self.eventPropWarpDestXCheck, 0, wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 5)
warpToCoordGrid.Add(self.eventPropWarpDestXCtrl, 0, wx.ALIGN_CENTER_VERTICAL, 5)
warpToCoordGrid.Add(self.eventPropWarpDestYCheck, 0, wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 5)
warpToCoordGrid.Add(self.eventPropWarpDestYCtrl, 0, wx.ALIGN_CENTER_VERTICAL, 5)
self.warpFacingUpRadio = wx.RadioButton(self.eventPropWarp, wx.ID_ANY, "", style=wx.RB_GROUP)
self.warpFacingLeftRadio = wx.RadioButton(self.eventPropWarp, wx.ID_ANY, "")
self.warpFacingRightRadio = wx.RadioButton(self.eventPropWarp, wx.ID_ANY, "")
self.warpFacingDownRadio = wx.RadioButton(self.eventPropWarp, wx.ID_ANY, "")
self.warpFacingUpRadio.context = 1
self.warpFacingLeftRadio.context = 2
self.warpFacingRightRadio.context = 0
self.warpFacingDownRadio.context = 3
self.warpFacingRadios = [self.warpFacingRightRadio, self.warpFacingUpRadio, self.warpFacingLeftRadio, self.warpFacingDownRadio]
warpFacingFacingMidSizer = wx.BoxSizer(wx.HORIZONTAL)
warpFacingFacingMidSizer.Add(self.warpFacingLeftRadio, 1, wx.RIGHT, 5)
warpFacingFacingMidSizer.Add(self.warpFacingRightRadio, 1, wx.LEFT, 5)
warpFacingText = wx.StaticText(self.eventPropWarp, -1, "Facing")
warpFacingSizer = wx.BoxSizer(wx.VERTICAL)
warpFacingSizer.Add(warpFacingText, 0, wx.ALIGN_CENTER | wx.BOTTOM, 3)
warpFacingSizer.Add(self.warpFacingUpRadio, 0, wx.ALIGN_CENTER)
warpFacingSizer.AddSizer(warpFacingFacingMidSizer, 0, wx.ALIGN_CENTER | wx.EXPAND)
warpFacingSizer.Add(self.warpFacingDownRadio, 0, wx.ALIGN_CENTER | wx.BOTTOM, 3)
warpToCoordSizer.AddSizer(warpToCoordGrid, 0, wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, 20)
warpToCoordSizer.AddSizer(warpFacingSizer, | |
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2019 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.vmc.orgs.sddcs.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class Clusters(VapiInterface):
    """
    Client stub for the 'com.vmware.vmc.orgs.sddcs.clusters' service.
    """
    _VAPI_SERVICE_ID = 'com.vmware.vmc.orgs.sddcs.clusters'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _ClustersStub)

    def create(self, org, sddc, cluster_config):
        """
        Creates a new cluster in customers sddcs with passed clusterConfig.

        :type  org: :class:`str`
        :param org: Organization identifier. (required)
        :type  sddc: :class:`str`
        :param sddc: Sddc Identifier. (required)
        :type  cluster_config: :class:`com.vmware.vmc.model_client.ClusterConfig`
        :param cluster_config: clusterConfig (required)
        :rtype: :class:`com.vmware.vmc.model_client.Task`
        :return: com.vmware.vmc.model.Task
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
            Unauthorized
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            The sddc is not in a state that's valid for updates
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Access not allowed to the operation for the current user
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Cannot find the cluster with the given identifier
        """
        # Delegate to the generated stub; keys must match the operation's
        # declared parameter names.
        return self._invoke('create',
                            dict(org=org, sddc=sddc, cluster_config=cluster_config))

    def delete(self, org, sddc, cluster):
        """
        This is a force operation which will delete the cluster even if there
        can be a data loss. Before calling this operation, all the VMs should
        be powered off.

        :type  org: :class:`str`
        :param org: Organization identifier. (required)
        :type  sddc: :class:`str`
        :param sddc: Sddc Identifier. (required)
        :type  cluster: :class:`str`
        :param cluster: cluster identifier (required)
        :rtype: :class:`com.vmware.vmc.model_client.Task`
        :return: com.vmware.vmc.model.Task
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
            Unauthorized
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            The sddc is not in a state that's valid for updates
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Access not allowed to the operation for the current user
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Cannot find the cluster with the given id
        """
        return self._invoke('delete',
                            dict(org=org, sddc=sddc, cluster=cluster))
class Convert(VapiInterface):
    """
    Client stub for the 'com.vmware.vmc.orgs.sddcs.convert' service.
    """
    _VAPI_SERVICE_ID = 'com.vmware.vmc.orgs.sddcs.convert'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _ConvertStub)

    def create(self, org, sddc):
        """
        This API converts a one host SDDC to a four node DEFAULT SDDC. It
        takes care of configuring and upgrading the vCenter configurations on
        the SDDC for high availability and data redundancy.

        :type  org: :class:`str`
        :param org: Organization identifier. (required)
        :type  sddc: :class:`str`
        :param sddc: Sddc Identifier. (required)
        :rtype: :class:`com.vmware.vmc.model_client.Task`
        :return: com.vmware.vmc.model.Task
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
            Unauthorized
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            The sddc is not in a state that's valid for updates, Method not
            allowed
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Access not allowed to the operation for the current user
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Cannot find the SDDC with given identifier
        """
        return self._invoke('create', dict(org=org, sddc=sddc))
class Esxs(VapiInterface):
    """
    Client stub for the 'com.vmware.vmc.orgs.sddcs.esxs' service.
    """
    _VAPI_SERVICE_ID = 'com.vmware.vmc.orgs.sddcs.esxs'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _EsxsStub)

    def create(self, org, sddc, esx_config, action=None):
        """
        Add/Remove one or more ESX hosts in the target cloud.

        :type  org: :class:`str`
        :param org: Organization identifier. (required)
        :type  sddc: :class:`str`
        :param sddc: Sddc Identifier. (required)
        :type  esx_config: :class:`com.vmware.vmc.model_client.EsxConfig`
        :param esx_config: esxConfig (required)
        :type  action: :class:`str` or ``None``
        :param action: If = 'add', will add the esx. If = 'remove', will delete the
            esx/esxs bound to a single cluster (Cluster Id is mandatory for non
            cluster 1 esx remove). If = 'force-remove', will delete the esx
            even if it can lead to data loss (privileged operation).
            If = 'addToAll', will add esxs to all clusters in the SDDC
            (privileged). If = 'removeFromAll', will delete the esxs from all
            clusters in the SDDC (privileged). If = 'attach-diskgroup', will
            attach the provided diskgroups to a given host (privileged).
            If = 'detach-diskgroup', will detach the diskgroups of a given
            host (privileged). Default behaviour is 'add' (optional)
        :rtype: :class:`com.vmware.vmc.model_client.Task`
        :return: com.vmware.vmc.model.Task
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
            Unauthorized
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            The sddc is not in a state that's valid for updates
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Access not allowed to the operation for the current user
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Cannot find the SDDC with the given identifier
        """
        return self._invoke('create',
                            dict(org=org, sddc=sddc, esx_config=esx_config, action=action))
class Publicips(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.vmc.orgs.sddcs.publicips'
"""
Identifier of the service in canonical form.
"""
    def __init__(self, config):
        """
        :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        # Bind this interface to its generated stub class
        # (_PublicipsStub — presumably defined later in this generated module).
        VapiInterface.__init__(self, config, _PublicipsStub)
def create(self,
org,
sddc,
spec,
):
"""
Allocate public IPs for a SDDC
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type spec: :class:`com.vmware.vmc.model_client.SddcAllocatePublicIpSpec`
:param spec: allocation spec (required)
:rtype: :class:`com.vmware.vmc.model_client.Task`
:return: com.vmware.vmc.model.Task
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
Unauthorized
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
The sddc is not in a state that's valid for updates
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Access not allowed to the operation for the current user
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Cannot find the SDDC with given identifier
"""
return self._invoke('create',
{
'org': org,
'sddc': sddc,
'spec': spec,
})
def delete(self,
org,
sddc,
id,
):
"""
Free one public IP for a SDDC
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type id: :class:`str`
:param id: ip allocation id (required)
:rtype: :class:`com.vmware.vmc.model_client.Task`
:return: com.vmware.vmc.model.Task
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
Unauthorized
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
The sddc is not in a state that's valid for updates
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Access not allowed to the operation for the current user
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Cannot find the public IP with given IP address
"""
return self._invoke('delete',
{
'org': org,
'sddc': sddc,
'id': id,
})
def get(self,
org,
sddc,
id,
):
"""
Get one public IP for a SDDC
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type id: :class:`str`
:param id: ip allocation id (required)
:rtype: :class:`com.vmware.vmc.model_client.SddcPublicIp`
:return: com.vmware.vmc.model.SddcPublicIp
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
Unauthorized
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Cannot find the public IP with given IP address
"""
return self._invoke('get',
{
'org': org,
'sddc': sddc,
'id': id,
})
def list(self,
org,
sddc,
):
"""
list all public IPs for a SDDC
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:rtype: :class:`list` of :class:`com.vmware.vmc.model_client.SddcPublicIp`
:return:
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
Unauthorized
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Cannot find the SDDC with given identifier
"""
return self._invoke('list',
{
'org': org,
'sddc': sddc,
})
def update(self,
org,
sddc,
id,
action,
sddc_public_ip_object,
):
"""
Attach or detach a public IP to workload VM for a SDDC
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type id: :class:`str`
:param id: ip allocation id (required)
:type action: :class:`str`
:param action: Type of action as 'attach', 'detach', 'reattach', or 'rename'. For
            'attach', the public IP must not be attached and
'associated_private_ip' in the payload needs to be set with a
workload VM private IP. For 'detach', the public IP must be
attached and 'associated_private_ip' in the payload should not be
set with any value. For 'reattach', the public IP must be attached
and 'associated_private_ip' in the payload needs to be set with a
new workload VM private IP. For 'rename', the 'name' in the payload
needs to have a new name string. (required)
:type sddc_public_ip_object: :class:`com.vmware.vmc.model_client.SddcPublicIp`
:param sddc_public_ip_object: SddcPublicIp object to update (required)
:rtype: :class:`com.vmware.vmc.model_client.Task`
:return: com.vmware.vmc.model.Task
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
Unauthorized
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
The sddc is not in a state that's valid for updates
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Access not allowed to the operation for the current user
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
| |
or not this task supports offline caching.
"""
if not _VALID_TASK_NAME_REGEX.match(name):
raise ValueError(
"Task name '%s' contains invalid characters. Must match regex: %s" % (
name, _VALID_TASK_NAME_REGEX.pattern))
_validate_args(dataset_fn, ["split", "shuffle_files"])
for metric_fn in metric_fns:
_validate_args(metric_fn, ["targets", "predictions"])
self._name = name
self._dataset_fn = dataset_fn
self._text_preprocessor = (
[] if text_preprocessor is None else text_preprocessor)
self._token_preprocessor = (
[] if token_preprocessor is None else token_preprocessor)
self._sentencepiece_model_path = sentencepiece_model_path
self._metric_fns = metric_fns
# Use a pass-through if postprocess_fn is not provided
self._postprocess_fn = postprocess_fn or (lambda x, **unused_kwargs: x)
self._cache_dir = None
self._stats = {}
if isinstance(output_features, dict):
self._output_features = output_features
elif output_features is None or isinstance(output_features, list):
self._output_features = {
f: Feature() for f in output_features or _DEFAULT_FEATURE_KEYS
}
else:
raise ValueError("output_features must be a dict, list of str, or None")
self._output_features = collections.OrderedDict(
sorted(list(self._output_features.items()))
)
self._splits = splits
self._num_input_examples = num_input_examples
self._supports_caching = supports_caching
  @property
  def name(self):
    """The unique name this task was constructed with."""
    return self._name
  @property
  def postprocess_fn(self):
    """The postprocessing callable (an identity pass-through if none was given)."""
    return self._postprocess_fn
  @property
  def metric_fns(self):
    """List of metric functions with signature ``metric_fn(targets, predictions)``."""
    return self._metric_fns
  @property
  def sentencepiece_model_path(self):
    """Path to the SentencePiece model file used to build the vocabulary."""
    return self._sentencepiece_model_path
  @property
  def output_features(self):
    """OrderedDict mapping feature name to `Feature`, sorted by name."""
    return self._output_features
  @property
  def token_preprocessor(self):
    """The token preprocessor(s); an empty list if none were given."""
    return self._token_preprocessor
  @property
  def splits(self):
    """The splits recorded at construction time (may be None)."""
    return self._splits
def num_input_examples(self, split):
if self._num_input_examples is None:
return None
return self._num_input_examples[split]
def _preprocess_dataset(self, dataset, preprocessors, **preprocess_kwargs):
if not hasattr(preprocessors, "__iter__"):
preprocessors = [preprocessors]
for prep_fn in preprocessors:
dataset = prep_fn(dataset, **preprocess_kwargs)
return dataset
  def _validate_dataset(
      self,
      dataset,
      expected_output_type,
      expected_output_rank,
      error_label,
      ensure_no_eos=False):
    """Validates properties of a tf.data.Dataset, raising Exceptions if needed.
    Args:
      dataset: a tf.data.Dataset to validate.
      expected_output_type: a tf.dtype, the expected type of the model features.
      expected_output_rank: an int, the expected rank of the model features.
      error_label: a string, an identifier for the previous processing step to
        report in raised ValueErrors.
      ensure_no_eos: a bool, whether or not to verify that the model features
        contain no EOS tokens.
    Returns:
      a validated tf.data.Dataset.
    Raises:
      ValueError: if an output feature is missing or has the wrong dtype/rank.
    """
    # Graph-construction-time structure of the dataset (dtypes and shapes).
    types = tf.data.get_output_types(dataset)
    shapes = tf.data.get_output_shapes(dataset)
    # Every declared output feature must be present with the expected dtype
    # and rank; anything else is a preprocessing bug, reported with the
    # label of the step that produced it.
    for feat in self.output_features:
      if feat not in types:
        raise ValueError(
            "Task dataset is missing expected output feature after {label}: "
            "{feat}".format(label=error_label, feat=feat))
      if expected_output_type != types[feat]:
        raise ValueError(
            "Task dataset has incorrect type for feature '{feat}' after "
            "{label}: Got {actual}, expected {expected}".format(
                feat=feat, label=error_label, actual=types[feat].name,
                expected=expected_output_type.name))
      if expected_output_rank != len(shapes[feat]):
        raise ValueError(
            "Task dataset has incorrect rank for feature '{feat}' after "
            "{label}: Got {actual}, expected {expected}".format(
                feat=feat, label=error_label, actual=len(shapes[feat]),
                expected=expected_output_rank))
    # Runtime assertion: model features must not already contain the EOS
    # token (id 1); non-model features pass through untouched.
    def _ensure_no_eos(feat, v):
      if feat not in self.output_features:
        return v
      with tf.control_dependencies([
          tf.assert_none_equal(
              v, tf.constant(1, tf.int64),
              message="Feature '{feat}' unexpectedly contains EOS=1 token "
              "after {label}.".format(feat=feat, label=error_label))
      ]):
        return v
    if ensure_no_eos:
      dataset = dataset.map(
          lambda ex: {k: _ensure_no_eos(k, v) for k, v in ex.items()},
          num_parallel_calls=tf.data.experimental.AUTOTUNE)
    return dataset
def preprocess_text(self, dataset):
"""Preprocessed text dataset."""
dataset = self._preprocess_dataset(dataset, self._text_preprocessor)
dataset = self._validate_dataset(
dataset, expected_output_type=tf.string, expected_output_rank=0,
error_label="text preprocessing")
return dataset
  def preprocess_tokens(self, dataset, sequence_length):
    """Preprocesses tokenized dataset.
    Args:
      dataset: a tf.data.Dataset
      sequence_length: dict mapping feature key to int length for that feature
    Returns:
      a tf.data.Dataset
    """
    dataset = self._preprocess_dataset(
        dataset, self._token_preprocessor,
        sequence_length=sequence_length,
        vocabulary=self.get_vocabulary())
    # Token preprocessors must produce rank-1 int64 features and must not
    # emit EOS themselves (ensure_no_eos=True); EOS is appended below.
    dataset = self._validate_dataset(
        dataset,
        expected_output_type=tf.int64,
        expected_output_rank=1,
        error_label="token preprocessing",
        ensure_no_eos=True)
    # Trim and append EOS=1 token to model features.
    def _trim_and_append_eos(feat, v):
      # Non-model features pass through unchanged.
      if feat not in self.output_features:
        return v
      if self.output_features[feat].add_eos:
        # Reserve the final position within the length budget for EOS (id 1).
        return tf.concat([v[:sequence_length[feat]-1], [1]], axis=0)
      else:
        return v[:sequence_length[feat]]
    return dataset.map(
        lambda ex: {k: _trim_and_append_eos(k, v) for k, v in ex.items()},
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
  @property
  def cache_dir(self):
    """Returns the cache directory (or None), initializing if needed."""
    if not self._cache_dir:
      # See if cached data exists in any of the cache directories.
      potential_cache_dirs = [
          os.path.join(d, self.name) for d in _GLOBAL_CACHE_DIRECTORIES]
      for cache_dir in potential_cache_dirs:
        # A COMPLETED marker file signals a fully-written cache; the first
        # directory containing one wins.
        if tf.io.gfile.exists(os.path.join(cache_dir, "COMPLETED")):
          self._cache_dir = cache_dir
          logging.info("'%s' is cached at %s.", self.name, self.cache_dir)
          break
      if not self._cache_dir:
        logging.info(
            "'%s' does not exist in any task cache directories (searched %s).",
            self.name,
            potential_cache_dirs,
        )
    return self._cache_dir
  @property
  def supports_caching(self):
    """Whether or not this task supports offline caching."""
    return self._supports_caching
  def assert_cached(self):
    """Raises an assertion error if cached dataset does not exist.
    NOTE(review): this uses `assert`, which is stripped under `python -O`;
    confirm callers never run optimized before relying on this check.
    """
    assert self.cache_dir, (
        "'%s' does not exist in any of the task cache directories" % self.name)
  def get_cached_stats(self, split=tfds.Split.TRAIN):
    """Returns basic statistics for cached dataset.
    Raises:
      ValueError: if no stats file exists for the requested split.
    """
    self.assert_cached()
    # Stats are loaded lazily and memoized per split in self._stats.
    if split not in self._stats:
      stats_path = get_stats_path(self.cache_dir, split)
      if not tf.io.gfile.exists(stats_path):
        raise ValueError(
            "Stats do not exist for '%s' split: %s" % (self.name, split))
      with tf.io.gfile.GFile(stats_path) as f:
        self._stats[split] = json.load(f)
    return self._stats[split]
  def get_vocabulary(self):
    """Returns a SentencePieceVocabulary object using the Task's model."""
    # A new vocabulary object is built on every call — presumably cheap;
    # confirm before adding caching.
    return sentencepiece_vocabulary.SentencePieceVocabulary(
        self.sentencepiece_model_path)
  def get_dataset(
      self,
      sequence_length,
      split=tfds.Split.TRAIN,
      use_cached=False,
      shuffle=True,
      shuffle_buffer_size=_SHUFFLE_BUFFER_SIZE,
  ):
    """Returns a tf.data.Dataset from cache or generated on the fly.
    Args:
      sequence_length: dict mapping feature key to int length for that feature
      split: string, the split to return.
      use_cached: bool, whether to use the cached dataset instead of processing
        it on the fly. Defaults to False.
      shuffle: bool, whether to shuffle the dataset. Only used when generating
        on the fly (use_cached=False).
      shuffle_buffer_size: an integer
    Returns:
      A preprocessed, tokenized tf.data.Dataset.
    """
    if use_cached and not self.supports_caching:
      logging.warning(
          "Task '%s' does not support caching. Switching to on-the-fly "
          "preprocessing.", self.name)
      use_cached = False
    if use_cached:
      ds = self._get_cached_dataset(split, shuffle)
    else:
      ds = self._dataset_fn(split=split, shuffle_files=shuffle)
      ds = self.preprocess_text(ds)
      # Tokenize
      ds = encode_string_features(
          ds, self.get_vocabulary(), keys=self.output_features,
          copy_plaintext=True)
    # Small on-the-fly datasets are cached in memory to avoid repeating
    # text preprocessing and tokenization every epoch.
    if (not use_cached and self.num_input_examples(split) and
        self.num_input_examples(split) < _MAX_EXAMPLES_TO_MEM_CACHE):
      ds = ds.cache()
    # Post tokenization processing.
    ds = self.preprocess_tokens(ds, sequence_length)
    if shuffle:
      # Shuffle before mixing since preprocessor can output multiple
      # (correlated) examples per input.
      ds = ds.shuffle(shuffle_buffer_size)
    return ds
  def _get_cached_dataset(self, split=tfds.Split.TRAIN, shuffle=True):
    """Returns a tf.data.Dataset read from cached files."""
    self.assert_cached()
    with tf.io.gfile.GFile(get_info_path(self.cache_dir, split)) as f:
      split_info = json.load(f)
    # Use `FixedLenSequenceFeature` for sequences with variable length.
    def _feature_config(shape, dtype):
      if shape and shape[0] is None:
        return tf.io.FixedLenSequenceFeature(
            shape[1:], dtype, allow_missing=True)
      return tf.io.FixedLenFeature(shape, dtype)
    feature_desc = {
        feat: _feature_config(**desc)
        for feat, desc in split_info["features"].items()}
    # Shard filenames follow "<prefix>-*-of-*<num_shards>".
    ds = tf.data.Dataset.list_files(
        "%s-*-of-*%d" % (
            get_tfrecord_prefix(self.cache_dir, split),
            split_info["num_shards"]),
        shuffle=shuffle)
    # Interleave reads across shard files for throughput.
    ds = ds.interleave(
        tf.data.TFRecordDataset,
        cycle_length=16, block_length=16,
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds = ds.map(lambda ex: tf.parse_single_example(ex, feature_desc),
                num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # Keep small cached splits in memory to avoid re-reading TFRecords.
    if self.get_cached_stats(split)["examples"] <= _MAX_EXAMPLES_TO_MEM_CACHE:
      ds = ds.cache()
    return ds
class TfdsTask(Task):
  """A `Task` that uses TensorFlow Datasets to provide the input dataset."""
  def __init__(
      self,
      name,
      tfds_name,
      text_preprocessor,
      sentencepiece_model_path,
      metric_fns,
      tfds_data_dir=None,
      splits=None,
      supports_caching=True,
      **task_kwargs):
    """TfdsTask constructor.
    Args:
      name: string, a unique name for the Task. A ValueError will be raised if
        another task with this name is already registered.
      tfds_name: string, the name and version number of a TFDS dataset,
        optionally with a config.
      text_preprocessor: a function (or list of functions) that (each) takes in
        a tf.data.Dataset of string features and returns a tf.data.Dataset of
        string features. Can be set to None as a no-op. If a list is given,
        they will be executed sequentially.
      sentencepiece_model_path: string, path to a SentencePiece model file to
        use for tokenization.
      metric_fns: list(callable), list of metric functions with the signature
        metric_fn(targets, predictions) to use during evaluation.
      tfds_data_dir: string, an optional path to a specific TFDS data directory
        to use.
      splits: a list(string) of allowable splits to load, a dict mapping
        allowable canonical splits (e.g., 'validation') to TFDS splits or slices
        (e.g., 'train[':1%']), or None. The default, None, uses all available
        splits from the TFDS dataset info.
      supports_caching: bool, whether or not this task supports offline caching.
      **task_kwargs: dict, additional keyword arguments for the parent `Task`
        class.
    Raises:
      ValueError: if `tfds_name` does not include a version number.
    """
    if ":" not in tfds_name:
      raise ValueError(
          "TFDS name must contain a version number, got: %s" % tfds_name)
    # NOTE(review): LazyTfdsLoader presumably defers TFDS filesystem access
    # until first load — confirm against its definition. A dict `splits`
    # acts as a canonical-split -> TFDS-split mapping.
    self._tfds_dataset = LazyTfdsLoader(
        tfds_name,
        data_dir=tfds_data_dir,
        split_map=splits if isinstance(splits, dict) else None)
    # Closure handed to the parent Task as its dataset_fn.
    def dataset_fn(split, shuffle_files):
      return self._tfds_dataset.load(split, shuffle_files)
    super().__init__(
        name,
        dataset_fn=dataset_fn,
        splits=list(splits) if splits else None,
        text_preprocessor=text_preprocessor,
        sentencepiece_model_path=sentencepiece_model_path,
        metric_fns=metric_fns,
        supports_caching=supports_caching,
        **task_kwargs)
  @property
  def splits(self):
    """Override since we can't call `info.splits` until after init."""
    return self._splits or self._tfds_dataset.info.splits
  @property
  def tfds_dataset(self):
    """The underlying lazy TFDS loader."""
    return self._tfds_dataset
  def num_input_examples(self, split):
    # Delegate to TFDS metadata instead of the constructor-supplied counts.
    return self.tfds_dataset.size(split)
class TextLineTask(Task):
"""A `Task` that reads text lines as input.
Requires a text_processor to be passed that takes a tf.data.Dataset of
strings and returns a tf.data.Dataset of feature dictionaries.
e.g. preprocessors.preprocess_tsv()
"""
def __init__(
self,
name,
split_to_filepattern,
text_preprocessor,
sentencepiece_model_path,
metric_fns,
skip_header_lines=0,
**task_kwargs):
"""TextLineTask constructor.
Args:
name: string, a unique name for the Task. A ValueError will be raised if
another task with | |
#!/usr/bin/env python
import sys
# Check python version as script doesn't run on 3.x.
# BUG FIX (two related defects):
# 1. The Python-2-only imports (ConfigParser, urllib's py2 API) used to run
#    BEFORE this check, so on Python 3 the script died with ImportError and
#    never printed this hint. The check now runs first; py2-only modules are
#    imported after it.
# 2. "str" + sys.argv[1:] raised TypeError (cannot concatenate str and list);
#    the arguments are now joined into a single string.
if sys.version_info >= (3, 0):
    args = " ".join(sys.argv[1:])
    sys.stdout.write("Sorry, requires Python 2.x, not Python 3.x. You can try:\n")
    sys.stdout.write("$ sudo /usr/bin/python2.7 /usr/local/packager/packager.py " + args + "\n")
    sys.stdout.write("If that didn't work either, try:\n")
    sys.stdout.write("$ sudo /usr/bin/python2.6 /usr/local/packager/packager.py " + args + "\n")
    sys.exit(1)
import tarfile, getopt, urllib, ConfigParser, os, os.path
import re, itertools
class Cli(object):
    """Interactions with the command line."""
    def run(self):
        """
        Read the command line options and execute the appropriate
        commands.
        Commands are discovered by naming convention: every global named
        'Command<Name>' contributes its long options and handles the
        lower-cased command '<name>'.
        """
        # Get superset of all long options
        long_options = set()
        for option_arr in [globals()[module]().options().keys()
                           for module in globals()
                           if module[0:7] == 'Command']:
            long_options.update(option_arr)
        long_options = list(long_options)
        options, args = getopt.gnu_getopt(sys.argv[1:], '', long_options)
        if len(args) == 0:
            self.usage()
        command = args[0]
        params = args[1:]
        # Map e.g. 'install' -> CommandInstall; unknown commands show usage.
        cmd_class = globals().get('Command' + command.capitalize(), None)
        if cmd_class is None:
            self.usage()
        cmd_obj = cmd_class()
        if len(args) <= cmd_obj.minargs():
            self.usage()
        # Verify that no invalid options were used for this command
        cmd_options = cmd_obj.options().keys()
        for opt in options:
            # opt is a tuple ('--format', '')
            if opt[0][2:] not in cmd_options and opt[0][2:] + '=' not in cmd_options:
                print "ERROR: Option %s is not valid for command %s\n" % (opt[0], command)
                self.usage()
        cmd_obj.run(self, params, options)
    def usage(self):
        """Prints usage information and exits (never returns)."""
        msg = "Usage: %s [options] command packages...\n\nAvailable commands with their options:"
        print msg % sys.argv[0]
        prefix = " "
        for command, cmd in [(module[7:].lower(), globals()[module]())
                             for module in globals()
                             if module[0:7] == 'Command']:
            print prefix, command + ': ', cmd.info()
            for opt, opt_info in cmd.options().iteritems():
                print ' ' * 6 + '--' + opt + ': ' + opt_info
        sys.exit(1)
class BaseCommand(object):
    """Base class for all commands.
    Subclasses must be named 'Command<Name>' so Cli can discover them.
    """
    def info(self):
        """Returns a one-line description for usage output."""
        pass
    def options(self):
        """
        Returns a dictionary with all allowed command line options for this
        command.
        Keys ending in '=' take a value (getopt long-option convention).
        """
        pass
    def minargs(self):
        """Returns the number of free arguments required."""
        return 1
    def run(self, cli, params, options):
        """Executes this command."""
        pass
class CommandInfo(BaseCommand):
    """Prints package metadata in 'brief' or 'full' format."""
    def info(self):
        return "Gives information about a package."
    def options(self):
        return {
            'format=': "Format for the output. Can be 'brief' or 'full'. 'full' is the default."
        }
    def run(self, cli, params, options):
        # Default output format; --format overrides it.
        format = 'full'
        for opt in options:
            if opt[0] == '--format':
                format = opt[1]
        for param in params:
            registry = PackageRegistry()
            pkg = Package(param, registry)
            info = pkg.info()
            if info:
                if format == 'brief':
                    self.__briefOutput(info)
                else:
                    self.__fullOutput(info)
    def __briefOutput(self, info):
        # One line per package: "<name>: <version>".
        print info.get('name', 'NO NAME') + ': ' + info.get('version', 'NO VERSION')
    def __fullOutput(self, info):
        # Defines the order in which we want to show the keys
        keys = ['name', 'version']
        # Calculate max key length for nice tabulation
        maxlen = 0
        for key in info:
            if len(key) > maxlen:
                maxlen = len(key)
        msg = " %" + str(maxlen) + "s: %s"
        # Output all known keys first in correct order.
        # Then the other keys in internal dictionary order.
        # NOTE(review): the `del` below mutates the dict returned by
        # pkg.info() (the Package's cached info) — confirm that is intended.
        for key in keys:
            if key in info:
                print msg % (key, info[key])
                del info[key]
        for key, value in info.iteritems():
            print msg % (key, value)
class CommandInstall(BaseCommand):
    """Installs one or more packages plus their dependencies."""
    def info(self):
        return "Installs a package and all its dependencies."
    def options(self):
        return {
            'root=': "Root directory to install the package into. Useful for testing."
        }
    def run(self, cli, params, options):
        # Install into '/' unless --root overrides it.
        root = '/'
        for opt in options:
            if opt[0] == '--root':
                root = opt[1]
        registry = PackageRegistry()
        for param in params:
            pkg = Package(param, registry)
            pkg.install(root, registry)
        # Persist the registry once, after all packages are processed.
        registry.save()
class CommandList(BaseCommand):
    """Lists installed packages with their versions."""
    def info(self):
        return "Lists all installed packages."
    def options(self):
        return {}
    def minargs(self):
        # 'list' takes no free arguments.
        return 0
    def run(self, cli, params, options):
        registry = PackageRegistry()
        # Calculate max key length for nice tabulation
        maxlen = 0
        for key in registry.packages:
            if len(key) > maxlen:
                maxlen = len(key)
        msg = "%" + str(maxlen) + "s: %s"
        for pkg, versions in registry.packages.iteritems():
            print msg % (pkg, ", ".join(versions))
class Package(object):
"""
Represents a single package with the information.
Transparently handles fetching the package from network.
"""
def __init__(self, name, registry):
self.__name = name
self.__filename = ''
self.__file = None
self.__fetch(name, registry)
def info(self):
"""Returns information about this package as a dictionary."""
member = None
if not self.__file:
print "Did or could not download package: %s" % self.__name
return
for key in self.__file.getnames():
modkey = key.replace('./', '')
if modkey == 'pkg/info':
member = self.__file.getmember(key)
break
if member is None:
print "ERROR: pkg/info file not found in package %s" % self.__name
return
elif not member.isreg():
print "ERROR: Found pkg/info in package %s, but it's not a file." % self.__name
return
member = self.__file.extractfile(member)
self.__info = self.__parseInfo(member)
return self.__info
def install(self, root, registry):
"""Installs this package, resolving any dependencies if necessary."""
info = self.info()
if not info:
return;
print 'Installing package %s into root %s' % (self.__name, root)
curr_version = registry.isInstalled(info['name'], info['version'])
self._installDependencies(root, registry)
if curr_version:
print "Package %s is already installed at version %s. You wanted to install version %s." % (
info['name'], curr_version, info['version'])
return
if not os.path.exists(root):
os.makedirs(root, 0755)
if not os.path.isdir(root):
print "ERROR: %s is not a directory. Aborting installation."
return
# Special files we need later
fnames = {}
# Extract pre-install script and execute it
for preinstall in ('./pkg/pre-install', 'pkg/pre-install'):
print preinstall
try:
member = self.__file.getmember(preinstall)
except KeyError:
continue
fnames['pre-install'] = self.__getPostScript(self.__file,
preinstall, info['name'])
self.__executePostScript(fnames, 'pre-install')
for key in self.__file.getnames():
modkey = key.replace('./', '').replace('//', '/')
member = self.__file.getmember(key)
if modkey == '/' or modkey.find('/._') > -1 or modkey[0:4] == 'pkg/':
# Internal file
if modkey == 'pkg/post-install':
fnames['post-install'] = self.__getPostScript(self.__file, key, info['name'])
elif modkey == 'pkg/post-initial':
fnames['post-initial'] = self.__getPostScript(self.__file, key, info['name'])
else:
# Just ignore control files
pass
elif member.isdir() and os.path.exists(os.path.join(root, key)):
print "Skipping existing directory %s" % modkey
else:
print "Extracting %s" % modkey
dest = os.path.join(root, key)
if os.path.islink(dest) or (os.path.exists(dest) and
member.issym() or member.islnk()):
os.unlink(dest)
self.__file.extract(key, root)
self.__executePostScript(fnames, 'post-initial', not self.__name in registry.packages)
self.__executePostScript(fnames, 'post-install')
registry.register(info['name'], info['version'])
def _installDependencies(self, root, registry):
"""Install all required dependencies for this package."""
self.__installDependenciesRPM(root, registry)
self.__installDependenciesPackager(root, registry)
def __installDependenciesPackager(self, root, registry, deps_seen=[]):
"""
Install all required dependencies of the packager format for this
package.
"""
info = self.info()
if not info:
return
deps = info.get('depends', [])
for dep in deps:
pkg_name, version = self.__dependencyInstalled(dep, registry)
if version:
if pkg_name not in deps_seen:
pkg = Package(pkg_name, registry)
deps_seen.append(pkg_name)
pkg._installDependencies(root, registry)
else:
print "=" * 80
print "Installing dependency: %s" % dep
pkg = Package(pkg_name, registry)
pkg.install(root, registry)
print "=" * 80
def __installDependenciesRPM(self, root, registry):
info = self.info()
if not info:
return
deps = info.get('depends-rpm', [])
if not deps:
return
deps = ' '.join(deps)
print "Installing RPM dependencies: %s" % deps
retval = os.system("yum -y install " + deps)
if retval != 0:
msg = "ERROR: The yum command returned an invalid return code: %d." % retval
print msg
raise Exception(msg)
def __dependencyInstalled(self, package, registry):
"""
Checks if the given dependency is installed.
Interprets the dependency strings with optional version number.
"""
vmatch = re.match('(.+)\((<|<=|>|>=|=)([^\)]+)\)', package)
if vmatch:
package = vmatch.group(1).strip()
operator = vmatch.group(2).strip()
version = vmatch.group(3).strip()
return (package, registry.isInstalled(package, version, operator))
else:
# Need the latest version
package = package.strip()
if not package in registry.packages:
return (package, False)
else:
pkg = Package(package, registry)
info = pkg.info()
if not info:
return (package, True)
version = info['version']
return (package, registry.isInstalled(package, version, '>='))
def __parseInfo(self, file):
"""Parses an info document passed in as a file object."""
options = {
'depends': [],
'depends-rpm': [],
}
for line in file.readlines():
line = line.strip()
if line == '':
continue
key, value = line.split(':')
key = key.strip()
curr = options.get(key, None)
if curr is None:
options[key] = value.strip()
elif isinstance(curr, list):
options[key] = curr + [val.strip() for val in value.split(',')]
else:
print "ERROR: Key '%s' was repeated but can only appear once in the info file." % key
return options
| |
guessChar == "Q" or guessChar == "q" :
if word[1] == "Q" or word[1] == "q" :
toGuess = toGuess[:1] + "q" + toGuess[2:]
if word[2] == "Q" or word[2] == "q" :
toGuess = toGuess[:2] + "q" + toGuess[3:]
if word[3] == "Q" or word[3] == "q" :
toGuess = toGuess[:3] + "q" + toGuess[4:]
if word[1] != "Q" and word[1] != "q" and word[2] != "Q" and word[2] != "q" and word[3] != "Q" and word[3] != "q" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "q" + ", "
# Handle guesses for the letters r..z with one generic routine instead of
# nine copy-pasted blocks.  Behavior matches the original unrolled code:
# positions 1-3 of `word` are compared against both cases of the letter,
# matched positions are revealed in `toGuess`, and a complete miss prints
# "Wrong!", bumps the error count and records the letter.
for _letter in "rstuvwxyz":
    if guessChar == _letter or guessChar == _letter.upper():
        _hit = False
        for _pos in (1, 2, 3):
            if word[_pos] == _letter or word[_pos] == _letter.upper():
                toGuess = toGuess[:_pos] + _letter + toGuess[_pos + 1:]
                _hit = True
        if not _hit:
            print("\nWrong!\n")
            numberOfErrors = numberOfErrors + 1
            wrongChars = wrongChars + _letter + ", "
# Table-driven gallows drawing: one entry per miss count (0-3), each the
# full seven-row picture.  Counts of 4 or more print nothing here, exactly
# as in the original chain of ifs.
_gallows = {
    0: ("\t___________", "\t| |", "\t|", "\t|", "\t|", "\t|", "\t|"),
    1: ("\t___________", "\t| |", "\t| O", "\t|", "\t|", "\t|", "\t|"),
    2: ("\t___________", "\t| |", "\t| O", "\t| |", "\t|", "\t|", "\t|"),
    3: ("\t___________", "\t| |", "\t| O", "\t| /|", "\t|", "\t|", "\t|"),
}
if numberOfErrors in _gallows:
    for _row in _gallows[numberOfErrors]:
        print(_row)
print("\n\tWord: " + toGuess)
print("\tMisses: " + wrongChars)
if "_" in toGuess and not loser :
guessChar = ""
while not guessChar.isalpha() :
guessChar = input("\n---------------------------------\nEnter your letter: ")
_ = os.system('cls' if os.name=='nt' else 'clear')
if guessChar == "A" or guessChar == "a" :
if word[1] == "A" or word[1] == "a" :
toGuess = toGuess[:1] + "a" + toGuess[2:]
if word[2] == "A" or word[2] == "a" :
toGuess = toGuess[:2] + "a" + toGuess[3:]
if word[3] == "A" or word[3] == "a" :
toGuess = toGuess[:3] + "a" + toGuess[4:]
if word[1] != "A" and word[1] != "a" and word[2] != "A" and word[2] != "a" and word[3] != "A" and word[3] != "a" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "a" + ", "
if guessChar == "B" or guessChar == "b" :
if word[1] == "B" or word[1] == "b" :
toGuess = toGuess[:1] + "b" + toGuess[2:]
if word[2] == "B" or word[2] == "b" :
toGuess = toGuess[:2] + "b" + toGuess[3:]
if word[3] == "B" or word[3] == "b" :
toGuess = toGuess[:3] + "b" + toGuess[4:]
if word[1] != "B" and word[1] != "b" and word[2] != "B" and word[2] != "b" and word[3] != "B" and word[3] != "b" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "b" + ", "
if guessChar == "C" or guessChar == "c" :
if word[1] == "C" or word[1] == "c" :
toGuess = toGuess[:1] + "c" + toGuess[2:]
if word[2] == "C" or word[2] == "c" :
toGuess = toGuess[:2] + "c" + toGuess[3:]
if word[3] == "C" or word[3] == "c" :
toGuess = toGuess[:3] + "c" + toGuess[4:]
if word[1] != "C" and word[1] != "c" and word[2] != "C" and word[2] != "c" and word[3] != "C" and word[3] != "c" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "c" + ", "
if guessChar == "D" or guessChar == "d" :
if word[1] == "D" or word[1] == "d" :
toGuess = toGuess[:1] + "d" + toGuess[2:]
if word[2] == "D" or word[2] == "d" :
toGuess = toGuess[:2] + "d" + toGuess[3:]
if word[3] == "D" or word[3] == "d" :
toGuess = toGuess[:3] + "d" + toGuess[4:]
if word[1] != "D" and word[1] != "d" and word[2] != "D" and word[2] != "d" and word[3] != "D" and word[3] != "d" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "d" + ", "
if guessChar == "E" or guessChar == "e" :
if word[1] == "E" or word[1] == "e" :
toGuess = toGuess[:1] + "e" + toGuess[2:]
if word[2] == "E" or word[2] == "e" :
toGuess = toGuess[:2] + "e" + toGuess[3:]
if | |
are:
    # 1. Replacing the userid and password with another userid and password
# 2. Replacing the ssh_key with another ssh_key (may or may not have a password)
# 3. Replacing the userid and password with an ssh_key
    # 4. Replacing the ssh_key with a userid and password
# 5. Not changing anything just the ip address
# Figure out and set old_auth and new_auth to either userpass or key
if device.key is not None:
old_auth = "key"
else:
old_auth = "userpass"
if ssh_key is not None:
new_auth = "key"
elif userid is not None or password is not None:
new_auth = "userpass"
else:
new_auth = None
device_type = device.resource_type
if userid:
temp_userid = userid
else:
temp_userid = device.userid
temp_password = None
temp_ssh_key = None
if password:
temp_password = password
else:
if device.password is not None:
if new_auth is None: # only lookup old password if auth hasn't changed
temp_password = persistent_mgr.decrypt_data(device.password)
if ssh_key:
temp_ssh_key = ssh_key
else:
if device.key:
key = device.key
temp_ssh_key = key.value
if key.password:
if new_auth is None: # only lookup old password if auth hasn't changed
                    temp_password = persistent_mgr.decrypt_data(key.password)
if new_auth == "key":
rc, message = _change_device_key(device, ip_address, temp_userid,
temp_ssh_key, temp_password)
elif new_auth == "userpass":
rc, message = _change_device_userpass(device, ip_address, temp_userid, temp_password)
else:
rc, message = _validate(ip_address, temp_userid, temp_password,
device_type, temp_ssh_key)
if rc != 0:
# return error if unable to validate with currently set info
return rc, message
else:
device.status = constants.access_status.SUCCESS.value
device.statusTime = datetime.utcnow()
if new_label is not None and new_label != device.label:
# have new label and the new label differs from the existing label for
# the target device
rc, message = validate_label(new_label)
if rc != 0:
# another device already exists with the label
logging.error("%s::failed to change device properties, a device with the new label (%s)"
" already exists.", _method_, new_label)
message = _("Failed to change device properties, a device with the new label"
" (%(label)s) already exists.") % {'label': new_label}
return 104, message
device.label = new_label
if rackid is not None:
device.rack_id = rackid
if rack_location is not None:
device.eia_location = rack_location
# pre_save hooks
hooks = _load_inventory_device_plugins()
hook_name = 'unknown' # keeps pylint happy
try:
for hook_name, hook_plugin in hooks.items():
hook_plugin.change_device_pre_save(device, old_device_info)
except Exception as e:
logging.exception(e)
message = _("Error in plugin (%s). Unable to change device: Reason: %s") % (hook_name, e)
return 102, message
# commit the change.
logging.info(
"%s: commit device changes now. device info: %s", _method_, device)
persistent_mgr.update_device(session)
if old_auth == "key" and new_auth == "userpass":
# Need to delete ssh key from database
key_info = device.key
persistent_mgr.delete_keys(session, [key_info])
# post_save hooks
try:
for hook_name, hook_plugin in hooks.items():
hook_plugin.change_device_post_save(device, old_device_info)
except Exception as e:
logging.exception(e)
message = push_message(message, _("After device properties were changed, "
"Error in plugin (%s): %s") % (hook_name, e))
# return success
logging.info("EXIT %s device properties changed", _method_)
message = push_message(message, _("Changed device successfully."))
session.close()
return 0, message
@entry_exit(exclude_index=[], exclude_name=[])
def list_resources(labels=None, isbriefly=False, device_types=None, deviceids=None,
                   list_device_id=False, is_detail=False, racks=None):
    """Get a list of devices based on the information present in the arguments.

    Device IDs override labels; either will limit the response to those items.

    Args:
        labels: list of labels to get device info for.
        isbriefly: if True, return only a brief data set (just labels).
        device_types: list of device types to limit returned devices to.
        deviceids: list of device ids to get device info for (overrides labels).
        list_device_id: set True to include the device ID in the returned data.
        is_detail: set True to include detailed information (overrides isbriefly).
        racks: list of rack identifiers containing rack ids.

    Returns:
        (rc, result) where rc is 0 on success (101 when labels matched nothing)
        and result is a dictionary with entries:
            message: any message returned for the processing of this method.
            column_tags: list of tag names returned per device.
            column_titles: display titles corresponding to column_tags.
            resources: list of device information dictionaries.
    """
    _method_ = 'resource_mgr.list_resource'
    logging.debug("ENTRY %s", _method_)
    all_tags = ['resourceid', 'label', 'rackid', 'rack-eia-location', 'machine-type-model',
                'serial-number', 'ip-address', 'hostname', 'userid', 'version', 'architecture',
                'resource-type', 'status', 'statusTime', 'web_url', 'auth_method', 'capabilities',
                'roles']
    brief_tags = ['label']
    result = {}
    session = persistent_mgr.create_database_session()
    try:
        # decide the set of data to return; detail always wins over brief
        if is_detail:
            isbriefly = False
        tags = brief_tags if isbriefly else all_tags
        # include device id info in returned data if requested.
        # BUG FIX: guard against inserting a duplicate 'resourceid' column --
        # the full tag list already starts with it.
        if list_device_id and 'resourceid' not in tags:
            tags.insert(0, 'resourceid')
        # get devices based on device ids, labels, or everything
        if deviceids is not None:
            devices, _not_found_values = persistent_mgr.get_devices_by_ids(
                session, deviceids)
        elif labels is not None:
            devices, _not_found_values = persistent_mgr.get_devices_by_labels(
                session, labels)
        else:
            devices = persistent_mgr.get_all_devices(session)
        # check if labels returned anything if specified
        if not devices and labels:
            message = _("No device labeled as \'%s\'") % labels
            result['message'] = message
            logging.debug("EXIT %s label not found", _method_)
            return 101, result
        logging.debug("%s: before filtering", _method_)
        # possibly filter by device types if specified
        if device_types:
            devices = [d for d in devices if d.resource_type in device_types]
        # possibly filter by rack ids
        if racks:
            devices = [d for d in devices if d.rack_id in racks]
        logging.debug("%s: before add table info", _method_)
        # add table column info
        result['column_tags'] = tags
        result['column_titles'] = [_get_devicetag_text_id(tag) for tag in tags]
        logging.debug("%s: before add devices to output", _method_)
        result_devices = []
        plugins = _load_device_plugins()
        for device in devices:
            device_dict = device.to_dict_obj()
            device_output = {}
            for tag in tags:
                tag_value = device_dict.get(tag)
                # commas in these fields would break delimited consumers;
                # guard against None before replacing
                if tag in ('machine-type-model', 'serial-number') and tag_value:
                    tag_value = tag_value.replace(',', ' ')
                device_output[tag] = tag_value
            # add the web url for the device
            plugin = plugins[device.resource_type]
            device_output["web_url"] = plugin.get_web_url(device.address)
            # report whether it's logging and monitoring capable
            device_output["capabilities"] = plugin.get_capabilities()
            # figure out the roles this device plays
            device_output["roles"] = [
                device_role.role
                for device_role in persistent_mgr.get_device_roles_by_device_id(
                    session, device.resource_id)]
            # auth method is implied by the presence of an ssh key
            if device.key:
                auth_method = constants.auth_method.SSH_KEY_AUTHENTICATION.value
            else:
                auth_method = constants.auth_method.USERID_PASSWORD.value
            device_output["auth_method"] = auth_method
            # add final form of device info to result
            result_devices.append(device_output)
        result['resources'] = result_devices
        result['message'] = ""
        logging.debug("EXIT %s normal", _method_)
        return 0, result
    finally:
        # BUG FIX: close the session on every exit path -- the original leaked
        # it on the label-not-found early return.
        session.close()
@entry_exit(exclude_index=[], exclude_name=[])
def remove_resource(labels=None, all_devices=False, deviceids=None):
'''Remove devices based on information present in the arguments
If the option deviceIDs is specified, then the options labels and all_devices
will be ignored. If the option labels is specified then the all_devices
option will be ignored.
Args:
labels List of labels of devices to remove
all_devices indicate all devices to be removed.
deviceids list of device ids to remove
Returns:
ret return code
message message if provided with return code
'''
not_found_values = []
session = persistent_mgr.create_database_session()
if labels or deviceids:
all_devices = False
# get devices based on labels and device ids
if deviceids is not None:
devices, not_found_values = persistent_mgr.get_devices_by_ids(session, deviceids)
elif labels is not None:
devices, not_found_values = persistent_mgr.get_devices_by_labels(session, labels)
elif all_devices:
devices = persistent_mgr.get_all_devices(session)
else:
message = \
("Error: remove_device called without specifying to remove either a label, id or all")
return -1, message
hooks = _load_inventory_device_plugins()
hook_name = 'unknown' # keeps pylint happy
message = None
remove_devices = []
not_remove_devices = []
for device in devices:
try:
for hook_name, hook_plugin in hooks.items():
hook_plugin.remove_device_pre_save(device)
except Exception as e:
logging.exception(e)
message += _("Error in plugin (%s). Unable to remove resource: Reason: %s") % (hook_name, e)
not_remove_devices.append(device)
continue
remove_devices.append(device)
ret = 0
if len(remove_devices) > 0:
persistent_mgr.delete_devices(session, remove_devices)
labels_message = get_labels_message(remove_devices)
message = push_message(message, _("devices removed: %s.") % labels_message)
# Call hook for remove_device_post_save
for device in remove_devices:
try:
for hook_name, hook_plugin in hooks.items():
hook_plugin.remove_device_post_save(device)
except Exception as e:
logging.exception(e)
message = push_message(message, _("After device was removed. Error in plugin "
"(%s): %s") % (hook_name, e))
if len(not_remove_devices) > 0:
labels_message = get_labels_message(not_remove_devices)
message = | |
<filename>egg/zoo/sum_game/play.py
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import egg.core as core
from egg.core import Callback, Interaction, PrintValidationEvents
from egg.zoo.sum_game.architectures import RecoReceiver, Sender, SenderOracle
from egg.zoo.sum_game.data_readers import SumDataset
# the following section specifies parameters that are specific to our games: we will also inherit the
# standard EGG parameters from https://github.com/facebookresearch/EGG/blob/master/egg/core/util.py
def get_params(params):
    """Build the game-specific argument parser and defer to EGG's core.init.

    Game-specific options are layered on top of the standard EGG parameters
    inherited from egg/core/util.py.

    :param params: raw command-line parameter list to parse.
    :return: the parsed options namespace produced by ``core.init``.
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument

    # --- game type -----------------------------------------------------------
    add("--game_type", type=str, default="reco",
        help="Selects whether to play a reco(nstruction) or discri(mination) game (default: reco)")

    # --- input data and how it is processed ----------------------------------
    add("--train_data", type=str, default=None, help="Path to the train data")
    add("--validation_data", type=str, default=None, help="Path to the validation data")
    add("--n_range", type=int, default=None,
        help="Range of the input integer x in [0, N]")
    add("--validation_batch_size", type=int, default=0,
        help="Batch size when processing validation data, whereas training data batch_size is controlled by batch_size (default: same as training data batch size)")

    # --- training method -----------------------------------------------------
    add("--mode", type=str, default="rf",
        help="Selects whether Reinforce or Gumbel-Softmax relaxation is used for training {rf, gs} (default: rf)")
    add("--loss", type=str, default="mse",
        help="Supervision loss: xent or mse")
    add("--temperature", type=float, default=1.0,
        help="GS temperature for the sender, only relevant in Gumbel-Softmax (gs) mode (default: 1.0)")
    add("--sender_entropy_coeff", type=float, default=1e-1,
        help="Reinforce entropy regularization coefficient for Sender, only relevant in Reinforce (rf) mode (default: 1e-1)")

    # --- agent architectures -------------------------------------------------
    add("--sender_cell", type=str, default="rnn",
        help="Type of the cell used for Sender {rnn, gru, lstm} (default: rnn)")
    add("--receiver_cell", type=str, default="rnn",
        help="Type of the cell used for Receiver {rnn, gru, lstm} (default: rnn)")
    add("--sender_hidden", type=int, default=10,
        help="Size of the hidden layer of Sender (default: 10)")
    add("--receiver_hidden", type=int, default=10,
        help="Size of the hidden layer of Receiver (default: 10)")
    add("--sender_embedding", type=int, default=10,
        help="Output dimensionality of the layer that embeds symbols produced at previous step in Sender (default: 10)")
    add("--receiver_embedding", type=int, default=10,
        help="Output dimensionality of the layer that embeds the message symbols for Receiver (default: 10)")

    # --- script output -------------------------------------------------------
    add("--print_validation_events", default=False, action="store_true",
        help="If this flag is passed, at the end of training the script prints the input validation data, the corresponding messages produced by the Sender, and the output probabilities produced by the Receiver (default: do not print)")
    add("--analyse", default=False, action="store_true",
        help="If this flag is passed, analyse output at the end")
    add("--balanced_ce", type=float, default=-1,
        help="Weight the CE to balance the training between frequent and unfrequent labels. -1 to disable")

    return core.init(parser, params)
def main(params):
opts = get_params(params)
if opts.validation_batch_size == 0:
opts.validation_batch_size = opts.batch_size
print(opts, flush=True)
print('Design loss...')
if opts.balanced_ce > 0:
sample_all = [[i+j for j in range(opts.n_range)] for i in range(opts.n_range)]
sample_all = [item for sublist in sample_all for item in sublist]
h, b = np.histogram(sample_all, list(range(opts.n_range*2)))
balance_weights = torch.from_numpy(1/h**opts.balanced_ce).float()
print('Balance weights: ', balance_weights)
def loss(
sender_input, _message, _receiver_input, receiver_output, labels, _aux_input
):
if opts.loss == 'xent':
# in the sum game case, accuracy is computed by comparing the index with highest score in Receiver output (a distribution of unnormalized
# probabilities over target poisitions) and the corresponding label read from input, indicating the ground-truth position of the target
acc = (receiver_output.argmax(dim=1) == labels).detach().float()
# We also compute the absolute difference between predition and target, as an additional metric
dist = (receiver_output.argmax(dim=1) - labels).detach().float().abs()
# similarly, the loss computes cross-entropy between the Receiver-produced target-position probability distribution and the labels
if opts.balanced_ce > 0:
loss = F.cross_entropy(receiver_output, labels, weight=balance_weights.to(labels.device), reduction="none")
else:
loss = F.cross_entropy(receiver_output, labels, reduction="none")
return loss, {"acc": acc, "dist": dist}
# again, see data_readers.py in this directory for the AttValRecoDataset data reading class
print('Building dataset...')
train_loader = DataLoader(
SumDataset(
path=opts.train_data,
n_range=opts.n_range,
),
batch_size=opts.batch_size,
shuffle=True,
num_workers=1,
)
test_loader = DataLoader(
SumDataset(
path=opts.validation_data,
n_range=opts.n_range,
),
batch_size=opts.validation_batch_size,
shuffle=False,
num_workers=1,
)
print('Initialising game...')
# the number of features for the Receiver (input) is given by 2*n_range because
# they are fed concat 1-hot representations of the input vectors
# It is similar for the sender as max sum is N+N=2N
n_features = 2 * opts.n_range
if opts.loss == 'xent':
n_features_rec = n_features - 1
elif opts.loss == 'mse':
n_features_rec = 1
# we define here the core of the receiver for the discriminative game, see the architectures.py file for details
# this will be embedded in a wrapper below to define the full architecture
receiver = RecoReceiver(n_features=n_features_rec, n_hidden=opts.receiver_hidden)
# we are now outside the block that defined game-type-specific aspects of the games: note that the core Sender architecture
# (see architectures.py for details) is shared by the two games (it maps an input vector to a hidden layer that will be use to initialize
# the message-producing RNN): this will also be embedded in a wrapper below to define the full architecture
sender = Sender(n_hidden=opts.sender_hidden, n_features=n_features)
# now, we instantiate the full sender and receiver architectures, and connect them and the loss into a game object
# the implementation differs slightly depending on whether communication is optimized via Gumbel-Softmax ('gs') or Reinforce ('rf', default)
if opts.mode.lower() == "gs":
# in the following lines, we embed the Sender and Receiver architectures into standard EGG wrappers that are appropriate for Gumbel-Softmax optimization
# the Sender wrapper takes the hidden layer produced by the core agent architecture we defined above when processing input, and uses it to initialize
# the RNN that generates the message
sender = core.RnnSenderGS(
sender,
vocab_size=opts.vocab_size,
embed_dim=opts.sender_embedding,
hidden_size=opts.sender_hidden,
cell=opts.sender_cell,
max_len=opts.max_len,
temperature=opts.temperature,
)
# the Receiver wrapper takes the symbol produced by the Sender at each step (more precisely, in Gumbel-Softmax mode, a function of the overall probability
# of non-eos symbols upt to the step is used), maps it to a hidden layer through a RNN, and feeds this hidden layer to the
# core Receiver architecture we defined above (possibly with other Receiver input, as determined by the core architecture) to generate the output
receiver = core.RnnReceiverGS(
receiver,
vocab_size=opts.vocab_size,
embed_dim=opts.receiver_embedding,
hidden_size=opts.receiver_hidden,
cell=opts.receiver_cell,
)
game = core.SenderReceiverRnnGS(sender, receiver, loss)
# callback functions can be passed to the trainer object (see below) to operate at certain steps of training and validation
# for example, the TemperatureUpdater (defined in callbacks.py in the core directory) will update the Gumbel-Softmax temperature hyperparameter
# after each epoch
callbacks = [core.TemperatureUpdater(agent=sender, decay=0.9, minimum=0.1)]
else: # NB: any other string than gs will lead to rf training!
# here, the interesting thing to note is that we use the same core architectures we defined above, but now we embed them in wrappers that are suited to
# Reinforce-based optmization
# if opts.max_len>1:
sender = core.RnnSenderReinforce(
sender,
vocab_size=opts.vocab_size,
embed_dim=opts.sender_embedding,
hidden_size=opts.sender_hidden,
cell=opts.sender_cell,
max_len=opts.max_len,
)
receiver = core.RnnReceiverDeterministic(
receiver,
vocab_size=opts.vocab_size,
embed_dim=opts.receiver_embedding,
hidden_size=opts.receiver_hidden,
cell=opts.receiver_cell,
)
game = core.SenderReceiverRnnReinforce(
sender,
receiver,
loss,
sender_entropy_coeff=opts.sender_entropy_coeff,
receiver_entropy_coeff=0,
)
callbacks = []
# we are almost ready to train: we define here an optimizer calling standard pytorch functionality
optimizer = core.build_optimizer(game.parameters())
# in the following statement, we finally instantiate the trainer object with all the components we defined (the game, the optimizer, the data
# and the callbacks)
if opts.print_validation_events == True:
# we add a callback that will print loss and accuracy after each training and validation pass (see ConsoleLogger in callbacks.py in core directory)
# if requested by the user, we will also print a detailed log of the validation pass after full training: look at PrintValidationEvents in
# language_analysis.py (core directory)
trainer = core.Trainer(
game=game,
optimizer=optimizer,
train_data=train_loader,
validation_data=test_loader,
callbacks=callbacks
+ [
core.ConsoleLogger(print_train_loss=True, as_json=True),
core.PrintValidationEvents(n_epochs=opts.n_epochs),
core.MessageEntropy(print_train = True, is_gumbel = opts.mode.lower() == "gs")
],
)
else:
trainer = core.Trainer(
game=game,
optimizer=optimizer,
train_data=train_loader,
validation_data=test_loader,
callbacks=callbacks
+ [core.ConsoleLogger(print_train_loss=True, as_json=True)],
)
# and | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import cherrypy
import collections
import datetime
import json
import pymongo
import sys
import traceback
import types
from . import docs
from girder import events, logger
from girder.constants import SettingKey, TerminalColor
from girder.models.model_base import AccessException, ValidationException
from girder.utility.model_importer import ModelImporter
from girder.utility import config
_importer = ModelImporter()
def _cacheAuthUser(fun):
    """
    Decorator for getCurrentUser that memoizes the authenticated user on the
    active cherrypy request object, so the authentication procedure runs at
    most once per request; later calls return the cached value.
    """
    def inner(self, *args, **kwargs):
        # A previous call during this request already resolved the user.
        if hasattr(cherrypy.request, 'girderUser'):
            return cherrypy.request.girderUser

        user = fun(self, *args, **kwargs)
        # The wrapped function may return a tuple; cache only its first
        # element (presumably the user document -- the caller still receives
        # the full return value untouched).
        cherrypy.request.girderUser = user[0] if type(user) is tuple else user
        return user
    return inner
def loadmodel(map, model, plugin='_core', level=None):
    """
    Meta-decorator that converts kwargs holding ObjectIDs into the loaded
    documents themselves.

    :param map: mapping of raw kwarg name -> converted kwarg name.
    :param model: name of the model used to load each document.
    :param plugin: plugin providing the model (default: core).
    :param level: if given, load with this access level for the current user.
    """
    _model = _importer.model(model, plugin)

    def meta(fun):
        def wrapper(self, *args, **kwargs):
            # py2-era code: dict.iteritems is intentional here.
            for raw, converted in map.iteritems():
                if level is None:
                    doc = _model.load(kwargs[raw])
                else:
                    doc = _model.load(id=kwargs[raw], level=level,
                                      user=self.getCurrentUser())
                if doc is None:
                    raise RestException('Invalid {} id ({}).'
                                        .format(model, kwargs[raw]))
                kwargs[converted] = doc
                del kwargs[raw]
            return fun(self, *args, **kwargs)
        return wrapper
    return meta
def _createResponse(val):
    """
    Helper that encodes the response according to the requested "Accepts"
    header from the client. Currently supports "application/json" and
    "text/html".

    :param val: JSON-serializable response value; ``default=str`` stringifies
        anything the json module can't encode natively.
    """
    accepts = cherrypy.request.headers.elements('Accept')
    for accept in accepts:
        if accept.value == 'application/json':
            # JSON explicitly requested; fall through to the default output.
            break
        elif accept.value == 'text/html':  # pragma: no cover
            # Pretty-print and HTML-ify the response for the browser
            cherrypy.response.headers['Content-Type'] = 'text/html'
            resp = json.dumps(val, indent=4, sort_keys=True,
                              separators=(',', ': '), default=str)
            # NOTE(review): the first replacement's target looks like a
            # literal U+00A0 non-breaking space; if it is actually a plain
            # ASCII space this call is a no-op -- confirm (an HTML entity
            # such as '&nbsp;' may have been intended here).
            resp = resp.replace(' ', ' ').replace('\n', '<br />')
            resp = '<div style="font-family:monospace;">%s</div>' % resp
            return resp

    # Default behavior will just be normal JSON output. Keep this
    # outside of the loop body in case no Accept header is passed.
    cherrypy.response.headers['Content-Type'] = 'application/json'
    return json.dumps(val, default=str)
def endpoint(fun):
    """
    Decorator for REST HTTP method endpoints.

    Converts the return value of the underlying method to the appropriate
    output format (via ``_createResponse``) and sets the relevant response
    headers. Exception policy, as implemented below:

    * ``RestException`` -> the exception's own ``code`` (user/client error).
    * ``AccessException`` -> 401 if nobody is logged in, 403 otherwise.
    * ``ValidationException`` -> 400, including the offending ``field``.
    * ``cherrypy.HTTPRedirect`` -> re-raised untouched.
    * anything else -> 500, plus a traceback outside production mode.

    If you want a streamed response, simply return a generator function
    from the inner method.
    """
    def endpointDecorator(self, *args, **kwargs):
        try:
            # NOTE(review): the handler is called with the collected args
            # tuple and kwargs dict as two positional parameters (not
            # re-splatted); the routing layer appears to pass path segments
            # and query params this way -- confirm against handleRoute before
            # "fixing" this to *args/**kwargs.
            val = fun(self, args, kwargs)

            if isinstance(val, types.FunctionType):
                # If the endpoint returned a function, we assume it's a
                # generator function for a streaming response.
                cherrypy.response.stream = True
                return val()

        except RestException as e:
            # Handle all user-error exceptions from the rest layer
            cherrypy.response.status = e.code
            val = {'message': e.message, 'type': 'rest'}
            if e.extra is not None:
                val['extra'] = e.extra
        except AccessException as e:
            # Permission exceptions should throw a 401 or 403, depending
            # on whether the user is logged in or not
            if self.getCurrentUser() is None:
                cherrypy.response.status = 401
            else:
                cherrypy.response.status = 403
                logger.exception('403 Error')
            val = {'message': e.message, 'type': 'access'}
        except ValidationException as e:
            cherrypy.response.status = 400
            val = {'message': e.message, 'type': 'validation'}
            if e.field is not None:
                val['field'] = e.field
        except cherrypy.HTTPRedirect:
            # Redirects are normal control flow; let cherrypy handle them.
            raise
        except:
            # These are unexpected failures; send a 500 status
            # (bare except is deliberate here: this is the outermost REST
            # boundary and must never let an exception escape unformatted).
            logger.exception('500 Error')
            cherrypy.response.status = 500
            t, value, tb = sys.exc_info()
            val = {'message': '%s: %s' % (t.__name__, str(value)),
                   'type': 'internal'}
            curConfig = config.getConfig()
            if curConfig['server']['mode'] != 'production':
                # Unless we are in production mode, send a traceback too
                val['trace'] = traceback.extract_tb(tb)

        return _createResponse(val)
    return endpointDecorator
class RestException(Exception):
    """
    Exception type for client errors in the REST layer.

    Throw a RestException in the case of any sort of incorrect request
    (i.e. user/client error). Login and permission failures should set a
    403 code; almost all other validation errors should use status 400,
    which is the default.
    """
    def __init__(self, message, code=400, extra=None):
        super(RestException, self).__init__(message)
        self.code = code    # HTTP status reported by the endpoint decorator
        self.extra = extra  # optional extra payload echoed to the client
class Resource(ModelImporter):
exposed = True
    def route(self, method, route, handler, nodoc=False, resource=None):
        """
        Define a route for your REST resource.

        :param method: The HTTP method, e.g. 'GET', 'POST', 'PUT'
        :type method: str
        :param route: The route, as a list of path params relative to the
        resource root. Elements of this list starting with ':' are assumed to
        be wildcards.
        :type route: list
        :param handler: The method to be called if the route and method are
        matched by a request. Wildcards in the route will be expanded and
        passed as kwargs with the same name as the wildcard identifier.
        :type handler: function
        :param nodoc: If your route intentionally provides no documentation,
        set this to True to disable the warning on startup.
        :type nodoc: bool
        :param resource: Name under which the route is documented; defaults to
        ``self.resourceName`` or, failing that, the handler's module name.
        """
        if not hasattr(self, '_routes'):
            # Lazily build the route table: HTTP method -> number of path
            # tokens -> ordered list of (route, handler) pairs.
            self._routes = collections.defaultdict(
                lambda: collections.defaultdict(list))

        # Insertion sort to maintain routes in required order.
        def shouldInsert(a, b):
            """
            Return bool representing whether route a should go before b. Checks
            by comparing each token in order and making sure routes with
            literals in forward positions come before routes with wildcards
            in those positions.
            """
            for i in xrange(0, len(a)):
                if a[i][0] != ':' and b[i][0] == ':':
                    return True
            return False

        nLengthRoutes = self._routes[method.lower()][len(route)]
        for i in xrange(0, len(nLengthRoutes)):
            if shouldInsert(route, nLengthRoutes[i][0]):
                nLengthRoutes.insert(i, (route, handler))
                break
        else:
            # No more-specific slot found; the new route goes last.
            nLengthRoutes.append((route, handler))

        # Now handle the api doc if the handler has any attached
        if resource is None and hasattr(self, 'resourceName'):
            resource = self.resourceName
        elif resource is None:
            resource = handler.__module__.rsplit('.', 1)[-1]

        if hasattr(handler, 'description'):
            if handler.description is not None:
                docs.addRouteDocs(
                    resource=resource, route=route, method=method,
                    info=handler.description.asDict(), handler=handler)
        elif not nodoc:
            routePath = '/'.join([resource] + list(route))
            # Python 2 print statement -- this module predates py3 support.
            print TerminalColor.warning(
                'WARNING: No description docs present for route {} {}'
                .format(method, routePath))
def handleRoute(self, method, path, params):
"""
Match the requested path to its corresponding route, and calls the
handler for that route with the appropriate kwargs. If no route
matches the path requested, throws a RestException.
This method fires two events for each request if a matching route is
found. The names of these events are derived from the route matched by
the request. As an example, if the user calls GET /api/v1/item/123,
the following two events would be fired:
rest.get.item/:id.before
would be fired prior to calling the default API function, and
rest.get.item/:id.after
would be fired after the route handler returns. The query params are
passed in the info of the before and after event handlers as
event.info['params'], and the matched route tokens are passed in
as dict items of event.info, so in the previous example event.info would
also contain an 'id' key with the value of 123. For endpoints with empty
sub-routes, the trailing slash is omitted from the event name, e.g.:
rest.post.group.before
:param method: The HTTP method of the current request.
:type method: str
:param path: The path params of the request.
:type path: list
"""
if not self._routes:
raise Exception('No routes defined for resource')
method = method.lower()
for route, handler in self._routes[method][len(path)]:
kwargs = self._matchRoute(path, route)
if kwargs is not False:
kwargs['params'] = params
# Add before call for the API method. Listeners can return
# their own responses by calling preventDefault() and
# adding a response on the event.
if hasattr(self, 'resourceName'):
resource = self.resourceName
else:
resource = handler.__module__.rsplit('.', 1)[-1]
routeStr = '/'.join((resource, '/'.join(route))).rstrip('/')
eventPrefix = '.'.join(('rest', method, routeStr))
event = events.trigger('.'.join((eventPrefix, 'before')),
kwargs)
if event.defaultPrevented and len(event.responses) > 0:
val = event.responses[0]
else:
val = handler(**kwargs)
# Fire the after-call event that has a chance to augment the
# return value of the API method that was called. You can
# reassign the return value completely by adding a response | |
# -*- coding: utf-8 -*-
# TODO: This thing is a beast! Refactor.
import cmd
import os
import sys
import time
import traceback
from typing import Optional, List, Iterator, Tuple, Dict
from pyramids.categorization import Category
from pyramids.model import Model
from pyramids.rules.parse_rule import ParseRule
try:
from graphviz import Digraph
except ImportError:
Digraph = None
from pyramids.grammar import GrammarParser
from pyramids.trees import Parse, TreeNodeSet, ParseTreeUtils
try:
# noinspection PyPep8Naming
import cProfile as profile
except ImportError:
import profile
from pyramids.batching import Attempt, Result, ModelBatchController, FeedbackReceiver, Failure
from pyramids.config import ModelConfig
from pyramids.loader import ModelLoader
from pyramids.parsing import ParsingAlgorithm
from pyramids.generation import GenerationAlgorithm
from pyramids.sample_utils import Input, Target, SampleSet, SampleUtils
__author__ = '<NAME>'
__all__ = [
'ParserCmd',
'repl',
]
function_to_profile = None
class ParserCmd(cmd.Cmd):
    def __init__(self, model_loader: ModelLoader):
        """Interactive shell for debugging a pyramids parser model.

        :param model_loader: loader used to read the model from disk (and
            later to save/standardize it).
        """
        cmd.Cmd.__init__(self)
        self._model_loader = model_loader
        self._model = model_loader.load_model()
        self.prompt = '% '
        self._simple = True  # print parses in short form
        self._show_broken = False  # whether broken parses are listed
        self._parser_state = None  # state from the most recent parse run
        self._parses = []  # type: List[Parse]
        self._whole_parses = 0  # count of parses with no extra pieces/gaps
        self._parse_index = 0  # index of the currently displayed parse
        self._fast = False  # stop as soon as a single parse is found
        self._timeout_interval = 5  # timeout interval (presumably seconds -- confirm)
        self._emergency_mode = False
        self._benchmark_path = None
        self._benchmark: Optional[SampleSet] = None  # loaded benchmark samples
        self._benchmark_dirty = False  # benchmark has unsaved changes
        self._benchmark_emergency_disambiguations = 0
        self._benchmark_parse_timeouts = 0
        self._benchmark_disambiguation_timeouts = 0
        self._benchmark_time = 0.0
        self._benchmark_tests_completed = 0
        self._benchmark_update_time = time.time()
        self._last_input_text = None
        # NOTE(review): do_load is defined outside this chunk; presumably it
        # (re)initializes model-dependent state -- confirm.
        self.do_load()
    @property
    def model(self) -> Model:
        """The currently loaded parser model."""
        return self._model
    @property
    def model_loader(self) -> ModelLoader:
        """Loader used to load, save and standardize the model."""
        return self._model_loader
@property
def max_parse_index(self) -> int:
if self._show_broken:
return len(self._parses) - 1 if self._parses else 0
return self._whole_parses - 1 if self._whole_parses else 0
@property
def parses_available(self) -> bool:
return bool(self._parser_state if self._show_broken else self._whole_parses)
    @property
    def last_input_text(self) -> Optional[str]:
        """Text of the most recent parse request, or None before the first."""
        return self._last_input_text
def onecmd(self, line: str) -> Optional[bool]:
# noinspection PyBroadException
try:
return cmd.Cmd.onecmd(self, line)
except Exception:
traceback.print_exc()
def precmd(self, line: str) -> str:
# Pre-processes command lines before they are executed.
line = line.strip()
if not line:
return line
command = line.split()[0]
if command == '+':
return 'good' + line[1:]
if command == '-':
return 'bad' + line[1:]
if command == '++':
return 'best' + line[2:]
if command == '--':
return 'worst' + line[2:]
return line
def postcmd(self, stop: Optional[bool], line: str) -> Optional[bool]:
# Post-processes command results before they are passed back to the
# command interpreter.
print('') # Print a blank line for clarity
return stop
    def emptyline(self) -> Optional[bool]:
        """Bare <enter> advances to the next parse (delegates to do_next)."""
        # Called when the user just hits enter with no input.
        return self.do_next()
    def default(self, line: str) -> Optional[bool]:
        """Treat any unrecognized command line as text to be parsed."""
        # Called when the command is unrecognized. By default, we assume
        # it's a parse request.
        return self.do_parse(line)
    @staticmethod
    def do_shell(line: str) -> None:
        """Evaluate or execute a raw Python line (commands starting with "!").

        SECURITY NOTE: eval/exec of arbitrary user input -- acceptable only
        because this is an interactive developer debugging shell; never expose
        this to untrusted input.
        """
        # Called when the command starts with "!".
        try:
            print(eval(line))
        except SyntaxError:
            # Statements (assignments, imports, ...) are not eval-able.
            exec(line)
def do_quit(self, line: str) -> Optional[bool]:
"""Save scoring features and exit the parser debugger."""
if line:
print("'quit' command does not accept arguments.")
return
self.do_save() # Save what we're doing first.
return True # Indicate we're ready to stop.
def do_exit(self, line: str) -> Optional[bool]:
"""Alias for quit."""
if line:
print("'exit' command does not accept arguments.")
return
return self.do_quit(line)
def do_bye(self, line: str) -> Optional[bool]:
"""Alias for quit."""
if line:
print("'bye' command does not accept arguments.")
return
return self.do_quit(line)
def do_done(self, line: str) -> Optional[bool]:
"""Alias for quit."""
if line:
print("'done' command does not accept arguments.")
return
return self.do_quit(line)
@staticmethod
def do_cls(_line: str) -> None:
"""Clears the screen."""
if sys.platform == 'nt':
os.system('cls')
else:
os.system('clear')
@staticmethod
def do_clear(_line: str) -> None:
"""Clears the screen."""
if sys.platform == 'nt':
os.system('cls')
else:
os.system('clear')
def do_standardize(self, line: str) -> None:
    """Standardizes the parser's files.

    With no argument, uses the loaded model's config (or loads the
    default model config); otherwise treats the argument as a config
    file path.
    """
    if not line:
        if self._model and self._model.config_info:
            config_info = self._model.config_info
        else:
            config_info = self._model_loader.load_model_config()
    else:
        # Treat the argument as a path to a model configuration file.
        config_info = ModelConfig(line)
    self._model_loader.standardize_model(config_info)
def do_short(self, line: str) -> None:
    """Causes parses to be printed in short form instead of long form."""
    # cmd passes the remainder of the input line; this command takes none.
    if line:
        print("'short' command does not accept arguments.")
    else:
        self._simple = True
        print("Parses will now be printed in short form.")
def do_broken(self, line: str) -> None:
    """Causes parses that have more pieces or gaps than necessary to be
    listed."""
    # cmd passes the remainder of the input line; this command takes none.
    if line:
        print("'broken' command does not accept arguments.")
    else:
        self._show_broken = True
        print("Parses with more pieces or gaps than necessary will now be listed.")
def do_whole(self, line: str) -> None:
    """Causes only parses that have no more pieces or gaps than necessary to be listed."""
    # cmd passes the remainder of the input line; this command takes none.
    if line:
        print("'whole' command does not accept arguments.")
    else:
        self._show_broken = False
        # Hiding broken parses can shrink the listing, so clamp the cursor.
        self._parse_index = min(self._parse_index, self.max_parse_index)
        print("Only parses with no more pieces or gaps than necessary will now be listed.")
def do_long(self, line: str) -> None:
    """Causes parses to be printed in long form instead of short form."""
    # cmd passes the remainder of the input line; this command takes none.
    if line:
        print("'long' command does not accept arguments.")
    else:
        self._simple = False
        print("Parses will now be printed in long form.")
def do_fast(self, line: str) -> None:
    """Causes parsing to stop as soon as a single parse is found."""
    # cmd passes the remainder of the input line; this command takes none.
    if line:
        print("'fast' command does not accept arguments.")
    else:
        self._fast = True
        print("Parsing will now stop as soon as a single parse is found.")
def do_complete(self, line: str) -> None:
    """Causes parsing to continue until all parses have been identified."""
    # cmd passes the remainder of the input line; this command takes none.
    if line:
        print("'complete' command does not accept arguments.")
    else:
        self._fast = False
        print("Parsing will now continue until all parses have been identified.")
def do_load(self, line: str = '') -> None:
    """Save scoring features and load a parser from the given configuration file."""
    self.do_save()  # Persist current work before replacing the model.
    if not line:
        # Default to re-reading the currently loaded model's config file.
        line = self._model.config_info.config_file_path
    if not os.path.isfile(line):
        print("File not found: " + line)
        return
    config_info = ModelConfig(line)
    self._model = self._model_loader.load_model(config_info)
    # Parser state belongs to the old model; drop it.
    self._parser_state = None
    # Load the benchmark samples that accompany the model, if present.
    self._benchmark = (SampleUtils.load(config_info.benchmark_file)
                       if os.path.isfile(config_info.benchmark_file)
                       else None)
    self._benchmark_dirty = False
def do_reload(self, line: str = '') -> None:
    """Save scoring features and reload the last configuration file provided."""
    if line:
        print("'reload' command does not accept arguments.")
        return
    self.do_save()
    # NOTE(review): when no model is loaded this passes '' to do_load,
    # which then dereferences self._model to resolve the default path --
    # confirm that path is reachable only with a model loaded.
    self.do_load(self._model.config_info.config_file_path
                 if self._model and self._model.config_info
                 else '')
def do_save(self, line: str = '') -> None:
    """Save scoring features."""
    if line:
        print("'save' command does not accept arguments.")
        return
    if self._model is not None:
        self._model_loader.save_scoring_features(self._model)
    # Persist benchmark samples only when they changed since the last save.
    # NOTE(review): this dereferences self._model.config_info -- assumes a
    # dirty benchmark implies a loaded model; confirm.
    if self._benchmark_dirty:
        SampleUtils.save(self._benchmark, self._model.config_info.benchmark_file)
        self._benchmark_dirty = False
def do_discard(self, line: str = '') -> None:
    """Discard scoring features."""
    if line:
        print("'discard' command does not accept arguments.")
        return
    # Re-read features from disk, throwing away unsaved in-memory changes.
    self._model_loader.load_scoring_features(self._model)
    # Likewise restore the benchmark from its saved file (or clear it).
    config_info = self._model.config_info
    if os.path.isfile(config_info.benchmark_file):
        self._benchmark = SampleUtils.load(config_info.benchmark_file)
    else:
        self._benchmark = None
    self._benchmark_dirty = False
@staticmethod
def do_compare(line: str) -> None:
    """Compare two categories to determine if either contains the other."""
    definitions = [definition for definition in line.split() if definition]
    if len(definitions) == 0:
        print("Nothing to compare.")
        return
    if len(definitions) == 1:
        print("Nothing to compare with.")
        return
    categories = set()
    for definition in definitions:
        # NOTE(review): line.find() locates the first occurrence, so the
        # reported offset is wrong when the same definition text appears
        # more than once on the line -- cosmetic (error positions) only.
        categories.add(GrammarParser.parse_category(definition,
                                                    offset=line.find(definition) + 1))
    # Sort for a deterministic report order.
    categories = sorted(categories, key=str)
    # Pairwise containment report over all distinct category pairs.
    for category1 in categories:
        for category2 in categories:
            if category1 is not category2:
                contains_phrase = [" does not contain ", " contains "][category2 in category1]
                print(str(category1) + contains_phrase + str(category2))
def do_timeout(self, line: str) -> None:
    """Set (or display) the timeout duration for parsing."""
    if not line:
        print("Parsing timeout duration is currently " + str(self._timeout_interval) +
              " seconds")
        return
    # Prefer an int (it prints without a trailing '.0'); fall back to float.
    for converter in (int, float):
        try:
            self._timeout_interval = converter(line)
        except ValueError:
            continue
        print("Set parsing timeout duration to " + str(self._timeout_interval) + " seconds.")
        return
    print("Timeout duration could not be set to this value.")
def _do_parse(self, line: str, timeout: float, new_parser_state: bool = True,
              restriction_category: Category = None, fast: bool = None,
              emergency: bool = False) -> Tuple[bool, bool, bool]:
    """Parse *line* and cache the resulting disambiguations on self.

    *timeout* is an absolute time.time() deadline, not a duration.
    Returns (emergency_disambiguation, parse_timed_out,
    disambiguation_timed_out).
    """
    if fast is None:
        fast = self._fast  # Fall back to the session-wide fast/complete toggle.
    if new_parser_state or self._parser_state is None:
        self._parser_state = ParsingAlgorithm.new_parser_state(self._model)
    parse = ParsingAlgorithm.parse(self._parser_state, line, fast, timeout, emergency)
    # Timed out if we are already past the deadline when parsing returns.
    parse_timed_out = time.time() >= timeout
    emergency_disambiguation = False
    if restriction_category:
        parse = parse.restrict(restriction_category)
    self._parses = [disambiguation
                    for (disambiguation, rank)
                    in parse.get_sorted_disambiguations(None, None, timeout)]
    if not self._parses:
        # No ranked disambiguations were produced in time; fall back to a
        # single emergency disambiguation so the caller always has one.
        emergency_disambiguation = True
        self._parses = [parse.disambiguate()]
    disambiguation_timed_out = time.time() >= timeout
    # "Whole" parses match the best (first) parse in both tree count and
    # total gap size; everything else has extra pieces or gaps.
    self._whole_parses = len([disambiguation
                              for disambiguation in self._parses
                              if ((len(disambiguation.parse_trees)
                                   == len(self._parses[0].parse_trees)) and
                                  (disambiguation.total_gap_size()
                                   == self._parses[0].total_gap_size()))])
    self._parse_index = 0
    self._last_input_text = line
    return emergency_disambiguation, parse_timed_out, disambiguation_timed_out
def _handle_parse(self, line: str, new_parser_state: bool = True,
restriction_category: Category = None, fast: bool = None,
emergency: bool = False) -> None:
"""Handle parsing on behalf of do_parse, do_as, and do_extend."""
if not line:
print("Nothing to parse.")
return
start_time = time.time()
timeout = start_time + self._timeout_interval
| |
# Configure logging before anything else so import-time code already logs
# through the same handler.  Records are routed to Flask's WSGI error stream.
from logging.config import dictConfig
dictConfig({
    'version': 1,
    'formatters': {'default': {
        'format': '%(levelname)s/%(module)s %(message)s',
    }},
    'handlers': {'wsgi': {
        'class': 'logging.StreamHandler',
        'stream': 'ext://flask.logging.wsgi_errors_stream',
        'formatter': 'default'
    }},
    'root': {
        'level': 'INFO',
        'handlers': ['wsgi']
    }
})
# Runtime dependencies; `commons` and `aql` provide the app's shared helpers
# and the database query layer.
import time,os,sched,random,threading,traceback,datetime
import re,base64
import zlib
import requests as r
# import Identicon
# Identicon._crop_coner_round = lambda a,b:a # don't cut corners, please
import mimetypes as mt
from commons import *
from aql import *
# Block until the database accepts connections, then repair any view damage
# before the first request can be served.
wait_for_database_online()
fix_view_loss(None)
from medals import get_medals, get_user_medals
import flask
from flask import Flask, g, abort # session
from flask import render_template, request, send_from_directory, make_response
from api import api_registry, get_categories_info, get_url_to_post, get_url_to_post_given_details
from api import *
from session import save_session,load_session
from app import app
import sb1024_encryption
def route(r):
    """Decorator factory: register the decorated view on the Flask app
    under URL rule ``r``.

    A random endpoint name is generated so the same view function can be
    registered for several rules without endpoint-name collisions.
    """
    def rr(f):
        app.add_url_rule(r, str(random.random()), f)
        # BUG FIX: return the view function so the decorated name keeps
        # pointing at it; previously the decorator returned None, rebinding
        # the decorated name to None.
        return f
    return rr
import glob
# hash all resource files see if they change
def hash_these(path_arr, pattern='*.*'):
    """Concatenate every file matching *pattern* under each path in
    *path_arr* and return an etag-style hash of the combined contents.

    Used to fingerprint static resources at startup so their URLs can be
    cache-busted whenever any file changes.
    """
    resource_files_contents = b''
    for path in path_arr:
        # glob is imported at module level; the per-iteration re-import the
        # original did was redundant.  Sort the matches so the hash is
        # deterministic -- glob's order is filesystem-dependent, so the
        # same files could previously hash differently across hosts.
        files = sorted(glob.glob(path + pattern))
        for fn in files:
            print_info('checking file:', fn)
            resource_files_contents += readfile(fn)
    resource_files_hash = calculate_etag(resource_files_contents)
    return resource_files_hash
# Fingerprint CSS/JS (and PNG images) at startup; the hashes serve as
# cache-busting version tags for the static resources.
resource_files_hash = hash_these(['templates/css/', 'templates/js/'])
print_info('resource_files_hash:', resource_files_hash)
images_resources_hash = hash_these(['templates/images/'], '*.png')
print_info('images_resources_hash:', images_resources_hash)
def route_static(frompath, topath, maxage=1800):
    """Serve files under local directory *topath* at URL prefix /*frompath*/.

    Adds ETag handling, Content-Type/-Encoding guessing, and (unless
    maxage == 0) a Cache-Control header with stale-while-revalidate.
    """
    @route('/'+frompath+'/<path:path>')
    def _(path):
        cc = topath+'/'+path
        # NOTE(security/review): `path` comes straight from the URL and is
        # joined with '/'; a '../' sequence could escape *topath*.  Werkzeug
        # usually collapses dot segments before routing, but confirm -- or
        # switch to flask.send_from_directory, which rejects traversal.
        if not os.path.exists(cc):
            abort(404, 'File not found')
        # return make_response('File not found', 404)
        # with open(cc,'rb') as f:
        # b = f.read()
        b = readfile(cc)
        resp = make_response(b, 200)
        # presumably etag304 short-circuits to a 304 when the client's
        # If-None-Match matches -- verify in commons.
        resp = etag304(resp)
        type, encoding = mt.guess_type(cc)
        if encoding:
            resp.headers['Content-Encoding'] = encoding
        if type:
            resp.headers['Content-Type'] = type
        if maxage!=0:
            resp.headers['Cache-Control']= \
                f'max-age={str(maxage)}, stale-while-revalidate=86400'
        return resp
# Register the static-file routes: URL prefix -> directory (+ cache maxage).
route_static('static', 'static')
route_static('images', 'templates/images', 3600*12)
route_static('css', 'templates/css', 3600)
route_static('js', 'templates/js', 3600)
route_static('highlight', 'templates/highlight', 3600*6)
route_static('jgawb', 'jgawb', 1800)
route_static('jicpb', 'jicpb', 1800)
@app.route('/favicon.ico')
def favicon():
    """Serve the site favicon (a PNG) with a ten-day cache lifetime."""
    payload = readfile('templates/images/favicon_new_pressed.png')
    resp = etag304(make_response(payload, 200))
    resp.headers['Content-Type'] = 'image/png'
    resp.headers['Cache-Control'] = 'max-age=864000'
    return resp
def create_all_necessary_indices():
    """Ensure every persistent ArangoDB index the app's queries rely on exists."""
    # create index
    def ci(coll,aa):
        # One non-unique persistent index per field-list in aa.
        for a in aa:
            print('creating index on',coll,a)
            aqlc.create_index(coll, type='persistent', fields=a,
                              unique=False,sparse=False)
    # create index with unique=True
    def ciut(coll, a):
        # NOTE: creates a SEPARATE single-field unique index per list entry,
        # not one composite index over the whole list.
        for ai in a:
            print('creating index on',coll,[ai])
            aqlc.create_index(coll, type='persistent', fields=[ai], unique=True, sparse=False)
    ciut('threads', ['tid'])
    # NOTE(review): given ciut's per-field behavior, the next two calls
    # create single-field UNIQUE indices on 'bigcats', 'pinned' and 'cid' --
    # a unique index on 'pinned' or 'cid' alone looks unintended (many
    # threads share those values); confirm whether composite non-unique
    # ci() indices were meant.
    ciut('threads', ['bigcats','pinned'])
    ciut('threads', ['cid','pinned'])
    # indexgen presumably expands (filter-prefixes x sort-fields) into
    # composite field lists -- see aql module.
    ci('threads', indexgen(
        [['delete'],['uid'],['delete','cid'],['delete','bigcats[*]'],['delete','tags[*]'],['tags[*]']],
        ['t_u','t_c','nreplies','vc','votes','t_hn','amv','nfavs'],
    ))
    ci('threads', indexgen([[]], ['t_hn_u','t_next_hn_update','title','pinned']))
    ci('posts', indexgen(
        [['tid'],['uid'],['tid','delete']],
        ['t_c','vc','votes','nfavs','t_hn'],
    ))
    ciut('categories', ['cid'])
    ciut('users',['uid'])
    ci('users',[['invitation'],['t_next_pr_update']])
    ci('users',indexgen(
        [[],['delete']],
        ['t_c','nposts','nthreads','nlikes','nliked','name','pagerank']
    ))
    ci('invitations',indexgen(
        [['uid','active'],['uid']],
        ['t_c'],
    ))
    ci('votes',indexgen(
        [
            ['type','id','vote','uid'],
            ['type','id','uid'],
            ['type','id','vote'],
            ['uid','vote'],
            ['to_uid','vote'],
        ],
        ['t_c'],
    ))
    ci('conversations',[['convid']])
    ci('conversations',indexgen(
        [['uid'],['to_uid'],['uid','to_uid']],
        ['t_u'],
    ))
    ci('messages',indexgen([['convid'],['to_uid'],[]],['t_c']))
    ci('notifications',indexgen([['to_uid'],['to_uid','from_uid','why','url']],['t_c']))
    ci('avatars',[['uid']])
    ci('admins',[['name']])
    ci('aliases',[['is','name'],['name','is']])
    ci('operations',indexgen([['target'],[]], ['t_c']))
    ci('followings', indexgen([['uid','follow'],['to_uid','follow']],['t_c']))
    ci('favorites', indexgen([['uid']], ['t_c','pointer']))
    ci('polls', [['t_c']])
    ci('poll_votes', [['pollid', 'uid'],['pollid', 'choice']])
    ci('blacklist',[['uid','to_uid'],['uid','enabled'],['to_uid','enabled']])
    ci('comments',indexgen([[],['parent'],['parent','deleted'],['uid']],['t_c']))
def is_integer(i):
    """True when *i* is an int (note: bools pass too, since bool subclasses int)."""
    return isinstance(i, int)

def is_string(i):
    """True when *i* is a str."""
    return isinstance(i, str)
class Paginator:
def __init__(self,):
    # Stateless helper: every query parameter is passed per call.
    pass
def get_user_list(self,
                  sortby='uid',
                  order='desc',
                  pagesize=50,
                  pagenumber=1,
                  path=''):
    """Return (userlist, pagination_obj) for the user listing, sorted
    server-side by *sortby*/*order* and paginated."""
    assert sortby in ['t_c','uid','nthreads','nposts','nlikes','nliked','name','pagerank'] # future can have more.
    # sortby = 't_c'
    assert order in ['desc', 'asc']
    pagenumber = max(1, pagenumber)
    start = (pagenumber-1)*pagesize
    count = pagesize
    mode = 'user'
    # NOTE(review): the query below also returns each user's invitation
    # ip_addr and salt -- presumably this listing is admin-only; confirm
    # before exposing it more widely.
    querystring_complex = '''
for u in users
sort u.{sortby} {order}
limit {start},{count}
//let stat = {{
// nthreads:length(for t in threads filter t.uid==u.uid return t),
// nposts:length(for p in posts filter p.uid==u.uid return p),
//}}
let invite = (for i in invitations filter i._key==u.invitation return i)[0]
let invited_by = invite.uid
let ip_addr = invite.ip_addr
let salt = invite.salt
return merge(u, {{invited_by, ip_addr, salt}}) //merge(u, stat)
'''.format(sortby=sortby, order=order,
           start=start, count=count,)
    querystring_simple = 'return length(for u in users return 1)'
    num_users = aql(querystring_simple, silent=True)[0]
    userlist = aql(querystring_complex, silent=True)
    pagination_obj = self.get_pagination_obj(num_users, pagenumber, pagesize, order, path, sortby, mode=mode)
    # userfill presumably decorates each user dict for display -- see commons.
    for u in userlist:
        userfill(u)
        # u['profile_string'] = u['name']
    return userlist, pagination_obj
def get_post_one(self, pid):
    """Fetch a single post by numeric id; False when it doesn't exist."""
    matches = self.get_post_list(by='ids', ids=['posts/' + str(pid)])
    return matches[0] if matches else False
def get_post_list(self,
                  by='thread',
                  tid=0,
                  uid=0,
                  sortby='t_c',
                  order='desc',
                  pagesize=50,
                  pagenumber=1,
                  path='',
                  mode='',
                  apply_origin=False,
                  ids=None):
    """List posts filtered by thread, user, everything, or explicit ids.

    Returns (postlist, pagination_obj) for by in {'thread','user','all'};
    for by='ids' returns just postlist (no pagination applies).

    FIX: `ids` previously defaulted to a shared mutable `[]`; it now
    defaults to None and is normalized to a fresh list per call.
    """
    if ids is None:
        ids = []
    assert by in ['thread', 'user','all', 'ids']
    assert is_integer(tid)
    assert is_integer(uid)
    assert sortby in ['t_c','votes','nfavs','t_hn']
    # sortby = 't_c'
    assert order in ['desc', 'asc']
    assert isinstance(ids, list)
    pagenumber = max(1, pagenumber)
    start = (pagenumber-1)*pagesize
    count = pagesize
    qsc = querystring_complex = QueryString('for i in posts')
    # Per-mode filter clause (renamed from `filter` to avoid shadowing the builtin).
    flt = QueryString()
    if by=='thread':
        flt.append('filter i.tid == @tid', tid=tid)
        if mode=='question':
            mode='post_q'
        else:
            mode='post'
    elif by=='user': # filter by user
        flt.append('filter i.uid == @uid', uid=uid)
        mode='user_post'
    elif by=='all':
        flt.append('')
        mode='all_post'
    elif by=='ids':
        # Direct document lookup replaces the collection scan.
        qsc = QueryString('for id in @ids let i = document(id)', ids=ids)
    selfuid = g.selfuid
    qsc+=flt
    # Join in the author, the viewer's vote/favorite flags and a comment preview.
    qsc+=QueryString('''
let user = (for u in users filter u.uid==i.uid return u)[0]
let self_voted = length(for v in votes filter v.uid==@selfuid and v.id==to_number(i._key) and v.type=='post' and v.vote==1 return v)
let favorited = length(for f in favorites
filter f.uid==@selfuid and f.pointer==i._id return f)
let ncomments = length(for c in comments filter c.parent==i._id
return c)
let comments = (for c in comments filter c.parent==i._id
sort c.t_c desc
let uc = (for j in users filter j.uid==c.uid return j)[0]
limit 6 return merge(c, {user:uc}))
''', selfuid=selfuid)
    if apply_origin:
        # Attach the (content-stripped) thread each post belongs to.
        qsc+=QueryString('''
let t = unset((for t in threads filter t.tid==i.tid return t)[0],'content')
'''
        )
    else:
        qsc+=QueryString('let t = null')
    if by!='ids':
        qsc+=QueryString('''
sort i.{sortby} {order}
limit {start},{count}
return merge(i, {{user, self_voted, t,
favorited, ncomments, comments}})
'''.format(
            sortby = sortby,order=order,start=start,count=count,
        )
        )
        # Separate cheap count query drives the pagination widget.
        qss = querystring_simple = QueryString('return length(for i in posts')\
            + flt + QueryString('return i)')
        count = aql(qss.s, silent=True, **qss.kw)[0]
        postlist = aql(qsc.s, silent=True, **qsc.kw)
        # uncomment if you want floor number in final output.
        # for idx, p in enumerate(postlist):
        # p['floor_num'] = idx + start + 1
        pagination_obj = self.get_pagination_obj(count, pagenumber, pagesize, order, path, sortby, mode=mode)
        remove_duplicate_brief(postlist)
        mark_blacklisted(postlist)
        postlist = sink_deleted(postlist)
        return postlist, pagination_obj
    else:
        qsc+=QueryString('''
return merge(i, {user, self_voted, t, favorited})
''')
        postlist = aql(qsc.s, silent=True, **qsc.kw)
        remove_duplicate_brief(postlist)
        return postlist
@stale_cache(maxsize=128, ttr=3, ttl=30)
def get_thread_list(self, *a, **k):
    # Cached front-end: serves results up to ttl=30s old (refreshing after
    # ttr=3s) to keep hot listing pages cheap.
    return self.get_thread_list_uncached(*a, **k)
def get_thread_list_uncached(self,
                             by='category',
                             category='all',
                             tagname='yadda',
                             uid=0,
                             sortby='t_u',
                             order='desc',
                             pagesize=50,
                             pagenumber=1,
                             path='',
                             ids=[]):
    """List threads by category, tag, user, or explicit ids.

    Returns (threadlist, pagination_obj) for the listing modes, or just
    threadlist for by='ids'.

    NOTE(review): `ids=[]` is a mutable default argument; it is only read
    here, never mutated, so it is currently harmless -- but fragile.
    """
    ts = time.time()
    assert by in ['category', 'user', 'tag', 'ids']
    assert is_string(category) or is_integer(category)
    assert is_integer(uid)
    assert sortby in ['t_u', 't_c', 'nreplies', 'vc', 'votes','t_hn','amv','nfavs']
    assert order in ['desc', 'asc']
    assert re.fullmatch(tagname_regex_long, tagname)
    pagenumber = max(1, pagenumber)
    assert pagesize<=50
    start = (pagenumber-1)*pagesize
    count = pagesize
    qsc = querystring_complex = QueryString('''
for i in threads
''')
    filter = QueryString()
    if by=='category':
        if category=='all':
            filter.append('filter i.delete==null')
        elif category=='deleted':
            filter.append('filter i.delete==true')
        elif is_integer(category):
            filter.append('filter i.cid == @category and i.delete==null', category=category)
        else:
            # string bigcats
            filter.append(
                'filter i.delete==null and @category in i.bigcats',
                category=category
            )
        mode='thread' if category!=4 else 'thread_water'
    elif by=='tag':
        filter.append('filter @tagname in i.tags and i.delete==null', tagname=tagname)
        mode='tag_thread'
    elif by=='user': # filter by user
        filter.append('filter i.uid==@iuid', iuid=uid)
        mode='user_thread'
    elif by=='ids':
        # Direct document lookup replaces the collection scan.
        qsc = QueryString('''
for id in @ids let i = document(id)
''', ids = ids)
    qsc += filter
    qsc.append('''
sort i.{sortby} {order}
limit {start},{count}
'''.format(
        sortby = sortby,
        order = order,
        start = start,
        count = count,
    )
    )
    # Join author, reply count, last post (+ its author) and category name.
    qsc.append('''
let user = (for u in users filter u.uid == i.uid return u)[0]
let count = i.nreplies
let fin = (for p in posts filter p.tid == i.tid sort p.t_c desc limit 1 return p)[0]
let ufin = (for j in users filter j.uid == fin.uid return j)[0]
let c = (for c in categories filter c.cid==i.cid return c)[0]
//let mvu = ((i.mvu and i.mv>2) ?(for u in users filter u.uid == i.mvu return u)[0]: null)
''')
    if by=='ids':
        # Per-viewer flags are only computed for the id-lookup mode.
        qsc.append('''
let favorited = length(for f in favorites
filter f.uid==@selfuid and f.pointer==i._id return f)
let self_voted = length(for v in votes filter v.uid==@selfuid and v.id==to_number(i.tid) and v.type=='thread' and v.vote==1 return v)
''', selfuid=g.selfuid)
        qsc.append('''
return merge(i, {user:user, last:unset(fin,'content'), lastuser:ufin, cname:c.name, count:count,
favorited, self_voted})
''')
        threadlist = aql(qsc.s, silent=True, **qsc.kw)
        return threadlist
    else:
        qsc.append('''
return merge(i, {user:user, last:unset(fin,'content'), lastuser:ufin, cname:c.name, count:count})
''')
        # Separate cheap count query drives the pagination widget.
        qss = querystring_simple = \
            QueryString('return length(for i in threads')\
            + filter\
            + QueryString('return i)')
        count = aql(qss.s, silent=True, **qss.kw)[0]
        # print('done',time.time()-ts);ts=time.time()
        threadlist = aql(qsc.s, silent=True, **qsc.kw)
        # print('done',time.time()-ts);ts=time.time()
        pagination_obj = self.get_pagination_obj(count, pagenumber, pagesize, order, path, sortby, mode)
        # Listing pages only need a preview: keep the first youtube link
        # (if any) and drop the heavy content body.
        for t in threadlist:
            if 'content' in t:
                tc = t['content']
                ytb_videos = extract_ytb(tc)
                t['youtube'] = ytb_videos[0] if len(ytb_videos) else None
                t['content'] = None
        remove_duplicate_brief(threadlist)
        return threadlist, pagination_obj
@ttl_cache(maxsize=4096, ttl=120)
def get_pagination_obj(self,
count, pagenumber, pagesize, order, path, sortby,
mode='thread', postfix='', default_pagesize=None):
# total number of pages
total_pages = max(1, (count-1) // pagesize +1)
if total_pages > 1:
# list of surrounding numbers
slots = [pagenumber]
for i in range(1,9):
if len(slots)>=9:
break
if pagenumber+i <= total_pages:
slots.append(pagenumber+i)
if len(slots)>=9:
break
if pagenumber-i >= 1:
slots.insert(0, pagenumber-i)
# first and last numbers
slots[0] = 1
slots[-1]=total_pages
# second first and second | |
# the node in the nodal
# equivalence table, and DOF
# is the DOF reference
# number.
#
# --- dp 1 nrf Reaction forces. The force
# values are ordered
# according to the DOF order
# shown above in the DOF
# number reference table.
# pointer to result reaction forces
rnum = self.parse_step_substep(rnum)
rpointers = self._resultheader['rpointers']
ptr = rpointers[rnum] + self._solution_header(rnum)['ptrRF']
# table is always ANSYS LONG (INT64)
table, bufsz = self.read_record(ptr, True)
table = table.view(np.int64)
solution_header = self._result_solution_header(rnum)
numdof = solution_header['numdof']
rforces = self.read_record(ptr + bufsz)[:table.size]
shifted_table = (table - 1) / numdof
index = np.array(shifted_table, np.int64)
dof = np.round(1 + numdof*(shifted_table - index)).astype(np.int32)
if sort:
sidx = np.argsort(shifted_table)
index = index[sidx]
dof = dof[sidx]
rforces = rforces[sidx]
return rforces, index, dof
def plot_element_result(self, rnum, result_type, item_index,
                        in_element_coord_sys=False, **kwargs):
    """Plot an element result.
    Parameters
    ----------
    rnum : int
        Result number.
    result_type : str
        Element data type to retrieve.
        - EMS: misc. data
        - ENF: nodal forces
        - ENS: nodal stresses
        - ENG: volume and energies
        - EGR: nodal gradients
        - EEL: elastic strains
        - EPL: plastic strains
        - ECR: creep strains
        - ETH: thermal strains
        - EUL: euler angles
        - EFX: nodal fluxes
        - ELF: local forces
        - EMN: misc. non-sum values
        - ECD: element current densities
        - ENL: nodal nonlinear data
        - EHC: calculated heat generations
        - EPT: element temperatures
        - ESF: element surface stresses
        - EDI: diffusion strains
        - ETB: ETABLE items
        - ECT: contact data
        - EXY: integration point locations
        - EBA: back stresses
        - ESV: state variables
        - MNL: material nonlinear record
    item_index : int
        Index of the data item for each node within the element.
    in_element_coord_sys : bool, optional
        Returns the results in the element coordinate system.
        Default False and will return the results in the global
        coordinate system.
    **kwargs : keyword arguments
        Forwarded to the surface plot call.
    Returns
    -------
    cpos
        Whatever the underlying surface plot call returns (pyvista
        plotting output); this method plots the element result rather
        than returning data arrays.
    Raises
    ------
    ValueError
        If the result is unavailable in this file or is not an element
        result type.
    """
    # check result exists
    result_type = result_type.upper()
    if not self.available_results[result_type]:
        raise ValueError('Result %s is not available in this result file'
                         % result_type)
    if result_type not in ELEMENT_INDEX_TABLE_KEYS:
        raise ValueError('Result "%s" is not an element result' % result_type)
    bsurf = self._extract_surface_element_result(rnum,
                                                 result_type,
                                                 item_index,
                                                 in_element_coord_sys)
    # Plot the scalars stashed on the surface by the extraction helper,
    # titled with the result's human-readable description.
    desc = self.available_results.description[result_type].capitalize()
    kwargs.setdefault('stitle', desc)
    return bsurf.plot(scalars='_scalars', **kwargs)
def _extract_surface_element_result(self, rnum, result_type, item_index,
                                    in_element_coord_sys):
    """Return the surface of the grid with the active scalars
    being the element result"""
    # element header
    rnum = self.parse_step_substep(rnum)
    ele_ind_table, nodstr, etype, ptr_off = self._element_solution_header(rnum)
    # the number of items per node
    nitem = self._result_nitem(rnum, result_type)
    if item_index > nitem - 1:
        raise ValueError('Item index greater than the number of items in '
                         'this result type %s' % result_type)
    # extract the surface and separate the surface faces
    # TODO: add element/node components
    surf = self.grid.extract_surface()
    bsurf = break_apart_surface(surf, force_linear=True)
    # ANSYS node numbers of the broken-apart surface points.
    nnum_surf = surf.point_arrays['ansys_node_num'][bsurf['orig_ind']]
    faces = bsurf.faces
    if faces.dtype != np.int64:
        # the reader below expects 64-bit face connectivity
        faces = faces.astype(np.int64)
    elem_ind = surf.cell_arrays['vtkOriginalCellIds']
    # index within the element table pointing to the data of interest
    result_index = ELEMENT_INDEX_TABLE_KEYS.index(result_type)
    data = populate_surface_element_result(self.filename,
                                           ele_ind_table,
                                           nodstr,
                                           etype,
                                           nitem,
                                           ptr_off,  # start of result data
                                           result_index,
                                           bsurf.n_points,
                                           faces,
                                           bsurf.n_faces,
                                           nnum_surf,
                                           elem_ind,
                                           self._mesh._elem,
                                           self._mesh._elem_off,
                                           item_index,
                                           as_global=not in_element_coord_sys)
    # Stash the scalars on the surface so the caller can plot them directly.
    bsurf['_scalars'] = data
    return bsurf
def _result_solution_header(self, rnum):
    """Return the solution header for a given cumulative result index"""
    # rpointers holds the file offset of each result set's header record.
    ptr = self._resultheader['rpointers'][rnum]
    return parse_header(self.read_record(ptr), solution_data_header_keys)
def nodal_stress(self, rnum):
    """Retrieves the component stresses for each node in the
    solution.
    The order of the results corresponds to the sorted node
    numbering.
    Computes the nodal stress by averaging the stress for each
    element at each node. Due to the discontinuities across
    elements, stresses will vary based on the element they are
    evaluated from.
    Parameters
    ----------
    rnum : int or list
        Cumulative result number with zero based indexing, or a
        list containing (step, substep) of the requested result.
    Returns
    -------
    nnum : numpy.ndarray
        Node numbers of the result.
    stress : numpy.ndarray
        Stresses at X, Y, Z, XY, YZ, and XZ averaged at each corner
        node.
    Examples
    --------
    >>> import pyansys
    >>> rst = pyansys.read_binary('file.rst')
    >>> nnum, stress = rst.nodal_stress(0)
    Notes
    -----
    Nodes without a stress value will be NAN.
    Equivalent ANSYS command: PRNSOL, S
    """
    # Delegate to the shared element-nodal result reader ('ENS' record).
    return self._nodal_result(rnum, 'ENS')
def cylindrical_nodal_stress(self, rnum):
    """Retrieves the stresses for each node in the solution in the
    cylindrical coordinate system as the following values:
    ``R``, ``THETA``, ``Z``, ``RTHETA``, ``THETAZ``, and ``RZ``
    The order of the results corresponds to the sorted node
    numbering.
    Computes the nodal stress by averaging the stress for each
    element at each node. Due to the discontinuities across
    elements, stresses will vary based on the element they are
    evaluated from.
    Parameters
    ----------
    rnum : int or list
        Cumulative result number with zero based indexing, or a
        list containing (step, substep) of the requested result.
    Returns
    -------
    nnum : numpy.ndarray
        Node numbers of the result.
    stress : numpy.ndarray
        Stresses at R, THETA, Z, RTHETA, THETAZ, and RZ averaged
        at each corner node where R is radial.
    Examples
    --------
    >>> import pyansys
    >>> rst = pyansys.read_binary('file.rst')
    >>> nnum, stress = rst.cylindrical_nodal_stress(0)
    Notes
    -----
    Nodes without a stress value will be NAN.
    Equivalent ANSYS commands:
    RSYS, 1
    PRNSOL, S
    """
    nnum, stress = self._nodal_result(rnum, 'ENS')
    # angles relative to the XZ plane
    if nnum.size != self._mesh.nodes.shape[0]:
        # The result covers only a subset of the mesh nodes; compute the
        # rotation angle for just the nodes present in the result.
        mask = np.in1d(nnum, self._mesh.nnum)
        angle = np.arctan2(self._mesh.nodes[mask, 1],
                           self._mesh.nodes[mask, 0])
    else:
        angle = np.arctan2(self._mesh.nodes[:, 1],
                           self._mesh.nodes[:, 0])
    # Rotate the Cartesian stress tensor components to cylindrical.
    _binary_reader.euler_cart_to_cyl(stress, angle) # mod stress inplace
    return nnum, stress
def nodal_temperature(self, rnum, **kwargs):
    """Retrieves the temperature for each node in the
    solution.
    The order of the results corresponds to the sorted node
    numbering.
    Equivalent MAPDL command: PRNSOL, TEMP
    Parameters
    ----------
    rnum : int or list
        Cumulative result number with zero based indexing, or a
        list containing (step, substep) of the requested result.
    Returns
    -------
    nnum : numpy.ndarray
        Node numbers of the result.
    temperature : numpy.ndarray
        Temperature at each node.
    Examples
    --------
    >>> import pyansys
    >>> rst = pyansys.read_binary('file.rst')
    >>> nnum, stress = rst.nodal_temperature(0)
    """
    if self._is_thermal:
        # Thermal analyses carry temperature as the primary DOF solution.
        nnum, temp = self.nodal_solution(rnum)
        return nnum, temp
    # Otherwise read element temperatures ('EPT') and flatten to one
    # value per node.
    nnum, temp = self._nodal_result(rnum, 'EPT')
    return nnum, temp.ravel()
def plot_cylindrical_nodal_stress(self, rnum, comp=None, show_displacement=False,
                                  displacement_factor=1, node_components=None,
                                  element_components=None, sel_type_all=True,
                                  **kwargs):
    """Plot nodal_stress in the cylindrical coordinate system.
    Parameters
    ----------
    rnum : int
        Result number
    comp : str, optional
        Stress component to display. Available options:
        - ``"R"``
        - ``"THETA"``
        - ``"Z"``
        - ``"RTHETA"``
        - ``"THETAZ"``
        - ``"RZ"``
    show_displacement : bool, optional
        Deforms mesh according to the result.
    displacement_factor : float, optional
        Increases or decreases displacement by a factor.
    node_components : list, optional
        Accepts either a string or a list strings of node
        components to plot. For example:
        ``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
    element_components : list, optional
        Accepts either a string or a list strings of element
        components to plot. For example:
        ``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
    sel_type_all : bool, optional
        If node_components is specified, plots those elements
        containing all nodes of the component. Default True.
    **kwargs : keyword arguments
        Optional keyword arguments. See help(pyvista.plot)
    Examples
    --------
    Plot nodal stress in the radial direction
    >>> import pyansys
    >>> result = pyansys.read_binary('file.rst')
    >>> result.plot_cylindrical_nodal_stress(0, 'R')
    """
    available_comps = ['R', 'THETA', 'Z', 'RTHETA', 'THETAZ', 'RZ']
    # check_comp validates comp and maps it to the stress column index.
    idx = check_comp(available_comps, comp)
    _, scalars = self.cylindrical_nodal_stress(rnum)
    scalars = scalars[:, idx]
    grid = self.grid
    if node_components:
        # Restrict the plotted grid and scalars to the selected components.
        grid, ind = self._extract_node_components(node_components, sel_type_all)
        scalars = scalars[ind]
    elif element_components:
        grid, ind = self._extract_element_components(element_components)
        scalars = scalars[ind]
    kwargs.setdefault('stitle', '%s Cylindrical\nNodal Stress' % comp)
    return self._plot_point_scalars(scalars, grid=grid, rnum=rnum,
                                    show_displacement=show_displacement,
                                    displacement_factor=displacement_factor,
                                    **kwargs)
def plot_nodal_temperature(self, rnum, show_displacement=False,
displacement_factor=1, node_components=None,
element_components=None, sel_type_all=True,
**kwargs):
"""Plot nodal temperature
Parameters
----------
rnum : int
Result number
show_displacement : bool, optional
Deforms mesh according to the result.
displacement_factor : float, optional
Increases or decreases displacement by a factor.
node_components : list, optional
Accepts either a string or a list strings of node
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT]``
element_components : list, optional
Accepts either a string or a list strings of element
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT]``
sel_type_all : bool, optional
If node_components is specified, plots those elements
containing all | |
= self._content_parsers.GetData( only_selected = True )
for content_parser in content_parsers:
with ClientGUITopLevelWindows.DialogEdit( self, 'edit content parser', frame_key = 'deeply_nested_dialog' ) as dlg:
test_context = self._test_context_callable()
panel = EditContentParserPanel( dlg, content_parser, test_context )
dlg.SetPanel( panel )
if dlg.ShowModal() == wx.ID_OK:
edited_content_parser = panel.GetValue()
self._content_parsers.DeleteDatas( ( content_parser, ) )
HydrusSerialisable.SetNonDupeName( edited_content_parser, self._GetExistingNames() )
self._content_parsers.AddDatas( ( edited_content_parser, ) )
else:
break
self._content_parsers.Sort()
def _GetExistingNames( self ):
    # Names currently in use, consulted for duplicate-name avoidance.
    return { content_parser.GetName() for content_parser in self._content_parsers.GetData() }
def GetData( self ):
    # Expose the underlying content parsers for the parent panel.
    return self._content_parsers.GetData()
def AddDatas( self, content_parsers ):
    # Bulk-add parsers and keep the listing sorted.
    self._content_parsers.AddDatas( content_parsers )
    self._content_parsers.Sort()
class EditNodes( wx.Panel ):
def __init__( self, parent, nodes, referral_url_callable, example_data_callable ):
    # Panel listing parse nodes with add/copy/paste/duplicate/edit/delete
    # controls.  The callables are deferred so dialogs opened later see the
    # current referral url / example data.
    wx.Panel.__init__( self, parent )
    self._referral_url_callable = referral_url_callable
    self._example_data_callable = example_data_callable
    self._nodes = ClientGUIListCtrl.SaneListCtrlForSingleObject( self, 200, [ ( 'name', 120 ), ( 'node type', 80 ), ( 'produces', -1 ) ], delete_key_callback = self.Delete, activation_callback = self.Edit )
    menu_items = []
    menu_items.append( ( 'normal', 'content node', 'A node that parses the given data for content.', self.AddContentNode ) )
    menu_items.append( ( 'normal', 'link node', 'A node that parses the given data for a link, which it then pursues.', self.AddLinkNode ) )
    self._add_button = ClientGUICommon.MenuButton( self, 'add', menu_items )
    self._copy_button = ClientGUICommon.BetterButton( self, 'copy', self.Copy )
    self._paste_button = ClientGUICommon.BetterButton( self, 'paste', self.Paste )
    self._duplicate_button = ClientGUICommon.BetterButton( self, 'duplicate', self.Duplicate )
    self._edit_button = ClientGUICommon.BetterButton( self, 'edit', self.Edit )
    self._delete_button = ClientGUICommon.BetterButton( self, 'delete', self.Delete )
    #
    # Populate the list ctrl with the initial nodes.
    for node in nodes:
        ( display_tuple, sort_tuple ) = self._ConvertNodeToTuples( node )
        self._nodes.Append( display_tuple, sort_tuple, node )
    #
    # Layout: node list on top, button row underneath.
    vbox = wx.BoxSizer( wx.VERTICAL )
    button_hbox = wx.BoxSizer( wx.HORIZONTAL )
    button_hbox.Add( self._add_button, CC.FLAGS_VCENTER )
    button_hbox.Add( self._copy_button, CC.FLAGS_VCENTER )
    button_hbox.Add( self._paste_button, CC.FLAGS_VCENTER )
    button_hbox.Add( self._duplicate_button, CC.FLAGS_VCENTER )
    button_hbox.Add( self._edit_button, CC.FLAGS_VCENTER )
    button_hbox.Add( self._delete_button, CC.FLAGS_VCENTER )
    vbox.Add( self._nodes, CC.FLAGS_EXPAND_BOTH_WAYS )
    vbox.Add( button_hbox, CC.FLAGS_BUTTON_SIZER )
    self.SetSizer( vbox )
def _ConvertNodeToTuples( self, node ):
( name, node_type, produces ) = node.ToPrettyStrings()
return ( ( name, node_type, produces ), ( name, node_type, produces ) )
def _GetExportObject( self ):
to_export = HydrusSerialisable.SerialisableList()
for node in self._nodes.GetObjects( only_selected = True ):
to_export.append( node )
if len( to_export ) == 0:
return None
elif len( to_export ) == 1:
return to_export[0]
else:
return to_export
def _ImportObject( self, obj ):
if isinstance( obj, HydrusSerialisable.SerialisableList ):
for sub_obj in obj:
self._ImportObject( sub_obj )
else:
if isinstance( obj, ( ClientParsing.ContentParser, ClientParsing.ParseNodeContentLink ) ):
node = obj
( display_tuple, sort_tuple ) = self._ConvertNodeToTuples( node )
self._nodes.Append( display_tuple, sort_tuple, node )
else:
wx.MessageBox( 'That was not a script--it was a: ' + type( obj ).__name__ )
def AddContentNode( self ):
dlg_title = 'edit content node'
empty_node = ClientParsing.ContentParser()
panel_class = EditContentParserPanel
self.AddNode( dlg_title, empty_node, panel_class )
def AddLinkNode( self ):
dlg_title = 'edit link node'
empty_node = ClientParsing.ParseNodeContentLink()
panel_class = EditParseNodeContentLinkPanel
self.AddNode( dlg_title, empty_node, panel_class )
def AddNode( self, dlg_title, empty_node, panel_class ):
with ClientGUITopLevelWindows.DialogEdit( self, dlg_title, frame_key = 'deeply_nested_dialog' ) as dlg_edit:
referral_url = self._referral_url_callable()
example_data = self._example_data_callable()
if isinstance( empty_node, ClientParsing.ContentParser ):
panel = panel_class( dlg_edit, empty_node, ( {}, example_data ) )
else:
panel = panel_class( dlg_edit, empty_node, referral_url, example_data )
dlg_edit.SetPanel( panel )
if dlg_edit.ShowModal() == wx.ID_OK:
new_node = panel.GetValue()
( display_tuple, sort_tuple ) = self._ConvertNodeToTuples( new_node )
self._nodes.Append( display_tuple, sort_tuple, new_node )
def Copy( self ):
export_object = self._GetExportObject()
if export_object is not None:
json = export_object.DumpToString()
HG.client_controller.pub( 'clipboard', 'text', json )
def Delete( self ):
with ClientGUIDialogs.DialogYesNo( self, 'Remove all selected?' ) as dlg:
if dlg.ShowModal() == wx.ID_YES:
self._nodes.RemoveAllSelected()
def Duplicate( self ):
nodes_to_dupe = self._nodes.GetObjects( only_selected = True )
for node in nodes_to_dupe:
dupe_node = node.Duplicate()
( display_tuple, sort_tuple ) = self._ConvertNodeToTuples( dupe_node )
self._nodes.Append( display_tuple, sort_tuple, dupe_node )
def Edit( self ):
for i in self._nodes.GetAllSelected():
node = self._nodes.GetObject( i )
with ClientGUITopLevelWindows.DialogEdit( self, 'edit node', frame_key = 'deeply_nested_dialog' ) as dlg:
referral_url = self._referral_url_callable()
example_data = self._example_data_callable()
if isinstance( node, ClientParsing.ContentParser ):
panel = EditContentParserPanel( dlg, node, ( {}, example_data ) )
elif isinstance( node, ClientParsing.ParseNodeContentLink ):
panel = EditParseNodeContentLinkPanel( dlg, node, example_data = example_data )
dlg.SetPanel( panel )
if dlg.ShowModal() == wx.ID_OK:
edited_node = panel.GetValue()
( display_tuple, sort_tuple ) = self._ConvertNodeToTuples( edited_node )
self._nodes.UpdateRow( i, display_tuple, sort_tuple, edited_node )
def GetValue( self ):
return self._nodes.GetObjects()
def Paste( self ):
raw_text = HG.client_controller.GetClipboardText()
try:
obj = HydrusSerialisable.CreateFromString( raw_text )
self._ImportObject( obj )
except:
wx.MessageBox( 'I could not understand what was in the clipboard' )
class EditParseNodeContentLinkPanel( ClientGUIScrolledPanels.EditPanel ):
def __init__( self, parent, node, referral_url = None, example_data = None ):
ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
if referral_url is None:
referral_url = 'test-url.com/test_query'
self._referral_url = referral_url
if example_data is None:
example_data = ''
self._my_example_url = None
notebook = wx.Notebook( self )
( name, formula, children ) = node.ToTuple()
#
edit_panel = wx.Panel( notebook )
edit_panel.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_FRAMEBK ) )
self._name = wx.TextCtrl( edit_panel )
get_example_parsing_context = lambda: {}
self._formula = EditFormulaPanel( edit_panel, formula, self.GetTestContext )
children_panel = ClientGUICommon.StaticBox( edit_panel, 'content parsing children' )
self._children = EditNodes( children_panel, children, self.GetExampleURL, self.GetExampleData )
#
test_panel = wx.Panel( notebook )
test_panel.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_FRAMEBK ) )
self._example_data = ClientGUICommon.SaneMultilineTextCtrl( test_panel )
self._example_data.SetMinSize( ( -1, 200 ) )
self._example_data.SetValue( example_data )
self._test_parse = wx.Button( test_panel, label = 'test parse' )
self._test_parse.Bind( wx.EVT_BUTTON, self.EventTestParse )
self._results = ClientGUICommon.SaneMultilineTextCtrl( test_panel )
self._results.SetMinSize( ( -1, 200 ) )
self._test_fetch_result = wx.Button( test_panel, label = 'try fetching the first result' )
self._test_fetch_result.Bind( wx.EVT_BUTTON, self.EventTestFetchResult )
self._test_fetch_result.Disable()
self._my_example_data = ClientGUICommon.SaneMultilineTextCtrl( test_panel )
#
info_panel = wx.Panel( notebook )
message = '''This node looks for one or more urls in the data it is given, requests each in turn, and gives the results to its children for further parsing.
If your previous query result responds with links to where the actual content is, use this node to bridge the gap.
The formula should attempt to parse full or relative urls. If the url is relative (like href="/page/123"), it will be appended to the referral url given by this node's parent. It will then attempt to GET them all.'''
info_st = wx.StaticText( info_panel, label = message )
info_st.Wrap( 400 )
#
self._name.SetValue( name )
#
children_panel.Add( self._children, CC.FLAGS_EXPAND_BOTH_WAYS )
#
vbox = wx.BoxSizer( wx.VERTICAL )
rows = []
rows.append( ( 'name or description (optional): ', self._name ) )
gridbox = ClientGUICommon.WrapInGrid( edit_panel, rows )
vbox.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
vbox.Add( self._formula, CC.FLAGS_EXPAND_BOTH_WAYS )
vbox.Add( children_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
edit_panel.SetSizer( vbox )
#
vbox = wx.BoxSizer( wx.VERTICAL )
vbox.Add( self._example_data, CC.FLAGS_EXPAND_BOTH_WAYS )
vbox.Add( self._test_parse, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox.Add( self._results, CC.FLAGS_EXPAND_BOTH_WAYS )
vbox.Add( self._test_fetch_result, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox.Add( self._my_example_data, CC.FLAGS_EXPAND_BOTH_WAYS )
test_panel.SetSizer( vbox )
#
vbox = wx.BoxSizer( wx.VERTICAL )
vbox.Add( info_st, CC.FLAGS_EXPAND_BOTH_WAYS )
# | |  (stray table fragment from file concatenation; commented out)
#import cv
import cv2
import math
import numpy as np
from numpy import linalg as npla
import sys
import scipy.interpolate
import scipy.sparse
import weave
import unittest
import common
import config
def CompareMatricesWithNanElements(M1, M2):
    """Elementwise equality for 2-D matrices, treating NaN == NaN as a match.

    Returns True iff M1 and M2 agree at every position, where two NaNs count
    as equal (unlike the standard IEEE NaN != NaN). Shapes must agree.
    """
    assert M1.shape == M2.shape
    assert M1.ndim == 2
    for r in range(M1.shape[0]):
        # BUG FIX: the inner loop must span the columns (shape[1]); the
        # original iterated range(M2.shape[0]) (rows), silently skipping
        # columns beyond the row count on non-square matrices.
        for c in range(M1.shape[1]):
            if np.isnan(M1[r, c]):
                if not np.isnan(M2[r, c]):
                    return False
            elif M1[r, c] != M2[r, c]:
                return False
    return True
def ConvertCvMatToNPArray(cvmat):
    """Copy a legacy OpenCV cvmat (rows/cols + 2-D indexing) into a new
    numpy array, row by row."""
    rows = [[cvmat[r, c] for c in range(cvmat.cols)]
            for r in range(cvmat.rows)]
    return np.array(rows)
def Repr3DMatrix(m):
    """Render a 3-D array as text, one depth slice per "[:, :, k] = ..." line."""
    assert m.ndim == 3
    pieces = [("\n[:, :, %d] = " % k) + str(m[:, :, k])
              for k in range(m.shape[2])]
    return "".join(pieces)
"""
From http://www.mathworks.com/help/matlab/matlab_prog/symbol-reference.html:
Dot-Dot-Dot (Ellipsis) - ...
A series of three consecutive periods (...) is the line continuation operator in MATLAB.
Line Continuation
Continue any MATLAB command or expression by placing an ellipsis at the end of the line to be continued:
"""
def fix(x):
    """Round toward zero, like Matlab's fix() (http://www.mathworks.com/help/matlab/ref/fix.html).

    Negative values are rounded up (ceil) and non-negative values rounded
    down (floor), so the result always moves toward zero, e.g.
    fix(-1.9) -> -1, fix(3.4) -> 3.
    """
    return math.ceil(x) if x < 0 else math.floor(x)
# eps() is used by fspecial().
def eps(val=1.0):
    """Matlab-style eps: the distance from val to the next representable
    float (http://www.mathworks.com/help/matlab/ref/eps.html).

    Per http://wiki.scipy.org/NumPy_for_Matlab_Users this is np.spacing:
    spacing(np.float64(1)) == np.finfo(np.float64).eps. Matlab doubles
    correspond to numpy float64.
    """
    return np.spacing(val)
def max(A):
    """Matlab-style max for 1-D arrays: returns (largest value, index of its
    first occurrence), per http://www.mathworks.com/help/matlab/ref/max.html.

    NOTE: NaN entries are overwritten IN PLACE with -1.0e-300 (the module's
    sentinel) before the maximum is taken, so the caller's array is mutated —
    this matches the original behavior. Shadows the builtin max, as before.
    """
    assert A.ndim == 1
    # Vectorised form of the original per-element NaN replacement loop.
    A[np.isnan(A)] = -1.0e-300
    C = np.max(A)
    # First index at which the maximum occurs.
    I = np.nonzero(A == C)[0][0]
    return C, I
def fliplr(M):
    """Reverse the column order of M (Matlab's fliplr); returns a view."""
    return np.fliplr(M)
"""
We convert a tuple of (2 or 3) array indices (or array or indices) into a
linear (scalar) index (respectively, array of linear indice)
"""
def sub2ind(matrixSize, rowSub, colSub, dim3Sub=None):
    """Convert 0-based 2-D or 3-D subscripts to Fortran-order linear indices.

    Limited implementation of Matlab's sub2ind
    (http://www.mathworks.com/help/matlab/ref/sub2ind.html): only 2 and 3
    dimensions are supported, but it is easy to generalize. Subscripts may
    be scalars or arrays (per numpy.ravel_multi_index).

    Parameters
    ----------
    matrixSize : tuple of 2 or 3 ints, the array shape.
    rowSub, colSub : int or array_like, 0-based row/column subscripts.
    dim3Sub : int or array_like, optional 0-based third-dimension subscript.

    Returns
    -------
    int or ndarray of linear indices in column-major (Fortran) order.
    """
    assert (len(matrixSize) == 2) or (len(matrixSize) == 3)
    # BUG FIX: the original tested `dim3Sub == None`, which for an ndarray
    # subscript performs an elementwise comparison and raises "truth value
    # of an array is ambiguous". Identity test is the correct check.
    if dim3Sub is None:
        subs = (rowSub, colSub)
    else:
        subs = (rowSub, colSub, dim3Sub)
    return np.ravel_multi_index(subs, dims=matrixSize, order="F")
def find(X):
    """Matlab-style find: 0-based Fortran-order linear indices of the
    nonzero entries of X (http://www.mathworks.com/help/matlab/ref/find.html).

    Matlab's find numbers elements in column-major order, whereas
    np.nonzero scans row-major; running nonzero on the transpose yields the
    indices in Matlab's column-major scan order (this also reproduces the
    harris-point ordering the original code relied on).
    """
    c, r = np.nonzero(X.T)
    # BUG FIX: the original called sub2ind(c, r, X.shape), passing the
    # column-index array where the shape tuple belongs. Compute the
    # Fortran-order linear index directly instead.
    return np.ravel_multi_index((r, c), dims=X.shape, order="F")
"""
This version is at least 50 times faster than ordfilt2_vectorized().
It is very efficient. It also makes a few assumptions which were
respected in the code of Evangelidis - check below.
"""
def ordfilt2(A, order, domain):
"""
common.DebugPrint("Entered Matlab.ordfilt2(order=%d, domain=%s): " \
"A.dtype = %s" % \
(order, str(domain), str(A.dtype)));
"""
common.DebugPrint("Entered Matlab.ordfilt2(order=%d): " \
"A.dtype = %s" % \
(order, str(A.dtype)));
assert A.ndim == 2;
assert domain.shape[0] == domain.shape[1];
assert order == domain.shape[0] * domain.shape[0];
assert np.abs((domain - 1.0) < 1.0e-5).all(); # !!!!TODO: this is time consuming - take it out if there are issues
"""
(Documented from http://stackoverflow.com/questions/16685071/implementation-of-matlab-api-ordfilt2-in-opencv)
See http://docs.opencv.org/modules/imgproc/doc/filtering.html#dilate
cv2.dilate(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst
Inspired from
http://docs.opencv.org/trunk/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html#dilation
"""
#(order == domain.shape[0] * domain.shape[0])
kernel = np.ones(domain.shape, np.uint8);
res = cv2.dilate(A, kernel, iterations=1);
return res;
if False:
# From http://docs.opencv.org/modules/imgproc/doc/filtering.html#erode
if (order == 0):
res = cv2.dilate(A, kernel, iterations=1);
return res;
#res = np.zeros(A.shape, dtype=np.int32);
if False:
res = np.empty(A.shape, dtype=np.float64); #np.int64);
else:
res = np.empty(A.shape, dtype=A.dtype); #np.int64);
"""
PyArray_Descr *PyArray_DESCR(PyArrayObject* arr)
Returns a borrowed reference to the dtype property of the array.
PyArray_Descr *PyArray_DTYPE(PyArrayObject* arr)
New in version 1.7.
A synonym for PyArray_DESCR, named to be consistent with the .dtype. usage within Python.
"""
if False:
assert A.dtype == np.float64; #np.int64;
assert res.dtype == np.float64; #np.int64;
else:
assert (A.dtype == np.float32) or (A.dtype == np.float64);
assert res.dtype == A.dtype;
if A.dtype == np.float32:
dtypeSize = 4; # np.float32 is 4 bytes
elif A.dtype == np.float64:
dtypeSize = 8; # np.float64 is 8 bytes
common.DebugPrint("Matlab.ordfilt2(): dtypeSize = %d" % dtypeSize);
# We check to have the matrices in row-major order-style
assert A.strides == (A.shape[1] * dtypeSize, dtypeSize);
assert res.strides == (res.shape[1] * dtypeSize, dtypeSize);
# See http://wiki.scipy.org/Weave, about how to handle NP array in Weave
CPP_code = """
int r, c;
int rd, cd;
int rdu, cdu;
int center = domain_array->dimensions[0] / 2;
int numRows, numCols;
| |
# Repository: astrobot/omnifit  (dataset <reponame> artifact commented out)
import numpy as np
import matplotlib.pyplot as plt
from astropy import units as u
from astropy import convolution
import warnings
import pickle
import os, sys
from .. import utils
from copy import deepcopy
from functools import wraps
def clonable(func):
    """Decorator that adds a `clone=False` keyword to a method.

    With clone=True the wrapped method is executed on a deep copy of the
    instance; the copy is returned (paired with the method's return value,
    if it produced one) and the original instance is left untouched. With
    clone=False (the default) the method behaves exactly as undecorated.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        # dict.pop with a default replaces the original try/except KeyError.
        clone = kwargs.pop('clone', False)
        if clone:
            newclass = deepcopy(self)
            # Re-dispatch by name so overriding subclasses stay honored.
            retdata = getattr(newclass, func.__name__)(*args, **kwargs)
            if retdata is not None:
                return newclass, retdata
            return newclass
        retdata = func(self, *args, **kwargs)
        if retdata is not None:
            return retdata
    return wrapper
class BaseSpectrum:
"""
A class to represent spectroscopic data.
This class is designed to work for spectroscopic data of ices, but
may work for other types of spectroscopic data as well.
This is the most basic version of the class, concerned solely with
the contents of the x and y attributes.
Attributes
----------
x : `astropy.units.Quantity`
Represents the data on the "x-axis" of the spectrum,
i.e. usually the wavelength or frequency
y : `astropy.units.Quantity`
Represents the data on the "x-axis" of the spectrum,
i.e. the flux or optical depth
dy : `NoneType` or `float`
The uncertainty of y. Can be given during initialisation,
or automatically calculated during baselining. (default=None)
specname : `string`
The name of the spectrum (default='Unknown spectrum')
baselined : `bool`
Indicates whether the spectrum has been baselined or not
convolved : `bool`
Indicates whether the spectrum has been put through convolution
"""
def __init__(self, x, y, dy=None, specname='Unknown spectrum', nondata=None):
    """
    BaseSpectrum(x,y,dy=None,specname='Unknown spectrum',nondata=None)

    Constructor for the BaseSpectrum class. Requires x and y; everything
    else is optional.

    Parameters
    ----------
    x : `astropy.units.Quantity` or `numpy.ndarray`
        The x-axis data. A bare ndarray is auto-converted: if its mean
        exceeds 1000 it is assumed to be kayser (cm^-1), otherwise micron.
    y : `astropy.units.Quantity` or `numpy.ndarray`
        The y-axis data. A bare ndarray is assumed to be optical depth.
    dy : `float`, optional
        Uncertainty of y, in the same units as y. Left as None if not
        given; it can be calculated later as part of baselining.
    specname : `string`, optional
        Human-readable name for the spectrum.
    nondata : `list`, optional
        Names of instance variables the internal sorting/error-checking
        machinery should ignore; mostly used by subclasses. (BUG FIX: the
        original used a mutable default `[]`; a None sentinel avoids
        cross-call sharing.)
    """
    if nondata is None:
        nondata = []
    if len(x) != len(y):  # Check that input is sane
        raise RuntimeError('Input arrays have different sizes.')
    # Exact type check (not isinstance) kept from the original so Quantity
    # subclasses are treated as they were before.
    if type(x) != u.quantity.Quantity:
        # Guess x units (micron vs kayser, the most common) if none given.
        if np.mean(x) > 1000.:
            warnings.warn('The x data is not in astropy unit format. Autodetection assumes kayser.', RuntimeWarning)
            self.x = x * u.kayser
        else:
            warnings.warn('The x data is not in astropy unit format. Autodetection assumes micron.', RuntimeWarning)
            self.x = x * u.micron
    else:
        self.x = x
    if type(y) != u.quantity.Quantity:
        warnings.warn('The y data is not in astropy unit format. Assuming optical depth.', RuntimeWarning)
        self.y = y * utils.unit_od
    else:
        self.y = y
    # The original if/else collapsed to a plain assignment: both branches
    # stored dy (or None) unchanged.
    self.dy = dy
    self.name = str(specname)  # Spectrum name
    self.baselined = False  # Has the spectrum been baselined?
    self.convolved = False  # Has the spectrum been convolved?
    # The mangled name of this very list must itself be ignored by the
    # __sort/__fixbad machinery, hence the first entry.
    self.__nondata = [
        '_BaseSpectrum__nondata',
        'name',
        'convolved', 'baselined',
        'dy',
    ]
    for cnondata in nondata:  # Register the extra non-array variable names
        if cnondata not in self.__nondata:
            self.__nondata.append(cnondata)
    # Order matters: nondata must be complete before these run.
    self.__fixbad()  # Drop bad data.
    self.__sort()
def __sort(self):
    """
    __sort()

    Internal: reorder every data array in place into increasing order of x.

    Parameters
    ----------
    None
    """
    sorter = np.argsort(self.x)
    # BUG FIX: under Python 3 the original's filter(...) returns a lazy,
    # non-subscriptable iterator, and its (dead) `varlength = len(
    # self.__dict__[ownvarnames[0]])` line raised TypeError. The unused
    # varlength/iGoodones leftovers (copied from __fixbad) are removed and
    # the name filtering is done with a list comprehension.
    ownvarnames = [name for name in self.__dict__.keys()
                   if name not in self.__nondata]
    for cVarname in ownvarnames:
        self.__dict__[cVarname] = self.__dict__[cVarname][sorter]
def __fixbad(self):
    """
    __fixbad()

    Internal: replace non-number entries (e.g. infinities) in the data
    arrays with `numpy.nan`. A position is "bad" if ANY data array is
    non-finite there; every array except x then gets NaN written at those
    positions.

    Raises
    ------
    RuntimeError
        If the data arrays disagree in length.
    """
    ignorevars = self.__nondata
    # BUG FIX: under Python 3 filter(...) returns a non-subscriptable
    # iterator, so the original's ownvarnames[0] raised TypeError; build a
    # real list instead.
    ownvarnames = [name for name in self.__dict__.keys()
                   if name not in ignorevars]
    varlength = len(self.__dict__[ownvarnames[0]])
    iGoodones = np.isfinite(np.ones(varlength))
    for cVarname in ownvarnames:
        cVar = self.__dict__[cVarname]
        if len(cVar) != varlength:
            raise RuntimeError('Anomalous variable length detected in spectrum!')
        iGoodones = np.logical_and(iGoodones, np.isfinite(cVar))
    iBadones = np.logical_not(iGoodones)
    for cVarname in ownvarnames:
        if cVarname != 'x':
            self.__dict__[cVarname][iBadones] = np.nan
def plot(self, axis, x='x', y='y', **kwargs):
    """
    plot(axis,x='x',y='y',**kwargs)

    Plot the contents of the spectrum into a given matplotlib axis.
    Defaults to the data contained in the x and y attributes, but can plot
    other attributes by name.

    Parameters
    ----------
    axis : `matplotlib.axis`
        The axis the plot will be generated in.
    x : `string`, optional
        Name of the attribute plotted on the x axis.
    y : `string`, optional
        Name of the attribute plotted on the y axis.
    **kwargs : Arguments, optional
        Passed through to `matplotlib.pyplot.plot`.
    """
    # .value strips astropy units. BUG FIX: a plain ndarray raises
    # AttributeError here (no .value attribute), which the original's
    # `except ValueError` did not catch; handle both.
    try:  # assume it's with astropy units
        plotx = self.__dict__[x].value
    except (AttributeError, ValueError):
        plotx = self.__dict__[x]
    try:  # assume it's with astropy units
        ploty = self.__dict__[y].value
    except (AttributeError, ValueError):
        ploty = self.__dict__[y]
    axis.plot(plotx, ploty, **kwargs)
@clonable
def convert2wn(self):
    """
    convert2wn(clone=False)

    Switch the x-axis data to kayser (reciprocal wavenumber) units and
    re-sort the data afterwards. Thin wrapper around convert2.

    Parameters
    ----------
    clone : `bool`, optional
        When True, a modified copy of the spectrum is returned and the
        original is left untouched.
    """
    self.convert2(u.kayser)
@clonable
def convert2wl(self):
    """
    convert2wl(clone=False)

    Switch the x-axis data to wavelength (micron) units and re-sort the
    data afterwards. Thin wrapper around convert2.

    Parameters
    ----------
    clone : `bool`, optional
        When True, a modified copy of the spectrum is returned and the
        original is left untouched.
    """
    self.convert2(u.micron)
@clonable
def convert2(self, newunit):
    """
    convert2(newunit,clone=False)

    Convert the x-axis data to the requested spectral unit, then re-sort
    the data so x stays monotonically increasing.

    Parameters
    ----------
    newunit : `astropy.units.core.Unit`
        Target (spectral) unit for the x-axis data.
    clone : `bool`, optional
        When True, a modified copy of the spectrum is returned and the
        original is left untouched.
    """
    # spectral() equivalencies permit wavelength <-> wavenumber conversion.
    with u.set_enabled_equivalencies(u.equivalencies.spectral()):
        self.x = self.x.to(newunit)
    self.__sort()
@clonable
def subspectrum(self, limit_lower, limit_upper):
    """
    subspectrum(limit_lower,limit_upper,clone=False)

    Crop the spectrum along the x axis to the inclusive range
    [limit_lower, limit_upper].

    Parameters
    ----------
    limit_lower : `float`
        Inclusive lower x bound, in the spectrum's current units.
    limit_upper : `float`
        Inclusive upper x bound, in the spectrum's current units.
    clone : `bool`, optional
        When True, a modified copy of the spectrum is returned and the
        original is left untouched.
    """
    xvals = self.x.value
    keep = np.logical_and(xvals >= limit_lower, xvals <= limit_upper)
    self.x = self.x[keep]
    self.y = self.y[keep]
@clonable
def interpolate(self,target_spectrum):
"""
interpolate(target_spectrum,clone=False)
Interpolate spectrum to match target spectrum resolution.
Does not modify current spectrum, but returns a new one, which is
a copy of the current spectrum but with the interpolated data on
the x and y fields.
The target spectrum has to be using the same units on the x and
y axes as the current spectrum, or |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.
# (dataset-viewer UI text accidentally appended; commented out so the module parses)