id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7 values |
|---|---|---|
/Kafthon-0.0.1-py3-none-any.whl/kafthon/events.py | from __future__ import annotations
import functools
from typing import Optional, Callable
from . import kafthon
from .field_mapping import FieldMapping
from .field import Field, NOT_SET
class MetaEvent(type):
    """Metaclass that harvests ``Field`` attributes from an event class body.

    Every subclass of ``BaseEvent`` gets its ``Field`` class attributes
    removed from the class namespace and collected into a ``FieldMapping``
    stored as ``_fields``. ``BaseEvent`` itself is left untouched so it can
    keep its hand-written empty mapping.
    """

    def __new__(cls, cls_name, base_cls, attributes, **kwargs):
        if cls_name != 'BaseEvent':
            # Collect the declared fields first, then strip them from the
            # namespace so they don't shadow item access on the event dict.
            collected = {
                attr: value
                for attr, value in attributes.items()
                if isinstance(value, Field)
            }
            for attr in collected:
                del attributes[attr]
            attributes['_fields'] = FieldMapping(collected)
        return super().__new__(cls, cls_name, base_cls, attributes, **kwargs)
class BaseEvent(dict, metaclass=MetaEvent):
    # The Kafthon application this event class belongs to; assigned
    # externally when the event class is registered with an app.
    _kafthon_app: kafthon.Kafthon
    # Field definitions; MetaEvent replaces this on every subclass.
    _fields: FieldMapping = FieldMapping({})

    def __init__(self, *args, **kwargs):
        """Build the event like a plain dict, then fill in field defaults."""
        super().__init__(*args, **kwargs)
        self.__set_defaults()
def __set_defaults(self):
missing = self._fields.field_names - set(self.keys())
for field_name in missing:
default = self._fields[field_name].get_default()
if default != NOT_SET:
self[field_name] = default
@classmethod
def subscribe(cls, handler: Optional[Callable] = None, unwrap: bool = True):
if handler is None:
return functools.partial(cls.subscribe, unwrap=unwrap)
is_method = (
hasattr(handler, '__code__') and
hasattr(handler.__code__, 'co_varnames') and
isinstance(handler.__code__.co_varnames, tuple) and # ensure it is no mock obj
len(handler.__code__.co_varnames) > 0 and
handler.__code__.co_varnames[0] == 'self'
)
if is_method:
cls._kafthon_app._register_method_subscription(
event_type=cls,
unwrap=unwrap,
method=handler
)
return handler
cls._subscribe(handler, unwrap)
    @classmethod
    def _subscribe(cls, handler, unwrap):
        """Register *handler* on the app's event hub; returns its result."""
        return cls._kafthon_app.event_hub.subscribe(
            cls,
            handler,
            unwrap=unwrap
        )
    def send(self):
        """Publish this event on the app's event hub.

        Validates first when the app's ``validate_events`` flag is set.
        Returns self so calls can be chained.
        """
        if self._kafthon_app.validate_events:
            self.validate()
        self._kafthon_app.event_hub.send(self)
        return self

    def validate(self):
        # Field-level validation is delegated to the FieldMapping.
        self._fields.validate_event(self)

    @property
    def name(self):
        # An event's name is simply its class name.
        return type(self).__name__

    def __repr__(self):
        # e.g. <MyEvent {'key': 'value'}> - dict repr wrapped with the name.
        return '<%s %s>' % (
            self.name,
            super().__repr__()
        )

    __str__ = __repr__ | PypiClean |
/Mathics-1.0.tar.gz/Mathics-1.0/mathics/web/media/js/scriptaculous/builder.js |
// Copyright (c) 2005-2010 Thomas Fuchs (http://script.aculo.us, http://mir.aculo.us)
//
// script.aculo.us is freely distributable under the terms of an MIT-style license.
// For details, see the script.aculo.us web site: http://script.aculo.us/
// Builder: programmatic DOM-construction helpers.
var Builder = {
  // Tags that browsers refuse to create via innerHTML inside a <div>;
  // each must be built inside the listed parent tag instead.
  NODEMAP: {
    AREA: 'map',
    CAPTION: 'table',
    COL: 'table',
    COLGROUP: 'table',
    LEGEND: 'fieldset',
    OPTGROUP: 'select',
    OPTION: 'select',
    PARAM: 'object',
    TBODY: 'table',
    TD: 'table',
    TFOOT: 'table',
    TH: 'table',
    THEAD: 'table',
    TR: 'table'
  },
  // note: For Firefox < 1.5, OPTION and OPTGROUP tags are currently broken,
  // due to a Firefox bug

  // Create an element. Optional second argument is either the attributes
  // object, a child node/array, or text; optional third argument is the
  // children/text. Returns the element wrapped by Prototype's $().
  node: function(elementName) {
    elementName = elementName.toUpperCase();

    // try innerHTML approach
    var parentTag = this.NODEMAP[elementName] || 'div';
    var parentElement = document.createElement(parentTag);
    try { // prevent IE "feature": http://dev.rubyonrails.org/ticket/2707
      parentElement.innerHTML = "<" + elementName + "></" + elementName + ">";
    } catch(e) {}
    var element = parentElement.firstChild || null;

    // see if browser added wrapping tags
    if(element && (element.tagName.toUpperCase() != elementName))
      element = element.getElementsByTagName(elementName)[0];

    // fallback to createElement approach
    if(!element) element = document.createElement(elementName);

    // abort if nothing could be created
    if(!element) return;

    // attributes (or text)
    if(arguments[1])
      if(this._isStringOrNumber(arguments[1]) ||
        (arguments[1] instanceof Array) ||
        arguments[1].tagName) {
          // second argument is content, not attributes
          this._children(element, arguments[1]);
        } else {
          var attrs = this._attributes(arguments[1]);
          if(attrs.length) {
            try { // prevent IE "feature": http://dev.rubyonrails.org/ticket/2707
              parentElement.innerHTML = "<" +elementName + " " +
                attrs + "></" + elementName + ">";
            } catch(e) {}
            element = parentElement.firstChild || null;
            // workaround firefox 1.0.X bug
            if(!element) {
              element = document.createElement(elementName);
              for(attr in arguments[1])
                element[attr == 'class' ? 'className' : attr] = arguments[1][attr];
            }
            if(element.tagName.toUpperCase() != elementName)
              element = parentElement.getElementsByTagName(elementName)[0];
          }
        }

    // text, or array of children
    if(arguments[2])
      this._children(element, arguments[2]);

     return $(element);
  },
  // Wrap a string in a DOM text node.
  _text: function(text) {
     return document.createTextNode(text);
  },

  // JS property names that differ from their HTML attribute names.
  ATTR_MAP: {
    'className': 'class',
    'htmlFor': 'for'
  },

  // Serialize an attributes object into an HTML attribute string,
  // escaping values so they survive innerHTML round-tripping.
  _attributes: function(attributes) {
    var attrs = [];
    for(attribute in attributes)
      attrs.push((attribute in this.ATTR_MAP ? this.ATTR_MAP[attribute] : attribute) +
          '="' + attributes[attribute].toString().escapeHTML().gsub(/"/,'&quot;') + '"');
    return attrs.join(" ");
  },
  // Append children to element: a single node, an array of nodes and
  // strings (flattened), or a plain string/number (becomes a text node).
  _children: function(element, children) {
    if(children.tagName) {
      element.appendChild(children);
      return;
    }
    if(typeof children=='object') { // array can hold nodes and text
      children.flatten().each( function(e) {
        if(typeof e=='object')
          element.appendChild(e);
        else
          if(Builder._isStringOrNumber(e))
            element.appendChild(Builder._text(e));
      });
    } else
      if(Builder._isStringOrNumber(children))
        element.appendChild(Builder._text(children));
  },
  _isStringOrNumber: function(param) {
    return(typeof param=='string' || typeof param=='number');
  },
  // Parse an HTML string and return its first element.
  build: function(html) {
    var element = this.node('div');
    $(element).update(html.strip());
    return element.down();
  },
  // Define a helper function per HTML tag (e.g. DIV(...), SPAN(...)) on
  // the given scope (window by default), each delegating to Builder.node.
  dump: function(scope) {
    if(typeof scope != 'object' && typeof scope != 'function') scope = window; //global scope

    var tags = ("A ABBR ACRONYM ADDRESS APPLET AREA B BASE BASEFONT BDO BIG BLOCKQUOTE BODY " +
      "BR BUTTON CAPTION CENTER CITE CODE COL COLGROUP DD DEL DFN DIR DIV DL DT EM FIELDSET " +
      "FONT FORM FRAME FRAMESET H1 H2 H3 H4 H5 H6 HEAD HR HTML I IFRAME IMG INPUT INS ISINDEX "+
      "KBD LABEL LEGEND LI LINK MAP MENU META NOFRAMES NOSCRIPT OBJECT OL OPTGROUP OPTION P "+
      "PARAM PRE Q S SAMP SCRIPT SELECT SMALL SPAN STRIKE STRONG STYLE SUB SUP TABLE TBODY TD "+
      "TEXTAREA TFOOT TH THEAD TITLE TR TT U UL VAR").split(/\s+/);

    tags.each( function(tag){
      scope[tag] = function() {
        return Builder.node.apply(Builder, [tag].concat($A(arguments)));
      };
    });
  }
}; | PypiClean |
/DobbyStock-0.1.tar.gz/DobbyStock-0.1/.history/start_game_20221224105021.py |
from main_package.Stock_main import *
from main_package.Buy import *
from sub_package.User import *
from sub_package.bot import *
from sub_package.endgame import *
# Set up the market and the two participants: the human player and the bot.
Stock1 = Stock()
U1 = User(Stock1)
U1.process()
U2 = bot(Stock1)
U2.process()

# Per-player status: [expense list, buy-price list, buy-volume list].
player_1_status = [U1.get_expense_list(), U1.get_price_of_stock_buy_list(), U1.get_volume()]
dobby_2_status = [U2.get_expense_list(), U2.get_price_of_stock_buy_list(), U2.get_volume()]
stock_list = [Stock1.get_high_price(), Stock1.get_low_price(), Stock1.get_size()]
# print(player_1_status, '\n', dobby_2_status, '\n', stock_list)
# Execute end game
# Add cashback amount to totals
(cb1, cb2) = cashback(player_1_status, dobby_2_status)
dobby_2_status.append(cb2)
player_1_status.append(cb1)
#Final value
dobby_final_value = final_price(dobby_2_status, stock_list)
dobby_2_status.append(dobby_final_value)
# print(dobby_final_value)
player_final_value = final_price(player_1_status, stock_list)
player_1_status.append(player_final_value)

# NOTE(review): `pd` and `np` are not imported in this file - presumably
# re-exported by one of the star imports above; confirm.
# create a dataframe for player 1 (rows = status lists, then transposed so
# each row is one round).
headers = ['Totalexpense','Buyprice','Buyvol','Cashback','Sellprice']
pd1 = pd.DataFrame(player_1_status, headers)
pd1.index.name = 'Round'
pd1 = pd1.T
pd1['Sellvalue'] = pd1.Buyvol * pd1.Sellprice
pd1['GainLoss'] = pd1.Sellvalue + pd1.Cashback - pd1.Totalexpense
pd1.index = np.arange(1, len(pd1) + 1) # set index of dataframe sarting from 1
pd1.loc['Column_Total']= pd1.sum(numeric_only=True, axis=0)

# create a dataframe for player 2
headers = ['Totalexpense','Buyprice','Buyvol','Cashback','Sellprice']
pd2 = pd.DataFrame(dobby_2_status, headers)
pd2.index.name = 'Round'
pd2 = pd2.T
pd2['Sellvalue'] = pd2.Buyvol * pd2.Sellprice
pd2['GainLoss'] = pd2.Sellvalue + pd2.Cashback - pd2.Totalexpense
pd2.index = np.arange(1, len(pd2) + 1) # set index of dataframe sarting from 1
pd2.loc['Column_Total']= pd2.sum(numeric_only=True, axis=0)

# print the final result
print("\n------Final Result------\n")
print("Player")
print(pd1)
print("\nDobby")
print(pd2)

# The winner is whoever ends with the larger total gain/loss.
player_score = pd1.loc['Column_Total']['GainLoss']
dobby_score = pd2.loc['Column_Total']['GainLoss']
if dobby_score > player_score:
    print("The winner is: DOBBY!!!")
else:
    print("The winner is: YOU!!!") | PypiClean |
/BlueWhale3-3.31.3.tar.gz/BlueWhale3-3.31.3/Orange/data/pandas_compat.py | from unittest.mock import patch
import numpy as np
from pandas.core.dtypes.common import is_string_dtype
from scipy import sparse as sp
from scipy.sparse import csr_matrix
import pandas as pd
from pandas.core.arrays import SparseArray
from pandas.core.arrays.sparse.dtype import SparseDtype
from pandas.api.types import (
is_categorical_dtype, is_object_dtype,
is_datetime64_any_dtype, is_numeric_dtype, is_integer_dtype
)
from Orange.data import (
Table, Domain, DiscreteVariable, StringVariable, TimeVariable,
ContinuousVariable,
)
from Orange.data.table import Role
__all__ = ['table_from_frame', 'table_to_frame']
class OrangeDataFrame(pd.DataFrame):
    # pandas propagates these attributes through operations (see _metadata
    # protocol); they carry the Orange-specific state.
    _metadata = ["orange_variables", "orange_weights",
                 "orange_attributes", "orange_role"]

    def __init__(self, *args, **kwargs):
        """
        A pandas DataFrame wrapper for one of Table's numpy arrays:
            - sets index values corresponding to Orange's global row indices
              e.g. ['_o1', '_o2'] (allows Orange to handle selection)
            - remembers the array's role in the Table (attribute, class var, meta)
            - keeps the Variable objects, and uses them in back-to-table conversion,
              should a column name match a variable's name
            - stores weight values (legacy)

        Parameters
        ----------
        table : Table
        orange_role : Role, (default=Role.Attribute)
            When converting back to an orange table, the DataFrame will
            convert to the right role (attrs, class vars, or metas)
        """
        # Plain-DataFrame construction path: first argument is not a Table.
        if len(args) <= 0 or not isinstance(args[0], Table):
            super().__init__(*args, **kwargs)
            return
        table = args[0]
        # Role may come as a keyword or as the second positional argument.
        if 'orange_role' in kwargs:
            role = kwargs.pop('orange_role')
        elif len(args) >= 2:
            role = args[1]
        else:
            role = Role.Attribute

        # Pick the numpy array and variable list matching the role.
        if role == Role.Attribute:
            data = table.X
            vars_ = table.domain.attributes
        elif role == Role.ClassAttribute:
            data = table.Y
            vars_ = table.domain.class_vars
        else:  # if role == Role.Meta:
            data = table.metas
            vars_ = table.domain.metas

        # '_o<id>' index values let Orange recognize its own rows later.
        index = ['_o' + str(id_) for id_ in table.ids]
        varsdict = {var._name: var for var in vars_}
        columns = varsdict.keys()

        if sp.issparse(data):
            # Sparse path: one pandas SparseArray per column.
            data = data.asformat('csc')
            sparrays = [SparseArray.from_spmatrix(data[:, i]) for i in range(data.shape[1])]
            data = dict(enumerate(sparrays))
            super().__init__(data, index=index, **kwargs)
            self.columns = columns
            # a hack to keep Orange df _metadata in sparse->dense conversion
            self.sparse.to_dense = self.__patch_constructor(self.sparse.to_dense)
        else:
            super().__init__(data=data, index=index, columns=columns, **kwargs)

        self.orange_role = role
        self.orange_variables = varsdict
        self.orange_weights = (dict(zip(index, table.W))
                               if table.W.size > 0 else {})
        self.orange_attributes = table.attributes

    def __patch_constructor(self, method):
        # Wrap *method* so that, while it runs, pandas.DataFrame resolves to
        # OrangeDataFrame, and copy our metadata onto the result.
        def new_method(*args, **kwargs):
            with patch(
                    'pandas.DataFrame',
                    OrangeDataFrame
            ):
                df = method(*args, **kwargs)
            df.__finalize__(self)
            return df
        return new_method

    @property
    def _constructor(self):
        # pandas hook: derived frames keep the OrangeDataFrame type.
        return OrangeDataFrame

    def to_orange_table(self):
        """Round-trip back to an Orange Table."""
        return table_from_frame(self)
def __finalize__(self, other, method=None, **_):
    """
    propagate metadata from other to self

    Parameters
    ----------
    other : the object from which to get the attributes that we are going
        to propagate
    method : optional, a passed method name ; possibly to take different
        types of propagation actions based on this
    """
    # concat/merge hand us a wrapper holding several source frames.
    if method == 'concat':
        objs = other.objs
    elif method == 'merge':
        objs = other.left, other.right
    else:
        objs = [other]

    orange_role = getattr(self, 'orange_role', None)
    dicts = {dname: getattr(self, dname, {})
             for dname in ('orange_variables',
                           'orange_weights',
                           'orange_attributes')}
    for obj in objs:
        # Last source with a role wins; dict metadata is merged.
        other_role = getattr(obj, 'orange_role', None)
        if other_role is not None:
            orange_role = other_role

        for dname, dict_ in dicts.items():
            other_dict = getattr(obj, dname, {})
            dict_.update(other_dict)

    # object.__setattr__ bypasses pandas' attribute interception.
    object.__setattr__(self, 'orange_role', orange_role)
    for dname, dict_ in dicts.items():
        object.__setattr__(self, dname, dict_)

    return self


# NOTE(review): module-level monkeypatch - this replaces __finalize__ for
# every pandas DataFrame in the process, not only OrangeDataFrame.
pd.DataFrame.__finalize__ = __finalize__
def _is_discrete(s, force_nominal):
return (is_categorical_dtype(s) or
is_object_dtype(s) and (force_nominal or
s.nunique() < s.size ** .666))
def _is_datetime(s):
if is_datetime64_any_dtype(s):
return True
try:
if is_object_dtype(s):
# utc=True - to allow different timezones in a series object
pd.to_datetime(s, infer_datetime_format=True, utc=True)
return True
except Exception: # pylint: disable=broad-except
pass
return False
def _convert_datetime(series, var):
    """Convert *series* to seconds-since-epoch floats for a TimeVariable.

    Also sets ``var.have_date`` / ``var.have_time`` (and ``var.timezone``
    when the data is tz-aware) as a side effect.
    """
    def col_type(dt):
        """Test if is date, time or datetime"""
        dt_nonnat = dt[~pd.isnull(dt)]  # nat == nat is False
        if (dt_nonnat.dt.floor("d") == dt_nonnat).all():
            # all times are 00:00:00.0 - pure date
            return 1, 0
        elif (dt_nonnat.dt.date == pd.Timestamp("now").date()).all():
            # all dates are today's date - pure time
            return 0, 1  # pure time
        else:
            # else datetime
            return 1, 1

    try:
        dt = pd.to_datetime(series)
    except ValueError:
        # series with type object and different timezones will raise a
        # ValueError - normalizing to utc
        dt = pd.to_datetime(series, utc=True)

    # set variable type to date, time or datetime
    var.have_date, var.have_time = col_type(dt)
    if dt.dt.tz is not None:
        # set timezone if available and convert to utc
        var.timezone = dt.dt.tz
        dt = dt.dt.tz_convert("UTC")

    if var.have_time and not var.have_date:
        # if time only measure seconds from midnight - equal to setting date
        # to unix epoch
        return (
            (dt.dt.tz_localize(None) - pd.Timestamp("now").normalize())
            / pd.Timedelta("1s")
        ).values
    return (
        (dt.dt.tz_localize(None) - pd.Timestamp("1970-01-01")) / pd.Timedelta("1s")
    ).values
def vars_from_df(df, role=None, force_nominal=False):
    """Infer Orange variables and data arrays from a pandas DataFrame.

    Returns a pair ``(XYM, domain)`` where ``XYM`` is the list
    ``[X, Y, metas]`` of numpy/sparse arrays (``Y``/``metas`` may be None)
    and ``domain`` is the corresponding Orange Domain.
    """
    if role is None and hasattr(df, 'orange_role'):
        _role = df.orange_role
    else:
        _role = role

    # If df index is not a simple RangeIndex (or similar), put it into data
    if (
            # not range-like index - test first to skip slow startswith(_o) check
            not (
                df.index.is_integer()
                and (df.index.is_monotonic_increasing or df.index.is_monotonic_decreasing)
            )
            # check that it does not contain Orange index
            and (
                # startswith is slow (for long drs) - firs check if col has strings
                isinstance(df.index, pd.MultiIndex)
                or not is_string_dtype(df.index)
                or not any(str(i).startswith("_o") for i in df.index)
            )
    ):
        df = df.reset_index()

    # Per-role column names (..cols), conversion callables (..expr) and
    # the resulting Orange variables.
    Xcols, Ycols, Mcols = [], [], []
    Xexpr, Yexpr, Mexpr = [], [], []
    attrs, class_vars, metas = [], [], []

    contains_strings = _role == Role.Meta

    for column in df.columns:
        s = df[column]
        if hasattr(df, 'orange_variables') and column in df.orange_variables:
            # Column came from an Orange table: reuse the original variable.
            original_var = df.orange_variables[column]
            var = original_var.copy(compute_value=None)
            if _role == Role.Attribute:
                Xcols.append(column)
                Xexpr.append(None)
                attrs.append(var)
            elif _role == Role.ClassAttribute:
                Ycols.append(column)
                Yexpr.append(None)
                class_vars.append(var)
            else:  # if role == Role.Meta:
                Mcols.append(column)
                Mexpr.append(None)
                metas.append(var)
        elif _is_datetime(s):
            var = TimeVariable(str(column))
            attrs.append(var)
            Xcols.append(column)
            Xexpr.append(_convert_datetime)
        elif _is_discrete(s, force_nominal):
            discrete = s.astype('category').cat
            var = DiscreteVariable(str(column),
                                   discrete.categories.astype(str).tolist())
            attrs.append(var)
            Xcols.append(column)

            def to_cat(s, _):
                x = s.astype("category").cat.codes
                # it is same than x.replace(-1, np.nan), but much faster
                x = x.where(x != -1, np.nan)
                return np.asarray(x)

            Xexpr.append(to_cat)
        elif is_numeric_dtype(s):
            var = ContinuousVariable(
                # set number of decimals to 0 if int else keeps default behaviour
                str(column), number_of_decimals=(0 if is_integer_dtype(s) else None)
            )
            attrs.append(var)
            Xcols.append(column)
            Xexpr.append(None)
        else:
            # Everything else becomes a string meta column.
            contains_strings = True
            var = StringVariable(str(column))
            metas.append(var)
            Mcols.append(column)
            Mexpr.append(lambda s, _: np.asarray(s, dtype=object))

    # if role isn't explicitly set, try to
    # export dataframes into one contiguous block.
    # for this all columns must be of the same role
    if isinstance(df, OrangeDataFrame) \
            and not role \
            and contains_strings \
            and not force_nominal:
        attrs.extend(class_vars)
        attrs.extend(metas)
        metas = attrs
        Xcols.extend(Ycols)
        Xcols.extend(Mcols)
        Mcols = Xcols
        Xexpr.extend(Yexpr)
        Xexpr.extend(Mexpr)
        Mexpr = Xexpr

        attrs, class_vars = [], []
        Xcols, Ycols = [], []
        Xexpr, Yexpr = [], []

    XYM = []
    for Avars, Acols, Aexpr in zip(
            (attrs, class_vars, metas),
            (Xcols, Ycols, Mcols),
            (Xexpr, Yexpr, Mexpr)):
        if not Acols:
            # X must always be a (n, 0) array; Y/metas may be None.
            A = None if Acols != Xcols else np.empty((df.shape[0], 0))
            XYM.append(A)
            continue
        if not any(Aexpr):
            # No conversions needed: take the data as-is (sparse or dense).
            Adf = df if all(c in Acols
                            for c in df.columns) else df[Acols]
            if all(isinstance(a, SparseDtype) for a in Adf.dtypes):
                A = csr_matrix(Adf.sparse.to_coo())
            else:
                A = np.asarray(Adf)
            XYM.append(A)
            continue
        # we'll have to copy the table to resolve any expressions
        # TODO eliminate expr (preprocessing for pandas -> table)
        A = np.array([expr(df[col], var) if expr else np.asarray(df[col])
                      for var, col, expr in zip(Avars, Acols, Aexpr)]).T
        XYM.append(A)

    # Let the tables share memory with pandas frame
    if XYM[1] is not None and XYM[1].ndim == 2 and XYM[1].shape[1] == 1:
        # Single class variable: flatten Y to one dimension.
        XYM[1] = XYM[1][:, 0]

    return XYM, Domain(attrs, class_vars, metas)
def table_from_frame(df, *, force_nominal=False):
    """Convert a pandas DataFrame (or OrangeDataFrame) to an Orange Table.

    Weights, table attributes and original row ids are restored when the
    frame carries Orange metadata; otherwise fresh ids are assigned.
    """
    XYM, domain = vars_from_df(df, force_nominal=force_nominal)

    if hasattr(df, 'orange_weights') and hasattr(df, 'orange_attributes'):
        # Restore weights only if every row still has one.
        W = [df.orange_weights[i] for i in df.index if i in df.orange_weights]
        if len(W) != len(df.index):
            W = None
        attributes = df.orange_attributes
        if isinstance(df.index, pd.MultiIndex) or not is_string_dtype(df.index):
            # we can skip checking for Orange indices when MultiIndex an when
            # not string dtype and so speedup the conversion
            ids = None
        else:
            # '_o<id>' index entries map back to original row ids.
            ids = [
                int(i[2:])
                if str(i).startswith("_o") and i[2:].isdigit()
                else Table.new_id()
                for i in df.index
            ]
    else:
        W = None
        attributes = None
        ids = None

    return Table.from_numpy(
        domain,
        *XYM,
        W=W,
        attributes=attributes,
        ids=ids
    )
def table_from_frames(xdf, ydf, mdf):
    """Assemble an Orange Table from three frames: X, class vars, and metas.

    All three frames must have the same number of rows.
    """
    dfs = xdf, ydf, mdf

    if not all(df.shape[0] == xdf.shape[0] for df in dfs):
        raise ValueError(f"Leading dimension mismatch "
                         f"(not {xdf.shape[0]} == {ydf.shape[0]} == {mdf.shape[0]})")

    xXYM, xDomain = vars_from_df(xdf, role=Role.Attribute)
    yXYM, yDomain = vars_from_df(ydf, role=Role.ClassAttribute)
    mXYM, mDomain = vars_from_df(mdf, role=Role.Meta)

    # Take X from the attribute frame, Y from the class frame, metas from
    # the meta frame; each vars_from_df call fills only its own slot.
    XYM = (xXYM[0], yXYM[1], mXYM[2])

    domain = Domain(xDomain.attributes, yDomain.class_vars, mDomain.metas)

    # A row keeps its original Orange id only when all three frames agree.
    indexes = [df.index for df in dfs]
    ids = [
        int(x[2:])
        if str(x).startswith("_o") and x[2:].isdigit() and x == y == m
        else Table.new_id()
        for x, y, m in zip(*indexes)
    ]

    attributes = {}
    W = None
    # NOTE(review): W is recomputed for every frame, so only the last
    # OrangeDataFrame in (xdf, ydf, mdf) determines the weights - and a
    # trailing plain DataFrame resets W to None. Confirm this is intended.
    for df in dfs:
        if isinstance(df, OrangeDataFrame):
            W = [df.orange_weights[i] for i in df.index
                 if i in df.orange_weights]
            if len(W) != len(df.index):
                W = None
        else:
            W = None
        attributes.update(df.orange_attributes)

    return Table.from_numpy(
        domain,
        *XYM,
        W=W,
        attributes=attributes,
        ids=ids
    )
def table_to_frame(tab, include_metas=False):
    """
    Convert Orange.data.Table to pandas.DataFrame

    Parameters
    ----------
    tab : Table

    include_metas : bool, (default=False)
        Include table metas into dataframe.

    Returns
    -------
    pandas.DataFrame
    """
    def _column_to_series(col, vals):
        # Map one Orange variable + its raw values to a (name, Series) pair.
        result = ()
        if col.is_discrete:
            # NaN codes become -1, which pandas Categorical renders as NaN.
            codes = pd.Series(vals).fillna(-1).astype(int)
            result = (col.name, pd.Categorical.from_codes(
                codes=codes, categories=col.values, ordered=True
            ))
        elif col.is_time:
            result = (col.name, pd.to_datetime(vals, unit='s').to_series().reset_index()[0])
        elif col.is_continuous:
            dt = float
            # np.nan are not compatible with int column
            # using pd.isnull since np.isnan fails on array with dtype object
            # which can happen when metas contain column with strings
            if col.number_of_decimals == 0 and not np.any(pd.isnull(vals)):
                dt = int
            result = (col.name, pd.Series(vals).astype(dt))
        elif col.is_string:
            result = (col.name, pd.Series(vals))
        return result

    def _columns_to_series(cols, vals):
        return [_column_to_series(col, vals[:, i]) for i, col in enumerate(cols)]

    x, y, metas = [], [], []
    domain = tab.domain
    if domain.attributes:
        x = _columns_to_series(domain.attributes, tab.X)
    if domain.class_vars:
        # Y may be 1-D for a single class variable; make it 2-D.
        y_values = tab.Y.reshape(tab.Y.shape[0], len(domain.class_vars))
        y = _columns_to_series(domain.class_vars, y_values)
    if domain.metas:
        metas = _columns_to_series(domain.metas, tab.metas)
    all_series = dict(x + y + metas)
    all_vars = tab.domain.variables
    if include_metas:
        all_vars += tab.domain.metas
    # Preserve the table's original column ordering.
    original_column_order = [var.name for var in all_vars]
    unsorted_columns_df = pd.DataFrame(all_series)
    return unsorted_columns_df[original_column_order]
def table_to_frames(table):
    """Split an Orange Table into three OrangeDataFrames: X, Y, metas."""
    roles = (Role.Attribute, Role.ClassAttribute, Role.Meta)
    return tuple(OrangeDataFrame(table, role) for role in roles)
def amend_table_with_frame(table, df, role):
    """Replace one part (X, Y, or metas) of *table* with data from *df*.

    Mutates *table* in place; row counts must match.
    """
    arr = Role.get_arr(role, table)
    if arr.shape[0] != df.shape[0]:
        raise ValueError(f"Leading dimension mismatch "
                         f"(not {arr.shape[0]} == {df.shape[0]})")

    XYM, domain = vars_from_df(df, role=role)

    # Rebuild the domain with only the amended part swapped out.
    if role == Role.Attribute:
        table.domain = Domain(domain.attributes,
                              table.domain.class_vars,
                              table.domain.metas)
        table.X = XYM[0]
    elif role == Role.ClassAttribute:
        table.domain = Domain(table.domain.attributes,
                              domain.class_vars,
                              table.domain.metas)
        table.Y = XYM[1]
    else:  # if role == Role.Meta:
        table.domain = Domain(table.domain.attributes,
                              table.domain.class_vars,
                              domain.metas)
        table.metas = XYM[2]

    if isinstance(df, OrangeDataFrame):
        table.attributes.update(df.orange_attributes) | PypiClean |
/Dililatum-0.1.tar.gz/Dililatum-0.1/dililatum/walk.py | from datetime import datetime
import os.path
import pygame
from pygame.locals import *
class WalkTestAdvanced:
    """Interactive pygame viewer cycling through directional walk frames.

    NOTE(review): uses ``os.path.walk``, which exists only in Python 2 -
    this module appears to target Python 2; confirm before porting.
    """

    def __init__(self, sett):
        # sett: a settings object carrying directory, frame delay (us),
        # initial direction key, background spec, and window size.
        self.directory = sett.directory
        self.time = sett.time
        self.direction = sett.direction
        self.bgtype = sett.background
        self.size = (sett.width, sett.height)
        self.frames = {}

    def file2surface(self, dirname, files):
        """Load each file as (surface, width, height), in place."""
        for i in range(len(files)):
            img = pygame.image.load(os.path.join(self.directory,
                                                 dirname, files[i]))
            files[i] = (img.convert_alpha(),
                        img.get_width(),
                        img.get_height())
        return files

    def path_walk(self, name, dirname, files):
        # os.path.walk callback: load the direction's frames, sorted by name.
        self.frames[name] = self.file2surface(dirname, sorted(files))

    def start(self):
        """Open the window, load all 8 direction frame sets, run the loop."""
        pygame.display.init()
        self.screen = pygame.display.set_mode(self.size)
        # One subdirectory per direction: left/center/right x top/middle/bottom.
        for x in 'lt', 'ct', 'rt', 'lm', 'rm', 'lb', 'cb', 'rb':
            os.path.walk(os.path.join(self.directory, x), self.path_walk, x)
        self.bgsurface = pygame.Surface(self.screen.get_size()).convert()
        self.clock = pygame.time.Clock()
        self.loop()

    def microseconds(self, tdelta):
        # NOTE(review): returns only the microseconds component, ignoring
        # whole seconds of the timedelta - presumably fine for sub-second
        # frame delays; confirm.
        return tdelta.microseconds

    def loop(self):
        """Main loop: animate frames, cycle background, handle key input."""
        done = False
        i = 0
        cflow = self.bgtype == 'colorflow'
        if cflow:
            # Start the RGB color cycle at pure red.
            cr = 255
            cg = 0
            cb = 0
        else:
            color = self.bgtype
        time = datetime.now()
        while not done:
            self.clock.tick(30)  # cap at 30 fps
            if cflow:
                color = (cr, cg, cb)
            self.bgsurface.fill(color)
            self.screen.blit(self.bgsurface, (0, 0))
            if i >= len(self.frames[self.direction]):
                i = 0
            img = self.frames[self.direction][i]
            # Center the frame in a 300x600 area.
            self.screen.blit(img[0], ((300 - img[1]) / 2,
                                      (600 - img[2]) / 2))
            pygame.display.flip()
            if cflow:
                # Walk the RGB hue wheel in steps of 5, then clamp.
                if cr == 255 and cg < 255 and cb == 0:
                    cg += 5
                elif cr > 0 and cg == 255 and cb == 0:
                    cr -= 5
                elif cr == 0 and cg == 255 and cb < 255:
                    cb += 5
                elif cr == 0 and cg > 0 and cb == 255:
                    cg -= 5
                elif cr < 255 and cg == 0 and cb == 255:
                    cr += 5
                elif cr == 255 and cg == 0 and cb > 0:
                    cb -= 5
                if cr > 255: cr = 255
                elif cr < 0: cr = 0
                if cg > 255: cg = 255
                elif cg < 0: cg = 0
                if cb > 255: cb = 255
                elif cb < 0: cb = 0
            for event in pygame.event.get():
                if event.type == QUIT:
                    done = True
                elif event.type == KEYDOWN:
                    # Escape quits; numpad/number keys pick a direction
                    # laid out like a phone keypad (7=up-left ... 3=down-right).
                    if event.key == K_ESCAPE:
                        done = True
                    elif event.key == K_KP7 or event.key == K_7:
                        self.direction = 'lt'
                    elif event.key == K_KP8 or event.key == K_8:
                        self.direction = 'ct'
                    elif event.key == K_KP9 or event.key == K_9:
                        self.direction = 'rt'
                    elif event.key == K_KP4 or event.key == K_4:
                        self.direction = 'lm'
                    elif event.key == K_KP6 or event.key == K_6:
                        self.direction = 'rm'
                    elif event.key == K_KP1 or event.key == K_1:
                        self.direction = 'lb'
                    elif event.key == K_KP2 or event.key == K_2:
                        self.direction = 'cb'
                    elif event.key == K_KP3 or event.key == K_3:
                        self.direction = 'rb'
            # Advance to the next frame once the configured delay elapses.
            if self.microseconds(datetime.now() - time) > self.time:
                i = (i + 1) % len(self.frames[self.direction])
                time = datetime.now()
/GRobot-0.0.13.tar.gz/GRobot-0.0.13/grobot/javascripts/ui-element.js |
// Shared constants for the UI-Element framework.
var UI_GLOBAL = {
    UI_PREFIX: 'ui'  // prefix identifying UI-specifier locators
    // Doctype and namespace used when wrapping testcase XHTML fragments
    // into complete documents for DOMParser.
    , XHTML_DOCTYPE: '<!DOCTYPE html PUBLIC '
        + '"-//W3C//DTD XHTML 1.0 Strict//EN" '
        + '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'
    , XHTML_XMLNS: 'http://www.w3.org/1999/xhtml'
};
//*****************************************************************************
// Exceptions
// Exception constructors. Each instance carries a human-readable message
// and a name identifying the exception type.
function UIElementException(message)
{
    this.message = message;
    this.name = 'UIElementException';
}

function UIArgumentException(message)
{
    this.message = message;
    this.name = 'UIArgumentException';
}

function PagesetException(message)
{
    this.message = message;
    this.name = 'PagesetException';
}

function UISpecifierException(message)
{
    this.message = message;
    this.name = 'UISpecifierException';
}

function CommandMatcherException(message)
{
    this.message = message;
    this.name = 'CommandMatcherException';
}
//*****************************************************************************
// UI-Element core
/**
* The UIElement object. This has been crafted along with UIMap to make
* specifying UI elements using JSON as simple as possible. Object construction
* will fail if 1) a proper name isn't provided, 2) a faulty args argument is
* given, or 3) getLocator() returns undefined for a valid permutation of
* default argument values. See ui-doc.html for the documentation on the
* builder syntax.
*
* @param uiElementShorthand an object whose contents conform to the
* UI-Element builder syntax.
*
* @return a new UIElement object
* @throws UIElementException
*/
function UIElement(uiElementShorthand)
{
// a shorthand object might look like:
//
// {
// name: 'topic'
// , description: 'sidebar links to topic categories'
// , args: [
// {
// name: 'name'
// , description: 'the name of the topic'
// , defaultValues: topLevelTopics
// }
// ]
// , getLocator: function(args) {
// return this._listXPath +
// "/a[text()=" + args.name.quoteForXPath() + "]";
// }
// , getGenericLocator: function() {
// return this._listXPath + '/a';
// }
// // maintain testcases for getLocator()
// , testcase1: {
// // defaultValues used if args not specified
// args: { name: 'foo' }
// , xhtml: '<div id="topiclist">'
// + '<ul><li><a expected-result="1">foo</a></li></ul>'
// + '</div>'
// }
// // set a local element variable
// , _listXPath: "//div[@id='topiclist']/ul/li"
// }
//
// name cannot be null or an empty string. Enforce the same requirement for
// the description.
/**
* Recursively returns all permutations of argument-value pairs, given
* a list of argument definitions. Each argument definition will have
* a set of default values to use in generating said pairs. If an argument
* has no default values defined, it will not be included among the
* permutations.
*
* @param args a list of UIArguments
* @param inDocument the document object to pass to the getDefaultValues()
* method of each argument.
*
* @return a list of associative arrays containing key value pairs
*/
this.permuteArgs = function(args, inDocument) {
if (args.length == 0) {
return [];
}
var permutations = [];
var arg = args[0];
var remainingArgs = args.slice(1);
var subsequentPermutations = this.permuteArgs(remainingArgs,
inDocument);
var defaultValues = arg.getDefaultValues(inDocument);
// skip arguments for which no default values are defined. If the
// argument is a required one, then no permutations are possible.
if (defaultValues.length == 0) {
if (arg.required) {
return [];
}
else {
return subsequentPermutations;
}
}
for (var i = 0; i < defaultValues.length; ++i) {
var value = defaultValues[i];
var permutation;
if (subsequentPermutations.length == 0) {
permutation = {};
permutation[arg.name] = value + "";
permutations.push(permutation);
}
else {
for (var j = 0; j < subsequentPermutations.length; ++j) {
permutation = clone(subsequentPermutations[j]);
permutation[arg.name] = value + "";
permutations.push(permutation);
}
}
}
return permutations;
}
/**
* Returns a list of all testcases for this UIElement.
*/
this.getTestcases = function()
{
return this.testcases;
}
/**
* Run all unit tests, stopping at the first failure, if any. Return true
* if no failures encountered, false otherwise. See the following thread
* regarding use of getElementById() on XML documents created by parsing
* text via the DOMParser:
*
* http://groups.google.com/group/comp.lang.javascript/browse_thread/thread/2b1b82b3c53a1282/
*/
this.test = function()
{
var parser = new DOMParser();
var testcases = this.getTestcases();
testcaseLoop: for (var i = 0; i < testcases.length; ++i) {
var testcase = testcases[i];
var xhtml = UI_GLOBAL.XHTML_DOCTYPE + '<html xmlns="'
+ UI_GLOBAL.XHTML_XMLNS + '">' + testcase.xhtml + '</html>';
var doc = parser.parseFromString(xhtml, "text/xml");
if (doc.firstChild.nodeName == 'parsererror') {
safe_alert('Error parsing XHTML in testcase "' + testcase.name
+ '" for UI element "' + this.name + '": ' + "\n"
+ doc.firstChild.firstChild.nodeValue);
}
// we're no longer using the default locators when testing, because
// args is now required
var locator = parse_locator(this.getLocator(testcase.args));
var results;
if (locator.type == 'xpath' || (locator.type == 'implicit' &&
locator.string.substring(0, 2) == '//')) {
// try using the javascript xpath engine to avoid namespace
// issues. The xpath does have to be lowercase however, it
// seems.
results = eval_xpath(locator.string, doc,
{ allowNativeXpath: false, returnOnFirstMatch: true });
}
else {
// piece the locator back together
locator = (locator.type == 'implicit')
? locator.string
: locator.type + '=' + locator.string;
results = eval_locator(locator, doc);
}
if (results.length && results[0].hasAttribute('expected-result')) {
continue testcaseLoop;
}
// testcase failed
if (is_IDE()) {
var msg = 'Testcase "' + testcase.name
+ '" failed for UI element "' + this.name + '":';
if (!results.length) {
msg += '\n"' + (locator.string || locator) + '" did not match any elements!';
}
else {
msg += '\n' + results[0] + ' was not the expected result!';
}
safe_alert(msg);
}
return false;
}
return true;
};
/**
* Creates a set of locators using permutations of default values for
* arguments used in the locator construction. The set is returned as an
* object mapping locators to key-value arguments objects containing the
* values passed to getLocator() to create the locator.
*
* @param opt_inDocument (optional) the document object of the "current"
* page when this method is invoked. Some arguments
* may have default value lists that are calculated
* based on the contents of the page.
*
* @return a list of locator strings
* @throws UIElementException
*/
this.getDefaultLocators = function(opt_inDocument) {
    var defaultLocators = {};
    if (this.args.length == 0) {
        // no arguments: the single locator maps to an empty args object
        defaultLocators[this.getLocator({})] = {};
    }
    else {
        var permutations = this.permuteArgs(this.args, opt_inDocument);
        if (permutations.length != 0) {
            for (var i = 0; i < permutations.length; ++i) {
                var args = permutations[i];
                var locator = this.getLocator(args);
                if (!locator) {
                    // bug fix: the message previously interpolated the bare
                    // identifier "name", which resolves to window.name in a
                    // browser; the element's name lives on this.name.
                    throw new UIElementException('Error in UIElement(): '
                        + 'no getLocator return value for element "'
                        + this.name + '"');
                }
                defaultLocators[locator] = args;
            }
        }
        else {
            // try using no arguments. Parse the locator to make sure it's
            // really good. If it doesn't work, fine.
            try {
                var locator = this.getLocator();
                parse_locator(locator);
                defaultLocators[locator] = {};
            }
            catch (e) {
                safe_log('debug', e.message);
            }
        }
    }
    return defaultLocators;
};
/**
* Validate the structure of the shorthand notation this object is being
* initialized with. Throws an exception if there's a validation error.
*
* @param uiElementShorthand
*
* @throws UIElementException
*/
this.validate = function(uiElementShorthand)
{
    // prefix every failure with a dump of the offending shorthand
    var msg = "UIElement validation error:\n" + print_r(uiElementShorthand);
    if (!uiElementShorthand.name) {
        throw new UIElementException(msg + 'no name specified!');
    }
    if (!uiElementShorthand.description) {
        throw new UIElementException(msg + 'no description specified!');
    }
    // any one of the four locator-producing properties satisfies the check
    var hasLocator = uiElementShorthand.locator
        || uiElementShorthand.getLocator
        || uiElementShorthand.xpath
        || uiElementShorthand.getXPath;
    if (!hasLocator) {
        throw new UIElementException(msg + 'no locator specified!');
    }
};
// Builds this UIElement from its shorthand: installs getLocator() (and the
// optional generic / offset locator hooks), collects testcases and local
// "_"-prefixed variables, constructs the UIArgument list, and precomputes
// default locators unless their construction must be deferred to run time.
this.init = function(uiElementShorthand)
{
    this.validate(uiElementShorthand);

    this.name = uiElementShorthand.name;
    this.description = uiElementShorthand.description;

    // construct a new getLocator() method based on the locator property,
    // or use the provided function. We're deprecating the xpath property
    // and getXPath() function, but still allow for them for backwards
    // compatability.
    if (uiElementShorthand.locator) {
        this.getLocator = function(args) {
            return uiElementShorthand.locator;
        };
    }
    else if (uiElementShorthand.getLocator) {
        this.getLocator = uiElementShorthand.getLocator;
    }
    else if (uiElementShorthand.xpath) {
        this.getLocator = function(args) {
            return uiElementShorthand.xpath;
        };
    }
    else {
        this.getLocator = uiElementShorthand.getXPath;
    }

    // the generic locator is an optional coarse pre-filter used when mapping
    // page elements back to UI elements
    if (uiElementShorthand.genericLocator) {
        this.getGenericLocator = function() {
            return uiElementShorthand.genericLocator;
        };
    }
    else if (uiElementShorthand.getGenericLocator) {
        this.getGenericLocator = uiElementShorthand.getGenericLocator;
    }

    if (uiElementShorthand.getOffsetLocator) {
        this.getOffsetLocator = uiElementShorthand.getOffsetLocator;
    }

    // get the testcases and local variables
    this.testcases = [];
    var localVars = {};
    for (var attr in uiElementShorthand) {
        // any property whose name starts with "testcase" is a testcase
        if (attr.match(/^testcase/)) {
            var testcase = uiElementShorthand[attr];
            // a testcase for an element with arguments must supply args
            if (uiElementShorthand.args &&
                uiElementShorthand.args.length && !testcase.args) {
                safe_alert('No args defined in ' + attr + ' for UI element '
                    + this.name + '! Skipping testcase.');
                continue;
            }
            testcase.name = attr;
            this.testcases.push(testcase);
        }
        // "_"-prefixed properties are local variables, copied both onto this
        // object and into the localVars map handed to each UIArgument
        else if (attr.match(/^_/)) {
            this[attr] = uiElementShorthand[attr];
            localVars[attr] = uiElementShorthand[attr];
        }
    }

    // create the arguments
    this.args = []
    this.argsOrder = [];
    if (uiElementShorthand.args) {
        for (var i = 0; i < uiElementShorthand.args.length; ++i) {
            var arg = new UIArgument(uiElementShorthand.args[i], localVars);
            this.args.push(arg);
            this.argsOrder.push(arg.name);

            // if an exception is thrown when invoking getDefaultValues()
            // with no parameters passed in, assume the method requires an
            // inDocument parameter, and thus may only be invoked at run
            // time. Mark the UI element object accordingly.
            try {
                arg.getDefaultValues();
            }
            catch (e) {
                this.isDefaultLocatorConstructionDeferred = true;
            }
        }
    }

    // precompute default locators now unless an argument needs a document
    if (!this.isDefaultLocatorConstructionDeferred) {
        this.defaultLocators = this.getDefaultLocators();
    }
};
this.init(uiElementShorthand);
}
// hang this off the UIElement "namespace". This is a composite strategy:
// each concrete strategy is tried in descending order of preference, and
// the first one to produce an offset locator wins.
UIElement.defaultOffsetLocatorStrategy = function(locatedElement, pageElement) {
    var strategies = [
        UIElement.linkXPathOffsetLocatorStrategy,
        UIElement.preferredAttributeXPathOffsetLocatorStrategy,
        UIElement.simpleXPathOffsetLocatorStrategy
    ];
    for (var i = 0; i < strategies.length; ++i) {
        var offsetLocator = strategies[i](locatedElement, pageElement);
        if (offsetLocator) {
            return offsetLocator;
        }
    }
    return null;
};
// Builds an offset xpath by chaining the recorder's relative-xpath steps
// from the page element up to (but excluding) the located element, then
// verifies the xpath round-trips to the same page element.
UIElement.simpleXPathOffsetLocatorStrategy = function(locatedElement,
    pageElement)
{
    if (!is_ancestor(locatedElement, pageElement)) {
        return null;
    }
    var recorder = Recorder.get(locatedElement.ownerDocument.defaultView);
    var builders = recorder.locatorBuilders;
    var xpath = "";
    // walk upward, prepending one path step per level
    for (var node = pageElement;
         node != null && node != locatedElement;
         node = node.parentNode) {
        xpath = builders.relativeXPathFromParent(node) + xpath;
    }
    var results = eval_xpath(xpath, locatedElement.ownerDocument,
        { contextNode: locatedElement });
    return (results.length > 0 && results[0] == pageElement) ? xpath : null;
};
// Offset strategy for anchor elements: matches the link by its trimmed
// text content via normalize-space(), verifying the xpath round-trips.
UIElement.linkXPathOffsetLocatorStrategy = function(locatedElement, pageElement)
{
    // only applies to anchors contained within the located element
    if (pageElement.nodeName != 'A' ||
        !is_ancestor(locatedElement, pageElement)) {
        return null;
    }
    var text = pageElement.textContent
        .replace(/^\s+/, "")
        .replace(/\s+$/, "");
    if (!text) {
        return null;
    }
    var xpath = '/descendant::a[normalize-space()='
        + text.quoteForXPath() + ']';
    var results = eval_xpath(xpath, locatedElement.ownerDocument,
        { contextNode: locatedElement });
    if (results.length > 0 && results[0] == pageElement) {
        return xpath;
    }
    return null;
};
// compare to the "xpath:attributes" locator strategy defined in the IDE source.
// Tries a descendant xpath keyed on each preferred attribute in turn, and
// returns the first xpath that round-trips to the same page element.
UIElement.preferredAttributeXPathOffsetLocatorStrategy =
    function(locatedElement, pageElement)
{
    // single attributes, in descending order of preference
    var preferredAttributes = [ 'name', 'value', 'type', 'action', 'alt',
        'title', 'class', 'src', 'href', 'onclick' ];

    if (!is_ancestor(locatedElement, pageElement)) {
        return null;
    }

    var xpathBase = '/descendant::' + pageElement.nodeName.toLowerCase();
    for (var i = 0; i < preferredAttributes.length; ++i) {
        var attrName = preferredAttributes[i];
        var attrValue = pageElement.getAttribute(attrName);
        if (!attrValue) {
            continue;
        }
        var xpath = xpathBase + '[@' + attrName + '='
            + attrValue.quoteForXPath() + ']';
        var results = eval_xpath(xpath, locatedElement.ownerDocument,
            { contextNode: locatedElement });
        if (results.length > 0 && results[0] == pageElement) {
            return xpath;
        }
    }
    return null;
};
/**
* Constructs a UIArgument. This is mostly for checking that the values are
* valid.
*
* @param uiArgumentShorthand
* @param localVars
*
* @throws UIArgumentException
*/
function UIArgument(uiArgumentShorthand, localVars)
{
    /**
     * Throws if any required shorthand property is missing.
     *
     * @param uiArgumentShorthand
     * @throws UIArgumentException
     */
    this.validate = function(uiArgumentShorthand)
    {
        var msg = "UIArgument validation error:\n"
            + print_r(uiArgumentShorthand);
        if (!uiArgumentShorthand.name) {
            throw new UIArgumentException(msg + 'no name specified!');
        }
        if (!uiArgumentShorthand.description) {
            throw new UIArgumentException(msg + 'no description specified!');
        }
        if (!uiArgumentShorthand.defaultValues &&
            !uiArgumentShorthand.getDefaultValues) {
            throw new UIArgumentException(msg + 'no default values specified!');
        }
    };

    /**
     * Copies shorthand values onto this object, wrapping a static
     * defaultValues list in an accessor when necessary, and mirrors the
     * local variables onto this object.
     *
     * @param uiArgumentShorthand
     * @param localVars a map of local variables
     */
    this.init = function(uiArgumentShorthand, localVars)
    {
        this.validate(uiArgumentShorthand);

        this.name = uiArgumentShorthand.name;
        this.description = uiArgumentShorthand.description;
        this.required = uiArgumentShorthand.required || false;

        var staticValues = uiArgumentShorthand.defaultValues;
        this.getDefaultValues = staticValues
            ? function() { return staticValues; }
            : uiArgumentShorthand.getDefaultValues;

        for (var varName in localVars) {
            this[varName] = localVars[varName];
        }
    }

    this.init(uiArgumentShorthand, localVars);
}
/**
* The UISpecifier constructor is overloaded. If less than three arguments are
* provided, the first argument will be considered a UI specifier string, and
* will be split out accordingly. Otherwise, the first argument will be
* considered the path.
*
* @param uiSpecifierStringOrPagesetName a UI specifier string, or the pageset
* name of the UI specifier
* @param elementName the name of the element
* @param args an object associating keys to values
*
* @return new UISpecifier object
*/
function UISpecifier(uiSpecifierStringOrPagesetName, elementName, args)
{
    /**
     * Initializes this object from a UI specifier string of the form:
     *
     *     pagesetName::elementName(arg1=value1, arg2=value2, ...)
     *
     * into its component parts, and returns them as an object.
     *
     * @return an object containing the components of the UI specifier
     * @throws UISpecifierException
     */
    this._initFromUISpecifierString = function(uiSpecifierString) {
        var matches = /^(.*)::([^\(]+)\((.*)\)$/.exec(uiSpecifierString);
        if (matches == null) {
            // bug fix: the message formerly interpolated this.string, a
            // property that is never assigned anywhere; report the actual
            // input string instead.
            throw new UISpecifierException('Error in '
                + 'UISpecifier._initFromUISpecifierString(): "'
                + uiSpecifierString + '" is not a valid UI specifier string');
        }
        this.pagesetName = matches[1];
        this.elementName = matches[2];
        this.args = (matches[3]) ? parse_kwargs(matches[3]) : {};
    };

    /**
     * Override the toString() method to return the UI specifier string when
     * evaluated in a string context. Combines the UI specifier components into
     * a canonical UI specifier string and returns it.
     *
     * @return a UI specifier string
     * @throws UISpecifierException if any component is missing or invalid
     */
    this.toString = function() {
        // empty string is acceptable for the path, but it must be defined
        if (this.pagesetName == undefined) {
            throw new UISpecifierException('Error in UISpecifier.toString(): "'
                + this.pagesetName + '" is not a valid UI specifier pageset '
                + 'name');
        }
        if (!this.elementName) {
            throw new UISpecifierException('Error in UISpecifier.unparse(): "'
                + this.elementName + '" is not a valid UI specifier element '
                + 'name');
        }
        if (!this.args) {
            throw new UISpecifierException('Error in UISpecifier.unparse(): "'
                + this.args + '" are not valid UI specifier args');
        }

        // bug fix: uiElement was previously assigned without var, leaking
        // an implicit global on every call.
        var uiElement = UIMap.getInstance()
            .getUIElement(this.pagesetName, this.elementName);
        var kwargs;
        if (uiElement != null) {
            // order the keyword arguments canonically for this element
            kwargs = to_kwargs(this.args, uiElement.argsOrder);
        }
        else {
            // probably under unit test
            kwargs = to_kwargs(this.args);
        }
        return this.pagesetName + '::' + this.elementName + '(' + kwargs + ')';
    };

    // construct the object: a single argument is treated as a complete UI
    // specifier string; otherwise the components are given individually.
    if (arguments.length < 2) {
        this._initFromUISpecifierString(uiSpecifierStringOrPagesetName);
    }
    else {
        this.pagesetName = uiSpecifierStringOrPagesetName;
        this.elementName = elementName;
        this.args = (args) ? clone(args) : {};
    }
}
function Pageset(pagesetShorthand)
{
    /**
     * Returns true if the page is included in this pageset, false otherwise.
     * The page is specified by a document object. A page matches when its
     * path matches pathRegexp, every configured query parameter matches its
     * regexp, and the pageContent predicate accepts the document.
     *
     * @param inDocument the document object representing the page
     */
    this.contains = function(inDocument)
    {
        var urlParts = parseUri(unescape(inDocument.location.href));
        // strip leading and trailing slash before matching the path
        var path = urlParts.path
            .replace(/^\//, "")
            .replace(/\/$/, "");
        if (!this.pathRegexp.test(path)) {
            return false;
        }
        for (var paramName in this.paramRegexps) {
            var paramRegexp = this.paramRegexps[paramName];
            if (!paramRegexp.test(urlParts.queryKey[paramName])) {
                return false;
            }
        }
        if (!this.pageContent(inDocument)) {
            return false;
        }

        return true;
    }

    // Returns this pageset's UIElement objects as a list.
    this.getUIElements = function()
    {
        var uiElements = [];
        for (var uiElementName in this.uiElements) {
            uiElements.push(this.uiElements[uiElementName]);
        }
        return uiElements;
    };

    /**
     * Returns a list of UI specifier string stubs representing all UI elements
     * for this pageset. Stubs contain all required arguments, but leave
     * argument values blank. Each element stub is paired with the element's
     * description.
     *
     * @return a list of UI specifier string stubs
     */
    this.getUISpecifierStringStubs = function()
    {
        var stubs = [];
        for (var name in this.uiElements) {
            var uiElement = this.uiElements[name];
            var args = {};
            for (var i = 0; i < uiElement.args.length; ++i) {
                args[uiElement.args[i].name] = '';
            }
            var uiSpecifier = new UISpecifier(this.name, uiElement.name, args);
            stubs.push([
                UI_GLOBAL.UI_PREFIX + '=' + uiSpecifier.toString()
                , uiElement.description
            ]);
        }
        return stubs;
    }

    /**
     * Throws an exception on validation failure. Requires a name, a
     * description, and at least one of paths / pathRegexp / pageContent.
     */
    this._validate = function(pagesetShorthand)
    {
        var msg = "Pageset validation error:\n"
            + print_r(pagesetShorthand);

        if (!pagesetShorthand.name) {
            throw new PagesetException(msg + 'no name specified!');
        }
        if (!pagesetShorthand.description) {
            throw new PagesetException(msg + 'no description specified!');
        }
        if (!pagesetShorthand.paths &&
            !pagesetShorthand.pathRegexp &&
            !pagesetShorthand.pageContent) {
            throw new PagesetException(msg
                + 'no path, pathRegexp, or pageContent specified!');
        }
    };

    // Builds this pageset from its shorthand: assembles the path-matching
    // regexp (optional escaped pathPrefix plus either an alternation of
    // escaped literal paths or a user pathRegexp), compiles any query
    // parameter regexps, and installs the pageContent predicate
    // (defaulting to always-true).
    this.init = function(pagesetShorthand)
    {
        this._validate(pagesetShorthand);

        this.name = pagesetShorthand.name;
        this.description = pagesetShorthand.description;

        var pathPrefixRegexp = pagesetShorthand.pathPrefix
            ? RegExp.escape(pagesetShorthand.pathPrefix) : "";
        var pathRegexp = '^' + pathPrefixRegexp;

        if (pagesetShorthand.paths != undefined) {
            pathRegexp += '(?:';
            for (var i = 0; i < pagesetShorthand.paths.length; ++i) {
                if (i > 0) {
                    pathRegexp += '|';
                }
                pathRegexp += RegExp.escape(pagesetShorthand.paths[i]);
            }
            pathRegexp += ')$';
        }
        else if (pagesetShorthand.pathRegexp) {
            pathRegexp += '(?:' + pagesetShorthand.pathRegexp + ')$';
        }
        // NOTE(review): when neither paths nor pathRegexp is given, the
        // pattern is just '^' + prefix, i.e. an unanchored prefix match —
        // presumably intentional for pageContent-only pagesets; confirm.

        this.pathRegexp = new RegExp(pathRegexp);
        this.paramRegexps = {};
        for (var paramName in pagesetShorthand.paramRegexps) {
            this.paramRegexps[paramName] =
                new RegExp(pagesetShorthand.paramRegexps[paramName]);
        }
        this.pageContent = pagesetShorthand.pageContent ||
            function() { return true; };
        this.uiElements = {};
    };

    this.init(pagesetShorthand);
}
/**
* Construct the UI map object, and return it. Once the object is instantiated,
* it binds to a global variable and will not leave scope.
*
* @return new UIMap object
*/
function UIMap()
{
    // the singleton pattern, split into two parts so that "new" can still
    // be used, in addition to "getInstance()"
    UIMap.self = this;

    // need to attach variables directly to the Editor object in order for them
    // to be in scope for Editor methods
    if (is_IDE()) {
        Editor.uiMap = this;
        Editor.UI_PREFIX = UI_GLOBAL.UI_PREFIX;
    }

    /**
     * pageset[pagesetName]
     *     regexp
     *     elements[elementName]
     *         UIElement
     */
    this.pagesets = new Object();

    /**
     * Adds a Pageset, built from shorthand, to the map. Returns true on
     * success, or false if the pageset could not be created or one with the
     * same name already exists.
     *
     * @param pagesetShorthand a representation of a Pageset in shorthand JSON
     */
    this.addPageset = function(pagesetShorthand)
    {
        try {
            var pageset = new Pageset(pagesetShorthand);
        }
        catch (e) {
            safe_alert("Could not create pageset from shorthand:\n"
                + print_r(pagesetShorthand) + "\n" + e.message);
            return false;
        }

        if (this.pagesets[pageset.name]) {
            safe_alert('Could not add pageset "' + pageset.name
                + '": a pageset with that name already exists!');
            return false;
        }

        this.pagesets[pageset.name] = pageset;
        return true;
    };

    /**
     * Adds a UIElement to a named pageset. Returns true on success, false
     * otherwise.
     *
     * @param pagesetName
     * @param uiElementShorthand a representation of a UIElement object in
     *                           shorthand JSON.
     */
    this.addElement = function(pagesetName, uiElementShorthand)
    {
        try {
            var uiElement = new UIElement(uiElementShorthand);
        }
        catch (e) {
            safe_alert("Could not create UI element from shorthand:\n"
                + print_r(uiElementShorthand) + "\n" + e.message);
            return false;
        }

        // run the element's unit tests only for the IDE, and only when the
        // IDE is starting. Make a rough guess as to the latter condition.
        if (is_IDE() && !editor.selDebugger && !uiElement.test()) {
            safe_alert('Could not add UI element "' + uiElement.name
                + '": failed testcases!');
            return false;
        }

        try {
            this.pagesets[pagesetName].uiElements[uiElement.name] = uiElement;
        }
        catch (e) {
            safe_alert("Could not add UI element '" + uiElement.name
                + "' to pageset '" + pagesetName + "':\n" + e.message);
            return false;
        }

        return true;
    };

    /**
     * Returns the pageset for a given UI specifier string, or null if the
     * string cannot be parsed.
     *
     * @param uiSpecifierString
     * @return a pageset object
     */
    this.getPageset = function(uiSpecifierString)
    {
        try {
            var uiSpecifier = new UISpecifier(uiSpecifierString);
            return this.pagesets[uiSpecifier.pagesetName];
        }
        catch (e) {
            return null;
        }
    }

    /**
     * Returns the UIElement that a UISpecifierString or pageset and element
     * pair refer to.
     *
     * @param pagesetNameOrUISpecifierString
     * @return a UIElement, or null if none is found associated with
     *         uiSpecifierString
     */
    this.getUIElement = function(pagesetNameOrUISpecifierString, uiElementName)
    {
        var pagesetName = pagesetNameOrUISpecifierString;
        if (arguments.length == 1) {
            // single-argument form: parse the UI specifier string
            var uiSpecifierString = pagesetNameOrUISpecifierString;
            try {
                var uiSpecifier = new UISpecifier(uiSpecifierString);
                pagesetName = uiSpecifier.pagesetName;
                var uiElementName = uiSpecifier.elementName;
            }
            catch (e) {
                return null;
            }
        }
        try {
            return this.pagesets[pagesetName].uiElements[uiElementName];
        }
        catch (e) {
            return null;
        }
    };

    /**
     * Returns a list of pagesets that "contains" the provided page,
     * represented as a document object. Containership is defined by the
     * Pageset object's contain() method.
     *
     * @param inDocument the page to get pagesets for
     * @return a list of pagesets
     */
    this.getPagesetsForPage = function(inDocument)
    {
        var pagesets = [];
        for (var pagesetName in this.pagesets) {
            var pageset = this.pagesets[pagesetName];
            if (pageset.contains(inDocument)) {
                pagesets.push(pageset);
            }
        }
        return pagesets;
    };

    /**
     * Returns a list of all pagesets.
     *
     * @return a list of pagesets
     */
    this.getPagesets = function()
    {
        var pagesets = [];
        for (var pagesetName in this.pagesets) {
            pagesets.push(this.pagesets[pagesetName]);
        }
        return pagesets;
    };

    /**
     * Returns a list of elements on a page that a given UI specifier string,
     * maps to. If no elements are mapped to, returns an empty list.
     *
     * @param uiSpecifierString a String that specifies a UI element with
     *                          attendant argument values
     * @param inDocument        the document object the specified UI element
     *                          appears in
     * @return a potentially-empty list of elements
     *         specified by uiSpecifierString
     */
    this.getPageElements = function(uiSpecifierString, inDocument)
    {
        var locator = this.getLocator(uiSpecifierString);
        var results = locator ? eval_locator(locator, inDocument) : [];
        return results;
    };

    /**
     * Returns the locator string that a given UI specifier string maps to, or
     * null if it cannot be mapped.
     *
     * @param uiSpecifierString
     */
    this.getLocator = function(uiSpecifierString)
    {
        try {
            var uiSpecifier = new UISpecifier(uiSpecifierString);
        }
        catch (e) {
            safe_alert('Could not create UISpecifier for string "'
                + uiSpecifierString + '": ' + e.message);
            return null;
        }

        var uiElement = this.getUIElement(uiSpecifier.pagesetName,
            uiSpecifier.elementName);
        try {
            return uiElement.getLocator(uiSpecifier.args);
        }
        catch (e) {
            return null;
        }
    }

    /**
     * Finds and returns a UI specifier string given an element and the page
     * that it appears on.
     *
     * @param pageElement the document element to map to a UI specifier
     * @param inDocument  the document the element appears in
     * @return a UI specifier string, or false if one cannot be
     *         constructed
     */
    this.getUISpecifierString = function(pageElement, inDocument)
    {
        var is_fuzzy_match =
            BrowserBot.prototype.locateElementByUIElement.is_fuzzy_match;
        var pagesets = this.getPagesetsForPage(inDocument);

        for (var i = 0; i < pagesets.length; ++i) {
            var pageset = pagesets[i];
            var uiElements = pageset.getUIElements();

            for (var j = 0; j < uiElements.length; ++j) {
                var uiElement = uiElements[j];

                // first test against the generic locator, if there is one.
                // This should net some performance benefit when recording on
                // more complicated pages.
                if (uiElement.getGenericLocator) {
                    var passedTest = false;
                    var results =
                        eval_locator(uiElement.getGenericLocator(), inDocument);
                    // bug fix: this inner loop previously redeclared "i",
                    // clobbering the pageset loop counter above (var is
                    // function-scoped), which could skip or repeat pagesets.
                    for (var m = 0; m < results.length; ++m) {
                        if (results[m] == pageElement) {
                            passedTest = true;
                            break;
                        }
                    }
                    if (!passedTest) {
                        continue;
                    }
                }

                var defaultLocators;
                if (uiElement.isDefaultLocatorConstructionDeferred) {
                    defaultLocators = uiElement.getDefaultLocators(inDocument);
                }
                else {
                    defaultLocators = uiElement.defaultLocators;
                }
                //safe_alert(print_r(uiElement.defaultLocators));

                for (var locator in defaultLocators) {
                    var locatedElements = eval_locator(locator, inDocument);
                    if (locatedElements.length) {
                        var locatedElement = locatedElements[0];
                    }
                    else {
                        continue;
                    }

                    // use a heuristic to determine whether the element
                    // specified is the "same" as the element we're matching
                    if (is_fuzzy_match) {
                        if (is_fuzzy_match(locatedElement, pageElement)) {
                            return UI_GLOBAL.UI_PREFIX + '=' +
                                new UISpecifier(pageset.name, uiElement.name,
                                    defaultLocators[locator]);
                        }
                    }
                    else {
                        if (locatedElement == pageElement) {
                            return UI_GLOBAL.UI_PREFIX + '=' +
                                new UISpecifier(pageset.name, uiElement.name,
                                    defaultLocators[locator]);
                        }
                    }

                    // ok, matching the element failed. See if an offset
                    // locator can complete the match.
                    if (uiElement.getOffsetLocator) {
                        for (var k = 0; k < locatedElements.length; ++k) {
                            var offsetLocator = uiElement
                                .getOffsetLocator(locatedElements[k], pageElement);
                            if (offsetLocator) {
                                return UI_GLOBAL.UI_PREFIX + '=' +
                                    new UISpecifier(pageset.name,
                                        uiElement.name,
                                        defaultLocators[locator])
                                    + '->' + offsetLocator;
                            }
                        }
                    }
                }
            }
        }
        return false;
    };

    /**
     * Returns a sorted list of UI specifier string stubs representing possible
     * UI elements for all pagesets, paired the their descriptions. Stubs
     * contain all required arguments, but leave argument values blank.
     *
     * @return a list of UI specifier string stubs
     */
    this.getUISpecifierStringStubs = function() {
        var stubs = [];
        var pagesets = this.getPagesets();
        for (var i = 0; i < pagesets.length; ++i) {
            stubs = stubs.concat(pagesets[i].getUISpecifierStringStubs());
        }
        stubs.sort(function(a, b) {
            if (a[0] < b[0]) {
                return -1;
            }
            return a[0] == b[0] ? 0 : 1;
        });
        return stubs;
    }
}
UIMap.getInstance = function() {
    // lazily construct the singleton on first access; the constructor
    // registers itself on UIMap.self
    if (UIMap.self == null) {
        return new UIMap();
    }
    return UIMap.self;
}
//******************************************************************************
// Rollups
/**
* The Command object isn't available in the Selenium RC. We introduce an
* object with the identical constructor. In the IDE, this will be redefined,
* which is just fine.
*
* @param command
* @param target
* @param value
*/
if (typeof(Command) == 'undefined') {
    // NOTE(review): a function declaration inside a conditional block is
    // non-standard; legacy engines treated it as a conditional definition.
    // Left as-is for compatibility with the environments this targets.
    function Command(command, target, value) {
        // null/undefined arguments default to the empty string
        this.command = command != null ? command : '';
        this.target = target != null ? target : '';
        this.value = value != null ? value : '';
    }
}
/**
* A CommandMatcher object matches commands during the application of a
* RollupRule. It's specified with a shorthand format, for example:
*
* new CommandMatcher({
* command: 'click'
* , target: 'ui=allPages::.+'
* })
*
* which is intended to match click commands whose target is an element in the
* allPages PageSet. The matching expressions are given as regular expressions;
* in the example above, the command must be "click"; "clickAndWait" would be
* acceptable if 'click.*' were used. Here's a more complete example:
*
* new CommandMatcher({
* command: 'type'
* , target: 'ui=loginPages::username()'
* , value: '.+_test'
* , updateArgs: function(command, args) {
* args.username = command.value;
* }
* })
*
* Here, the command and target are fixed, but there is variability in the
* value of the command. When a command matches, the username is saved to the
* arguments object.
*/
function CommandMatcher(commandMatcherShorthand)
{
    /**
     * Ensure the shorthand notation used to initialize the CommandMatcher has
     * all required values, and that minMatches does not exceed maxMatches.
     *
     * @param commandMatcherShorthand an object containing information about
     *                                the CommandMatcher
     */
    this.validate = function(commandMatcherShorthand) {
        var msg = "CommandMatcher validation error:\n"
            + print_r(commandMatcherShorthand);
        if (!commandMatcherShorthand.command) {
            throw new CommandMatcherException(msg + 'no command specified!');
        }
        if (!commandMatcherShorthand.target) {
            throw new CommandMatcherException(msg + 'no target specified!');
        }
        var lo = commandMatcherShorthand.minMatches;
        var hi = commandMatcherShorthand.maxMatches;
        if (lo && hi && lo > hi) {
            throw new CommandMatcherException(msg + 'minMatches > maxMatches!');
        }
    };

    /**
     * Copy the shorthand values onto this object, filling in defaults:
     * value defaults to null (unchecked), min/maxMatches default to 1, and
     * updateArgs defaults to a pass-through.
     *
     * @param commandMatcherShorthand an object containing information used to
     *                                initialize the CommandMatcher
     */
    this.init = function(commandMatcherShorthand) {
        this.validate(commandMatcherShorthand);

        this.command = commandMatcherShorthand.command;
        this.target = commandMatcherShorthand.target;
        this.value = commandMatcherShorthand.value || null;
        this.minMatches = commandMatcherShorthand.minMatches || 1;
        this.maxMatches = commandMatcherShorthand.maxMatches || 1;
        this.updateArgs = commandMatcherShorthand.updateArgs ||
            function(command, args) { return args; };
    };

    /**
     * Determines whether a given command matches this matcher's anchored
     * regular expressions (command, target, and — when configured — value).
     *
     * @param command the command to attempt to match
     */
    this.isMatch = function(command) {
        if (!new RegExp('^' + this.command + '$').test(command.command)) {
            return false;
        }
        if (!new RegExp('^' + this.target + '$').test(command.target)) {
            return false;
        }
        if (this.value != null &&
            !new RegExp('^' + this.value + '$').test(command.value)) {
            return false;
        }
        // okay, the command matches
        return true;
    };

    // initialization
    this.init(commandMatcherShorthand);
}
// Exception type thrown by RollupRule validation.
function RollupRuleException(message)
{
    this.name = 'RollupRuleException';
    this.message = message;
}
function RollupRule(rollupRuleShorthand)
{
    /**
     * Ensure the shorthand notation used to initialize the RollupRule has all
     * required values.
     *
     * @param rollupRuleShorthand an object containing information about the
     *                            RollupRule
     */
    this.validate = function(rollupRuleShorthand) {
        var msg = "RollupRule validation error:\n"
            + print_r(rollupRuleShorthand);
        if (!rollupRuleShorthand.name) {
            throw new RollupRuleException(msg + 'no name specified!');
        }
        if (!rollupRuleShorthand.description) {
            throw new RollupRuleException(msg + 'no description specified!');
        }
        // rollupRuleShorthand.args is optional
        if (!rollupRuleShorthand.commandMatchers &&
            !rollupRuleShorthand.getRollup) {
            throw new RollupRuleException(msg
                + 'no command matchers specified!');
        }
        if (!rollupRuleShorthand.expandedCommands &&
            !rollupRuleShorthand.getExpandedCommands) {
            throw new RollupRuleException(msg
                + 'no expanded commands specified!');
        }

        return true;
    };

    /**
     * Initialize this object. Builds getRollup() either from a list of
     * CommandMatcher shorthands (greedy matching) or from a user-supplied
     * getRollup function, and getExpandedCommands() from either a static
     * list or a user-supplied generator.
     *
     * @param rollupRuleShorthand an object containing information used to
     *                            initialize the RollupRule
     */
    this.init = function(rollupRuleShorthand) {
        this.validate(rollupRuleShorthand);

        this.name = rollupRuleShorthand.name;
        this.description = rollupRuleShorthand.description;
        this.pre = rollupRuleShorthand.pre || '';
        this.post = rollupRuleShorthand.post || '';
        this.alternateCommand = rollupRuleShorthand.alternateCommand;
        this.args = rollupRuleShorthand.args || [];

        if (rollupRuleShorthand.commandMatchers) {
            // construct the rule from the list of CommandMatchers
            this.commandMatchers = [];
            var matchers = rollupRuleShorthand.commandMatchers;
            for (var i = 0; i < matchers.length; ++i) {
                if (matchers[i].updateArgs && this.args.length == 0) {
                    // enforce metadata for arguments
                    var msg = "RollupRule validation error:\n"
                        + print_r(rollupRuleShorthand)
                        + 'no argument metadata provided!';
                    throw new RollupRuleException(msg);
                }
                this.commandMatchers.push(new CommandMatcher(matchers[i]));
            }

            // returns false if the rollup doesn't match, or a rollup command
            // if it does. If returned, the command contains the
            // replacementIndexes property, which indicates which commands it
            // substitutes for.
            this.getRollup = function(commands) {
                // this is a greedy matching algorithm; i indexes the matcher
                // queue and j indexes the candidate commands
                var replacementIndexes = [];
                var commandMatcherQueue = this.commandMatchers;
                var matchCount = 0;
                var args = {};
                for (var i = 0, j = 0; i < commandMatcherQueue.length;) {
                    var matcher = commandMatcherQueue[i];
                    if (j >= commands.length) {
                        // we've run out of commands! If the remaining matchers
                        // do not have minMatches requirements, this is a
                        // match. Otherwise, it's not.
                        if (matcher.minMatches > 0) {
                            return false;
                        }
                        ++i;
                        matchCount = 0; // unnecessary, but let's be consistent
                    }
                    else {
                        if (matcher.isMatch(commands[j])) {
                            ++matchCount;
                            if (matchCount == matcher.maxMatches) {
                                // exhausted this matcher's matches ... move on
                                // to next matcher
                                ++i;
                                matchCount = 0;
                            }
                            args = matcher.updateArgs(commands[j], args);
                            replacementIndexes.push(j);
                            ++j; // move on to next command
                        }
                        else {
                            //alert(matchCount + ', ' + matcher.minMatches);
                            if (matchCount < matcher.minMatches) {
                                return false;
                            }
                            // didn't match this time, but we've satisfied the
                            // requirements already ... move on to next matcher
                            ++i;
                            matchCount = 0;
                            // still gonna look at same command
                        }
                    }
                }

                // either substitute the configured alternate command, or emit
                // a "rollup" command carrying the collected arguments
                var rollup;
                if (this.alternateCommand) {
                    rollup = new Command(this.alternateCommand,
                        commands[0].target, commands[0].value);
                }
                else {
                    rollup = new Command('rollup', this.name);
                    rollup.value = to_kwargs(args);
                }
                rollup.replacementIndexes = replacementIndexes;
                return rollup;
            };
        }
        else {
            // wrap the user-supplied getRollup so the result is always a
            // proper Command carrying replacementIndexes
            this.getRollup = function(commands) {
                var result = rollupRuleShorthand.getRollup(commands);
                if (result) {
                    var rollup = new Command(
                        result.command
                        , result.target
                        , result.value
                    );
                    rollup.replacementIndexes = result.replacementIndexes;
                    return rollup;
                }
                return false;
            };
        }

        // expands the rollup back into its component commands, from either
        // the static list or the user-supplied generator
        this.getExpandedCommands = function(kwargs) {
            var commands = [];
            var expandedCommands = (rollupRuleShorthand.expandedCommands
                ? rollupRuleShorthand.expandedCommands
                : rollupRuleShorthand.getExpandedCommands(
                    parse_kwargs(kwargs)));
            for (var i = 0; i < expandedCommands.length; ++i) {
                var command = expandedCommands[i];
                commands.push(new Command(
                    command.command
                    , command.target
                    , command.value
                ));
            }
            return commands;
        };
    };

    this.init(rollupRuleShorthand);
}
/**
*
*/
/**
 * Singleton manager for the repository of RollupRules, and for applying
 * those rules to the commands currently loaded in the IDE.
 */
function RollupManager()
{
    // singleton pattern
    RollupManager.self = this;

    this.init = function()
    {
        // map of rule name -> RollupRule
        this.rollupRules = {};
        if (is_IDE()) {
            Editor.rollupManager = this;
        }
    };

    /**
     * Adds a new RollupRule to the repository. Returns true on success, or
     * false if the rule couldn't be added.
     *
     * @param rollupRuleShorthand shorthand JSON specification of the new
     *                            RollupRule, possibly including CommandMatcher
     *                            shorthand too.
     * @return true if the rule was added successfully,
     *         false otherwise.
     */
    this.addRollupRule = function(rollupRuleShorthand)
    {
        try {
            var rule = new RollupRule(rollupRuleShorthand);
            this.rollupRules[rule.name] = rule;
        }
        catch(e) {
            // consistency fix: this previously called smart_alert(), which
            // appears nowhere else in this file; every other error path
            // reports through safe_alert().
            safe_alert("Could not create RollupRule from shorthand:\n\n"
                + e.message);
            return false;
        }
        return true;
    };

    /**
     * Returns a RollupRule by name.
     *
     * @param rollupName the name of the rule to fetch
     * @return the RollupRule, or null if it isn't found.
     */
    this.getRollupRule = function(rollupName)
    {
        return (this.rollupRules[rollupName] || null);
    };

    /**
     * Returns a list of name-description pairs for use in populating the
     * auto-populated target dropdown in the IDE. Rules that have an alternate
     * command defined are not included in the list, as they are not bona-fide
     * rollups.
     *
     * @return a list of name-description pairs
     */
    this.getRollupRulesForDropdown = function()
    {
        var targets = [];
        var names = keys(this.rollupRules).sort();
        for (var i = 0; i < names.length; ++i) {
            var name = names[i];
            if (this.rollupRules[name].alternateCommand) {
                continue;
            }
            targets.push([ name, this.rollupRules[name].description ]);
        }
        return targets;
    };

    /**
     * Applies all rules to the current editor commands, asking the user in
     * each case if it's okay to perform the replacement. The rules are applied
     * repeatedly until there are no more matches. The algorithm remembers
     * when the user has declined a replacement, and does not ask to do it
     * again.
     *
     * @return the list of commands with rollup replacements performed
     */
    this.applyRollupRules = function()
    {
        var commands = editor.getTestCase().commands;
        var blacklistedRollups = {};

        // so long as rollups were performed, we need to keep iterating through
        // the commands starting at the beginning, because further rollups may
        // potentially be applied on the newly created ones.
        while (true) {
            var performedRollup = false;
            for (var i = 0; i < commands.length; ++i) {
                // iterate through commands
                for (var rollupName in this.rollupRules) {
                    var rule = this.rollupRules[rollupName];
                    var rollup = rule.getRollup(commands.slice(i));
                    if (rollup) {
                        // since we passed in a sliced version of the commands
                        // array to the getRollup() method, we need to re-add
                        // the offset to the replacementIndexes
                        var k = 0;
                        for (; k < rollup.replacementIndexes.length; ++k) {
                            rollup.replacementIndexes[k] += i;
                        }

                        // build the confirmation message
                        var msg = "Perform the following command rollup?\n\n";
                        for (k = 0; k < rollup.replacementIndexes.length; ++k) {
                            var replacementIndex = rollup.replacementIndexes[k];
                            var command = commands[replacementIndex];
                            msg += '[' + replacementIndex + ']: ';
                            msg += command + "\n";
                        }
                        msg += "\n";
                        msg += rollup;

                        // check against blacklisted rollups
                        if (blacklistedRollups[msg]) {
                            continue;
                        }

                        // highlight the potentially replaced rows
                        for (k = 0; k < commands.length; ++k) {
                            var command = commands[k];
                            command.result = '';
                            if (rollup.replacementIndexes.indexOf(k) != -1) {
                                command.selectedForReplacement = true;
                            }
                            // bug fix: previously updated row
                            // "replacementIndex" — a stale value left over
                            // from the message-building loop above — on
                            // every iteration; the row being restyled here
                            // is row k.
                            editor.view.rowUpdated(k);
                        }

                        // get confirmation from user
                        if (confirm(msg)) {
                            // perform rollup
                            var deleteRanges = [];
                            var replacementIndexes = rollup.replacementIndexes;
                            for (k = 0; k < replacementIndexes.length; ++k) {
                                // this is expected to be list of ranges. A
                                // range has a start, and a list of commands.
                                // The deletion only checks the length of the
                                // command list.
                                deleteRanges.push({
                                    start: replacementIndexes[k]
                                    , commands: [ 1 ]
                                });
                            }
                            editor.view.executeAction(new TreeView
                                .DeleteCommandAction(editor.view,deleteRanges));
                            editor.view.insertAt(i, rollup);
                            performedRollup = true;
                        }
                        else {
                            // cleverly remember not to try this rollup again
                            blacklistedRollups[msg] = true;
                        }

                        // unhighlight
                        for (k = 0; k < commands.length; ++k) {
                            commands[k].selectedForReplacement = false;
                            editor.view.rowUpdated(k);
                        }
                    }
                }
            }
            if (!performedRollup) {
                break;
            }
        }
        return commands;
    };

    this.init();
}
// Returns the shared RollupManager instance, constructing one on demand.
// NOTE(review): RollupManager.self is never assigned in this function, so
// the caching only works if the constructor itself sets RollupManager.self
// — confirm against the constructor (not visible here).
RollupManager.getInstance = function() {
    return (RollupManager.self == null)
        ? new RollupManager()
        : RollupManager.self;
}
/Nitrous-0.9.3-py3-none-any.whl/turbogears/toolbox/catwalk/static/javascript/greybox/greybox_inline.js | var GB_HEADER = null;
var GB_WINDOW = null;    // the greybox content window <div>
var GB_IFRAME = null;    // inner content container, replaced on every GB_show()
var GB_OVERLAY = null;   // full-page dimming overlay; null until first init
var GB_TIMEOUT = null;   // timer handle for the slide-down animation
var GB_HEIGHT = 400;     // current window height; overridable per GB_show() call
var GB_WIDTH = 400;      // current window width; overridable per GB_show() call
var GB_caption = null;   // caption <div> inside the header bar
//The url that was visited last
var GB_last_win_url = null;
// Opens the greybox: sets the caption, fires an async request for `url`
// and shows the (initially empty) window while the response loads.
// height/width are optional; when omitted, the previous values persist.
// Always returns false so it can be used directly in onclick handlers.
function GB_show(caption, url /* optional */, height, width) {
    try {
        // bug fix: test the *type* against 'undefined'. The old code
        // compared the value itself to the string 'undefined', so omitted
        // arguments clobbered GB_HEIGHT/GB_WIDTH with undefined.
        if (typeof height != 'undefined')
            GB_HEIGHT = height;
        if (typeof width != 'undefined')
            GB_WIDTH = width;
        initIfNeeded();
        GB_caption.innerHTML = caption;
        var e = doSimpleXMLHttpRequest(url);
        e.addCallback(GB_load_content);
        GB_setPosition();
        if (GB_ANIMATION) {
            // park the chrome above the viewport so it can slide down
            positionRightVertically(GB_HEADER, -(GB_HEIGHT));
            positionRightVertically(GB_WINDOW, -(GB_HEIGHT + 22));
        }
        showElement(GB_OVERLAY);
        showElement(GB_HEADER);
        showElement(GB_WINDOW);
        GB_setWidth();
        if (GB_ANIMATION) {
            GB_animateOut(-GB_HEIGHT);
        }
        return false;
    }
    catch (e) {
        // best-effort UI: never let a failure propagate to the caller
        return false;
    }
}
// Async callback for GB_show(): injects the fetched markup into the
// greybox content area once the request completes.
var GB_load_content = function(request)
{
    GB_IFRAME.innerHTML = request.responseText;
}
// Hides the greybox chrome: window first, then header, then overlay.
function GB_hide() {
    var parts = [GB_WINDOW, GB_HEADER, GB_OVERLAY];
    for (var i = 0; i < parts.length; ++i) {
        hideElement(parts[i]);
    }
}
// Repositions the greybox relative to the current scroll offset: the
// header at the top edge, the window 22px below it (the same 22px offset
// used by GB_show and GB_animateOut for the header strip).
function GB_setPosition() {
    positionRightVertically(GB_HEADER, 0);
    positionRightVertically(GB_WINDOW, 22);
}
// Slides the greybox down from above the viewport in 50px steps (one step
// per timer tick) until its top reaches the scroll position, then pins the
// window/header in place and stops the timer.
function GB_animateOut(top) {
    if (top + getScrollTop() >= 0) {
        // destination reached: pin and stop animating
        GB_WINDOW.style.top = getScrollTop() + 22 + "px";
        GB_HEADER.style.top = getScrollTop() + "px";
        clearTimeout(GB_TIMEOUT);
        return;
    }
    positionRightVertically(GB_WINDOW, top + 22);
    positionRightVertically(GB_HEADER, top);
    GB_TIMEOUT = window.setTimeout(function() { GB_animateOut(top + 50); }, 1);
}
// Applies the configured GB_WIDTH/GB_HEIGHT to the greybox elements,
// stretches the overlay to cover the page, and centers the window and
// header horizontally.
function GB_setWidth() {
    var array_page_size = GB_getWindowSize();
    // size the window, its content area and the header strip
    GB_WINDOW.style.width = GB_WIDTH + "px";
    GB_IFRAME.style.width = GB_WIDTH + "px";
    GB_HEADER.style.width = GB_WIDTH + "px";
    GB_WINDOW.style.height = GB_HEIGHT + "px";
    GB_IFRAME.style.height = GB_HEIGHT - 5 + "px";
    // the overlay must cover at least the visible page, and always the
    // opened window (plus 30px of slack) when that is taller
    GB_OVERLAY.style.width = array_page_size[0] + "px";
    var max_height = Math.max(getScrollTop()+array_page_size[1], getScrollTop()+GB_HEIGHT+30);
    GB_OVERLAY.style.height = max_height + "px";
    // center window and header horizontally in the viewport
    GB_WINDOW.style.left = ((array_page_size[0] - GB_WINDOW.offsetWidth) /2) + "px";
    GB_HEADER.style.left = ((array_page_size[0] - GB_HEADER.offsetWidth) /2) + "px";
}
function GB_init() {
//Create the overlay
GB_OVERLAY = DIV({'id': 'GB_overlay'});
if(GB_overlay_click_close)
GB_OVERLAY.onclick = GB_hide;
getBody().insertBefore(GB_OVERLAY, getBody().firstChild);
//Create the window
GB_WINDOW = DIV({'id': 'GB_window'});
GB_HEADER = DIV({'id': 'GB_header'});
GB_caption = DIV({'id': 'GB_caption'}, "");
var close = DIV({'id': 'GB_close'}, IMG({'src': GB_IMG_DIR + 'close.gif', 'alt': 'Close window'}));
close.onclick = GB_hide;
ACN(GB_HEADER, close, GB_caption);
getBody().insertBefore(GB_WINDOW, GB_OVERLAY.nextSibling);
getBody().insertBefore(GB_HEADER, GB_OVERLAY.nextSibling);
}
// Lazily builds the greybox chrome on first use (plus resize/scroll
// listeners), then swaps in a fresh content container on every call so
// stale content never flashes.
function initIfNeeded() {
    if(GB_OVERLAY == null) {
        GB_init();
        GB_addOnWinResize(GB_setWidth);
        window.onscroll = function() { GB_setPosition(); };
    }
    // replace the previous content <div> (if any) with a fresh one
    var new_frame = DIV({'id': 'GB_frame', 'name': 'GB_frame'},'Hello there');
    if (GB_IFRAME != null) removeElement(GB_IFRAME);
    ACN(GB_WINDOW, new_frame);
    GB_IFRAME = new_frame;
}
// Cross-browser viewport size as [width, height].  Checks the standards
// path first, then IE6 strict mode, then older IE; yields
// [undefined, undefined] if none are available.
function GB_getWindowSize() {
    if (self.innerHeight) {
        return [self.innerWidth, self.innerHeight];
    }
    if (document.documentElement && document.documentElement.clientHeight) {
        return [document.documentElement.clientWidth,
                document.documentElement.clientHeight];
    }
    if (document.body) {
        return [document.body.clientWidth, document.body.clientHeight];
    }
    return [undefined, undefined];
}
// Chains `func` onto any existing window.onresize handler instead of
// replacing it outright.
function GB_addOnWinResize(func) {
    var previousHandler = window.onresize;
    if (typeof previousHandler != 'function') {
        window.onresize = func;
        return;
    }
    window.onresize = function() {
        previousHandler();
        func();
    };
}
// Places `elm` `value` pixels below the current vertical scroll offset.
function positionRightVertically(elm, value) {
    var absoluteTop = getScrollTop() + value;
    elm.style.top = absoluteTop + "px";
}
// Cross-browser vertical scroll offset.
// From: http://www.quirksmode.org/js/doctypes.html
// Returns undefined when neither source is available.
function getScrollTop() {
    if (document.documentElement && document.documentElement.scrollTop) {
        return document.documentElement.scrollTop;
    }
    if (document.body) {
        return document.body.scrollTop;
    }
    return undefined;
}
/Nasse-1.1-py3-none-any.whl/nasse/docs/postman.py | import typing
from copy import deepcopy
# from uuid import uuid4
from nasse import docs, models
from nasse.utils.sanitize import sort_http_methods
def create_postman_data(app, section: str, endpoints: typing.List[models.Endpoint]):
    """Build the Postman collection dictionary for one documentation section.

    Args:
        app: The Nasse application; its ``name`` and ``id`` are embedded in
            the collection metadata and in the auth token placeholder.
        section: Name of the documentation section.
        endpoints: The endpoints belonging to this section.

    Returns:
        dict: A Postman v2.1.0 collection with one item per endpoint/method
        and API-key authentication via the Authorization header.
    """
    items = []
    for endpoint in endpoints:
        items.extend(create_postman_docs(endpoint))

    description = (
        f"All of the endpoints under the '{section}' section "
        f"of the {app.name} API Interface"
    )
    # Postman variable placeholder, e.g. {{MYAPP_TOKEN}}
    token_placeholder = f"{{{{{app.id.upper()}_TOKEN}}}}"

    return {
        "info": {
            # "_postman_id": str(uuid4()),
            "name": section,
            "description": description,
            "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json"
        },
        "item": items,
        "auth": {
            "type": "apikey",
            "apikey": [
                {
                    "key": "value",
                    "value": token_placeholder,
                    "type": "string"
                },
                {
                    "key": "key",
                    "value": "Authorization",
                    "type": "string"
                }
            ]
        }
    }
def create_postman_docs(endpoint: models.Endpoint):
    """Build the Postman request items for every HTTP method of an endpoint.

    Each item carries the request definition (headers, URL with query
    parameters, markdown description) plus a single placeholder 200
    response whose body is a generated example.

    Args:
        endpoint: The endpoint to document.

    Returns:
        list[dict]: One Postman item per HTTP method, in canonical order.
    """
    def type_label(param_type):
        # Same fallback chain as before: prefer __name__, then str(),
        # defaulting to "str" for untyped parameters.
        if hasattr(param_type, "__name__"):
            return param_type.__name__
        return str(param_type) if param_type is not None else "str"

    items = []
    for method in sort_http_methods(endpoint.methods):
        def applies(element):
            # does this header/param apply to the current HTTP method?
            return element.all_methods or method in element.methods

        headers = [
            {
                "key": header.name,
                "value": header.description or header.name,
                "type": "text"
            }
            for header in endpoint.headers if applies(header)
        ]
        params = [param for param in endpoint.params if applies(param)]
        raw_path = endpoint.path.replace("<", "{{").replace(">", "}}")
        query = [
            {
                "key": param.name,
                "value": f"<{param.name}:{type_label(param.type)}>",
                "description": param.description or param.name
            }
            for param in params
        ]

        item = {
            "name": str(endpoint.name),
            "event": [],
            "request": {
                "method": str(method).upper(),
                "header": headers,
                "url": {
                    "raw": "{{DOMAIN}}" + raw_path + "?=" + "&".join(param.name for param in params),
                    "host": [
                        "{{DOMAIN}}"
                    ],
                    "path": [segment.replace("<", "{{").replace(">", "}}") for segment in endpoint.path.split("/") if segment != ""],
                    "query": query
                },
                "description": docs.markdown.make_docs_for_method(endpoint=endpoint, method=method, postman=True)
            },
            "response": []
        }

        # Snapshot the request (taken while "response" is still empty) and
        # decorate it as the placeholder 200 example response.
        placeholder = deepcopy(item)
        placeholder["status"] = "OK"
        placeholder["code"] = 200
        placeholder["_postman_previewlanguage"] = "json"
        placeholder["header"] = []
        placeholder["cookie"] = []
        placeholder["body"] = docs.example.generate_example(
            endpoint=endpoint, method=method)
        item["response"].append(placeholder)

        # Endpoints without a login rule (or with no_login) get no auth.
        login_rules = endpoint.login.get(method, endpoint.login.get("*", None))
        if login_rules is None or login_rules.no_login:
            item["request"]["auth"] = {
                "type": "noauth"
            }
        items.append(item)
    return items
/ModuleZooTorch-1.1.3a0-py3-none-any.whl/moduleZoo/resblocks/invertedresidual.py | from typing import Callable, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..convolution import (
ConvInvertedBlock1d,
ConvInvertedBlock2d,
ConvNormActivation1d,
ConvNormActivation2d,
)
class ConvInvertedResidualBlock2d(ConvInvertedBlock2d):
    """2D inverted bottleneck block with a residual (skip) connection.

    Builds on ``ConvInvertedBlock2d`` — which creates the ``conv1``/
    ``conv2``/``conv3`` stages, the optional ``channel_shuffle`` and the
    final ``activation`` — and adds a shortcut path: the identity when
    ``stride == 1``, otherwise a strided 1x1 projection so the shortcut's
    spatial size matches the main branch output.
    """

    def __init__(self,
                 in_channels: int,
                 expansion_ratio: float,
                 kernel_size: Union[int, Tuple[int, int]] = 3,
                 stride: Union[int, Tuple[int, int]] = 1,
                 norm_layer: Optional[Callable[..., nn.Module]] = None,
                 activation_layer: Optional[Callable[..., nn.Module]] = nn.ReLU6,
                 channel_shuffle: bool = False,
                 grouping: int = 1) -> None:
        # The parent constructor creates the main-branch modules used in
        # forward() below.
        super().__init__(in_channels,
                         expansion_ratio,
                         kernel_size,
                         stride,
                         norm_layer,
                         activation_layer,
                         channel_shuffle,
                         grouping)

        # With stride != 1 the main branch downsamples, so the shortcut
        # needs a strided 1x1 convolution to stay shape-compatible.
        self.proj_type = 'id' if stride == 1 else 'projection'

        # No norm/activation on the shortcut: it is a pure linear projection.
        self.projection = ConvNormActivation2d(in_channels,
                                               in_channels,
                                               1,
                                               stride,
                                               padding='stride_effective',
                                               bias=False,
                                               norm_layer=None,
                                               activation_layer=None) if self.proj_type == 'projection' else None

    def forward(self, x:torch.Tensor) -> torch.Tensor:
        """Main branch plus residual shortcut; activation applied last."""
        x_ = self.conv1(x)
        x_ = self.channel_shuffle(x_) if self.channel_shuffle is not None else x_
        x_ = self.conv2(x_)
        x_ = self.conv3(x_)

        if self.projection is not None:
            # strided case: project the input before adding the residual
            x = self.projection(x) + x_
            x = self.activation(x) if self.activation is not None else x
            return x

        # stride == 1: plain identity shortcut
        x = x + x_
        x = self.activation(x) if self.activation is not None else x
        return x

    def shape(self, in_shape: Tuple[int, int]):
        """Output spatial shape for ``in_shape``.

        Asserts that the main branch and the shortcut produce the same
        spatial shape before returning it.
        """
        final_conv_shape = super().shape(in_shape)
        final_proj_shape = self.projection.shape(in_shape) if self.projection is not None else in_shape

        assert(final_conv_shape == final_proj_shape)

        return final_conv_shape
class ConvInvertedResidualBlock1d(ConvInvertedBlock1d):
    """1D inverted bottleneck block with a residual (skip) connection.

    Mirrors :class:`ConvInvertedResidualBlock2d` for 1D inputs: the parent
    ``ConvInvertedBlock1d`` provides the main branch (``conv1``/``conv2``/
    ``conv3``, optional ``channel_shuffle``, final ``activation``), and
    this class adds an identity shortcut (``stride == 1``) or a strided
    1x1 projection shortcut otherwise.
    """

    def __init__(self,
                 in_channels: int,
                 expansion_ratio: float,
                 kernel_size: Union[int, Tuple[int, int]] = 3,
                 stride: Union[int, Tuple[int, int]] = 1,
                 norm_layer: Optional[Callable[..., nn.Module]] = None,
                 activation_layer: Optional[Callable[..., nn.Module]] = nn.ReLU6,
                 channel_shuffle: bool = False,
                 grouping: int = 1) -> None:
        # The parent constructor creates the main-branch modules used in
        # forward() below.
        super().__init__(in_channels,
                         expansion_ratio,
                         kernel_size,
                         stride,
                         norm_layer,
                         activation_layer,
                         channel_shuffle,
                         grouping)

        # With stride != 1 the main branch downsamples, so the shortcut
        # needs a strided 1x1 convolution to stay shape-compatible.
        self.proj_type = 'id' if stride == 1 else 'projection'

        # No norm/activation on the shortcut: it is a pure linear projection.
        self.projection = ConvNormActivation1d(in_channels,
                                               in_channels,
                                               1,
                                               stride,
                                               padding='stride_effective',
                                               bias=False,
                                               norm_layer=None,
                                               activation_layer=None) if self.proj_type == 'projection' else None

    def forward(self, x:torch.Tensor) -> torch.Tensor:
        """Main branch plus residual shortcut; activation applied last."""
        x_ = self.conv1(x)
        x_ = self.channel_shuffle(x_) if self.channel_shuffle is not None else x_
        x_ = self.conv2(x_)
        x_ = self.conv3(x_)

        if self.projection is not None:
            # strided case: project the input before adding the residual
            x = self.projection(x) + x_
            x = self.activation(x) if self.activation is not None else x
            return x

        # stride == 1: plain identity shortcut
        x = x + x_
        x = self.activation(x) if self.activation is not None else x
        return x

    def shape(self, in_shape: Tuple[int, int]):
        """Output spatial shape for ``in_shape``.

        Asserts that the main branch and the shortcut produce the same
        spatial shape before returning it.
        """
        final_conv_shape = super().shape(in_shape)
        final_proj_shape = self.projection.shape(in_shape) if self.projection is not None else in_shape

        assert(final_conv_shape == final_proj_shape)

        return final_conv_shape
/Gravelamps-2.3.tar.gz/Gravelamps-2.3/gravelamps/core/file_handling.py | from configparser import ConfigParser
import os
import shutil
import numpy as np
from bilby_pipe.create_injections import create_injection_file as bilby_injection_file
from bilby_pipe.input import Input
from gravelamps.core.gravelog import gravelogger
def create_bilby_pipe_config(config, args, output_directories, **kwargs):
    """Generates a bilby_pipe configuration dictionary

    Parameters
    ----------
    config : configparser.ConfigParser
        Object containing settings from INI file
    args : argparse.Namespace
        Object containing commandline arguments to the program
    output_directories : dict
        Contains the output directories for the run
    injection_file : str, optional
        Path of file containing injection data
    analysis_waveform_arguments : dict, optional
        Arguments dictionary to the analysis waveform generator
    injection_waveform_arguments : dict, optional
        Arguments dictionary for the injection waveform generator

    Returns
    -------
    bilby_pipe_config : dict
        Contains the configuration settings for a bilby_pipe run
    """

    # Defaults for the optional keyword arguments; callers may override
    # any of them via **kwargs
    additional_information = {"injection_file": None,
                              "analysis_waveform_arguments": None,
                              "injection_waveform_arguments": None}
    additional_information.update(**kwargs)

    gravelogger.info("Generating bilby_pipe configuration")
    bilby_pipe_config = {}

    # The commandline --local flag overrides the INI's run_settings.local
    bilby_pipe_config["local"] = args.local or config.getboolean("run_settings",
                                                                 "local",
                                                                 fallback=False)
    # Submission is driven by the Gravelamps DAG, not by bilby_pipe itself
    bilby_pipe_config["submit"] = False

    bilby_pipe_config["accounting"] =\
        config.get("condor_settings", "accounting_group", fallback=None)
    if bilby_pipe_config["accounting"] is None:
        gravelogger.warning("No accounting tag, jobs will not submit!")

    # Copy the remaining condor settings through, stripping unit suffixes
    # from request_* values (bilby_pipe expects bare numbers).
    # NOTE(review): the option fetched above is "accounting_group", but
    # this guard skips a key named "accounting" -- confirm which key name
    # the condor_settings section actually uses; otherwise the option is
    # also copied through under its own name.
    for key, value in config.items("condor_settings"):
        if key == "accounting":
            continue
        if "request_" in key:
            bilby_pipe_config[key] = value.replace("GB", "").replace("MB", "")
        else:
            bilby_pipe_config[key] = value

    bilby_pipe_config["label"] = config.get("output_settings", "label")
    bilby_pipe_config["outdir"] = output_directories["outdir"]

    # Inference settings map straight through, except the prior which
    # bilby_pipe expects under the "prior-file" key
    for key, value in config.items("inference_settings"):
        if key == "prior":
            bilby_pipe_config["prior-file"] = value
        else:
            bilby_pipe_config[key.replace("_","-")] = value

    # bilby_pipe option names use dashes rather than underscores
    for key, value in config.items("bilby_pipe_additional_settings"):
        bilby_pipe_config[key.replace("_", "-")] = value

    if "overwrite-outdir" not in bilby_pipe_config:
        bilby_pipe_config["overwrite-outdir"] = True

    # The commandline --injection flag overrides run_settings.injection
    bilby_pipe_config["injection"] = args.injection or\
        config.getboolean("run_settings", "injection")

    # NOTE(review): the analysis waveform arguments are only attached when
    # injection is True -- confirm that non-injection runs configure their
    # waveform arguments elsewhere.
    if bilby_pipe_config["injection"]:
        bilby_pipe_config["gaussian-noise"] = True
        bilby_pipe_config["injection_file"] = additional_information["injection_file"]
        bilby_pipe_config["injection_waveform_arguments"] =\
            additional_information["injection_waveform_arguments"]
        bilby_pipe_config["waveform_arguments_dict"] =\
            additional_information["analysis_waveform_arguments"]

    gravelogger.info("bilby_pipe configuration generated")
    return bilby_pipe_config
def create_final_dag(config, output_directories):
    """Write the top-level Gravelamps HTCondor DAG.

    The DAG runs any lens-interpolator generation jobs first (injection
    and/or analysis, each included only if its submit file exists) and
    makes them parents of the bilby_pipe inference sub-DAG.

    Parameters
    ----------
    config : configparser.ConfigParser
        Object containing settings from INI file
    output_directories : dict
        Contains the output directories for the run

    Returns
    -------
    str
        Path to the written Gravelamps DAG file.
    """
    label = config.get("output_settings", "label")
    submit_dir = output_directories["submit"]

    final_dag_file = f"{submit_dir}/gravelamps_inference.dag"
    injection_sub = f"{submit_dir}/generate_injection_interpolator_data.sub"
    analysis_sub = f"{submit_dir}/generate_analysis_interpolator_data.sub"
    bilby_pipe_dag = f"{submit_dir}/dag_{label}.submit"

    with open(final_dag_file, "w", encoding="utf-8") as dag:
        # JOB entries for whichever lens-generation submit files exist
        if os.path.isfile(injection_sub):
            gravelogger.info("Adding injection lens generation to final DAG")
            dag.write(f"JOB injection_lens_generation {injection_sub} \n")
        if os.path.isfile(analysis_sub):
            gravelogger.info("Adding analysis lens generation to final DAG")
            dag.write(f"JOB analysis_lens_generation {analysis_sub} \n")

        gravelogger.info("Adding bilby_pipe inference to final DAG")
        dag.write(f"SUBDAG EXTERNAL bilby_pipe_dag {bilby_pipe_dag} \n")

        # PARENT/CHILD links so inference waits on the generation jobs
        if os.path.isfile(injection_sub):
            gravelogger.info("Parent-Child linking injection lens generation")
            dag.write("PARENT injection_lens_generation CHILD bilby_pipe_dag \n")
        if os.path.isfile(analysis_sub):
            gravelogger.info("Parent-Child linking analysis lens generation")
            dag.write("PARENT analysis_lens_generation CHILD bilby_pipe_dag \n")

    gravelogger.info("DAG file generated")
    return final_dag_file
def create_injection_file(config):
    """
    Generate bilby_pipe injection file.

    A bilby prior file is first written from the [injection_parameters]
    section of the INI; bilby_pipe then draws the injection set from it.

    Parameter
    ---------
    config : configparser.ConfigParser
        Object containing settings from INI file

    Returns
    -------
    injection_file : str
        Path to the created bilby_pipe injection file
    """

    injection_file = f"{config.get('output_settings', 'outdir')}/data/injection.dat"

    # Write the prior file alongside the injection file
    prior_dict = config.items("injection_parameters")
    prior_file = injection_file.replace("injection.dat", "prior.dat")
    with open(prior_file, "w", encoding="utf-8") as prior:
        for key, value in prior_dict:
            prior.write(f"{key} = {value} \n")

    # GPS times may come from either a tuple or a file; when both are
    # given, the tuple takes precedence
    gpstuple = config.get("bilby_pipe_additional_settings", "gps_tuple", fallback=None)
    gpsfile = config.get("bilby_pipe_additional_settings", "gps_file", fallback=None)

    if gpstuple is not None:
        gpstimes = Input.parse_gps_tuple(gpstuple)
    elif gpsfile is not None:
        gpstimes = Input.read_gps_file(gpsfile)
    else:
        gpstimes=None

    n_injection = config.getint("bilby_pipe_additional_settings", "n-simulation")
    trigger_time = config.getfloat("inference_settings", "trigger_time", fallback=0)
    delta_t = config.getfloat("bilby_pipe_additional_settings", "deltaT", fallback=0.2)
    duration = config.getfloat("inference_settings", "duration", fallback=4)
    post_trigger_duration = config.getfloat("bilby_pipe_additional_settings",
                                            "post_trigger_duration",
                                            fallback=2)
    # NOTE(review): generation_seed is read with getfloat; random seeds
    # are normally integers -- confirm getint was not intended here.
    generation_seed = config.getfloat("bilby_pipe_additional_settings",
                                      "generation_seed",
                                      fallback=None)
    enforce_signal_duration = config.getboolean("bilby_pipe_additional_settings",
                                                "enforce_signal_duration",
                                                fallback=False)

    # Delegate the actual drawing/writing to bilby_pipe
    bilby_injection_file(injection_file,
                         prior_file=prior_file,
                         prior_dict=None,
                         n_injection=n_injection,
                         trigger_time=trigger_time,
                         deltaT=delta_t,
                         gpstimes=gpstimes,
                         duration=duration,
                         post_trigger_duration=post_trigger_duration,
                         generation_seed=generation_seed,
                         enforce_signal_duration=enforce_signal_duration)

    return injection_file
def get_config(args):
    """Load the user's INI configuration.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed commandline arguments; ``args.ini`` must name an existing
        INI file.

    Returns
    -------
    configparser.ConfigParser
        Object containing the parsed settings.

    Raises
    ------
    IOError
        If ``args.ini`` does not name an existing file.
    """
    if not os.path.isfile(args.ini):
        raise IOError(f"{args.ini} is not a valid file")

    config = ConfigParser()
    try:
        config.read(args.ini)
    except IOError:
        # Best-effort notice; a (possibly empty) parser is still returned
        print(f"{args.ini} cannot be read!")
    return config
def get_output_directories(config, from_config=True):
    """Resolve and create the run's output directory tree.

    The directories are the top-level output directory plus its ``data``
    and ``submit`` subdirectories; any that do not exist are created.

    Parameters
    ----------
    config : configparser.ConfigParser
        Object containing settings from INI file
    from_config : bool, optional
        When True (default), the top-level directory comes from the INI's
        ``output_settings.outdir`` (falling back to "."); when False, the
        current working directory is used.

    Returns
    -------
    dict
        ``outdir``, ``data`` and ``submit`` keys mapping to their paths.
    """
    if from_config:
        outdir = config.get("output_settings", "outdir", fallback=".")
    else:
        # bug fix: previously os.path.dirname(os.getcwd()), i.e. the
        # *parent* of the working directory, contradicting the documented
        # "current working directory" behaviour
        outdir = os.getcwd()

    output_dir_dict = {
        "outdir": outdir,
        "data": f"{outdir}/data",
        "submit": f"{outdir}/submit",
    }

    for directory in output_dir_dict.values():
        if not os.path.isdir(directory):
            os.mkdir(directory)

    return output_dir_dict
def retrieve_interpolator_files(config, args):
    """Locate the files needed to build the lens interpolator.

    Missing files (option absent or literally "None") are recorded as None,
    meaning they must be generated later; a configured path that does not
    exist on disk is an error.

    Parameters
    ----------
    config : configparser.ConfigParser
        Object containing settings from INI file
    args : argparse.Namespace
        Object containing commandline arguments to program

    Returns
    -------
    dict
        Keys ``dimensionless_frequency``, ``source_position``,
        ``amplification_factor_real``, ``amplification_factor_imag``
        mapping to a file path or None.

    Raises
    ------
    IOError
        If a configured file does not exist.
    """
    lens_type = "injection" if args.injection else "analysis"
    section = f"{lens_type}_lens_generation_settings"

    file_dict = {}
    for value_type in ("dimensionless_frequency", "source_position",
                       "amplification_factor_real", "amplification_factor_imag"):
        location = config.get(section, f"{value_type}_file", fallback=None)
        readable = value_type.replace("_", " ")

        if location in (None, "None"):
            gravelogger.info("%s file not given", readable)
            location = None
        else:
            if not os.path.isfile(location):
                raise IOError(f"{location} not found!")
            gravelogger.info("%s file found at %s", readable.title(), location)

        file_dict[value_type] = location

    gravelogger.info("Files found before handling: %s", file_dict)
    return file_dict
def grid_file_handler(config, args, data_subdirectory, file_dict):
    """
    Handles the interpolator grid file generation and locations.

    These files specify the dimensionless frequency and source position
    grid structure that is interpolated over for the amplification factor
    data. They may be directly specified in the INI, or are constructed
    here if not. The user is warned when amplification factor files are
    defined without the corresponding grid files, since a freshly
    generated grid may not match the pre-existing data.

    Parameters
    ----------
    config : configparser.ConfigParser
        Object containing settings from INI file
    args : argparse.Namespace
        Object containing commandline arguments to program
    data_subdirectory : str
        Path to the subdirectory containing data files
    file_dict : dict
        Contains either location of grid files, or None indicating these files require generation

    Returns
    -------
    lens_file_dict : dict
        Contains locations of completed grid files, now living at their
        canonical paths inside data_subdirectory.
    """
    if args.injection:
        lens_type = "injection"
    else:
        lens_type = "analysis"

    # Used below to warn when a generated grid may not match pre-existing
    # amplification data
    amplification_defined = bool(file_dict["amplification_factor_real"])\
                            or bool(file_dict["amplification_factor_imag"])

    temp_dict = {}

    for file_type in ("dimensionless_frequency", "source_position"):
        # Grid files always end up at a canonical location inside the data
        # subdirectory, whether generated here or copied from an INI path
        default_outfile = f"{data_subdirectory}/{lens_type}_{file_type}.dat"

        if file_dict[file_type] is None:
            if amplification_defined:
                gravelogger.warning(("Amplification factor files defined without corresponding "\
                                     "%s file. Interpolator grid may not be accurate!"),
                                    file_type.replace("_", " "))
            gravelogger.info("Generating %s file", file_type.replace("_", " "))

            # Build a uniform grid from the min/max/length INI settings
            min_value = config.getfloat(f"{lens_type}_lens_generation_settings",
                                        f"minimum_{file_type}")
            max_value = config.getfloat(f"{lens_type}_lens_generation_settings",
                                        f"maximum_{file_type}")
            num_values = config.getint(f"{lens_type}_lens_generation_settings",
                                       f"length_{file_type}")

            value_array = np.linspace(min_value, max_value, num_values)
            np.savetxt(default_outfile, value_array)
            temp_dict[file_type] = default_outfile
        else:
            # NOTE(review): shutil.copyfile raises SameFileError if the INI
            # already points at the canonical location -- confirm inputs
            # never alias the output path.
            shutil.copyfile(file_dict[file_type], default_outfile)
            temp_dict[file_type] = default_outfile

    lens_file_dict = dict(file_dict, **temp_dict)
    gravelogger.info("Files after grid handling: %s", lens_file_dict)

    return lens_file_dict
def data_file_handler(args,
                      data_subdirectory,
                      file_dict):
    """
    Handles the interpolator data file generation and locations.

    These files hold the real and imaginary components of the
    amplification factor data underlying the interpolator. They may be
    directly specified in the INI or may need generation. This handler
    does not run the (computationally expensive) generation itself; it
    only copies complete files into place and reports how many are done.

    Parameters
    ----------
    args : argparse.Namespace
        Object containing commandline arguments to program
    data_subdirectory : str
        Path to the subdirectory containing the data files
    file_dict : dict
        Contains either the path to the files, or None to indicate they require generation

    Returns
    -------
    lens_file_dict : dict
        Contains the path to the files for construction of lens interpolator.
    complete_files : int
        Number of data files that are complete
    """
    if args.injection:
        lens_type = "injection"
    else:
        lens_type = "analysis"

    # The expected grid shape is implied by the frequency/position files
    dimensionless_frequency_array = np.loadtxt(file_dict["dimensionless_frequency"])
    source_position_array = np.loadtxt(file_dict["source_position"])
    grid_shape = (len(dimensionless_frequency_array), len(source_position_array))

    temp_dict = {}
    complete_files = 0

    for file_type in "amplification_factor_real", "amplification_factor_imag":
        default_outfile = f"{data_subdirectory}/{lens_type}_{file_type}.dat"

        if bool(file_dict[file_type]):
            data_grid = np.loadtxt(file_dict[file_type])
            # A supplied file counts as complete only when its grid
            # matches the expected shape (either orientation accepted)
            if grid_shape in (data_grid.shape, data_grid.transpose().shape):
                complete_files += 1
                shutil.copyfile(file_dict[file_type], default_outfile)
                temp_dict[file_type] = default_outfile
        else:
            # no file given: record the canonical path as the generation
            # target.  NOTE(review): a supplied file with the wrong shape
            # keeps its original path and is not marked complete --
            # confirm downstream generation handles that case.
            temp_dict[file_type] = default_outfile

    lens_file_dict = dict(file_dict, **temp_dict)
    gravelogger.info("Files after data file handling: %s", lens_file_dict)
    gravelogger.info("%s data file(s) complete", complete_files)

    return lens_file_dict, complete_files
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/jax/output/HTML-CSS/fonts/STIX/General/BoldItalic/Latin1Supplement.js | MathJax.Hub.Insert(MathJax.OutputJax["HTML-CSS"].FONTDATA.FONTS["STIXGeneral-bold-italic"],{160:[0,0,250,0,0],161:[494,205,389,19,320],162:[576,143,500,42,439],163:[683,12,500,-32,510],164:[542,10,500,-26,526],165:[669,0,500,33,628],166:[685,18,220,66,154],167:[685,143,500,36,459],168:[655,-525,333,55,397],169:[685,18,747,30,718],170:[685,-399,266,16,330],171:[415,-32,500,12,468],172:[399,-108,606,51,555],173:[282,-166,333,2,271],174:[685,18,747,30,718],175:[623,-553,333,51,393],176:[688,-402,400,83,369],177:[568,0,570,33,537],178:[683,-274,300,2,313],179:[683,-265,300,17,321],180:[697,-516,333,139,379],181:[449,207,576,-60,516],182:[669,193,617,60,679],183:[405,-257,250,51,199],184:[5,218,333,-80,156],185:[683,-274,300,30,301],186:[685,-400,300,56,347],187:[415,-32,500,12,468],188:[683,14,750,7,721],189:[683,14,750,-9,723],190:[683,14,750,7,726],191:[492,205,500,30,421],192:[947,0,667,-68,593],193:[947,0,667,-68,593],194:[940,0,667,-68,593],195:[905,0,667,-68,612],196:[905,0,667,-68,599],197:[1004,0,667,-68,593],198:[669,0,944,-64,918],199:[685,218,667,32,677],200:[947,0,667,-27,653],201:[947,0,667,-27,653],202:[940,0,667,-27,653],203:[905,0,667,-27,653],204:[947,0,389,-32,406],205:[947,0,389,-32,440],206:[940,0,389,-32,469],207:[905,0,389,-32,480],208:[669,0,722,-31,700],209:[905,15,722,-27,748],210:[947,18,722,27,691],211:[947,18,722,27,691],212:[940,18,722,27,691],213:[905,18,722,27,691],214:[905,18,722,27,691],215:[490,-16,570,48,522],216:[764,125,722,27,691],217:[947,18,722,67,744],218:[947,18,722,67,744],219:[940,18,722,67,744],220:[905,18,722,67,744],221:[947,0,611,71,659],222:[669,0,611,-27,573],223:[705,200,500,-200,473],224:[697,14,500,-21,456],225:[697,14,500,-21,456],226:[690,14,500,-21,475],227:[655,14,500,-21,497],228:[655,14,500,-21,485],229:[756,14,500,-21,456],230:[462,13,722,-5,673],231:[462,218,444,-24
,392],232:[697,13,444,5,398],233:[697,13,444,5,419],234:[690,13,444,5,462],235:[655,13,444,5,470],236:[697,9,278,2,294],237:[697,9,278,2,310],238:[690,9,278,2,353],239:[655,9,278,2,362],240:[699,13,500,-3,454],241:[655,9,556,-6,507],242:[697,13,500,-3,441],243:[697,13,500,-3,441],244:[690,13,500,-3,462],245:[655,13,500,-3,485],246:[655,13,500,-3,470],247:[535,29,570,33,537],248:[560,119,500,-3,441],249:[697,9,556,15,493],250:[697,9,556,15,493],251:[690,9,556,15,493],252:[655,9,556,15,493],253:[697,205,444,-94,401],254:[699,205,500,-120,446],255:[655,205,444,-94,460]});MathJax.Ajax.loadComplete(MathJax.OutputJax["HTML-CSS"].fontDir+"/General/BoldItalic/Latin1Supplement.js"); | PypiClean |
/ELASTIC3rd-2.5.1.tar.gz/ELASTIC3rd-2.5.1/elastic3rd/energy/vasp.py | import re
import os
import shutil
import linecache
import numpy as np
import elastic3rd.energy.glue as glue
def get_base_vec(BaseName):
    '''
    Get the base vector from POSCAR

    Parameter
    ---------
    BaseName: str
        Unused; kept for interface consistency with the castep backend.

    Return
    ------
    BaseVec: np.ndarray
        The 3x3 crystal lattice vectors (rows), read from lines 3-5 of
        the POSCAR file in the current directory.
    '''
    BaseVec = np.zeros((3, 3))
    # Use a context manager so the handle is always closed; the previous
    # implementation returned from inside the read loop without closing.
    with open("POSCAR", "r") as fopen:
        lines = fopen.readlines()
    # POSCAR layout: line 1 comment, line 2 scaling factor, lines 3-5 the
    # lattice vectors (whitespace separated)
    for i in range(3):
        fields = lines[i + 2].split()
        for j in range(3):
            BaseVec[i][j] = float(fields[j])
    return BaseVec
def write_base_vec(BaseName, BaseVec):
    '''
    Write the base vectors into POSCAR (lines 3-5), leaving every other
    line untouched.

    Parameter
    ---------
    BaseName: str
        Unused; kept for interface consistency with the castep backend.
    BaseVec: np.ndarray
        The 3x3 crystal lattice vectors (rows).

    Return
    ------
    None
    '''
    FileName = "POSCAR"
    with open(FileName, "r") as fopen:
        lines = fopen.readlines()
    # Replace lines 3-5 (indexes 2-4) with the formatted lattice rows.
    # Rewriting in place replaces the old temp-file dance, which opened
    # the temporary in append mode and could pick up stale leftovers from
    # an earlier failed run.
    for i in range(3):
        lines[2 + i] = " %.15f %.15f %.15f\n" % tuple(BaseVec[i])
    with open(FileName, "w") as fopen:
        fopen.writelines(lines)
def run(NP, BaseName):
    '''
    Build the shell command used to launch VASP.

    Parameters
    ----------
    NP: int
        Total number of MPI ranks.
    BaseName: str
        Unused; kept for interface consistency with the castep backend.

    Return
    ------
    str
        The mpirun invocation for vasp_std.
    '''
    core_count = int(NP)
    return f"mpirun -np {core_count} vasp_std"
def get_energy(BaseName):
    '''
    Get the energy from OSZICAR

    Scans OSZICAR for summary lines containing at least two '=' signs and
    keeps the first token after the second '=' of the last such line
    (in standard OSZICAR output this is the E0 total energy -- confirm
    for your VASP version).

    Parameter
    ---------
    BaseName: str
        Unused; kept for interface consistency with the castep backend.

    Return
    ------
    Energy:
        The parsed energy converted via glue.multi_energy.

    Raises
    ------
    ValueError
        If OSZICAR contains no energy line (previously this surfaced as a
        confusing NameError on the unbound local).
    '''
    energy = None
    with open("OSZICAR", "r") as fopen:
        for eachline in fopen:
            parts = eachline.split("=")
            if len(parts) > 2:
                # keep overwriting so the last matching line wins
                energy = parts[2].strip().split(" ")[0]
    if energy is None:
        raise ValueError("No energy line found in OSZICAR")
    # eV, Hartree, kJ/mol, kcal/mol
    Energy = glue.multi_energy(float(energy))
    return Energy
def copy_files(BaseName, Path):
    '''
    Copy the VASP input files (INCAR, POSCAR, POTCAR, KPOINTS) from the
    current directory into the destination folder.

    Parameters
    ----------
    BaseName: str
        Unused; kept for interface consistency with the castep backend.
    Path: path-like str
        The destination folder to store the files

    Return
    ------
    None
    '''
    for input_file in ("INCAR", "POSCAR", "POTCAR", "KPOINTS"):
        shutil.copyfile(input_file, Path + "/" + input_file)
/Flickr.API-0.4.4.tar.gz/Flickr.API-0.4.4/Flickr/API.py | __author__ = "Gilad Raphaelli"
__version__ = "0.4.4"
try:
import hashlib
except ImportError:
import md5 as hashlib
import mimetypes,urllib,urllib2
import warnings
import API
def encode_multipart_formdata(args):
    """ Encode upload as multipart/form-data. From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306

    File-like values (anything with a .read() method) are emitted as file
    parts; every other value becomes a plain form field.

    Returns a (headers, body) tuple where headers carries the Content-Type
    (with boundary) and Content-Length for the request.
    """
    BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
    CRLF = '\r\n'
    L = []
    for (key, value) in args.items():
        if hasattr(value, 'read'):
            # choose the best available filename for this part
            if hasattr(value, 'name'):
                filename = value.name
            elif 'title' in args:  # bug fix: dict.has_key() was removed in Python 3
                filename = args['title']
            else:
                filename = 'unknown'
            L.append('--' + BOUNDARY)
            L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
            L.append('Content-Type: %s' % get_content_type(filename))
            L.append('')
            L.append(value.read())
        else:
            L.append('--' + BOUNDARY)
            L.append('Content-Disposition: form-data; name="%s"' % key)
            L.append('')
            L.append(value)
    # closing boundary plus trailing CRLF
    L.append('--' + BOUNDARY + '--')
    L.append('')
    body = CRLF.join(L)
    headers = {
        'Content-Type': 'multipart/form-data; boundary=%s' % BOUNDARY,
        'Content-Length': len(body)
    }
    return (headers, body)
def encode_urlencode(args):
    """ Encode args as application/x-www-form-urlencoded; no extra headers
    are required, so the headers dict is empty. """
    body = urllib.urlencode(args)
    return ({}, body)
def get_content_type(filename):
    """ Guess the MIME type from the filename, falling back to a generic
    binary type when the extension is unknown. """
    guessed, _encoding = mimetypes.guess_type(filename)
    if guessed is None:
        return 'application/octet-stream'
    return guessed
def sign_args(secret, args):
    """ Given a Flickr API secret and a dict of args including an api_key
    key, return the api_sig (string): the MD5 hex digest of the secret
    followed by each key (and its value, when not None) in sorted key order.

    NOTE(review): hashlib.md5 here is fed a str, which only works on
    Python 2 -- this module appears to target Python 2 throughout.
    """
    pieces = [secret]
    for key in sorted(args.keys()):
        pieces.append(key)
        if args[key] is not None:
            pieces.append(str(args[key]))
    return hashlib.md5(''.join(pieces)).hexdigest()
class APIError(Exception):
    """Raised when a Flickr API call cannot be prepared or executed."""
    pass
class APIWarning(RuntimeWarning):
    """Non-fatal warning category for Flickr API usage issues."""
    pass
class API:
    """ To access the Flickr API.

    key is the Flickr API key; secret is the shared secret used for
    signing requests (signing is skipped silently when no secret is set).
    """
    def __init__(self, key, secret=None):
        self.key = key
        self.secret = secret

    def execute_method(self, method, args=None, sign=True):
        """ Given a Flickr API method and arguments, construct a Flickr.API.Request and return a urllib2.addinfourl """
        if args is None:
            args = {}
        args['method'] = method
        return self.execute_request(Request(**args), sign)

    def execute_upload(self, filename, args=None):
        """ Given a photo filename and arguments, construct a Flickr.API.Request for uploading and return a urllib2.addinfourl """
        if args is None:
            args = {}
        try:
            photo = open(filename, mode='rb')
            args['photo'] = photo
        except IOError as e:
            # BUG FIX: was 'except IOError, (e.no, e.msg)', which tried to
            # unpack the exception into attributes of an undefined name 'e'
            # and raised NameError whenever open() actually failed.
            raise APIError("Unable to open %s - %s: %s" % (filename, e.errno, e.strerror))
        return self.execute_request(Request('http://api.flickr.com/services/upload/', **args), sign=True, encode=encode_multipart_formdata)

    def execute_request(self, request, sign=True, encode=encode_urlencode):
        """ Given a Flickr.API.Request return a urllib2.addinfourl, altering
        the original Request. The request will silently not sign if no secret
        is available. """
        request.args['api_key'] = self.key
        if sign and self.secret is not None:
            # Sign args as they are now, except photo (the binary payload
            # is never part of the signature).
            args_to_sign = {}
            for (k, v) in request.args.items():
                # BUG FIX: was "k not in ('photo')" -- a parenthesized
                # *string*, i.e. a substring test, not a one-element tuple.
                if k not in ('photo',):
                    args_to_sign[k] = v
            request.args['api_sig'] = self._sign_args(args_to_sign)
        request.add_header('Host', request.get_host())
        (headers, body) = encode(request.args)
        for (header, value) in headers.items():
            request.add_header(header, value)
        # urllib2 method goes POST when data is added (but make sure)
        request.add_data(body)
        if (request.get_method() != "POST"):
            raise Exception("not a POST? Something is wrong here")
        return urllib2.urlopen(request)

    def get_authurl(self, perms, **kwargs):
        """ Get a client authentication url for web-based and non-web based clients
        New in 0.4.1, use this method instead of get_auth_url """
        args = {
            'perms': perms,
            'api_key': self.key,
        }
        kwargs.update(args)
        kwargs['api_sig'] = sign_args(self.secret, kwargs)
        return "http://flickr.com/services/auth/?%s" % (urllib.urlencode(kwargs),)

    def get_auth_url(self, frob, perms):
        """ Given a frob obtained via a 'flickr.auth.getFrob' and perms
        (currently read, write, or delete) return a url for desktop client api
        authentication
        Deprecated in 0.4.1, use get_authurl for new applications """
        # BUG FIX: previously called the nonexistent *global* get_authurl();
        # it is a method on this class.
        return self.get_authurl(perms, frob=frob)

    def _sign_args(self, args):
        # Delegate to the module-level sign_args with this API's secret.
        return sign_args(self.secret, args)
class Request(urllib2.Request):
    """ A request to the Flickr API, subclassed from urllib2.Request so
    custom proxies, caches, headers, etc. can be used.  All extra keyword
    arguments are stored on self.args for later signing/encoding. """
    def __init__(self, apiurl='http://api.flickr.com/services/rest/', **args):
        # Keep the raw API args; the base class only needs the URL.
        # (urllib2.Request is an old-style class on Python 2, so the
        # explicit base-class call is used instead of super().)
        self.args = args
        urllib2.Request.__init__(self, url=apiurl)
if (__name__ == '__main__'):
import sys
try:
key = sys.argv[1]
secret = sys.argv[2]
except IndexError:
print "Usage: %s <key> <secret>" % (sys.argv[0],)
sys.exit(1)
api = API(key, secret)
res = api.execute_method(method='flickr.test.echo', args={'foo':'bar'}) | PypiClean |
/Galaxy-ML-0.10.0.tar.gz/Galaxy-ML-0.10.0/galaxy_ml/preprocessors/_protein_one_hot_encoder.py | import numpy as np
from ._genome_one_hot_encoder import GenomeOneHotEncoder
from ..externals import selene_sdk
class ProteinOneHotEncoder(GenomeOneHotEncoder):
    """Convert protein sequences to one-hot encoded 2d array

    Parameters
    ----------
    fasta_path : str, default None
        File path to the fasta file. There could two other ways to set up
        `fasta_path`. 1) through fit_params; 2) set_params(). If fasta_path is
        None, we suppose the sequences are contained in first column of X.
    padding : bool, default is False
        All sequences are expected to be in the same length, but sometimes not.
        If True, all sequences use the same length of first entry by either
        padding or truncating. If False, raise ValueError if different sequence
        lengths are found.
    seq_length : None or int
        Sequence length. If None, determined by the first entry.
    """
    # The 20 standard amino acids, each mapped to its one-hot channel index.
    BASE_TO_INDEX = {
        'A': 0, 'R': 1, 'N': 2, 'D': 3, 'C': 4, 'E': 5, 'Q': 6,
        'G': 7, 'H': 8, 'I': 9, 'L': 10, 'K': 11, 'M': 12, 'F': 13,
        'P': 14, 'S': 15, 'T': 16, 'W': 17, 'Y': 18, 'V': 19
    }
    # Symbol used for unknown residues and for padding short sequences.
    UNK_BASE = 'X'

    def transform(self, X):
        """convert index in X into one-hot encoded 2d array

        Parameter
        ---------
        X : array, (n_samples, 1)
            Contains the index numbers of fasta sequence in the fasta file.

        Returns
        -------
        Transformed X in 3d array, (n_sequences, sequence_length, 20)
        """
        # Each residue one-hot encodes into 20 channels (one per amino acid).
        sequences_encoding = np.zeros((X.shape[0],
                                       self.seq_length_,
                                       20))
        for i in range(X.shape[0]):
            # X holds either indices into the fasta file or, when no fasta
            # file was given, the raw sequences themselves.
            # (Redundant second str() conversion removed.)
            cur_sequence = str(self.fasta_file[int(X[i, 0])]) \
                if self.fasta_file else str(X[i, 0])
            if len(cur_sequence) > self.seq_length_:
                cur_sequence = selene_sdk.predict._common._truncate_sequence(
                    cur_sequence,
                    self.seq_length_)
            elif len(cur_sequence) < self.seq_length_:
                cur_sequence = selene_sdk.predict._common._pad_sequence(
                    cur_sequence,
                    self.seq_length_,
                    ProteinOneHotEncoder.UNK_BASE)
            cur_sequence_encoding = selene_sdk.sequences._sequence.\
                _fast_sequence_to_encoding(
                    cur_sequence,
                    ProteinOneHotEncoder.BASE_TO_INDEX,
                    20)
            sequences_encoding[i, :, :] = cur_sequence_encoding
        return sequences_encoding
/Mopidy-Mopify-1.7.3.tar.gz/Mopidy-Mopify-1.7.3/mopidy_mopify/static/debug/src/app/music/artist/artist.controller.js | 'use strict';
angular.module('mopify.music.artist', [
'ngRoute',
'mopify.services.spotifylogin',
'mopify.services.servicemanager',
'llNotifier',
'spotify',
'mopify.services.mopidy',
'mopify.services.station',
'mopify.widgets.directive.artist'
]).config([
'$routeProvider',
function ($routeProvider) {
$routeProvider.when('/music/artist/:artistId', {
templateUrl: 'music/artist/artist.tmpl.html',
controller: 'ArtistController'
});
}
]).controller('ArtistController', [
'$scope',
'$routeParams',
'mopidyservice',
'stationservice',
'notifier',
'Spotify',
'SpotifyLogin',
'ServiceManager',
function ArtistController($scope, $routeParams, mopidyservice, stationservice, notifier, Spotify, SpotifyLogin, ServiceManager) {
$scope.artistId = $routeParams.artistId;
// Determine the currentview
$scope.currentview = {
id: 'music',
name: 'Music'
};
$scope.followingArtist = false;
// Define the view
$scope.setView = function (name) {
switch (name) {
case 'music':
$scope.currentview = {
id: 'music',
name: 'Music'
};
break;
case 'related':
$scope.currentview = {
id: 'related',
name: 'Related Artists'
};
break;
case 'bio':
$scope.currentview = {
id: 'bio',
name: 'Biography'
};
break;
}
};
if (ServiceManager.isEnabled('spotify') && SpotifyLogin.connected) {
// First get the album's tracks
Spotify.userFollowingContains('artist', $scope.artistId.replace('spotify:artist:', '')).then(function (response) {
$scope.followingArtist = response.data[0];
});
}
// Load artist data
$scope.artist = {};
Spotify.getArtist($scope.artistId).then(function (response) {
$scope.artist = response.data;
});
// Get data from echonest
/*Echonest.artists.get({
id: $routeParams.artistId
}).then(function(artist){
$scope.artist = artist;
artist.getBiographies();
// Get images from artist
artist.getImages().then(function(data){
var random = Math.floor(Math.random() * data.images.length);
$scope.artist.coverimage = data.images[random].url;
});
artist.getBiographies().then(function(data){
var bios = data.biographies;
for(var x = 0; x < bios.length; x++){
if(bios[x].truncated === false || bios[x].truncated === undefined){
$scope.artist.bio = bios[x];
break;
}
}
});
});*/
// Get related artists from spotify
Spotify.getRelatedArtists($scope.artistId).then(function (response) {
$scope.related = response.data.artists.splice(0, 18);
});
// Init an empty toptracks object
$scope.toptracks = [];
// Get the artist's top tracks
Spotify.getArtistTopTracks($scope.artistId, 'NL').then(function (response) {
$scope.toptracks = response.data.tracks;
});
// Get info from mopidy
var options = {
album_type: 'album,single',
country: 'NL',
limit: 50
};
Spotify.getArtistAlbums($scope.artistId, options).then(function (response) {
$scope.albums = response.data.items;
});
/**
* Start a station for the artist
*/
$scope.startStation = function () {
stationservice.startFromSpotifyUri($scope.artistId);
};
/**
* Follow or unfollow the current artist on Spotify
*/
$scope.toggleFollowArtist = function () {
if (ServiceManager.isEnabled('spotify') && SpotifyLogin.connected) {
if ($scope.followingArtist) {
// unfollow
Spotify.unfollow('artist', $scope.artistId.replace('spotify:artist:', '')).then(function (data) {
notifier.notify({
type: 'custom',
template: 'Artist succesfully unfollowed.',
delay: 5000
});
$scope.followingArtist = false;
}, function (data) {
notifier.notify({
type: 'custom',
template: 'Something wen\'t wrong, please try again.',
delay: 5000
});
});
} else {
// follow
Spotify.follow('artist', $scope.artistId.replace('spotify:artist:', '')).then(function (data) {
notifier.notify({
type: 'custom',
template: 'Artist succesfully followed.',
delay: 5000
});
$scope.followingArtist = true;
}, function (data) {
notifier.notify({
type: 'custom',
template: 'Something wen\'t wrong, please try again.',
delay: 5000
});
});
}
} else {
notifier.notify({
type: 'custom',
template: 'Can\'t follow/unfollow artist. Are you connected with Spotify?',
delay: 5000
});
}
};
}
]); | PypiClean |
/DAFI-1.0.2.tar.gz/DAFI-1.0.2/dafi/random_field/field.py | import warnings
# third party imports
import numpy as np
from scipy import sparse as sp
from scipy.sparse import linalg as splinalg
from scipy import interpolate
from scipy import spatial
# KL decomposition
def calc_kl_modes(cov, nmodes=None, weight_field=None, eps=1e-8,
                  normalize=True):
    """ Calculate the first N Karhunen-Loève modes for a covariance
    field.

    Converts the covariance to a sparse matrix if it is not one yet.

    Parameters
    ----------
    cov : ndarray
        Covariance matrix. Can be ndarray, matrix, or scipy sparse
        matrix. *dtype=float*, *ndim=2*, *shape=(nstate, nstate)*
    nmodes : int
        Number of KL modes to obtain. Default: nstate - 1.
    weight_field : ndarray
        Weight (e.g. cell volume) associated with each state.
        Default ones (1). *dtype=float*, *ndim=1*, *shape=(nstate)*
    eps : float
        Small quantity to add to the diagonal of the covariance matrix
        for numerical stability.
    normalize : bool
        Whether to normalize (norm = 1) the KL modes.

    Returns
    -------
    eig_vals : ndarray
        Eigenvalue associated with each mode.
        *dtype=float*, *ndim=1*, *shape=(nmodes)*
    kl_modes : ndarray
        KL modes (eigenvectors).
        *dtype=float*, *ndim=2*, *shape=(nstate, nmodes)*
    """
    # convert to sparse matrix
    cov = sp.csc_matrix(cov)
    # default values
    nstate = cov.shape[0]
    if nmodes is None:
        nmodes = nstate-1
    weight_field = _preprocess_field(weight_field, nstate, 1.0)
    # add small value to diagonal for numerical stability
    cov = cov + sp.eye(cov.shape[0], format='csc')*eps
    # weight the covariance: C_w[i, j] = C[i, j] * sqrt(w_i * w_j)
    weight_field = np.squeeze(weight_field)
    weight_vec = np.atleast_2d(weight_field)
    weight_mat = np.sqrt(np.dot(weight_vec.T, weight_vec))
    cov_weighted = cov.multiply(weight_mat)
    # perform the eig-decomposition
    eig_vals, eig_vecs = sp.linalg.eigsh(cov_weighted, k=nmodes)
    # sort the eig-value and eig-vectors in a descending order
    descending_order = eig_vals.argsort()[::-1]
    eig_vals = eig_vals[descending_order]
    eig_vecs = eig_vecs[:, descending_order]
    # normalized KL modes: undo the sqrt(w) weighting applied above
    weight_diag = np.diag(np.sqrt(weight_field))
    kl_modes = np.dot(np.linalg.inv(weight_diag), eig_vecs)  # normalized
    # zero-out modes with negative eigenvalues and warn about each.
    # BUG FIX: the flag was previously (re)initialized *inside* the loop,
    # so it only reflected the last mode, and the summary message below was
    # built but never passed to warnings.warn().
    neg_eigv = False
    for imode in np.arange(nmodes):
        if eig_vals[imode] < 0:
            neg_eigv = True
            warnings.warn(f'Negative eigenvalue for mode {imode}.')
            kl_modes[:, imode] *= 0.
    if neg_eigv:
        warnings.warn(
            'Some modes have negative eigenvalues. '
            'The number of KL modes might be too large. '
            "Alternatively, use a larger value for 'eps'.")
    # weight by appropriate variance
    if not normalize:
        kl_modes = scale_kl_modes(eig_vals, kl_modes)
    return eig_vals, kl_modes
def calc_kl_modes_coverage(cov, coverage, weight_field=None, eps=1e-8,
                           max_modes=None, normalize=True):
    """ Calculate all KL modes and return only those required to achieve
    a certain coverage of the variance.

    Parameters
    ----------
    cov : ndarray
        Covariance matrix. Can be ndarray, matrix, or scipy sparse
        matrix. *dtype=float*, *ndim=2*, *shape=(nstate, nstate)*
    coverage : float
        Desired percentage coverage of the variance. Value between 0-1.
    weight_field : ndarray
        Weight (e.g. cell volume) associated with each state.
        Default ones (1). *dtype=float*, *ndim=1*, *shape=(nstate)*
    eps : float
        Small quantity to add to the diagonal of the covariance matrix
        for numerical stability.
    max_modes : int
        Maximum number of modes to calculate. Default: nstate - 1.
    normalize : bool
        Whether to normalize (norm = 1) the KL modes.

    Returns
    -------
    eig_vals : ndarray
        Eigenvalue associated with each mode. For the first N modes such
        that the desired coverage of the variance is achieved.
        *dtype=float*, *ndim=1*, *shape=(N)*
    kl_modes : ndarray
        first N KL modes (eigenvectors) such that the desired coverage
        of the variance is achieved.
        *dtype=float*, *ndim=2*, *shape=(nstate, N)*
    """
    # convert to sparse matrix
    cov = sp.csc_matrix(cov)
    # default values
    nstate = cov.shape[0]
    weight_field = _preprocess_field(weight_field, nstate, 1.0)
    if max_modes is None:
        max_modes = nstate - 1
    # get the first max_modes KL modes
    eig_vals, kl_modes = calc_kl_modes(
        cov, max_modes, weight_field, eps, normalize)
    # return only those KL modes required for desired coverage
    cummalative_variance = kl_coverage(cov, eig_vals, weight_field)
    achieved = cummalative_variance >= coverage
    if np.any(achieved):
        # BUG FIX: previously returned eig_vals[:argmax], which excluded
        # the mode that first achieves the requested coverage (off by one)
        # and, because argmax is 0 both when the first mode suffices and
        # when coverage is never reached, wrongly returned *all* modes when
        # the first mode alone was enough.
        ncover = int(np.argmax(achieved)) + 1
    else:
        ncover = max_modes
    return eig_vals[:ncover], kl_modes[:, :ncover]
def scale_kl_modes(eig_vals, kl_modes_norm):
    """ Weight the KL modes by the appropriate variance.

    Parameters
    ----------
    eig_vals : ndarray
        Eigenvalue associated with each mode.
        *dtype=float*, *ndim=1*, *shape=(nmodes)*
    kl_modes_norm : ndarray
        Normalized (norm = 1) KL modes (eigenvectors).
        *dtype=float*, *ndim=2*, *shape=(nstate, nmodes)*

    Returns
    -------
    kl_modes_weighted : ndarray
        KL modes with correct magnitude.
        *dtype=float*, *ndim=2*, *shape=(nstate, nmodes)*
    """
    kl_modes_weighted = kl_modes_norm.copy()
    # Broadcasting scales each column (mode) by sqrt of its eigenvalue;
    # replaces the previous per-mode Python loop.
    kl_modes_weighted *= np.sqrt(eig_vals)
    return kl_modes_weighted
def kl_coverage(cov, eig_vals, weight_field=None):
    """ Cumulative fraction of the (weighted) total variance captured by
    the first N KL modes, for N = 1..nmodes.

    Parameters
    ----------
    cov : ndarray
        Covariance matrix. Can be ndarray, matrix, or scipy sparse
        matrix. *dtype=float*, *ndim=2*, *shape=(nstate, nstate)*
    eig_vals : ndarray
        Eigenvalues associated with each mode.
        *dtype=float*, *ndim=1*, *shape=(nmodes)*
    weight_field : ndarray
        Weight (e.g. cell volume) associated with each state.
        *dtype=float*, *ndim=1*, *shape=(nstate)*

    Returns
    -------
    coverage: ndarray
        Cumulative variance coverage of the first N modes. Each value
        is 0-1 and increasing.
        *dtype=float*, *ndim=1*, *shape=(nmodes)*
    """
    cov = sp.csc_matrix(cov)
    weights = _preprocess_field(weight_field, cov.shape[0], 1.0)
    # total variance = trace of the sqrt(w_i * w_j)-weighted covariance
    wvec = np.atleast_2d(weights)
    wmat = np.sqrt(np.dot(wvec.T, wvec))
    total_variance = np.sum(cov.multiply(wmat).diagonal())
    return np.cumsum(eig_vals) / total_variance
def reconstruct_kl(modes, coeffs, mean=None):
    """ Reconstruct a field using KL modes and given coefficients.

    Can create multiple fields by providing two dimensional array of
    coefficients.

    Parameters
    ----------
    modes : ndarray
        KL modes. *dtype=float*, *ndim=2*, *shape=(nstate, nmodes)*
    coeffs : ndarray
        Array of coefficients.
        *dtype=float*, *ndim=2*, *shape=(nmodes, nsamples)*
    mean : ndarray
        Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*

    Returns
    -------
    fields : ndarray
        Reconstructed fields.
        *dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
    """
    # number of modes, samples, and states
    if len(coeffs.shape) == 1:
        coeffs = np.expand_dims(coeffs, 1)
    nmodes, nsamps = coeffs.shape
    nstate = modes.shape[0]
    # mean vector (column), tiled across samples
    mean = _preprocess_field(mean, nstate, 0.0)
    mean = np.expand_dims(np.squeeze(mean), axis=1)
    # single matrix product replaces the previous per-mode accumulation
    # loop; only the first nmodes columns of `modes` are used, matching
    # the old behavior when extra modes are supplied.
    return np.tile(mean, [nsamps]) + np.dot(modes[:, :nmodes], coeffs)
def project_kl(field, modes, weight_field=None, mean=None):
    """ Project a field onto a set of KL modes.

    Parameters
    ----------
    field : ndarray
        Scalar field. *dtype=float*, *ndim=1*, *shape=(ncells)*
    modes : ndarray
        KL modes. *dtype=float*, *ndim=2*, *shape=(nstate, nmodes)*
    weight_field : ndarray
        Weight (e.g. cell volume) associated with each state.
        *dtype=float*, *ndim=1*, *shape=(nstate)*
    mean : ndarray
        Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*

    Returns
    -------
    coeffs : ndarray
        Projection magnitude per mode.
        *dtype=float*, *ndim=1*, *shape=(nmodes)*
    """
    nstate, nmode = modes.shape
    mean = _preprocess_field(mean, nstate, 0.0)
    # project the mean-removed field onto each mode in turn
    centered = field - mean
    coeffs = [
        projection_magnitude(centered, modes[:, imode], weight_field)
        for imode in range(nmode)]
    return np.array(coeffs)
def _preprocess_field(field, nstate, default):
    """Expand a missing (None) or scalar field into a full-length array;
    pass full-length fields through unchanged."""
    if field is None:
        return np.ones(nstate) * default
    # a scalar (or any single-element value) is broadcast to all states
    if len(np.atleast_1d(np.squeeze(np.array(field)))) == 1:
        return np.ones(nstate) * field
    return field
# linear algebra on scalar fields
def integral(field, weight_field):
    """ Weighted integral of a scalar field over the domain.

    Parameters
    ----------
    field : ndarray
        Scalar field. *dtype=float*, *ndim=1*, *shape=(ncells)*
    weight_field : ndarray
        Cell volumes. *dtype=float*, *ndim=1*, *shape=(ncells)*

    Returns
    -------
    field_integral : float
        The integral of the field over the domain.
    """
    field = np.squeeze(field)
    assert field.ndim == 1
    weights = _preprocess_field(weight_field, len(field), 1.0)
    return np.sum(field * weights)
def inner_product(field_1, field_2, weight_field):
    """ Weighted inner product of two fields (both use the same weights).

    Parameters
    ----------
    field_1 : ndarray
        One scalar field. *dtype=float*, *ndim=1*, *shape=(ncells)*
    field_2 : ndarray
        Another scalar field.
        *dtype=float*, *ndim=1*, *shape=(ncells)*
    weight_field : ndarray
        Cell volumes. *dtype=float*, *ndim=1*, *shape=(ncells)*

    Returns
    -------
    product : float
        The inner product between the two fields.
    """
    pointwise_product = field_1 * field_2
    return integral(pointwise_product, weight_field)
def norm(field, weight_field):
    """ Weighted L2-norm of a field.

    Parameters
    ----------
    field : ndarray
        Scalar field. *dtype=float*, *ndim=1*, *shape=(ncells)*
    weight_field : ndarray
        Cell volumes. *dtype=float*, *ndim=1*, *shape=(ncells)*

    Returns
    -------
    field_norm : float
        The norm of the field.
    """
    squared_norm = inner_product(field, field, weight_field)
    return np.sqrt(squared_norm)
def unit_field(field, weight_field):
    """ Rescale a field to unit weighted L2-norm, keeping its direction.

    Parameters
    ----------
    field : ndarray
        Scalar field. *dtype=float*, *ndim=1*, *shape=(ncells)*
    weight_field : ndarray
        Cell volumes. *dtype=float*, *ndim=1*, *shape=(ncells)*

    Returns
    -------
    field_normed : ndarray
        Normalized (norm = 1) scalar field.
        *dtype=float*, *ndim=1*, *shape=(ncells)*
    """
    magnitude = norm(field, weight_field)
    return field / magnitude
def projection_magnitude(field_1, field_2, weight_field):
    """ Magnitude of the projection of field_1 onto field_2.

    The two fields share the same weights.

    Parameters
    ----------
    field_1 : ndarray
        Scalar field being projected.
        *dtype=float*, *ndim=1*, *shape=(ncells)*
    field_2 : ndarray
        Scalar field used for projection direction.
        *dtype=float*, *ndim=1*, *shape=(ncells)*
    weight_field : ndarray
        Cell volumes.
        *dtype=float*, *ndim=1*, *shape=(ncells)*

    Returns
    -------
    magnitude : float
        magnitude of the projected field.
    """
    # <f1, f2> / ||f2||^2
    denominator = norm(field_2, weight_field) ** 2
    return inner_product(field_1, field_2, weight_field) / denominator
def projection(field_1, field_2, weight_field):
    """ Projection of field_1 onto field_2 (same weights for both).

    Parameters
    ----------
    field_1 : ndarray
        Scalar field being projected.
        *dtype=float*, *ndim=1*, *shape=(ncells)*
    field_2 : ndarray
        Scalar field used for projection direction.
        *dtype=float*, *ndim=1*, *shape=(ncells)*
    weight_field : ndarray
        Cell volumes.
        *dtype=float*, *ndim=1*, *shape=(ncells)*

    Returns
    -------
    projected_field : ndarray
        Projected field.
        *dtype=float*, *ndim=1*, *shape=(ncells)*
    """
    # magnitude along field_2 times the unit field in that direction
    coeff = projection_magnitude(field_1, field_2, weight_field)
    return coeff * unit_field(field_2, weight_field)
# interpolation
def interpolate_field_rbf(data, coords, kernel, length_scale):
    """ Interpolate data using a radial basis function (RBF) to create a
    field from sparse specifications.

    This is used for instance to specify a variance field based on
    expert knowledge.

    Parameters
    ----------
    data : ndarray
        Sparse data to create interpolation from. For an NxM array, the
        number of data points is N, the number of dimensions
        (coordinates) is M-1, and the Mth column is the data value.
        *dtype=float*, *ndim=2*, *shape=(N, M)*
    coords : ndarray
        Coordinates of the cell centers of the full discretized field.
        The RBF will be evaluated at these points.
        *dtype=float*, *ndim=2*, *shape=(ncells, M-1)*
    kernel : str
        Kernel (function) of the RBF. See *'function'* input of
        `scipy.interpolate.Rbf`_ for list of options.
    length_scale : float
        Length scale parameter (epsilon in `scipy.interpolate.Rbf`_)
        in the RBF kernel.

    Returns
    -------
    field : ndarray
        Full field. *dtype=float*, *ndim=1*, *shape=(ncells)*
    """
    ncoord = coords.shape[1]
    # scipy.interpolate.Rbf takes the coordinate arrays followed by the
    # array of data values as positional arguments.
    args1 = [data[:, icoord] for icoord in range(ncoord)]
    # BUG FIX: the data-value column was never passed to Rbf, so the last
    # coordinate column was silently treated as the values and evaluation
    # failed with an argument-count mismatch.
    args1.append(data[:, -1])
    args2 = [coords[:, icoord] for icoord in range(ncoord)]
    interp_func = interpolate.Rbf(
        *args1, function=kernel, epsilon=length_scale)
    return interp_func(*args2)
def inverse_distance_weights(coords, connectivity, points, tol=1e-6):
    """ Create linear interpolation matrix (observation operator H).

    Builds a sparse (npoints x ncells) matrix of inverse-distance weights
    so that ``H @ field`` evaluates a cell-centered field at the given
    points.

    Parameters
    ----------
    coords : ndarray
        Cell-center coordinates, shape (ncells, ndims).
    connectivity : sequence
        For each cell index, the indices of its neighbor cells
        (presumably face neighbors -- confirm against caller).
    points : ndarray
        Locations at which to interpolate, shape (npoints, ndims).
    tol : float
        Distance below which a point is treated as coinciding with its
        host cell center (weight 1 on that cell).

    Returns
    -------
    scipy.sparse.csc_matrix
        Interpolation weight matrix, shape (npoints, ncells).
    """
    # get host cell (cell centre closest to point)
    tree = spatial.cKDTree(coords)
    distances, indexes = tree.query(list(points))
    npoints = points.shape[0]
    ncells = coords.shape[0]
    # calculate weights (lil_matrix is efficient for incremental assembly)
    mat = sp.lil_matrix((npoints, ncells))
    for i in range(npoints):
        id = indexes[i]
        if distances[i] < tol:
            # if location is cell centre
            mat[i, id] = 1.0
        else:
            point = np.expand_dims(np.squeeze(points[i, :]), 0)
            neighbours = coords[connectivity[id], :]
            dist = spatial.distance.cdist(point, neighbours)
            weight = 1 / dist
            # normalize by the total weight, which includes the host
            # cell's own inverse distance
            wsum = np.sum(weight) + 1 / distances[i]
            weight /= wsum
            # host cell
            mat[i, id] = (1 / distances[i]) / wsum
            # neighbour cells
            mat[i, connectivity[id]] = weight
    return sp.csc_matrix(mat)
# Gaussian process: generate samples
def gp_samples_cholesky(cov, nsamples, mean=None, eps=1e-8):
    """ Generate samples of a Gaussian Process using Cholesky
    decomposition.

    Parameters
    ----------
    cov : ndarray
        Covariance matrix. Can be ndarray, matrix, or scipy sparse
        matrix. *dtype=float*, *ndim=2*, *shape=(nstate, nstate)*
    nsamples : int
        Number of samples to generate.
    mean : ndarray
        Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*
    eps : float
        Small quantity to add to the diagonal of the covariance matrix
        for numerical stability.

    Returns
    -------
    samples : ndarray
        Matrix of samples.
        *dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
    """
    cov = sp.csc_matrix(cov)
    nstate = cov.shape[0]
    # regularize the diagonal for numerical stability
    cov = cov + sp.eye(nstate, format='csc') * eps
    # mean as a column vector
    mean_col = np.expand_dims(
        np.squeeze(_preprocess_field(mean, nstate, 0.0)), axis=1)
    # samples = mean + L @ z with z ~ N(0, I)
    lower = sparse_cholesky(cov)
    normals = np.random.normal(size=(nstate, nsamples))
    return mean_col + lower.dot(normals)
def sparse_cholesky(cov):
    """ Compute the Cholesky decomposition for a sparse (scipy) matrix.

    Adapted from `gist.github.com/omitakahiro`_.

    Parameters
    ----------
    cov : ndarray
        Covariance matrix. Can be ndarray, matrix, or scipy sparse
        matrix. *dtype=float*, *ndim=2*, *shape=(nstate, nstate)*

    Returns
    -------
    lower: scipy.sparse.csc_matrix
        Lower triangular Cholesky factor of the covariance matrix.
    """
    cov = sp.csc_matrix(cov)
    # LU decomposition without pivoting (valid for SPD matrices)
    lu = splinalg.splu(cov, diag_pivot_thresh=0)
    # positive definite iff no row permutation occurred and U's diagonal
    # is strictly positive
    n = cov.shape[0]
    diag_u = lu.U.diagonal()
    is_posdef = (lu.perm_r == np.arange(n)).all() and (diag_u > 0).all()
    if not is_posdef:
        raise ValueError('The matrix is not positive definite')
    # L_chol = L * sqrt(diag(U))
    return lu.L.dot(sp.diags(np.sqrt(diag_u)))
def gp_samples_kl(cov, nsamples, weight_field, nmodes=None, mean=None,
                  eps=1e-8):
    """ Generate samples of a Gaussian Process using KL decomposition.

    Parameters
    ----------
    cov : ndarray
        Covariance matrix. Can be ndarray, matrix, or scipy sparse
        matrix. *dtype=float*, *ndim=2*, *shape=(nstate, nstate)*
    nsamples : int
        Number of samples to generate.
    weight_field : ndarray
        Weight (e.g. cell volume) associated with each state.
        *dtype=float*, *ndim=1*, *shape=(nstate)*
    nmodes : int
        Number of modes to use when generating samples. *'None'* to use
        all modes.
    mean : ndarray
        Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*
    eps : float
        Small quantity to add to the diagonal of the covariance matrix
        for numerical stability.

    Returns
    -------
    samples : ndarray
        Matrix of samples.
        *dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
    """
    # un-normalized KL modes carry the variance of each mode
    eigv, modes = calc_kl_modes(cov, nmodes, weight_field, eps, False)
    nmodes_used = len(eigv) if nmodes is None else nmodes
    # standard-normal KL coefficients
    coeffs = np.random.normal(0, 1, [nmodes_used, nsamples])
    return reconstruct_kl(modes, coeffs, mean)
def gp_samples_klmodes(modes, nsamples, mean=None):
    """ Generate samples of a Gaussian Process from precomputed KL modes.

    Parameters
    ----------
    modes : ndarray
        KL modes. *dtype=float*, *ndim=2*, *shape=(nstate, nmodes)*
    nsamples : int
        Number of samples to generate.
    mean : ndarray
        Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*

    Returns
    -------
    samples : ndarray
        Matrix of samples.
        *dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
    """
    # one standard-normal coefficient per mode per sample
    coeffs = np.random.normal(0, 1, [modes.shape[1], nsamples])
    return reconstruct_kl(modes, coeffs, mean)
def gp_samples_kl_coverage(cov, nsamples, weight_field, coverage=0.99,
                           max_modes=None, mean=None, eps=1e-8):
    """ Generate samples of a Gaussian Process using KL decomposition,
    keeping only the first N modes required to reach the desired
    variance coverage.

    Parameters
    ----------
    cov : ndarray
        Covariance matrix. Can be ndarray, matrix, or scipy sparse
        matrix. *dtype=float*, *ndim=2*, *shape=(nstate, nstate)*
    nsamples : int
        Number of samples to generate.
    weight_field : ndarray
        Weight (e.g. cell volume) associated with each state.
        *dtype=float*, *ndim=1*, *shape=(nstate)*
    coverage : float
        Desired percentage coverage of the variance. Value between 0-1.
    max_modes : int
        Maximum number of modes used. This is the number of modes that
        is calculated. If less are needed to achieve the desired
        coverage the additional ones are discarded.
    mean : ndarray
        Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*
    eps : float
        Small quantity to add to the diagonal of the covariance matrix
        for numerical stability.

    Returns
    -------
    samples : ndarray
        Matrix of samples.
        *dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
    nmodes : int
        Number of modes used to achieve the requested coverage.
    """
    # KL decomposition truncated at the requested coverage
    eigv, klmodes = calc_kl_modes_coverage(
        cov, coverage, weight_field, eps, max_modes, False)
    nmodes = len(eigv)
    # standard-normal KL coefficients
    coeffs = np.random.normal(0, 1, [nmodes, nsamples])
    samples = reconstruct_kl(klmodes, coeffs, mean)
    return samples, nmodes
def gp_sqrexp_samples(nsamples, coords, stddev, length_scales, mean=None,
                      weight_field=None, max_modes=None):
    """ Generate samples from a Gaussian Process with square exponential
    correlation kernel.

    This is a convenience function for new users or simple cases: it
    creates the covariance matrix, performs the KL decomposition keeping
    the modes needed for 99% variance coverage, and draws the samples.

    Parameters
    ----------
    nsamples : int
        Number of samples to generate.
    coords : ndarray
        Array of coordinates. Each row correspond to a different point
        and the number of columns is the number of physical dimensions
        (e.g. 3 for (x,y,z)).
        *dtype=float*, *ndim=2*, *shape=(npoints, ndims)*
    stddev : ndarray
        Standard deviation of each state. Alternatively, provide a float
        for a constant standard deviation.
        *dtype=float*, *ndim=1*, *shape=(nstate)*
    length_scales : list
        Length scale for each physical dimensions. List length is ndims.
        Each entry is either a one dimensional ndarray of length nstate
        (length scale field) or a float (constant length scale).
    mean : ndarray
        Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*
    weight_field : ndarray
        Weight (e.g. cell volume) associated with each state.
        *dtype=float*, *ndim=1*, *shape=(nstate)*
    max_modes : int
        Maximum number of modes used. This is the number of modes that
        is calculated. If less are needed to achieve 99% coverage the
        additional ones are discarded.
    """
    # local import avoids a circular dependency with the covariance module
    from dafi.random_field.covariance import generate_cov
    covariance = generate_cov(
        'sqrexp', stddev, coords=coords, length_scales=length_scales)
    samples, _nmodes = gp_samples_kl_coverage(
        covariance, nsamples, weight_field, 0.99, max_modes, mean)
    return samples
# Random field class
class GaussianProcess(object):
""" Gaussian process class.
Also allows for the creation of a function of a Gaussian process.
E.g. see *'Lognormal'* class.
"""
def __init__(self, klmodes, mean=None, weights=None, func=None,
funcinv=None):
""" Initialize Gaussian process class.
Parameters
----------
klmodes : ndarray
KL modes (eigenvectors).
*dtype=float*, *ndim=2*, *shape=(nstate, nmodes)*
mean : ndarray
Mean vector. Default zero (0).
*dtype=float*, *ndim=1*, *shape=(nstate)*
weights : ndarray
Weight (e.g. cell volume) associated with each state.
Default ones (1). *dtype=float*, *ndim=1*, *shape=(nstate)*
func: function
Function to create a random process that is a function of
a Gaussian process. Default is identity function (GP).
funcinv: function
Inverse of func.
"""
nstate = klmodes.shape[0]
self.klmodes = klmodes
self.ncell, self.nmodes = self.klmodes.shape
self.mean = _preprocess_field(mean, self.ncell, 0.0)
self.weights = _preprocess_field(mean, nstate, 1.0)
def func_identity(x):
return x
if func is None:
func = func_identity
if funcinv is None:
funcinv = func_identity
self.func = func
self.funcinv = funcinv
def sample_coeffs(self, nsamples):
""" Create Karhunen-Loève (KL) coefficents for random samples.
Parameters
----------
nsamples : int
Number of samples for which to generate KL coefficients.
Returns
-------
coeffs : ndarray
Matrix of samples KL coefficients for the Gaussian process.
*dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
"""
return np.random.normal(0, 1, [self.nmodes, nsamples])
def sample_gp(self, nsamples, mean=None):
""" Generate samples of the Gaussian process.
Parameters
----------
nsamples : int
Number of samples to generate.
mean : ndarray
Mean vector. If *None*, self.mean is used.
*dtype=float*, *ndim=1*, *shape=(nstate)*
Returns
-------
samples : ndarray
Sample fields from Gaussian process.
*dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
coeffs : ndarray
Matrix of samples KL coefficients for the Gaussian process.
*dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
"""
if mean is None:
mean = self.mean
coeffs = self.sample_coeffs(nsamples)
return reconstruct_kl(self.klmodes, coeffs, mean), coeffs
def sample_func(self, nsamples, mean=None):
""" Generate samples of the function of the Gaussian process.
Parameters
----------
nsamples : int
Number of samples to generate.
mean : ndarray
Mean vector. If *None*, self.mean is used.
*dtype=float*, *ndim=1*, *shape=(nstate)*
Returns
-------
samples : ndarray
Sample fields from the function of the Gaussian process.
*dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
"""
if mean is None:
mean = self.mean
coeffs = self.sample_coeffs(nsamples)
samps_gp = reconstruct_kl(self.klmodes, coeffs, mean)
return self.func(samps_gp), coeffs
def reconstruct_gp(self, coeffs, mean=None):
""" Reconstruct the Gaussian process field from given
KL coefficients.
Parameters
----------
coeffs : ndarray
Array of KL coefficients.
*dtype=float*, *ndim=2*, *shape=(nmodes, nsamples)*
mean : ndarray
Mean vector. If *None*, self.mean is used.
*dtype=float*, *ndim=1*, *shape=(nstate)*
Returns
-------
fields : ndarray
Reconstructed fields.
*dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
"""
if mean is None:
mean = self.mean
return reconstruct_kl(self.klmodes, coeffs, mean)
def reconstruct_func(self, coeffs, mean=None):
""" Reconstruct the function of the Gaussian process field
from given KL coefficients.
Parameters
----------
coeffs : ndarray
Array of KL coefficients.
*dtype=float*, *ndim=2*, *shape=(nmodes, nsamples)*
mean : ndarray
Mean vector. If *None*, self.mean is used.
*dtype=float*, *ndim=1*, *shape=(nstate)*
Returns
-------
fields : ndarray
Reconstructed fields.
*dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
"""
if mean is None:
mean = self.mean
val_gp = reconstruct_kl(self.klmodes, coeffs, mean)
return self.func(val_gp)
def pdf(self, coeffs):
""" Probaility density function (PDF).
PDF(x) where x is a field (point in sample space) specified by
KL coeffiecients.
Parameters
----------
coeffs : ndarray
Array of KL coefficients.
*dtype=float*, *ndim=2*, *shape=(nmodes, nsamples)*
Returns
-------
pdf : ndarray
Value of the PDF function for the given point in the sample space.
"""
return np.exp(logpdf(coeffs))
def logpdf(self, coeffs):
""" Logarithm of the probability density function.
log(PDF(x)) where x is a field (point in sample space)
specified by KL coeffiecients.
Parameters
----------
coeffs : ndarray
Array of KL coefficients.
*dtype=float*, *ndim=2*, *shape=(nmodes, nsamples)*
Returns
-------
logpdf : ndarray
Logarithm of the value of the PDF function for the given
point in the sample space.
"""
if len(coeffs.shape) == 1:
coeffs = np.expand_dims(coeffs, 1)
norm_coeff = np.linalg.norm(coeffs, axis=0)
const = np.log((2*np.pi)**(-self.ncell/2))
return const + -0.5*norm_coeff**2
    def project_gp_field(self, field, mean=None):
        """ Project a field onto the KL modes.

        Parameters
        ----------
        field : ndarray
            Scalar field. *dtype=float*, *ndim=1*, *shape=(ncells)*
        mean : ndarray
            Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*

        Returns
        -------
        coeffs : ndarray
            Projection magnitude.
            *dtype=float*, *ndim=1*, *shape=(nmodes)*
        """
        # Delegates to the module-level helper; ``mean`` may be None here,
        # in which case project_kl is expected to handle the default.
        return project_kl(field, self.klmodes, self.weights, mean)
    def project_func_field(self, field, mean=None):
        """ Project a field from the function of the Gaussian process
        onto the KL modes.

        Parameters
        ----------
        field : ndarray
            Scalar field. *dtype=float*, *ndim=1*, *shape=(ncells)*
        mean : ndarray
            Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*

        Returns
        -------
        coeffs : ndarray
            Projection magnitude.
            *dtype=float*, *ndim=1*, *shape=(nmodes)*
        """
        # map the field back to the underlying GP space before projecting
        field = self.funcinv(field)
        # NOTE(review): _preprocess_field is called with a single argument
        # here, while every other call site passes (field, nstate, default) —
        # confirm the helper has matching defaults, otherwise this raises.
        mean = _preprocess_field(mean)
        mean = self.funcinv(mean)
        return project_kl(field, self.klmodes, self.weights, mean)
class LogNormal(GaussianProcess):
    """ Log-normal process class. """

    def __init__(self, klmodes_gp, median=1.0, weights=None):
        """ Initialize log-normal process class.

        Parameters
        ----------
        klmodes_gp : ndarray
            KL modes (eigenvectors) of the underlying Gaussian process.
            *dtype=float*, *ndim=2*, *shape=(nstate, nmodes)*
        median : ndarray
            Median vector. Default one (1).
            *dtype=float*, *ndim=1*, *shape=(nstate)*
        weights : ndarray
            Weight (e.g. cell volume) associated with each state.
            Default ones (1). *dtype=float*, *ndim=1*, *shape=(nstate)*
        """
        nstate = klmodes_gp.shape[0]
        median = _preprocess_field(median, nstate, 1.0)
        # column vector so it broadcasts against (nstate, nsamples) fields
        self.median_func = np.expand_dims(np.squeeze(median), 1)

        def func(x):
            # log-normal field: median * exp(GP)
            return self.median_func * np.exp(x)

        def funcinv(y):
            return np.log(y / self.median_func)

        mean = 0.0
        # Bug fix: use the zero-argument super() form; the original
        # ``super(self.__class__, self)`` recurses infinitely if this class
        # is ever subclassed. Also removed concatenation residue fused onto
        # the original closing line.
        super().__init__(klmodes_gp, mean, weights, func, funcinv)
/Mtrax-2.2.07.zip/Mtrax-2.2.07/mtrax/chooseorientations.py | import wx
from wx import xrc
import motmot.wxvalidatedtext.wxvalidatedtext as wxvt
import numpy as num
from params import params
import pkg_resources # part of setuptools
# Absolute path to the XRC GUI resource file bundled with this package.
RSRC_FILE = pkg_resources.resource_filename( __name__, "chooseorientations.xrc" )
class ChooseOrientations:
    """Resolve the head/tail (180-degree) orientation ambiguity of tracked
    targets by dynamic programming over per-frame orientation flips,
    optionally driven by a small wx dialog for the weighting parameters."""

    def __init__(self, parent, targets, interactive=True):
        # targets: per-frame dicts mapping target id -> track data
        self.interactive = interactive
        self.targets = targets
        if self.interactive:
            rsrc = xrc.XmlResource(RSRC_FILE)
            self.frame = rsrc.LoadFrame(parent, "orientationframe")
            self.InitControlHandles()
            self.InitializeValues()
            self.BindCallbacks()

    def InitControlHandles(self):
        """Grab handles to the text controls defined in the XRC resource."""
        if self.interactive:
            self.weight_input = self.control('weight')
            self.max_weight_input = self.control('max_weight')

    def InitializeValues(self):
        """Initialize weights from params, with sensible fallbacks."""
        self.weight = params.velocity_angle_weight
        self.max_weight = params.max_velocity_angle_weight
        if self.weight is None:
            self.weight = .5/params.max_jump
        if self.max_weight is None:
            self.max_weight = 1
        if self.interactive:
            self.weight_input.SetValue('%.3f'%self.weight)
            self.max_weight_input.SetValue('%.3f'%self.max_weight)

    def BindCallbacks(self):
        """Hook validated-float callbacks to both weight inputs."""
        if self.interactive:
            wxvt.setup_validated_float_callback(self.weight_input,
                                                xrc.XRCID('weight'),
                                                self.ValidateWeight,
                                                pending_color=params.wxvt_bg)
            # Bug fix: this callback must be registered under the
            # 'max_weight' control id; the original re-used XRCID('weight'),
            # so edits to the max-weight box were never validated.
            wxvt.setup_validated_float_callback(self.max_weight_input,
                                                xrc.XRCID('max_weight'),
                                                self.ValidateWeight,
                                                pending_color=params.wxvt_bg)

    def control(self, ctrlname):
        """Return the XRC control with the given name from this frame."""
        return xrc.XRCCTRL(self.frame, ctrlname)

    def ValidateWeight(self, evt):
        """Read both inputs, clamp to valid ranges and echo back."""
        self.weight = float(self.weight_input.GetValue())
        self.max_weight = float(self.max_weight_input.GetValue())
        if self.weight < 0:
            self.weight = 0
        if self.max_weight < 0:
            self.max_weight = 0
        if self.max_weight > 1:
            self.max_weight = 1
        self.weight_input.SetValue('%.3f'%self.weight)
        self.max_weight_input.SetValue('%.3f'%self.max_weight)

    def anglemod(self, theta):
        """Wrap an angle into the interval [-pi, pi)."""
        return ( ( (theta+num.pi)%(2.*num.pi) )-num.pi )

    def angledist(self, theta1, theta2):
        """Absolute angular distance between two angles, in [0, pi]."""
        return abs( ( (theta1-theta2+num.pi)%(2.*num.pi) ) - num.pi)

    def ChooseOrientations(self):
        """Find each target's contiguous lifetime and resolve its
        orientations over that interval."""
        params.velocity_angle_weight = self.weight
        params.max_velocity_angle_weight = self.max_weight
        N = len(self.targets)
        # first/last frame (exclusive end) in which each id appears
        startframes = num.zeros(params.nids, dtype=int)
        startframes[:] = -1
        endframes = num.zeros(params.nids, dtype=int)
        endframes[:] = -1
        keystostart = set(range(params.nids))
        keystoend = set([])
        allkeys = set(range(params.nids))
        for t in range(N):
            keys = set(self.targets[t].keys())
            # ids appearing for the first time in frame t
            newstarts = keystostart & keys
            for i in newstarts:
                startframes[i] = t
            keystostart -= newstarts
            keystoend = keystoend | newstarts
            # ids that disappeared in frame t
            keys = allkeys - keys
            newends = keystoend & keys
            for i in newends:
                endframes[i] = t
            keystoend -= newends
        # ids alive until the last frame end at N
        for i in keystoend:
            endframes[i] = N
        for i in range(params.nids):
            if startframes[i] < endframes[i]-1:
                self.ChooseOrientationsPerID(i, startframes[i], endframes[i])

    def ChooseOrientationsPerID(self, id, startframe, endframe):
        # we will set the angle to theta_t = phi_t + s_t * pi
        # we want to choose s_t to minimize
        # \sum_t cost(s_t|s_{t-1})
        # cost(s_t|s_{t-1}) = [(1 - w(||v_t||^2))*d(\theta_t,\theta_{t-1}) +
        #                      w(||v_t||^2)*d(\theta_t,angle(v_t))]
        # where w(||v_t||^2) = \min{1, c*||v_t||^2}
        # we will find the most likely states s_t using the recursion
        # cost_t(s_t) = min_{s_{t-1}} { cost_{t-1}(s_{t-1}) + cost(s_t|s_{t-1})
        N = endframe - startframe
        # backpointers for the optimal path (states are 0 or 1, so store ints;
        # the original bool dtype worked but obscured the argmin values)
        stateprev = num.zeros((N-1, 2), dtype=int)
        # scratch space for computing costs
        tmpcost = num.zeros(2)
        costprevnew = num.zeros(2)
        # initialize first frame
        costprev = num.zeros(2)
        # forward pass of the Viterbi-style recursion
        for tloc in range(1, N):
            t = tloc + startframe
            # compute velocity
            xcurr = self.targets[t][id].center.x
            ycurr = self.targets[t][id].center.y
            xprev = self.targets[t-1][id].center.x
            yprev = self.targets[t-1][id].center.y
            vx = xcurr - xprev
            vy = ycurr - yprev
            # compute angle of velocity
            velocityangle = num.arctan2(vy, vx)
            # compute weight for velocity term
            w = num.minimum(float(params.max_velocity_angle_weight), params.velocity_angle_weight*num.sqrt(vx**2+vy**2))
            # compute for both possible states
            for scurr in [0, 1]:
                thetacurr = self.targets[t][id].angle + scurr*num.pi
                # try both previous states
                for sprev in [0, 1]:
                    thetaprev = self.targets[t-1][id].angle + sprev*num.pi
                    costcurr = (1.-w)*self.angledist(thetaprev, thetacurr) + \
                        w*self.angledist(thetacurr, velocityangle)
                    tmpcost[sprev] = costprev[sprev] + costcurr
                # end loop over sprev
                # choose the minimum
                sprev = num.argmin(tmpcost)
                # set pointer for path
                stateprev[tloc-1, scurr] = sprev
                # set cost
                costprevnew[scurr] = tmpcost[sprev]
            # end loop over scurr
            costprev[:] = costprevnew[:]
        # end loop over frames
        # choose the best last state
        scurr = num.argmin(costprev)
        if scurr == 1:
            self.targets[endframe-1][id].angle += num.pi
            self.targets[endframe-1][id].angle = self.anglemod(self.targets[endframe-1][id].angle)
        # backtrack and apply the best states
        for tloc in range(N-2, -1, -1):
            t = tloc + startframe
            scurr = stateprev[tloc, scurr]
            if scurr == 1:
                self.targets[t][id].angle += num.pi
                self.targets[t][id].angle = self.anglemod(self.targets[t][id].angle)
/Eve-2.1.0.tar.gz/Eve-2.1.0/eve/methods/put.py | from cerberus.validator import DocumentError
from flask import abort
from flask import current_app as app
from werkzeug import exceptions
from eve.auth import auth_field_and_value, requires_auth
from eve.methods.common import (build_response_document, get_document,
marshal_write_response, oplog_push, parse)
from eve.methods.common import payload as payload_
from eve.methods.common import (pre_event, ratelimit, resolve_document_etag,
resolve_embedded_fields,
resolve_sub_resource_path,
resolve_user_restricted_access,
store_media_files, utcnow)
from eve.methods.post import post_internal
from eve.utils import config, debug_error_message, parse_request
from eve.versioning import (insert_versioning_documents, late_versioning_catch,
resolve_document_version)
@ratelimit()
@requires_auth("item")
@pre_event
def put(resource, payload=None, **lookup):
    """
    Default handler for PUT requests. The decorators take care of rate
    limiting, authentication and raising pre-request events; the actual
    work is delegated to :func:`put_internal` with concurrency checking
    enabled and validation on.

    .. versionchanged:: 0.5
       Split into put() and put_internal().
    """
    return put_internal(
        resource,
        payload,
        concurrency_check=True,
        skip_validation=False,
        **lookup
    )
def put_internal(
    resource, payload=None, concurrency_check=False, skip_validation=False, **lookup
):
    """Intended for internal put calls, this method is not rate limited,
    authentication is not checked, pre-request events are not raised, and
    concurrency checking is optional. Performs a document replacement.
    Updates are first validated against the resource schema. If validation
    passes, the document is replaced and an OK status update is returned.
    If validation fails a set of validation issues is returned.

    :param resource: the name of the resource to which the document belongs.
    :param payload: alternative payload. When calling put() from your own code
                    you can provide an alternative payload. This can be useful,
                    for example, when you have a callback function hooked to a
                    certain endpoint, and want to perform additional put()
                    callsfrom there.
                    Please be advised that in order to successfully use this
                    option, a request context must be available.
    :param concurrency_check: concurrency check switch (bool)
    :param skip_validation: skip payload validation before write (bool)
    :param **lookup: document lookup query.

    .. versionchanged:: 0.6
       Create document if it does not exist. Closes #634.
       Allow restoring soft deleted documents via PUT

    .. versionchanged:: 0.5
       Back to resolving default values after validation as now the validator
       can properly validate dependency even when some have default values. See
       #353.
       Original put() has been split into put() and put_internal().
       You can now pass a pre-defined custom payload to the funcion.
       ETAG is now stored with the document (#369).
       Catching all HTTPExceptions and returning them to the caller, allowing
       for eventual flask.abort() invocations in callback functions to go
       through. Fixes #395.

    .. versionchanged:: 0.4
       Allow abort() to be invoked by callback functions.
       Resolve default values before validation is performed. See #353.
       Raise 'on_replace' instead of 'on_insert'. The callback function gets
       the document (as opposed to a list of just 1 document) as an argument.
       Support for document versioning.
       Raise `on_replaced` after the document has been replaced

    .. versionchanged:: 0.3
       Support for media fields.
       When IF_MATCH is disabled, no etag is included in the payload.
       Support for new validation format introduced with Cerberus v0.5.

    .. versionchanged:: 0.2
       Use the new STATUS setting.
       Use the new ISSUES setting.
       Raise pre_<method> event.
       explicitly resolve default values instead of letting them be resolved
       by common.parse. This avoids a validation error when a read-only field
       also has a default value.

    .. versionchanged:: 0.1.1
       auth.request_auth_value is now used to store the auth_field value.
       Item-identifier wrapper stripped from both request and response payload.

    .. versionadded:: 0.1.0
    """
    resource_def = app.config["DOMAIN"][resource]
    schema = resource_def["schema"]
    validator = app.validator(
        schema, resource=resource, allow_unknown=resource_def["allow_unknown"]
    )
    if payload is None:
        payload = payload_()

    # Retrieve the original document without checking user-restricted access,
    # but returning the document owner in the projection. This allows us to
    # prevent PUT if the document exists, but is owned by a different user
    # than the currently authenticated one.
    original = get_document(
        resource,
        concurrency_check,
        check_auth_value=False,
        force_auth_field_projection=True,
        **lookup
    )
    if not original:
        if config.UPSERT_ON_PUT:
            id = lookup[resource_def["id_field"]]
            # this guard avoids a bson dependency, which would be needed if we
            # wanted to use 'isinstance'. Should also be slightly faster.
            if schema[resource_def["id_field"]].get("type", "") == "objectid":
                id = str(id)
            payload[resource_def["id_field"]] = id
            return post_internal(resource, payl=payload)
        abort(404)

    # If the document exists, but is owned by someone else, return
    # 403 Forbidden
    auth_field, request_auth_value = auth_field_and_value(resource)
    if auth_field and original.get(auth_field) != request_auth_value:
        abort(403)

    last_modified = None
    etag = None
    issues = {}
    object_id = original[resource_def["id_field"]]
    response = {}

    if config.BANDWIDTH_SAVER is True:
        embedded_fields = []
    else:
        req = parse_request(resource)
        embedded_fields = resolve_embedded_fields(resource, req)

    try:
        document = parse(payload, resource)
        resolve_sub_resource_path(document, resource)
        if skip_validation:
            validation = True
        else:
            validation = validator.validate_replace(document, object_id, original)
            # Apply coerced values
            document = validator.document
        if validation:
            # sneak in a shadow copy if it wasn't already there
            late_versioning_catch(original, resource)

            # update meta
            last_modified = utcnow()
            document[config.LAST_UPDATED] = last_modified
            document[config.DATE_CREATED] = original[config.DATE_CREATED]

            if resource_def["soft_delete"] is True:
                # PUT with soft delete enabled should always set the DELETED
                # field to False. We are either carrying through un-deleted
                # status, or restoring a soft deleted document
                document[config.DELETED] = False

            # id_field not in document means it is not being automatically
            # handled (it has been set to a field which exists in the
            # resource schema.
            if resource_def["id_field"] not in document:
                document[resource_def["id_field"]] = object_id

            resolve_user_restricted_access(document, resource)
            store_media_files(document, resource, original)
            resolve_document_version(document, resource, "PUT", original)

            # notify callbacks
            getattr(app, "on_replace")(resource, document, original)
            getattr(app, "on_replace_%s" % resource)(document, original)

            resolve_document_etag(document, resource)

            # write to db
            try:
                app.data.replace(resource, object_id, document, original)
            except app.data.OriginalChangedError:
                if concurrency_check:
                    abort(412, description="Client and server etags don't match")

            # update oplog if needed
            oplog_push(resource, document, "PUT")

            insert_versioning_documents(resource, document)

            # notify callbacks
            getattr(app, "on_replaced")(resource, document, original)
            getattr(app, "on_replaced_%s" % resource)(document, original)

            # build the full response document
            build_response_document(document, resource, embedded_fields, document)
            response = document
            if config.IF_MATCH:
                etag = response[config.ETAG]
        else:
            issues = validator.errors
    except DocumentError as e:
        # TODO should probably log the error and abort 400 instead (when we
        # got logging)
        issues["validator exception"] = str(e)
    except exceptions.HTTPException as e:
        raise e
    except Exception as e:
        # consider all other exceptions as Bad Requests
        app.logger.exception(e)
        abort(400, description=debug_error_message("An exception occurred: %s" % e))

    if issues:
        response[config.ISSUES] = issues
        response[config.STATUS] = config.STATUS_ERR
        status = config.VALIDATION_ERROR_STATUS
    else:
        response[config.STATUS] = config.STATUS_OK
        status = 200

    # limit what actually gets sent to minimize bandwidth usage
    response = marshal_write_response(response, resource)

    # fix: removed concatenation residue that was fused onto this line
    return response, last_modified, etag, status
/ImSwitchUC2-2.1.0.tar.gz/ImSwitchUC2-2.1.0/imswitch/imcontrol/view/widgets/ISMWidget.py | import numpy as np
import pyqtgraph as pg
from qtpy import QtCore, QtWidgets
from imswitch.imcontrol.view import guitools
from .basewidgets import NapariHybridWidget
class ISMWidget(NapariHybridWidget):
    """ Widget containing ISM interface. """
    sigISMShowSinglePattern = QtCore.Signal(bool)  # (enabled)
    sigISMShowLast = QtCore.Signal(bool)  # (enabled)
    sigISMStop = QtCore.Signal(bool)  # (enabled)
    sigISMStart = QtCore.Signal(bool)  # (enabled)
    sigShowToggled = QtCore.Signal(bool)  # (enabled)
    sigPIDToggled = QtCore.Signal(bool)  # (enabled)
    sigUpdateRateChanged = QtCore.Signal(float)  # (rate)
    sigSliderLaser1ValueChanged = QtCore.Signal(float)  # (value)

    def __post_init__(self):
        #super().__init__(*args, **kwargs)
        self.ISMFrame = pg.GraphicsLayoutWidget()

        # initialize all GUI elements

        # ROI
        self.ISMLabelROI = QtWidgets.QLabel('ROI X(min/max), Y(min/max):')
        self.ISMValueROIxMin = QtWidgets.QLineEdit('0')
        self.ISMValueROIxMax = QtWidgets.QLineEdit('255')
        self.ISMValueROIyMin = QtWidgets.QLineEdit('0')
        self.ISMValueROIyMax = QtWidgets.QLineEdit('255')

        # Spacing
        self.ISMLabelSteps = QtWidgets.QLabel('STEPS (X/Y): ')
        self.ISMValueStepsX = QtWidgets.QLineEdit('25')
        self.ISMValueStepsY = QtWidgets.QLineEdit('25')

        # Timing
        self.ISMLabelExposure = QtWidgets.QLabel('Laser Exposure (µs):')
        self.ISMValueExposure = QtWidgets.QLineEdit('500')
        self.ISMLabelDelay = QtWidgets.QLabel('Laser Delay (µs):')
        self.ISMValueLabelDelay = QtWidgets.QLineEdit('500')

        # z-stack
        self.ISMLabelZStack = QtWidgets.QLabel('Z-Stack (min,max,steps):')
        self.ISMValueZmin = QtWidgets.QLineEdit('0')
        self.ISMValueZmax = QtWidgets.QLineEdit('100')
        self.ISMValueZsteps = QtWidgets.QLineEdit('10')

        # Laser 1 intensity slider
        valueDecimalsLaser = 1
        valueRangeLaser = (0, 2**15)
        tickIntervalLaser = 1
        singleStepLaser = 1
        self.ISMLabelLaser1 = QtWidgets.QLabel('Intensity (Laser 1):')
        valueRangeMinLaser, valueRangeMaxLaser = valueRangeLaser
        self.sliderLaser1 = guitools.FloatSlider(QtCore.Qt.Horizontal, self, allowScrollChanges=False,
                                                 decimals=valueDecimalsLaser)
        self.sliderLaser1.setFocusPolicy(QtCore.Qt.NoFocus)
        self.sliderLaser1.setMinimum(valueRangeMinLaser)
        self.sliderLaser1.setMaximum(valueRangeMaxLaser)
        self.sliderLaser1.setTickInterval(tickIntervalLaser)
        self.sliderLaser1.setSingleStep(singleStepLaser)
        self.sliderLaser1.setValue(0)
        self.sliderLaser1.valueChanged.connect(
            lambda value: self.sigSliderLaser1ValueChanged.emit(value)
        )

        self.ISMLabelFileName = QtWidgets.QLabel('FileName:')
        self.ISMEditFileName = QtWidgets.QLineEdit('ISM')
        self.ISMNImages = QtWidgets.QLabel('Number of images: ')

        # Bug fix: these buttons are non-checkable, so the ``toggled`` signal
        # never fires on click; connect ``clicked`` instead so the signals
        # actually get emitted.
        self.ISMStartButton = guitools.BetterPushButton('Start')
        self.ISMStartButton.setCheckable(False)
        self.ISMStartButton.clicked.connect(self.sigISMStart)

        self.ISMStopButton = guitools.BetterPushButton('Stop')
        self.ISMStopButton.setCheckable(False)
        self.ISMStopButton.clicked.connect(self.sigISMStop)

        self.ISMShowLastButton = guitools.BetterPushButton('Show Last')
        self.ISMShowLastButton.setCheckable(False)
        self.ISMShowLastButton.clicked.connect(self.sigISMShowLast)

        # this one is checkable, so ``toggled`` is correct here
        self.ISMShowSinglePatternButton = guitools.BetterPushButton('Show Single ISM pattern')
        self.ISMShowSinglePatternButton.setCheckable(True)
        self.ISMShowSinglePatternButton.toggled.connect(self.sigISMShowSinglePattern)

        #self.ISMDoZStack = QtWidgets.QCheckBox('Perform Z-Stack')
        #self.ISMDoZStack.setCheckable(True)

        # Defining layout
        self.grid = QtWidgets.QGridLayout()
        self.setLayout(self.grid)
        self.grid.addWidget(self.ISMLabelROI, 0, 0, 1, 1)
        self.grid.addWidget(self.ISMValueROIxMin, 0, 1, 1, 1)
        self.grid.addWidget(self.ISMValueROIxMax, 0, 2, 1, 1)
        # Bug fix: the original added ISMValueROIyMax in both columns 3 and 4
        # and never showed ISMValueROIyMin.
        self.grid.addWidget(self.ISMValueROIyMin, 0, 3, 1, 1)
        self.grid.addWidget(self.ISMValueROIyMax, 0, 4, 1, 1)
        self.grid.addWidget(self.ISMLabelSteps, 1, 0, 1, 1)
        self.grid.addWidget(self.ISMValueStepsX, 1, 1, 1, 1)
        self.grid.addWidget(self.ISMValueStepsY, 1, 2, 1, 1)
        self.grid.addWidget(self.ISMLabelLaser1, 2, 0, 1, 1)
        self.grid.addWidget(self.sliderLaser1, 2, 1, 1, 3)
        self.grid.addWidget(self.ISMLabelExposure, 3, 0, 1, 1)
        self.grid.addWidget(self.ISMValueExposure, 3, 1, 1, 3)
        self.grid.addWidget(self.ISMLabelDelay, 3, 2, 1, 3)
        self.grid.addWidget(self.ISMValueLabelDelay, 3, 3, 1, 1)
        self.grid.addWidget(self.ISMLabelFileName, 4, 0, 1, 1)
        self.grid.addWidget(self.ISMEditFileName, 4, 1, 1, 1)
        self.grid.addWidget(self.ISMNImages, 4, 2, 1, 1)
        self.grid.addWidget(self.ISMStartButton, 5, 0, 1, 1)
        self.grid.addWidget(self.ISMStopButton, 5, 1, 1, 1)
        self.grid.addWidget(self.ISMShowLastButton, 5, 2, 1, 1)
        self.grid.addWidget(self.ISMShowSinglePatternButton, 5, 3, 1, 1)
        #self.grid.addWidget(self.ISMDoZStack, 5, 3, 1, 1)

        self.layer = None

    def getImage(self):
        """Return the currently displayed image array, or None."""
        # Bug fix: ``self.img`` is never assigned anywhere in this class;
        # the image data lives in the napari layer created by setImage().
        if self.layer is not None:
            return self.layer.data

    def setImage(self, im, colormap="gray", name=""):
        """Display ``im`` in the napari viewer, creating the layer on
        first use or when a layer with ``name`` does not exist yet."""
        if self.layer is None or name not in self.viewer.layers:
            self.layer = self.viewer.add_image(im, rgb=False, colormap=colormap, name=name, blending='additive')
        self.layer.data = im

    def getTimingValues(self):
        """Return (laser delay [µs] as float, laser exposure [µs] as int)."""
        ISMValueLabelDelay = float(self.ISMValueLabelDelay.text())
        ISMValueExposure = int(self.ISMValueExposure.text())
        return ISMValueLabelDelay, ISMValueExposure

    def getFilename(self):
        """Return the user-entered base filename."""
        ISMEditFileName = self.ISMEditFileName.text()
        return ISMEditFileName

    def setText(self, text):
        """Update the 'number of images' status label."""
        self.ISMNImages.setText(text)
# Copyright (C) 2020-2021 ImSwitch developers
# This file is part of ImSwitch.
#
# ImSwitch is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ImSwitch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
/NNGT-2.7.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl/nngt/database/db_generation.py | from collections import namedtuple
from peewee import *
from playhouse.fields import CompressedField
from playhouse.migrate import *
import nngt
from .pickle_field import PickledField
# Public API of this module: the peewee model classes plus the helper
# mappings used when generating per-simulation Neuron/Synapse tables.
__all__ = [
    'Activity',
    'Computer',
    'Connection',
    'db_migrator',
    'ignore',
    'migrate',
    'NeuralNetwork',
    'Neuron',
    'Simulation',
    'Synapse',
    'val_to_field',
]
# ---------------- #
# Database classes #
# ---------------- #
class LongCompressedField(CompressedField):
    '''
    Compressed blob field backed by a MySQL LONGBLOB column, for payloads
    that may exceed the regular blob size limit.
    '''
    db_field = 'longblob'
class BaseModel(Model):
    '''
    Common base for all tables; binds every model to the main NNGT database.
    '''
    class Meta:
        database = nngt._main_db
class Computer(BaseModel):
    '''
    Class containing information about the computer.
    '''
    name = TextField()
    ''' : Name from ``platform.node()`` or ``"unknown"`` '''
    platform = TextField()
    ''' System information from ``platform.platform()`` '''
    python = TextField()
    ''' Python version given by ``platform.python_version()`` '''
    cores = IntegerField()
    ''' Number of cores returned by ``psutil.cpu_count()`` or ``-1`` '''
    ram = BigIntegerField()
    ''' Total memory given by ``psutil.virtual_memory().total`` (long) or
    ``-1`` '''
class NeuralNetwork(BaseModel):
    '''
    Class containing information about the neural network.
    '''
    network_type = TextField(null=True)
    ''' Type of the network from Graph.type '''
    directed = BooleanField(null=True)
    ''' Whether the graph is directed or not '''
    nodes = IntegerField(null=True)
    ''' Number of nodes. '''
    edges = IntegerField(null=True)
    ''' Number of edges. '''
    weighted = BooleanField(null=True)
    ''' Whether the graph is weighted or not. '''
    weight_distribution = TextField(null=True)
    ''' Name of the weight_distribution used. '''
    compressed_file = LongCompressedField(null=True)
    ''' Compressed (bz2) string of the graph from ``str(graph)``; once
    uncompressed, can be loaded using ``Graph.from_file(name,
    from_string=True)``. '''
class Neuron(BaseModel):
    '''
    Base class that will be modified to contain all the properties of the
    neurons used during a simulation.
    '''
    # columns are added dynamically (via playhouse.migrate) at run time
    pass
class Synapse(BaseModel):
    '''
    Base class that will be modified to contain all the properties of the
    synapses used during a simulation.
    '''
    # columns are added dynamically (via playhouse.migrate) at run time
    pass
class Connection(BaseModel):
    '''
    Class detailing the existing connections in the network: a couple of pre-
    and post-synaptic neurons and a synapse.
    '''
    # pre/post-synaptic neurons and the synapse linking them
    pre = ForeignKeyField(Neuron, null=True, related_name='out_connections')
    post = ForeignKeyField(Neuron, null=True, related_name='int_connections')
    synapse = ForeignKeyField(Synapse, null=True, related_name='connections')
class Activity(BaseModel):
    '''
    Class detailing the network's simulated activity.
    '''
    raster = PickledField(null=True)
    ''' Raster of the simulated activity. '''
class Simulation(BaseModel):
    '''
    Class containing all informations about the simulation properties.
    '''
    start_time = DateTimeField()
    ''' Date and time at which the simulation started. '''
    completion_time = DateTimeField()
    ''' Date and time at which the simulation ended. '''
    simulated_time = FloatField()
    ''' Virtual time that was simulated for the neural network. '''
    resolution = FloatField()
    ''' Timestep used to simulate the components of the neural network '''
    simulator = TextField()
    ''' Name of the neural simulator used (NEST, Brian...) '''
    grnd_seed = IntegerField(null=True)
    ''' Master seed of the simulation. '''
    local_seeds = PickledField(null=True)
    ''' List of the local threads seeds. '''
    # foreign keys linking this run to its environment and results
    computer = ForeignKeyField(Computer, related_name='simulations', null=True)
    ''' Computer table entry where the computer used is defined. '''
    network = ForeignKeyField(NeuralNetwork, related_name='simulations', null=True)
    ''' Network table entry where the simulated network is described. '''
    activity = ForeignKeyField(Activity, related_name='simulations', null=True)
    ''' Activity table entry where the simulated activity is described. '''
    connections = ForeignKeyField(Connection, related_name='simulations', null=True)
    ''' Connection table entry where the connections are described. '''
    population = PickledField()
    ''' Pickled list containing the neural group names. '''
    pop_sizes = PickledField()
    ''' Pickled list containing the group sizes. '''
#-----------------------------------------------------------------------------#
# Generate the custom Neuron and Synapse classes
#------------------------
#
# Simulator (NEST) properties that must be skipped when generating the
# Neuron/Synapse table columns: they are runtime/bookkeeping values, not
# model parameters worth persisting.
ignore = {
    'global_id': True,
    'gsl_error_tol': True,
    'local_id': True,
    'recordables': True,
    'thread': True,
    'thread_local_id': True,
    'vp': True,
    'synaptic_elements': True,
    'sizeof': True,
    'source': True,
    'target': True,
}
# Map of type-name strings (Python types, SQL column types, numpy dtypes)
# to the peewee field class used to store values of that type.
# NOTE(review): the 'lst' key is presumably meant to be 'list' — confirm
# against the code that performs the lookups.
val_to_field = {
    'int': IntegerField,
    'INTEGER': IntegerField,
    'bigint': IntegerField,
    'tinyint': IntegerField,
    'long': PickledField,
    'blob': PickledField,
    'BLOB': PickledField,
    'datetime': DateTimeField,
    'DATETIME': DateTimeField,
    'str': TextField,
    'TEXT': TextField,
    'longtext': TextField,
    'SLILiteral': TextField,
    'float': FloatField,
    'REAL': FloatField,
    'float64': FloatField,
    'float32': FloatField,
    'bool': BooleanField,
    'lst': PickledField,
    'dict': PickledField,
    'ndarray': PickledField,
    'compressed': LongCompressedField
}
# Map of peewee database class names to the playhouse migrator able to
# alter tables on that backend. (Fix: removed concatenation residue that
# was fused onto the closing brace line.)
db_migrator = {
    'SqliteDatabase': SqliteMigrator,
    'PostgresqlDatabase': PostgresqlMigrator,
    'MySQLDatabase': MySQLMigrator,
}
/LiquPy-0.13.1.0-py3-none-any.whl/liqupy/boreholes.py |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
from math import erf
def normcdf(x):
    """Cumulative distribution function of the standard normal distribution.

    Parameters
    ----------
    x : float
        Evaluation point (standard-normal variate).

    Returns
    -------
    float
        P(Z <= x) for Z ~ N(0, 1).
    """
    return 0.5 * (1.0 + erf(x / np.sqrt(2.0)))
class Borehole:
# borehole object
number_of_holes = 0
# customize visualization
viz_liquefied_text_kwargs = {'color': (0, 0, 0, 0.4), 'horizontalalignment': 'center', 'verticalalignment': 'center'}
viz_dashed_guidelines = {'color': (0, 0, 1, 0.05), 'ls': '--'}
def __init__(self, bore_log_data, name=None, units='metric'):
Borehole.number_of_holes += 1
self.bore_log_data = bore_log_data
self.name = name
if units == 'metric':
self.units_length = 'm'
self.units_area = '$m^2$'
elif units == 'british':
self.units_length = 'ft'
self.units_area = '$ft^2$'
    def __del__(self):
        # Keep the class-level instance counter in sync when a hole is
        # garbage-collected.
        Borehole.number_of_holes -= 1
def simplified_liquefaction_triggering_fos(self, Pa, M, Zw=0, sampler_correction_factor=1,
liner_correction_factor=1., hammer_energy=60, rod_extension=1, output='fs',
rd_method='Idriss1999', fc_method = 'BI2004', fs_threshold=1., prob_threshold=0.5):
""" simplified liquefaction triggering analysis - stress-based
Parameters
----------
Pa : float
Peak ground acceleration (g)
M : float
Earthquake magnitude
Zw : float, default=0
water table depth (in self.units_length units)
sampler_correction_factor : float, default=1.
liner_correction_factor : float, default=1.
hammer_energy : float, default=60.
rod_extension : float, default=1.
output : 'fs' or 'probability', default='fs'
determines the approach, deterministic or probabilistic
rd_method : in ['Idriss1999', 'LiaoWhitman1986', 'Golesorkhi1989'], default= 'Idriss1999'
Method of shear stress reduction factor
fc_method : in ['BI2004', 'cetin2004'] , default= 'BI2004'
Method of adjustments for fines content
fs_threshold : float, default=1.
Factor of safety threshold to consider soild as liqufied
prob_threshold : float, default=0.5
Probability threshold to consider soild as liqufied
"""
self.Pa = Pa
self.M = M
self.Zw = Zw
self.sampler_correction_factor = sampler_correction_factor
self.liner_correction_factor = liner_correction_factor
self.hammer_energy = hammer_energy
self.rod_extension = rod_extension
self.rd_method= rd_method
self.fc_method = fc_method
self.output = output
self.fs_threshold = fs_threshold
self.prob_threshold = prob_threshold
output = []
sigmavp = 0
sigmav = 0
depth = 0
hydro_pore_pressure = 0
gamma = self.bore_log_data.iloc[0, 6]
for i, row in self.bore_log_data.iterrows():
rod_length = row[1] + rod_extension
Nspt = row[2]
ce = hammer_energy / 60
if rod_length < 3:
cr = 0.75
elif rod_length < 4:
cr = 0.8
elif rod_length < 6:
cr = 0.85
elif rod_length < 10:
cr = 0.95
else:
cr = 1
cs = sampler_correction_factor
cb = liner_correction_factor
N60 = Nspt * ce * cr * cs * cb
sigmav = sigmav + (row[1] - depth)*0.5*(row[6]+gamma)
if row[1] > Zw:
hydro_pore_pressure = (row[1]-Zw) * 9.81
sigmavp = sigmav - hydro_pore_pressure
if row[4] == 1:
N60 = 'n.a.'
N160 = 'n.a.'
N160cs = 'n.a.'
else:
if sigmavp == 0:
CN = 1
else:
CN = min(1.7, np.sqrt(100 / sigmavp))
N160 = CN*N60 # use of (N1)60 proposed by Liao and Whitman (1986)
# Adjustments for fines content
if self.fc_method == 'BI2004':
# Boulanger & Idriss (2004), default
delta_n = np.exp(1.63 + 9.7/(row[5]+0.01) - (15.7/(row[5]+0.01))**2)
N160cs = N160 + delta_n
elif self.fc_method == 'cetin2004':
# Cetin et al. (2004)
if row[5] > 5 or row[5] < 35:
warnings.warn('Cetin et al 2004 method of adjustments for fines content is only applicable to fines content in the range of 5% to 35%')
c_fines = 1 + 0.004*row[5] + 0.05*row[5]/N160
N160cs = N160*c_fines
# Shear stress reduction factor (depth in meters)
if row[1] > 20:
warnings.warn('CSR (or equivalent rd values) at depths greater than about 20 m should be based on site response studies (Idriss and Boulanger, 2004)')
if self.rd_method=='Idriss1999':
# Idriss (1999), default value
if row[1] <= 34:
rd = np.exp((-1.012-1.126*np.sin(row[1]/11.73+5.133)) + (0.106+0.118*np.sin(row[1]/11.28+5.142))*M)
else:
rd = 0.12*exp(0.22*M)
elif self.rd_method == 'LiaoWhitman1986':
# Liao and Whitman (1986)
if row[1] <= 9.15:
rd = 1 - 0.00765*row[1]
else:
rd = 1.174 - 0.0267*row[1]
elif self.rd_method == 'Golesorkhi1989':
# Golesorkhi (1989)
if row[1] <= 24:
rd = np.exp((-1.012-1.126*np.sin(row[1]/38.5+5.133)) + (0.106+0.118*np.sin(row[1]/37+5.142))*M)
# Earthquake-induced cyclic stress ratio (CSR)
CSR = 0.65*sigmav/sigmavp*Pa*rd
if row[4] == 1 or row[1] < Zw:
CRR0 = 'n.a.'
CRR = 'n.a.'
FoS = 'n.a.'
MSF = 'n.a'
k_sigma = 'n.a.'
Probability = 'n.a.'
else:
# Magnitude scaling factor
# Idriss (1999), default value
MSF = min(1.8, 6.9 * np.exp(-M / 4) - 0.058)
# Overburden correction factor
# Boulanger and Idriss (2004)
k_sigma = min(1.1, 1 - (min(0.3, 1 / (18.9 - 2.55 * np.sqrt(N160cs)))) * np.log(sigmavp / 100))
# SPT Triggering correlation of liquefaction in clean sands
# Idriss and Boulanger (2004) & Idriss and Boulanger (2008)
if N160cs < 37.5:
CRR0 = np.exp(N160cs / 14.1 + (N160cs / 126) ** 2 - (N160cs / 23.6) ** 3 + (N160cs / 25.4) ** 4 - 2.8)
else:
CRR0 = 2
# Cyclic resistance ratio (CRR)
CRR = min(2., CRR0*MSF*k_sigma)
if self.output == 'fs':
if CRR/CSR > 2:
FoS = 2
else:
FoS = CRR/CSR
else:
# Cetin et al. (2004)
Probability = normcdf(-1*(N160cs - 13.32*np.log(CSR) - 29.53*np.log(M) - 3.7*np.log(sigmavp / 100) + 16.85)/2.7)
depth = row[1]
gamma = row[6]
if self.output == 'fs':
if self.fc_method == 'BI2004':
output.append([row[1], ce, cb, cr, cs, N60, sigmav, sigmavp, CN, N160, delta_n, N160cs, rd, CSR, MSF, k_sigma, CRR0, CRR, FoS])
elif self.fc_method == 'cetin2004':
output.append([row[1], ce, cb, cr, cs, N60, sigmav, sigmavp, CN, N160, c_fines, N160cs, rd, CSR, MSF, k_sigma, CRR0, CRR, FoS])
else:
if self.fc_method == 'BI2004':
output.append([row[1], ce, cb, cr, cs, N60, sigmav, sigmavp, CN, N160, delta_n, N160cs, rd, CSR, MSF, k_sigma, CRR0, CRR, Probability])
elif self.fc_method == 'cetin2004':
output.append([row[1], ce, cb, cr, cs, N60, sigmav, sigmavp, CN, N160, c_fines, N160cs, rd, CSR, MSF, k_sigma, CRR0, CRR, Probability])
if self.output == 'fs':
if self.fc_method == 'BI2004':
self.new_bore_log_data = pd.DataFrame(output, columns=['depth', 'ce', 'cb', 'cr', 'cs', 'N60', 'sigmav', 'sigmavp', 'CN', 'N160', 'delta_n', 'N160cs', 'rd', 'CSR', 'MSF', 'k_simga', 'CRR0', 'CRR', 'FS'])
elif self.fc_method == 'cetin2004':
self.new_bore_log_data = pd.DataFrame(output, columns=['depth', 'ce', 'cb', 'cr', 'cs', 'N60', 'sigmav', 'sigmavp', 'CN', 'N160', 'c_fines', 'N160cs', 'rd', 'CSR', 'MSF', 'k_simga', 'CRR0', 'CRR', 'FS'])
else:
if self.fc_method == 'BI2004':
self.new_bore_log_data = pd.DataFrame(output, columns=['depth', 'ce', 'cb', 'cr', 'cs', 'N60', 'sigmav', 'sigmavp', 'CN', 'N160', 'delta_n', 'N160cs', 'rd', 'CSR', 'MSF', 'k_simga', 'CRR0', 'CRR', 'Probability'])
elif self.fc_method == 'cetin2004':
self.new_bore_log_data = pd.DataFrame(output, columns=['depth', 'ce', 'cb', 'cr', 'cs', 'N60', 'sigmav', 'sigmavp', 'CN', 'N160', 'c_fines', 'N160cs', 'rd', 'CSR', 'MSF', 'k_simga', 'CRR0', 'CRR', 'Probability'])
# visualization of the liquefaction analysis
def visualize(self):
# subplot of SPT blow counts
fig, ax = plt.subplots(ncols=3, figsize=(12, 6))
[ax[x].xaxis.tick_top() for x in range(ax.shape[0])]
[ax[x].xaxis.set_label_position('top') for x in range(ax.shape[0])]
total_depth = max(self.new_bore_log_data['depth'])*-1.1
soil_type_0 = ''
depth_0 = 0
self.new_bore_log_data.N160 = self.new_bore_log_data.N160.astype('object')
spt_plot_max_x = max(self.new_bore_log_data.loc[self.new_bore_log_data.loc[:, 'N160'] != 'n.a.', 'N160'])*1.05
for i, row in self.bore_log_data.iterrows():
soil_type_1 = row[3]
depth_1 = -row[1]
ax[0].text(spt_plot_max_x*.05, depth_1, soil_type_1, color=(0, 0, 0, 0.4), verticalalignment='center')
if not soil_type_0 == soil_type_1:
ax[0].plot([0, spt_plot_max_x], [(depth_1+depth_0)*.5, (depth_1+depth_0)*.5], color=(0, 0, 0, 0.15))
ax[0].plot([0, spt_plot_max_x], [depth_1, depth_1], **self.viz_dashed_guidelines)
depth_0 = depth_1
soil_type_0 = soil_type_1
ax[0].scatter(self.bore_log_data.iloc[:, 2], -self.bore_log_data.iloc[:, 1], marker='x', label='$N_{SPT}$')
ax[0].scatter(self.new_bore_log_data.loc[self.new_bore_log_data.loc[:, 'N160'] != 'n.a.', 'N160'],
-self.bore_log_data.ix[self.new_bore_log_data.loc[:, 'N160'] != 'n.a.', 1], marker='+', s=75, label='$(N_1)_{60}$')
ax[0].legend(loc='lower right')
ax[0].set(xlabel='SPT BLOW COUNT', ylabel='DEPTH ({})'.format(self.units_length), xlim=[0, spt_plot_max_x])
ax[0].set_ylim(top=0, bottom=total_depth)
# subplot of CSR & CRR
depth_0 = 0
layer_change_0 = 0
liquefiable_0 = False
csr_0 = 0
crr_0 = 0
na_0 = False
csrcrr_plot_max_x = 1
if self.output == 'fs':
fos_plot_max_x = self.new_bore_log_data.loc[self.new_bore_log_data.loc[:, 'FS'] != 'n.a.', 'FS'].max()*1.1
else:
fos_plot_max_x = 1
for i, row in self.new_bore_log_data.iterrows():
depth_1 = -row[0]
ax[1].plot([0, csrcrr_plot_max_x], [depth_1, depth_1], **self.viz_dashed_guidelines)
ax[2].plot([0, fos_plot_max_x], [depth_1, depth_1], **self.viz_dashed_guidelines)
na_1 = False
csr_1 = row['CSR']
crr_1 = row['CRR']
if self.output == 'fs':
if row['FS'] == 'n.a.':
na_1 = True
liquefiable_1 = False
elif row['FS'] > self.fs_threshold:
liquefiable_1 = False
else:
liquefiable_1 = True
else:
if row['Probability'] == 'n.a.':
na_1 = True
liquefiable_1 = False
elif row['Probability'] <= self.prob_threshold:
liquefiable_1 = False
else:
liquefiable_1 = True
if i > 0:
if not na_1 and not na_0:
ax[1].plot([csr_0, csr_1], [depth_0, depth_1], 'k--')
ax[1].plot([crr_0, crr_1], [depth_0, depth_1], 'k-')
if not liquefiable_0 == liquefiable_1:
layer_change_1 = (depth_1+depth_0)*.5
if not liquefiable_1:
ax[2].text(0.5*fos_plot_max_x, (layer_change_0+layer_change_1)*0.5, 'LIQUEFIED ZONE', **Borehole.viz_liquefied_text_kwargs)
else:
ax[2].text(0.5*fos_plot_max_x, (layer_change_0 + layer_change_1) * 0.5, 'NON-LIQUEFIED ZONE', **Borehole.viz_liquefied_text_kwargs)
ax[1].plot([-1, csrcrr_plot_max_x], [(depth_1+depth_0)*.5, (depth_1+depth_0)*.5], color=(0, 0, 0, 0.15))
ax[2].plot([0, fos_plot_max_x], [(depth_1 + depth_0) * .5, (depth_1 + depth_0) * .5], color=(0, 0, 0, 0.15))
layer_change_0 = layer_change_1
liquefiable_0 = liquefiable_1
depth_0 = depth_1
csr_0 = csr_1
crr_0 = crr_1
na_0 = na_1
if liquefiable_1:
ax[2].text(0.5*fos_plot_max_x, (total_depth+layer_change_1)*0.5, 'LIQUEFIED ZONE', **Borehole.viz_liquefied_text_kwargs)
else:
ax[2].text(0.5 * fos_plot_max_x, (total_depth + layer_change_1) * 0.5, 'NON-LIQUEFIED ZONE', **Borehole.viz_liquefied_text_kwargs)
ax[1].plot([0, 0], [0, 0], 'k--', label='CSR')
ax[1].plot([0, 0], [0, 0], 'k-', label='Earthquake-induced CRR')
ax[1].legend(loc='lower right')
ax[1].set(xlabel='CSR & CRR', xlim=[0, csrcrr_plot_max_x])
ax[1].set_ylim(top=0, bottom=total_depth)
if self.output == 'fs':
# subplot of Factor of safety
depth_0 = 0
fs_0 = 0
for i, row in self.new_bore_log_data.iterrows():
fs_1 = row['FS']
depth_1 = -row['depth']
if i > 0 and fs_1 != 'n.a.' and fs_0 != 'n.a.':
ax[2].plot([fs_0, fs_1], [depth_0, depth_1], 'k-')
fs_0 = fs_1
depth_0 = depth_1
ax[2].plot([self.fs_threshold, self.fs_threshold], [0, total_depth], '--', color=(0, 0, 0, 0.1))
ax[2].set(xlabel='FACTOR OF SAFETY', xlim=[0, fos_plot_max_x])
ax[2].set_ylim(top=0, bottom=total_depth)
else:
# subplot of Liquefaction Probability
depth_0 = 0
fs_0 = 0
for i, row in self.new_bore_log_data.iterrows():
fs_1 = row['Probability']
depth_1 = -row['depth']
if i > 0 and fs_1 != 'n.a.' and fs_0 != 'n.a.':
ax[2].plot([fs_0, fs_1], [depth_0, depth_1], 'k-')
fs_0 = fs_1
depth_0 = depth_1
ax[2].plot([self.prob_threshold, self.prob_threshold], [0, total_depth], '--', color=(0, 0, 0, 0.1))
ax[2].set(xlabel='Probability', xlim=[0, 1])
ax[2].set_ylim(top=0, bottom=total_depth)
if self.name != None:
fig.suptitle(self.name, fontsize=14, y=.99)
plt.show()
def save_to_file(self, file_name):
self.new_bore_log_data.to_excel(file_name + '.xls')
print(file_name + '.xls has been saved.')
# Analytical methods for lateral spread and settlement analysis ************************************************************
# Zhang, G., Robertson, P. K., & Brachman, R. W. I. (2004). Estimating liquefaction-induced lateral displacements using the standard penetration test or cone penetration test. Journal of Geotechnical and Geoenvironmental Engineering, 130(8), 861-871.
def calc_ls_zhang2004(self, save_to_file=False, file_name='lateral_spread_analysis'):
try:
for i, row in self.new_bore_log_data.iterrows():
if i == 0:
self.new_bore_log_data.loc[i, 'dHi'] = self.new_bore_log_data.loc[i, 'depth']
elif i < len(self.new_bore_log_data) - 1:
self.new_bore_log_data.loc[i, 'dHi'] = (self.new_bore_log_data.loc[i + 1, 'depth'] - self.new_bore_log_data.loc[i - 1, 'depth']) / 2
else:
self.new_bore_log_data.loc[i, 'dHi'] = self.new_bore_log_data.loc[i, 'depth'] - self.new_bore_log_data.loc[i - 1, 'depth']
if self.new_bore_log_data.loc[i, 'FS'] == 'n.a.':
self.new_bore_log_data.loc[i, 'gamma_lim'] = 0
self.new_bore_log_data.loc[i, 'f_alpha'] = 0
self.new_bore_log_data.loc[i, 'gamma_max'] = 0
self.new_bore_log_data.loc[i, 'de'] = 0
self.new_bore_log_data.loc[i, 'de'] = 0
else:
self.new_bore_log_data.loc[i, 'gamma_lim'] = max(0, min(0.5, 1.859*(1.1 - np.sqrt(self.new_bore_log_data.loc[i, 'N160cs'] / 45)) ** 3))
self.new_bore_log_data.loc[i, 'f_alpha'] = 0.032 + 0.69 * np.sqrt(max(7, self.new_bore_log_data.loc[i, 'N160cs'])) - 0.13 * max(7, self.new_bore_log_data.loc[i, 'N160cs'])
if row['FS'] > 2:
self.new_bore_log_data.loc[i, 'gamma_max'] = 0
elif row['FS'] < self.new_bore_log_data.loc[i, 'f_alpha']:
self.new_bore_log_data.loc[i, 'gamma_max'] = self.new_bore_log_data.loc[i, 'gamma_lim']
else:
self.new_bore_log_data.loc[i, 'gamma_max'] = min(self.new_bore_log_data.loc[i, 'gamma_lim'],
0.035 * (1 - self.new_bore_log_data.loc[i, 'f_alpha']) * (2 - row['FS']) / (
row['FS'] - self.new_bore_log_data.loc[i, 'f_alpha']))
self.new_bore_log_data.loc[i, 'de'] = 1.5 * np.exp(-0.369 * np.sqrt(self.new_bore_log_data.loc[i, 'N160cs'])) * min(0.08,
self.new_bore_log_data.loc[
i, 'gamma_max'])
self.new_bore_log_data.loc[i, 'dLDIi'] = self.new_bore_log_data.loc[i, 'dHi'] * self.new_bore_log_data.loc[i, 'gamma_max']
self.new_bore_log_data.loc[i, 'dSi'] = self.new_bore_log_data.loc[i, 'dHi'] * self.new_bore_log_data.loc[i, 'de']
print('LDI = {}, settlement = {}'.format(sum(self.new_bore_log_data.dLDIi.values), sum(self.new_bore_log_data.dSi.values)))
if save_to_file:
self.new_bore_log_data.to_excel(file_name + '.xls')
print(file_name + '.xls has been saved.')
except AttributeError:
warnings.warn('Lateral spread and settlement analysis could not be done! Simplified liquefaction triggering analysis needs to be done first.')
if __name__ == '__main__':
    # ***********************************************************************************
    # Example: SPT-based liquefaction triggering analysis for a single boring
    # (from the "Soil Liquefaction During Earthquakes" textbook by Idriss and Boulanger)
    # ***********************************************************************************

    # 1. load the bore log records into a pandas DataFrame
    example_log_data = pd.read_excel('default_datasets/spt_Idriss_Boulanger.xlsx')

    # 2. wrap the records (Appendix A of the Idriss & Boulanger textbook) in a Borehole
    example_borehole = Borehole(example_log_data)

    # 3. run the simplified liquefaction triggering analysis to compute factors of safety
    example_borehole.simplified_liquefaction_triggering_fos(
        Pa=0.280, M=6.9, Zw=1.8, sampler_correction_factor=1,
        liner_correction_factor=1, hammer_energy=75, rod_extension=1.5, output='fs')

    # 4. (optional) visualize the output
    example_borehole.visualize()

    # 5. (optional) save the output to an EXCEL file
    example_borehole.save_to_file('triggering_analysis_on_log_from_Idriss_and_Boulanger')

    # 6. (optional) lateral spread and settlement analysis (Zhang et al., 2004);
    #    NOTE: simplified_liquefaction_triggering_fos() must be run on the log first
    example_borehole.calc_ls_zhang2004(save_to_file=True, file_name='zhang2004_lateral_spread_analysis')
/FP-SMC-ALS-test1-0.0.1.tar.gz/FP-SMC-ALS-test1-0.0.1/smc/policy/layer3.py | from smc.base.model import ElementCreator, LoadElement
from smc.api.exceptions import (
CreatePolicyFailed,
ElementNotFound,
LoadPolicyFailed,
CreateElementFailed,
)
from smc.policy.policy import Policy
from smc.policy.rule import IPv4Rule, IPv6Rule
from smc.policy.rule_nat import IPv4NATRule, IPv6NATRule
from smc.base.collection import rule_collection
class FirewallRule(object):
    """
    Mixin collecting all references to firewall-rule-related entry
    points. Shared by multiple classes such as FirewallPolicy and
    FirewallPolicyTemplate.
    """

    @property
    def fw_ipv4_access_rules(self):
        """
        IPv4 rule entry point

        :rtype: rule_collection(IPv4Rule)
        """
        relation = self.get_relation("fw_ipv4_access_rules")
        return rule_collection(relation, IPv4Rule)

    @property
    def fw_ipv4_nat_rules(self):
        """
        IPv4NAT Rule entry point

        :rtype: rule_collection(IPv4NATRule)
        """
        relation = self.get_relation("fw_ipv4_nat_rules")
        return rule_collection(relation, IPv4NATRule)

    @property
    def fw_ipv6_access_rules(self):
        """
        IPv6 Rule entry point

        :rtype: rule_collection(IPv6Rule)
        """
        relation = self.get_relation("fw_ipv6_access_rules")
        return rule_collection(relation, IPv6Rule)

    @property
    def fw_ipv6_nat_rules(self):
        """
        IPv6NAT Rule entry point

        :rtype: rule_collection(IPv6NATRule)
        """
        relation = self.get_relation("fw_ipv6_nat_rules")
        return rule_collection(relation, IPv6NATRule)
class FirewallPolicy(FirewallRule, Policy):
    """
    FirewallPolicy represents a set of rules installed on layer 3
    devices. Layer 3 engines support either ipv4 or ipv6 rules. They
    also have NAT rules and reference an Inspection and File Filtering
    Policy.

    :ivar template: which policy template is used

    Instance Resources:

    :ivar fw_ipv4_access_rules: :py:class:`~FirewallRule.fw_ipv4_access_rules`
    :ivar fw_ipv4_nat_rules: :py:class:`~FirewallRule.ipv4_nat_rules`
    :ivar fw_ipv6_access_rules: :py:class:`~FirewallRule.ipv6_access_rules`
    :ivar fw_ipv6_nat_rules: :py:class:`~FirewallRule.ipv6_nat_rules`
    """

    typeof = "fw_policy"

    @classmethod
    def create(cls, name, template="Firewall Inspection Template"):
        """
        Create a Firewall Policy based on a firewall template. The new
        policy inherits the Inspection and File Filtering policy from
        the specified template.

        :param str name: name of policy
        :param str template: name of the NGFW engine template to base policy on
        :raises LoadPolicyFailed: Cannot load the policy after creation
        :raises CreatePolicyFailed: policy creation failed with message
        :return: FirewallPolicy

        To use after successful creation, reference the policy to obtain
        context::

            FirewallPolicy('newpolicy')
        """
        if template is None and cls.typeof == "fw_template_policy":
            # template policies themselves may be created without a parent template
            template_href = None
        else:
            try:
                template_href = FirewallTemplatePolicy(template).href
            except ElementNotFound:
                raise LoadPolicyFailed(
                    "Cannot find specified firewall template: {}".format(template))
        try:
            return ElementCreator(cls, {"name": name, "template": template_href})
        except CreateElementFailed as err:
            raise CreatePolicyFailed(err)

    def update(self, cautious_update=True, **kwargs):
        """
        Update the Firewall Policy. By default this reloads the etag from
        the API first, to handle cases where a subelement has changed the
        etag of the policy. If the policy is updated prior to such
        additions, cautious_update can be turned off.

        :cautious_update: True to load etag from API before updating.
        """
        if cautious_update and "etag" not in kwargs:
            kwargs["etag"] = LoadElement(href=self.href, only_etag=True)
        return super(FirewallPolicy, self).update(**kwargs)
class FirewallSubPolicy(Policy):
    """
    A Firewall Sub Policy is a rule section within a firewall policy
    that provides a container to create rules that are referenced from
    a 'jump' rule. Typically rules in a sub policy are similar in some
    fashion such as applying to a specific service. Sub Policies can also
    be delegated from an administrative perspective.

    Firewall Sub Policies only provide access to creating IPv4 rules. NAT
    is done on the parent firewall policy::

        p = FirewallSubPolicy('MySubPolicy')
        p.fw_ipv4_access_rules.create(
            name='newule',
            sources='any',
            destinations='any',
            services=[TCPService('SSH')],
            action='discard')
    """

    typeof = "sub_ipv4_fw_policy"

    @classmethod
    def create(cls, name):
        """
        Create a sub policy. Only name is required. Other settings are
        inherited from the parent firewall policy (template, inspection
        policy, etc).

        :param str name: name of sub policy
        :raises CreateElementFailed: failed to create policy
        :rtype: FirewallSubPolicy
        """
        payload = {"name": name}
        return ElementCreator(cls, json=payload)

    @property
    def fw_ipv4_access_rules(self):
        """
        IPv4 rule entry point

        :rtype: rule_collection(IPv4Rule)
        """
        relation = self.get_relation("fw_ipv4_access_rules")
        return rule_collection(relation, IPv4Rule)
class FirewallIPv6SubPolicy(FirewallSubPolicy):
    """
    IPv6 variant of :class:`FirewallSubPolicy`: a rule section within a
    firewall policy referenced from a 'jump' rule, exposing IPv6 access
    rules only.
    """
    typeof = "sub_ipv6_fw_policy"

    @property
    def fw_ipv6_access_rules(self):
        """
        IPv6 rule entry point

        :rtype: rule_collection(IPv6Rule)
        """
        return rule_collection(self.get_relation("fw_ipv6_access_rules"), IPv6Rule)
class FirewallTemplatePolicy(FirewallPolicy):
    """
    All Firewall Policies will reference a firewall policy template.
    Most templates will be pre-configured best practice configurations
    and rarely need to be modified. However, you may want to view the
    details of rules configured in a template or possibly insert additional
    rules.

    For example, view rules in firewall policy template after loading the
    firewall policy::

        policy = FirewallPolicy('Amazon Cloud')
        for rule in policy.template.fw_ipv4_access_rules.all():
            print rule
    """

    typeof = "fw_template_policy"

    def upload(self):
        """
        Deliberate no-op override of :meth:`Policy.upload`.

        NOTE(review): presumably templates cannot be installed on engines
        directly, hence uploading is suppressed — confirm against the SMC API.
        """
        pass  # Not supported on the template
/LoupeTool-0.1.18.tar.gz/LoupeTool-0.1.18/README.md | <p align="center">
<img src="./public/loupe.jpg" alt="Loupe" align="center" width="180" />
</p>
<h1 align="center">
LoupeTool
</h1>
<h1 align="center">
iGEM SYSU-Software 2022
</h1>
## 🚩Introduction
[](https://badge.fury.io/py/LoupeTool) [](https://badge.fury.io/py/LoupeTool) [](https://github.com/sysu-software-2022/LoupeTool)
**An integrate python package version of LoupeRunner**
We strongly recommend you execute **LoupeTool** on **high performance computing platform(HPC).**
## 🌟Download Python Package
```python
pip install LoupeTool
```
## 🔌Dependences Installation (CRITICAL)
✩ **Dependent Tools**: The following **4** tools and **5** Python packages are critical for running **LoupeTool** successfully.
✩ **Acceptable Operating System:** We strongly recommend you execute **LoupeTool** in **Linux** or **macOS**.
#### 1.blast+
- ##### Install with source code package & Configuration
You can click [Latest blast+](https://ftp.ncbi.nlm.nih.gov/blast/executables/blast+/LATEST/) and choose corresponding package (**suffix: tar.gz**) which is applicable to your OS (Linux/macOS)
Or you can just use `wget` to install your package:
> Linux
```shell
wget https://ftp.ncbi.nlm.nih.gov/blast/executables/blast+/LATEST/ncbi-blast-2.13.0+-x64-linux.tar.gz
tar -zxvf ncbi-blast-2.13.0+-x64-linux.tar.gz
```
```shell
mv ncbi-blast-2.13.0+ blast
echo "export PATH=$(pwd)/blast/bin:\$PATH" >> ~/.bashrc
source ~/.bashrc
```
> macOS
```shell
wget https://ftp.ncbi.nlm.nih.gov/blast/executables/blast+/LATEST/ncbi-blast-2.13.0+-x64-macosx.tar.gz
tar -zxvf ncbi-blast-2.13.0+-x64-macosx.tar.gz
```
```shell
mv ncbi-blast-2.13.0+ blast
echo "export PATH=$(pwd)/blast/bin:\$PATH" >> ~/.zshrc
source ~/.zshrc
```
**! `$(pwd)` is the path where you installed blast+ in.**
#### 2.muscle (v5.1)
See [muscle Version 5.1](https://github.com/rcedgar/muscle/releases/tag/v5.1) for installation.
Then type the following commands:
> Linux
```shell
chmod +x muscle5.1.linux_intel64
mv muscle5.1.linux_intel64 muscle
ln -s muscle /etc/bin
```
> macOS
```shell
chmod + muscle5.1.macos_arm64 # or muscle5.1.macos_intel64
mv muscle5.1.macos_arm64 muscle
ln -s muscle /usr/local/bin
```
For more details see [Muscle5](https://drive5.com/muscle5/)
#### 3.MMseqs2
Please refer to official installation user guide [MMseqs2 User Guide](https://github.com/soedinglab/mmseqs2/wiki#installation)
#### 4.parallel
> Linux
```shell
sudo apt install parallel
```
> macOS
```shell
brew install parallel
```
You should install [Homebrew](https://brew.sh) first if you don't have one in your Mac.
#### 5.Python Packages:
##### bio, pandas, numpy, sklearn, imblearn
You can install these python packages by running `pip install -r requirements.txt`
## 👾Quick Example
**! Make sure you have already downloaded all dependencies**
```python
from LoupeTool import Loupe
import os
if __name__ == "__main__":
Loupe.LoupeRunner(DefenseSystem_Name="Cas",
DefenseSystem_FilePath="./",
PTYFile=os.path.join("./", "Cas_INPUT/Database/CDS.pty"),
PathToDatabase=os.path.join("./", "Cas_INPUT/Database/ProteinDB"),
SeedPath=os.path.join("./", "Cas_INPUT/Archaea_Cas.csv"),
NeighborhoodVicinitySize=10000,
PermissiveClusteringThreshold=0.3,
SortingOverlapThreshold=0.4,
SortingCoverageThresold=0.25,
ThreadNum=os.cpu_count())
```
##### I. Parameters guide:
1. DefenseSystem_Name: ABI, RM, TA, DND, Cas;
2. DefenseSystem_FilePath: Your working directory;
3. PTYFile: your **.pty** file path;
4. SeedPath: your seed **.csv** file path;
5. NeighborhoodVicinitySize: change the bidirectional search domain of seed, if this increase, the search domain will be expand correspondingly. Our Suggestion: CRISPR-Cas: 10000,TA: 2000
6. PermissiveClusteringThreshold: this adjust mmseqs cluster parameter(i.e. --min-seq-id) in **step 9**, this will affect sequence similarity. For more details, see: [MMseqs2 User Guide](https://github.com/soedinglab/mmseqs2/wiki)
7. SortingOverlapThreshold and SortingCoverageThresold: these parameters are used to filter **low-matching hits** produced by **PSIBLAST** in **step 12**; increasing them will sharply raise specificity.
- (1) SortingOverlapThreshold:
Overlap threshold; hits are subject to sorting between two profiles if they overlap by more than
the threshold value
- (2) SortingCoverageThresold:
Coverage threshold; hits are stored if they cover original profile by more than the
threshold value
8. ThreadNum: thread number should be contingent on your **CPU core number**.
Hint: the most convenient way of managing these relevant paths is create a new directory for processing your data or use existing one and include all your files in this directory.
##### II. For users:
When processing large **seed** sets with **LoupeTool**, you may have to wait longer, depending on your CPU core count (some bottleneck steps in **LoupeTool** are optimized through **parallelization**, and performance is positively correlated with the number of CPU cores).
> e.g. 48 CPU cores usage in high performance computing platform when processing bulk data during paralleled period.

You can download **htop** to monitor **LoupeRunner** processing real-time situation just like the above.
## 🧩Documentation
### Experimental Design
> Input and Output files path can be specified in `config.py`
The entire procedure of **LoupeRunner** can be separated into 14 steps pipeline:
- In order to demonstrate every steps precisely, input and output file names are referenced from our example data.
### **Step1: Download genomic data from Refseq/GeneBank**
You need access to genomic data. Genomic data include genomic protein sequence (protein.faa) and protein annotation (genomic.gbff). If you are using the FTP Refseq (https://ftp.ncbi.nlm.nih.gov/refseq/release/archaea/), we recommend using the following code to download large data.
First, use the following Python script to get the URLs:
```python
# get url from Refseq relese,there we show how to download all genomic data of prokaryotes
# pip install requests
import os
import re
import requests
def load_url_context(type ,url):
# get url
request = requests.get(url)
raw_list = re.compile(r'<a.*?>(.*?)</a>').finditer(request.text.strip())
file_name = "_".join([type,"context.txt"])
with open(file_name, "w") as f:
for i in raw_list:
x = i.group(1)
if x.endswith("genomic.gbff.gz") or x.endswith("protein.faa.gz"):
file_https = ''.join([url,x])
#start with rsync
file_ftp = file_https.replace("https","rsync")
#print(file_ftp)
f.write(file_ftp)
f.write('\n')
f.close()
if __name__ == "__main__":
archaea_url = "https://ftp.ncbi.nlm.nih.gov/refseq/release/archaea/"
bacteria_url = "https://ftp.ncbi.nlm.nih.gov/refseq/release/bacteria/"
load_url_context("archaea", archaea_url)
load_url_context("bacteria", bacteria_url)
```
Then use rsync to download the data:
```bash
#!/bin/bash
# you can modify TXT file to change which data you want to download
while read line
do
rsync --copy-links --recursive --times --verbose $line archaea/
done < archaea_context.txt
```
Note that this may take considerably more time than using wget.
### **Step2: Preparation of BLAST database**
Unzip the above files and merge all .faa files into a single file (ProteinSequences.faa):
```bash
cat *protein.faa > YourWorkPath/ProteinSequences.faa
```
Create a BLAST database using the makeblastdb command:
```bash
makeblastdb -in ProteinSequences.faa -out ProteinsDB -dbtype prot -parse_seqids
```
### **Step3: Preparation of CDS**
The annotation data is very large, especially the bacterial genome data, but there is a lot of information we do not need, so we need to further operation to simplify the content.
We use CDS_extract.py to import .gbff file. The output data format(Text file, delimited by Tab) is like:
| LocusTag | ORFStart:ORFStop | Strand | OrganismID | ContigID | Accession |
| ------------- | ---------------- | ------ | -------------------------------------- | ----------- | -------------- |
| METBO_RS00005 | 51:1401 | + | Methanobacterium lacus-GCF_000191585.1 | NC_015216.1 | WP_013643605.1 |
### **Step4: Preparation of Seeds**
Prepare your seed data as the format like:
| Assembly | LociID | Accession | ContigID | Start | End |
| :-------------- | ------------- | -------------- | ----------------- | ------ | ------ |
| GCF_001729285.1 | A9507_RS00880 | WP_069582310.1 | NZ_LZPM01000003.1 | 122396 | 122990 |
If your seed data is from GenBank (GCA_XXXXXXXXX.1) and lacks a ContigID, we recommend running Step 5 to supplement the data from the CDS.
### **Step5: Extracting seeds**
- Input: `Archaea_Cas.csv`, `CDS.pty`
Fetching seeds of interest (e.g. Archaea_Cas.csv) in from (e.g. CDS.pty) provided in database, essential attribute includes:
assembly_accession, locus_tag, product_accession, contigID, start, end.
Our example show in the table below:
> Archaea_Cas.csv (partial)
| Assembly_accession | Locus_tag | Product_accession | ContigID | Start | End |
| ------------------ | -------------- | ----------------- | ---------- | ------- | ------- |
| GCA_000230715.3 | Natgr_1399 | AFZ72610.1 | (Optional) | 1390703 | 1391386 |
| GCA_000970265.1 | MSLAZ_2290 | AKB75551.1 | (Optional) | 2975643 | 2978066 |
| GCA_900079115.1 | SSOP1_1525 | SAI85079.1 | (Optional) | 1340732 | 1341376 |
| GCA_000189935.2 | AABMKBHA_00165 | AABMKBHA_00165 | (Optional) | 33470 | 34630 |
| GCA_000979385.1 | EO92_18095 | KKG11218.1 | (Optional) | 53457 | 54251 |
- Output: `Seeds_Cas.tsv`
> Seeds_Cas.tsv (partial)
| Assembly | LociID | Accession | ContigID | Start | End |
| :-------------- | ------------- | -------------- | ----------------- | ------- | ------- |
| GCF_001729285.1 | A9507_RS00880 | WP_069582310.1 | NZ_LZPM01000003.1 | 122396 | 122990 |
| GCF_000214725.1 | MSWAN_RS07020 | WP_013825929.1 | NC_015574.1 | 1538607 | 1539333 |
| GCF_900095295.1 | MCBB_RS06490 | MCBB_RS06490 | NZ_LT607756.1 | 1386026 | 1387868 |
| GCF_900095295.1 | MCBB_RS06465 | WP_071908025.1 | NZ_LT607756.1 | 1380806 | 1381325 |
| GCF_000302455.1 | A994_RS11405 | WP_004031769.1 | NZ_AMPO01000012.1 | 2073 | 2592 |
### **Step6: Selecting neighborhoods**
Select neighborhood around seeds
- Input: `Seeds_Cas.tsv`
- Parameter:
- NeighborhoodVicinitySize(default: 10000): change the bidirectional search domain of seed(i.e. offset), if this increase, the search domain will be expand correspondingly.
- Output: `Vicinity_Cas` (list of proteins in vicinity of seeds)
```
===
WP_013644337.1 708731..710093 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644338.1 710089..710788 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644339.1 711103..711739 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644340.1 711758..712142 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644341.1 712125..712428 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644342.1 712458..714444 - Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644343.1 714508..714814 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644344.1 714975..716799 - Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644345.1 717225..717567 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644346.1 717609..718158 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644347.1 718126..718936 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644348.1 718970..719564 - Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644349.1 719950..720817 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644350.1 720894..721371 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644351.1 721367..722756 - Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644352.1 723448..724663 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644353.1 724962..725496 - Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644354.1 725806..726502 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644355.1 726503..726824 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644356.1 726849..727638 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644357.1 728039..728633 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644358.1 728644..729043 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
WP_013644359.1 729210..732126 + Methanobacterium lacus-GCF_000191585.1 NC_015216.1
===
WP_013825920.1 1526093..1526849 + Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_013825921.1 1527015..1527132 + Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_013825922.1 1527204..1527612 + Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_013825923.1 1527926..1528274 - Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_013825924.1 1528859..1529774 - Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_013825925.1 1529770..1530721 - Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_013825926.1 1536731..1536995 - Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_048188005.1 1537000..1537945 - Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_048188364.1 1538060..1538582 - Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_013825929.1 1538607..1539333 - Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_013825930.1 1539392..1541603 - Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_013825931.1 1541612..1542230 - Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_013825932.1 1542226..1543225 - Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_013825933.1 1543225..1544599 - Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_013825934.1 1545276..1546743 - Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_052296851.1 1546885..1547275 + Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_013825936.1 1547556..1548792 - Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_013825937.1 1549070..1550777 - Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_013825938.1 1551299..1552286 - Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_013825939.1 1552375..1553728 - Methanobacterium paludis-GCF_000214725.1 NC_015574.1
WP_013825940.1 1553724..1554780 - Methanobacterium paludis-GCF_000214725.1 NC_015574.1
===
```
### **Step7: Collecting protein IDs**
- Input: `Vicinity_Cas` (list of proteins in vicinity of seeds)
Extract and sort proteins in vicinity of seeds in ascending order forming VicinityIDs in `.lst` file.
- Output: `VicinityIDs_Cas.lst ` (partial)
```
WP_004030635.1
WP_004030636.1
WP_004030637.1
WP_004030638.1
WP_004030640.1
WP_004030642.1
WP_004030643.1
WP_004030644.1
WP_004030645.1
WP_004030646.1
.....
```
### **Step8: Fetching protein sequences**
Tool `blastdbcmd`required
- Input: `VicinityIDs_Cas.lst`, Database
using the file generated in Step 7 ( `VicinityIDs_Cas.lst`) to get protein sequences from database.
- Output: `Vicinity_Cas.faa` (partial)
```
>ref|WP_004030635.1| glycosyltransferase family 4 protein [Methanobacterium formicicum]
MDKIAISVVVDIFDDEGTTVRPKRVAELLKNNFDTCFINRSSSDLKEINGIPVHIVKPAGTKLWNIKLFGLLSGNDFDFV
YCSSDWFGFLTYFMLKRFYDYKIIFEAHTIISEEFKERKAHPFKVFFFQVLEKFAIKHSDYVVALSENIYDYYSYNKNIE
LVHVFIDEELFISDVKRKINDDKKVIGLIGPFDEFSNQYFLEFLRKNIDQFDDRISFRIIGKCQDKIQHPRIEYTGYMNS
IHDYVNVLSSLDGLLVPSRVATLGPLNKIIEAMACSVPVFTTPKGIVGLYNIKPGQEIYVLEEDDLVCGLNNHVFSDELI
NIAKNARLYVEKYYSKKANEKKLLRIFNRLNEG
>ref|WP_004030636.1| glycosyltransferase family 4 protein [Methanobacterium formicicum]
MIIGYFSSTFPYSVSNPKYFCGGSSLATHSLVNEISNSDIDIKVFTTSADSEDHLDMDGRMGIYRYATKIKLLTSPISLG
LFHKPLEHDVDLVHVSFDMPPGPFAAYRYARKKSLPLILTYHGDWDPDYGSFVRKVGVSINNKFVSDLLSYADIIISPSK
LYAKKSKYLSKYLDKIRVIPNGIDLDEFQLNYSQSECREKLNLPLECKIILFFGYLTPYKGPDILLGAFREVLKNQPDTV
LLFAGNGNMEDELKKLARQWNIQDNVIFAGFVDKKMRSLYYKSADIFCLPSTMSTECYPLAILEAMASGVPVVASDIGGI
PDIIENNVNGLLVTPTNPEKLEDNLNLLLQNPEIRAKFSENALKGIKKYSWKNIATETLKLYESLLENR
```
### **Step9: Clustering protein sequences**
- Input: `Vicinity_Cas.faa`
- Parameter:
- PermissiveClusteringThreshold(default: 0.3)
Run the following command to cluster protein sequences contained in the file `Vicinity_Cas.faa` using a
sequence similarity cutoff value of 0.3 and save results in the `VicinityPermissiveClustsLinear_Cas.tsv` file:
- Output: `VicinityPermissiveClustsLinear_Cas.tsv` (partial)
```
WP_048191534.1 WP_048191534.1 WP_004030642.1 WP_023992731.1
WP_071907103.1 WP_071907103.1 WP_013644342.1 WP_013826017.1
WP_004031781.1 WP_004031781.1 WP_100906549.1
WP_013825921.1 WP_013825921.1 WP_095651998.1 WP_100906253.1 WP_095651996.1 WP_023992734.1 WP_013826664.1 WP_095651994.1 WP_095651997.1 WP_179288731.1 WP_023992735.1 WP_100906252.1 WP_023992122.1 WP_232727999.1
WP_013826016.1 WP_013826016.1 WP_013644343.1 WP_071907102.1
```
### **Step10: Making profiles** (Parallelized)
▷ `blastdbcmd`, `muscle` tools required
- Input: `VicinityPermissiveClustsLinear_Cas.tsv`, Database
Make profiles for the clusters in `VicinityPermissiveClustsLinear_Cas.tsv`,
This step will create a protein profile for each permissive cluster for proteins from the database using the Muscle program and will save the profiles to the CLUSTERS\_\${DefenseSystem_Name} folder(\${DefenseSystem_Name} is a variable which is configured in `config.py` ) with an ‘.ali’ extension and CLUSTER_ prefix with line number after the prefix as cluster ID (this step will create the CLUSTERS\_\${DefenseSystem_Name} folder if it doesn't exist in the current directory).
▷ For different configuration of muscle in this step, see [muscle documents](https://drive5.com/muscle5/manual/).
- Output: ``CLUSTERS_Cas/CLUSTER_*.ali``
> CLUSTER_5.ali
```
>ref|WP_071907103.1| NFACT family protein [Methanobacterium congolense]
MKTMSNVDVYAICTELKDTLKDARVDKAYQPTKDTVLIRFHIPGKGRTDVVFQAGTRVHTTQYPPENPKIPPSFPMLLRK
HLKGGTITDVRQHHFDRIMELDIQKEHRYTLVVELFSKGNIILVDEEGTIILPLKRKLWQDRKISSKEIYKYPPENEFNP
LKAEKEDIKKLFMDSDRDVVRTLAGSGLGGLYAEEIVLRSDVDKKKSATDLEEAELEAIYNAFQELFQPLKDHAFHPRII
SGEKEDVLPLELRKYEGFESKTFETYNQAADEFYSSRVGEDIKKVHEDIWAREIGKYEKRMKIQLETLENFKKTIVESTI
KGDALYAHYHEVQDMINTIMEARKNYSWAEVSSTIKKAKKHGAAGLESIEAVDKMGVMDLNLEGVRVQVDSNIGIPENAE
KYYNKGKKAKRKINGVNIAIEKTQAEIDKAKNKREIAMEKVLVPQKRVRKELKWFEKLRWFVSSDGNLVIGGRDATTNEM
VVKKHLENRDVYFHSDIHGAASVVVKGGEGEISEETLIEAASFSASFSSAWQKGFSTHDVYWVHPDQVSKTPQSGEFVAK
GAFIIRGSRNYMRGVPLLVAVGIVDYEGERVMAGPPEAVSAYTDNYAVIKPGYTKKEEMARQIRNKIDNEGVLSIEDVVR
VLPSGKCDFVDKRSLKW----KR
>ref|WP_013644342.1| NFACT family protein [Methanobacterium lacus]
MKAMSNVDVYAICKELGEVLKDARVQKAYQPTKDTVLIRFHVPGKGRVDVVFQAGFRVHTTQYPPQNPKIPPNFPMLLRK
YIKGGTVTAVKQHNFDRIMRIDIQKEEKFSLVVELFAKGNIILLDHEDKIILPLKRKVWQDRKISSKEEYKYPPERGMNP
LEVDKEELKTILTNSDRDIIRTLARNGLGGLYAEEIALRSDVAKNKTADEITDEDVEAIQSAINSIFDPLKTFNFNPQIV
KGKKEDVLPLDLLMYKDFEKESFESFNDAADEFYSSIVGEDIVNVNEEVWSGEVGKFEKRLNIQLETLEKFEKTVKDSKI
KGEAIYSDYQAIENILNIIHSARETNSWLEIIATVKKAKKDKVPGLEIIESIDKMGVLTLNLDGVRVNIDSSMGIPENAE
IYYNKGKKAKRKIKGVHIAIEKTRKEIDKAKNKREIEMEKVLVPQKRVKKDLKWYEKLRWFVTSDGLLAIGGRDATTNEM
VVKKHMENRDIYFHSDIHGASSVILKAGEGEIPERSINETAAFAACFSSAWSKGLGSTDVYWVHPEQVSKTPQSGEFVAK
GAFIIRGSRNYMRGLPLTLSLGIVDYEGSRIMAGPPEAVSNLTEKYVTVKPGYIKKEEIARQIRNNIDDEKLLSIEDVVR
VLPSGKCDFLDSKGFKR--NKKR
>ref|WP_013826017.1| NFACT family protein [Methanobacterium paludis]
MKAMSNVDIYTICNELKEILKDARVDKAYQPTRDTVLIRFHVPGKGRVDVVFQAGLRVHTTQYPPENPQIPPSFPMILRK
HLKGGNVTCVKQHNFDRILKINIQKEHKYSLVIELFAKGNIILLDEEGTIIMPLKRKLWEDRNISSKEEYKYPPERGINP
LEVTKEELETLFAESDRDLIRTLASSGLGGLYAEEVMLRSGVKKDKPSSDITPEELDFIHNAMSDVFSPLKTAQFHPQII
SSEKDDVLPLNLTKYEKYEKKTFETFNQAADEFYSSIVGDDIKQVHEDVWAAEVGKFEKRLKIQMETLEKFKDTIVKTKI
KGEAIYSNYQNIQNILDIIHNARETYSWLDIIDIIKKGKKEKVSGLDIIESLDKMGVLTLNLDGTIVNVDSNMSIPENAE
IYYNKGKKAKRKISGVNIAIEKTMKEVERAKNKREIAMEKVLVPQKRVRKELKWFEKLRWFLSSDGLLVIGGRDATTNEM
IVKKHMENRDIYFHSDIHGAASVVVKAGEGEVPESTLNETASFAGSFSSAWSAGFGSTDVYWVHPDQVSKTPQSGEFVGK
GAFIIRGSRNFIRNAPLLVAVGIVDYEGKRIMAGPPEALVKYTDNYVVIKPGYTKKEEMARQIRHKIDEEKLLSIEDVVR
VLPSGKCDFVDKRQFKGRDFKRK
```
### **Step11: Running PSI-BLAST for profiles** (Parallelized)
▷ `psiblast` tool required
- Input: ``CLUSTERS_Cas/CLUSTER_*.ali``, Database
- Parameter:
- ThreadNum: Number of threads (CPUs) to use in blast search.
The script in this step executes a PSI-BLAST search of the genomic database with the
profiles created at Step 10 used as queries and saves the results for each cluster with a ‘.hits’ extension in
the CLUSTERS\_\${DefenseSystem_Name} folder.
For more information, see [BLAST® Command Line Applications User Manual](https://scicomp.ethz.ch/public/manual/BLAST/BLAST.pdf)
- Output: ``CLUSTERS_Cas/CLUSTER_*.hits``
> CLUSTER_1.hits
```
# PSIBLAST 2.13.0+
# Iteration: 1
# Query: ref|WP_013825920.1| hypothetical protein [Methanobacterium paludis]
# Database: ./Cas_INPUT/Database/ProteinDB
# Fields: query id, subject id, subject length, s. start, s. end, evalue, query seq, subject seq, q. start, q. end, score
# 29 hits found
ref|WP_013825920.1| ref|WP_013825920.1| 251 1 251 0.0 MGLQLHPIISIPFGVITTIVFLQVFGIPTLPLGGNAGILILIPVAIIFGGFTATYFTDTNDKKIIYSICVGIIISFITLILGLKEYIGYNDVVVMFISFCVMAGIGGFLGKIADEVNRKILEIKYKILSNISESKKNILKNVLISILFVGMMSFIFVGLIFMPFGNPDIIIIQSSGFSPNSTLISPSTVTWINNDTKIHRVVSDYGLFDSGNITPGQSYSHYFRDVKAYPYHDSIDPSMKGTVLLPMSPGE MGLQLHPIISIPFGVITTIVFLQVFGIPTLPLGGNAGILILIPVAIIFGGFTATYFTDTNDKKIIYSICVGIIISFITLILGLKEYIGYNDVVVMFISFCVMAGIGGFLGKIADEVNRKILEIKYKILSNISESKKNILKNVLISILFVGMMSFIFVGLIFMPFGNPDIIIIQSSGFSPNSTLISPSTVTWINNDTKIHRVVSDYGLFDSGNITPGQSYSHYFRDVKAYPYHDSIDPSMKGTVLLPMSPGE 1 251 1286
ref|WP_013825920.1| ref|WP_004031972.1| 114 6 112 4.61e-27 VLISILFVGMMSFIFVGLIFMPFGNPDIIIIQSSGFSPNSTLISPSTVTWINNDTKIHRVVSDYGLFDSGNITPGQSYSHYF--RDVKAYPYHDSIDPSMKGTVLLPMSPG LLIGLPIFGVCLLLLLGLHDIPAE----IYVGNSGFDPNVTNIYPSKVTWTNNDSQIHRIISDDGLFDSGNLSPGENYTYDFSYHKNKIYKYHDSTNTSLKGTIQIEMGPG 142 250 251
ref|WP_013825920.1| ref|WP_008517151.1| 114 27 112 1.08e-24 PDIIIIQSSGFSPNSTLISPSTVTWINNDTKIHRVVSDYGLFDSGNITPGQSYSHYF--RDVKAYPYHDSIDPSMKGTVLLPMSPG PTEIYVGNSGFDLNVTNIYPSKVTWTNNDSQIHRIVSDDGLFDSGNLSPGENYTYDFSYHKNRIYKYHDSTNTSLKGTIQIEMGPG 167 250 235
ref|WP_013825920.1| ref|WP_052374236.1| 116 2 112 1.86e-22 KKNILKNVLISILFVGMMSFIFVGLIFMPFGNP--DIIIIQSSGFSPNSTLISP--STVTWINNDTKIHRVVSDYGLFDSGNITPGQSYSHYFRDVKAYPYHDSIDPSMKGTVLLPMSP KRNLSVWIVLSILFV-------VGISGCTFKQPTNDTVVIQNEGFSP-SALIVPVNTTVTWINKDPVTQNLVSDTGLFESGNLSNGQSFNYTFNQTGSYHYYSNLYPNMKGSIIVTTSP 135 249 220
ref|WP_013825920.1| ref|WP_223790141.1| 128 4 106 7.56e-21 SILFVGMMSFIFVGLIFMP---FGNPDIIIIQSSGFSPNSTLISP-STVTWINNDTKIHRVVSDYGLFDSGNITPGQSYSHYFRDVKAYPYHDSIDPSMKGTVLL NLIFVGV--FLIFGIVAVSGCTSSQTSIVTIQNSSFNPSTLNVQVGTTVTWINKDTTTHDVVSDTGLFNSGNLTNGMSYNYTFNQTGSFAYHSAIQPSMTGTIVV 145 245 210
ref|WP_013825920.1| ref|WP_081882600.1| 130 4 108 8.80e-19 SILFVGMMSFIFVGLIFMPF-----GNPDIIIIQSSGFSPNSTLISP-STVTWINNDTKIHRVVSDYGLFDSGNITPGQSYSHYFRDVKAYPYHDSIDPSMKGTVLL NLIFVGV--FLVLGIVAVSGCTSNQTSGNTVTIQNMAFNPSTLNVKVGTTVTWINKDSVTHDVVSDTGLFNSGNLTNGMSYNYTFNQTGSFPYHCAIHPSMTGTIVV 145 245 196
ref|WP_013825920.1| ref|WP_081882599.1| 128 31 110 3.19e-18 IIIQSSGFSPNSTLISP-STVTWINNDTKIHRVVSDYGLFDSGNITPGQSYSHYFRDVKAYPYHDSIDPSMKGTVLLPMS VTIQNMAFNPSTLNVQVGTTVMWINKDSTTHHVVSDTGVFDSGDLATGQSYNYTFNQTGSFPYHCSIHPSMTGTIVVSTS 170 248 192
ref|WP_013825920.1| ref|WP_052374129.1| 161 4 109 7.24e-17 ISILFVGMMSFIFVGLIFMPFGNPDIIIIQSSGFSPNSTLISPST-VTWINNDTKIHRVVSDYGLFDSGNITPGQSYSHYFRDVKAYPYHDSIDPSMKGTVLLPMS INFIFLGILLTIGIVAVSGCTSQSSTVTIQNMAFNPSTVHITGSTTIIWINKDNIEHEVVSDTGLFDSGVLAPGESFNYTFNQAGDYAYHCAIHPSMVGIIVVSSS 144 248 185
ref|WP_013825920.1| ref|WP_223792080.1| 98 10 90 4.31e-16 NPDII--IIQSSGFSPNSTLISP--STVTWINNDTKIHRVVSDYGLFDSGNITPGQSYSHYFRDVKAYPYHDSIDPSMKGTV NPQTIQLLYKIEAFSP-STLIVPVNTTVTWINKDPVTQNLVSDTGLFESGNLSNGQSFNYTFNQTGSYHYHSNIHPNIKGSI 166 243 175
ref|WP_013825920.1| ref|WP_052375909.1| 145 68 144 5.14e-15 IIIQSSGFSPNS-TLISPSTVTWINNDTKIHRVVSDYGLFDSGNITPGQSYSHYFRDVKAYPYHDSIDPSMKGTVLL ISIQNMAFNPNKITVKSGTNVQWINNDNTQHQIVSDSGAFQSNTLNPGDSYNFFFDKTGIYGYHDALNSTITGTIVV 170 245 171
ref|WP_013825920.1| ref|WP_069583285.1| 145 68 144 1.37e-13 IIIQSSGFSPNS-TLISPSTVTWINNDTKIHRVVSDYGLFDSGNITPGQSYSHYFRDVKAYPYHDSIDPSMKGTVLL ISIGNMAFNPNKITVKSGTNVQWINNDNTQHQIVSDTGAFQSTILNPGDSYNFFFAKTGIYGYHDALNSTITGTIIV 170 245 161
ref|WP_013825920.1| ref|WP_071906376.1| 370 1 145 2.85e-13 LQLHPIISIPFGVITTIVFLQVFGIPTLPLG-------GNAGILILIPVAIIFGGFTATYFTDTNDKKIIYSICVGIIISFITLILGLKEYIGYNDVVVMFISFCVMAGIGGFLGKIADEVNRKILEIKYKILSNISESKKNILKNVLISILFVGMMSFIFVGL MKFHPAISIILGIVTILMWFILAGILGLDFSKSISNTSGGATLIILI-----LGGFVATYFTE--DKKIRYSIYEGLIF---TAFVGLSKNLKL--IFAAFIAYVLFIGIGGFIGKMTDNKERQNFK-------NHFEKGFNPIITIVMGFIVANFFYYLLLGI 3 159 167
ref|WP_013825920.1| ref|WP_071906376.1| 370 123 313 1.07e-11 HPIISIPFG-VITTIVFLQVFGIPTLPLGGN--AGILILIPVAIIFGGFTATYFTDTNDKKIIYSICVGIIISFITLILGLKE---YIGYNDVVVMFISFCVMAGIGGFLGKIADEVNRKILEIKYKILSNISESKKNILKNVLISILFVGMMSFIFVGLIFMPFGNPDIIIIQSSGFSPNSTLISPSTVTWINNDTKIHRVVSDYGLF NPIITIVMGFIVANFFYYLLLGITNIYTSYNIKTAALTIAVISNVIGGFTATFFA--KEKKIQYGIYTGLIILISSLAMKLIHGTLHVNYSSISI--VEYLLFAGIGGFIGKITDNTGRQSLK---KRFNNGYNPIITIVMGYFIATFFNNSILLITCTYNSNPFGVTQFIV------AAISFVIGGFTATFFAKEKKI-----QYGIY 6 208 155
ref|WP_013825920.1| ref|WP_071906376.1| 370 249 359 4.74e-07 HPIISIPFGVITTIVFLQVFGIPTLPLGGNA-GILILIPVAIIF--GGFTATYFTDTNDKKIIYSICVGIIISFITLIL----GLKEYIGYNDVVVMFISFCVMAGIGGFLGK NPIITIVMGYFIATFFNNSILLITCTYNSNPFGVTQFIVAAISFVIGGFTATFFA--KEKKIQYGIYTGMIILIVNLVLQLIYGPTIHEPYYIKIGKIAGYLIASGIGGYLGK 6 111 119
ref|WP_013825920.1| ref|WP_048082919.1| 253 1 147 8.06e-13 LQLHPIISIPFGVITTIVFLQVFGIPTLPLGGNAGILILIPVAI-IFGGFTATYFTDTNDKKIIYSICVGIIIS--FITLILGLKEYIGYNDVVVMFISFCVM--AGIGGFLGKIADEVNRKILEIKYKILSNISESKKNILKNVLISILFVGM MKVHPVISIILGIIAGIILLI---ISIKLFSGNALVSAATNFAISIIGGFIATYFA--KEKKIRYGIYEGIILSIMFISLVSLIHTTYIYFLIALVGIIFEMLLPATIGGFIGKMTEGNNRKSFKMKY--LNRNLHPIITIIAGILVTIVLMSL 3 151 161
ref|WP_013825920.1| ref|WP_048082919.1| 253 128 243 6.37e-11 LHPIISIPFGVITTIVFLQVFGIPTLPLGGNAGILILIPVAIIFGGFTATYFTDTNDKKIIYSICVGII-------ISFITLILGLKEYIGYNDVVVMFISFCVMAGIGGFLGKIADE LHPIITIIAGILVTIVLMSLFGSFHLKISMGITYFLMATIFFAAGGFVTAFL--AREKKMLYGIYEGIVAVIYTILARYIGIIMGLNTTVDYYLIIGAVIGYFLAAAIGSYLGKAAGE 5 115 147
ref|WP_013825920.1| ref|WP_069584028.1| 141 1 106 2.36e-12 LQLHPIISIPFGVITTIVFLQ---VFGIPTLPLGGNAGILILIPVAIIFGGFTATYFTDTNDKKIIYSICVGIIISFITLILGLKEYIGYND---VVVMFISFCVMAGIGGFLGKI MKLHPLISIILGLFVTLLLVMIPLVFDAP--PLVGNAMFIF----AFILGGFIATYF--SKDKKIRYSIYMGLIAAVLFSIIESPD--GFNKLPAILLGFIQFPGMSLIGGLPGKI 3 112 152
ref|WP_013825920.1| ref|WP_052374005.1| 111 30 109 2.26e-11 IIIIQSSGFSPNSTLISPSTVTWINNDTKI-HRVVSDYGL--FDSGNITPGQSYSHYFRDVKAYPYHDSIDPSMKGTVLL IIIAHETLTWTNSTIKVGNNVTWINHDFAVNHEIVSDSANYPFDSGVLKNGQSYNLTFTQPGTYNYHDKLNPNLKGTIIV 169 245 143
ref|WP_013825920.1| ref|WP_052375935.1| 109 3 108 2.98e-11 MSFIFVGLIFMPFG-----------NPDIIIIQSSGFSPNSTLISPSTVT-WINNDTKIHRVVSDYGLFDSGNITPGQSYSHYFRDVKAYPYHDSIDPSMKGTVLL LNFIRIGIILLVIGVISISGCTQEKQTNTIIIQNFTFKPNPMHVKAGDVVRWTSHDNAPHKIVSDTGNFESPDLNNGDTFTYTFDKKGEFNYHDELDSSIKGKVIV 152 245 142
ref|WP_013825920.1| ref|WP_157197598.1| 120 3 117 7.33e-11 QLHPIISIPFGVITTIVFLQV-FGIPTLPLGGNAGIL--ILIPVAIIFGGFTATYFTDTNDKKIIYSICVGIIISFI--TLILGLKEYIGYNDVVVMFISFCVMAGIGGFLGKIADEVNR KFNPVISIISGIIVTITMAYVGFLIIDTP---NFGILDIILLCFSLVIGGFISTYFTE--KRRIVYGVCEGLILSIMCATYVVGTGKGLSYINYIAAYINVALGFVSATYIGSILGRKNR 4 118 140
ref|WP_013825920.1| ref|WP_069583157.1| 356 1 110 1.14e-10 LQLHPIISIPFGVITTIVFLQVFGIPTLPLGGNAGILILIPVAIIFGGFTATYFTDTNDKKIIYSICVGII--ISFIT--LILGLKEYIGYNDVVVMFIS----FCVMAGIGGFLGKI MRLHPIKSIIIGAVTAITLL---GISTLTFYNSLAFGILNFAAPLIGGFIATYF--TSKRMVRYGACAGIISAAAFVAFEFILG---NIGLEAIPLMFISSSIIFGVIAGLGGITGEI 3 112 147
ref|WP_013825920.1| ref|WP_169740445.1| 93 16 92 2.74e-10 IIIQSSGFSPNSTLISPSTVT-WINNDTKIHRVVSDYGLFDSGNITPGQSYSHYFRDVKAYPYHDSIDPSMKGTVLL IIIQNFTFKPNPMHVKAGDVVRWTSHDNAPHKIVSDTGNFESPDLNNGDTFTYTFDKKGEFNYHDELDSSIKGKVIV 170 245 134
ref|WP_013825920.1| ref|WP_048082918.1| 356 1 115 3.03e-09 LQLHPIISIPFGVITTIVFLQVFGIPTLPLGGNAGILILIPVAIIFGGFTATYFTDTNDKKIIYSICVGII--ISFIT--LILGLKEYIGYNDVVVMFISFCVMAG-IGGFLGKIADEVNRKI MRLHPIKSIILGAVIAITLL---GISALVFYNSLAFGILNFVAPLIGGFIATYF--TSKRMARYGACAGIISAAAFVAFEFILG---NIGEDAIIIMFISFSFIFGVIAGLIGIIGGIISNRV 3 120 136
ref|WP_013825920.1| ref|WP_069585799.1| 375 1 172 7.70e-09 LHPIISIPFGVITTIVFLQVFGIPTLPLGGNAGILILIPVAIIFGGFTATYFTDTNDKKIIYSICVGIIISFITLILGLKEYIGYNDVVVMFISFCVMAGIGGFLGKIADEVNRKILEIKYKILSNISESKKNILKNVLISILFVGMMSFIFVGLIFMPFGNPDIIIIQSSGFSPNSTLISPSTVTWINNDTKIHRVVSDYGLF MKPFLSIILALITTIL-LFICEI-SLSVALNIYLGSLTVFLFILGGGIATWFAA--GKKIRYSIYYGLILAVITLVLG--------DYRVLIFA-PIFAGIGGFLGKMADKDSRQTFNGYHPVIAIIVGIIVMYIYNVFLGSV-TGAYDLSSSGLIGFVIG---AITLAVGGF----------TTTFLSKEKKI-----QYGIY 5 208 133
ref|WP_013825920.1| ref|WP_145975997.1| 117 5 107 2.23e-08 HPIISIPFGVITTIVFLQVFGIPTLPLGGNAGILILIPVAIIFGGFTATYFTDTNDKKI-IYSICVGIIISFITLILGLKEYIGYNDVVVMFISFCVMAGIGGFLGK HPVIAIILGNIIT-GFLGGFVI-ILPISLLSHILVIF--IFVLGGFSATYLSRTNKATIGFYNSLLYSISSLIGAIFIFKTGLTPNKVLILFIYFPILGLIGGFIAK 6 111 122
ref|WP_013825920.1| ref|WP_223790876.1| 127 46 125 7.08e-08 IIIIQSSGFSPNSTLISPSTVTWINNDTKI-HRVVSDYG--LFDSGNITPGQSYSHYFRDVKAYPYHDSIDPSMKGTVLL IIITHETLTWNNSTIKVGNNITWINRDFAIEHEIVSNTSNYAFDSGVLKNGQSFSLNFTKAGTYNYYDKLHPNLSGIIIV 169 245 119
ref|WP_013825920.1| ref|WP_218105063.1| 123 7 88 1.23e-07 VFGIPTLPLGGNAGILILIPVAIIFGGFTATYFTDTNDKKIIYSICVGIIISFITLILGLKEYIGYND---VVVMFISFCVMAGIGGFLGKI VFDAP--PLVGNAMFIF----AFILGGFIATYF--SKDKKIRYSIYMGLIAAVLFSIIESPD--GFNKLPAILLGFIQFPGMSLIGGLPGKI 24 112 117
ref|WP_013825920.1| ref|WP_223790185.1| 243 3 114 4.11e-07 QLHPIISIPFGVITTIVFLQVFG--IPTLPLGGNAG---ILILIPVAIIFGGFTATYFTDTNDKKIIYSICVGIIISFITLILGLKEYIGYNDVVVMFISFCVMAG----IGGFL RFHPVIAIITGSFFIFIINQIMNYIFDSIPINGMLGSELITILVPVLLILGGFITAFITNRN--RLLCAFCVGLFFPIINNAINI-AYLNSISAIVLFVLGALFAALITTLGGFI 4 109 118
ref|WP_013825920.1| ref|WP_156095866.1| 66 3 64 6.14e-07 STVTWINNDTKI-HRVVSDYG--LFDSGNITPGQSYSHYFRDVKAYPYHDSIDPSMKGTVLL NNITWINRDFAIEHEIVSNTSNYAFDSGVLKNGQSFSLNFTKAGTYNYYDKLHPNLSGIIIV 187 245 108
# BLAST processed 1 queries
```
### **Step12: Sorting blast hits**
- Input: ``CLUSTERS_Cas/CLUSTER_*.ali``, `CDS.pty`, `VicinityIDs_Cas.lst`, `Seeds_Cas.tsv`, `Vicinity_Cas.tsv`
- Parameters:
- (1) SortingOverlapThreshold:
Overlap threshold; hits are subject to sorting between two profiles if they overlap by more than
the threshold value
- (2) SortingCoverageThreshold:
Coverage threshold; hits are subject to sorting between two profiles if the alignment coverage
exceeds the threshold value
This step will read the BLAST hits from the CLUSTERS folder and save the sorted results for each cluster in the CLUSTERS\_\${DefenseSystem_Name} /Sorted/ folder with a ‘.hits_sorted’ extension:
- Output: `Cas_OUTPUT/CLUSTERS_Cas/Sorted/CLUSTER_*.hits_sorted`
Format
| ProteinID | BLAST score | Alignment start | Alignment stop | Alignment sequence | CLUSTERID | Contig | Is in vicinity islands | ORF start | ORF stop | Distance to the bait |
| -------------- | ----------- | --------------- | -------------- | ------------------------------------------------------------ | --------- | ----------- | ---------------------- | --------- | -------- | -------------------- |
| WP_013825920.1 | 1286 | 1 | 251 | MGLQLHPIISIPFGVITTIVFLQVFGIPTLPLGGNAGILILIPVAIIFGGFTATYFTDTNDKKIIYSICVGIIISFITLILGLKEYIGYNDVVVMFISFCVMAGIGGFLGKIADEVNRKILEIKYKILSNISESKKNILKNVLISILFVGMMSFIFVGLIFMPFGNPDIIIIQSSGFSPNSTLISPSTVTWINNDTKIHRVVSDYGLFDSGNITPGQSYSHYFRDVKAYPYHDSIDPSMKGTVLLPMSPGE | CLUSTER_1 | NC_015574.1 | 1 | 1526093 | 1526849 | 6 |
> CLUSTER_1.hits_sorted
```
WP_013825920.1 1286 1 251 MGLQLHPIISIPFGVITTIVFLQVFGIPTLPLGGNAGILILIPVAIIFGGFTATYFTDTNDKKIIYSICVGIIISFITLILGLKEYIGYNDVVVMFISFCVMAGIGGFLGKIADEVNRKILEIKYKILSNISESKKNILKNVLISILFVGMMSFIFVGLIFMPFGNPDIIIIQSSGFSPNSTLISPSTVTWINNDTKIHRVVSDYGLFDSGNITPGQSYSHYFRDVKAYPYHDSIDPSMKGTVLLPMSPGE CLUSTER_1 NC_015574.1 1 1526093 1526849 6
WP_004031972.1 251 142 250 LLIGLPIFGVCLLLLLGLHDIPAE----IYVGNSGFDPNVTNIYPSKVTWTNNDSQIHRIISDDGLFDSGNLSPGENYTYDFSYHKNKIYKYHDSTNTSLKGTIQIEMGPG CLUSTER_1 NZ_AMPO01000013.1 0 61551 61896 10000
WP_052374236.1 220 135 249 KRNLSVWIVLSILFV-------VGISGCTFKQPTNDTVVIQNEGFSP-SALIVPVNTTVTWINKDPVTQNLVSDTGLFESGNLSNGQSFNYTFNQTGSYHYYSNLYPNMKGSIIVTTSP CLUSTER_1 NZ_JQLY01000001.1 0 1098608 1098959 10000
WP_223790141.1 210 145 245 NLIFVGV--FLIFGIVAVSGCTSSQTSIVTIQNSSFNPSTLNVQVGTTVTWINKDTTTHDVVSDTGLFNSGNLTNGMSYNYTFNQTGSFAYHSAIQPSMTGTIVV CLUSTER_1 NZ_JAIOUQ010000001.1 0 17461 17848 10000
WP_081882600.1 196 145 245 NLIFVGV--FLVLGIVAVSGCTSNQTSGNTVTIQNMAFNPSTLNVKVGTTVTWINKDSVTHDVVSDTGLFNSGNLTNGMSYNYTFNQTGSFPYHCAIHPSMTGTIVV CLUSTER_1 NZ_JQLY01000001.1 0 807416 807809 10000
WP_052374129.1 185 144 248 INFIFLGILLTIGIVAVSGCTSQSSTVTIQNMAFNPSTVHITGSTTIIWINKDNIEHEVVSDTGLFDSGVLAPGESFNYTFNQAGDYAYHCAIHPSMVGIIVVSSS CLUSTER_1 NZ_JQLY01000001.1 0 784336 784822 10000
WP_071906376.1 167 3 159 MKFHPAISIILGIVTILMWFILAGILGLDFSKSISNTSGGATLIILI-----LGGFVATYFTE--DKKIRYSIYEGLIF---TAFVGLSKNLKL--IFAAFIAYVLFIGIGGFIGKMTDNKERQNFK-------NHFEKGFNPIITIVMGFIVANFFYYLLLGI CLUSTER_1 NZ_LT607756.1 0 613477 614590 10000
WP_048082919.1 161 3 151 MKVHPVISIILGIIAGIILLI---ISIKLFSGNALVSAATNFAISIIGGFIATYFA--KEKKIRYGIYEGIILSIMFISLVSLIHTTYIYFLIALVGIIFEMLLPATIGGFIGKMTEGNNRKSFKMKY--LNRNLHPIITIIAGILVTIVLMSL CLUSTER_1 NZ_KN050803.1 0 147173 147935 10000
WP_069584028.1 152 3 112 MKLHPLISIILGLFVTLLLVMIPLVFDAP--PLVGNAMFIF----AFILGGFIATYF--SKDKKIRYSIYMGLIAAVLFSIIESPD--GFNKLPAILLGFIQFPGMSLIGGLPGKI CLUSTER_1 NZ_LMVM01000040.1 0 182744 183170 10000
WP_052375935.1 142 152 245 LNFIRIGIILLVIGVISISGCTQEKQTNTIIIQNFTFKPNPMHVKAGDVVRWTSHDNAPHKIVSDTGNFESPDLNNGDTFTYTFDKKGEFNYHDELDSSIKGKVIV CLUSTER_1 NZ_JQKN01000008.1 0 54678 55008 10000
WP_157197598.1 140 4 118 KFNPVISIISGIIVTITMAYVGFLIIDTP---NFGILDIILLCFSLVIGGFISTYFTE--KRRIVYGVCEGLILSIMCATYVVGTGKGLSYINYIAAYINVALGFVSATYIGSILGRKNR CLUSTER_1 NZ_JQKN01000011.1 0 42549 42912 10000
WP_069583157.1 147 3 112 MRLHPIKSIIIGAVTAITLL---GISTLTFYNSLAFGILNFAAPLIGGFIATYF--TSKRMVRYGACAGIISAAAFVAFEFILG---NIGLEAIPLMFISSSIIFGVIAGLGGITGEI CLUSTER_1 NZ_LMVM01000037.1 0 122575 123646 10000
WP_048082918.1 136 3 120 MRLHPIKSIILGAVIAITLL---GISALVFYNSLAFGILNFVAPLIGGFIATYF--TSKRMARYGACAGIISAAAFVAFEFILG---NIGEDAIIIMFISFSFIFGVIAGLIGIIGGIISNRV CLUSTER_1 NZ_KN050803.1 0 145938 147009 10000
WP_069585799.1 133 5 208 MKPFLSIILALITTIL-LFICEI-SLSVALNIYLGSLTVFLFILGGGIATWFAA--GKKIRYSIYYGLILAVITLVLG--------DYRVLIFA-PIFAGIGGFLGKMADKDSRQTFNGYHPVIAIIVGIIVMYIYNVFLGSV-TGAYDLSSSGLIGFVIG---AITLAVGGF----------TTTFLSKEKKI-----QYGIY CLUSTER_1 NZ_LMVM01000039.1 0 64659 65787 10000
WP_145975997.1 122 6 111 HPVIAIILGNIIT-GFLGGFVI-ILPISLLSHILVIF--IFVLGGFSATYLSRTNKATIGFYNSLLYSISSLIGAIFIFKTGLTPNKVLILFIYFPILGLIGGFIAK CLUSTER_1 NZ_LT607756.1 0 613074 613428 10000
```
### **Step13: Calculating LOUPE metric** (Parallelized)
▷ `parallel` , `mmseqs` tool required
- Input: `Cas_OUTPUT/CLUSTERS_Cas/Sorted/CLUSTER_*.hits_sorted`, `VicinityPermissiveClustsLinear_Cas.tsv`, Database
- Parameter:
- ThreadNum: Number of threads (CPUs) to use in blast search.
This step will calculate effective cluster sizes and the relevance metrics for all sorted hits in CLUSTERS\_\${DefenseSystem_Name} /Sorted/, created in the previous step, and save results into the file specified in arguments:
- Output: `Relevance_Cas.tsv`
| ClusterID | Effective size in vicinity of baits | Effective size in entire database | Median distance to bait (in ORFs) | DSED |
| --------- | ----------------------------------- | --------------------------------- | --------------------------------- | -------------------- |
| CLUSTER_1 | 1 | 21 | 9 | 0.047619047619047616 |
> Relevance_Cas.tsv (partial)
```
CLUSTER_1 1 21 9 0.047619047619047616
CLUSTER_10 2 6 6 0.3333333333333333
CLUSTER_100 1 18 3 0.05555555555555555
CLUSTER_101 1 12 4 0.08333333333333333
CLUSTER_102 1 4 2 0.25
CLUSTER_103 7 10 0 0.7
CLUSTER_104 2 4 2 0.5
CLUSTER_105 2 11 2 0.18181818181818182
CLUSTER_106 1 13 4 0.07692307692307693
CLUSTER_107 7 9 0 0.7777777777777778
CLUSTER_108 1 7 4 0.14285714285714285
CLUSTER_109 3 12 4 0.25
CLUSTER_11 2 12 12 0.16666666666666666
CLUSTER_110 1 1 1 1.0
CLUSTER_111 1 3 3 0.3333333333333333
CLUSTER_112 3 20 7 0.15
CLUSTER_113 2 12 4 0.16666666666666666
CLUSTER_114 3 10 3 0.3
CLUSTER_115 1 9 5 0.1111111111111111
CLUSTER_116 2 28 7 0.07142857142857142
CLUSTER_117 1 2 7 0.5
CLUSTER_118 3 11 4 0.2727272727272727
CLUSTER_119 2 67 5 0.029850746268656716
CLUSTER_12 1 9 5 0.1111111111111111
CLUSTER_120 3 12 3 0.25
CLUSTER_121 2 11 5 0.18181818181818182
CLUSTER_122 3 21 2 0.14285714285714285
CLUSTER_123 2 3 0 0.6666666666666666
CLUSTER_124 1 3 13 0.3333333333333333
CLUSTER_125 1 3 8 0.3333333333333333
CLUSTER_126 2 11 5 0.18181818181818182
CLUSTER_127 3 8 0 0.375
CLUSTER_128 2 10 6 0.2
CLUSTER_129 2 10 3 0.2
CLUSTER_13 1 2 2 0.5
CLUSTER_130 1 2 3 0.5
CLUSTER_131 1 1 3 1.0
```
### **Step14: Sorting relevance and predicting candidates**
▷ `blast+` tool required
- input:DefenseSystem Name, DefenseSystem FilePath, PathToDatabase
- output: ``Accession/``, ``NewGene_Cas/``
In order to improve the efficiency and avoid repeated reading of cluster files, Sorting relevance and Predicting candidates are put together.
First of all, we determine which type of defense system is being analyzed (Abi, Cas, DND, RM, TA; the demo corresponds to Cas) and read the Cas_OUTPUT/CLUSTERS_Cas/*.ali files. According to gene function we classify the genes into five files:
- ``ACCESSION_Cas.txt`` consists of all genes.
- ``ACCESSION_ONLY_Cas.txt`` consists of the genes annotated as 'Cas', and we give the identifier **1** in the end of every line.
- ``ACCESSION_Other_DefenseGene.txt`` consists of the genes annotated as other defense systems (like 'toxin', 'abortive infection', 'restriction-modification'), and we give the identifier **2** in the end of every line.
- ``ACCESSION_hypothetical_Cas.txt`` consists of the genes annotated as 'hypothetical', and we give the identifier **4** in the end of every line.
- The genes which do not belong to ``ACCESSION_ONLY_Cas.txt``, ``ACCESSION_Other_DefenseGene.txt`` or ``ACCESSION_hypothetical_Cas.txt`` are placed into ``ACCESSION_HouseKeepingGene.txt`` and we give the identifier **3** in the end of every line (matching the housekeeping category below). Strictly speaking, the definition of House Keeping Gene we used is different from the usual one.
We classified cluster according to gene function: defense genes consistent with input data --**1**, other defense genes -- **2**, housekeeping genes --**3**, and unknown functional genes --**4**. The functions of multiple Accession in each Cluster may be different. If so, select the following order: 1>2>3>4.
In order to improve the discrimination of different types of clusters, we added two new parameters: the conservation of genes within clusters among species and the conservation among genera. The calculation is as follows:
Suppose a CLUSTER contains the number of g genes, these genes appear in the number of n species/genus, each species has some of the clusters' genes and are not repetitive, denotes them as a1, a2, a3,...,an, where $a_1 + a_2 + a_3 + ... + a_n = g$
$$ C = \frac{\frac{1}{a_1} +\frac{1}{a_2} + \frac{1}{a_3} + ... + \frac{1}{a_n} }{n}\ $$
C is the conservation of the cluster within species or genera.
The smaller the C value is, the stronger the conservation and concentration of the CLUSTER genes are. Large C value represents the genes of the CLUSTER is scattered in multiple species/genus, reflecting that the gene may have gene horizontal transfer.
In conclusion, we have 6 parameters to describe our data: the number of times the cluster appears in the vicinity of the seeds, the number of times the cluster appears on the whole genome, the ratio of the number of times the cluster appears in the selected range on either side of the seed to the number of times the cluster appears on the whole genome, the conservation of clusters at the species level, the conservation of clusters at the genus level, and the type of clusters. We save the data into *Relevance_Sorted_Category.csv* with the format like:
| ClusterID | Effective size in vicinity of baits | Effective size in entire database | Median distance to bait (in ORFs) | DSED | Category | the conservation of clusters at the species level | the conservation of clusters at the genus level |
| :-------- | ----------------------------------- | --------------------------------- | --------------------------------- | -------------------- | -------- | ------------------------------------------------- | ----------------------------------------------- |
| CLUSTER_1 | 1 | 21 | 9 | 0.047619047619047616 | 3 | 1.0 | 1.0 |
> Relevance_Sorted_Category.csv (partial)
```
CLUSTER_1 1 21 9 0.047619047619047616 3 1.0 1.0
CLUSTER_10 2 6 6 0.3333333333333333 3 0.3333333333333333 1.0
CLUSTER_100 1 18 3 0.05555555555555555 3 1.0 1.0
CLUSTER_101 1 12 4 0.08333333333333333 3 1.0 1.0
CLUSTER_102 1 4 2 0.25 4 1.0 0.0
CLUSTER_103 7 10 0 0.7 1 0.125 0.8571428571428571
CLUSTER_104 2 4 2 0.5 4 0.5 1.0
CLUSTER_105 2 11 2 0.18181818181818182 3 0.5 1.0
CLUSTER_106 1 13 4 0.07692307692307693 3 1.0 1.0
CLUSTER_107 7 9 0 0.7777777777777778 1 0.125 0.8571428571428571
CLUSTER_108 1 7 4 0.14285714285714285 3 1.0 1.0
CLUSTER_109 3 12 4 0.25 3 0.3333333333333333 1.0
CLUSTER_11 2 12 12 0.16666666666666666 3 0.5 1.0
CLUSTER_110 1 1 1 1.0 4 1.0 1.0
CLUSTER_111 1 3 3 0.3333333333333333 4 1.0 0.0
CLUSTER_112 3 20 7 0.15 3 0.2
```
We divide the data above into four clusters: the same resistance gene cluster and other resistance gene clusters are set as positive samples, non-resistance gene clusters are set as negative samples, and unknown function clusters are set as samples to be predicted. By analyzing the dataset, we found that there is a class imbalance problem between positive and negative samples. Starting from the average performance, we finally chose to use random forest as our classification model. Based on the trained classification model, we predict the unknown clusters and obtain the final classification result.
The data of the predicted candidates is saved in ``NewGene_Cas/``, which contains the gene accessions (.lst) and corresponding protein sequences (.faa) of each cluster with unknown function predicted as Cas.
## 👥 Authors and acknowledgment
🌟Thanks to [Maxwell-Wong](https://github.com/Maxwell-Wong), [zhengzhch](https://github.com/zhengzhch) and [SiduoLi2020](https://github.com/SiduoLi2020)
## 💡Reference
**1.Shmakov, S.A., Faure, G., Makarova, K.S. *et al.* Systematic prediction of functionally linked genes in bacterial and archaeal genomes. *Nat Protoc* 14, 3013–3031 (2019). https://doi.org/10.1038/s41596-019-0211-1**
| PypiClean |
/LinOTP-2.11.1.tar.gz/LinOTP-2.11.1/linotp/controllers/reporting.py | import logging
from datetime import datetime
from pylons import request, response, config, tmpl_context as c
from linotp.lib.base import BaseController
from linotp.lib.context import request_context
from linotp.lib.policy import (checkAuthorisation,
PolicyException,
getAdminPolicies)
from linotp.lib.realm import match_realms
from linotp.lib.reply import (sendResult,
sendError,
sendResultIterator,
sendCSVIterator)
from linotp.lib.reporting import ReportingIterator
from linotp.lib.reporting import get_max
from linotp.lib.reporting import delete
from linotp.lib.user import (getUserFromRequest, )
from linotp.lib.util import check_session
from linotp.lib.util import get_client
from linotp.model.meta import Session
audit = config.get('audit')
log = logging.getLogger(__name__)
class ReportingController(BaseController):
"""
reporting
"""
    def __before__(self, action, **params):
        """
        Pylons lifecycle hook, executed before every action of this
        controller.

        Sets up the audit context, validates the admin session and checks
        the 'reporting.access' policy authorisation for the requested
        action.

        :param action: name of the controller action about to be invoked
        :param params: additional routing parameters (unused here)
        :return: the unmodified request on success, otherwise a serialized
                 error response
        """
        try:
            c.audit = request_context['audit']
            c.audit['success'] = False
            c.audit['client'] = get_client(request)
            # Session handling - raises if the admin session is not valid
            check_session(request)
            request_context['Audit'] = audit
            # authorisation check must come after the session is verified
            checkAuthorisation(scope='reporting.access', method=action)
            return request
        except Exception as exception:
            log.exception(exception)
            # discard any partial database work from the failed setup
            Session.rollback()
            Session.close()
            return sendError(response, exception, context='before')
    def __after__(self, action):
        """
        Pylons lifecycle hook, executed after every action of this
        controller.

        Writes the audit log entry for the finished action and commits the
        database session; on failure the session is rolled back and the
        error is serialized for the client.

        :param action: name of the controller action that was invoked
        :return: the request on success, otherwise a serialized error
        """
        try:
            c.audit['administrator'] = getUserFromRequest(request).get('login')
            audit.log(c.audit)
            # commit before returning so the audit entry is persisted
            Session.commit()
            return request
        except Exception as exception:
            log.exception(exception)
            Session.rollback()
            return sendError(response, exception, context='after')
        finally:
            # the session is closed in every case, success or failure
            Session.close()
def maximum(self):
"""
method:
reporting/maximum
description:
return the maximum of tokens in a given realm with given status
arguments:
* realms - required: takes realms, only the reporting entries for
this realms will be displayed
* status - optional: (default is 'active')
takes assigned/unassigned/active/ etc.
and shows max of lines in database with this characteristic
returns:
a json result with:
{ "head": [],
"data": [ [row1], [row2] .. ]
}
exception:
if an error occurs an exception is serialized and returned
:return:
"""
result = {}
try:
request_realms = self.request_params.get('realms', '').split(',')
status = self.request_params.get('status', ['total'])
if status != ['total']:
status = status.split(',')
realm_whitelist = []
policies = getAdminPolicies('maximum', scope='reporting.access')
if policies['active'] and policies['realms']:
realm_whitelist = policies.get('realms')
# if there are no policies for us, we are allowed to see all realms
if not realm_whitelist or '*' in realm_whitelist:
realm_whitelist = request_context['Realms'].keys()
realms = match_realms(request_realms, realm_whitelist)
for realm in realms:
result[realm] = {}
for stat in status:
result[realm][stat] = get_max(realm, stat)
return sendResult(response, result)
except PolicyException as policy_exception:
log.exception(policy_exception)
Session.rollback()
return sendError(response, unicode(policy_exception), 1)
except Exception as exc:
log.exception(exc)
Session.rollback()
return sendError(response, exc)
finally:
Session.close()
def delete_all(self):
"""
method:
reporting/delete_all
description:
delete entries from the reporting database table
arguments:
* realms - optional: takes realms, only the reporting entries
from this realm are dedleted
* status - optional: filters reporting entries by status
like 'assigned' or 'inactive'
returns: dict in which value is the number of deleted rows
exception:
if an error occurs an exception is serialized and returned
"""
try:
request_realms = self.request_params.get('realms', '').split(',')
status = self.request_params.get('status', ['total'])
if status != ['total']:
status = status.split(',')
realm_whitelist = []
policies = getAdminPolicies('delete_all', scope='reporting.access')
if policies['active'] and policies['realms']:
realm_whitelist = policies.get('realms')
# if there are no policies for us, we are allowed to see all realms
if not realm_whitelist or '*' in realm_whitelist:
realm_whitelist = request_context['Realms'].keys()
realms = match_realms(request_realms, realm_whitelist)
if '*' in status:
status.remove('*')
status.extend(['active', 'inactive', 'assigned', 'unassigned',
'active&assigned', 'active&unassigned',
'inactive&assigned', 'inactive&unassigned',
'total'])
result = delete(realms=realms, status=status)
Session.commit()
return sendResult(response, result)
except PolicyException as policy_exception:
log.exception(policy_exception)
Session.rollback()
return sendError(response, unicode(policy_exception), 1)
except Exception as exc:
log.exception(exc)
Session.rollback()
return sendError(response, exc)
finally:
Session.close()
def delete_before(self):
"""
method:
reporting/delete_before
description:
delete all entries from reporting database with respect to the
arguments
date must be given in format: 'yyyy-mm-dd'
arguments:
* date - optional: only delete entries which are older than date;
date must be given in format 'yyyy-mm-dd'
if no date is given, all entries get deleted
* realms - optional: takes realms, only the reporting entries
from this realm are dedleted
* status - optional: filters reporting entries by status
like 'assigned' or 'inactive'
returns: dict in which value is the number of deleted rows
exception:
if an error occurs an exception is serialized and returned
"""
try:
request_realms = self.request_params.get('realms', '').split(',')
status = self.request_params.get('status', ['total'])
if status != ['total']:
status = status.split(',')
border_day = self.request_params.get('date')
# this may throw ValueError if date is in wrong format
datetime.strptime(border_day, "%Y-%m-%d")
realm_whitelist = []
policies = getAdminPolicies('delete_before', scope='reporting')
if policies['active'] and policies['realms']:
realm_whitelist = policies.get('realms')
# if there are no policies for us, we are allowed to see all realms
if not realm_whitelist or '*' in realm_whitelist:
realm_whitelist = request_context['Realms'].keys()
realms = match_realms(request_realms, realm_whitelist)
result = delete(date=border_day, realms=realms, status=status)
Session.commit()
return sendResult(response, result)
except PolicyException as policy_exception:
log.exception(policy_exception)
Session.rollback()
return sendError(response, unicode(policy_exception), 1)
except ValueError as value_error:
log.exception(value_error)
Session.rollback()
return sendError(response, unicode(value_error), 1)
except Exception as exc:
log.exception(exc)
Session.rollback()
return sendError(response, exc)
finally:
Session.close()
def show(self):
"""
method:
reporting/show
description:
show entries from the reporting database table
arguments:
* date - optional: only show entries which are newer than date;
date must be given in format 'yyyy-mm-dd'
if no date is given, all entries are shown
* realms - optional: takes realms, only the reporting entries
from this realm are shown
* status - optional: filters reporting entries by status
like 'assigned' or 'inactive'
* sortby - optional: sort the output by column
* sortdir - optional: asc/desc
* page - optional: reqeuest a certain page
* pagesize - optional: limit the number of returned tokens
* outform - optional: if set to "csv", the output will be a .csv file
returns: a json result with:
{ "head": [],
"data": [ [row1]
, [row2]
, [row3] .. ]
}
in case of csv:
first line: header of columns
other lines: column values
exception:
if an error occurs an exception is serialized and returned
"""
try:
param = self.request_params
page = param.get('page')
sort = param.get('sortby')
sortdir = param.get('sortdir')
psize = param.get('pagesize')
output_format = param.get('outform', 'json')
request_realms = param.get('realms', '').split(',')
status = param.get('status', [])
border_day = param.get('date')
if border_day:
# this may throw ValueError if date is in wrong format
datetime.strptime(border_day, "%Y-%m-%d")
realm_whitelist = []
policies = getAdminPolicies('show', scope='reporting.access')
if policies['active'] and policies['realms']:
realm_whitelist = policies.get('realms')
# if there are no policies for us, we are allowed to see all realms
if not realm_whitelist or '*' in realm_whitelist:
realm_whitelist = request_context['Realms'].keys()
realms = match_realms(request_realms, realm_whitelist)
reports = ReportingIterator(realms=realms, status=status, date=None,
page=page, psize=psize, sort=sort,
sortdir=sortdir)
info = reports.getResultSetInfo()
c.audit['success'] = True
Session.commit()
if output_format == 'csv':
response.content_type = "application/force-download"
response.headers['Content-disposition'] = \
'attachment; filename=linotp-reports.csv'
return sendCSVIterator(reports.iterate_reports())
else:
response.content_type = 'application/json'
return sendResultIterator(reports.iterate_reports(), opt=info)
except PolicyException as policy_exception:
log.exception(policy_exception)
Session.rollback()
return sendError(response, unicode(policy_exception), 1)
except ValueError as value_error:
log.exception(value_error)
Session.rollback()
return sendError(response, unicode(value_error), 1)
except Exception as exc:
log.exception(exc)
Session.rollback()
return sendError(response, exc)
finally:
Session.close() | PypiClean |
/BlueWhale3-Recommendation-0.1.3.tar.gz/BlueWhale3-Recommendation-0.1.3/doc/source/scripting/rating/trustsvd.rst | TrustSVD
========
TrustSVD is a trust-based matrix factorization, which extends SVD++ with trust
information.
User's predictions are defined as follows:
.. math::
\hat { r }_{ ui }=\mu +b_{ u }+b_{ i }+{ q_{ i } }^{ \top }\left( p_{ u }+{ \left| { I }_{ u } \right| }^{ -\frac { 1 }{ 2 } }\sum _{ i\in { I }_{ u } } y_{ i }+{ \left| { T }_{ u } \right| }^{ -\frac { 1 }{ 2 } }\sum _{ v\in { T }_{ u } } w_{ v } \right)
We learn the values of involved parameters by minimizing the regularized squared error function associated with:
.. math::
\begin{split}
\mathcal{L} &=\frac { 1 }{ 2 } \sum _{ u }{ \sum _{ j\in { I }_{ u } }{ { ({ \hat { r } }_{ u,j } -{ r }_{ u,j }) }^{ 2 } } } + \frac { { \lambda }_{ t } }{ 2 } \sum _{ u }{ \sum _{ v\in { T }_{ u } }{ { ( { \hat { t } }_{ u,v } -{ t }_{ u,v }) }^{ 2 } } } \\
&+\frac { { \lambda } }{ 2 } \sum _{ u }^{ }{ { \left| { I }_{ u } \right| }^{ -\frac { 1 }{ 2 } }{ b }_{ u }^{ 2 } } +\frac { { \lambda } }{ 2 } \sum _{ j }{ { \left| { U }_{ j } \right| }^{ -\frac { 1 }{ 2 } }{ b }_{ j }^{ 2 } } \\
&+\sum _{ u }^{ }{ (\frac { { \lambda } }{ 2 } { \left| { I }_{ u } \right| }^{ -\frac { 1 }{ 2 } }+\frac { { \lambda }_{ t } }{ 2 } { \left| { T }_{ u } \right| }^{ -\frac { 1 }{ 2 } }{ )\left\| { p }_{ u } \right\| }_{ F }^{ 2 } } \\
&+\frac { { \lambda } }{ 2 } \sum _{ j }{ { \left| { U }_{ j } \right| }^{ -\frac { 1 }{ 2 } }{ \left\| { q }_{ j } \right\| }_{ F }^{ 2 } } +\frac { { \lambda } }{ 2 } \sum _{ i }{ { \left| { U }_{ i } \right| }^{ -\frac { 1 }{ 2 } }{ \left\| { y }_{ i } \right\| }_{ F }^{ 2 } } \\
&+\frac { { \lambda } }{ 2 } { \left| { T }_{ v }^{ + } \right| }^{ -\frac { 1 }{ 2 } }{ \left\| { w }_{ v } \right\| }_{ F }^{ 2 }
\end{split}
Example
-------
.. code-block:: python
:linenos:
import Orange
from orangecontrib.recommendation import TrustSVDLearner
# Load data and train the model
ratings = Orange.data.Table('filmtrust/ratings.tab')
trust = Orange.data.Table('filmtrust/trust.tab')
learner = TrustSVDLearner(num_factors=15, num_iter=25, learning_rate=0.07,
lmbda=0.1, social_lmbda=0.05, trust=trust)
recommender = learner(data)
# Make predictions
prediction = recommender(data[:3])
print(prediction)
.. autoclass:: TrustSVDLearner
:members:
:special-members: __init__
| PypiClean |
/IDEA_SESIT_NEW-0.0.1.tar.gz/IDEA_SESIT_NEW-0.0.1/main/OSeMOSYS/format_data.py | import csv
import pandas as pd
def data_formatting(input_file):
capacity, new_capacity, dELEC = False, False, False
capacity_rows, new_capacity_rows, dELEC_rows = [],[],[]
cap_columns, new_capacity_columns, dELEC_columns = None, None, None
region = None
with open(input_file) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
if capacity:
if len(row) == 0:
capacity = False
continue
if not row[0]:
cap_columns = row
else:
capacity_rows.append(row)
if dELEC:
if len(row) == 0:
dELEC = False
elif row[1] == "dELEC":
continue
elif not row[0]:
dELEC_columns = row
else:
dELEC_rows.append(row)
if new_capacity:
if len(row) == 0:
continue
if not row[0]:
new_capacity_columns = row
else:
new_capacity_rows.append(row)
if row[0] == "2065":
new_capacity = False
break
if len(row) != 0:
if row[0] == "Summary":
region = row[1]
if row[0] == "TotalAnnualCapacity (GW)":
capacity = True
if row[0] == "New Annual Capacity (GW)":
new_capacity = True
if row[0] == "Annual Electricity Generation for dELEC(GWh)":
dELEC = True
# columns = ["Model", "Scenario", "Region", "Variable", "Unit"]
# capacity_df = pd.DataFrame([], columns=cap_columns)
# dELEC_df = pd.DataFrame([], columns=emission_columns)
years = []
entries = []
for i,gen_type in enumerate(cap_columns[20:36]):
got_years = False
scenario = None
if gen_type[0] == 'g':
if "WIND" in gen_type:
scenario = 'g'
gen_type = "WIND"
elif "gen" in gen_type:
scenario = 'g'
gen_type = gen_type[3:]
else:
scenario = 'g'
gen_type = gen_type[1:]
elif gen_type[0] == 'p':
scenario = 'Pellet'
gen_type = gen_type[1:]
elif gen_type[0] == 'r':
scenario = 'Residue'
gen_type = gen_type[1:]
variable = f"Total Capacity|{gen_type}"
capacity_values = []
for j,row in enumerate(capacity_rows):
if i != 0:
got_years = True
if not got_years:
year = row[0]
years.append(year)
capacity_at_year = float(row[i+20])
capacity_values.append(capacity_at_year)
values = ["OSMOSIS", scenario, region, variable, "GW"]
values += capacity_values
entries.append(values)
columns = ["Model", "Scenario", "Region", "Variable", "Unit"]
columns += years
capacity_df = pd.DataFrame([], columns=columns)
for i,row in enumerate(entries):
new_row = pd.DataFrame([row], columns=columns)
capacity_df = pd.concat([capacity_df,new_row], ignore_index=True)
entries = []
for i,gen_type in enumerate(dELEC_columns[20:36]):
scenario = None
if gen_type[0] == 'g':
if "WIND" in gen_type:
scenario = 'g'
gen_type = "WIND"
elif "gen" in gen_type:
scenario = 'g'
gen_type = gen_type[3:]
else:
scenario = 'g'
gen_type = gen_type[1:]
elif gen_type[0] == 'p':
scenario = 'p'
gen_type = gen_type[1:]
elif gen_type[0] == 'r':
scenario = 'r'
gen_type = gen_type[1:]
variable = f"Electricity Generation|{gen_type}"
got_years = False
dELEC_values = []
for j,row in enumerate(dELEC_rows):
if i != 0:
got_years = True
if not got_years:
years.append(row[0])
dELEC_at_year = float(row[i+20])
dELEC_values.append(dELEC_at_year)
values = ["OSMOSIS", scenario, region, variable, "GWh"]
values += dELEC_values
entries.append(values)
dELEC_df = pd.DataFrame([], columns=columns)
for i,row in enumerate(entries):
new_row = pd.DataFrame([row], columns=columns)
dELEC_df = pd.concat([dELEC_df,new_row], ignore_index=True)
entries = []
for i,gen_type in enumerate(new_capacity_columns[20:36]):
scenario = "None"
if gen_type[0] == 'g':
if "WIND" in gen_type:
scenario = 'g'
gen_type = "WIND"
elif "gen" in gen_type:
scenario = 'g'
gen_type = gen_type[3:]
else:
scenario = 'g'
gen_type = gen_type[1:]
elif gen_type[0] == 'p':
scenario = 'p'
gen_type = gen_type[1:]
elif gen_type[0] == 'r':
scenario = 'r'
gen_type = gen_type[1:]
variable = f"New Capacity|{gen_type}"
got_years = False
new_capacity_values = []
for j,row in enumerate(new_capacity_rows):
if i != 0:
got_years = True
if not got_years:
years.append(row[0])
new_capacity_at_year = float(row[i+20])
new_capacity_values.append(new_capacity_at_year)
values = ["OSMOSIS", scenario, region, variable, "GW"]
values += new_capacity_values
entries.append(values)
new_capacity_df = pd.DataFrame([], columns=columns)
for i,row in enumerate(entries):
new_row = pd.DataFrame([row], columns=columns)
new_capacity_df = pd.concat([new_capacity_df,new_row], ignore_index=True)
total_df = pd.concat([capacity_df,dELEC_df], ignore_index=True)
total_df = pd.concat([total_df,new_capacity_df], ignore_index=True)
emissions_df = annual_emissions(input_file)
total_df = pd.concat([total_df, emissions_df], ignore_index = True)
total_df = total_df.groupby(["Model", "Scenario", "Region", "Variable", "Unit"], sort = False).sum().reset_index()
total_df.to_csv(f"{input_file.split('.')[0]}-formatted.csv")
return region
def annual_emissions(input_file):
emissions = False
rows = []
columns = []
region = None
with open(input_file) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
if emissions:
if len(row) == 0:
emissions = False
elif row[1] == region:
continue
elif not row[0]:
columns = row
else:
rows.append(row)
if len(row) == 0:
continue
else:
if row[0] == "Summary":
region = row[1]
if row[0] == "Annual Emissions (Emissions Units)":
emissions = True
years = []
entries = []
got_years = False
for i,emission in enumerate(columns[1:2]):
variable = f"Annual Emissions|{emission}"
values = []
for j,row in enumerate(rows):
if i != 0:
got_years = True
if not got_years:
year = row[0]
years.append(year)
values.append(row[i+1])
entry = ["OSMOSIS", "g", region, variable, "Emission Units"]
entry += values
entries.append(entry)
columns = ["Model", "Scenario", "Region", "Variable", "Unit"]
columns += years
emissions_df = pd.DataFrame([], columns=columns)
for row in entries:
new_row = pd.DataFrame([row], columns=columns)
emissions_df = pd.concat([emissions_df,new_row], ignore_index=True)
return emissions_df | PypiClean |
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/pip/_vendor/chardet/universaldetector.py | import codecs
import logging
import re
from .charsetgroupprober import CharSetGroupProber
from .enums import InputState, LanguageFilter, ProbingState
from .escprober import EscCharSetProber
from .latin1prober import Latin1Prober
from .mbcsgroupprober import MBCSGroupProber
from .sbcsgroupprober import SBCSGroupProber
class UniversalDetector(object):
"""
The ``UniversalDetector`` class underlies the ``chardet.detect`` function
and coordinates all of the different charset probers.
To get a ``dict`` containing an encoding and its confidence, you can simply
run:
.. code::
u = UniversalDetector()
u.feed(some_bytes)
u.close()
detected = u.result
"""
MINIMUM_THRESHOLD = 0.20
HIGH_BYTE_DETECTOR = re.compile(b'[\x80-\xFF]')
ESC_DETECTOR = re.compile(b'(\033|~{)')
WIN_BYTE_DETECTOR = re.compile(b'[\x80-\x9F]')
ISO_WIN_MAP = {'iso-8859-1': 'Windows-1252',
'iso-8859-2': 'Windows-1250',
'iso-8859-5': 'Windows-1251',
'iso-8859-6': 'Windows-1256',
'iso-8859-7': 'Windows-1253',
'iso-8859-8': 'Windows-1255',
'iso-8859-9': 'Windows-1254',
'iso-8859-13': 'Windows-1257'}
def __init__(self, lang_filter=LanguageFilter.ALL):
self._esc_charset_prober = None
self._charset_probers = []
self.result = None
self.done = None
self._got_data = None
self._input_state = None
self._last_char = None
self.lang_filter = lang_filter
self.logger = logging.getLogger(__name__)
self._has_win_bytes = None
self.reset()
def reset(self):
"""
Reset the UniversalDetector and all of its probers back to their
initial states. This is called by ``__init__``, so you only need to
call this directly in between analyses of different documents.
"""
self.result = {'encoding': None, 'confidence': 0.0, 'language': None}
self.done = False
self._got_data = False
self._has_win_bytes = False
self._input_state = InputState.PURE_ASCII
self._last_char = b''
if self._esc_charset_prober:
self._esc_charset_prober.reset()
for prober in self._charset_probers:
prober.reset()
def feed(self, byte_str):
"""
Takes a chunk of a document and feeds it through all of the relevant
charset probers.
After calling ``feed``, you can check the value of the ``done``
attribute to see if you need to continue feeding the
``UniversalDetector`` more data, or if it has made a prediction
(in the ``result`` attribute).
.. note::
You should always call ``close`` when you're done feeding in your
document if ``done`` is not already ``True``.
"""
if self.done:
return
if not len(byte_str):
return
if not isinstance(byte_str, bytearray):
byte_str = bytearray(byte_str)
# First check for known BOMs, since these are guaranteed to be correct
if not self._got_data:
# If the data starts with BOM, we know it is UTF
if byte_str.startswith(codecs.BOM_UTF8):
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8-SIG",
'confidence': 1.0,
'language': ''}
elif byte_str.startswith((codecs.BOM_UTF32_LE,
codecs.BOM_UTF32_BE)):
# FF FE 00 00 UTF-32, little-endian BOM
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32",
'confidence': 1.0,
'language': ''}
elif byte_str.startswith(b'\xFE\xFF\x00\x00'):
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0,
'language': ''}
elif byte_str.startswith(b'\x00\x00\xFF\xFE'):
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0,
'language': ''}
elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)):
# FF FE UTF-16, little endian BOM
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16",
'confidence': 1.0,
'language': ''}
self._got_data = True
if self.result['encoding'] is not None:
self.done = True
return
# If none of those matched and we've only see ASCII so far, check
# for high bytes and escape sequences
if self._input_state == InputState.PURE_ASCII:
if self.HIGH_BYTE_DETECTOR.search(byte_str):
self._input_state = InputState.HIGH_BYTE
elif self._input_state == InputState.PURE_ASCII and \
self.ESC_DETECTOR.search(self._last_char + byte_str):
self._input_state = InputState.ESC_ASCII
self._last_char = byte_str[-1:]
# If we've seen escape sequences, use the EscCharSetProber, which
# uses a simple state machine to check for known escape sequences in
# HZ and ISO-2022 encodings, since those are the only encodings that
# use such sequences.
if self._input_state == InputState.ESC_ASCII:
if not self._esc_charset_prober:
self._esc_charset_prober = EscCharSetProber(self.lang_filter)
if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT:
self.result = {'encoding':
self._esc_charset_prober.charset_name,
'confidence':
self._esc_charset_prober.get_confidence(),
'language':
self._esc_charset_prober.language}
self.done = True
# If we've seen high bytes (i.e., those with values greater than 127),
# we need to do more complicated checks using all our multi-byte and
# single-byte probers that are left. The single-byte probers
# use character bigram distributions to determine the encoding, whereas
# the multi-byte probers use a combination of character unigram and
# bigram distributions.
elif self._input_state == InputState.HIGH_BYTE:
if not self._charset_probers:
self._charset_probers = [MBCSGroupProber(self.lang_filter)]
# If we're checking non-CJK encodings, use single-byte prober
if self.lang_filter & LanguageFilter.NON_CJK:
self._charset_probers.append(SBCSGroupProber())
self._charset_probers.append(Latin1Prober())
for prober in self._charset_probers:
if prober.feed(byte_str) == ProbingState.FOUND_IT:
self.result = {'encoding': prober.charset_name,
'confidence': prober.get_confidence(),
'language': prober.language}
self.done = True
break
if self.WIN_BYTE_DETECTOR.search(byte_str):
self._has_win_bytes = True
def close(self):
"""
Stop analyzing the current document and come up with a final
prediction.
:returns: The ``result`` attribute, a ``dict`` with the keys
`encoding`, `confidence`, and `language`.
"""
# Don't bother with checks if we're already done
if self.done:
return self.result
self.done = True
if not self._got_data:
self.logger.debug('no data received!')
# Default to ASCII if it is all we've seen so far
elif self._input_state == InputState.PURE_ASCII:
self.result = {'encoding': 'ascii',
'confidence': 1.0,
'language': ''}
# If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD
elif self._input_state == InputState.HIGH_BYTE:
prober_confidence = None
max_prober_confidence = 0.0
max_prober = None
for prober in self._charset_probers:
if not prober:
continue
prober_confidence = prober.get_confidence()
if prober_confidence > max_prober_confidence:
max_prober_confidence = prober_confidence
max_prober = prober
if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD):
charset_name = max_prober.charset_name
lower_charset_name = max_prober.charset_name.lower()
confidence = max_prober.get_confidence()
# Use Windows encoding name instead of ISO-8859 if we saw any
# extra Windows-specific bytes
if lower_charset_name.startswith('iso-8859'):
if self._has_win_bytes:
charset_name = self.ISO_WIN_MAP.get(lower_charset_name,
charset_name)
self.result = {'encoding': charset_name,
'confidence': confidence,
'language': max_prober.language}
# Log all prober confidences if none met MINIMUM_THRESHOLD
if self.logger.getEffectiveLevel() == logging.DEBUG:
if self.result['encoding'] is None:
self.logger.debug('no probers hit minimum threshold')
for group_prober in self._charset_probers:
if not group_prober:
continue
if isinstance(group_prober, CharSetGroupProber):
for prober in group_prober.probers:
self.logger.debug('%s %s confidence = %s',
prober.charset_name,
prober.language,
prober.get_confidence())
else:
self.logger.debug('%s %s confidence = %s',
prober.charset_name,
prober.language,
prober.get_confidence())
return self.result | PypiClean |
/Mathics-1.0.tar.gz/Mathics-1.0/mathics/web/media/js/mathjax/jax/output/SVG/fonts/TeX/Size1/Regular/Main.js | MathJax.OutputJax.SVG.FONTDATA.FONTS.MathJax_Size1={directory:"Size1/Regular",family:"MathJax_Size1",id:"MJSZ1",32:[0,0,250,0,0,""],40:[850,349,458,152,422,"152 251Q152 646 388 850H416Q422 844 422 841Q422 837 403 816T357 753T302 649T255 482T236 250Q236 124 255 19T301 -147T356 -251T403 -315T422 -340Q422 -343 416 -349H388Q359 -325 332 -296T271 -213T212 -97T170 56T152 251"],41:[850,349,458,35,305,"305 251Q305 -145 69 -349H56Q43 -349 39 -347T35 -338Q37 -333 60 -307T108 -239T160 -136T204 27T221 250T204 473T160 636T108 740T60 807T35 839Q35 850 50 850H56H69Q197 743 256 566Q305 425 305 251"],47:[850,349,578,55,522,"481 838Q489 850 500 850Q508 850 515 844T522 827Q521 824 311 248T96 -337Q90 -349 77 -349Q68 -349 62 -343T55 -326Q56 -323 266 253T481 838"],91:[850,349,417,202,394,"202 -349V850H394V810H242V-309H394V-349H202"],92:[850,349,578,54,522,"522 -326Q522 -337 515 -343T500 -349Q487 -349 481 -337Q477 -328 267 248T55 827Q54 835 60 842T76 850Q89 850 96 838Q100 829 310 253T522 -326"],93:[850,349,417,22,214,"22 810V850H214V-349H22V-309H174V810H22"],123:[851,349,583,105,477,"477 -343L471 -349H458Q432 -349 367 -325T273 -263Q258 -245 250 -212L249 -51Q249 -27 249 12Q248 118 244 128Q243 129 243 130Q220 189 121 228Q109 232 107 235T105 250Q105 256 105 257T105 261T107 265T111 268T118 272T128 276T142 283T162 291Q224 324 243 371Q243 372 244 373Q248 384 249 469Q249 475 249 489Q249 528 249 552L250 714Q253 728 256 736T271 761T299 789T347 816T422 843Q440 849 441 849H443Q445 849 447 849T452 850T457 850H471L477 844V830Q477 820 476 817T470 811T459 807T437 801T404 785Q353 760 338 724Q333 710 333 550Q333 526 333 492T334 447Q334 393 327 368T295 318Q257 280 181 255L169 251L184 245Q318 198 332 112Q333 106 333 -49Q333 -209 338 -223Q351 -255 391 -277T469 -309Q477 -311 477 -329V-343"],125:[850,349,583,105,477,"110 849L115 850Q120 850 125 850Q151 850 215 826T309 764Q324 747 332 714L333 552Q333 
528 333 489Q334 383 338 373Q339 372 339 371Q353 336 391 310T469 271Q477 268 477 251Q477 241 476 237T472 232T456 225T428 214Q357 179 339 130Q339 129 338 128Q334 117 333 32Q333 26 333 12Q333 -27 333 -51L332 -212Q328 -228 323 -240T302 -271T255 -307T175 -338Q139 -349 125 -349T108 -346T105 -329Q105 -314 107 -312T130 -304Q233 -271 248 -209Q249 -203 249 -49V57Q249 106 253 125T273 167Q307 213 398 245L413 251L401 255Q265 300 250 389Q249 395 249 550Q249 710 244 724Q224 774 112 811Q105 813 105 830Q105 845 110 849"],710:[744,-551,556,-8,564,"279 669Q273 669 142 610T9 551L0 569Q-8 585 -8 587Q-8 588 -7 588L12 598Q30 608 66 628T136 666L277 744L564 587L555 569Q549 556 547 554T544 552Q539 555 410 612T279 669"],732:[722,-597,556,1,554,"374 597Q337 597 269 627T160 658Q101 658 34 606L24 597L12 611Q1 624 1 626Q1 627 27 648T55 671Q120 722 182 722Q219 722 286 692T395 661Q454 661 521 713L531 722L543 708Q554 695 554 693Q554 692 528 671T500 648Q434 597 374 597"],770:[744,-551,0,-564,8,"-277 669Q-283 669 -414 610T-547 551L-556 569Q-564 585 -564 587Q-564 588 -563 588L-544 598Q-526 608 -490 628T-420 666L-279 744L8 587L-1 569Q-7 556 -9 554T-12 552Q-17 555 -146 612T-277 669"],771:[722,-597,0,-555,-2,"-182 597Q-219 597 -287 627T-396 658Q-455 658 -522 606L-532 597L-544 611Q-555 624 -555 626Q-555 627 -529 648T-501 671Q-436 722 -374 722Q-337 722 -270 692T-161 661Q-102 661 -35 713L-25 722L-13 708Q-2 695 -2 693Q-2 692 -28 671T-56 648Q-122 597 -182 597"],8214:[602,0,778,257,521,"257 0V602H300V0H257ZM478 0V602H521V0H478"],8593:[600,0,667,112,555,"112 421L120 424Q127 427 136 430T161 441T191 458T224 481T260 510T295 546T328 591L333 600L340 589Q380 527 431 489T555 421V377L543 381Q445 418 368 492L355 504V0H312V504L299 492Q222 418 124 381L112 377V421"],8595:[600,0,667,112,555,"312 96V600H355V96L368 108Q445 182 543 219L555 223V179L546 176Q538 173 529 169T505 158T475 141T442 119T407 90T372 53T339 9L334 0L327 11Q287 73 236 111T112 179V223L124 219Q222 182 299 108L312 96"],8657:[599,0,778,57,721,"142 329Q300 419 
389 599Q389 598 399 579T420 541T452 494T497 438T558 383T636 329T708 294L721 289V246Q718 246 694 256T623 293T532 356L522 364L521 182V0H478V405L466 417Q436 450 389 516Q388 515 378 500T352 463T312 417L300 405V0H257V364L247 356Q202 320 155 293T82 256L57 246V289L70 294Q101 305 142 329"],8659:[600,-1,778,57,721,"257 236V600H300V195L312 183Q342 150 389 84Q390 85 400 100T426 137T466 183L478 195V600H521V418L522 236L532 244Q576 280 623 307T696 344L721 354V311L708 306Q677 295 636 271Q478 181 389 1Q389 2 379 21T358 59T326 106T281 162T220 217T142 271T70 306L57 311V354Q60 354 83 345T154 308T247 244L257 236"],8719:[750,250,944,55,888,"158 656Q147 684 131 694Q110 707 69 710H55V750H888V710H874Q840 708 820 698T795 678T786 656V-155Q798 -206 874 -210H888V-250H570V-210H584Q618 -208 638 -197T663 -178T673 -155V710H270V277L271 -155Q283 -206 359 -210H373V-250H55V-210H69Q103 -208 123 -197T148 -178T158 -155V656"],8720:[750,250,944,55,888,"158 656Q147 684 131 694Q110 707 69 710H55V750H373V710H359Q325 708 305 698T280 678T271 656L270 223V-210H673V656Q666 672 663 679T639 697T584 710H570V750H888V710H874Q840 708 820 698T795 678T786 656V-155Q798 -206 874 -210H888V-250H55V-210H69Q103 -208 123 -197T148 -178T158 -155V656"],8721:[750,250,1056,56,999,"61 748Q64 750 489 750H913L954 640Q965 609 976 579T993 533T999 516H979L959 517Q936 579 886 621T777 682Q724 700 655 705T436 710H319Q183 710 183 709Q186 706 348 484T511 259Q517 250 513 244L490 216Q466 188 420 134T330 27L149 -187Q149 -188 362 -188Q388 -188 436 -188T506 -189Q679 -189 778 -162T936 -43Q946 -27 959 6H999L913 -249L489 -250Q65 -250 62 -248Q56 -246 56 -239Q56 -234 118 -161Q186 -81 245 -11L428 206Q428 207 242 462L57 717L56 728Q56 744 61 748"],8730:[850,350,1000,111,1020,"263 249Q264 249 315 130T417 -108T470 -228L725 302Q981 837 982 839Q989 850 1001 850Q1008 850 1013 844T1020 832V826L741 243Q645 43 540 -176Q479 -303 469 -324T453 -348Q449 -350 436 -350L424 -349L315 -96Q206 156 205 156L171 130Q138 104 137 104L111 130L263 
249"],8739:[627,15,333,144,188,"146 612Q151 627 166 627Q182 627 187 612Q188 610 188 306T187 0Q184 -15 166 -15Q149 -15 146 0V10Q146 19 146 35T146 73T146 122T145 179T145 241T145 306T145 370T145 433T145 489T146 538T146 576T146 602V612"],8741:[627,15,556,144,410,"146 612Q151 627 166 627Q182 627 187 612Q188 610 188 306T187 0Q184 -15 166 -15Q149 -15 146 0V10Q146 19 146 35T146 73T146 122T145 179T145 241T145 306T145 370T145 433T145 489T146 538T146 576T146 602V612ZM368 612Q373 627 388 627Q404 627 409 612Q410 610 410 306T409 0Q406 -15 389 -15Q371 -15 368 0V10Q368 19 368 35T368 73T368 122T367 179T367 241T367 306T367 370T367 433T367 489T368 538T368 576T368 602V612"],8747:[805,306,472,55,610,"113 -244Q113 -246 119 -251T139 -263T167 -269Q186 -269 199 -260Q220 -247 232 -218T251 -133T262 -15T276 155T297 367Q300 390 305 438T314 512T325 580T340 647T361 703T390 751T428 784T479 804Q481 804 488 804T501 805Q552 802 581 769T610 695Q610 669 594 657T561 645Q542 645 527 658T512 694Q512 705 516 714T526 729T538 737T548 742L552 743Q552 745 545 751T525 762T498 768Q475 768 460 756T434 716T418 652T407 559T398 444T387 300T369 133Q349 -38 337 -102T303 -207Q256 -306 169 -306Q119 -306 87 -272T55 -196Q55 -170 71 -158T104 -146Q123 -146 138 -159T153 -195Q153 -206 149 -215T139 -230T127 -238T117 -242L113 -244"],8748:[805,306,819,55,957,"113 -244Q113 -246 119 -251T139 -263T167 -269Q186 -269 199 -260Q220 -247 232 -218T251 -133T262 -15T276 155T297 367Q300 390 305 438T314 512T325 580T340 647T361 703T390 751T428 784T479 804Q481 804 488 804T501 805Q552 802 581 769T610 695Q610 669 594 657T561 645Q542 645 527 658T512 694Q512 705 516 714T526 729T538 737T548 742L552 743Q552 745 545 751T525 762T498 768Q475 768 460 756T434 716T418 652T407 559T398 444T387 300T369 133Q349 -38 337 -102T303 -207Q256 -306 169 -306Q119 -306 87 -272T55 -196Q55 -170 71 -158T104 -146Q123 -146 138 -159T153 -195Q153 -206 149 -215T139 -230T127 -238T117 -242L113 -244ZM460 -244Q460 -246 466 -251T486 -263T514 -269Q532 -269 546 -260Q567 -247 579 
-218T598 -133T609 -15T623 155T644 367Q647 390 652 438T661 512T672 580T687 647T708 703T737 751T775 784T826 804Q828 804 835 804T848 805Q899 802 928 769T957 695Q957 669 941 657T908 645Q889 645 874 658T859 694Q859 705 863 714T873 729T885 737T895 742L899 743Q899 745 892 751T872 762T845 768Q822 768 807 756T781 716T765 652T754 559T745 444T734 300T716 133Q696 -38 684 -102T650 -207Q603 -306 516 -306Q466 -306 434 -272T402 -196Q402 -170 418 -158T451 -146Q470 -146 485 -159T500 -195Q500 -206 496 -215T486 -230T474 -238T464 -242L460 -244"],8749:[805,306,1166,55,1304,"113 -244Q113 -246 119 -251T139 -263T167 -269Q186 -269 199 -260Q220 -247 232 -218T251 -133T262 -15T276 155T297 367Q300 390 305 438T314 512T325 580T340 647T361 703T390 751T428 784T479 804Q481 804 488 804T501 805Q552 802 581 769T610 695Q610 669 594 657T561 645Q542 645 527 658T512 694Q512 705 516 714T526 729T538 737T548 742L552 743Q552 745 545 751T525 762T498 768Q475 768 460 756T434 716T418 652T407 559T398 444T387 300T369 133Q349 -38 337 -102T303 -207Q256 -306 169 -306Q119 -306 87 -272T55 -196Q55 -170 71 -158T104 -146Q123 -146 138 -159T153 -195Q153 -206 149 -215T139 -230T127 -238T117 -242L113 -244ZM460 -244Q460 -246 466 -251T486 -263T514 -269Q532 -269 546 -260Q567 -247 579 -218T598 -133T609 -15T623 155T644 367Q647 390 652 438T661 512T672 580T687 647T708 703T737 751T775 784T826 804Q828 804 835 804T848 805Q899 802 928 769T957 695Q957 669 941 657T908 645Q889 645 874 658T859 694Q859 705 863 714T873 729T885 737T895 742L899 743Q899 745 892 751T872 762T845 768Q822 768 807 756T781 716T765 652T754 559T745 444T734 300T716 133Q696 -38 684 -102T650 -207Q603 -306 516 -306Q466 -306 434 -272T402 -196Q402 -170 418 -158T451 -146Q470 -146 485 -159T500 -195Q500 -206 496 -215T486 -230T474 -238T464 -242L460 -244ZM807 -244Q807 -246 813 -251T833 -263T861 -269Q880 -269 893 -260Q914 -247 926 -218T945 -133T956 -15T970 155T991 367Q994 390 999 438T1008 512T1019 580T1034 647T1055 703T1084 751T1122 784T1173 804Q1175 804 1182 804T1195 805Q1246 802 
1275 769T1304 695Q1304 669 1288 657T1255 645Q1236 645 1221 658T1206 694Q1206 705 1210 714T1220 729T1232 737T1242 742L1246 743Q1246 745 1239 751T1219 762T1192 768Q1169 768 1154 756T1128 716T1112 652T1101 559T1092 444T1081 300T1063 133Q1043 -38 1031 -102T997 -207Q950 -306 863 -306Q813 -306 781 -272T749 -196Q749 -170 765 -158T798 -146Q817 -146 832 -159T847 -195Q847 -206 843 -215T833 -230T821 -238T811 -242L807 -244"],8750:[805,306,472,55,610,"269 74L256 80Q244 85 227 97T191 128T161 179T148 250Q148 332 199 379T302 433L306 434L307 444Q309 456 313 495T321 553T331 607T345 664T365 712T393 756T431 785T479 804Q481 804 488 804T501 805Q552 802 581 769T610 695Q610 669 594 657T561 645Q542 645 527 658T512 694Q512 705 516 714T526 729T538 737T548 742L552 743Q552 745 545 751T525 762T498 768Q471 768 454 752T427 693T414 626T406 536Q405 530 405 527L397 425L404 422Q410 419 421 413T445 399T470 376T494 345T511 303T518 250Q518 205 502 169T460 112T410 80T364 66L360 65L359 55Q357 38 353 4T346 -43T340 -81T333 -118T326 -148T316 -179T303 -207Q256 -306 169 -306Q119 -306 87 -272T55 -196Q55 -170 71 -158T104 -146Q123 -146 138 -159T153 -195Q153 -206 149 -215T139 -230T127 -238T117 -242L113 -244Q113 -246 119 -251T139 -263T167 -269Q186 -269 199 -260Q231 -241 242 -183T266 33L269 74ZM272 122Q272 156 300 391Q300 392 299 392Q287 392 263 379T213 331T187 249Q187 211 205 180T239 137T272 116V122ZM366 107Q378 107 402 119T453 167T479 249Q479 340 394 383V377Q394 375 394 374T393 371T393 366T392 357T391 342T389 321T386 291T382 251T377 199T369 133Q366 112 366 107"],8896:[750,249,833,55,777,"119 -249T97 -249T65 -235T55 -207Q55 -201 56 -198Q58 -190 218 268T380 729Q392 750 416 750Q438 750 451 732Q453 728 534 498T695 36L775 -194Q777 -204 777 -208Q777 -222 767 -235T735 -249Q713 -249 700 -231Q696 -225 557 177L416 579L276 177Q136 -226 132 -231Q119 -249 97 -249"],8897:[750,249,833,55,777,"55 708Q55 729 68 739T96 750Q119 750 132 731Q136 726 276 323L416 -79L557 323Q696 725 700 731Q713 749 735 749Q756 749 766 736T777 708Q777 
700 696 466T533 1T451 -232Q436 -249 416 -249Q402 -249 391 -241Q384 -236 380 -226Q368 -198 219 230Q55 697 55 708"],8898:[750,249,833,54,777,"139 -217Q127 -241 114 -246Q106 -249 97 -249Q67 -249 57 -220Q55 -214 55 102Q55 152 55 221T54 312Q54 422 60 464T91 554Q120 612 165 654T257 714T337 741T392 749Q393 750 402 750Q414 750 422 749Q557 749 660 659T776 430Q777 422 777 102Q777 -214 775 -220Q765 -249 735 -249Q716 -249 708 -241T694 -217L692 428L690 441Q674 540 597 603T416 666H409Q388 666 364 662T294 638T212 581Q156 523 142 441L140 428L139 105V-217"],8899:[750,249,833,55,777,"96 750Q103 750 109 748T120 744T127 737T133 730T137 723T139 718V395L140 73L142 60Q159 -43 237 -104T416 -166Q521 -166 597 -103T690 60L692 73L694 718Q708 749 735 749Q765 749 775 720Q777 714 777 398Q777 78 776 71Q766 -51 680 -140Q571 -249 416 -249H411Q261 -249 152 -140Q66 -51 56 71Q55 78 55 398Q55 714 57 720Q60 734 70 740Q80 750 96 750"],8968:[850,349,472,202,449,"202 -349V850H449V810H242V-349H202"],8969:[850,349,472,22,269,"22 810V850H269V-349H229V810H22"],8970:[850,349,472,202,449,"202 -349V850H242V-309H449V-349H202"],8971:[850,349,472,22,269,"229 -309V850H269V-349H22V-309H229"],9168:[602,0,667,312,355,"312 0V602H355V0H312"],10216:[850,350,472,96,394,"373 850Q392 850 394 832Q394 825 267 538L139 250L267 -38Q394 -325 394 -332Q392 -350 375 -350Q361 -350 356 -338Q354 -331 289 -186T161 103T97 250T160 397T289 685T356 838Q362 850 373 850"],10217:[850,350,472,77,375,"77 832Q77 837 82 843T98 850Q110 849 115 838Q117 831 182 686T310 397T374 250T311 103T182 -185T115 -338Q110 -350 96 -350Q79 -350 77 -332Q77 -325 204 -38L332 250L204 538Q77 825 77 832"],10752:[750,250,1111,56,1054,"555 -250Q420 -250 306 -185T124 -4T56 250Q56 453 193 595T526 749Q528 750 539 750Q554 750 562 749Q688 749 800 687T983 508T1054 250Q1054 112 987 -3T806 -184T555 -250ZM555 -165Q672 -165 767 -108T916 44T970 250Q970 418 861 532T600 664Q591 665 548 665Q446 665 353 614T200 466T140 250V243Q140 88 248 -30Q262 -46 280 -62T338 -105T434 -148T555 -165ZM478 
250Q478 288 503 307T551 326Q586 326 609 305T632 250Q632 217 610 196T555 174T500 196T478 250"],10753:[750,250,1111,56,1054,"555 -250Q420 -250 306 -185T124 -4T56 250Q56 453 193 595T526 749Q528 750 539 750Q554 750 562 749Q688 749 800 687T983 508T1054 250Q1054 112 987 -3T806 -184T555 -250ZM513 478Q513 664 512 664Q504 664 481 660T406 637T313 588Q281 564 255 537T211 483T181 431T161 382T150 342T144 310T141 292H513V478ZM798 588Q758 616 711 634T639 658T602 663L597 664V292H969Q969 293 967 309T960 341T949 381T930 430T900 482T856 537T798 588ZM513 -164V208H141Q142 205 144 189T149 160T158 125T173 83T196 39T229 -9Q249 -34 273 -55T318 -92T363 -119T405 -138T444 -150T475 -158T499 -162T513 -164ZM775 -103Q801 -87 823 -68T863 -30T894 10T919 49T937 88T950 123T959 154T964 180T968 198L969 208H597V-164Q599 -163 616 -161T647 -155T683 -145T728 -128T775 -103"],10754:[750,250,1111,56,1054,"555 -250Q420 -250 306 -185T124 -4T56 250Q56 453 193 595T526 749Q528 750 539 750Q554 750 562 749Q688 749 800 687T983 508T1054 250Q1054 112 987 -3T806 -184T555 -250ZM600 664Q591 665 548 665Q414 665 306 583L292 573L423 441L555 310L687 441L818 573L804 583Q714 650 600 664ZM364 118L495 250L364 382L232 513L223 500Q140 391 140 250Q140 107 223 0L232 -13L364 118ZM970 250Q970 389 887 501L878 512Q878 513 861 496T812 447T746 381L615 250L746 118L878 -13L887 0Q970 109 970 250ZM687 59L555 190L423 59L292 -73L306 -83Q416 -166 555 -166T804 -83L818 -73L687 59"],10756:[750,249,833,55,777,"96 750Q103 750 109 748T120 744T127 737T133 730T137 723T139 718V395L140 73L142 60Q159 -43 237 -104T416 -166Q521 -166 597 -103T690 60L692 73L694 718Q708 749 735 749Q765 749 775 720Q777 714 777 398Q777 78 776 71Q766 -51 680 -140Q571 -249 416 -249H411Q261 -249 152 -140Q66 -51 56 71Q55 78 55 398Q55 714 57 720Q60 734 70 740Q80 750 96 750ZM223 276Q223 282 224 287T227 296T232 302T238 308T243 313T250 316L254 319H374V376V406Q374 438 382 454T418 470Q443 467 450 453T458 410V376V319H579Q580 319 583 317T589 313T594 308T600 302T604 295T608 287T609 276Q609 253 
587 241Q577 235 513 235H458V178Q458 176 458 166T459 148Q459 84 415 84Q401 84 390 93T375 117Q374 120 374 178V235H319Q317 235 307 235T290 234Q223 234 223 276"],10758:[750,249,833,55,777,"777 -217Q766 -244 745 -249H88Q64 -242 57 -220Q55 -214 55 250T57 720Q60 734 70 740Q80 750 96 750Q127 750 137 720Q139 714 139 274V-166H693V274Q693 714 695 720Q705 749 735 749Q766 749 775 719Q777 713 777 248V-217"]};MathJax.Ajax.loadComplete(MathJax.OutputJax.SVG.fontDir+"/Size1/Regular/Main.js"); | PypiClean |
/Joule-0.9.41.tar.gz/Joule-0.9.41/joule/controllers/event_controller.py | from aiohttp import web
import json
from sqlalchemy.orm import Session
import datetime
from joule.models import EventStream, EventStore, event_stream
from joule.models import folder, Folder
from joule.errors import ConfigurationError
async def info(request: web.Request):
    """Return JSON metadata for a single event stream.

    The stream is located by either the ``path`` or the ``id`` query
    parameter. Responds 400 when neither is supplied and 404 when no
    matching stream exists.
    """
    session: Session = request.app["db"]
    store: EventStore = request.app["event-store"]
    query = request.query
    # locate the requested stream
    if 'path' in query:
        target = folder.find_stream_by_path(query['path'], session, stream_type=EventStream)
    elif 'id' in query:
        target = session.get(EventStream, query["id"])
    else:
        return web.Response(text="specify an id or a path", status=400)
    if target is None:
        return web.Response(text="stream does not exist", status=404)
    details = await store.info([target])
    return web.json_response(target.to_json(details))
async def move(request: web.Request):
    """Move an event stream into a different folder.

    Expects a JSON body identifying the source (``src_path`` or ``src_id``)
    and the destination (``dest_path`` or ``dest_id``).  ``dest_path``
    folders are created on demand.  The stream name must be unique among
    both data and event streams in the destination folder.
    """
    db: Session = request.app["db"]
    if request.content_type != 'application/json':
        return web.Response(text='content-type must be application/json', status=400)
    body = await request.json()
    # find the stream
    if 'src_path' in body:
        my_stream = folder.find_stream_by_path(body['src_path'], db, stream_type=EventStream)
    elif 'src_id' in body:
        my_stream = db.get(EventStream, body["src_id"])
    else:
        return web.Response(text="specify a source id or a path", status=400)
    if my_stream is None:
        return web.Response(text="stream does not exist", status=404)
    # find or create the destination folder
    if 'dest_path' in body:
        try:
            destination = folder.find(body['dest_path'], db, create=True)
        except ConfigurationError as e:
            return web.Response(text="Destination error: %s" % str(e), status=400)
    elif 'dest_id' in body:
        destination = db.get(Folder, body["dest_id"])
        if destination is None:
            # an invalid dest_id previously caused an AttributeError (HTTP 500)
            return web.Response(text="destination folder does not exist", status=404)
    else:
        return web.Response(text="specify a destination", status=400)
    # make sure name is unique in this destination
    existing_names = [s.name for s in destination.data_streams + destination.event_streams]
    if my_stream.name in existing_names:
        db.rollback()
        return web.Response(text="stream with the same name exists in the destination folder",
                            status=400)
    # record modification time on both the old and the new parent folder
    my_stream.folder.touch()
    destination.event_streams.append(my_stream)
    destination.touch()
    db.commit()
    return web.json_response({"stream": my_stream.to_json()})
async def create(request):
    """Create a new event stream inside a destination folder.

    Expects a JSON body with a ``stream`` object and a destination
    (``dest_path`` or ``dest_id``).  ``dest_path`` folders are created on
    demand.  The new stream name must be unique among both data and event
    streams in the destination folder.
    """
    db: Session = request.app["db"]
    if request.content_type != 'application/json':
        return web.Response(text='content-type must be application/json', status=400)
    body = await request.json()
    if 'stream' not in body:
        return web.Response(text="provide a stream", status=400)
    # find or create the destination folder
    if 'dest_path' in body:
        try:
            destination = folder.find(body['dest_path'], db, create=True)
        except ConfigurationError as e:
            return web.Response(text="Destination error: %s" % str(e), status=400)
    elif 'dest_id' in body:
        destination = db.get(Folder, body["dest_id"])
        if destination is None:
            # an invalid dest_id previously caused an AttributeError (HTTP 500)
            return web.Response(text="destination folder does not exist", status=404)
    else:
        return web.Response(text="specify a destination", status=400)
    try:
        new_stream = event_stream.from_json(body['stream'])
        # clear out the id's so the database assigns fresh ones
        new_stream.id = None
        # make sure name is unique in this destination
        existing_names = [s.name for s in destination.data_streams + destination.event_streams]
        if new_stream.name in existing_names:
            raise ConfigurationError("stream with the same name exists in the folder")
        destination.event_streams.append(new_stream)
        new_stream.touch()
        db.commit()
    except (TypeError, ValueError) as e:
        db.rollback()
        return web.Response(text="Invalid stream JSON: %r" % e, status=400)
    except ConfigurationError as e:
        db.rollback()
        return web.Response(text="Invalid stream specification: %s" % e, status=400)
    except KeyError as e:
        db.rollback()
        return web.Response(text="Invalid or missing stream attribute: %s" % e, status=400)
    return web.json_response(data=new_stream.to_json())
async def update(request: web.Request):
    """Update an event stream's attributes.

    Expects a JSON body with the stream ``id`` and a ``stream`` object
    containing the new attributes.  The (possibly new) name must stay
    unique within the stream's folder across both data and event streams.
    """
    db: Session = request.app["db"]
    if request.content_type != 'application/json':
        return web.Response(text='content-type must be application/json', status=400)
    body = await request.json()
    if 'id' not in body:
        return web.Response(text="Invalid request: specify id", status=400)
    my_stream: EventStream = db.get(EventStream, body['id'])
    if my_stream is None:
        return web.Response(text="stream does not exist", status=404)
    if 'stream' not in body:
        return web.Response(text="Invalid request: specify stream as JSON", status=400)
    try:
        attrs = dict(body['stream'])
    except (TypeError, ValueError):
        # dict() raises TypeError (not ValueError) for non-mapping values
        # such as a string or a list; previously that escaped as HTTP 500
        return web.Response(text="error: [stream] attribute must be JSON", status=400)
    try:
        my_stream.update_attributes(attrs)
        # make sure name is unique in this destination
        existing_names = [s.name for s in
                          my_stream.folder.data_streams + my_stream.folder.event_streams
                          if s.id != my_stream.id]
        if my_stream.name in existing_names:
            raise ConfigurationError("stream with the same name exists in the folder")
        db.commit()
    except (ValueError, ConfigurationError) as e:
        db.rollback()
        return web.Response(text="Invalid stream specification: %s" % e, status=400)
    return web.json_response(data=my_stream.to_json())
async def delete(request):
    """Destroy an event stream and all of its stored events."""
    session: Session = request.app["db"]
    store: EventStore = request.app["event-store"]
    query = request.query
    # locate the requested stream
    if 'path' in query:
        target = folder.find_stream_by_path(query['path'], session,
                                            stream_type=EventStream)
    elif 'id' in query:
        target = session.get(EventStream, query["id"])
    else:
        return web.Response(text="specify an id or a path", status=400)
    if target is None:
        return web.Response(text="stream does not exist", status=404)
    # drop the stored events first, then the stream record itself
    await store.destroy(target)
    target.folder.touch()
    session.delete(target)
    session.commit()
    return web.Response(text="ok")
# ----- data actions ----
async def write_events(request):
    """Insert (upsert) a batch of events into an event stream.

    The JSON body must identify the stream (``path`` or ``id``) and carry
    an ``events`` list.  Returns the stored events and their count.
    """
    db: Session = request.app["db"]
    event_store: EventStore = request.app["event-store"]
    body = await request.json()
    # find the requested stream
    if 'path' in body:
        my_stream = folder.find_stream_by_path(body['path'], db,
                                               stream_type=EventStream)
    elif 'id' in body:
        my_stream = db.get(EventStream, body["id"])
    else:
        # message made consistent with the other handlers (was "...path!!")
        return web.Response(text="specify an id or a path", status=400)
    if my_stream is None:
        return web.Response(text="stream does not exist", status=404)
    if 'events' not in body:
        return web.Response(text="specify events to add", status=400)
    events = await event_store.upsert(my_stream, body['events'])
    return web.json_response(data={'count': len(events), 'events': events})
async def read_events(request):
    """Read events from a stream identified by ``path`` or ``id`` query params.

    Optional query parameters:
      * ``start`` / ``end`` -- integer time bounds (start must be < end)
      * ``filter`` -- JSON-encoded filter passed through to the event store
      * ``limit`` -- integer > 0; by default a HARD limit (if more events
        match, only the count is returned with an empty event list).  With
        ``return-subset`` present the limit is SOFT and at most ``limit``
        events are returned.
    """
    db: Session = request.app["db"]
    event_store: EventStore = request.app["event-store"]
    # find the requested stream
    if 'path' in request.query:
        my_stream = folder.find_stream_by_path(request.query['path'], db,
                                               stream_type=EventStream)
    elif 'id' in request.query:
        my_stream = db.get(EventStream, request.query["id"])
    else:
        return web.Response(text="specify an id or a path", status=400)
    if my_stream is None:
        return web.Response(text="stream does not exist", status=404)
    # parse optional parameters (all must be integers when present)
    params = {'start': None, 'end': None, 'limit': None}
    param = ""  # to appease type checker
    try:
        for param in params:
            if param in request.query:
                params[param] = int(request.query[param])
    except ValueError:
        return web.Response(text="parameter [%s] must be an int" % param, status=400)
    # make sure parameters make sense
    if ((params['start'] is not None and params['end'] is not None) and
            (params['start'] >= params['end'])):
        return web.Response(text="[start] must be < [end]", status=400)
    # handle json filter parameter (only when present and non-empty)
    json_filter = None
    if 'filter' in request.query and request.query['filter'] is not None and len(request.query['filter']) > 0:
        try:
            json_filter = json.loads(request.query['filter'])
            # TODO verify syntax
        except (json.decoder.JSONDecodeError, ValueError):
            return web.Response(text="invalid filter parameter", status=400)
    # handle limit parameter, default is HARD, do not return unless count < limit
    if params['limit'] is not None and 'return-subset' not in request.query:
        if params['limit'] <= 0:
            return web.Response(text="[limit] must be > 0", status=400)
        event_count = await event_store.count(my_stream, params['start'], params['end'], json_filter=json_filter)
        if event_count > params['limit']:
            # too many events, just send the count parameter
            return web.json_response(data={'count': event_count, 'events': []})
    # if return-subset, limit is SOFT, return just that many events
    limit = None
    if params['limit'] is not None and 'return-subset' in request.query:
        if params['limit'] <= 0:
            return web.Response(text="[limit] must be > 0", status=400)
        limit = params['limit']
    events = await event_store.extract(my_stream, params['start'], params['end'], limit=limit, json_filter=json_filter)
    return web.json_response(data={'count': len(events), 'events': events})
async def remove_events(request):
    """Delete events from a stream, optionally bounded by time and a filter."""
    session: Session = request.app["db"]
    store: EventStore = request.app["event-store"]
    query = request.query
    # locate the requested stream
    if 'path' in query:
        target = folder.find_stream_by_path(query['path'], session,
                                            stream_type=EventStream)
    elif 'id' in query:
        target = session.get(EventStream, query["id"])
    else:
        return web.Response(text="specify an id or a path", status=400)
    if target is None:
        return web.Response(text="stream does not exist", status=404)
    # parse optional integer bounds
    bounds = {'start': None, 'end': None}
    name = ""  # to appease type checker
    try:
        for name in bounds:
            if name in query:
                bounds[name] = int(query[name])
    except ValueError:
        return web.Response(text="parameter [%s] must be an int" % name, status=400)
    # bounds, when both given, must form a valid interval
    start, end = bounds['start'], bounds['end']
    if start is not None and end is not None and start >= end:
        return web.Response(text="[start] must be < [end]", status=400)
    # optional JSON filter parameter (present, non-None and non-empty)
    json_filter = None
    raw_filter = query.get('filter')
    if raw_filter:
        try:
            json_filter = json.loads(raw_filter)
            # TODO verify syntax
        except (json.decoder.JSONDecodeError, ValueError):
            return web.Response(text="invalid filter parameter", status=400)
    await store.remove(target, start, end, json_filter=json_filter)
    return web.Response(text="ok")
/LensFlare-0.0.1.tar.gz/LensFlare-0.0.1/README.md |
# LensFlare
LensFlare is an example package I created to help myself and others better understand neural networks. A lot of the code is based off work that I did in the [Coursera deeplearning.ai course](https://www.coursera.org/specializations/deep-learning)
An example workflow is shown below:
```python
import tensorflow as tf
from lensflare.classification import TfNNClassifier
from lensflare.util import load_moons_dataset
```
```python
X_train, y_train = load_moons_dataset()
```

```python
tf.reset_default_graph()
# layer_dims contains neural network structure parameters
layers_dims=[X_train.shape[0], 200, 80, 10, 1]
clf = TfNNClassifier(layers_dims=layers_dims,
optimizer="adam",
lambd=.05,
keep_prob=0.7,
num_epochs=5000)
clf.fit(X_train, y_train, seed=3)
y_pred_train = clf.transform(X_train, y_train)
```
Cost after epoch 0: 1.036825
Cost after epoch 1000: 0.108737
Cost after epoch 2000: 0.104837
Cost after epoch 3000: 0.106805
Cost after epoch 4000: 0.105311
INFO:tensorflow:Restoring parameters from results/model
Training Accuracy: 0.983333333333
```python
from lensflare.funcs.tf_funcs import plot_decision_boundary, predict_dec
# Plot decision boundary
predictions, X, dropout_var, sess = predict_dec()
model = lambda X_train: sess.run([predictions], feed_dict={X:X_train, dropout_var: 1.0});
plot_decision_boundary(model, X_train, y_train)
sess.close()
```
INFO:tensorflow:Restoring parameters from results/model

| PypiClean |
/Malas-1.0.210930.1.tar.gz/Malas-1.0.210930.1/malas/file_validator.py | import os
def command_type_validator(flag, input_data):
    """Validate configuration content against the command type flag.

    Device lists and ``send_config_set`` files pass through unchanged; a
    ``send_command`` file must hold exactly one command, which is returned
    with its trailing newline stripped.  Anything else re-prompts the user.
    """
    # Device information lists and send_config_set content are accepted as-is
    if flag in ("DEVICE", "CONFIG_2"):
        return input_data
    # Command type send_command accepts exactly one command
    if flag == "CONFIG_1" and len(input_data) == 1:
        return input_data[0].rstrip("\n")
    # Too many (or zero) commands for send_command: report and re-prompt
    print("FAIL: There are " + str(len(input_data)) + " commands found in the configuration file")
    print("FAIL: Please use only one command for command type \'send_command\' and try again!")
    # Repeat execute configuration_file_validator with the same flag and then pass the value
    return configuration_file_validator(flag)
def file_validator(flag, input_file):
    """Check that *input_file* exists and validate its contents.

    On success the file's lines are handed to ``command_type_validator``
    together with *flag*.  When the file is missing the user is
    re-prompted via the validator appropriate for the given flag.
    """
    if os.path.isfile(input_file):
        print(f"PASS: '{input_file}' found")
        # Open the file with read permission; a freshly opened file is
        # already positioned at the start, so no seek(0) is needed
        with open(input_file, "r") as file:
            # Verify the content according to the command type flag and
            # pass the validated value through
            return command_type_validator(flag, file.readlines())
    # File does not exist: report and re-prompt the user
    print(f"FAIL: '{input_file}' not found!")
    if flag == "DEVICE":
        # Repeat execute device_info_list_validator and then pass the value
        return device_info_list_validator()
    elif flag in ("CONFIG_1", "CONFIG_2"):
        # Repeat execute configuration_file_validator with the same flag
        return configuration_file_validator(flag)
def device_info_list_validator():
    """Prompt for the device information list file and validate it."""
    # Ask the user where the device information list lives
    path = input("\nEnter the path and the name of the device information list file: ")
    # Check the file's existence under the DEVICE flag and pass the result on
    return file_validator("DEVICE", path)
def configuration_file_validator(command):
    """Prompt for a configuration file and validate it for *command*.

    ``send_command`` maps to the CONFIG_1 flag and ``send_config_set`` to
    CONFIG_2; when called back from ``command_type_validator`` the value is
    already a flag and is passed through unchanged.
    """
    flag_by_command = {"send_command": "CONFIG_1", "send_config_set": "CONFIG_2"}
    flag = flag_by_command.get(command, command)
    # Ask the user where the configuration file lives
    target = input("\nEnter the path and the name of the configuration file: ")
    # Check the file's existence with the flag and pass the result on
    return file_validator(flag, target)
/AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/hyperopt/_main.py | import json
import copy
import inspect
import warnings
from typing import Union, Dict
from collections import OrderedDict
from .utils import plot_convergences
from .utils import get_one_tpe_x_iter
from .utils import to_skopt_as_dict
from .utils import post_process_skopt_results
from .utils import to_skopt_space
from .utils import save_skopt_results
from .utils import Dimension
from .utils import plot_convergence
from ._space import Categorical, Real, Integer
from .utils import sort_x_iters, x_iter_for_tpe
from .utils import loss_histogram, plot_hyperparameters
from ai4water.utils.utils import JsonEncoder
from ai4water.utils.utils import clear_weights
from ai4water.utils.utils import jsonize, dateandtime_now
from ai4water.utils.visualizations import edf_plot
from ai4water.backend import hyperopt as _hyperopt
from ai4water.utils.utils import create_subplots
from ai4water.backend import np, pd, plt, os, sklearn, optuna, plotly, skopt, easy_mpl
# Bind frequently used sklearn/easy_mpl symbols to short module-level names.
GridSearchCV = sklearn.model_selection.GridSearchCV
RandomizedSearchCV = sklearn.model_selection.RandomizedSearchCV
ParameterGrid = sklearn.model_selection.ParameterGrid
ParameterSampler = sklearn.model_selection.ParameterSampler

bar_chart = easy_mpl.bar_chart
parallel_coordinates = easy_mpl.parallel_coordinates

# Optional backends: expose their symbols only when the library is installed;
# otherwise bind None placeholders so failures happen lazily at use time.
if skopt is None:
    pass
else:
    Space = skopt.space.space.Space
    #Dimension = skopt.space.space.Dimension
    forest_minimize = skopt.forest_minimize
    gp_minimize = skopt.gp_minimize
    BayesSearchCV = skopt.BayesSearchCV
    use_named_args = skopt.utils.use_named_args
    from skopt.plots import plot_evaluations

if _hyperopt is not None:
    space_eval = _hyperopt.space_eval
    hp = _hyperopt.hp
    miscs_to_idxs_vals = _hyperopt.base.miscs_to_idxs_vals
    Apply = _hyperopt.pyll.base.Apply
    fmin_hyperopt = _hyperopt.fmin
    tpe = _hyperopt.tpe
    STATUS_OK = _hyperopt.STATUS_OK
    Trials = _hyperopt.Trials
    rand = _hyperopt.rand
else:
    space_eval, hp = None, None
    miscs_to_idxs_vals = None
    Apply = None
    fmin_hyperopt = None
    tpe = None
    STATUS_OK = None
    Trials = None
    rand = None

if _hyperopt is not None:
    try:  # atpe is only available in later versions of hyperopt
        atpe = _hyperopt.atpe
    except AttributeError:
        atpe = None
else:
    atpe = None

if optuna is None:
    plot_contour = None
else:
    plot_contour = optuna.visualization.plot_contour

from ._fanova import fANOVA

# TODO RayTune libraries under the hood https://docs.ray.io/en/master/tune/api_docs/suggestion.html#summary
# TODO add generic algorithm, deap/pygad
# TODO skopt provides functions other than gp_minimize, see if they are useful and can be used.
# todo loading gpmin_results is not consistent.

SEP = os.sep
COUNTER = 0

# Supported algorithms and, per algorithm, the backend libraries that can run it.
ALGORITHMS = {
    'bayes': {},
    'bayes_rf': {'name': 'decision_tree', 'backend': ['skopt']},
    'gbrt': {'name': 'gradient-boosted-tree regression', 'backend': ['skopt']},
    'tpe': {'name': 'Tree of Parzen Estimators', 'backend': ['hyperopt', 'optuna']},
    'atpe': {'name': 'Adaptive Tree of Parzen Estimators', 'backend': ['hyperopt']},
    'random': {'name': 'random search', 'backend': ['sklearn', 'optuna', 'hyperopt']},
    'grid': {'name': 'grid search', 'backend': ['sklearn', 'optuna']},
    'cmaes': {'name': 'Covariance Matrix Adaptation Evolution Strategy', 'backend': ['optuna']}
}
class HyperOpt(object):
"""
The purpose of this class is to provide a uniform and simplifed interface to
use `hyperopt`, `optuna`, `scikit-optimize` and `scikit-learn` based hyperparameter
optimization methods. Ideally this class should provide all the functionalities of
beforementioned libaries with a uniform interface. It however also complements
these libraries by combining their functionalities and adding some additional
functionalities to them. On the other hand this class should not limit or
complicate the use of its underlying libraries. This means all the functionalities
of underlying libraries are available in this class as well. Moreover, you can
use this class just as you use one of its underlying library.
The purpose here is to make a class which allows application of any of the
available optimization methods on any type of model/classifier/regressor. If the
classifier/regressor is of sklearn-based, then for random search, we use
RanddomSearchCV_, for grid search, we use GridSearchCV_ and for Bayesian, we
use BayesSearchCV_ . On the other hand, if the model is not sklearn-based, you
will still be able to implement any of the three methods. In such case, the
bayesian_ will be implemented using `gp_minimize`. Random search and grid search
will be done by simple iterating over the sample space generated as in sklearn
based samplers. However, the post-processing of the results is (supposed to be)
done same as is done in RandomSearchCV and GridSearchCV.
The class is expected to pass all the tests written in sklearn or skopt for
corresponding classes.
For detailed use of this class see this `hpo_tutorial`_
Attributes
--------------
- results dict:
- gpmin_results dict:
- skopt_results :
- hp_space :
- space
- skopt_space :
- space dict:
- title str: name of the folder in which all results will be saved. By
default this is same as name of `algorithm`. For `AI4Water` based
models, this is more detailed, containing problem type etc.
Methods
-------
- eval_with_best: evaluates the objective_fn on best parameters
- best_paras(): returns the best parameters from optimization.
The following examples illustrate how we can uniformly apply different optimization algorithms.
Examples
--------
>>> from ai4water import Model
>>> from ai4water.hyperopt import HyperOpt, Categorical, Integer, Real
>>> from ai4water.datasets import busan_beach
>>> from SeqMetrics import RegressionMetrics
>>> data = busan_beach()
>>> input_features = ['tide_cm', 'wat_temp_c', 'sal_psu', 'air_temp_c', 'pcp_mm', 'pcp3_mm']
>>> output_features = ['tetx_coppml']
We have to define an objective function which will take keyword arguments
and return a scaler value as output. This scaler value will be minized during optimzation
>>> def objective_fn(**suggestion)->float:
... # the objective function must receive new parameters as keyword arguments
... model = Model(
... input_features=input_features,
... output_features=output_features,
... model={"XGBRegressor": suggestion},
... verbosity=0)
...
... model.fit(data=data)
...
... t, p = model.predict(return_true=True)
... mse = RegressionMetrics(t, p).mse()
... # the objective function must return a scaler value which needs to be minimized
... return mse
Define search space
    The search space determines the pool from which parameters are chosen during optimization.
>>> num_samples=5 # only relavent for random and grid search
>>> search_space = [
... Categorical(['gbtree', 'dart'], name='booster'),
... Integer(low=1000, high=2000, name='n_estimators', num_samples=num_samples),
... Real(low=1.0e-5, high=0.1, name='learning_rate', num_samples=num_samples)
... ]
... # Using Baysian with gaussian processes
>>> optimizer = HyperOpt('bayes', objective_fn=objective_fn, param_space=search_space,
... num_iterations=num_iterations )
>>> optimizer.fit()
Using TPE with optuna
>>> num_iterations = 10
>>> optimizer = HyperOpt('tpe', objective_fn=objective_fn, param_space=search_space,
... backend='optuna',
... num_iterations=num_iterations )
>>> optimizer.fit()
Using cmaes with optuna
>>> optimizer = HyperOpt('cmaes', objective_fn=objective_fn, param_space=search_space,
... backend='optuna',
... num_iterations=num_iterations )
>>> optimizer.fit()
Using random with optuna, we can also try hyperopt and sklearn as backend for random algorithm
>>> optimizer = HyperOpt('random', objective_fn=objective_fn, param_space=search_space,
... backend='optuna',
... num_iterations=num_iterations )
>>> optimizer.fit()
Using TPE of hyperopt
>>> optimizer = HyperOpt('tpe', objective_fn=objective_fn, param_space=search_space,
... backend='hyperopt',
... num_iterations=num_iterations )
>>> optimizer.fit()
Using grid with sklearn
>>> optimizer = HyperOpt('grid', objective_fn=objective_fn, param_space=search_space,
... backend='sklearn',
... num_iterations=num_iterations )
>>> optimizer.fit()
.. _hpo_tutorial:
https://ai4water-examples.readthedocs.io/en/latest/auto_examples/index.html#hyperparameter-optimization
.. _GridSearchCV:
https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
.. _RanddomSearchCV:
https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html
.. _BayesSearchCV:
https://scikit-optimize.github.io/stable/modules/generated/skopt.BayesSearchCV.html
.. _bayesian:
https://github.com/scikit-optimize/scikit-optimize/blob/9334d50a1ad5c9f7c013a1c1cb95313a54b83168/examples/bayesian-optimization.py#L109
"""
def __init__(
self,
algorithm: str, *,
param_space,
objective_fn,
eval_on_best: bool = False,
backend: str = None,
opt_path: str = None,
process_results: bool = True,
verbosity: int = 1,
**kwargs
):
"""
Initializes the class
Parameters
----------
algorithm : str
must be one of ``random``, ``grid``, ``bayes``, ``bayes_rf``, and ``tpe``, defining which
optimization algorithm to use.
objective_fn : callable
Any callable function whose returned value is to be minimized.
It can also be either sklearn/xgboost based regressor/classifier.
param_space : list, dict
the search space of parameters to be optimized. We recommend the use
of Real, Integer and categorical classes from [ai4water.hyperopt][ai4water.hyperopt.Integer]
(not from skopt.space). These classes allow a uniform way of defining
the parameter space for all the underlying libraries. However, to
make this class work exactly similar to its underlying libraries,
the user can also define parameter space as is defined in its
underlying libraries. For example, for hyperopt based method like
'tpe' the parameter space can be specified as in the examples of
hyperopt library. In case the code breaks, please report.
eval_on_best : bool, optional
if True, then after optimization, the objective_fn will
be evaluated on best parameters and the results will be stored in the
folder named "best" inside `title` folder.
opt_path :
path to save the results
backend : str, optional
Defines which backend library to use for the `algorithm`. For
example the user can specify whether to use `optuna` or `hyper_opt`
or `sklearn` for `grid` algorithm.
verbosity : bool, optional
determines amount of information being printed
**kwargs :
Any additional keyword arguments will for the underlying optimization
algorithm. In case of using AI4Water model, these must be arguments
which are passed to AI4Water's Model class.
"""
if algorithm not in ALGORITHMS:
raise ValueError(f"""Invalid value of algorithm provided. Allowd values for algorithm"
are {list(ALGORITHMS.keys())}.
You provided {algorithm}""")
self.objective_fn = objective_fn
self.algorithm = algorithm
self.backend = backend
self.param_space = param_space
self.original_space = param_space # todo self.space and self.param_space should be combined.
self.title = self.algorithm
self.results = OrderedDict() # internally stored results
self.gpmin_results = None #
self.data = None
self.eval_on_best = eval_on_best
self.opt_path = opt_path
self._process_results = process_results
self.objective_fn_is_dl = False
self.verbosity = verbosity
self.gpmin_args = self.check_args(**kwargs)
if self.use_sklearn:
if self.algorithm == "random":
self.optfn = RandomizedSearchCV(estimator=objective_fn, param_distributions=param_space, **kwargs)
else:
self.optfn = GridSearchCV(estimator=objective_fn, param_grid=param_space, **kwargs)
elif self.use_skopt_bayes:
self.optfn = BayesSearchCV(estimator=objective_fn, search_spaces=param_space, **kwargs)
    @property
    def backend(self):
        # Name of the backend library in use ('optuna', 'hyperopt', 'sklearn' or 'skopt')
        return self._backend
@backend.setter
def backend(self, x):
if x is not None:
assert x in ['optuna', 'hyperopt', 'sklearn', 'skopt'], f"""
Backend must be one of hyperopt, optuna or sklearn but is is {x}"""
if self.algorithm == 'tpe':
if x is None:
x = 'optuna'
assert x in ['optuna', 'hyperopt']
elif self.algorithm == 'cmaes':
if x is None:
x = 'optuna'
assert x == 'optuna'
elif self.algorithm == 'atpe':
if x is None:
x = 'hyperopt'
assert x == 'hyperopt'
elif self.algorithm == 'random':
if x is None:
x = 'sklearn'
assert x in ['optuna', 'hyperopt', 'sklearn']
elif self.algorithm == 'grid':
if x is None:
x = 'sklearn'
assert x in ['sklearn', 'optuna']
elif self.algorithm in ['bayes', "bayes_rf"]:
if x is None:
x = 'skopt'
else:
raise ValueError
if x == 'hyperopt' and _hyperopt is None:
raise ValueError(f"You must install `hyperopt` to use it as backend for {self.algorithm} algorithm.")
if x == 'optuna' and optuna is None:
raise ValueError(f"You must install optuna to use `optuna` as backend for {self.algorithm} algorithm")
self._backend = x
    @property
    def title(self):
        # Folder name under which the optimization results are saved
        return self._title

    @title.setter
    def title(self, x):
        # A timestamp is appended so that repeated runs get unique result folders
        self._title = x + '_' + str(dateandtime_now())

    @property
    def objective_fn_is_dl(self):
        # True when the objective function builds a deep-learning model
        return self._objective_fn_is_dl

    @objective_fn_is_dl.setter
    def objective_fn_is_dl(self, x):
        self._objective_fn_is_dl = x
def check_args(self, **kwargs):
kwargs = copy.deepcopy(kwargs)
if 'n_initial_points' in kwargs:
if int(''.join(skopt.__version__.split('.')[1])) < 8:
raise ValueError(f"""
'n_initial_points' argument is not available in skopt version < 0.8.
However you are using skopt version {skopt.__version__} .
See https://scikit-optimize.github.io/stable/modules/generated/skopt.gp_minimize.html#skopt.gp_minimize
for more details.
""""")
if 'x0' in kwargs and self.algorithm in ['tpe', 'atpe', 'random', 'grid', 'cmaes']:
kwargs.pop('x0')
return kwargs
def __getattr__(self, item):
    """Delegate unknown attributes to the wrapped sklearn/skopt search object."""
    # TODO, not sure if this is the best way but venturing since it is done by the legend
    # here https://github.com/philipperemy/n-beats/blob/master/nbeats_keras/model.py#L166
    # Since it was not possible to inherit this class from BaseSearchCV and BayesSearchCV at the same time, this
    # hack makes sure that all the functionalities of GridSearchCV, RandomizeSearchCV and BayesSearchCV are also
    # available with class.
    if self.use_sklearn or self.use_skopt_bayes:
        return getattr(self.optfn, item)
    else:
        raise AttributeError(f"HyperOpt does not have attribute {item}")
@property
def param_space(self):
    # search space in the backend-specific structure built by the setter below
    return self._param_space
@param_space.setter
def param_space(self, x):
    """Validate the user supplied search space and convert it into whatever
    structure the active algorithm/backend pair expects:

    - skopt algorithms (bayes/bayes_rf): a list of skopt ``Dimension`` objects
    - sklearn random/grid search: a dict of parameter name -> grid
    - hyperopt: a dict/list of hyperopt ``Apply`` spaces (or a single space)
    - optuna: a dict of parameter name -> Dimension
    """
    if self.algorithm in ["bayes", "bayes_rf"]:
        # skopt based algorithms want a plain list of Dimension objects
        assert Dimension is not None, f"you must have scikit-optimize installed to use {self.algorithm}."
        if isinstance(x, dict):
            _param_space = []
            for k, v in x.items():
                assert isinstance(v, Dimension), f"""
                        space for parameter {k} is of invalid type {v.__class__.__name__}.
                        For {self.algorithm}, it must be of type {Dimension.__name__}
                        """
                _param_space.append(v)
        else:
            assert isinstance(x, list), f"""
                    param space must be list of parameters but it is of type
                    {x.__class__.__name__}"""
            for space in x:
                # each element in the list can be a tuple of lower and upper bounds
                if not isinstance(space, tuple):
                    assert isinstance(space, Dimension), f"""
                            param space must be one of Integer, Real or Categorical
                            but it is of type {space.__class__.__name__}"""
            _param_space = x

    elif self.algorithm in ["random", "grid"] and self.backend != 'optuna':
        # todo, do we also need to provide grid of sample space for random??
        if isinstance(x, dict):
            _param_space = x
        elif isinstance(x, list):
            # convert a list of Dimensions into the name -> grid dict sklearn wants
            _param_space = {}
            for _space in x:
                assert isinstance(_space, Dimension)
                _param_space[_space.name] = _space.grid
        else:
            raise ValueError

    elif self.algorithm in ['tpe', 'atpe', 'random'] and self.backend == 'hyperopt':
        if isinstance(x, list):
            # space is provided as list. Either all of them must be hp.space or Dimension.
            if isinstance(x[0], Dimension):
                _param_space = {}
                for space in x:
                    assert isinstance(space, Dimension)
                    _param_space[space.name] = space.as_hp()
            elif isinstance(x[0], Apply):
                _param_space = []
                for space in x:
                    assert isinstance(space, Apply), f"""invalid space type {space.__class__.__name__}"""
                    _param_space.append(space)
            else:
                raise NotImplementedError
        elif isinstance(x, Dimension):  # for single hyper-parameter optimization ?
            _param_space = x.as_hp()
        else:
            _param_space = x

    elif self.backend == 'optuna':
        if isinstance(x, list):
            _param_space = {}
            for s in x:
                assert isinstance(s, Dimension)
                _param_space[s.name] = s
        elif isinstance(x, dict):
            assert all([isinstance(s, Dimension) for s in x.values()])
            _param_space = x
        else:
            raise NotImplementedError(f"unknown type of space {x.__class__.__name__}")
    else:
        raise ValueError

    self._param_space = _param_space
def skopt_space(self):
    """Tries to make a skopt compatible ``Space`` object from the originally
    provided space. If unsuccessful, returns None."""
    return to_skopt_space(self.original_space)
def space(self) -> dict:
    """Returns a skopt compatible space but as dictionary (name -> Dimension)."""
    return to_skopt_as_dict(self.algorithm, self.backend, self.original_space)
@property
def use_sklearn(self):
    # True when the search should be delegated to sklearn's
    # GridSearchCV / RandomizedSearchCV, i.e. the objective is an sklearn model
    return self.algorithm in ["random", "grid"] and "sklearn" in str(type(self.objective_fn))
@property
def use_skopt_bayes(self):
    # True when skopt's BayesSearchCV should drive the search:
    # a bayesian algorithm applied to an sklearn estimator
    is_bayes_on_sklearn = (
        self.algorithm in ["bayes", "bayes_rf"]
        and "sklearn" in str(type(self.objective_fn))
    )
    if is_bayes_on_sklearn:
        # sanity: the sklearn driver must not claim the same run
        assert not self.use_sklearn
        return True
    return False
@property
def use_skopt_gpmin(self):
    # True when skopt's gp_minimize/forest_minimize must be used:
    # a bayesian algorithm over a non-sklearn objective function
    if self.algorithm not in ["bayes", "bayes_rf"]:
        return False
    if "sklearn" in str(type(self.objective_fn)):
        return False
    # sanity: no other driver may claim this run
    assert not self.use_sklearn
    assert not self.use_skopt_bayes
    return True
@property
def use_tpe(self):
    # hyperopt's fmin drives tpe/atpe/random searches on the hyperopt backend
    return self.algorithm in ('tpe', 'atpe', 'random') and self.backend == 'hyperopt'
@property
def use_own(self):
    # True when none of the library-provided drivers apply and this class
    # must run its own optimization loop
    return not (self.use_sklearn or self.use_skopt_bayes or self.use_skopt_gpmin)
@property
def random_state(self):
    # seeded RandomState; falls back to the fixed seed 313 for reproducibility
    seed = self.gpmin_args.get("random_state", 313)
    return np.random.RandomState(seed)
@property
def num_iterations(self):
    """Total iteration budget, normalising the differently named budget
    arguments of each backend (num_iterations/max_evals/n_trials/n_calls/n_iter)."""
    if self.backend == 'sklearn' and self.algorithm == 'grid':
        # NOTE: this property mutates gpmin_args so that the full grid size
        # is reported consistently on later accesses
        self.gpmin_args['num_iterations'] = len(ParameterGrid(self.param_space))
    if 'num_iterations' in self.gpmin_args:
        return self.gpmin_args['num_iterations']
    if self.algorithm in ['tpe', 'atpe', 'random'] and self.backend == 'hyperopt':
        # hyperopt's default max_evals is sys.maxsize i.e. effectively unbounded
        return self.gpmin_args.get('max_evals', 9223372036854775807)
    if self.backend == 'optuna':
        return self.gpmin_args.get('n_trials', None)  # default value of n_trials is None in study.optimize()
    if 'n_calls' in self.gpmin_args:
        return self.gpmin_args['n_calls']
    return self.gpmin_args['n_iter']
@property
def use_named_args(self):
    # True when the user objective function accepts **kwargs, i.e. it wants
    # hyperparameters passed by name rather than positionally
    varkw = inspect.getfullargspec(self.objective_fn).varkw
    if varkw is None:
        return False
    if isinstance(varkw, str):
        return True
    raise NotImplementedError
@property
def opt_path(self):
    # directory where all artifacts (plots, json summaries, weights) are saved
    return self._opt_path
@opt_path.setter
def opt_path(self, path):
    """Resolve and create the directory where optimization results are saved.

    When ``path`` is None a default of ``./results/<title>`` is used.
    """
    if path is None:
        path = os.path.join(os.getcwd(), f"results{SEP}" + self.title)
    # the original duplicated the exists/makedirs logic in both branches;
    # exist_ok=True covers both cases in one call
    os.makedirs(path, exist_ok=True)
    self._opt_path = path
def best_paras(self, as_list=False) -> Union[list, dict]:
    """Return the best parameters found so far, either as a dict
    (name -> value) or, when ``as_list`` is True, as a plain list of values."""
    if self.use_skopt_gpmin:
        # best iteration is looked up in our own xy record
        xys = self.xy_of_iterations()
        paras = xys[self.best_iter()]['x']
    elif self.backend == 'hyperopt':
        d = get_one_tpe_x_iter(self.trials.best_trial['misc']['vals'], self.hp_space())
        if as_list:
            return list(d.values())
        else:
            return d
    elif self.backend == 'optuna':
        if as_list:
            return list(self.study.best_trial.params.values())
        return self.study.best_trial.params
    elif self.use_skopt_bayes or self.use_sklearn:
        # using BayesSearchCV / GridSearchCV / RandomizedSearchCV
        paras = self.optfn.best_params_
    else:
        paras = sort_x_iters(self.results[self.best_iter()]['x'], list(self.param_space.keys()))
    if as_list:
        return list(paras.values())
    return paras
def fit(self, *args, **kwargs):
    """Makes and calls the underlying fit method.

    Dispatches to the appropriate optimization routine depending upon which
    algorithm/backend combination is in use, then serializes a summary of
    the run into ``opt_path``.

    parameters
    ----------
    **kwargs :
        any keyword arguments for the user defined objective function

    Example
    -------
    >>> def objective_fn(a=2, b=5, **suggestions) -> float:
    ...     # do something e.g. calculate validation score
    ...     val_score = 2.0
    ...     return val_score
    """
    if self.use_sklearn or self.use_skopt_bayes:
        fit_fn = self.optfn.fit
    elif self.use_skopt_gpmin:
        fit_fn = self.own_fit
    elif self.use_own:
        # expose _predict as the public predict hook for our own drivers
        self.predict = self._predict
        if self.algorithm == "grid" and self.backend != 'optuna':
            fit_fn = self.grid_search
        elif self.algorithm == 'random' and self.backend not in ['optuna', 'hyperopt']:
            fit_fn = self.random_search
        elif self.backend == 'hyperopt':
            fit_fn = self.fmin
        elif self.backend == 'optuna':
            fit_fn = self.optuna_objective
        else:
            raise NotImplementedError
    else:
        raise NotImplementedError(f"""No fit function found for algorithm {self.algorithm}
                                      with backend {self.backend}""")

    res = fit_fn(*args, **kwargs)

    # persist a json summary of this run next to the other artifacts
    serialized = self.serialize()
    fname = os.path.join(self.opt_path, 'serialized.json')
    with open(fname, 'w') as fp:
        json.dump(serialized, fp, sort_keys=True, indent=4, cls=JsonEncoder)
    return res
def original_para_order(self):
    """Return hyperparameter names in the order they were originally provided."""
    if isinstance(self.param_space, dict):
        return list(self.param_space)
    space = self.skopt_space()
    if space is not None:
        return [dim.name for dim in space]
    raise NotImplementedError
def dims(self):
    # this will be used for gp_minimize; for the skopt algorithms
    # param_space is already a list of Dimension objects
    return list(self.param_space)
def model_for_gpmin(self, **kws):
    """
    Return the callable which skopt's minimizers will evaluate.

    This function can be called in two cases
        - The user has made its own objective_fn.
        - We make objective_fn using AI4Water and return the error.
    In first case, we just return what user has provided.

    ``kws`` are extra keyword arguments forwarded to the objective function.
    """
    if callable(self.objective_fn) and not self.use_named_args:
        # external function for bayesian but this function does not require named args.
        return self.objective_fn

    dims = self.dims()
    if self.use_named_args:
        # external function and this function accepts named args;
        # skopt's `use_named_args` converts the positional x vector into kwargs
        @use_named_args(dimensions=dims)
        def fitness(**kwargs):
            return self.objective_fn(**kwargs, **kws)
        return fitness

    raise ValueError(f"used named args is {self.use_named_args}")
def own_fit(self, **kws):
    """Run skopt's gp_minimize/forest_minimize on the user objective function.

    kws are the keyword arguments to user objective function
    by the user.
    """
    if self.algorithm == "bayes":
        minimize_func = gp_minimize
    else:  # bayes_rf
        minimize_func = forest_minimize

    kwargs = self.gpmin_args
    if 'num_iterations' in kwargs:
        # skopt calls the iteration budget `n_calls`
        kwargs['n_calls'] = kwargs.pop('num_iterations')

    try:
        search_result = minimize_func(
            func=self.model_for_gpmin(**kws),
            dimensions=self.dims(),
            **kwargs)
    except ValueError as e:
        if int(''.join(sklearn.__version__.split('.')[1])) > 22:
            raise ValueError(f"""
                    For bayesian optimization, If your sklearn version is above 0.23,
                    then this error may be related to
                    https://github.com/kiudee/bayes-skopt/issues/90 .
                    Try to lower the sklearn version to 0.22 and run again.
                    {e}
                    """)
        else:
            raise ValueError(e)

    # the `space` in search_results may not be in same order as originally provided.
    space = search_result['space']
    if space.__dict__.__len__() > 1:
        ordered_sapce = OrderedDict()
        for k in self.space().keys():
            ordered_sapce[k] = [s for s in space if s.name == k][0]
        search_result['space'] = Space(ordered_sapce.values())

    self.gpmin_results = search_result

    if len(self.results) < 1:
        # copy the xy pair of every iteration into our own results container
        fv = search_result.func_vals
        xiters = search_result.x_iters
        for idx, y, x in zip(range(len(fv)), fv, xiters):
            self.results[idx] = {'y': y, 'x': x}

    if self._process_results:
        post_process_skopt_results(search_result, self.results,
                                   self.opt_path, rename=True)
        # only persist the full skopt result for small runs (disk space)
        if len(search_result.func_vals)<=100 and self.algorithm != "bayes_rf":
            save_skopt_results(search_result, self.opt_path)
        self.process_results()

    if self.eval_on_best:
        self.eval_with_best()

    return search_result
def save_results(self, results, path: str = None):
    """
    saves the hpo results so that they can be loaded
    using load_results method.

    parameters
    ----------
    results :
        hpo results i.e. output of optimizer.fit()
    path :
        path where to save the results
    """
    # persisting the results is only implemented for skopt `bayes` output
    assert self.algorithm == "bayes"
    if path is None:
        path = self.opt_path
    save_skopt_results(results, path)
    return
def eval_sequence(self, params, **kwargs):
    """Evaluate the objective function once for every parameter set in ``params``.

    parameters
    ----------
    params :
        sequence of dicts, one per iteration, mapping parameter names to values.
    kwargs :
        any additional keyword arguments for objective_fn
    """
    if self.verbosity > 0:
        print(f"total number of iterations: {len(params)}")
    for idx, para in enumerate(params):
        if self.use_named_args:  # objective_fn is external but uses kwargs
            err = self.objective_fn(**para, **kwargs)
        else:  # objective_fn is external and does not use keyword arguments
            try:
                err = self.objective_fn(*list(para.values()), **kwargs)
            except TypeError:
                # fixed typos in the original message ("keywork", "initiatiation")
                raise TypeError(f"""
                    use_named_args argument is set to {self.use_named_args}. If your
                    objective function takes keyword arguments, make sure that
                    this argument is set to True during initiation of HyperOpt.""")
        err = round(err, 8)
        # record the xy pair in the original parameter order
        self.results[idx] = {'y': err, 'x': sort_x_iters(para, self.original_para_order())}

    if self._process_results:
        clear_weights(self.opt_path, self.results, rename=True)
        self.process_results()

    if self.eval_on_best:
        self.eval_with_best()

    return self.results
def grid_search(self, **kwargs):
    """Exhaustively evaluate every combination in the parameter grid.

    kwargs are forwarded to the objective function.
    """
    params = list(ParameterGrid(self.param_space))
    self.param_grid = params
    return self.eval_sequence(params, **kwargs)
def random_search(self, **kwargs):
    """Pure-python random search driven by sklearn's ParameterSampler.

    parameters
    ----------
    kwargs :
        keyword arguments in the user defined objective function.
    """
    # make sure every dimension can actually be sampled from
    for k, v in self.param_space.items():
        if v is None:
            grid = self.space()[k].grid
            assert grid is not None, f"""grid for parameter {k} could not be created. Inferred grid is
            {grid}. Please either provide the `num_samples` parameter while creating space or explicitly
            provide grid for {k}"""

    param_list = list(ParameterSampler(self.param_space, n_iter=self.num_iterations,
                                       random_state=self.random_state))
    if len(param_list) < self.num_iterations:
        # ParameterSampler may return fewer (de-duplicated) samples than asked;
        # we need to correct it so that num_iterations gets calculated correctly next time
        self.gpmin_args['n_calls'] = len(param_list)
        self.gpmin_args['n_iter'] = len(param_list)

    self.param_grid = param_list
    return self.eval_sequence(param_list, **kwargs)
def optuna_objective(self, **kwargs):
    """Run the optimization with optuna as the backend.

    parameters
    ----------
    kwargs :
        keyword arguments in the user defined objective function.
    """
    if self.verbosity == 0:
        optuna.logging.set_verbosity(optuna.logging.ERROR)

    # map our algorithm names onto optuna sampler classes
    sampler = {
        'tpe': optuna.samplers.TPESampler,
        'cmaes': optuna.samplers.CmaEsSampler,
        'random': optuna.samplers.RandomSampler,
        'grid': optuna.samplers.GridSampler
    }

    def objective(trial):
        # ask every dimension of the search space for a suggestion
        suggestion = {}
        for space_name, _space in self.param_space.items():
            suggestion[space_name] = _space.suggest(trial)
        return self.objective_fn(**suggestion, **kwargs)

    if self.algorithm in ['tpe', 'cmaes', 'random']:
        study = optuna.create_study(direction='minimize', sampler=sampler[self.algorithm]())
    else:
        # GridSampler requires the full grid up front
        space = {s.name: s.grid for s in self.skopt_space()}
        study = optuna.create_study(sampler=sampler[self.algorithm](space))
    study.optimize(objective, n_trials=self.num_iterations)
    setattr(self, 'study', study)

    if self._process_results:
        self.process_results()
    return study
def fmin(self, **kwargs):
    """Run the search with hyperopt's ``fmin`` as the driver."""
    suggest_options = {
        'tpe': tpe.suggest,
        'random': rand.suggest
    }
    if atpe is not None:
        # atpe is an optional part of hyperopt
        suggest_options.update({'atpe': atpe.suggest})

    trials = Trials()
    model_kws = self.gpmin_args
    if 'num_iterations' in model_kws:
        # hyperopt calls the iteration budget `max_evals`
        model_kws['max_evals'] = model_kws.pop('num_iterations')

    space = self.hp_space()
    if self.use_named_args:
        def objective_fn(kws):
            # the objective function in hyperopt library receives a dictionary
            return self.objective_fn(**kws)
        objective_f = objective_fn
    else:
        objective_f = self.objective_fn

        # positional objective: hyperopt wants a list of spaces, or a single
        # bare space for one hyperparameter
        if len(self.space()) > 1:
            space = list(self.hp_space().values())
        elif len(self.space()) == 1:
            space = list(self.hp_space().values())[0]
        else:
            raise NotImplementedError

    best = fmin_hyperopt(objective_f,
                         space=space,
                         algo=suggest_options[self.algorithm],
                         trials=trials,
                         **kwargs,
                         **model_kws)

    with open(os.path.join(self.opt_path, 'trials.json'), "w") as fp:
        json.dump(jsonize(trials.trials), fp, sort_keys=True, indent=4, cls=JsonEncoder)

    setattr(self, 'trials', trials)
    # self.results = trials.results
    if self._process_results:
        self.process_results()
    return best
def _predict(self, *args, **params):
if self.use_named_args:
return self.objective_fn(**params)
if callable(self.objective_fn) and not self.use_named_args:
return self.objective_fn(*args)
def hp_space(self) -> dict:
    """returns a dictionary whose values are hyperopt equivalent space instances."""
    # atpe cannot handle the wrapped form, hence the flag
    wrap = self.algorithm != 'atpe'
    return {name: dim.as_hp(wrap) for name, dim in self.space().items()}
def xy_of_iterations(self) -> Dict[int, Dict[str, Union[str, dict]]]:
    """returns a dictionary whose keys are iteration numbers and values are
    the xy pairs at those iterations.

    Returns
        Dict[int, Dict[str, [dict,float]]]
    """
    if self.backend == "optuna":
        num_iters = range(self.num_iterations)
        results = {}
        for idx, trial in zip(num_iters, self.study.trials):
            results[idx] = {'y': trial.value, 'x': trial.params}
        return results
    elif self.backend == "hyperopt":
        return x_iter_for_tpe(self.trials, self.hp_space(), as_list=False)
    elif self.backend == 'skopt':
        if self.use_skopt_bayes:
            # BayesSearchCV keeps scores/params in cv_results_
            fv = self.optfn.cv_results_['mean_test_score']
            xiters = self.optfn.cv_results_['params']
        else:
            assert self.gpmin_results is not None, f"gpmin_results is not populated yet"
            fv = self.gpmin_results['func_vals']
            xiters = self.gpmin_results['x_iters']
        results = {}
        for idx, y, x in zip(range(len(fv)), fv, xiters):
            # to_kw converts the positional x vector back into a name -> value dict
            results[idx] = {'y': y, 'x': self.to_kw(x)}
        return results
    else:
        # for sklearn based
        return self.results
def func_vals(self) -> np.ndarray:
    """returns the value of objective function at each iteration."""
    if self.backend == 'hyperopt':
        return np.array([self.trials.results[i]['loss'] for i in range(self.num_iterations)])
    elif self.backend == 'optuna':
        # optuna trials carry a list of objective values per trial
        return np.array([s.values for s in self.study.trials])
    elif self.use_skopt_bayes or self.use_sklearn:
        return self.optfn.cv_results_['mean_test_score']
    else:
        return np.array([v['y'] for v in self.results.values()])
def skopt_results(self):
    """Package the recorded iterations into a skopt ``OptimizeResult``-like
    object (attributes: x_iters, func_vals, space, x)."""
    best = self.best_paras()
    if isinstance(best, list):
        x_best = best
    elif isinstance(best, dict):
        x_best = list(best.values())
    else:
        raise NotImplementedError

    class OptimizeResult:
        x_iters = [list(s['x'].values()) for s in self.xy_of_iterations().values()]
        func_vals = self.func_vals()
        space = self.skopt_space()
        # fixed: the original assigned the bound method (`self.best_paras`)
        # instead of calling it when best_paras() returned a list
        x = x_best

    return OptimizeResult()
def best_iter(self) -> int:
    """returns the iteration on which best/optimized parameters are obtained.
    The indexing starts from 0.
    """
    # nan entries (failed iterations) are ignored when locating the minimum
    return int(np.nanargmin(self.func_vals()))
def best_xy(self) -> dict:
    """Returns best (optimized) parameters as dictionary.
    The dictionary has two keys ``x`` and ``y``. ``x`` is the
    best hyperparameters while ``y`` is the corresponding objective function value.
    """
    iterations = self.xy_of_iterations()
    best_key = list(iterations.keys())[self.best_iter()]
    return iterations[best_key]
def _plot_edf(self, save=True, show=False, **kwargs):
    """empirical CDF of objective function"""
    plt.close("all")
    # NOTE(review): this plots the EDF of the iteration *keys*, not of the
    # objective function values (self.func_vals()) — confirm this is intended
    y = np.array(list(self.xy_of_iterations().keys())).astype("float64")
    edf_plot(y, show=show, **kwargs)
    if save:
        plt.savefig(os.path.join(self.opt_path, "edf"))
    return
def plot_parallel_coords(self, save=True, show=False, **kwargs):
    """ parallel coordinates of hyperparameters

    Parameters
    -----------
    save : bool, default=True
    show : bool, default=False
    **kwargs :
        any keyword arguments for easy_mpl.parallel_coordinates
    """
    d = self.xy_of_iterations()

    # one row per iteration, one column per hyperparameter
    data = pd.DataFrame([list(v['x'].values()) for v in d.values()],
                        columns=[s for s in self.space()])
    # iteration index is used as the colouring category
    categories = np.array(list(self.xy_of_iterations().keys())).astype("float64")

    _kws = dict(coord_title_kws=dict(rotation=10, fontsize=12))
    if kwargs is not None:
        _kws.update(kwargs)

    parallel_coordinates(
        data=data,
        categories=categories,
        title="Hyperparameters",
        show=False,
        **_kws
    )

    if save:
        fname = os.path.join(self.opt_path, "parallel_coordinates")
        plt.savefig(fname, dpi=500, bbox_inches="tight")
    if show:
        plt.show()
    return
def _plot_evaluations(self, save=True):
    """Scatter-matrix of the sampled points via skopt's plot_evaluations."""
    plt.close('all')
    # NOTE(review): `dimensions` is handed the best parameter *values*;
    # skopt documents `dimensions` as a list of dimension names — verify
    plot_evaluations(self.skopt_results(), dimensions=self.best_paras(as_list=True))
    if save:
        plt.savefig(os.path.join(self.opt_path, "evaluations.png"),
                    dpi=300,
                    bbox_inches='tight')
    return
def _plot_convergence(self,
                      original: bool = False,
                      ax=None,
                      save=True,
                      show=False,
                      **kwargs):
    """Plot objective function value against iteration number.

    When ``original`` is True the raw per-iteration values are plotted,
    otherwise the classic running-minimum convergence plot is drawn.
    """
    plt.close('all')
    if original:
        ax = easy_mpl.plot(self.func_vals(),
                           marker=".",
                           markersize=12,
                           lw=2,
                           ax_kws=dict(xlabel="Number of calls $n$",
                                       ylabel=r"$\min f(x)$ after $n$ calls",
                                       grid=True),
                           show=False,
                           **kwargs)
    else:
        ax = plot_convergence(self.func_vals(), ax=ax, show=False, **kwargs)
    if save:
        fname = os.path.join(self.opt_path, "convergence.png")
        plt.savefig(fname, dpi=300, bbox_inches='tight')
    if show:
        plt.show()
    return ax
def process_results(self, show=False):
    """post processing of results: writes the iteration record to disk and
    draws every plot supported by the active backend."""
    self.save_iterations_as_xy()

    self.plot_parallel_coords()

    # deep learning related results
    if self.objective_fn_is_dl:
        plot_convergences(
            self.opt_path,
            what='val_loss',
            ylabel='Validation MSE')
        plot_convergences(
            self.opt_path,
            what='loss',
            ylabel='MSE',
            leg_pos="upper right")

    self._plot_edf()

    # distributions/histograms of explored hyperparameters
    self._plot_distributions(show=show)

    # convergence plot,
    # if sr.x_iters is not None and self.backend != "skopt":  # todo
    self._plot_convergence(show=show)

    # plot of hyperparameter space as explored by the optimizer
    if self.backend != 'skopt' and len(self.space()) < 20 and skopt is not None:
        self._plot_evaluations()

    # hyperparameter importance (fANOVA) needs more than one parameter
    if len(self.best_paras(True))>1:
        plt.close('all')
        try:
            self.plot_importance()
            plt.close('all')
            self.plot_importance(plot_type="bar", show=show)
        except (RuntimeError, AttributeError):
            warnings.warn(f"Error encountered during fanova calculation")

    if self.backend == 'hyperopt':
        loss_histogram([y for y in self.trials.losses()],
                       save=True,
                       fname=os.path.join(self.opt_path, "loss_histogram.png")
                       )
        plot_hyperparameters(
            self.trials,
            fname=os.path.join(self.opt_path, "hyperparameters.png"),
            save=True)

    if plotly is not None:
        if self.backend == 'optuna':
            # interactive contour plots of the optuna study
            fig = plot_contour(self.study)
            plotly.offline.plot(fig, filename=os.path.join(self.opt_path, 'contours.html'),
                                auto_open=False)
    return
def plot_importance(
        self,
        save=True,
        show: bool = False,
        plot_type="box",
        with_optuna: bool = False,
        **tree_kws
) -> plt.Axes:
    """plots hyperparameter importance using fANOVA

    ``tree_kws`` are forwarded to the fANOVA tree model; when ``with_optuna``
    is True, optuna's fanova implementation is used instead.
    """
    if with_optuna:
        return self._calc_importance_with_optuna(plot_type, save=save, show=show)

    # X: one row per iteration, one column per hyperparameter; Y: objective values
    X = pd.DataFrame([list(iter_xy['x'].values()) for iter_xy in self.xy_of_iterations().values()])
    Y = np.array([iter_xy['y'] for iter_xy in self.xy_of_iterations().values()])
    X.columns = list(self.xy_of_iterations()[0]['x'].keys())

    dtypes = [space.__class__.__name__ for space in self.skopt_space()]
    # bounds only exist for numerical (Real/Integer) dimensions
    bounds = [(space.low, space.high) if isinstance(space, (Real, Integer)) else None for space in self.skopt_space()]
    kws = {'X': X, 'Y': Y, 'dtypes': dtypes, 'bounds': bounds}
    kws.update(tree_kws)

    if plot_type == "bar":
        try:
            importance = fANOVA(**kws).feature_importance()
        except (AttributeError, RuntimeError):
            raise ValueError(f"Error encountered during fANOVA, try setting `with_optuna` to True")
        ax = self._plot_importance_as_barchart(importance, save=save)
    else:
        try:
            mean, std = fANOVA(**kws).feature_importance(return_raw=True)
        except (AttributeError, RuntimeError):
            raise ValueError(f"Error encountered during fANOVA, try setting `with_optuna` to True")
        ax = self._plot_importance_as_boxplot(mean, std, save)

    if show:
        plt.show()
    return ax
def _plot_importance_as_boxplot(self, mean, std, save: bool = False):
    """Draw per-parameter importance (mean and std) as a boxplot and dump
    the raw numbers to json."""
    df = pd.DataFrame([mean, std])
    plt.close('all')
    ax = df.boxplot(rot=70, return_type="axes")
    ax.set_ylabel("Relative Importance")
    if save:
        plt.savefig(os.path.join(
            self.opt_path,
            "fanova_importance_hist.png"),
            dpi=300,
            bbox_inches='tight')

    fname = "fanova_importances_raw.json"
    with open(os.path.join(self.opt_path, fname), 'w') as fp:
        json.dump(jsonize(df.to_dict()), fp, indent=4, sort_keys=True)
    return ax
def _plot_importance_as_barchart(self, importance, save=False):
    """Draw the importance dict as a horizontal bar chart and dump it to json."""
    df = pd.DataFrame.from_dict(importance, orient='index')
    ax = bar_chart(df, orient='h', show=False,
                   ax_kws={'title': "fANOVA hyperparameter importance",
                           'xlabel': "Relative Importance"})
    fname = "fanova_importances.json"
    if save:
        plt.savefig(os.path.join(self.opt_path, 'fanova_importance_bar.png'),
                    bbox_inches="tight", dpi=300)
    with open(os.path.join(self.opt_path, fname), 'w') as fp:
        json.dump(jsonize(df.to_dict()), fp, indent=4, sort_keys=True)
    return ax
def _calc_importance_with_optuna(self, plot_type="bar", save=False, show=True):
    """Compute and plot hyperparameter importance via optuna's fanova."""
    # todo, it is plotting both bar_chart and boxplot on same axes
    from ._optuna_fanova import plot_param_importances

    importances, importance_paras, ax = plot_param_importances(self.optuna_study())
    if plot_type == "bar":
        if save:
            plt.savefig(os.path.join(self.opt_path, 'fanova_importance_bar.png'),
                        bbox_inches="tight", dpi=300)
    else:
        plt.close('all')  # because bar chart has already been drawn
        df = pd.DataFrame.from_dict(importance_paras)
        ax = df.boxplot(rot=70, return_type="axes")
        ax.set_ylabel("Relative Importance")
        if save:
            plt.savefig(os.path.join(
                self.opt_path,
                "fanova_importance_hist.png"),
                dpi=300,
                bbox_inches='tight')

    # persist both the aggregate and the raw per-parameter importances
    with open(os.path.join(self.opt_path, "importances.json"), 'w') as fp:
        json.dump(importances, fp, indent=4, sort_keys=True, cls=JsonEncoder)

    with open(os.path.join(self.opt_path, "fanova_importances.json"), 'w') as fp:
        json.dump(importance_paras, fp, indent=4, sort_keys=True, cls=JsonEncoder)

    if show:
        plt.show()
    return ax
def optuna_study(self):
    """
    Attempts to create an optuna Study instance so that
    optuna based plots can be generated.

    Returns
        None, if not possible else Study
    """
    from optuna.study import Study
    from optuna.trial import TrialState

    if self.backend == 'optuna':
        # a genuine study already exists
        return self.study

    class _Trial:
        # minimal stand-in for an optuna FrozenTrial
        state = TrialState.COMPLETE

        def __init__(self,
                     number: int,
                     values: Union[list, int, float],
                     params: dict,
                     distributions: dict):
            values = jsonize(values)
            self._number = number
            self._values = values
            # optuna stores a list of objective values; only single-objective
            # runs are supported here
            if isinstance(values, list):
                assert len(values) == 1
                self.value = values[0]
            elif isinstance(values, float) or isinstance(values, int):
                self.value = values
            else:
                try:  # try to convert it to float if possible
                    self.value = float(values)
                except Exception as e:
                    raise NotImplementedError(f"""
                        values must be convertible to list but it is {values} of type
                        {values.__class__.__name__} Actual error message was {e}""")
            self.params = params
            self._distributions = distributions
            self.distributions = distributions

    XY_OF_ITERATIONS = self.xy_of_iterations()
    SPACE = self.space()
    BEST_PARAS = self.best_paras()

    class _Study(Study):
        # the trial list is fabricated at class-creation time from the
        # already recorded iterations
        trials = []
        idx = 0

        distributions = {sn: s.to_optuna() for sn, s in SPACE.items()}

        for xy in XY_OF_ITERATIONS.values():
            _x, _y = xy['x'], xy['y']
            assert isinstance(_x, dict), f"""
                params must of type dict but provided params are of type
                {_x.__class__.__name__}"""
            trials.append(_Trial(number=idx,
                                 values=_y,
                                 params=_x,
                                 distributions=distributions
                                 ))
            idx += 1

        best_params = BEST_PARAS
        best_trial = None
        best_value = None
        _study_id = 0
        _distributions = distributions

        def __init__(self):
            # deliberately skip optuna's Study.__init__ (no storage backend)
            pass

        def _is_multi_objective(self):
            return False

    study = _Study()

    setattr(self, 'study', study)
    return study
def _plot_distributions(self, save=True, show=True, figsize=None) -> plt.Figure:
    """plot distributions of explored hyperparameters"""
    # name of hyperparameters
    h_paras = list(self.best_xy()['x'].keys())
    # container with a list for each hyperparameter
    h_para_lists = {k: [] for k in h_paras}

    for xy in self.xy_of_iterations().values():
        # score = xy['y']
        x_iter = xy['x']
        for para, val in x_iter.items():
            h_para_lists[para].append(val)

    figsize = figsize or (6 + len(h_paras), 6 + len(h_paras))
    fig, axes = create_subplots(naxes=len(h_paras),
                                figsize=figsize)

    if not isinstance(axes, np.ndarray):
        axes = np.array([axes])

    for ax, col in zip(axes.flat, h_paras):
        # note: `bins` actually holds the counts returned by np.unique
        labels, bins = np.unique(np.array(h_para_lists[col]), return_counts=True)
        if isinstance(self.space()[col], Real):
            labels = [round(label, 3) for label in labels]
        bar_chart(bins, labels, orient="v", ax=ax, rotation=90, label=col,
                  show=False)
        ax.set_ylabel("Number of iterations")
        ax.legend()

    if save:
        fname = os.path.join(self.opt_path, "distributions.png")
        plt.savefig(fname, bbox_inches="tight")
    if show:
        plt.show()
    return fig
def to_kw(self, x):
    """Zip the values in ``x`` with the parameter names of the search space,
    returning a name -> value dict."""
    space = self.space()
    if not isinstance(space, dict):
        raise NotImplementedError
    return dict(zip(space.keys(), x))
def eval_with_best(self):
    """
    Find the best parameters and evaluate the objective_fn with them,
    returning whatever the objective function returns.
    """
    if self.use_named_args:
        x = self.best_paras()
    else:
        x = self.best_paras(True)

    if self.use_named_args:
        return self.objective_fn(**x)

    if callable(self.objective_fn) and not self.use_named_args:
        # hyperopt may report a single best parameter as a one-element list
        if isinstance(x, list) and self.backend == 'hyperopt':  # when x = [x]
            if len(x) == 1:
                x = x[0]
        return self.objective_fn(x)

    raise NotImplementedError
@classmethod
def from_gp_parameters(cls, fpath: str, objective_fn):
    """loads results saved from bayesian optimization

    fpath : path of the json file produced by a previous bayes run.
    objective_fn : objective function to associate with the restored optimizer.
    """
    opt_path = os.path.dirname(fpath)
    with open(fpath, 'r') as fp:
        gpmin_results = json.load(fp)
    space = gpmin_results['space']
    # rebuild skopt Dimension objects from their serialized form
    spaces = []
    for sp_name, sp_paras in space.items():
        if sp_paras['type'] == 'Categorical':
            spaces.append(Categorical(sp_paras['categories'], name=sp_name))
        elif sp_paras['type'] == 'Integer':
            spaces.append(Integer(low=sp_paras['low'], high=sp_paras['high'], name=sp_name, prior=sp_paras['prior']))
        elif sp_paras['type'] == 'Real':
            spaces.append(Real(low=sp_paras['low'], high=sp_paras['high'], name=sp_name, prior=sp_paras['prior']))
        else:
            raise NotImplementedError

    optimizer = cls('bayes',
                    param_space=spaces,
                    objective_fn=objective_fn,
                    opt_path=opt_path,
                    backend='skopt')
    optimizer.gpmin_results = gpmin_results
    return optimizer
def pre_calculated_results(self, resutls, from_gp_parameters=True):
    """Loads the pre-calculated results i.e. x and y values which
    have been already evaluated.

    NOTE(review): the loaded data is assigned to a local and never used —
    this method looks unfinished; confirm before relying on it.
    """
    with open(resutls, 'r') as fp:
        results = json.load(fp)
    return
def serialize(self):
    """Return a json-friendly summary of this optimization run."""
    return {'fun': '',
            'x': '',
            "best_paras": jsonize(self.best_paras()),
            'space': {k: v.serialize() for k, v in self.space().items()},
            'fun_vals': self.func_vals(),
            # 'iters': self.xy_of_iterations(),  # todo, for BayesSearchCVs, not getting ys
            'algorithm': self.algorithm,
            'backend': self.backend,
            'opt_path': self.opt_path
            }
def save_iterations_as_xy(self):
    """Dump all iterations (plus a copy sorted by iteration key) as json."""
    iterations = self.xy_of_iterations()
    jsonized_iterations = jsonize(iterations)

    fname = os.path.join(self.opt_path, "iterations.json")
    with open(fname, "w") as fp:
        json.dump(jsonized_iterations, fp, sort_keys=False, indent=4, cls=JsonEncoder)

    fname = os.path.join(self.opt_path, "iterations_sorted.json")
    with open(fname, "w") as fp:
        json.dump(dict(sorted(jsonized_iterations.items())), fp, sort_keys=True, indent=4, cls=JsonEncoder)
def add_previous_results(
        self,
        iterations: Union[dict, str] = None,
        x: list = None,
        y: list = None
):
    """adds results from previous iterations.

    If you have run the optimization previously, you can make use
    of those results by appending them.

    Arguments:
        iterations:
            It can be either a dictionary whose keys are y values and values are x
            or it can be a path to a file which contains these xy values as dictionary.
        x:
            a list of lists where each sub-list is the value of hyperparameters
            at one iteration. The `x` and `y` arguments are optional and will
            only be used if `iterations` are not provided.
        y:
            a list of float values where each value in y is the output
            of objective_fn with corresponding x. The length of `x` and `y`
            must be equal.
    """
    # warm-starting is only supported by the skopt based bayesian algorithms
    assert self.algorithm in ["bayes", "bayes_rf"]
    if iterations is None:
        assert isinstance(x, list) and isinstance(y, list)
        assert len(x) == len(y), f"x has {len(x)} values while y has {len(y)} values. They must be equal"
        x0 = x
        y0 = y
    elif isinstance(iterations, str):
        assert os.path.exists(iterations), f"the path {iterations} does not exist"
        # it is a path
        with open(iterations, 'r') as fp:
            iter_dict = json.load(fp)
        x0, y0 = self.dict_to_xy(iter_dict)
    else:
        if not isinstance(iterations, dict):
            raise ValueError(f"iterations must be a dictionary but it is of type {iterations.__class__.__name__}")
        x0, y0 = self.dict_to_xy(iterations)

    # todo check for inf and nan in y0
    # gp_minimize consumes these as its warm-start arguments
    self.gpmin_args['x0'] = x0
    self.gpmin_args['y0'] = y0
    return
@staticmethod
def dict_to_xy(iterations:dict):
x0, y0 = [], []
for y, x in iterations.items():
y0.append(float(y))
x0.append(list(x.values()))
return x0, y0
def load_results(self, fname: str):
    """loads the previously computed results. It should not
    be used after .fit()

    parameters
    ----------
    fname : str
        complete path of hpo_results.bin file e.g.
        path/to/hpo_results.bin
    """
    from joblib import load  # some modules may not be dependent upon joblib

    assert len(self.results) == 0, f"""
        Loading results after call to .fit is not allowed.
        Create a new instance of HyperOpt and then call this function.
        """
    if not os.path.exists(fname):
        raise FileNotFoundError(f" File {fname} does not exist")
    new_results = load(fname)
    self.gpmin_results = new_results

    # populate self.results from the loaded skopt OptimizeResult
    fv = new_results.func_vals
    xiters = new_results.x_iters
    for idx, y, x in zip(range(len(fv)), fv, xiters):
        self.results[idx] = {'y': y, 'x': x}
    return
/MokaPlayer-0.8.5.7.tar.gz/MokaPlayer-0.8.5.7/README.md | # MokaPlayer
A music player written in python

## Prerequisites
- TagLib
- Python 3.6.0
- PyGObject
- GStreamer
## Installing
```sh
pip install mokaplayer
```
## Features
- Supports all of the most popular music file formats
- Gapless playback
- Resumes playback on startup
- Edit tags for multiple files at the same time
- Playlists
- Audio visualization
- 10 Band Equalizer
- Change playback speed while maintaining pitch
- Go to [Artist/Album] with CTRL-P
- Filter with CTRL-F
- Automatically fetch:
- Album covers
- Artist covers
- Lyrics
- ASCII and Guitar Pro tabs
## Configuration
If you clone the repository you need to create the file `mokaplayer/config/secret.py` with this line:
```python
LASTFM_SECRET_API_KEY = 'YOUR_API_KEY'
```
### On Windows
Download the following packages with [MSYS2](http://www.msys2.org/):
```sh
mingw-w64-i686-gtk3
mingw-w64-i686-python3-gobject
mingw-w64-i686-python3-pip
mingw-w64-i686-gcc
mingw-w64-i686-taglib
mingw-w64-i686-python3-lxml
mingw-w64-i686-swig
mingw-w64-i686-gst-python
mingw-w64-i686-gst-plugins-base
mingw-w64-i686-gst-plugins-good
mingw-w64-i686-gst-plugins-bad
mingw-w64-i686-gst-plugins-ugly
```
| PypiClean |
/Gnosis_Utils-1.2.2.tar.gz/Gnosis_Utils-1.2.2/gnosis/xml/xmlcat.py | import sys
from xml.sax import handler, make_parser
from xml.sax.saxutils import escape
class Canonicalize(handler.ContentHandler):
    """SAX handler that writes a canonicalized copy of the parsed XML.

    Attributes are emitted in sorted order so that logically identical
    documents serialize to identical byte streams.
    """

    def __init__(self, out=sys.stdout):
        handler.ContentHandler.__init__(self)
        self._out = out

    def startDocument(self):
        xml_decl = '<?xml version="1.0" encoding="utf-8"?>\n'
        self._out.write(xml_decl)

    def endDocument(self):
        pass  # sys.stderr.write("Bye bye!\n")

    def startElement(self, name, attrs):
        self._out.write('<' + name)
        # sorted() canonicalizes the attribute order and works on mapping
        # views; the original Python 2 ``attrs.items(); .sort()`` idiom fails
        # on Python 3, where items() returns a non-sortable view.
        # Note: the loop variable is no longer named ``name`` so the element
        # name is not shadowed.
        for (attr_name, value) in sorted(attrs.items()):
            self._out.write(' %s="%s"' % (attr_name, escape(value)))
        self._out.write('>')

    def endElement(self, name):
        self._out.write('</%s>' % name)

    def characters(self, content):
        self._out.write(escape(content))

    def ignorableWhitespace(self, content):
        pass  # self._out.write(content)

    def processingInstruction(self, target, data):
        self._out.write('<?%s %s?>' % (target, data))
class TextDump(handler.ContentHandler):
    """SAX handler that dumps only the character data of a document.

    Runs of whitespace-only text chunks are collapsed to a single newline.
    """

    def __init__(self, out=sys.stdout):
        # handler.ContentHandler.__init__(self)
        self._out = out
        self.last = ""      # the previously seen character chunk
        self.skipping = 0   # truthy while swallowing whitespace-only chunks

    def characters(self, content):
        # print repr(escape(content))
        if self.last.strip() or content.strip():
            if self.skipping:
                # End of a skipped whitespace run: emit one newline for it.
                self._out.write('\n')
                self.skipping = 0
            self._out.write(escape(content))
        elif self.last == '\n' and content != '\n':
            # '!=' replaces the Python 2 '<>' operator, which is a syntax
            # error on Python 3; behavior is identical.
            self._out.write(content)
        else:
            self.skipping = 1
        self.last = content
if __name__ == '__main__':
    # Usage: xmlcat.py [-dump|--dump|/dump] file.xml [more.xml ...]
    # Guard against running with no arguments, which previously raised an
    # IndexError on sys.argv[1].
    if len(sys.argv) < 2:
        sys.stderr.write("usage: %s [-dump] xml_file [...]\n" % sys.argv[0])
        sys.exit(2)
    parser = make_parser()
    if sys.argv[1] in ('-dump', '--dump', '/dump'):
        # Strip the flag so the remaining argv entries are all file names.
        del sys.argv[1]
        parser.setContentHandler(TextDump())
    else:
        parser.setContentHandler(Canonicalize())
    for fname in sys.argv[1:]:
        parser.parse(fname)
/DBRetina-2.2.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/kSpider2/ks_interactome.py | from __future__ import division
import sys
import click
from kSpider2.click_context import cli
import matplotlib.pyplot as plt
import json
import os
import seaborn as sns
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import pandas as pd
import kSpider2.dbretina_doc_url as dbretina_doc
class Interactome:
    """Undirected, weighted feature-feature graph.

    Each edge weight counts how many group pairs connect the two features
    ("shared groups"). Backed by a symmetric dict-of-dicts adjacency map.
    """

    def __init__(self, output_prefix):
        # adjacency map: node -> {neighbor -> weight}; kept symmetric
        self.graph = {}
        # prefix used for every exported file name
        self.output_prefix = output_prefix

    def add_edge(self, node1, node2):
        # Self-loops are ignored; an existing edge gets its weight bumped,
        # and both directions are always updated together.
        if node1 == node2:
            return
        if node1 not in self.graph:
            self.graph[node1] = {}
        if node2 not in self.graph:
            self.graph[node2] = {}
        if node2 in self.graph[node1]:
            self.graph[node1][node2] += 1
            self.graph[node2][node1] += 1
        else:
            self.graph[node1][node2] = 1
            self.graph[node2][node1] = 1

    def export(self, ctx):
        # Write the edge list as a 3-column TSV; the node1 < node2 ordering
        # emits each undirected edge exactly once.
        rows = ["feature_1\tfeature_2\tshared_groups"]
        for node1, edges in self.graph.items():
            rows.extend(
                f"{node1}\t{node2}\t{weight}"
                for node2, weight in edges.items()
                if node1 < node2
            )
        output_file_name = f"{self.output_prefix}_interactome.tsv"
        ctx.obj.INFO(f"Exporting interactome to {output_file_name}")
        with open(output_file_name, 'w') as file:
            file.write("\n".join(rows))

    def plot_statistics(self):
        """Print summary stats of edge weights and save three PNG plots.

        NOTE(review): figures are never released with plt.close(), so
        repeated calls accumulate open matplotlib figures.
        """
        # Create a DataFrame from the graph data
        data = []
        for node1, edges in self.graph.items():
            data.extend(
                {'Node1': node1, 'Node2': node2, 'Weight': weight}
                for node2, weight in edges.items()
                if node1 < node2
            )
        df = pd.DataFrame(data)
        # Create a summary statistics table
        print("Summary statistics of the interactome weights:")
        summary_stats = df['Weight'].describe()
        print(pd.DataFrame.from_dict(dict(summary_stats), orient='index').to_string())
        # Plot a histogram of the weights
        plt.figure(figsize=(10, 6))
        sns.histplot(df['Weight'], kde=False, bins=30)
        plt.title('Histogram of Edge Weights')
        plt.xlabel('Edge Weight')
        plt.ylabel('Frequency')
        plt.savefig(f"{self.output_prefix}_interactome_histogram.png", dpi=500)
        # Boxplot to show spread of weights
        plt.figure(figsize=(10, 6))
        sns.boxplot(x=df['Weight'])
        plt.title('Boxplot of Edge Weights')
        plt.savefig(f"{self.output_prefix}_interactome_boxplot.png", dpi=500)
        # Scatter plot to show any potential relationship between nodes and weights
        df['NodePair'] = df.apply(lambda row: f"{row['Node1']} - {row['Node2']}", axis=1)
        plt.figure(figsize=(10, 6))
        sns.scatterplot(x='NodePair', y='Weight', data=df)
        plt.title('Scatter Plot of Node Pairs and Weights')
        plt.xticks(rotation=90)
        plt.savefig(f"{self.output_prefix}_interactome_scatter.png", dpi=500)

    def graph_export(self, graphml, gexf, ctx):
        # Builds the networkx graph only when at least one export format
        # is requested; self.G is otherwise never created.
        if graphml or gexf:
            self.G = nx.Graph()
            for node1, edges in self.graph.items():
                for node2, weight in edges.items():
                    self.G.add_edge(node1, node2, weight=weight)
        if graphml:
            ctx.obj.INFO("Exporting interactome as graphml file")
            nx.write_graphml(self.G, f"{self.output_prefix}_interactome.graphml")
        if gexf:
            ctx.obj.INFO("Exporting interactome as gexf file")
            nx.write_gexf(self.G, f"{self.output_prefix}_interactome.gexf")
def get_command():
    """Reconstruct the DBRetina command line for logging purposes.

    The argument following ``-i`` is expanded to an absolute path.

    Returns:
        str: the command line, prefixed with ``#command: DBRetina``.
    """
    # Work on a copy: the original implementation rewrote sys.argv in
    # place, mutating global state as a side effect of a logging helper.
    argv = list(sys.argv)
    for i, arg in enumerate(argv):
        # Bounds check avoids an IndexError when "-i" is the last argument.
        if arg == "-i" and i + 1 < len(argv):
            argv[i + 1] = os.path.abspath(argv[i + 1])
    return "#command: DBRetina " + " ".join(argv[1:])
@cli.command(name="interactome", epilog = dbretina_doc.doc_url("interactome"), help_priority=9)
# @click.option('-t', '--threads', "user_threads", default=1, required=False, type=int, help="number of cores") # TODO later in C++ version
@click.option('-i', '--index-prefix', "index_prefix", required=True, type=click.STRING, help="index file prefix")
@click.option('-p', '--pairwise', 'pairwise_file', required=True, type=click.Path(exists=True), help="pairwise TSV file")
@click.option('--graphml', 'graphml', is_flag=True, default = False, help="export interactome as graphml file")
@click.option('--gexf', 'gexf', is_flag=True, default = False, help="export interactome as gexf file")
@click.option('-o', '--output-prefix', "output_prefix", required=True, type=click.STRING, help="output file prefix")
@click.pass_context
def main(ctx, index_prefix, pairwise_file, output_prefix, graphml, gexf):
"""Construct a features-interactome.
Detailed description:
For a groups pairwise file, construct an interactome between the features of each group and all other features in the pairwise file.
"""
###################################
# 1. Extract gene set pairs
###################################
ctx.obj.INFO(f"Extracting gene set pairs from {pairwise_file}")
geneSet_pairs = set()
metadata = []
# iterate over the pairwise file to construct geneSet_pairs
with open(pairwise_file, 'r') as pairwise_tsv:
while True:
pos = pairwise_tsv.tell()
line = pairwise_tsv.readline()
if not line.startswith('#'):
pairwise_tsv.seek(pos)
break
else:
metadata.append(line)
metadata.append(f"#command: {get_command()}\n")
next(pairwise_tsv)
for row in pairwise_tsv:
row = row.strip().split('\t')
geneSet_pairs.add((row[2], row[3]))
##############################################
# 2. Map gene set to genes (group to features)
##############################################
ctx.obj.INFO(f"Mapping gene sets to features")
raw_json_file = f"{index_prefix}_raw.json"
with open(raw_json_file, "r") as f:
supergroups_to_features = json.loads(f.read())["data"]
##############################################
#3. Build the interactome
##############################################
ctx.obj.INFO("Building the interactome. Please wait...")
interactome = Interactome(output_prefix)
for geneSet_pair in tqdm(geneSet_pairs):
geneSet1_features = supergroups_to_features[geneSet_pair[0]]
geneSet2_features = supergroups_to_features[geneSet_pair[1]]
# create edges between all pairs of features
for feature1 in geneSet1_features:
for feature2 in geneSet2_features:
interactome.add_edge(feature1, feature2)
interactome.export(ctx)
interactome.graph_export(graphml, gexf, ctx)
ctx.obj.SUCCESS("Interactome has been constructed successfully.")
# interactome.plot_statistics() | PypiClean |
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/methods/advanced/save_file.py |
import asyncio
import functools
import inspect
import io
import logging
import math
import os
from hashlib import md5
from pathlib import PurePath
from typing import Union, BinaryIO, Callable
import fipper
from fipper import StopTransmission
from fipper import raw
from fipper.session import Session
log = logging.getLogger(__name__)
class SaveFile:
    async def save_file(
        self: "fipper.Client",
        path: Union[str, BinaryIO],
        file_id: int = None,
        file_part: int = 0,
        progress: Callable = None,
        progress_args: tuple = ()
    ):
        """Upload a file onto Telegram servers, without actually sending the message to anyone.
        Useful whenever an InputFile type is required.

        .. note::

            This is a utility method intended to be used **only** when working with raw
            :obj:`functions <fipper.api.functions>` (i.e: a Telegram API method you wish to use which is not
            available yet in the Client class as an easy-to-use method).

        .. include:: /_includes/usable-by/users-bots.rst

        Parameters:
            path (``str`` | ``BinaryIO``):
                The path of the file you want to upload that exists on your local machine or a binary file-like object
                with its attribute ".name" set for in-memory uploads.

            file_id (``int``, *optional*):
                In case a file part expired, pass the file_id and the file_part to retry uploading that specific chunk.

            file_part (``int``, *optional*):
                In case a file part expired, pass the file_id and the file_part to retry uploading that specific chunk.

            progress (``Callable``, *optional*):
                Pass a callback function to view the file transmission progress.
                The function must take *(current, total)* as positional arguments (look at Other Parameters below for a
                detailed description) and will be called back each time a new file chunk has been successfully
                transmitted.

            progress_args (``tuple``, *optional*):
                Extra custom arguments for the progress callback function.
                You can pass anything you need to be available in the progress callback scope; for example, a Message
                object or a Client instance in order to edit the message with the updated progress status.

        Other Parameters:
            current (``int``):
                The amount of bytes transmitted so far.

            total (``int``):
                The total size of the file.

            *args (``tuple``, *optional*):
                Extra custom arguments as defined in the ``progress_args`` parameter.
                You can either keep ``*args`` or add every single extra argument in your function signature.

        Returns:
            ``InputFile``: On success, the uploaded file is returned in form of an InputFile object.

        Raises:
            RPCError: In case of a Telegram RPC error.
        """
        if path is None:
            return None

        async def worker(session):
            # Consumer coroutine: drains the shared queue and invokes each
            # RPC on its session. A ``None`` item is the shutdown sentinel.
            while True:
                data = await queue.get()
                if data is None:
                    return
                try:
                    await session.invoke(data)
                except Exception as e:
                    log.error(e)

        part_size = 512 * 1024  # files are uploaded in 512 KiB parts

        if isinstance(path, (str, PurePath)):
            fp = open(path, "rb")
        elif isinstance(path, io.IOBase):
            fp = path
        else:
            raise ValueError("Invalid file. Expected a file path as string or a binary (not text) file pointer")

        file_name = getattr(fp, "name", "file.jpg")

        # Measure the stream by seeking to its end, then rewind.
        fp.seek(0, os.SEEK_END)
        file_size = fp.tell()
        fp.seek(0)

        if file_size == 0:
            raise ValueError("File size equals to 0 B")

        # Premium accounts may upload up to 4000 MiB; others up to 2000 MiB.
        file_size_limit_mib = 4000 if self.me.is_premium else 2000

        if file_size > file_size_limit_mib * 1024 * 1024:
            raise ValueError(f"Can't upload files bigger than {file_size_limit_mib} MiB")

        file_total_parts = int(math.ceil(file_size / part_size))
        # Files over 10 MiB take the "big file" upload path: no MD5 check,
        # more sessions and more workers.
        is_big = file_size > 10 * 1024 * 1024
        pool_size = 3 if is_big else 1
        workers_count = 4 if is_big else 1
        # A caller-provided file_id means we are retrying a single part.
        is_missing_part = file_id is not None
        file_id = file_id or self.rnd_id()
        # MD5 is only computed for small, complete (non-retry) uploads.
        md5_sum = md5() if not is_big and not is_missing_part else None
        pool = [
            Session(
                self, await self.storage.dc_id(), await self.storage.auth_key(),
                await self.storage.test_mode(), is_media=True
            ) for _ in range(pool_size)
        ]
        workers = [self.loop.create_task(worker(session)) for session in pool for _ in range(workers_count)]
        queue = asyncio.Queue(16)  # bounded to backpressure the reader

        try:
            for session in pool:
                await session.start()

            # When retrying, start reading at the requested part offset.
            fp.seek(part_size * file_part)

            while True:
                chunk = fp.read(part_size)

                if not chunk:
                    if not is_big and not is_missing_part:
                        md5_sum = "".join([hex(i)[2:].zfill(2) for i in md5_sum.digest()])
                    break

                if is_big:
                    rpc = raw.functions.upload.SaveBigFilePart(
                        file_id=file_id,
                        file_part=file_part,
                        file_total_parts=file_total_parts,
                        bytes=chunk
                    )
                else:
                    rpc = raw.functions.upload.SaveFilePart(
                        file_id=file_id,
                        file_part=file_part,
                        bytes=chunk
                    )

                await queue.put(rpc)

                # In retry mode only the single requested part is re-sent.
                if is_missing_part:
                    return

                if not is_big and not is_missing_part:
                    md5_sum.update(chunk)

                file_part += 1

                if progress:
                    func = functools.partial(
                        progress,
                        min(file_part * part_size, file_size),
                        file_size,
                        *progress_args
                    )

                    # Support both sync and async progress callbacks.
                    if inspect.iscoroutinefunction(progress):
                        await func()
                    else:
                        await self.loop.run_in_executor(self.executor, func)
        except StopTransmission:
            raise
        except Exception as e:
            log.error(e, exc_info=True)
        else:
            if is_big:
                return raw.types.InputFileBig(
                    id=file_id,
                    parts=file_total_parts,
                    name=file_name,
                )
            else:
                return raw.types.InputFile(
                    id=file_id,
                    parts=file_total_parts,
                    name=file_name,
                    md5_checksum=md5_sum
                )
        finally:
            # Stop workers via one sentinel per worker, then tear down the
            # media sessions; close fp only if this method opened it.
            for _ in workers:
                await queue.put(None)

            await asyncio.gather(*workers)

            for session in pool:
                await session.stop()

            if isinstance(path, (str, PurePath)):
                fp.close()
/ETo-1.1.0.tar.gz/ETo-1.1.0/README.rst | ETo - A Python package for calculating reference evapotranspiration
===================================================================
The ETo package contains a class and associated functions to calculate reference evapotranspiration (ETo) using the `UN-FAO 56 paper <http://www.fao.org/docrep/X0490E/X0490E00.htm>`_. Additional functions have been added to calculate historic ETo or potential evapotranspiration (PET) for comparison purposes.
A parameter estimation function has also been added to the base class to convert most any variety of metereological parameter inputs to the necessary parameters needed to calculate ETo.
Documentation
--------------
The primary documentation for the package can be found `here <http://eto.readthedocs.io>`_.
Installation
------------
ETo can be installed via pip or conda::
pip install eto
or::
conda install -c mullenkamp eto
The core dependency is `Pandas <http://pandas.pydata.org/pandas-docs/stable/>`_.
| PypiClean |
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/admin/cfg.py | from admin import admin
from importlib import import_module
from flask import current_app
from .models import Setting,Type
from flask.ext.wtf import Form
class BaseSettings(object):
    """Base helper that mirrors a subset of the Flask app config.

    Subclasses declare ``_settings`` (the config keys to manage) and
    ``_defaults`` (fallback values used when a key is not yet cached).

    NOTE(review): ``_settings``, ``_values``, ``_defaults`` are class-level
    mutables, so their state is shared between all instances and, for
    ``_values``, across subclasses — confirm this sharing is intended.
    """
    _settings = []
    _values = {}
    _form = None
    _defaults = {}
    _cfg = ''

    def __init__(self):
        # Bind to the active app's config mapping.
        self._cfg = current_app.config
        for itm in self._settings:
            if not itm in self._values:
                try:
                    self.set_setting(itm,self._defaults[itm])
                except KeyError:
                    raise ValueError('Need to be given a value or default for {}'.format(itm))
            # NOTE(review): unconditional lookup — raises KeyError when the
            # setting is missing from the app config; verify that is desired.
            self.set_setting(itm,self._cfg[itm])

    def _get_setting_field(self,setting,field_type=None):
        # Hook for subclasses: map a setting to a WTForms field class.
        pass

    def _get_form(self):
        # Hook for subclasses: build and return the settings form class.
        raise NotImplementedError

    @property
    def form(self):
        # Lazily built form class for editing the managed settings.
        return self._get_form()

    def set_setting(self,setting,value):
        # Unknown settings are silently ignored.
        if setting in self._settings:
            self._values[setting] = value

    def get_setting(self,setting,default=None):
        # Returns `default` for unmanaged settings.
        if setting in self._settings:
            return self._values[setting]
        return default

    def apply_settings(self,**kwargs):
        """Push cached values back into the app config, after applying
        any keyword overrides."""
        if len(kwargs) > 0:
            for k in kwargs:
                self.set_setting(k,kwargs[k])
        for setting in self._settings:
            if not setting.startswith('_'):
                self._cfg[setting.upper()] = self.get_setting(setting)

    def _get_default_field(self,setting,fields):
        # Fallback WTForms field when no type is registered for a setting.
        return fields.__dict__['StringField']
class SiteSettings(BaseSettings):
    """Concrete settings group for site-wide configuration keys.

    Builds a WTForms form whose field types come from the ``Setting``
    rows stored in the database (falling back to ``StringField``).
    """
    _settings = [
        'RECAPTCHA_PUBLIC_KEY',
        'RECAPTCHA_PRIVATE_KEY',
        'ADMIN_PER_PAGE',
        'CODEMIRROR_LANGUAGES',
        'CODEMIRROR_THEME',
        'BLUEPRINTS',
        'EXTENSIONS',
        'TEMPLATE_FILTERS',
        'CONTEXT_PROCESSORS',
    ]
    _defaults = {
        'RECAPTCHA_PUBLIC_KEY':'',
        'RECAPTCHA_PRIVATE_KEY':'',
        'ADMIN_PER_PAGE':'',
        'CODEMIRROR_LANGUAGES':['python'],
        'CODEMIRROR_THEME':'3024-year',
        'BLUEPRINTS':'',
        'EXTENSIONS':'',
        'TEMPLATE_FILTERS':'',
        'CONTEXT_PROCESSORS':'',
    }

    def _get_setting_field(self,setting,field_type=None):
        """Resolve the WTForms field class for ``setting``.

        ``field_type`` (a field class name) wins when given; otherwise the
        field type is looked up from the Setting row in the database, and
        a default field is used when no row exists.
        """
        fields = import_module('wtforms.fields')
        if field_type is not None:
            field = fields.__dict__[field_type]
        else:
            from .models import Setting
            s = Setting.query.filter(Setting.name==setting).first()
            if s is None:
                field = self._get_default_field(setting,fields)
            else:
                field = fields.__dict__[s.type.field_type]
        return field

    def _get_form(self):
        # Dynamically assemble a Form subclass with one field per setting;
        # the built class is cached on the (class-level) ``_form`` attribute.
        form_args = {}
        for itm in self._settings:
            form_args[itm] = self._get_setting_field(itm)()
        self._form = type(
            'EditSiteSettingsForm',(Form,),form_args
        )
        return self._form
@admin.before_app_request
def add_settings():
    """Ensure a ``Setting`` row exists for every cached config key.

    Runs before every request handled by the app; missing Setting (and
    Type) rows are created from the current app configuration.
    """
    from app import app  # local import avoids a circular import at module load
    settings = app.config.copy()
    CACHED_SETTINGS = [
        'RECAPTCHA_PUBLIC_KEY',
        'RECAPTCHA_PRIVATE_KEY',
        'ADMIN_PER_PAGE',
        'CODEMIRROR_LANGUAGES',
        'CODEMIRROR_THEME',
        'BLUEPRINTS',
        'EXTENSIONS',
        'TEMPLATE_FILTERS',
        'CONTEXT_PROCESSORS',
    ]
    for itm in CACHED_SETTINGS:
        setting = Setting.query.filter(Setting.name==itm).first()
        if setting is None:
            # NOTE(review): this filters Type.name against a Python *type
            # object* (type(settings[itm])), not a string — verify the Type
            # model normalizes this in its column/constructor.
            t = Type.query.filter(Type.name==type(settings[itm])).first()
            value = settings.get(itm,None)
            if value is None:
                value = ''
            if t is None:
                t = Type(type(settings[itm]))
                t.save()
            setting = Setting(
                name=itm,type=t,value=value
            )
            setting.save()
/Grid2Op-1.9.3-py3-none-any.whl/grid2op/Exceptions/IllegalActionExceptions.py |
from grid2op.Exceptions.Grid2OpException import Grid2OpException
# exception bad actions
class IllegalAction(Grid2OpException):
    """Raised when a :class:`grid2op.BaseAction` breaks the game rules.

    Thrown, for example, when an :class:`grid2op.BaseAgent` submits an
    action that :func:`grid2op.BaseAction.ActionSpace.is_legal` rejects;
    the case is handled in :func:`grid2op.Environment.Environment.step`.

    Legality is contextual: the very same action can be legal in one
    situation and illegal in another.
    """
class OnProduction(IllegalAction):
    """More specific than :class:`IllegalAction`: the action is illegal
    because it sets invalid values on generators.
    """
class VSetpointModified(OnProduction):
    """More specific than :class:`OnProduction`: the action is illegal
    because it changed the voltage-magnitude setpoint of a generator.
    """
class ActiveSetPointAbovePmax(OnProduction):
    """More specific than :class:`OnProduction`: the action is illegal
    because a generator's active-power setpoint exceeds its Pmax.
    """
class ActiveSetPointBelowPmin(OnProduction):
    """More specific than :class:`OnProduction`: the action is illegal
    because a generator's active-power setpoint is below its Pmin.
    """
class OnLoad(IllegalAction):
    """More specific than :class:`IllegalAction`: the action is illegal
    because it sets invalid values on loads.
    """
class OnLines(IllegalAction):
    """More specific than :class:`IllegalAction`: the action is illegal
    because it sets invalid values on power lines (e.g. an impossible
    reconnection or disconnection).
    """
class InvalidReconnection(OnLines):
    """More specific than :class:`OnLines`: the :class:`grid2op.BaseAgent`
    attempted an illegal powerline reconnection.
    """
# attempt to use redispatching or unit commit method in an environment not set up.
class UnitCommitorRedispachingNotAvailable(IllegalAction):
    """Raised on an attempt to use redispatching or unit-commitment
    methods in an environment that was not set up for them.
    """
/Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/regression/catboost.py |
from catboost import CatBoostRegressor as _CatBoostRegressor
from sklearn.model_selection import train_test_split
from amplo.regression._base import BaseRegressor
from amplo.utils import check_dtypes
class CatBoostRegressor(BaseRegressor):
    """
    Amplo wrapper for catboost.CatBoostRegressor.

    Parameters
    ----------
    test_size : float, default: 0.1
        Test size for train-test-split in fitting the model.
    random_state : int, default: None
        Random state for train-test-split in fitting the model.
    verbose : {0, 1, 2}, default: 0
        Verbose logging.
    **model_params : Any
        Model parameters for underlying catboost.CatBoostRegressor.
    """

    model: _CatBoostRegressor  # type hint

    def __init__(self, test_size=0.1, random_state=None, verbose=0, **model_params):
        # Verify input dtypes and integrity
        check_dtypes(
            ("test_size", test_size, float),
            ("random_state", random_state, (type(None), int)),
            ("model_params", model_params, dict),
        )
        if not 0 <= test_size < 1:
            raise ValueError(f"Invalid attribute for test_size: {test_size}")

        # Set up model: defaults only fill in keys the caller did not supply.
        default_model_params = {
            "n_estimators": 1000,
            "allow_writing_files": False,  # keep catboost from dropping files on disk
            "early_stopping_rounds": 100,
            "use_best_model": True,        # requires an eval_set in fit()
            "verbose": verbose,
        }
        for k, v in default_model_params.items():
            if k not in model_params:
                model_params[k] = v
        model = _CatBoostRegressor(**model_params)

        # Set attributes
        self.test_size = test_size
        self.random_state = random_state
        super().__init__(model=model, verbose=verbose)

    def fit(self, x, y=None, **fit_params):
        """Fit the model on a train split, using the held-out split as the
        early-stopping eval_set.

        NOTE(review): ``fit_params`` is accepted but never forwarded, and
        ``y=None`` (the default) would fail inside train_test_split —
        confirm both are intentional (likely sklearn API compatibility).
        """
        # Split data and fit model
        xt, xv, yt, yv = train_test_split(
            x, y, test_size=self.test_size, random_state=self.random_state
        )
        self.model.fit(
            xt,
            yt,
            eval_set=[(xv, yv)],
            early_stopping_rounds=self.model.get_params().get("early_stopping_rounds"),
            use_best_model=self.model.get_params().get("use_best_model"),
        )
        self.is_fitted_ = True
        return self
/CPAT-3.0.4.tar.gz/CPAT-3.0.4/docs_v3/_build/html/searchindex.js | Search.setIndex({docnames:["index"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":3,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":2,"sphinx.domains.rst":2,"sphinx.domains.std":1,sphinx:56},filenames:["index.rst"],objects:{},objnames:{},objtypes:{},terms:{"03746259323e":0,"10fold_crossvalid":0,"20287252729e":0,"2nd":0,"3rd":0,"4_orf_1":0,"4_orf_2":0,"4_orf_3":0,"4_orf_4":0,"4_orf_5":0,"4_orf_6":0,"4_orf_7":0,"4_orf_8":0,"6mer":0,"7th":0,"8077265737e":0,"87205265917e":0,"8th":0,"case":0,"default":0,"import":0,"long":0,"new":0,"null":0,"public":0,"short":0,And:0,CDS:0,FOR:0,For:0,IDs:0,NOT:0,The:0,There:0,These:0,aaagttgaagaatttg:0,aattccattcttaaatacctcactgtcttggccatggggaagcactatgg:0,abov:0,access:0,accord:0,accuraci:0,achiev:0,acid:0,addit:0,address:0,after:0,agctaaaacaaaaaacaatggtttgtgcaaaaatctcataatgttttaat:0,aggaccaggtccacccagcagctgtttgcccagagctggagcctcagctt:0,align:0,all:0,along:0,alreadi:0,also:0,altern:0,alwai:0,ani:0,annot:0,anoth:0,anti:0,antisens:0,area:0,assembl:0,assess:0,atg:0,attach:0,automat:0,availablel:0,averag:0,balanc:0,base:0,basic:0,becaus:0,bed:0,been:0,befor:0,below:0,best:0,better:0,between:0,bia:0,big:0,bioinformat:0,blate:0,blue:0,boston:0,browser:0,bug:0,built:0,button:0,bz2:0,bzip2:0,cabili:0,caccca:0,cacccc:0,caccct:0,caggagccagagttggagatattacagtcaactttggcttctaagccagt:0,calcul:0,california:0,can:0,candid:0,carri:0,catalog:0,ccatcatcgtgggcgtcatgttcttcgagcgcgccttcgatcaaggcgcg:0,cctcagctgggggaaagaccctggcctaggggtcttagccactccccacc:0,cgene:0,characterist:0,clearli:0,click:0,close:0,cluster:0,cod:0,code:0,coden:0,coding_fil:0,coding_prob:0,codon:0,column:0,combinatori:0,compar:0,compat:0,compos:0,compress:0,consid:0,consol:0,contain:0,control:0,copi:0,could:0,coverag:0,cpat_run_info:0,cpc:0,criteria:0,cross:0,ctagggtatagttcaggggtatccaatcctttggcttccctgggccatgt:0,
ctgctattttgagttttccattaacttccaaagaattctggttttcaaaa:0,ctgcttctacattggttgctttgttaactctacctgatcttcacttgtca:0,cttcta:0,cttctt:0,curv:0,dasari:0,dat:0,databas:0,datset:0,defin:0,demonstr:0,detail:0,determin:0,devic:0,differ:0,direct:0,directli:0,directori:0,disk:0,distribut:0,dm3:0,dna:0,document:0,doe:0,doi:0,done:0,dot:0,download:0,dozen:0,e74:0,each:0,edu:0,effect:0,either:0,end:0,enough:0,ensembl:0,entri:0,equal:0,even:0,evolutionari:0,exactli:0,exist:0,exit:0,extrem:0,fai:0,fals:0,faq:0,faqformat:0,fasta:0,fasta_format:0,featur:0,fickett:0,fifth:0,figur:0,figure_3:0,find:0,fine:0,first:0,fit:0,fix:0,floor:0,fly:0,fold:0,folder:0,follow:0,format1:0,format:0,found:0,foundat:0,frame:0,franklin:0,free:0,frequenc:0,from:0,ftp:0,futur:0,gaaacggctttacttacaaaacagactctttaccttctgctgtgtttgaa:0,gaacga:0,gaacgc:0,gaacgg:0,gaacgt:0,gaagatgatgctcaaggtactcttcatggaccaccattcgctgttggcaa:0,gacgctatctacgaccacatcaacgaggggaagctgtggaaacacatcaa:0,gcacaagtatgagaacaagtagttccttggaggcccccatccaggccaga:0,gcggtggcgcgagttggactgtgaagaaacatggcggccgcgacgttgac:0,gene:0,gene_fil:0,gener:0,genom:0,get:0,give:0,gkt006:0,glm:0,gnu:0,goal:0,gplot:0,graphic:0,gtaatttgagaccacttcaaagccctctgcaaacaccccaaaggcagaat:0,gtatgtttagtcagcatgctcaggaaataaatgtgaattgcccttgagac:0,handl:0,has:0,hasn:0,have:0,head:0,help:0,here:0,hex:0,hexamer_dat:0,hg19:0,high:0,highest:0,hope:0,host:0,html:0,http:0,human:0,human_coding_transcripts_cd:0,human_coding_transcripts_hg19:0,human_coding_transcripts_mrna:0,human_hexam:0,human_logitmodel:0,human_noncoding_transcripts_hg19:0,human_noncoding_transcripts_rna:0,human_test_coding_mrna:0,human_test_coding_mrna_hg19:0,human_train:0,identifi:0,ignor:0,impli:0,inc:0,includ:0,increas:0,independ:0,index:0,indic:0,inform:0,instead:0,irvin:0,known:0,kocher:0,lab:0,label:0,larger:0,largest:0,later:0,least:0,length:0,librari:0,liguo:0,like:0,lincrna:0,line_width:0,linguist:0,list:0,load:0,log:0,log_fil:0,logi:0,logic:0,logist:0,logit_model:0,logitmodel:0,longest:0,longet:0,lowess:0,mai:0,major:0,ma
ke:0,make_hexamer_tab:0,make_logitmodel:0,make_logitmodel_run_info:0,mani:0,mask:0,mayo:0,merchant:0,messag:0,method:0,might:0,min:0,min_orf_len:0,minimum:0,minor:0,mm10:0,mm9:0,mode:0,modifi:0,more:0,most:0,mous:0,mrna:0,multipl:0,must:0,n_top_orf:0,name:0,nar:0,ncbi:0,need:0,net:0,ngene:0,nm_013387:0,nm_030915:0,nm_198086:0,no_orf:0,non:0,noncod:0,noncoding_fil:0,note:0,nucleic:0,nucleotid:0,number:0,numer:0,numpi:0,object:0,occur:0,oit:0,older:0,one:0,onli:0,open:0,optimum:0,orf_4:0,orf_end:0,orf_fram:0,orf_prob:0,orf_seq:0,orf_start:0,orf_strand:0,orffind:0,org:0,origin:0,other:0,otherwis:0,our:0,out:0,out_fil:0,outfil:0,output1:0,output2:0,own:0,packag:0,paper:0,park:0,particular:0,past:0,perfectli:0,phylocsf:0,pleas:0,plese:0,point:0,posit:0,possibl:0,potenti:0,prebuild:0,prebuilt:0,precis:0,predict:0,predictor:0,prefix:0,print:0,probabl:0,problem:0,process:0,produc:0,program:0,protein:0,provid:0,publish:0,purpos:0,put:0,python3:0,qualiti:0,rank:0,rdata:0,read:0,real:0,recal:0,receiv:0,recommend:0,red:0,redistribut:0,ref:0,ref_genom:0,refseq:0,regard:0,regress:0,regular:0,remot:0,remov:0,report:0,repres:0,requir:0,research:0,respect:0,respons:0,rna:0,roc:0,rocr:0,roughli:0,rscript:0,same:0,save:0,score:0,screen:0,script:0,search:0,see:0,select:0,sens:0,separ:0,sequenc:0,sequn:0,server:0,set:0,sever:0,should:0,show:0,sinc:0,size:0,small:0,softwar:0,solid:0,sort:0,sourceforg:0,speci:0,specifi:0,speed:0,standard:0,start:0,start_codon:0,stat:0,statu:0,step1:0,step2:0,step3:0,stop:0,stop_codon:0,strand:0,street:0,submit:0,subset:0,support:0,taa:0,tag:0,term:0,test:0,testcod:0,text:0,tga:0,tggaagaattgtcttgggccacacataaaatacagtaaccatagctgatg:0,them:0,thi:0,tool:0,top:0,total:0,transcript:0,translat:0,tsv:0,ttcgaaattgtactccctgctgttccgcaggacctccaccttcgccctca:0,two:0,txt:0,uci:0,ucsc:0,under:0,uniqu:0,univers:0,updat:0,upgrad:0,upload:0,uploada:0,uqcr10:0,url:0,usa:0,usag:0,use:0,used:0,useful:0,user:0,utr:0,valid:0,valu:0,variabl:0,verbos:0,version:0,wai:0,wang:0,want:0
,warn:0,warranti:0,web:0,webserv:[],wei:0,well:0,were:0,when:0,which:0,width:0,wiki:0,wikipedia:0,within:0,without:0,wlcb:0,work:0,write:0,xls:0,you:0,your:0,zebrafish:0,zv9:0},titles:["Release history"],titleterms:{Use:0,build:0,check:0,choos:0,command:0,comparison:0,comput:0,contact:0,cpat:0,cutoff:0,data:0,dataset:0,detect:0,evalu:0,exampl:0,file:0,hexam:0,histori:0,how:0,input:0,instal:0,introduct:0,licens:0,line:0,local:0,logit:0,model:0,onlin:0,option:0,orf:0,output:0,perform:0,pip3:0,prepar:0,prerequisit:0,refer:0,releas:0,result:0,run:0,tabl:0,train:0,using:0}}) | PypiClean |
/HTTPolice-0.9.0.tar.gz/HTTPolice-0.9.0/httpolice/known/__init__.py | import csv
from enum import Enum
import importlib
import io
import os
import pkgutil
from httpolice import structure
from httpolice.citation import Citation, RFC
class Knowledge:
    """Manages the data for one class from :mod:`httpolice.structure`."""

    # Load "raw" data from the CSV file. Lazily "process" it on the fly.
    # Allow "unprocessing" back into "raw" and dumping, for ``tools/iana.py``.

    def __init__(self, cls, name):
        self.cls = cls
        self.name = name
        self.filename = '%s.csv' % self.name
        self.keys_by_name = {}       # python identifier -> key instance
        self.raw_by_key = {}         # key instance -> raw CSV row dict
        self.processed_by_key = {}   # lazy cache filled by get()
        # The CSV ships inside the package; read it as package data.
        data = pkgutil.get_data(__name__, self.filename)
        buf = io.StringIO(data.decode('ascii'), newline=u'')
        reader = csv.DictReader(buf, lineterminator=u'\n')
        self.fieldnames = reader.fieldnames
        for raw in reader:
            key = self.cls(raw['key'])
            assert key not in self.raw_by_key
            self.raw_by_key[key] = raw
            # NOTE: rebinds the ``name`` parameter inside the loop; the
            # original value is no longer needed at this point.
            name = self.name_from_raw(key, raw)
            assert name not in self.keys_by_name
            self.keys_by_name[name] = key
        self.accessor = KnowledgeAccessor(self)

    def __contains__(self, key):
        return key in self.raw_by_key

    def __iter__(self):
        return iter(self.raw_by_key)

    def get(self, key):
        """Return the processed data for ``key`` ({} if unknown), caching
        the processed form on first access."""
        processed = self.processed_by_key.get(key)
        if processed is None:
            raw = self.raw_by_key.get(key)
            if raw is None:
                processed = {}
            else:
                processed = self.process(raw)
            self.processed_by_key[key] = processed
        return processed

    @classmethod
    def name_from_raw(cls, key, raw):
        """Derive a valid Python identifier for a key (used by the accessor)."""
        name = key if isinstance(key, str) else raw['title']
        name = (name.
                replace('-', ' ').replace(' ', '_').replace('/', '_').
                replace('+', '_').replace('.', '_').
                lower())
        # Python keywords can't be used as identifiers.
        if name in ['continue', 'for', 'from', 'return']:
            name = name + '_'
        return name

    def process(self, raw):
        """Convert a raw CSV row into typed values (ints, bools, Enum
        members, Citation/RFC objects)."""
        processed = {}
        for (field, value) in raw.items():
            if not value:
                continue
            elif field == 'key':
                processed[field] = self.cls(value)
            elif value.isdigit():
                processed[field] = int(value)
            elif value.lower() in [u'true', u'false']:
                processed[field] = (value.lower() == u'true')
            elif isinstance(getattr(self, field, None), type):  # Enum
                # A subclass declares an Enum class attribute named after
                # the field; look the value up as a member of that Enum.
                processed[field] = getattr(self, field)[value]
            else:
                processed[field] = value
        # Collapse the rfc_*/cite_* columns into a single citation object.
        if 'rfc' in processed:
            processed['citation'] = RFC(processed.pop('rfc'),
                                        processed.pop('rfc_section', None),
                                        processed.pop('rfc_appendix', None))
        if 'cite_url' in processed:
            processed['citation'] = Citation(processed.pop('cite_title', None),
                                             processed.pop('cite_url'))
        return processed

    def unprocess(self, processed, orig_raw):  # pragma: no cover
        """Inverse of process(): flatten typed values back into CSV strings."""
        # pylint: disable=unused-argument
        processed = processed.copy()
        cite = processed.pop('citation', None)
        if isinstance(cite, RFC):
            processed['rfc'] = cite.num
            processed['rfc_section'] = cite.section
            processed['rfc_appendix'] = cite.appendix
        elif isinstance(cite, Citation):
            processed['cite_url'] = cite.url
            processed['cite_title'] = cite.title
        raw = {}
        for (field, value) in processed.items():
            if hasattr(value, 'name'):  # Enum
                raw[field] = value.name
            elif value is not None:
                raw[field] = str(value)
        return raw

    def dump(self, new):  # pragma: no cover
        """Rewrite the package CSV from ``new`` processed entries (tooling
        helper; keeps the original raw row around for round-tripping)."""
        with io.open(os.path.join(os.path.dirname(__file__), self.filename),
                     'w', newline=u'') as f:
            writer = csv.DictWriter(f, self.fieldnames, lineterminator=u'\n')
            writer.writeheader()
            for processed in new:
                orig_raw = self.raw_by_key.get(processed['key'], {})
                writer.writerow(self.unprocess(processed, orig_raw))
class KnowledgeAccessor:
    """Attribute-style access to a knowledge base's keys.

    For example, ``h.accept`` returns ``FieldName(u'Accept')``.
    This makes code visually nicer and prevents typos.
    """

    def __init__(self, knowledge):
        self.knowledge = knowledge

    def __getattr__(self, name):
        mapping = self.knowledge.keys_by_name
        if name in mapping:
            return mapping[name]
        raise AttributeError(name)
class SyntaxKnowledge(Knowledge):
    """Knowledge that includes a reference to a grammar symbol."""

    def process(self, raw):
        processed = super(SyntaxKnowledge, self).process(raw)
        # Resolve the (module, symbol) pair into the actual grammar symbol.
        module_name = processed.pop('syntax_module', None)
        if module_name is not None:
            module = importlib.import_module(
                'httpolice.syntax.%s' % module_name)
            processed['syntax'] = getattr(module,
                                          processed.pop('syntax_symbol'))
        return processed

    def unprocess(self, processed, orig_raw):  # pragma: no cover
        # There is no reliable way to convert a reference to a `Symbol`
        # back into module name + variable name, nor do we need that,
        # so just preserve whatever we had originally.
        working = processed.copy()
        working.pop('syntax', None)
        working['syntax_module'] = orig_raw.get('syntax_module', '')
        working['syntax_symbol'] = orig_raw.get('syntax_symbol', '')
        return super(SyntaxKnowledge, self).unprocess(working, orig_raw)

    def syntax_for(self, key):
        """Return the grammar symbol for *key*, or ``None``."""
        return self.get(key).get('syntax')
class Argument(Enum):
    # Whether a thing (e.g. a directive) takes an argument.
    no = 0
    optional = 1
    required = 2
class ArgumentKnowledge(SyntaxKnowledge):
    """Knowledge about things that can have arguments with some syntax."""

    # Enum lookup table used by ``process`` for the ``argument`` column.
    argument = Argument

    def argument_required(self, key):
        """Whether *key* must always carry an argument."""
        return Argument.required is self.get(key).get('argument')

    def no_argument(self, key):
        """Whether *key* never carries an argument."""
        return Argument.no is self.get(key).get('argument')
class HeaderRule(Enum):
    # How a header's values combine across multiple field lines.
    single = 1
    multi = 2
    special = 3
class HeaderKnowledge(SyntaxKnowledge):
    """Knowledge about HTTP header fields."""

    # Enum lookup table used by ``process`` for the ``rule`` column.
    rule = HeaderRule

    def is_bad_for_connection(self, key):
        """Whether the field is flagged as bad in ``Connection``."""
        return self.get(key).get('bad_for_connection')

    def is_bad_for_trailer(self, key):
        """Whether the field is flagged as bad in a trailer."""
        return self.get(key).get('bad_for_trailer')

    def bad_quoted_delims(self, key):
        """Return the (quoted comma, quoted semicolon) badness flags."""
        entry = self.get(key)
        return (entry.get('quoted_comma_bad'),
                entry.get('quoted_semicolon_bad'))

    def is_for_request(self, key):
        """Whether the field is flagged for use in requests."""
        return self.get(key).get('for_request')

    def is_for_response(self, key):
        """Whether the field is flagged for use in responses."""
        return self.get(key).get('for_response')

    def is_precondition(self, key):
        """Whether the field is flagged as a precondition."""
        return self.get(key).get('precondition')

    def is_proactive_conneg(self, key):
        """Whether the field is flagged as proactive content negotiation."""
        return self.get(key).get('proactive_conneg')

    def is_representation_metadata(self, key):
        """Whether the field is flagged as representation metadata."""
        return self.get(key).get('representation_metadata')

    def rule_for(self, key):
        """Return the :class:`HeaderRule` for the field, if known."""
        return self.get(key).get('rule')

    def is_deprecated(self, key):
        """Whether the IANA status marks the field deprecated/obsoleted."""
        return self.get(key).get('iana_status') in (u'deprecated',
                                                    u'obsoleted')
# Singleton knowledge base for header fields; ``h`` is its shorthand
# accessor (e.g. ``h.accept``).
header = HeaderKnowledge(structure.FieldName, 'header')
h = header.accessor
class MethodKnowledge(Knowledge):
    """Knowledge about HTTP methods."""

    @classmethod
    def name_from_raw(cls, key, raw):
        # Method accessor names are upper-case (e.g. ``m.GET``).
        base = super(MethodKnowledge, cls).name_from_raw(key, raw)
        return base.upper()

    def defines_body(self, key):
        """Whether the method is flagged as defining a request body."""
        return self.get(key).get('defines_body')

    def is_cacheable(self, key):
        """Whether the method is flagged as cacheable."""
        return self.get(key).get('cacheable')

    def is_safe(self, key):
        """Whether the method is flagged as safe."""
        return self.get(key).get('safe')
# Singleton knowledge base for methods; ``m`` is its shorthand accessor.
method = MethodKnowledge(structure.Method, 'method')
m = method.accessor
class Cacheable(Enum):
    # Degree to which a status code's responses are cacheable.
    not_at_all = 0
    not_by_default = 1
    by_default = 2
class StatusCodeKnowledge(Knowledge):
    """Knowledge about HTTP status codes."""

    # Enum lookup table used by ``process`` for the ``cacheable`` column.
    cacheable = Cacheable

    def is_cacheable(self, key):
        """Return the :class:`Cacheable` value for the status code."""
        return self.get(key).get('cacheable')
# Singleton knowledge base for status codes; ``st`` is its accessor.
status_code = StatusCodeKnowledge(structure.StatusCode, 'status_code')
st = status_code.accessor
class ArgumentForm(Enum):
    # Preferred syntactic form of a directive's argument.
    token = 1
    quoted_string = 2
class CacheDirectiveKnowledge(ArgumentKnowledge):
    """Knowledge about ``Cache-Control`` directives."""

    # Enum lookup table used by ``process`` for the ``argument_form`` column.
    argument_form = ArgumentForm

    def token_preferred(self, key):
        """Whether the directive's argument should be a token."""
        return ArgumentForm.token is self.get(key).get('argument_form')

    def quoted_string_preferred(self, key):
        """Whether the directive's argument should be a quoted string."""
        return ArgumentForm.quoted_string is self.get(key).get('argument_form')

    def is_for_request(self, key):
        """Whether the directive is flagged for use in requests."""
        return self.get(key).get('for_request')

    def is_for_response(self, key):
        """Whether the directive is flagged for use in responses."""
        return self.get(key).get('for_response')
# Singleton knowledge bases for cache directives and Forwarded parameters,
# with their shorthand accessors.
cache_directive = CacheDirectiveKnowledge(structure.CacheDirective,
                                          'cache_directive')
cache = cache_directive.accessor
forwarded_param = ArgumentKnowledge(structure.ForwardedParam,
                                    'forwarded_param')
forwarded = forwarded_param.accessor
class MediaTypeKnowledge(Knowledge):
    """Knowledge about media (MIME) types."""

    def is_deprecated(self, key):
        """Whether the media type is flagged as deprecated."""
        return self.get(key).get('deprecated')

    def is_json(self, key):
        """Whether the type is JSON, by flag or by ``+json`` suffix."""
        flagged = self.get(key).get('is_json')
        return flagged or key.endswith(u'+json')

    def is_xml(self, key):
        """Whether the type is XML, by flag or by ``+xml`` suffix."""
        flagged = self.get(key).get('is_xml')
        return flagged or key.endswith(u'+xml')

    def is_multipart(self, key):
        """Whether the type is in the ``multipart/`` tree."""
        return key.startswith(u'multipart/')

    def is_patch(self, key):
        """Whether the type is flagged as a patch format."""
        return self.get(key).get('patch')
# Singleton knowledge base for media types; ``media`` is its accessor.
media_type = MediaTypeKnowledge(structure.MediaType, 'media_type')
media = media_type.accessor
class ProductKnowledge(Knowledge):
    """Knowledge about product tokens."""

    def is_library(self, key):
        """Whether the product is flagged as a library in the data."""
        return self.get(key).get('library')
# Remaining singleton knowledge bases and their shorthand accessors.
product = ProductKnowledge(structure.ProductName, 'product')
alt_svc_param = ArgumentKnowledge(structure.AltSvcParam, 'alt_svc_param')
altsvc = alt_svc_param.accessor
auth_scheme = Knowledge(structure.AuthScheme, 'auth_scheme')
auth = auth_scheme.accessor
content_coding = Knowledge(structure.ContentCoding, 'content_coding')
cc = content_coding.accessor
hsts_directive = ArgumentKnowledge(structure.HSTSDirective, 'hsts_directive')
hsts = hsts_directive.accessor
preference = ArgumentKnowledge(structure.Preference, 'preference')
prefer = preference.accessor
range_unit = Knowledge(structure.RangeUnit, 'range_unit')
unit = range_unit.accessor
transfer_coding = Knowledge(structure.TransferCoding, 'transfer_coding')
tc = transfer_coding.accessor
class UpgradeTokenKnowledge(Knowledge):
    """Knowledge about HTTP ``Upgrade`` tokens."""

    @classmethod
    def name_from_raw(cls, key, raw):
        # Can't use the parent logic because `websocket` vs. `WebSocket`.
        return key
# Singleton knowledge bases for upgrade tokens and warn codes.
upgrade_token = UpgradeTokenKnowledge(structure.UpgradeToken, 'upgrade_token')
upgrade = upgrade_token.accessor
warn_code = Knowledge(structure.WarnCode, 'warn_code')
warn = warn_code.accessor
def _collect():
    """Map every structure class to its ``(knowledge, accessor_name)``."""
    globals_ = globals()
    pairs = {}
    for name, value in list(globals_.items()):
        if isinstance(value, KnowledgeAccessor):
            pairs[value.knowledge.cls] = (value.knowledge, name)
    return pairs
# Lookup table driving ``get`` below.
classes = _collect()  # dict containing items like: ``Method: (method, 'm')``
def get(obj):
    """Return the processed knowledge entry for *obj*, keyed by its type."""
    for known_cls, (knowledge, _) in classes.items():
        if isinstance(obj, known_cls):
            return knowledge.get(obj)
    return None  # pragma: no cover
def citation(obj):
    """Return the citation recorded for *obj*, if any."""
    info = get(obj)
    return info.get('citation')
def title(obj, with_citation=False):
    """Return the title for *obj*, optionally suffixed with its citation."""
    info = get(obj)
    result = info.get('title')
    if with_citation:
        cite = info.get('citation')
        if cite and cite.title:
            # Append the citation title, or use it alone if there is no title.
            if result:
                result = u'%s (%s)' % (result, cite.title)
            else:
                result = cite.title
    return result
/APy2-0.1.tar.gz/APy-0.1/apy2/core/api.py | from .context import Context
from .function import Function
from .resource import Resource
from ..util.simple_match import smatch
class Api():
    """Registry of callable functions (and, eventually, resources),
    grouped into named contexts. The default context is ``"root"``.
    """

    def __init__(self):
        self._functions = {}  # "<context>.<name>" -> Function
        self._resources = {}  # reserved for future resource support
        self._context = []    # stack of context names; empty means "root"

    def context(self, name=None):
        """Return a `Context` helper bound to *name* (default: current)."""
        name = name or self.current_context()
        return Context(self, name)

    def current_context(self):
        """Return the innermost entered context name, or ``"root"``."""
        if not self._context:
            return "root"
        return self._context[-1]

    def enter_context(self, name):
        """Push *name* onto the context stack."""
        self._context.append(name)

    def exit_context(self, name=None):
        """Pop the innermost context; no-op when the stack is empty.

        *name* is accepted for symmetry with `enter_context` but ignored.
        """
        if self._context:
            self._context.pop(-1)

    def add(self, name=None, context=None):
        """Decorator that registers a function or `Resource` with the API.

        :param name: registered name (defaults to the callable's own name).
        :param context: context to register under (defaults to current).
        :raises TypeError: when the decorated object is not supported.
        """
        from inspect import isfunction

        def decorator(x):
            if isfunction(x) or isinstance(x, Function):
                y = self._add_function(x, name, context)
            elif isinstance(x, Resource):
                y = self._add_resource(x, name, context)
            else:
                # Fixed: meaningful error instead of the "TODO" placeholder.
                # TypeError is a subclass of Exception, so existing broad
                # handlers still catch it.
                raise TypeError(
                    "Api.add() expects a function, Function or Resource, "
                    "got %r" % (type(x).__name__,))
            return y
        return decorator

    def _add_function(self, f, name, context):
        """Wrap *f* in a `Function` and register it under its context key."""
        if hasattr(f, "_func"):
            f = f._func  # unwrap an already-wrapped Function
        y = Function(f)
        y.name = name or y.name
        y.context = context or self.current_context()
        y.key = "%s.%s" % (str(y.context), str(y.name))
        self._functions[y.key] = y
        return y

    def _add_resource(self, r, name, context=None):
        # Fixed: the call site in `add` passes (resource, name, context),
        # but the old signature only took (r, name) and raised TypeError
        # before ever reaching NotImplementedError.
        raise NotImplementedError("add_resource")

    def find_functions(self, name="*", context=None):
        """Return registered functions whose name/context match the globs."""
        context = context or self.current_context()
        results = []
        for foo in self._functions.values():
            if smatch(foo.name, name) and smatch(foo.context, context):
                results.append(foo)
        return results

    def get_function(self, name, context=None):
        """Return the function registered as (*context*, *name*) or None."""
        context = context or self.current_context()
        key = "%s.%s" % (str(context), str(name))
        return self._functions.get(key)
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/types/input_stream/audio_piped.py | from typing import Dict
from typing import Optional
from ...ffprobe import FFprobe
from .audio_parameters import AudioParameters
from .input_audio_stream import InputAudioStream
from .input_stream import InputStream
class AudioPiped(InputStream):
    """Stream descriptor for an audio-only source piped through a fifo.

    Attributes:
        ffmpeg_parameters (``str``):
            FFmpeg additional parameters
        raw_headers (``Dict[str, str]``):
            Headers of the http connection
        stream_audio (:obj:`~metacalls.types.InputAudioStream()`):
            Input Audio Stream Descriptor

    Parameters:
        path (``str``):
            The audio file path
        audio_parameters (:obj:`~metacalls.types.AudioParameters()`):
            The audio parameters of the stream, can be used also
            :obj:`~metacalls.types.HighQualityAudio()`,
            :obj:`~metacalls.types.MediumQualityAudio()` or
            :obj:`~metacalls.types.LowQualityAudio()`
        headers (``Dict[str, str]``, **optional**):
            Headers of the http connection
        additional_ffmpeg_parameters (``str``, **optional**):
            FFmpeg additional parameters
    """

    def __init__(
        self,
        path: str,
        audio_parameters: AudioParameters = AudioParameters(),
        headers: Optional[Dict[str, str]] = None,
        additional_ffmpeg_parameters: str = '',
    ):
        self._path = path
        self.ffmpeg_parameters = additional_ffmpeg_parameters
        self.raw_headers = headers
        super().__init__(
            InputAudioStream(f'fifo://{path}', audio_parameters),
        )

    @property
    def headers(self):
        """HTTP headers rendered in the form FFmpeg expects."""
        return FFprobe.ffmpeg_headers(self.raw_headers)

    async def check_pipe(self):
        """Probe the source and record whether headers are needed."""
        probe_result = await FFprobe.check_file(
            self._path,
            needed_audio=True,
            needed_video=False,
            needed_image=False,
            headers=self.raw_headers,
        )
        self.stream_audio.header_enabled = probe_result
/CDS-1.0.1.tar.gz/CDS-1.0.1/cds/modules/maintenance/subformats.py | from __future__ import absolute_import, print_function
from celery import chain
from .tasks import MaintenanceTranscodeVideoTask
from cds_sorenson.api import get_all_distinct_qualities, can_be_transcoded
from cds.modules.deposit.api import deposit_video_resolver
from cds.modules.records.api import CDSVideosFilesIterator
from cds.modules.records.resolver import record_resolver
# Accepted values for the ``id_type`` parameter of the functions below.
id_types = ['recid', 'depid']
def create_all_missing_subformats(id_type, id_value):
    """Create all missing subformats.

    :param id_type: one of ``id_types`` ('recid' or 'depid').
    :param id_value: the record/deposit identifier.
    :returns: list of qualities scheduled for transcoding.
    """
    _validate(id_type=id_type)
    video_deposit, dep_uuid = _resolve_deposit(id_type, id_value)
    master, aspect_ratio, width, height = _get_master_video(video_deposit)
    existing = [
        subformat['tags']['preset_quality']
        for subformat in CDSVideosFilesIterator.get_video_subformats(master)
    ]
    missing = set(get_all_distinct_qualities()) - set(existing)
    to_transcode = [
        quality for quality in missing
        if can_be_transcoded(quality, aspect_ratio, width, height)
    ]
    # sequential (and immutable) transcoding to avoid MergeConflicts on bucket
    if to_transcode:
        chain([
            MaintenanceTranscodeVideoTask().si(
                version_id=master['version_id'],
                preset_quality=quality,
                deposit_id=dep_uuid,
            )
            for quality in to_transcode
        ]).apply_async()
    return to_transcode
def create_subformat(id_type, id_value, quality):
    """Recreate a single subformat of the given quality.

    :returns: the subformat descriptor when transcodable, else a falsy value.
    """
    _validate(id_type=id_type, quality=quality)
    video_deposit, dep_uuid = _resolve_deposit(id_type, id_value)
    master, aspect_ratio, width, height = _get_master_video(video_deposit)
    subformat = can_be_transcoded(quality, aspect_ratio, width, height)
    if subformat:
        task = MaintenanceTranscodeVideoTask().s(
            version_id=master['version_id'],
            preset_quality=subformat['quality'],
            deposit_id=dep_uuid,
        )
        task.apply_async()
    return subformat
def create_all_subformats(id_type, id_value):
    """Recreate every subformat that can be transcoded for this video.

    :returns: list of qualities scheduled for transcoding.
    """
    _validate(id_type=id_type)
    video_deposit, dep_uuid = _resolve_deposit(id_type, id_value)
    master, aspect_ratio, width, height = _get_master_video(video_deposit)
    to_transcode = [
        quality for quality in get_all_distinct_qualities()
        if can_be_transcoded(quality, aspect_ratio, width, height)
    ]
    # sequential (and immutable) transcoding to avoid MergeConflicts on bucket
    if to_transcode:
        chain([
            MaintenanceTranscodeVideoTask().si(
                version_id=master['version_id'],
                preset_quality=quality,
                deposit_id=dep_uuid,
            )
            for quality in to_transcode
        ]).apply_async()
    return to_transcode
def _resolve_deposit(id_type, id_value):
    """Return ``(deposit_video, deposit_uuid)`` for a record or deposit id."""
    if id_type == 'recid':
        # Resolve the record first to find its linked deposit.
        _, record = record_resolver.resolve(id_value)
        dep_uuid = record['_deposit']['id']
    else:
        dep_uuid = id_value
    return deposit_video_resolver(dep_uuid), dep_uuid
def _get_master_video(video_deposit):
    """Return ``(master_file, display_aspect_ratio, width, height)``.

    :raises Exception: when the deposit has no master video file.
    """
    master = CDSVideosFilesIterator.get_master_video_file(video_deposit)
    if not master:
        raise Exception("No master video found for the given record")
    tags = master['tags']
    return (master, tags['display_aspect_ratio'],
            int(tags['width']), int(tags['height']))
def _validate(id_type=None, quality=None):
    """Validate input parameters.

    :param id_type: identifier type; must be one of ``id_types``.
    :param quality: optional preset quality; when provided, it must be one
        of the known distinct qualities.
    :raises Exception: if either parameter is invalid.
    """
    if id_type not in id_types:
        raise Exception('`id_type` param must be one of {0}'.format(id_types))
    all_possible_qualities = get_all_distinct_qualities()
    # Only check the quality when one was supplied; id_type-only calls
    # (e.g. from create_all_subformats) skip this branch.
    # NOTE: removed stray "| PypiClean" residue that followed the raise
    # expression and would have caused a NameError at runtime.
    if quality and quality not in all_possible_qualities:
        raise Exception('`quality` param must be one of {0}'.format(
            all_possible_qualities))
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/jax/output/SVG/fonts/Asana-Math/SansSerif/Regular/Main.js | MathJax.OutputJax.SVG.FONTDATA.FONTS.AsanaMathJax_SansSerif={directory:"SansSerif/Regular",family:"AsanaMathJax_SansSerif",id:"ASANAMATHSANSSERIF",32:[0,0,249,0,0,""],120224:[694,0,665,28,638,"377 694l261 -694h-92l-76 204h-291l-77 -204h-74l261 694h88zM448 260l-91 257c-23 66 -30 91 -32 105h-1c-7 -37 -38 -122 -59 -181l-65 -181h248"],120225:[694,0,665,97,610,"97 694h237c142 0 249 -82 249 -174c0 -76 -75 -134 -166 -157c106 -17 193 -88 193 -175c0 -98 -105 -188 -249 -188h-264v694zM180 391h116c138 0 209 68 209 128c0 66 -88 119 -199 119h-126v-247zM180 56h153c108 0 196 58 196 133c0 80 -98 141 -204 141h-145v-274"],120226:[716,22,637,67,588,"577 681l-15 -81c-42 27 -90 52 -169 52c-139 0 -237 -142 -237 -305c0 -153 89 -305 240 -305c69 0 114 10 186 61l6 -69c-71 -43 -110 -56 -193 -56c-195 0 -328 176 -328 369c0 196 135 369 327 369c66 0 120 -11 183 -35"],120227:[694,0,721,96,665,"96 694h262c172 0 307 -159 307 -353c0 -189 -136 -341 -306 -341h-263v694zM182 56h153c140 0 244 115 244 285c0 176 -104 297 -244 297h-153v-582"],120228:[691,0,596,94,554,"513 329h-330v-258h371v-71h-460v691h446v-65h-357v-236h330v-61"],120229:[691,0,568,94,526,"485 315h-302v-315h-89v691h432v-65h-343v-250h302v-61"],120230:[716,22,665,67,599,"599 299v-272c-31 -15 -110 -49 -208 -49c-181 0 -324 165 -324 369c0 199 138 369 326 369c94 0 144 -24 198 -49l-15 -81c-62 51 -120 66 -184 66c-136 0 -236 -139 -236 -305s100 -305 236 -305c25 0 73 3 129 20v176h-132v61h210"],120231:[694,0,707,94,613,"613 694v-694h-89v330h-341v-330h-89v694h89v-303h341v303h89"],120232:[694,0,276,94,183,"183 694v-694h-89v694h89"],120233:[694,22,471,42,388,"388 694v-545c0 -113 -72 -171 -175 -171c-84 0 -147 30 -171 42l14 75c34 -38 86 -56 128 -56c118 0 118 96 118 113v542h86"],120234:[694,0,693,96,651,"369 420l282 -420h-92l-245 364l-133 -137v-227h-85v694h85v-371l363 371h93"],120235:[694,0,540,94,499,"183 694v-628c89 0 227 -1 
316 0v-66h-405v694h89"],120236:[694,0,874,100,774,"530 355l130 339h114v-694h-78v620l-1 1c-5 -24 -49 -145 -90 -251l-133 -348h-73l-148 389c-26 68 -66 177 -72 209h-1v-620h-78v694h113l191 -501c15 -42 27 -79 32 -100h1c11 47 58 171 93 262"],120237:[694,0,707,96,611,"220 694l245 -486c6 -11 48 -96 67 -137h1v623h78v-694h-124l-245 486c-6 11 -48 96 -67 137h-1v-623h-78v694h124"],120238:[716,22,735,56,679,"679 345c0 -210 -145 -367 -312 -367c-163 0 -311 155 -311 367c0 208 142 371 312 371c166 0 311 -161 311 -371zM368 43c114 0 222 122 222 317c0 179 -107 292 -223 292c-112 0 -222 -109 -222 -292c0 -192 105 -317 223 -317"],120239:[694,0,637,96,582,"352 289h-167v-289h-89v694h249c138 0 237 -95 237 -202c0 -112 -106 -203 -230 -203zM323 638h-141v-291h141c115 0 178 66 178 145c0 85 -68 146 -178 146"],120240:[716,125,735,56,679,"533 33l119 -158h-103l-86 121c-43 -17 -80 -18 -96 -18c-163 0 -311 155 -311 367c0 208 142 371 312 371c166 0 311 -161 311 -371c0 -105 -37 -227 -146 -312zM424 51l-101 143h89l79 -105c90 82 102 191 102 256c0 185 -106 307 -226 307c-116 0 -225 -120 -225 -307 c0 -186 108 -303 226 -303c29 0 55 9 56 9"],120241:[694,0,644,96,617,"419 326l198 -326h-89l-188 317h-158v-317h-86v694h241c143 0 252 -87 252 -186c0 -81 -67 -152 -170 -182zM182 378h144c118 0 182 59 182 130s-66 130 -182 130h-144v-260"],120242:[716,22,554,44,499,"465 666l-15 -80c-37 31 -85 64 -169 64c-92 0 -146 -60 -146 -119c0 -102 121 -119 139 -123c92 -23 125 -32 172 -82c22 -24 53 -72 53 -137c0 -110 -92 -211 -224 -211c-63 0 -143 14 -231 69l15 81c84 -71 174 -81 215 -81c91 0 146 66 146 132c0 43 -22 76 -42 93 c-28 25 -38 28 -123 49c-59 14 -103 26 -145 71c-40 42 -54 84 -54 127c0 99 87 197 226 197c73 0 128 -17 183 -50"],120243:[688,0,679,36,644,"385 624v-624h-89v624h-90c-12 0 -24 -1 -36 -1h-134v65h608v-65h-134c-12 0 -24 1 -36 1h-89"],120244:[694,22,686,94,593,"593 694v-461c0 -145 -113 -255 -247 -255c-135 0 -252 109 -252 255v461h89v-462c0 -135 82 -198 164 -198c84 0 168 66 168 198v462h78"],120245:[694,0,665,14,652,"652 694l-273 
-694h-92l-273 694h93l173 -448c9 -24 52 -134 60 -174h1c9 46 84 239 100 280l133 342h78"],120246:[694,0,943,14,929,"929 694l-203 -694h-87l-115 401c-27 95 -51 190 -55 220h-1c-6 -52 -49 -201 -70 -274l-100 -347h-81l-203 694h86l128 -449c8 -33 35 -131 39 -171h1c5 56 55 231 77 308l90 312h81l98 -341c66 -228 72 -280 72 -281h1c5 51 53 219 79 308l91 314h72"],120247:[694,0,665,14,652,"369 366l283 -366h-104l-222 306l-216 -306h-96l269 366l-255 328h104l194 -261l188 261h96"],120248:[694,0,665,3,663,"663 694l-287 -417v-277h-86v278l-287 416h105l198 -290c17 -24 17 -26 36 -57l61 95c7 11 12 19 17 26l155 226h88"],120249:[694,0,610,56,560,"554 653l-385 -589h32c87 0 176 2 263 2h96v-66h-504v43l385 590h-136c-12 0 -24 -1 -36 -1h-200v62h485v-41"],120250:[461,11,479,44,399,"399 289v-289h-75v36c-58 -45 -152 -47 -166 -47c-87 0 -114 81 -114 124c0 88 127 130 277 132v43c0 74 -40 114 -87 114c-49 0 -99 -13 -151 -50l-6 66c47 27 92 43 157 43c92 0 165 -70 165 -172zM321 132v62c-43 -2 -205 -9 -205 -79c0 -42 45 -65 93 -65 c11 0 49 1 78 19c34 19 34 44 34 63"],120251:[694,11,515,82,480,"157 694v-298c60 49 122 59 161 59c94 0 162 -108 162 -232c0 -134 -86 -234 -186 -234c-67 0 -110 35 -134 57v-46h-78v694h75zM160 335v-221c25 -46 58 -64 97 -64c61 0 145 43 145 173c0 128 -76 171 -135 171c-55 0 -91 -35 -107 -59"],120252:[461,11,443,36,415,"410 418l-12 -65c-55 36 -89 44 -141 44c-106 0 -143 -96 -143 -174c0 -98 56 -170 139 -170c51 0 105 14 156 54l6 -67c-54 -37 -109 -51 -163 -51c-125 0 -216 109 -216 233c0 118 81 239 220 239c61 0 95 -9 154 -43"],120253:[694,11,515,36,434,"434 694v-694h-78v52c-56 -54 -115 -63 -148 -63c-95 0 -172 102 -172 233c0 129 78 233 177 233c37 0 93 -11 146 -55v294h75zM356 139v200c-22 32 -52 55 -97 55c-59 0 -145 -41 -145 -173c0 -124 73 -171 135 -171c26 0 51 8 74 28c32 29 33 47 33 61"],120254:[461,11,443,35,414,"414 219h-309c3 -98 67 -169 147 -169c23 0 89 4 152 56l6 -65c-39 -28 -96 -52 -159 -52c-120 0 -216 105 -216 237c0 129 90 235 201 235c71 0 178 -40 178 -242zM110 274h247c-13 92 -69 126 -121 126c-57 0 
-110 -50 -126 -126"],120255:[705,0,304,27,347,"176 386v-386h-75v386h-74v58h74v90c0 100 72 171 166 171c37 0 68 -8 80 -11v-67c-34 15 -61 17 -81 17c-26 0 -93 -7 -93 -81v-119h113v-58h-110"],120256:[455,206,499,28,485,"352 391l-4 1c11 -15 33 -52 33 -100c0 -89 -70 -163 -160 -163c-37 0 -70 14 -91 29c-7 -9 -11 -25 -11 -36c0 -53 45 -53 58 -53h88c39 0 206 0 206 -139c0 -77 -99 -136 -222 -136c-122 0 -221 60 -221 135c0 26 12 72 64 103c-17 21 -26 50 -26 76c0 5 0 44 29 85 c-20 26 -33 62 -33 99c0 91 73 163 159 163c40 0 74 -15 95 -32c30 13 88 32 158 32l11 -63c-2 0 -41 3 -59 3c-24 0 -52 -4 -74 -4zM221 186c43 0 90 28 90 106s-47 106 -90 106c-37 0 -89 -23 -89 -106s52 -106 89 -106zM266 -3h-90c-78 0 -78 -65 -78 -68 c0 -47 71 -78 152 -78c82 0 151 33 151 77c0 69 -113 69 -135 69"],120257:[694,0,515,81,435,"435 298v-298h-78v291c0 32 -1 103 -97 103c-76 0 -101 -83 -101 -149v-245h-78v694h75v-311c32 41 80 72 142 72c122 0 137 -91 137 -157"],120258:[680,0,237,74,163,"163 680v-89h-89v89h89zM156 444v-444h-75v444h75"],120259:[680,205,265,-61,184,"184 680v-89h-89v89h89zM-61 -171l15 61c22 -20 48 -31 77 -31c28 0 78 9 78 73v512h75v-503c0 -94 -62 -146 -125 -146c-26 0 -72 6 -120 34"],120260:[694,0,487,84,471,"281 272l190 -272h-82l-156 224l-80 -83v-141h-69v694h72v-458l203 208h90"],120261:[694,0,237,81,156,"156 694v-694h-75v694h75"],120262:[455,0,793,81,713,"713 298v-298h-78v291c0 38 -5 103 -98 103c-74 0 -101 -81 -101 -149v-245h-78v291c0 38 -5 103 -98 103c-74 0 -101 -81 -101 -149v-245h-78v450h72v-72c27 36 74 77 146 77c32 0 102 -6 128 -82c37 54 89 82 149 82c121 0 137 -90 137 -157"],120263:[455,0,515,81,435,"435 298v-298h-78v291c0 32 -1 103 -97 103c-76 0 -101 -83 -101 -149v-245h-78v450h72v-71c6 8 55 76 145 76c122 0 137 -91 137 -157"],120264:[461,11,499,30,469,"469 220c0 -130 -100 -231 -220 -231c-117 0 -219 99 -219 231c0 131 97 241 220 241c119 0 219 -107 219 -241zM249 53c74 0 142 59 142 177s-72 170 -142 170c-65 0 -141 -49 -141 -170s72 -177 141 -177"],120265:[455,194,515,82,480,"160 46v-240h-78v638h75v-49c46 39 
104 60 164 60c96 0 159 -112 159 -232c0 -136 -88 -234 -185 -234c-60 0 -104 28 -135 57zM160 333v-201c0 -18 0 -20 10 -35c20 -27 47 -47 86 -47c75 0 146 70 146 173c0 92 -60 168 -136 168c-43 0 -82 -23 -106 -58"],120266:[455,194,515,36,434,"434 455v-649h-78v244c-42 -38 -95 -61 -151 -61c-96 0 -169 105 -169 232c0 135 86 234 186 234c78 0 127 -55 137 -68v68h75zM359 140v140c0 40 -31 111 -100 111c-73 0 -145 -68 -145 -170c0 -93 60 -171 136 -171c36 0 73 17 99 57c10 13 10 15 10 33"],120267:[455,0,340,82,327,"157 214v-214h-75v450h70v-91c31 46 86 95 175 96v-67c-98 -2 -170 -74 -170 -174"],120268:[461,11,382,28,360,"360 128c0 -53 -26 -139 -164 -139c-81 0 -136 28 -168 44l12 68c18 -12 74 -49 155 -49c15 0 94 0 94 69c0 53 -58 65 -94 72c-55 11 -77 15 -108 37c-25 19 -54 51 -54 100c0 131 141 131 166 131c51 0 92 -8 144 -32l-12 -65c-57 34 -110 38 -142 38 c-13 0 -85 0 -85 -62c0 -49 46 -59 101 -70c50 -9 77 -15 116 -50c2 -3 39 -37 39 -92"],120269:[571,11,360,19,332,"175 386v-250c0 -53 14 -83 51 -83c32 0 64 12 90 33l16 -59c-25 -13 -76 -38 -146 -38c-69 0 -83 70 -83 130v267h-84v58h87v127h69v-127h141v-58h-141"],120270:[444,11,515,81,435,"435 444v-444h-75v48c-49 -43 -120 -59 -173 -59c-83 0 -106 48 -106 120v335h78v-331c0 -43 7 -69 78 -69c64 0 120 35 120 110v290h78"],120271:[444,0,460,14,446,"446 444l-172 -444h-88l-172 444h78l139 -391h1c-4 -6 87 246 139 391h75"],120272:[444,0,682,14,668,"230 0h-79l-137 444h76l106 -389h1l106 389h70l111 -390h1l110 390h73l-137 -444h-88l-105 388"],120273:[444,0,460,0,460,"260 229l200 -229h-83l-150 188l-145 -188h-82l194 229l-188 215h83l138 -165l134 165h82"],120274:[444,205,460,14,446,"446 444l-218 -535c-41 -102 -78 -114 -113 -114c-22 0 -50 3 -72 8l-6 65c30 -12 62 -16 78 -16c36 0 49 35 93 148l-194 444h79l145 -387l133 387h75"],120275:[444,0,433,28,402,"400 405l-266 -344h268v-61h-374v41l267 345h-253v58h358v-39"],120276:[694,0,732,42,690,"474 651l211 -604c5 -13 5 -15 5 -20c0 -27 -25 -27 -39 -27h-54c-48 0 -55 20 -67 55l-34 96h-277l-39 -111c-14 -40 -45 -40 -63 -40h-36c-14 0 -39 0 
-39 27c0 5 0 7 5 20l211 604c15 43 44 43 64 43h88c20 0 49 0 64 -43zM249 237h217l-108 330h-1"],120277:[694,0,732,91,671,"139 694h246c42 0 255 0 255 -180c0 -78 -53 -130 -145 -151c37 -6 176 -28 176 -169c0 -181 -184 -194 -258 -194h-274c-37 0 -48 12 -48 48v598c0 37 12 48 48 48zM232 397h116c25 0 156 0 156 114c0 94 -116 97 -155 97h-117v-211zM232 86h145c48 0 155 7 155 110 c0 129 -139 129 -174 129h-126v-239"],120278:[716,22,701,61,647,"604 550c-20 0 -53 66 -176 66c-206 0 -220 -175 -220 -269c0 -144 42 -269 224 -269c87 0 122 16 177 56c13 11 15 11 19 11c10 0 12 -10 13 -18c1 -14 6 -66 6 -78c0 -10 -1 -11 -28 -26c-65 -35 -106 -45 -189 -45c-233 0 -369 111 -369 369c0 234 113 369 368 369 c86 0 135 -14 177 -29c28 -9 29 -10 29 -19c0 -3 0 -5 -2 -13l-15 -87c-3 -15 -8 -18 -14 -18"],120279:[694,0,793,91,732,"403 694c163 0 329 -70 329 -352c0 -196 -85 -342 -329 -342h-264c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h264zM235 86h129c213 0 224 143 224 256c0 112 -9 266 -225 266h-128v-522"],120280:[691,0,640,91,595,"238 311v-201h301c33 0 56 0 56 -48v-14c0 -35 -10 -48 -48 -48h-408c-37 0 -48 12 -48 48v595c0 37 12 48 48 48h393c41 0 48 -16 48 -48v-8c0 -48 -23 -48 -58 -48h-284v-182h263c35 0 48 -10 48 -47c0 -40 -17 -47 -48 -47h-263"],120281:[691,0,610,91,564,"238 299v-251c0 -35 -10 -48 -48 -48h-51c-37 0 -48 12 -48 48v595c0 37 12 48 48 48h377c41 0 48 -16 48 -48v-8c0 -48 -23 -48 -57 -48h-269v-194h232c35 0 48 -10 48 -47c0 -40 -17 -47 -48 -47h-232"],120282:[716,22,732,61,659,"618 536c-19 0 -56 80 -190 80c-205 0 -220 -173 -220 -269c0 -108 21 -269 221 -269c28 0 66 4 94 11v141h-69c-26 0 -27 1 -27 27v40c0 26 1 27 27 27h178c26 0 27 -1 27 -27v-243c0 -18 0 -20 -3 -23c-5 -4 -99 -53 -231 -53c-240 0 -364 125 -364 369 c0 237 116 369 366 369c79 0 131 -13 193 -40c22 -10 29 -12 29 -22l-17 -100c-3 -15 -8 -18 -14 -18"],120283:[694,0,793,91,702,"702 646v-598c0 -35 -10 -48 -48 -48h-51c-41 0 -48 16 -48 48v266h-317v-266c0 -35 -10 -48 -48 -48h-51c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h51c41 0 48 -16 48 -48v-238h317v238c0 35 10 
48 48 48h51c41 0 48 -16 48 -48"],120284:[694,0,329,92,239,"239 646v-598c0 -35 -10 -48 -48 -48h-51c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h51c41 0 48 -16 48 -48"],120285:[694,22,518,46,427,"427 646v-497c0 -117 -85 -171 -192 -171c-45 0 -105 9 -164 35c-19 8 -25 11 -25 20l15 88c2 10 4 19 14 19c6 0 8 -2 16 -12c14 -18 46 -50 104 -50c88 0 88 56 88 77v491c0 35 10 48 48 48h48c41 0 48 -16 48 -48"],120286:[694,0,762,91,701,"421 405l280 -376c0 -29 -14 -29 -35 -29h-60c-19 0 -34 0 -50 21l-224 300l-111 -105v-168c0 -35 -10 -48 -48 -48h-34c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h34c41 0 48 -16 48 -48v-276l322 304c21 20 41 20 58 20h61c14 0 34 0 34 -21c0 -4 0 -10 -12 -21"],120287:[694,0,579,91,534,"238 105h249c46 0 47 -25 47 -48v-9c0 -35 -10 -48 -48 -48h-347c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h51c41 0 48 -16 48 -48v-541"],120288:[694,0,976,91,886,"687 651c17 41 42 43 67 43h84c41 0 48 -16 48 -48v-598c0 -35 -10 -48 -48 -48h-27c-41 0 -48 16 -48 48v517h-1c-7 -26 -61 -162 -71 -186l-128 -314c-18 -43 -42 -43 -76 -43s-58 0 -76 43c-24 59 -185 455 -196 500h-1v-517c0 -35 -10 -48 -48 -48h-27 c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h83c18 0 50 0 67 -41l199 -504h1c8 35 177 451 198 502"],120289:[694,0,793,91,702,"303 654l275 -533h1v525c0 35 10 48 48 48h27c41 0 48 -16 48 -48v-598c0 -35 -10 -48 -48 -48h-93c-20 0 -50 0 -71 40l-275 533h-1v-525c0 -35 -10 -48 -48 -48h-27c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h93c20 0 50 0 71 -40"],120290:[716,22,793,61,732,"732 342c0 -205 -78 -364 -336 -364c-270 0 -335 177 -335 364c0 183 59 374 336 374c262 0 335 -173 335 -374zM397 58c188 0 188 211 188 300c0 90 0 280 -189 280c-188 0 -188 -192 -188 -280c0 -91 0 -300 189 -300"],120291:[694,0,701,91,641,"238 268v-220c0 -35 -10 -48 -48 -48h-51c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h250c154 0 252 -65 252 -216c0 -144 -94 -210 -251 -210h-152zM358 608h-123v-261h122c134 0 145 64 145 131c0 70 -13 130 -144 130"],120292:[716,106,793,61,732,"562 -106c-39 0 -36 2 -94 89c-32 -5 -55 -5 -72 -5c-270 0 -335 177 -335 364c0 183 59 
374 336 374c262 0 335 -173 335 -374c0 -72 -7 -237 -134 -314l65 -84c13 -18 15 -20 15 -28c0 -22 -23 -22 -36 -22h-80zM416 58c0 8 -70 96 -70 113c0 23 22 23 36 23h61 c20 0 24 0 34 -11l55 -71c56 64 56 168 56 231c0 89 0 295 -192 295c-191 0 -191 -207 -191 -295c0 -89 0 -287 191 -287c4 0 20 0 20 2"],120293:[694,0,701,91,653,"483 320l170 -290c0 -30 -17 -30 -37 -30h-59c-22 0 -33 0 -47 24l-164 287h-111v-263c0 -35 -10 -48 -48 -48h-48c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h240c45 0 262 0 262 -197c0 -124 -95 -162 -158 -177zM235 383h130c123 0 137 54 137 113 c0 66 -21 112 -137 112h-130v-225"],120294:[716,22,610,48,549,"481 536c-18 0 -49 77 -176 77c-34 0 -119 -2 -119 -87c0 -57 62 -80 84 -85l96 -22c100 -23 183 -107 183 -219c0 -155 -94 -222 -244 -222c-26 0 -80 2 -147 23c-5 2 -112 39 -109 59l17 100c3 15 8 18 14 18c4 0 6 0 18 -12c50 -45 123 -79 207 -79c33 0 119 3 119 97 c0 86 -94 98 -131 107c-78 18 -125 28 -176 80c-19 20 -56 66 -56 137c0 187 155 208 244 208c51 0 110 -5 181 -40c25 -13 26 -14 26 -22c-4 -33 -12 -72 -17 -100c-2 -15 -8 -18 -14 -18"],120295:[688,0,732,40,692,"293 587l-201 -3c-28 0 -52 0 -52 48v8c0 37 12 48 48 48h556c41 0 48 -16 48 -48v-8c0 -48 -24 -48 -52 -48c-44 0 -156 3 -200 3v-539c0 -35 -10 -48 -48 -48h-51c-37 0 -48 12 -48 48v539"],120296:[694,22,762,91,672,"672 646v-417c0 -217 -174 -251 -288 -251c-127 0 -293 41 -293 252v416c0 37 12 48 48 48h51c41 0 48 -16 48 -48v-420c0 -103 31 -162 147 -162c122 0 151 65 151 163v419c0 35 10 48 48 48h40c41 0 48 -16 48 -48"],120297:[694,0,732,27,705,"700 649l-223 -605c-16 -44 -44 -44 -65 -44h-92c-20 0 -49 0 -64 42l-224 607c-4 12 -5 13 -5 18c0 27 25 27 39 27h54c21 0 49 0 65 -43l189 -526l187 525c16 44 45 44 66 44h39c14 0 39 0 39 -27c0 -5 -1 -6 -5 -18"],120298:[694,0,1037,24,1014,"846 42c-11 -42 -39 -42 -60 -42h-72c-20 0 -48 0 -59 42l-138 525h-1c-2 -21 -39 -187 -137 -521c-12 -46 -40 -46 -61 -46h-66c-21 0 -49 0 -60 42l-164 604c-4 14 -4 18 -4 19c0 29 27 29 41 29h34c19 0 48 0 60 -43c17 -63 127 -471 135 -523h1l134 522 c12 44 40 44 61 44h66c29 0 49 
-5 60 -44l137 -522h1l138 522c12 44 40 44 60 44h21c14 0 41 0 41 -29"],120299:[694,0,732,37,694,"433 368l252 -326c4 -5 9 -13 9 -20c0 -22 -23 -22 -36 -22h-83c-19 0 -35 0 -50 22l-167 240l-161 -238c-12 -18 -16 -24 -49 -24h-75c-14 0 -36 0 -36 23c0 7 237 333 246 345l-219 284c-10 12 -10 14 -10 20c0 22 21 22 35 22h83c21 0 34 0 51 -23l135 -185l131 187 c15 21 32 21 49 21h75c14 0 36 0 36 -23c0 -7 -207 -291 -216 -303"],120300:[694,0,732,24,708,"699 650l-261 -373v-230c0 -37 -13 -47 -48 -47h-48c-37 0 -48 12 -48 48v230l-261 373c-6 8 -9 13 -9 20c0 23 22 23 36 23h66c36 0 58 -8 81 -41l168 -250l169 253c27 38 58 38 79 38h49c14 0 36 0 36 -23c0 -7 -5 -16 -9 -21"],120301:[694,0,671,61,616,"600 607l-360 -502c92 2 231 0 319 0c34 0 57 0 57 -48v-9c0 -35 -10 -48 -48 -48h-459c-37 0 -48 12 -48 48v8c0 20 0 22 10 35l362 505h-296c-40 0 -61 0 -61 49c0 32 6 49 48 49h438c47 0 48 -23 48 -50c0 -22 0 -24 -10 -37"],120302:[475,11,524,31,464,"258 409c-113 0 -151 -73 -166 -73c-7 0 -12 5 -13 11c-1 8 -7 66 -7 73c0 11 5 14 26 23c67 28 117 32 164 32c50 0 202 -1 202 -159v-268c0 -35 -10 -48 -48 -48h-37c-41 0 -48 16 -48 48v20c-47 -69 -122 -79 -156 -79c-88 0 -144 74 -144 144c0 147 262 149 297 149 v35c0 59 -16 92 -70 92zM328 154v87c-28 -1 -167 -8 -167 -106c0 -38 27 -74 69 -74c3 0 98 0 98 93"],120303:[694,11,560,61,523,"194 646v-233c9 10 59 56 141 56c188 0 188 -190 188 -238c0 -50 0 -242 -213 -242c-16 0 -67 0 -113 50c-1 -39 -31 -39 -48 -39h-40c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h37c41 0 48 -16 48 -48zM197 362v-262c11 -14 36 -39 78 -39c112 0 112 95 112 169 c0 75 0 167 -101 167c-46 0 -76 -23 -89 -35"],120304:[475,11,487,37,457,"277 67c108 0 147 66 162 66c10 0 12 -10 12 -13c2 -13 6 -59 6 -70c0 -10 -1 -11 -29 -26c-67 -34 -127 -35 -154 -35c-151 0 -237 75 -237 241c0 175 93 245 242 245c70 0 100 -10 148 -32c17 -8 24 -12 24 -21c0 -5 -10 -61 -12 -69c-1 -4 -3 -13 -13 -13 c-21 0 -37 57 -146 57c-102 0 -107 -84 -107 -165c0 -64 0 -165 104 -165"],120305:[694,11,560,37,499,"499 646v-598c0 -35 -10 -48 -48 -48h-40c-34 0 -48 10 -48 
47c-43 -46 -86 -58 -128 -58c-198 0 -198 193 -198 239c0 49 0 241 203 241c31 0 81 -7 126 -50v227c0 35 10 48 48 48h37c41 0 48 -16 48 -48zM363 125v241c-22 21 -49 31 -78 31c-112 0 -112 -94 -112 -169 s0 -167 101 -167c12 0 35 2 60 18c29 20 29 29 29 46"],120306:[475,11,510,31,479,"287 61c106 0 153 68 168 68c9 0 10 -1 14 -32c1 -8 4 -37 4 -44c0 -12 -7 -15 -29 -26c-66 -33 -123 -38 -161 -38c-157 0 -252 80 -252 244c0 152 79 242 239 242c149 0 209 -86 209 -215c0 -45 -25 -46 -48 -46h-272c3 -64 15 -153 128 -153zM160 267h208 c-3 78 -18 136 -98 136c-87 0 -107 -75 -110 -136"],120307:[705,0,335,30,381,"368 595c-16 0 -23 38 -87 38c-65 0 -65 -40 -65 -66v-109h51c12 0 48 0 48 -36s-36 -36 -48 -36h-48v-338c0 -35 -10 -48 -48 -48h-37c-37 0 -48 12 -48 48v338c-22 0 -56 0 -56 36s35 36 56 36v80c0 150 120 167 195 167c15 0 36 0 65 -6c34 -7 35 -8 35 -32v-46 c0 -16 0 -26 -13 -26"],120308:[469,206,549,25,534,"200 92h48c135 0 276 0 276 -156c0 -142 -191 -142 -250 -142c-47 0 -114 1 -171 27c-63 29 -78 81 -78 112c0 64 52 82 67 88c-21 27 -27 60 -27 79c0 5 0 44 27 81c-38 33 -49 83 -49 123c0 165 159 165 201 165c43 0 95 -4 142 -35c38 23 88 35 119 35 c18 0 18 -2 22 -26c2 -10 7 -34 7 -38c0 -7 -5 -13 -13 -13c-3 0 -10 2 -14 4c-41 11 -76 11 -93 11c9 -11 31 -46 31 -104c0 -165 -159 -165 -201 -165c-21 0 -80 0 -127 26c-1 -6 -2 -8 -2 -15c0 -26 17 -41 21 -45c14 -12 28 -12 64 -12zM244 210c70 0 73 39 73 94 s-3 93 -73 93s-73 -39 -73 -94s3 -93 73 -93zM298 -17h-116c-23 0 -54 0 -54 -47c0 -70 107 -70 147 -70c32 0 146 0 146 67c0 36 -42 50 -123 50"],120309:[694,0,560,60,500,"500 325v-277c0 -35 -10 -48 -48 -48h-40c-41 0 -48 16 -48 48v275c0 51 -10 74 -58 74c-52 0 -110 -41 -110 -128v-221c0 -35 -10 -48 -48 -48h-40c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h37c41 0 48 -16 48 -48v-274h1c14 28 60 97 160 97c102 0 146 -41 146 -144"],120310:[695,0,254,54,201,"102 695h51c48 0 48 -21 48 -69c0 -47 0 -68 -48 -68h-52c-37 0 -47 13 -47 45v46c0 40 16 46 48 46zM194 410v-362c0 -35 -10 -48 -48 -48h-37c-37 0 -48 12 -48 48v362c0 37 12 48 48 48h37c41 0 48 
-16 48 -48"],120311:[695,205,285,-71,224,"224 647v-41c0 -35 -10 -48 -48 -48h-51c-41 0 -48 16 -48 48v41c0 35 10 48 48 48h51c41 0 48 -16 48 -48zM-46 -99c8 0 30 -22 75 -22c62 0 62 40 62 66v465c0 35 10 48 48 48h37c41 0 48 -16 48 -48v-442c0 -99 -39 -173 -149 -173c-63 0 -146 28 -146 47 c0 3 2 11 9 33c5 19 6 26 16 26"],120312:[694,0,529,69,497,"313 272l174 -228c10 -13 10 -20 10 -22c0 -22 -22 -22 -36 -22h-48c-29 0 -36 3 -53 25l-130 170l-47 -43v-104c0 -35 -10 -48 -48 -48h-18c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h21c41 0 48 -16 48 -48v-343l144 134c23 21 42 21 60 21h58c14 0 34 0 34 -21 c0 -8 -3 -11 -15 -22"],120313:[694,0,254,61,194,"194 646v-598c0 -35 -10 -48 -48 -48h-37c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h37c41 0 48 -16 48 -48"],120314:[469,0,865,60,806,"191 366c18 38 66 103 164 103c42 0 127 -3 143 -99c36 66 91 99 162 99c102 0 146 -41 146 -144v-277c0 -35 -10 -48 -48 -48h-40c-37 0 -48 12 -48 48v275c0 53 -11 74 -59 74c-49 0 -110 -38 -110 -128v-221c0 -35 -10 -48 -48 -48h-40c-37 0 -48 12 -48 48v275 c0 53 -11 74 -59 74c-49 0 -110 -38 -110 -128v-221c0 -35 -10 -48 -48 -48h-40c-37 0 -48 12 -48 48v368c0 37 12 48 48 48h34c41 0 48 -16 48 -48v-50h1"],120315:[469,0,560,60,500,"500 325v-277c0 -35 -10 -48 -48 -48h-40c-41 0 -48 16 -48 48v275c0 51 -10 74 -58 74c-52 0 -110 -41 -110 -128v-221c0 -35 -10 -48 -48 -48h-40c-37 0 -48 12 -48 48v368c0 37 12 48 48 48h34c41 0 48 -16 48 -48v-50h1c13 27 58 103 163 103c102 0 146 -41 146 -144 "],120316:[475,11,549,31,518,"518 225c0 -164 -87 -236 -244 -236s-243 73 -243 236c0 161 77 250 244 250c163 0 243 -86 243 -250zM275 67c107 0 107 91 107 170c0 78 0 166 -108 166c-107 0 -107 -89 -107 -166c0 -80 0 -170 108 -170"],120317:[469,194,560,61,523,"197 39v-185c0 -35 -10 -48 -48 -48h-40c-37 0 -48 12 -48 48v556c0 37 12 48 48 48h37c34 0 48 -10 48 -47c36 34 89 58 148 58c181 0 181 -189 181 -238c0 -50 0 -242 -212 -242c-14 0 -69 0 -114 50zM197 358v-240c0 -18 0 -23 24 -40c12 -9 30 -17 53 -17 c113 0 113 128 113 168c0 44 -1 162 -103 162c-43 0 -76 -23 -87 
-33"],120318:[469,194,560,37,499,"499 421v-567c0 -35 -10 -48 -48 -48h-40c-41 0 -48 16 -48 48v194c-34 -39 -71 -59 -124 -59c-186 0 -202 161 -202 239c0 80 16 241 209 241c17 0 76 0 120 -67v19c0 35 10 48 48 48h37c41 0 48 -16 48 -48zM366 115v198c0 12 0 30 -26 55c-18 19 -39 23 -55 23 c-112 0 -112 -124 -112 -164c0 -39 0 -166 102 -166c46 0 73 29 91 54"],120319:[469,0,371,61,356,"194 230v-182c0 -35 -10 -48 -48 -48h-37c-37 0 -48 12 -48 48v368c0 37 12 48 48 48h32c41 0 48 -16 48 -48v-65h1c47 108 120 118 148 118c18 0 18 -7 18 -27v-47c0 -26 -2 -26 -20 -27c-63 -4 -142 -43 -142 -138"],120320:[475,11,420,31,396,"353 347c-15 0 -44 57 -145 57c-39 0 -72 -8 -72 -49c0 -39 40 -47 95 -57c42 -8 77 -14 117 -50c18 -16 48 -51 48 -107c0 -148 -126 -152 -177 -152c-17 0 -58 0 -113 17c-1 1 -75 24 -75 42l14 81c1 6 3 15 13 15c18 0 50 -77 161 -77c39 0 78 7 78 54 c0 79 -135 39 -211 108c-27 24 -49 54 -49 103c0 143 134 143 179 143c89 0 162 -29 162 -46l-12 -69c-1 -4 -3 -13 -13 -13"],120321:[589,11,403,20,373,"373 47c0 -23 -102 -58 -180 -58c-69 0 -105 43 -105 144v253h-20c-12 0 -48 0 -48 36s37 36 48 36h23v83c0 37 12 48 48 48h31c41 0 48 -16 48 -48v-83h89c12 0 48 0 48 -36s-36 -36 -48 -36h-89v-236c0 -66 15 -83 39 -83c56 0 82 37 92 37c9 0 11 -6 16 -26 c6 -21 8 -28 8 -31"],120322:[458,11,560,60,500,"500 410v-362c0 -35 -10 -48 -48 -48h-37c-41 0 -48 16 -48 48v26c-47 -85 -118 -85 -145 -85c-104 0 -162 30 -162 137v284c0 37 12 48 48 48h40c41 0 48 -16 48 -48v-293c0 -53 6 -67 62 -67c71 0 106 57 106 123v237c0 35 10 48 48 48h40c41 0 48 -16 48 -48"],120323:[458,0,499,26,473,"468 411l-119 -369c-14 -42 -45 -42 -63 -42h-73c-18 0 -49 0 -63 42l-119 369c-5 14 -5 16 -5 19c0 28 25 28 40 28h24c48 0 55 -22 62 -43l99 -302l99 302c9 28 20 43 62 43h21c15 0 40 0 40 -28c0 -3 0 -5 -5 -19"],120324:[458,0,743,24,719,"719 430l-107 -384c-10 -34 -20 -46 -63 -46h-68c-50 0 -55 19 -66 55c-13 47 -57 205 -63 262l-1 -1c-4 -52 -40 -178 -63 -261c-11 -40 -19 -55 -66 -55h-28c-25 0 -51 0 -64 46l-106 384c3 28 24 28 43 28h20c41 0 52 -11 62 -45l80 -283c35 
117 79 290 88 305 c14 23 37 23 52 23c46 0 53 -23 59 -45l85 -300c15 57 59 213 81 290c14 52 26 55 73 55c31 0 49 0 52 -28"],120325:[458,0,499,24,474,"247 328c96 130 91 130 131 130h43c15 0 35 0 35 -22c0 -5 0 -9 -10 -21l-145 -178l162 -194c7 -9 11 -13 11 -21c0 -22 -21 -22 -35 -22h-44c-21 0 -33 0 -47 20l-102 144l-86 -128c-22 -33 -24 -36 -57 -36h-43c-14 0 -36 0 -36 22c0 8 4 13 10 20l158 195l-151 180 c-7 8 -10 12 -10 19c0 22 22 22 35 22h44c23 0 34 -1 51 -22"],120326:[458,205,499,29,473,"49 -95c11 0 30 -25 77 -25c68 0 77 68 97 120l-189 416c-2 5 -5 12 -5 16c0 26 25 26 39 26h26c50 0 57 -15 85 -78l89 -238h1l89 293c15 23 37 23 53 23h22c13 0 40 0 40 -27c0 -5 0 -7 -5 -20l-163 -474c-8 -24 -57 -142 -181 -142c-16 0 -70 2 -79 14 c-3 4 -9 73 -9 81c0 12 8 15 13 15"],120327:[458,0,475,31,441,"426 382l-244 -307h199c29 0 60 0 60 -38c0 -37 -35 -37 -48 -37h-314c-17 0 -48 0 -48 42c0 16 2 21 12 34l244 307h-182c-27 0 -59 0 -59 37c0 38 35 38 48 38h296c17 0 48 0 48 -42c0 -16 -2 -21 -12 -34"],120328:[694,0,665,28,638,"525 694l113 -694h-92l-33 204h-291l-120 -204h-74l408 694h89zM457 625l-201 -365h247"],120329:[694,0,665,97,696,"245 694h237c126 0 214 -66 214 -149c0 -90 -99 -158 -202 -182c90 -14 159 -72 159 -148c0 -103 -130 -215 -292 -215h-264zM263 391h116c152 0 238 82 238 143c0 60 -76 104 -175 104h-126zM192 56h153c123 0 227 74 227 152c0 70 -78 122 -177 122h-145"],120330:[716,22,637,131,722,"722 681l-32 -81h-1c-51 41 -104 52 -158 52c-161 0 -310 -193 -310 -388c0 -109 52 -222 184 -222c85 0 127 18 199 61l-9 -69c-89 -50 -142 -56 -204 -56c-179 0 -260 141 -260 281c0 225 192 457 415 457c87 0 136 -19 176 -35"],120331:[694,0,721,96,747,"244 694h262c150 0 241 -121 241 -268c0 -222 -191 -426 -388 -426h-263zM318 638l-124 -582h153c179 0 314 182 314 370c0 115 -63 212 -191 212h-152"],120332:[691,0,596,94,687,"253 329l-55 -258h371l-15 -71h-460l147 691h446l-14 -65h-357l-50 -236h330l-13 -61h-330"],120333:[691,0,568,94,673,"250 315l-67 -315h-89l147 691h432l-14 -65h-343l-53 -250h302l-13 
-61h-302"],120334:[716,22,665,131,733,"663 299l-58 -272c-90 -39 -165 -49 -218 -49c-160 0 -256 127 -256 281c0 227 195 457 414 457c71 0 128 -13 188 -50l-32 -80c-44 42 -91 66 -170 66c-160 0 -310 -193 -310 -388c0 -125 66 -222 180 -222c40 0 91 8 123 18c10 3 11 4 14 19l34 159h-132l12 61h211"],120335:[694,0,707,94,761,"761 694l-148 -694h-89l70 330h-341l-70 -330h-89l148 694h89l-65 -303h341l65 303h89"],120336:[694,0,276,94,331,"331 694l-148 -694h-89l148 694h89"],120337:[694,22,471,46,536,"536 694l-117 -550c-14 -68 -73 -166 -211 -166c-85 0 -138 30 -162 43l30 74c41 -54 96 -56 116 -56c28 0 120 7 142 111l116 544h86"],120338:[694,0,693,96,785,"458 420l193 -420h-92l-167 364l-163 -137l-48 -227h-85l148 694h85l-79 -371l441 371h94"],120339:[694,0,540,94,513,"331 694l-134 -628c89 0 227 -1 316 0l-14 -66h-405l148 694h89"],120340:[694,0,874,100,922,"922 694l-148 -694h-78l132 620l-352 -598h-72c-12 71 -91 534 -93 597h-1l-132 -619h-78l148 694h113l96 -600h1l349 600h115"],120341:[694,0,707,96,759,"759 694l-148 -694h-125l-179 619h-1l-132 -619h-78l148 694h124l179 -619h1l133 619h78"],120342:[716,22,735,119,762,"762 436c0 -241 -202 -458 -399 -458c-147 0 -244 120 -244 277c0 229 196 461 401 461c135 0 242 -107 242 -280zM377 43c158 0 299 214 299 401c0 129 -73 208 -170 208c-144 0 -295 -171 -295 -389c0 -147 78 -220 166 -220"],120343:[694,0,637,96,690,"246 289l-61 -289h-89l148 694h249c121 0 197 -74 197 -166c0 -125 -138 -239 -276 -239h-168zM318 638l-62 -291h141c138 0 212 94 212 174c0 75 -60 117 -150 117h-141"],120344:[716,125,735,119,762,"522 -125l-60 121c-38 -14 -75 -18 -99 -18c-147 0 -244 120 -244 277c0 229 196 461 401 461c135 0 242 -107 242 -280c0 -121 -55 -290 -222 -403l85 -158h-103zM453 194l57 -105c107 78 166 221 166 346c0 146 -81 217 -170 217c-158 0 -301 -201 -301 -396 c0 -123 67 -214 172 -214c20 0 39 3 58 9l-70 143h88"],120345:[694,0,644,96,700,"488 326l129 -326h-89l-120 317h-159l-67 -317h-86l148 694h241c127 0 215 -69 215 -158c0 -77 -71 -171 -212 -210zM318 638l-56 -260h145c141 0 212 84 212 152c0 
55 -45 108 -158 108h-143"],120346:[716,22,554,54,607,"607 667l-32 -81c-44 46 -91 64 -156 64c-96 0 -173 -68 -173 -140s64 -89 105 -100c91 -24 118 -31 151 -68c38 -43 42 -90 42 -112c0 -121 -121 -252 -274 -252c-83 0 -164 28 -216 70l32 80c57 -59 135 -81 198 -81c103 0 177 84 177 157c0 21 -6 46 -26 70 c-47 54 -160 29 -234 107c-30 32 -39 73 -39 102c0 110 116 233 272 233c70 0 118 -15 173 -49"],120347:[688,0,679,155,790,"518 623l-133 -623h-89l133 623h-261c-45 5 45 0 0 0l14 65h608l-14 -65h-258"],120348:[694,22,686,137,741,"741 694l-100 -466c-29 -137 -164 -250 -299 -250c-120 0 -205 84 -205 201c0 27 1 32 5 49l100 466h89l-100 -466c-1 -4 -5 -30 -5 -54c0 -106 70 -140 128 -140c86 0 182 68 209 194l100 466h78"],120349:[694,0,665,161,800,"800 694l-421 -694h-92l-126 694h93l101 -622h1l366 622h78"],120350:[694,0,943,161,1077,"1077 694l-351 -694h-87l-39 618h-1l-301 -618h-81l-56 694h86l36 -618h1c5 18 254 528 298 618h81l38 -620h1l302 620h73"],120351:[694,0,665,14,758,"447 366l205 -366h-104l-157 306l-281 -306h-96l346 366l-185 328h105l138 -261l244 261h96"],120352:[694,0,665,150,811,"811 694l-376 -416l-59 -278h-86l59 278l-199 416h105l160 -347l308 347h88"],120353:[694,0,610,56,701,"701 694c-9 -43 -8 -42 -17 -51l-501 -577h391l-14 -66h-504c1 4 6 27 6 29c3 13 3 15 12 24l502 580l-373 -1l13 62h485"],120354:[461,11,479,65,465,"462 294l-63 -294h-75c2 8 5 26 7 35c-64 -41 -149 -46 -175 -46c-87 0 -91 80 -91 96c0 136 210 156 283 159c4 0 21 1 24 2c3 2 15 60 15 77s-6 79 -68 79c-64 0 -115 -21 -161 -50c1 4 5 37 8 66c44 22 94 43 166 43c85 0 133 -58 133 -131c0 -13 0 -22 -3 -36z M349 130l13 64c-46 -2 -223 -10 -223 -90c0 -32 31 -54 81 -54c14 0 111 0 129 80"],120355:[694,11,515,82,535,"305 694l-64 -298c49 36 115 59 173 59c84 0 121 -82 121 -163c0 -164 -124 -303 -243 -303c-56 0 -94 24 -122 57l-10 -46h-78l148 694h75zM232 336l-47 -222c12 -40 39 -64 82 -64c113 0 189 114 189 227c0 77 -47 117 -106 117c-63 0 -108 -47 -118 -58"],120356:[461,11,443,77,499,"499 417l-26 -64c-45 33 -72 44 -132 44c-124 0 -185 -135 -185 -222c0 
-71 38 -122 108 -122c73 0 129 29 168 54l-8 -67c-63 -37 -121 -51 -174 -51c-115 0 -173 89 -173 180c0 133 117 292 277 292c54 0 92 -6 145 -44"],120357:[694,11,515,76,582,"582 694l-148 -694h-78l11 52c-69 -56 -131 -63 -161 -63c-82 0 -130 74 -130 167c0 151 114 299 234 299c45 0 97 -16 134 -55l63 294h75zM385 134l40 191c2 8 2 11 2 13s-18 56 -84 56c-108 0 -188 -108 -188 -226c0 -96 64 -118 105 -118c65 0 120 58 125 84"],120358:[461,11,443,77,471,"333 461c63 0 138 -31 138 -154c0 -32 -6 -71 -10 -88h-310c-3 -13 -3 -24 -3 -37c0 -81 47 -132 114 -132c66 0 128 30 164 56c-2 -15 -5 -52 -7 -65c-39 -25 -103 -52 -170 -52c-106 0 -172 79 -172 182c0 146 127 290 256 290zM168 274h248c0 32 0 126 -95 126 c-55 0 -118 -46 -153 -126"],120359:[705,0,304,101,494,"258 386l-82 -386h-75l82 386h-74l12 58h74l21 95c19 89 102 166 201 166c25 0 52 -4 77 -11l-14 -67c-33 16 -64 17 -77 17c-51 0 -100 -27 -110 -77l-26 -123h113l-12 -58h-110"],120360:[455,206,499,11,571,"571 455c0 -61 -3 -62 -8 -62c-16 0 -36 2 -53 2c-24 0 -53 -4 -75 -4c-1 0 -4 3 -5 3c8 -14 17 -39 17 -68c0 -100 -96 -197 -197 -197c-29 0 -59 7 -87 30c-12 -14 -19 -32 -19 -50c0 -40 31 -40 48 -40h88c118 0 179 -35 179 -111c0 -111 -143 -164 -253 -164 c-118 0 -195 53 -195 117c0 37 28 90 88 121c-9 13 -13 35 -13 50c0 18 5 62 50 110c-11 20 -16 43 -16 66c0 98 94 197 198 197c27 0 60 -7 88 -32c39 15 94 32 165 32zM261 186c78 0 116 79 116 139c0 51 -33 73 -71 73c-78 0 -116 -80 -116 -139c0 -51 33 -73 71 -73z M265 -3h-90c-75 0 -93 -61 -93 -78c0 -39 59 -68 136 -68c87 0 169 37 169 87c0 48 -61 59 -122 59"],120361:[694,0,515,81,505,"499 304l-64 -304h-78l65 307c0 2 2 18 2 21c0 50 -34 66 -81 66c-65 0 -112 -57 -136 -167l-48 -227h-78l148 694h74l-65 -311c26 27 81 72 157 72c80 0 110 -39 110 -100c0 -15 0 -23 -6 -51"],120362:[680,0,237,81,307,"307 680l-19 -89h-89l19 89h89zM81 0l94 444h75l-94 -444h-75"],120363:[680,205,265,-97,329,"329 680l-19 -89h-89l19 89h89zM2 -141c83 0 92 72 92 72l109 513h75l-108 -508c-17 -79 -83 -141 -155 -141c-26 0 -72 6 -112 34l28 61c18 -21 42 -31 71 
-31"],120364:[694,0,487,84,543,"339 272l132 -272h-82l-108 223l-98 -81l-30 -142h-69l148 694h72l-98 -458l248 208h89"],120365:[694,0,237,81,304,"304 694l-148 -694h-75l148 694h75"],120366:[455,0,793,81,783,"777 304l-64 -304h-78l65 307c0 2 2 18 2 21c0 53 -39 66 -81 66c-76 0 -117 -75 -137 -167l-48 -227h-78l65 307c0 2 2 18 2 21c0 53 -39 66 -81 66c-76 0 -117 -75 -137 -167l-48 -227h-78l96 450h72l-15 -71c22 24 81 76 161 76c86 0 106 -43 110 -83 c24 25 80 83 167 83c81 0 111 -39 111 -100c0 -23 -2 -33 -6 -51"],120367:[455,0,515,81,505,"499 304l-64 -304h-78l65 307c0 2 2 18 2 21c0 50 -34 66 -81 66c-65 0 -112 -57 -136 -167l-48 -227h-78l96 450h72l-15 -71c42 45 99 76 161 76c80 0 110 -39 110 -100c0 -15 0 -23 -6 -51"],120368:[461,11,499,71,522,"522 275c0 -154 -139 -286 -274 -286c-107 0 -177 79 -177 180c0 143 133 292 276 292c108 0 175 -85 175 -186zM261 53c114 0 185 127 185 227c0 74 -45 120 -112 120c-103 0 -183 -105 -183 -223c0 -97 63 -124 110 -124"],120369:[455,194,515,41,535,"170 46l-51 -240h-78l135 638h75l-10 -49c47 35 115 60 177 60c79 0 117 -77 117 -162c0 -166 -125 -304 -243 -304c-71 0 -107 40 -122 57zM228 318l-40 -188c0 -2 -3 -13 -3 -14c0 -4 14 -66 82 -66c92 0 187 102 187 215c0 75 -43 126 -105 126c-20 0 -60 -5 -104 -43 c-12 -12 -13 -13 -17 -30"],120370:[455,194,515,76,531,"531 455l-138 -649h-78l51 243c-32 -24 -93 -60 -163 -60c-80 0 -127 72 -127 166c0 162 125 300 242 300c70 0 110 -45 124 -67l14 67h75zM261 50c54 0 119 48 127 87l30 143l4 18c0 1 0 93 -80 93c-89 0 -186 -98 -186 -214c0 -68 37 -127 105 -127"],120371:[455,0,340,82,424,"424 455l-15 -67c-110 -3 -184 -89 -202 -154c-4 -15 -24 -111 -50 -234h-75l96 450h70l-19 -88h1c2 2 76 91 194 93"],120372:[461,11,382,36,434,"100 306c0 56 44 155 196 155c57 0 93 -10 138 -32l-25 -65h-1c-40 26 -79 38 -133 38c-100 0 -100 -71 -100 -73c0 -72 108 -39 177 -93c9 -8 38 -34 38 -82c0 -41 -31 -165 -196 -165c-96 0 -158 42 -158 45c0 4 3 10 4 13l22 54c50 -41 108 -49 144 -49 c110 0 110 79 110 80c0 79 -125 45 -183 102c-20 19 -33 42 -33 
72"],120373:[571,11,360,101,410,"257 386l-55 -262c-1 -6 -3 -14 -3 -24c0 -47 27 -47 38 -47c26 0 61 8 97 33l4 -59c-49 -23 -101 -38 -154 -38c-38 0 -62 20 -62 77c0 38 50 263 63 320h-84l12 58h87l27 127h69l-27 -127h141l-12 -58h-141"],120374:[444,11,515,99,529,"529 444l-94 -444h-75l10 48c-55 -41 -128 -59 -185 -59c-75 0 -86 39 -86 76c0 14 1 28 4 41l72 338h78l-71 -336c-3 -14 -3 -21 -3 -24c0 -28 17 -40 68 -40c87 0 133 61 142 104l62 296h78"],120375:[444,0,460,108,540,"540 444l-266 -444h-88l-78 444h78l56 -391l1 -1l223 392h74"],120376:[444,0,682,108,762,"419 388c-20 -68 -171 -352 -189 -388h-79l-43 444h76l24 -386l1 -1l188 387h71l28 -389h1l192 389h73l-231 -444h-87l-24 388h-1"],120377:[444,0,460,0,538,"309 229l151 -229h-83l-110 188l-185 -188h-82l243 229l-143 215h83l104 -165l168 165h83"],120378:[444,205,460,1,540,"540 444l-329 -532c-70 -113 -111 -117 -140 -117c-16 0 -43 2 -70 8l8 65c26 -12 58 -16 75 -16c25 0 41 17 70 61c8 12 54 83 54 89l-100 442h79l62 -386h1l215 386h75"],120379:[444,0,433,28,494,"477 396l-338 -334h276l-13 -62h-374c7 48 19 51 21 53l335 332h-260l12 59h358l-5 -28c-3 -10 -3 -11 -12 -20"],120380:[695,0,696,28,670,"552 695l118 -695h-155l-33 184h-241l-120 -184h-93l408 695h116zM439 544l-156 -265h187"],120381:[695,0,749,68,781,"217 695h265c176 0 299 -74 299 -168c0 -80 -139 -140 -283 -162c120 -13 212 -67 212 -137c0 -109 -155 -228 -349 -228h-293zM293 413h86c171 0 267 60 267 105c0 47 -89 82 -204 82h-98zM222 95h123c123 0 227 58 227 119c0 55 -78 95 -177 95h-117"],120382:[733,37,700,131,785,"785 688l-32 -148h-1c-51 66 -104 84 -158 84c-161 0 -310 -172 -310 -346c0 -102 52 -207 184 -207c85 0 127 17 199 57v-83c-93 -73 -148 -82 -213 -82c-222 0 -323 156 -323 310c0 226 221 460 478 460c87 0 136 -24 176 -45"],120383:[695,0,781,66,807,"215 695h291c187 0 301 -120 301 -266c0 -224 -221 -429 -448 -429h-293zM347 600l-123 -505h123c179 0 314 164 314 334c0 93 -63 171 -191 171h-123"],120384:[690,0,596,63,687,"283 308l-53 -187h339l-15 -121h-491l148 690h476l-14 -110h-327l-48 -168h298l-13 
-104h-300"],120385:[690,0,568,63,673,"280 294l-67 -294h-150l148 690h462l-14 -110h-313l-51 -182h270l-13 -104h-272"],120386:[733,37,743,131,811,"726 321l-58 -275c-90 -66 -165 -83 -218 -83c-199 0 -319 131 -319 290c0 238 225 480 477 480c77 0 138 -17 203 -67l-47 -98c-44 35 -91 56 -170 56c-160 0 -310 -181 -310 -363c0 -107 66 -190 180 -190c40 0 91 6 123 14c-9 3 -9 4 -12 21l33 111h-179l15 104h282"],120387:[695,0,737,63,791,"791 695l-148 -695h-150l69 309h-279l-70 -309h-150l149 695h149l-63 -282h279l65 282h149"],120388:[695,0,442,93,497,"497 695l-252 -695h-152l252 695h152"],120389:[695,37,500,33,565,"565 695l-117 -537c-16 -80 -83 -195 -240 -195c-92 0 -149 43 -175 61l43 95c41 -51 96 -53 116 -53c22 0 96 7 113 111l116 518h144"],120390:[695,0,707,66,799,"491 421l192 -421h-151l-155 327l-120 -97l-48 -230h-143l150 695h141l-64 -303l384 303h122"],120391:[695,0,540,62,513,"361 695l-132 -584h284l-14 -111h-437l150 695h149"],120392:[695,0,874,100,922,"922 695l-122 -695h-131l100 464l43 79c-30 -52 -34 -58 -119 -195l-208 -338h-101l-64 362c-11 56 -28 145 -30 196h38l-124 -568h-104l107 695h154l87 -355c10 -62 27 -152 30 -208h-30c12 31 69 122 138 235l186 328h150"],120393:[695,0,733,69,785,"785 695l-148 -695h-178l-169 568h34l-124 -568h-131l149 695h177l170 -567h-36l126 567h130"],120394:[733,37,769,119,796,"796 438c0 -250 -201 -475 -398 -475c-168 0 -279 133 -279 307c0 230 198 463 405 463c152 0 272 -113 272 -295zM409 73c127 0 241 199 241 372c0 111 -59 179 -137 179c-116 0 -238 -153 -238 -347c0 -136 63 -204 134 -204"],120395:[695,0,694,66,747,"276 269l-61 -269h-149l149 695h278c156 0 254 -74 254 -166c0 -136 -167 -260 -333 -260h-138zM347 600l-60 -232h110c138 0 212 76 212 140c0 59 -60 92 -150 92h-112"],120396:[733,132,770,119,797,"489 -132l-20 70c-28 19 -56 25 -74 25c-166 0 -276 133 -276 306c0 230 197 464 404 464c153 0 274 -113 274 -295c0 -115 -55 -275 -224 -382l79 -188h-163zM461 298l72 -138c76 62 118 177 118 277c0 126 -66 187 -139 187c-130 0 -247 -180 -247 -354 c0 -114 55 -199 141 -199c3 0 6 2 9 6l-121 
253"],120397:[695,0,701,66,757,"513 316l135 -316h-147l-118 296h-105l-67 -296h-145l149 695h270c161 0 272 -76 272 -175c0 -75 -82 -166 -244 -204zM347 600l-54 -200h114c141 0 212 64 212 115c0 43 -45 85 -158 85h-114"],120398:[733,37,579,36,632,"632 665l-47 -99c-54 40 -113 55 -193 55c-48 0 -87 -58 -87 -119c3 -30 75 -63 174 -127c65 -49 75 -102 75 -127c0 -137 -105 -285 -237 -285c-108 0 -213 37 -281 93l50 98c69 -54 162 -74 238 -74c52 0 89 77 89 144c7 45 -76 70 -185 138c-51 37 -66 84 -66 117 c0 120 101 254 237 254c94 0 159 -21 233 -68"],120399:[686,0,679,168,790,"548 575l-133 -575h-150l132 575h-229l14 111h608l-14 -111h-228"],120400:[695,37,774,137,829,"829 695l-100 -460c-32 -149 -178 -272 -325 -272c-156 0 -267 97 -267 232c0 29 2 34 8 52l129 448h149l-126 -462c-2 -3 -9 -24 -9 -43c0 -100 70 -132 128 -132c75 0 159 66 183 189l100 448h130"],120401:[695,0,685,129,820,"820 695l-421 -695h-143l-127 695h156l92 -522c3 9 189 307 325 522h118"],120402:[695,0,963,131,1097,"1097 695l-351 -695h-134l-36 507l-261 -507h-127l-57 695h146l29 -497l276 497h81l63 -494l258 494h113"],120403:[695,0,683,-19,776,"479 368l209 -368h-168l-138 268l-252 -268h-149l346 368l-180 327h161l119 -216l217 216h132"],120404:[695,0,682,120,828,"828 695l-363 -413l-60 -282h-145l57 276l-197 419h169l141 -290l276 290h122"],120405:[695,0,613,54,704,"704 609l-485 -497h355l-14 -112h-504l-2 90l483 500h-334l13 105h488v-86"],120406:[480,30,479,47,484,"331 134l8 40c-136 -9 -181 -40 -181 -70c0 -22 25 -35 62 -35c20 0 97 2 111 65zM480 290l-66 -309h-114l5 19c-55 -23 -115 -30 -149 -30c-87 0 -109 72 -109 115c0 163 248 175 311 177c4 18 10 53 10 61c0 14 -4 60 -49 60c-60 0 -106 -19 -151 -46l-39 -25l11 45 c2 5 6 50 8 63l1 10l9 5c45 22 98 45 174 45c94 0 152 -66 152 -150c0 -13 0 -24 -4 -40"],120407:[712,30,515,60,553,"268 435c51 27 104 39 146 39c99 0 139 -96 139 -182c0 -172 -129 -322 -261 -322c-47 0 -82 16 -111 39l-6 -28h-115l155 731h113zM249 327l-44 -212c9 -30 28 -46 62 -46c99 0 171 102 171 208c0 65 -38 98 -88 98c-13 0 -58 -3 -101 
-48"],120408:[480,30,443,59,521,"521 424l-39 -100l-20 14c-39 28 -63 40 -121 40c-111 0 -166 -123 -166 -203c0 -61 30 -104 89 -104c68 0 120 27 158 51l33 22l-14 -115c-55 -32 -114 -59 -191 -59c-126 0 -191 99 -191 199c0 141 123 311 295 311c59 0 100 -8 155 -48"],120409:[712,30,515,58,605,"366 138l41 191c1 4 1 6 1 7c-3 9 -16 38 -65 39c-95 2 -169 -96 -169 -207c0 -86 55 -99 86 -99c56 0 103 51 106 69zM605 712l-156 -731h-115l6 29c-66 -39 -120 -40 -134 -40c-89 0 -148 79 -148 186c0 163 122 318 252 318c40 0 85 -12 123 -39l59 277h113"],120410:[480,30,443,59,490,"490 307c0 -34 -6 -74 -11 -93l-3 -13h-309v-19c0 -71 39 -113 95 -113c62 0 118 27 153 52l35 25l-6 -43c-3 -22 -3 -43 -8 -73c-48 -31 -112 -60 -187 -60c-116 0 -190 88 -190 201c0 154 133 309 274 309c77 0 157 -43 157 -173zM198 293h199c-2 37 -13 88 -76 88 c-39 0 -89 -29 -123 -88"],120411:[724,19,304,78,515,"273 367l-82 -386h-113l82 386h-74l20 96h74l18 80c20 96 110 181 219 181c27 0 56 -5 82 -12l16 -5l-21 -107l-22 11c-29 14 -57 15 -69 15c-46 0 -84 -24 -92 -63l-21 -100h113l-20 -96h-110"],120412:[474,224,499,-8,590,"265 -22h-90c-63 0 -75 -50 -75 -59c0 -29 63 -49 118 -49c89 0 150 37 150 68c0 19 -16 40 -103 40zM261 205c64 0 97 66 97 120c0 43 -28 54 -52 54c-64 0 -97 -67 -97 -120c0 -44 28 -54 52 -54zM590 455c-1 -43 2 -81 -27 -81c-18 0 -38 2 -53 2s-33 -2 -51 -3 c4 -14 7 -29 7 -47c0 -109 -104 -216 -216 -216c-29 0 -56 7 -82 23c-5 -12 -13 -41 9 -45c3 -1 7 -1 15 -1h88c143 0 198 -51 198 -129c0 -128 -159 -182 -272 -182c-124 0 -214 56 -214 135c0 42 29 95 82 129c-5 16 -6 33 -6 42c0 14 3 61 46 113c-8 18 -13 38 -13 63 c0 109 104 216 217 216c28 0 61 -7 91 -30c40 15 94 30 162 30h19v-19"],120413:[712,19,515,59,523,"517 300l-67 -319h-115l68 329c1 6 2 16 2 18c0 30 -14 47 -62 47c-71 0 -103 -84 -118 -152l-51 -242h-115l155 731h112l-59 -280c37 25 81 42 128 42c94 0 128 -51 128 -119c0 -11 1 -22 -6 -55"],120414:[698,19,237,59,330,"330 698l-27 -125h-127l27 125h127zM59 -19l101 482h113l-102 -482h-112"],120415:[698,223,265,-120,352,"352 698l-27 -125h-126l26 
125h127zM75 -66l113 529h113l-113 -531c-18 -85 -90 -155 -173 -155c-49 0 -92 15 -122 37l-13 8l47 102l18 -22c35 -41 122 -31 130 32"],120416:[712,19,487,62,594,"361 267l139 -286h-123l-102 213l-75 -62l-32 -151h-106l155 731h110l-92 -427l212 178h147"],120417:[712,19,237,59,327,"327 712l-156 -731h-112l155 731h113"],120418:[474,19,793,59,801,"795 300l-67 -319h-116l69 329c1 6 2 16 2 18c0 31 -16 47 -62 47c-24 0 -88 -7 -119 -152l-51 -242h-115l68 329c1 6 2 16 2 18c0 31 -16 47 -62 47c-24 0 -87 -7 -119 -152l-51 -242h-115l103 488h110l-9 -40c32 23 78 45 132 45c77 0 107 -33 120 -66 c32 29 85 66 157 66c95 0 129 -51 129 -119c0 -22 -2 -33 -6 -55"],120419:[474,19,515,59,523,"517 300l-67 -319h-115l68 329c1 6 2 16 2 18c0 30 -14 47 -62 47c-71 0 -103 -84 -118 -152l-51 -242h-115l103 488h110l-9 -40c39 28 84 45 132 45c94 0 128 -51 128 -119c0 -11 1 -22 -6 -55"],120420:[480,30,499,53,540,"248 -30c-117 0 -195 88 -195 199c0 155 143 311 294 311c119 0 193 -95 193 -205c0 -163 -146 -305 -292 -305zM261 71c104 0 166 122 166 209c0 63 -36 101 -93 101c-90 0 -164 -93 -164 -204c0 -87 54 -106 91 -106"],120421:[474,213,515,18,553,"246 314l-39 -186c-1 -6 -2 -10 -3 -12c2 -10 12 -46 63 -47c84 -2 169 95 169 196c0 65 -35 107 -87 107c-47 0 -96 -35 -101 -49c0 -2 0 -1 -2 -9zM181 9l-47 -222h-116l143 676h113l-6 -29c41 22 94 40 150 40c82 0 135 -76 135 -181c0 -174 -130 -323 -261 -323 c-53 0 -88 20 -111 39"],120422:[474,213,515,58,554,"554 474l-146 -687h-116l47 222c-38 -22 -85 -39 -136 -39c-87 0 -145 76 -145 185c0 170 131 319 260 319c52 0 90 -23 114 -45l9 45h113zM261 69c49 0 103 45 108 72l34 159c0 12 -5 72 -61 72c-76 0 -167 -86 -167 -195c0 -60 31 -108 86 -108"],120423:[474,19,340,60,447,"447 474l-23 -105h-15c-99 -3 -168 -80 -185 -140l-49 -233l-3 -15h-112l103 488h108l-12 -55c36 27 93 59 165 60h23"],120424:[480,30,382,18,458,"296 480c59 0 97 -10 146 -34l16 -8l-36 -92h-20c-43 27 -74 38 -127 37c-73 -1 -81 -38 -81 -54c0 -52 98 -20 169 -79c31 -25 45 -57 45 -96c0 -51 -38 -185 -214 -184c-94 1 -176 39 -176 64c0 8 4 18 5 20l32 77l18 
-16c79 -63 221 -61 225 16v1 c0 58 -116 28 -178 88c-38 37 -39 73 -39 86c0 69 56 174 215 174"],120425:[590,30,360,78,433,"220 120c-7 -48 5 -49 17 -49c22 0 54 7 87 30l27 19l6 -104l-11 -6c-51 -24 -106 -40 -162 -40c-46 0 -81 26 -81 96c0 30 31 177 59 301h-84l20 96h87l28 127h106l-27 -127h141l-20 -96h-141"],120426:[463,30,515,80,552,"552 463l-102 -482h-112l5 28c-54 -28 -115 -39 -158 -39c-83 0 -105 45 -105 95c0 14 1 30 5 45l75 353h116l-76 -359c-5 -27 -2 -33 10 -37c6 -2 19 -4 37 -4c78 0 116 54 123 89l66 311h116"],120427:[463,19,460,86,573,"573 463l-289 -482h-113l-85 482h116l51 -356l202 356h118"],120428:[463,19,682,87,792,"246 -8l-5 -11h-107l-47 482h115l20 -336l163 336h101l23 -341l168 341h115l-250 -482h-115l-21 328c-50 -111 -146 -289 -160 -317"],120429:[463,19,460,-47,585,"333 226l162 -245h-129l-103 177l-174 -177h-136l265 250l-152 232h128l96 -155l158 155h137"],120430:[463,223,460,-20,573,"226 -97c-74 -121 -122 -126 -155 -126c-17 0 -45 2 -74 8l-17 3l14 108l23 -11c24 -11 54 -14 67 -14c14 0 25 8 55 53c14 21 42 66 50 80l-105 459h119l56 -351l195 351h119"],120431:[463,19,433,7,516,"184 80h254l-21 -99h-410l3 22c12 68 26 64 26 64l303 299h-238l20 97h395l-9 -51c-3 -10 -4 -16 -17 -29"],120662:[694,0,732,42,690,"98 0c-29 0 -56 -1 -56 27c0 14 199 578 223 639c14 35 61 28 101 28c49 0 81 6 101 -28c21 -63 223 -625 223 -639c0 -34 -41 -27 -67 -27c-30 1 -65 -4 -80 29l-43 121h-284l-19 -57c-31 -88 -33 -93 -99 -93zM359 569c-7 -34 -92 -276 -112 -332h223"],120663:[694,0,732,91,671,"139 694h246c42 0 255 0 255 -180c0 -78 -53 -130 -145 -151c37 -6 176 -28 176 -169c0 -181 -184 -194 -258 -194h-274c-37 0 -48 12 -48 48v598c0 37 12 48 48 48zM232 397h116c25 0 156 0 156 114c0 94 -116 97 -155 97h-117v-211zM232 86h145c48 0 155 7 155 110 c0 129 -139 129 -174 129h-126v-239"],120664:[690,0,579,92,537,"483 586h-251c0 -271 2 -555 0 -561c-9 -28 -32 -25 -72 -25l-41 1c-6 2 -19 12 -27 27v636c2 5 6 19 26 26c3 1 238 0 389 0c42 -10 30 -88 24 -79c-10 -29 -38 -25 -48 -25"],120665:[694,-8,915,60,855,"392 694h133c17 -7 30 -14 41 
-32c1 -3 286 -621 289 -635c0 -7 -2 -14 -8 -19h-779c-6 5 -8 12 -8 19c5 23 283 620 289 635c7 16 25 28 43 32zM653 130l-205 446l-205 -446h410"],120666:[691,0,640,91,595,"238 311v-201h301c33 0 56 0 56 -48v-14c0 -35 -10 -48 -48 -48h-408c-37 0 -48 12 -48 48v595c0 37 12 48 48 48h393c41 0 48 -16 48 -48v-8c0 -48 -23 -48 -58 -48h-284v-182h263c35 0 48 -10 48 -47c0 -40 -17 -47 -48 -47h-263"],120667:[694,0,671,61,616,"600 607l-360 -502h319c34 0 57 0 57 -48v-9c0 -35 -10 -48 -48 -48h-459c-37 0 -48 12 -48 48v8c0 20 0 22 10 35l362 505h-296c-40 0 -61 0 -61 49c0 32 6 49 48 49h438c47 0 48 -23 48 -50c0 -22 0 -24 -10 -37"],120668:[694,0,793,91,702,"702 646v-598c0 -35 -10 -48 -48 -48h-51c-41 0 -48 16 -48 48v266h-317v-266c0 -35 -10 -48 -48 -48h-51c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h51c41 0 48 -16 48 -48v-238h317v238c0 35 10 48 48 48h51c41 0 48 -16 48 -48"],120669:[716,22,854,62,792,"426 716c293 0 366 -175 366 -375c0 -163 -51 -232 -90 -271c-37 -37 -110 -92 -274 -92c-275 0 -366 152 -366 362c0 301 174 376 364 376zM427 55c183 0 229 121 229 285c0 188 -44 302 -231 302c-96 0 -230 -96 -230 -302c0 -151 41 -285 232 -285zM260 348 c0 25 -4 56 26 63h282c30 -8 26 -38 26 -62c0 -25 4 -55 -26 -63c-121 0 -277 -1 -282 0c-30 8 -26 38 -26 62"],120670:[694,0,329,92,239,"239 646v-598c0 -35 -10 -48 -48 -48h-51c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h51c41 0 48 -16 48 -48"],120671:[694,0,762,91,701,"421 405l280 -376c0 -29 -14 -29 -35 -29h-60c-19 0 -34 0 -50 21l-224 300l-111 -105v-168c0 -35 -10 -48 -48 -48h-34c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h34c41 0 48 -16 48 -48v-276l322 304c21 20 41 20 58 20h61c14 0 34 0 34 -21c0 -4 0 -10 -12 -21"],120672:[694,0,671,41,630,"335 694c40 0 86 6 99 -29c0 0 196 -618 196 -642c-20 -28 -134 -36 -146 4l-158 542c-11 -81 -155 -543 -157 -542c-16 -31 -52 -27 -74 -27c-21 0 -44 3 -54 23c0 24 196 642 196 642c9 25 45 33 98 29"],120673:[694,0,976,91,886,"687 651c17 41 42 43 67 43h84c41 0 48 -16 48 -48v-598c0 -35 -10 -48 -48 -48h-27c-41 0 -48 16 -48 48v517h-1l-199 -500c-18 -43 -42 
-43 -76 -43s-58 0 -76 43c-24 59 -185 455 -196 500h-1v-517c0 -35 -10 -48 -48 -48h-27c-37 0 -48 12 -48 48v598 c0 37 12 48 48 48h83c18 0 50 0 67 -41l200 -504c-1 4 129 326 198 502"],120674:[694,0,793,91,702,"303 654l275 -533h1v525c0 35 10 48 48 48h27c41 0 48 -16 48 -48v-598c0 -35 -10 -48 -48 -48h-93c-20 0 -50 0 -71 40l-275 533h-1v-525c0 -35 -10 -48 -48 -48h-27c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h93c20 0 50 0 71 -40"],120675:[687,0,732,45,687,"64 571c-13 10 -12 39 -12 60c-4 18 4 57 27 56c3 1 349 0 575 0c32 -8 26 -48 26 -59c0 -12 5 -48 -26 -58c-3 0 -570 1 -590 1zM149 415h435c32 -8 26 -46 26 -57s6 -49 -26 -57c-232 0 -436 -5 -445 1c-22 13 -17 45 -17 58c0 20 1 62 27 55zM632 120 c13 0 42 4 52 -26c1 -10 17 -93 -24 -93h-586c-22 -8 -28 29 -28 56c0 15 -6 59 26 63h560"],120676:[716,22,793,61,732,"732 342c0 -205 -78 -364 -336 -364c-270 0 -335 177 -335 364c0 183 59 374 336 374c262 0 335 -173 335 -374zM397 58c188 0 188 211 188 300c0 90 0 280 -189 280c-188 0 -188 -192 -188 -280c0 -91 0 -300 189 -300"],120677:[690,2,793,92,700,"92 664c2 5 6 19 26 26c3 1 339 -1 558 -1c12 -3 21 -12 24 -23c2 -6 0 -335 0 -641c-7 -25 -31 -25 -70 -25s-62 0 -69 25l-2 571h-326l-1 -571c-9 -28 -32 -25 -72 -25c-11 -4 -67 -7 -67 21"],120678:[694,0,701,91,641,"238 268v-220c0 -35 -10 -48 -48 -48h-51c-37 0 -48 12 -48 48v598c0 37 12 48 48 48h250c154 0 252 -65 252 -216c0 -144 -94 -210 -251 -210h-152zM358 608h-123v-261h122c134 0 145 64 145 131c0 70 -13 130 -144 130"],120679:[716,22,854,62,792,"426 716c293 0 366 -175 366 -375c0 -163 -51 -232 -90 -271c-37 -37 -110 -92 -274 -92c-275 0 -366 152 -366 362c0 301 174 376 364 376zM653 284h-457c17 -102 113 -229 231 -229c130 0 216 136 226 229zM191 413h462c-45 192 -134 219 -228 229 c-81 8 -217 -82 -234 -229"],120680:[693,-1,793,61,732,"62 667c2 5 6 19 26 26c3 1 375 -1 618 -1c30 -8 26 -40 26 -50c0 -13 7 -42 -22 -49l-439 -1c96 -113 200 -229 200 -245c-2 -5 -121 -160 -186 -238c1 0 262 2 376 2c35 0 61 1 69 -24c0 -11 12 -78 -18 -86h-623c-29 10 -27 28 -27 35c2 4 152 198 238 302 c-42 
45 -208 238 -238 283v46"],120681:[688,0,732,40,692,"293 587l-201 -3c-28 0 -52 0 -52 48v8c0 37 12 48 48 48h556c41 0 48 -16 48 -48v-8c0 -48 -24 -48 -52 -48c-44 0 -156 3 -200 3v-539c0 -35 -10 -48 -48 -48h-51c-37 0 -48 12 -48 48v539"],120682:[715,0,854,61,792,"355 441c-5 9 5 167 -98 167c-12 0 -59 -4 -62 -52c-1 -22 -12 -29 -27 -36h-79c-29 10 -27 30 -27 39c32 246 371 156 365 -37c18 82 78 193 173 193c113 0 192 -74 192 -156c0 -35 -24 -35 -26 -39h-80c-45 22 -13 34 -39 67c-14 16 -31 21 -51 21 c-107 -24 -109 -216 -106 -176c0 -190 8 -405 7 -409c-11 -28 -46 -23 -72 -23c-37 0 -83 -4 -68 28"],120683:[695,0,793,62,731,"463 32c-6 -39 -25 -30 -72 -32l-38 1c-7 1 -27 9 -27 27v79c-37 0 -149 20 -207 80c-40 44 -57 99 -57 160c0 62 17 116 57 160c58 59 169 81 207 81v79c8 24 28 28 45 28h41c34 0 48 -10 48 -29v-78c21 -7 271 -3 271 -241c0 -237 -250 -235 -268 -241v-74zM592 347 c0 90 -41 148 -128 166c-2 0 -2 -1 -2 -166s0 -166 2 -166c85 17 128 73 128 166zM201 346c2 -133 64 -153 124 -164v330c-79 -19 -124 -70 -124 -166"],120684:[694,0,732,37,694,"433 368l252 -326c4 -5 9 -13 9 -20c0 -22 -23 -22 -36 -22h-83c-19 0 -35 0 -50 22l-167 240l-161 -238c-12 -18 -16 -24 -49 -24h-75c-14 0 -36 0 -36 23c0 7 1 8 10 20l236 325l-219 284c-10 12 -10 14 -10 20c0 22 21 22 35 22h83c21 0 34 0 51 -23l135 -185l131 187 c15 21 32 21 49 21h75c14 0 36 0 36 -23c0 -7 -1 -8 -10 -20"],120685:[695,0,854,62,793,"62 583c0 23 15 30 27 36c36 3 118 17 143 -46c46 -115 -34 -325 124 -386l1 480c12 36 51 27 67 27c14 0 58 7 67 -25c2 -6 3 -251 3 -482c83 31 108 126 108 239c0 99 8 161 57 185c14 7 62 5 108 6c16 -4 26 -18 26 -34c0 -9 -11 -39 -41 -43c-9 -16 -13 -52 -13 -114 c0 -155 -56 -289 -246 -318c0 -27 5 -98 -26 -106c-5 -2 -49 -1 -83 -1c-6 2 -17 5 -25 20l-3 87c-223 28 -241 202 -241 317c0 91 -8 121 -21 121c-29 4 -32 29 -32 37"],120686:[716,0,793,48,744,"397 643c-154 0 -193 -73 -193 -179c0 -81 50 -156 68 -202c30 -77 116 -205 45 -260c-5 -2 -136 -1 -240 -1c-32 11 -28 39 -28 51c0 11 -5 47 26 55c10 -1 143 -1 179 1c-59 102 -182 242 -192 353c-15 173 161 
262 334 255c130 0 187 -25 226 -48 c103 -60 109 -162 109 -207c0 -137 -129 -246 -192 -353h153c10 0 41 4 50 -24c2 -5 2 -9 2 -29c0 -11 4 -46 -26 -53l-240 -1c-4 1 -16 4 -25 21c0 165 135 286 135 442c0 106 -38 179 -191 179"],120687:[694,-8,915,60,855,"392 8h133c17 7 30 14 41 32c1 3 286 621 289 635c0 7 -2 14 -8 19h-779c-6 -5 -8 -12 -8 -19c5 -23 283 -620 289 -635c7 -16 25 -28 43 -32zM653 572l-205 -446l-205 446h410"],120688:[469,13,742,47,720,"562 113c22 0 19 36 28 41h130c0 -63 -61 -165 -159 -165c-54 0 -104 30 -144 88c-17 -22 -65 -67 -126 -84c-43 -12 -241 -36 -244 236c0 72 9 138 59 188c19 19 62 52 143 52c36 0 109 -18 176 -99c3 14 32 93 34 96l3 3l134 -2c0 -17 -72 -204 -96 -256 c21 -51 47 -98 62 -98zM184 229c0 -71 0 -116 65 -116c54 0 91 77 106 97c-21 56 -49 135 -108 135c-66 0 -63 -54 -63 -116"],120689:[733,90,549,46,503,"278 -11c-58 0 -89 32 -95 37l-1 -116c-58 0 -132 -1 -134 0c0 277 -10 668 28 737c46 83 141 86 175 86c120 0 196 -47 196 -189c0 -25 -4 -85 -67 -129c52 -25 123 -71 123 -210c0 -144 -128 -216 -225 -216zM241 638c-62 0 -58 -57 -58 -137v-54c11 2 28 3 44 8 c95 26 84 91 84 99c-3 89 -39 84 -70 84zM275 92c52 0 91 48 91 113c0 121 -109 146 -183 152c0 -57 0 -145 1 -155c3 -22 18 -110 91 -110"],120690:[469,201,610,32,577,"390 206c0 -10 29 -104 29 -277c0 -44 -61 -146 -114 -129c-66 22 -96 74 -114 135c2 106 28 215 64 298c-16 66 -34 98 -54 98c-28 0 -32 -38 -33 -44h-136c6 100 114 182 170 182c50 0 101 -45 135 -110c56 56 164 98 240 102v-127c-105 -6 -187 -123 -187 -128"],120691:[719,10,518,46,475,"259 -10c-111 0 -213 46 -213 228c0 130 54 191 125 215l12 4l-17 9c-21 12 -87 46 -87 127c0 87 77 146 135 146c41 0 45 -3 165 -71c93 -53 96 -54 96 -61c0 -8 -28 -54 -29 -55c-2 -1 -4 -2 -7 -3c-14 0 -175 101 -197 108c-8 3 -16 4 -24 4c-26 0 -62 -24 -62 -66 c0 -18 8 -34 22 -46c3 -3 154 -92 170 -97c77 -26 124 -91 124 -214c0 -131 -53 -210 -173 -227c-9 -1 -25 -1 -40 -1zM180 165c0 -57 11 -102 59 -102c31 0 56 19 73 47c13 26 26 67 26 161c0 74 -22 101 -59 101c-55 0 -84 -62 -92 -107c-4 -23 -7 -67 -7 
-100"],120692:[470,11,472,33,456,"34 164c0 25 -3 62 57 95c-24 28 -29 57 -29 70c0 112 125 139 164 139c66 14 112 -35 230 -124l-98 -96c-18 18 -90 84 -122 84c-22 0 -40 -10 -40 -25s16 -19 63 -21l32 -2v-70c-17 0 -62 1 -73 1c-30 0 -49 -2 -49 -25c0 -38 37 -69 76 -63c38 7 69 64 88 77l98 -90 c-36 -47 -137 -125 -196 -125c-76 0 -194 51 -201 175"],120693:[734,265,518,46,472,"65 734h125c4 -31 38 -50 56 -50c81 0 123 58 139 48c3 -2 72 -101 75 -107c-134 -98 -277 -232 -277 -373c0 -22 4 -92 57 -112c18 -7 55 4 116 -25c36 -18 116 -74 116 -184c0 -117 -130 -192 -224 -196l-1 120c12 16 78 -10 88 77c0 15 -12 68 -79 68 c-99 0 -210 93 -210 253c0 60 13 169 128 304c-86 35 -109 117 -109 177"],120694:[481,200,564,30,514,"150 294h-120c0 60 47 173 129 185c20 9 94 -7 117 -65c22 30 64 54 105 65c81 0 118 -74 130 -138c7 -32 2 -266 3 -541c-66 0 -130 -3 -133 2l-1 489c-6 19 -13 39 -33 39c-19 0 -27 -15 -34 -42l-1 -299l-138 -1c-1 151 1 341 -1 349c-2 11 -6 19 -9 19 c-7 0 -14 -43 -14 -62"],120695:[733,11,549,46,503,"46 359c0 165 56 374 228 374c183 0 229 -228 229 -372s-45 -372 -228 -372c-182 0 -229 226 -229 370zM272 595c-80 22 -92 -141 -88 -168h181c8 94 -33 180 -93 168zM274 126c55 0 86 43 92 198h-183c3 -106 18 -198 91 -198"],120696:[471,11,304,34,290,"34 471h139l-1 -357c12 -8 31 14 31 23h87c0 -31 -27 -148 -128 -148c-64 0 -111 52 -127 105"],120697:[471,12,531,62,547,"62 471h134c6 -4 3 -73 3 -147l57 53c82 76 87 82 160 82c15 0 49 -1 65 -1c1 -18 1 -105 0 -123c-21 0 -54 10 -68 10c-5 0 -8 -1 -12 -3c-5 -2 -104 -94 -104 -95c94 -92 250 -247 250 -249c0 -4 -10 -8 -12 -10h-178l-158 162v-162h-137v483"],120698:[734,1,549,19,530,"31 8c0 2 117 311 162 425c0 5 -49 108 -61 128c-7 12 -7 12 -17 10c-41 -12 -52 -40 -64 -40c-8 0 -12 7 -12 8c-2 8 -20 111 -20 115c0 15 85 80 152 80c21 0 47 -38 88 -119c61 -117 271 -603 271 -603c0 -15 -15 -12 -75 -12c-44 0 -71 1 -75 10l-108 245l-103 -254 c-59 0 -138 -8 -138 7"],120699:[470,204,610,35,618,"170 329c-1 -111 -20 -214 62 -214c47 0 58 49 61 62c1 7 0 161 1 286c6 9 72 3 137 3l2 -300c6 -22 17 -44 
41 -44c9 0 39 23 44 61c2 16 -4 22 6 30h87c3 -12 7 -61 7 -73c0 -86 -69 -163 -164 -163c-73 0 -105 47 -121 68c8 -18 -61 -73 -101 -68c-12 0 -37 1 -61 19 c-1 0 -2 -142 -2 -196c-4 -6 -65 -4 -131 -4c-6 5 -1 318 -1 674h133v-141"],120700:[458,0,518,-12,500,"192 0c0 1 -85 251 -116 341c-14 41 -69 92 -88 105c0 4 3 9 7 11c2 0 132 -1 180 -1l103 -290c58 70 87 123 87 171c0 18 -10 92 -17 110c0 5 7 11 7 11c65 0 124 0 128 -5c2 -4 17 -91 17 -100c0 -110 -138 -296 -179 -353h-129"],120701:[760,211,518,46,472,"331 480c35 0 29 2 29 -66c0 -101 3 -57 -66 -78c-50 -12 -110 -46 -110 -116c0 -44 27 -71 53 -79c20 -9 56 3 119 -26c36 -18 116 -72 116 -181c0 -118 -72 -164 -162 -139l-1 116c10 13 26 4 26 23c0 18 -12 41 -31 53c-47 28 -84 -4 -160 48c-62 41 -98 110 -98 185 c0 39 20 137 123 210c1 0 -35 43 -35 107c0 32 12 62 19 71c-3 2 -52 20 -71 52c-16 18 -33 79 -13 100h121c15 -11 1 -34 19 -41c73 -11 169 29 183 29c0 0 22 -124 22 -130c0 -7 -10 -10 -77 -22c-81 -10 -130 -23 -130 -63c0 -34 26 -53 124 -53"],120702:[468,10,579,46,532,"46 229c0 148 125 239 234 239c201 0 252 -188 252 -239c0 -123 -100 -239 -242 -239s-244 119 -244 239zM290 332c-42 0 -106 -27 -106 -103c0 -87 83 -103 106 -103c53 0 106 41 106 103c0 58 -50 103 -106 103"],120703:[458,11,641,-24,642,"642 164c-6 -97 -54 -175 -136 -175c-54 0 -109 39 -128 128c-6 27 -6 27 -7 126v91h-86v-337h-137v337c-84 -9 -99 -32 -121 -32c0 0 -51 102 -51 110c8 6 81 45 152 45l460 1c2 -3 4 -5 7 -7v-117h-87v-81c0 -99 1 -110 8 -123c5 -10 16 -11 22 -2c14 28 1 45 15 59h40 c46 0 49 3 49 -23"],120704:[469,192,518,46,471,"46 -191c1 242 0 449 7 479c17 78 82 181 205 181c118 0 213 -101 213 -240c0 -146 -103 -240 -211 -240c-17 0 -70 9 -76 15l-1 -196c-59 0 -134 -1 -137 1zM334 229c0 113 -47 130 -79 130c-28 0 -71 -29 -71 -130c0 -96 53 -123 71 -129c60 0 79 72 79 129"],120705:[458,172,488,46,458,"254 124c118 -5 188 -75 188 -153c0 -96 -97 -143 -163 -143c-3 0 -10 -1 -16 7v109c-1 5 58 25 40 22c9 17 -3 32 -59 34c-23 1 -38 3 -53 7c-70 21 -145 87 -145 200c0 200 261 245 412 251v-130c-53 5 -262 -6 -275 
-103c0 -44 16 -101 71 -101"],120706:[458,10,625,46,594,"511 321c19 -39 21 -77 21 -97c0 -19 6 -114 -104 -194c-47 -30 -93 -40 -138 -40c-72 0 -134 27 -178 74c-42 42 -66 96 -66 160c0 128 103 222 224 233c2 1 238 1 324 1v-137h-83zM396 224c0 73 -64 96 -104 96c-58 0 -108 -38 -108 -96c-4 -45 47 -102 106 -98 c46 -3 106 35 106 98"],120707:[458,11,503,-44,476,"476 163c-2 -71 -62 -174 -171 -174c-70 0 -146 47 -168 140c-5 20 -5 111 -5 205c-9 -1 -36 -2 -40 -2c-45 -10 -61 -40 -74 -29c-7 7 -62 102 -62 107c0 18 85 43 138 47c2 1 259 1 356 1v-123l-181 -1c0 -81 0 -168 2 -174c7 -22 31 -38 56 -38c21 0 49 16 53 48 c0 3 1 10 8 17h81"],120708:[458,10,549,34,503,"34 446c0 15 89 12 155 12c89 -133 52 -237 81 -302c13 -24 25 -29 41 -29c23 0 54 16 54 105c0 83 -33 161 -72 211c0 18 49 15 121 15c40 0 36 -1 42 -14c30 -64 47 -142 47 -213c0 -139 -84 -241 -192 -241c-110 0 -192 104 -192 239c0 72 -38 164 -85 217"],120709:[469,193,641,47,594,"47 233c0 114 50 207 68 225h146c7 -3 10 -14 5 -19c-3 -5 -110 -69 -110 -209c0 -73 72 -109 96 -120v210c21 90 99 149 167 149c105 0 175 -97 175 -236c0 -43 -5 -113 -92 -185c-29 -22 -78 -42 -113 -51v-184l-7 -6h-123l-7 6v184c-87 22 -205 96 -205 236zM485 243 c-1 47 -12 116 -60 116c-22 0 -30 -17 -36 -43v-206c103 41 96 108 96 133"],120710:[470,208,610,33,577,"577 -139c0 -65 7 -69 -42 -69c-89 0 -162 67 -233 194c-21 -28 -88 -168 -110 -190h-139c-3 2 -9 7 -7 14c1 1 135 236 184 324c-83 164 -153 199 -189 199c-3 2 -5 4 -7 7v61c0 67 -5 69 18 69c45 0 157 -15 256 -192c15 20 93 173 110 191h139c2 -2 9 -8 7 -15 c-1 -1 -133 -236 -182 -324c83 -163 151 -198 187 -198c9 -9 8 -7 8 -71"],120711:[722,193,641,46,595,"46 281c0 67 22 158 37 177h123c5 -7 5 -6 7 -11c0 -19 -57 -60 -57 -170c0 -36 19 -122 103 -166v611h123v-611c31 14 103 60 103 166c0 102 -57 158 -57 170c2 5 7 11 7 11h123c20 -20 37 -138 37 -177c0 -161 -105 -256 -212 -285v-183l-8 -6h-109l-7 6v183 c-89 24 -213 116 -213 285"],120712:[458,11,732,39,693,"693 201c0 -114 -83 -212 -192 -212c-48 0 -101 29 -135 66c-34 -37 -87 -66 -135 -66c-103 0 -192 89 
-192 213c0 144 115 253 124 256l154 -4c2 -4 3 -10 1 -13c-80 -77 -225 -255 -87 -341c35 0 61 32 61 73v138c3 2 18 1 74 1s71 1 74 -1v-131c8 -45 40 -78 61 -80 c27 13 55 63 55 99c0 81 -75 175 -142 242c-2 3 -1 9 1 13c4 6 74 4 155 4c69 -69 123 -150 123 -257"],120713:[636,6,453,24,430,"245 390c-27 33 -107 112 -201 170l64 76c141 -93 324 -271 322 -448c-4 -119 -94 -194 -201 -194c-200 0 -262 224 -147 345c66 69 135 56 163 51zM119 201c-2 -59 45 -110 109 -110c60 0 103 43 104 110c0 63 -43 110 -104 110c-63 0 -109 -47 -109 -110"],120714:[519,-2,534,59,483,"483 431l-62 -62c-24 43 -81 50 -139 50c-90 0 -127 -42 -132 -115c79 -2 141 -3 221 -2v-83c-74 0 -147 0 -221 -2c16 -87 21 -121 126 -121c57 0 123 11 168 39l17 -59c-67 -55 -121 -74 -207 -74c-143 0 -195 91 -195 233c0 163 92 284 252 284c63 0 129 -41 172 -88"],120715:[712,22,627,62,609,"609 159l-94 23c-26 -100 -102 -204 -221 -204c-28 0 -49 9 -72 26c-50 38 -129 243 -160 269l75 72c52 -57 71 -251 188 -251c66 0 105 59 110 116c-121 44 -335 180 -335 320c0 123 112 206 231 176c153 6 218 -251 196 -405l82 -26v-116zM463 316 c25 86 0 293 -177 293c-33 0 -67 -28 -67 -61c0 -105 154 -190 244 -232"],120716:[518,8,574,19,538,"414 330l-311 -316c-11 -14 -22 -28 -45 -21c-20 6 -26 33 -15 50c73 138 25 350 -24 429l79 35c28 -32 98 -145 60 -307l298 298c12 14 39 25 62 18c20 -6 26 -44 15 -60c-6 -7 -21 -19 -41 -89c-45 -194 39 -321 44 -337l-78 -29c-36 22 -95 150 -44 329"],120717:[603,192,565,33,536,"536 214c0 125 -75 196 -203 209v180h-93l-1 -181c-148 -8 -206 -132 -206 -206c0 -142 103 -205 206 -205l1 -203h93v204c94 -2 203 55 203 202zM432 214c0 -64 -37 -108 -100 -109c-2 73 -2 148 0 223c63 0 100 -49 100 -114zM241 106c-68 0 -102 36 -102 109 c0 75 34 113 102 114c2 -75 2 -150 0 -223"],120718:[444,199,463,27,431,"387 64c-86 -88 -232 -72 -261 -21c0 -42 7 -92 8 -149l185 -1v-90c-110 10 -287 -43 -287 81c0 82 -5 196 -5 342c0 133 76 218 203 218c66 0 116 -24 159 -60c55 -45 60 -259 -2 -320zM343 225c0 67 -45 117 -113 117c-69 0 -112 -36 -112 -114c0 -62 39 -139 114 -123 c66 0 111 45 111 
120"],120719:[514,11,834,33,800,"277 404c-10 -3 -131 -58 -131 -202c0 -124 89 -213 192 -213c48 0 101 29 135 66c34 -37 87 -66 135 -66c109 0 192 98 192 212c0 124 -71 281 -157 313h-415c-21 0 -107 -10 -160 -58c-35 -32 -34 -44 -34 -54l63 -63c39 49 121 65 180 65zM593 404 c38 -57 70 -117 70 -205c0 -36 -10 -83 -37 -96c-69 0 -93 114 -93 138c0 68 0 66 -6 69c-56 0 -100 2 -103 0c-1 -38 -3 -76 -4 -80c-7 -40 -65 -127 -100 -127c-27 13 -37 61 -37 97c0 86 51 144 111 204h199"],120720:[694,0,732,-14,634,"37 0c-29 0 -56 -1 -51 27c2 14 301 579 335 639c21 36 66 28 106 28c49 0 82 6 96 -28c10 -63 113 -625 111 -639c-6 -34 -46 -27 -72 -27c-30 1 -66 -4 -75 29l-22 121h-284l-29 -57c-46 -88 -49 -93 -115 -93zM397 569c-13 -34 -140 -276 -169 -332h223 c-38 221 -54 330 -54 332"],120721:[694,0,732,36,672,"200 694h246c42 0 255 0 223 -180c-13 -78 -75 -130 -171 -151c36 -6 171 -28 146 -169c-32 -181 -218 -194 -292 -194h-274c-37 0 -46 12 -40 48l106 598c6 37 20 48 56 48zM241 397h116c25 0 156 0 176 114c16 94 -99 97 -138 97h-117zM186 86h145c48 0 156 7 174 110 c23 129 -116 129 -151 129h-126"],120722:[690,0,579,36,595,"276 586c-47 -271 -97 -555 -100 -561c-14 -28 -37 -25 -77 -25l-41 1c-5 2 -17 12 -22 27l112 636c3 5 10 19 31 26c3 1 244 0 395 0c36 4 22 -104 -3 -104h-295"],120723:[694,-8,915,2,798,"453 694h133c16 -7 27 -14 35 -32c1 -3 177 -621 177 -635c-1 -7 -4 -14 -11 -19h-779c-5 5 -6 12 -5 19c9 23 392 620 401 635c10 16 30 28 49 32zM614 130l-126 446l-284 -446h410"],120724:[691,0,640,36,634,"232 311l-36 -201h301c33 0 56 0 48 -48l-2 -14c-7 -35 -19 -48 -57 -48h-408c-37 0 -46 12 -39 48l104 595c7 37 21 48 57 48h393c47 0 44 -22 38 -56c-8 -48 -25 -45 -60 -45h-290l-33 -185h263c35 0 47 -10 40 -47c-7 -40 -25 -47 -56 -47h-263"],120725:[694,0,671,6,664,"646 607l-449 -507h304c46 0 72 1 62 -52c-6 -35 -18 -48 -56 -48h-459c-46 0 -45 20 -38 56c3 20 4 22 16 35l451 509h-287c-40 0 -70 -4 -61 45c5 32 14 49 56 49h438c47 0 44 -23 39 -50c-4 -22 -4 -24 -16 -37"],120726:[694,0,793,36,756,"755 646l-106 -598c-6 -35 -18 -48 -56 -48h-51c-41 0 
-45 16 -40 48l47 266h-317l-47 -266c-6 -35 -18 -48 -56 -48h-51c-37 0 -46 12 -40 48l106 598c6 37 20 48 56 48h51c41 0 45 -16 40 -48l-42 -238h317l42 238c6 35 18 48 56 48h51c41 0 45 -16 40 -48"],120727:[716,22,854,51,801,"491 716c293 0 335 -175 300 -375c-21 -119 -86 -363 -428 -363c-275 0 -339 152 -302 362c53 301 240 376 430 376zM376 55c183 0 249 121 279 285c33 188 9 302 -178 302c-94 0 -283 -100 -283 -302c0 -153 -9 -285 182 -285zM260 348c5 25 6 56 37 63 c121 0 277 1 282 0c29 -8 20 -38 15 -62c-4 -25 -5 -55 -37 -63c-121 0 -277 -1 -282 0c-28 8 -19 38 -15 62"],120728:[694,0,329,37,293,"291 646l-105 -598c-6 -35 -18 -48 -56 -48h-51c-37 0 -46 12 -40 48l106 598c6 37 20 48 56 48h51c41 0 45 -16 39 -48"],120729:[694,0,762,36,753,"431 405l214 -376c-5 -29 -19 -29 -40 -29h-60c-19 0 -34 0 -46 21l-172 300l-129 -105l-30 -168c-6 -35 -18 -48 -56 -48h-34c-37 0 -46 12 -40 48l106 598c6 37 20 48 56 48h34c41 0 45 -16 40 -48l-49 -276l376 304c24 20 44 20 61 20h61c14 0 34 0 30 -21 c0 -4 -1 -10 -15 -21"],120730:[694,0,671,-16,573,"396 694c40 0 87 6 94 -29c0 0 87 -618 83 -642c-23 -26 -140 -38 -145 4l-63 542c-25 -81 -251 -543 -252 -542c-22 -31 -57 -27 -79 -27c-21 0 -44 3 -50 23c4 24 309 642 309 642c13 25 51 33 103 29"],120731:[694,0,976,36,940,"741 651c24 41 49 43 74 43h84c41 0 45 -16 40 -48l-106 -598c-6 -35 -18 -48 -56 -48h-27c-41 0 -45 16 -40 48l91 517h-1l-287 -500c-25 -43 -49 -43 -83 -43s-58 0 -69 43l-108 500h-1l-91 -517c-6 -35 -18 -48 -56 -48h-27c-37 0 -46 12 -40 48l106 598 c6 37 20 48 56 48h83c18 0 50 0 60 -41l111 -504c14 35 257 451 287 502"],120732:[694,0,793,36,756,"357 654l181 -533h1l93 525c6 35 18 48 56 48h27c41 0 45 -16 40 -48l-106 -598c-6 -35 -18 -48 -56 -48h-93c-20 0 -50 0 -64 40l-181 533h-1l-93 -525c-6 -35 -18 -48 -56 -48h-27c-37 0 -46 12 -40 48l106 598c6 37 20 48 56 48h93c20 0 50 0 64 -40"],120733:[687,0,732,-10,734,"104 571c-11 10 -5 39 -1 60c-1 18 14 57 37 56c3 1 349 0 575 0c30 -8 17 -48 15 -59c-2 -12 -3 -48 -36 -58c-3 0 -570 1 -590 1zM162 415h435c30 -8 17 -46 16 -57c-2 -11 -3 -49 -37 
-57c-232 0 -436 -5 -444 1c-20 13 -14 45 -12 58c-1 20 17 62 42 55zM593 123 c13 0 42 1 47 -29c-1 -10 1 -93 -40 -93h-586c-24 -8 -28 29 -23 56c3 15 9 59 42 63c87 10 399 1 560 3"],120734:[716,22,793,49,742,"731 342c-36 -205 -142 -364 -400 -364c-270 0 -304 177 -271 364c32 183 125 374 402 374c262 0 305 -173 269 -374zM346 58c188 0 225 211 241 300c16 90 49 280 -140 280c-188 0 -222 -192 -237 -280c-16 -91 -53 -300 136 -300"],120735:[690,2,793,35,757,"148 664c3 5 10 19 31 26c3 1 339 -1 558 -1c11 -3 19 -12 20 -23c1 -6 -59 -335 -113 -641c-12 -25 -36 -25 -75 -25s-62 0 -64 25l98 571h-326l-101 -571c-14 -28 -37 -25 -77 -25c-11 -4 -68 -7 -63 21"],120736:[694,0,701,36,668,"224 268l-39 -220c-6 -35 -18 -48 -56 -48h-51c-37 0 -46 12 -40 48l106 598c6 37 20 48 56 48h250c154 0 241 -65 214 -216c-25 -144 -131 -210 -288 -210h-152zM404 608h-123l-46 -261h122c134 0 156 64 168 131c12 70 10 130 -121 130"],120737:[716,22,854,51,801,"491 716c293 0 335 -175 300 -375c-21 -119 -86 -363 -428 -363c-275 0 -339 152 -302 362c53 301 240 376 430 376zM642 284h-457c-1 -102 71 -229 191 -229c129 0 239 136 266 229zM203 413h462c-12 192 -94 219 -188 229c-78 8 -231 -82 -274 -229"],120738:[693,-1,793,5,786,"118 667c3 5 10 19 31 26c3 1 375 -1 618 -1c28 -8 19 -40 17 -50c-2 -13 0 -42 -31 -49l-439 -1c76 -113 160 -229 157 -245c-3 -5 -149 -160 -228 -238c1 0 305 2 419 2c35 0 19 1 22 -24c-2 -11 -8 -78 -39 -86h-617c-27 10 -22 28 -21 35c3 4 187 198 291 302 c-34 45 -166 238 -188 283"],120739:[688,0,732,89,746,"336 584h-202c-39 0 -51 4 -42 56c7 37 21 48 57 48h556c47 0 44 -22 38 -56c-9 -48 -33 -48 -61 -48h-199l-95 -536c-6 -35 -19 -48 -57 -48h-51c-37 0 -46 12 -39 48"],120740:[715,0,854,96,829,"370 441c-4 9 34 167 -69 167c-12 0 -60 -4 -71 -52c-5 -22 -17 -29 -33 -36h-79c-28 10 -22 30 -20 39c75 246 398 156 358 -37c32 82 112 193 207 193c113 0 179 -74 165 -156c-7 -35 -31 -35 -33 -39h-80c-41 22 -7 34 -28 67c-11 16 -27 21 -47 21 c-111 -24 -147 -216 -137 -176c-33 -190 -63 -405 -65 -409c-16 -28 -50 -23 -76 -23c-37 0 -84 -4 -63 
28"],120741:[695,0,793,57,735,"407 32c-13 -39 -30 -30 -77 -32l-38 1c-7 1 -26 9 -22 27l14 79c-37 0 -146 20 -193 80c-58 78 -42 230 56 320c69 59 183 81 221 81l14 79c13 24 33 28 50 28h41c34 0 47 -10 43 -29l-14 -78c20 -7 271 -3 229 -241c-42 -237 -292 -235 -311 -241zM592 347 c16 90 -15 148 -99 166c-3 0 -62 -332 -58 -332c88 17 141 73 157 166zM201 346c-22 -133 37 -153 95 -164l58 330c-82 -19 -136 -70 -153 -166"],120742:[694,0,732,-20,706,"437 368l194 -326c16 -26 -2 -42 -34 -42h-83c-19 0 -35 0 -46 22l-125 240l-203 -238c-15 -18 -20 -24 -53 -24h-75c-14 0 -36 0 -32 23c1 7 2 8 13 20l294 325l-169 284c-19 28 1 42 32 42h83c21 0 34 0 47 -23l103 -185l163 187c19 21 36 21 53 21h75 c14 0 36 0 32 -23c-1 -7 -2 -8 -13 -20"],120743:[695,0,854,102,834,"103 583c5 23 21 30 34 36c36 3 121 17 135 -46c25 -115 -92 -325 56 -386l85 480c19 36 56 27 72 27c14 0 59 7 63 -25c1 -6 -42 -251 -82 -482c88 31 130 126 150 239c17 99 36 161 89 185c16 7 63 5 109 6c41 -11 16 -71 -28 -77c-12 -16 -22 -52 -33 -114 c-28 -155 -107 -289 -302 -318c-5 -27 -13 -98 -45 -106c-5 -2 -49 -1 -83 -1c-6 2 -16 5 -22 20l13 87c-218 28 -206 202 -185 317c16 91 13 121 0 121c-28 4 -27 29 -26 37"],120744:[716,0,793,-7,754,"447 643c-154 0 -206 -73 -224 -179c-15 -81 22 -157 32 -202c17 -78 80 -205 -1 -260c-5 -2 -136 -1 -240 -1c-30 11 -21 39 -19 51c2 11 3 47 36 55c9 -1 142 -1 179 1c-41 102 -140 242 -130 353c15 173 207 262 379 255c130 0 183 -25 218 -48 c92 -60 80 -162 72 -207c-24 -137 -172 -246 -254 -353h153c10 0 42 4 46 -24c1 -5 0 -9 -4 -29c-1 -11 -4 -46 -35 -53l-240 -1c-4 1 -15 4 -21 21c29 165 185 286 213 442c18 106 -7 179 -160 179"],120745:[697,-8,915,117,912,"331 8h133c18 7 33 14 47 32c2 3 395 621 401 635c1 7 0 14 -5 19c-6 7 -557 0 -779 0c-7 -5 -10 -12 -11 -19c1 -23 174 -620 177 -635c4 -16 20 -28 37 -32zM692 572l-284 -446l-126 446h410"],120746:[469,13,695,40,707,"542 113c22 0 25 36 35 41h130c-11 -63 -90 -165 -188 -165c-54 0 -99 30 -128 88c-21 -22 -78 -67 -141 -84c-46 -12 -248 -36 -203 236c12 65 48 240 245 240c36 0 106 -18 158 -99l55 99l133 
-2c-3 -17 -108 -204 -141 -256c12 -51 30 -98 45 -98zM184 229 c-12 -71 -20 -116 45 -116c54 0 104 77 123 97c-11 56 -25 135 -84 135c-66 0 -73 -54 -84 -116"],120747:[733,90,549,-25,491,"219 -11c-58 0 -83 32 -88 37l-22 -116c-58 0 -132 -1 -134 0c49 277 108 668 158 737c61 83 157 86 191 86c120 0 187 -47 162 -189c-4 -25 -19 -85 -89 -129c47 -25 110 -71 85 -210c-25 -144 -166 -216 -263 -216zM297 638c-62 0 -68 -57 -82 -137l-10 -54 c22 4 135 10 147 107c13 89 -24 84 -55 84zM235 92c52 0 99 48 110 113c22 121 -83 146 -156 152c-10 -57 -25 -145 -26 -155c-1 -22 -1 -110 72 -110"],120748:[469,201,610,59,635,"403 206c-2 -10 7 -104 -20 -277c-7 -44 -87 -146 -137 -129c-62 22 -83 74 -90 135c21 106 66 215 117 298c-5 66 -17 98 -37 98c-28 0 -39 -38 -41 -44h-136c24 100 146 182 202 182c50 0 93 -45 116 -110c66 56 181 98 258 102l-23 -127c-106 -6 -208 -123 -209 -128"],120749:[719,10,518,15,516,"195 -10c-111 0 -205 46 -173 228c23 130 88 191 163 215l13 4l-16 9c-19 12 -79 46 -64 127c15 87 102 146 160 146c41 0 45 -3 153 -71c84 -53 86 -54 85 -61c-4 -6 -14 -19 -46 -58c-14 0 -157 101 -178 108c-8 3 -15 4 -23 4c-51 0 -101 -71 -60 -112 c2 -3 138 -92 153 -97c72 -26 108 -91 86 -214c-23 -131 -90 -210 -213 -227c-9 -1 -25 -1 -40 -1zM282 372c-27 0 -106 -17 -135 -207c-10 -57 -7 -102 41 -102c31 0 59 19 81 47c17 26 38 67 54 161c13 74 -4 101 -41 101"],120750:[470,11,472,20,476,"22 164c5 25 8 62 74 95c-19 28 -19 57 -16 70c19 112 149 139 188 139c69 14 106 -35 208 -124l-115 -96c-15 18 -75 84 -107 84c-22 0 -42 -10 -44 -25c0 -15 12 -19 59 -21l32 -2l-13 -70c-17 0 -62 1 -73 1c-30 0 -49 -2 -53 -25c-7 -38 25 -69 65 -63 c39 7 80 64 101 77l83 -90c-45 -47 -160 -125 -218 -125c-76 0 -185 51 -171 175"],120751:[734,265,518,44,529,"153 734h125c-1 -31 29 -50 47 -50c81 0 133 58 148 48c2 -2 54 -101 56 -107c-151 -98 -318 -232 -343 -373c-4 -22 -12 -92 37 -112c17 -7 56 4 112 -25c33 -18 103 -74 83 -184c-20 -117 -163 -192 -258 -196l20 120c15 16 76 -10 102 77c2 15 0 68 -67 68 c-173 0 -283 267 16 557c-80 35 -89 117 -78 
177"],120752:[481,200,564,57,546,"177 294h-120c11 60 78 173 162 185c21 9 93 -7 105 -65c28 30 74 54 117 65c81 0 105 -74 105 -138c2 -32 -45 -266 -92 -541c-66 0 -131 -3 -133 2l86 489c-3 19 -7 39 -27 39c-19 0 -29 -15 -41 -42l-54 -299l-138 -1c26 151 61 341 61 349c0 11 -3 19 -6 19 c-7 0 -22 -43 -25 -62"],120753:[733,11,549,34,513,"46 359c29 165 122 374 294 374c183 0 188 -228 163 -372s-111 -372 -294 -372c-182 0 -189 226 -163 370zM313 595c-76 22 -117 -141 -117 -168h181c24 94 -2 180 -64 168zM233 126c55 0 93 43 126 198h-183c-15 -106 -16 -198 57 -198"],120754:[471,11,304,11,274,"76 471h139l-64 -357c11 -8 34 14 35 23h88c-5 -31 -54 -148 -155 -148c-64 0 -102 52 -108 105"],120755:[470,12,531,21,521,"112 470c55 0 120 0 122 -1c6 -4 -5 -71 -18 -145l66 53c95 76 101 82 174 82c15 0 49 -1 65 -1l-28 -123c-21 0 -46 10 -60 10c-22 0 -103 -62 -133 -98c8 -27 158 -192 202 -248c0 -4 -7 -9 -10 -11h-178c-23 33 -73 94 -129 162l-31 -157l-133 2"],120756:[734,0,547,-37,478,"-37 0l242 433c-5 12 -31 143 -41 141l-94 -48c-1 8 -1 124 0 128c2 15 98 80 166 80c20 0 40 -38 67 -119c40 -117 175 -613 175 -613c-18 -4 -105 -1 -160 -2l-66 255l-148 -254"],120757:[470,206,610,-20,620,"205 329c-20 -110 -48 -191 11 -214c49 -19 81 40 85 62c9 47 29 149 53 291l134 -1l-49 -301c2 -22 9 -44 33 -44c12 -4 57 27 63 72h85c1 -12 1 -42 -1 -54c-15 -86 -97 -163 -192 -163c-73 0 -97 47 -110 68c-11 -44 -86 -79 -112 -68c-12 0 -37 1 -58 19 c-1 0 -27 -148 -37 -202h-129l-1 2l122 674h126"],120758:[458,0,518,29,523,"267 166c76 76 104 124 117 171v110c1 5 11 11 11 11l128 -1c1 -4 1 -95 0 -104c-20 -110 -191 -295 -242 -352c-62 0 -123 -5 -128 2l-57 338c-7 41 -43 76 -60 89c-9 7 -9 17 -1 27c2 0 132 -1 180 -1c8 -5 26 -120 52 -290"],120759:[768,202,518,38,476,"273 768c16 -13 -9 -50 38 -50c60 0 150 12 164 12l1 -112c-3 -7 -12 -10 -81 -22c-83 -10 -134 -23 -141 -63c-6 -34 16 -53 114 -53c35 0 30 2 18 -66c-18 -101 -7 -57 -80 -78c-148 -35 -160 -170 -91 -195c19 -7 56 3 114 -26c88 -49 100 -147 76 -212 c-24 -54 -94 -99 -173 -105l14 113c12 13 27 4 30 23c3 18 -5 
41 -22 53c-42 28 -84 -4 -151 48c-106 79 -87 281 94 395c1 0 -27 43 -16 107c6 32 23 62 32 71c-3 2 -49 20 -62 52c-9 18 -21 91 1 108h121"],120760:[468,9,579,42,537,"46 229c26 148 167 239 276 239c201 0 215 -164 215 -203c0 -124 -122 -274 -255 -274c-179 0 -256 121 -236 238zM308 332c-41 0 -111 -27 -124 -103c-15 -87 65 -103 88 -103c53 0 113 41 124 103c10 58 -31 103 -88 103"],120761:[458,11,641,13,633,"632 164c-23 -97 -86 -175 -167 -175c-54 0 -102 39 -106 128c-1 27 -1 27 15 126l16 91h-86c-32 -184 -59 -339 -66 -343l-128 2l-2 4l59 337l-126 -32s-29 111 -27 119c9 6 84 30 155 36c6 0 337 1 460 1l-13 -117l-89 -7c-17 -99 -32 -191 -27 -204 c3 -10 14 -11 21 -2c19 28 9 45 26 59h40c45 0 49 3 45 -23"],120762:[469,194,518,-6,491,"487 229c-26 -146 -146 -240 -253 -240c-17 0 -69 9 -74 15l-28 -198c-59 0 -135 0 -138 2c44 242 73 450 86 480c30 78 113 181 236 181c118 0 196 -101 171 -240zM350 229c20 113 -24 130 -56 130c-88 0 -138 -230 -59 -258c62 -4 105 70 115 128"],120763:[460,165,488,53,502,"412 -29c-17 -96 -137 -136 -203 -136l19 109c9 0 40 15 44 22c-12 28 -20 43 -105 41c-66 21 -130 87 -110 200c36 200 249 244 401 250c4 0 35 3 44 3l-13 -138c-1 0 -30 -1 -56 -1c-93 -2 -195 -4 -236 -96c-7 -44 -1 -101 54 -101c88 -3 176 -53 161 -153"],120764:[458,10,625,42,634,"528 321c12 -39 0 -99 -6 -119c-55 -166 -178 -212 -273 -212c-217 0 -279 249 -109 393c42 38 103 68 171 74c2 1 237 1 323 1l-26 -137h-80zM396 224c15 142 -212 118 -212 0c0 -41 27 -91 65 -96c60 -22 139 24 147 96"],120765:[458,11,503,-11,491,"264 -11c-70 0 -138 47 -144 140c-1 20 15 111 31 205c-9 -1 -36 -2 -40 -2c-47 -10 -68 -40 -79 -29c-6 7 -44 102 -43 107c3 18 92 43 146 47c2 1 259 1 356 1l-21 -123l-182 -1c-14 -81 -29 -168 -28 -174c3 -22 24 -38 49 -38c21 0 63 33 73 65h85 c5 -83 -82 -198 -203 -198"],120766:[458,10,549,73,511,"294 127c77 0 109 198 38 316c3 18 51 15 123 15c40 0 36 -1 40 -14c18 -64 22 -142 9 -213c-24 -139 -126 -241 -234 -241c-110 0 -174 104 -150 239c13 72 -10 164 -47 217c3 15 91 12 157 12c66 -133 10 -237 28 -302c9 -24 20 -29 36 
-29"],120767:[469,187,641,60,616,"64 233c20 114 86 207 107 225h146c7 -3 8 -14 2 -19c-4 -5 -122 -69 -147 -209c-13 -73 53 -109 75 -120l42 222c37 90 120 137 188 137c105 0 158 -97 134 -236c-8 -43 -25 -113 -125 -185c-33 -22 -85 -42 -122 -51l-32 -184h-136l31 184c-83 22 -188 96 -163 236z M504 243c8 47 8 116 -40 116c-22 0 -32 -17 -43 -43l-37 -206c110 31 116 109 120 133"],120768:[470,208,610,-11,621,"334 278c18 20 123 173 143 191h139c2 -2 8 -8 5 -15c-30 -72 -192 -269 -239 -324c54 -163 102 -199 138 -199l14 1c7 -9 6 -7 -5 -71c-11 -65 -5 -69 -54 -69c-119 0 -167 111 -199 194c-26 -28 -117 -168 -143 -190h-139c-3 2 -8 7 -5 14c1 1 175 236 240 325 c-55 163 -117 198 -153 198c-4 0 -6 1 -5 7l10 61c12 67 7 69 31 69c78 0 164 -41 222 -192"],120769:[722,193,641,43,604,"49 281c12 67 50 158 68 177h123c4 -7 4 -6 5 -11c-3 -19 -67 -60 -87 -170c-6 -36 -2 -122 74 -166l107 604c3 2 5 7 8 7h109c1 -3 3 -5 5 -7l-106 -604c33 14 114 60 132 166c18 102 -29 158 -27 170c3 5 9 11 9 11h123c17 -20 13 -138 6 -177 c-29 -161 -150 -256 -262 -285l-34 -183l-8 -6h-109l-5 6l32 183c-85 24 -193 116 -163 285"],120770:[458,11,732,31,693,"689 201c-20 -114 -120 -212 -229 -212c-48 0 -96 29 -124 66c-40 -37 -98 -66 -146 -66c-103 0 -177 89 -155 213c26 144 159 252 169 255c80 0 150 1 153 -5c2 -4 2 -8 -1 -11c-70 -60 -169 -155 -184 -241c-6 -36 -5 -84 20 -97c101 -22 97 171 121 207 c4 2 80 0 136 0c6 -3 6 -1 -6 -69c-4 -24 -33 -141 36 -141c59 0 66 63 73 99c15 81 -24 149 -79 216l-21 26c-1 3 1 7 3 11c5 6 75 6 156 6c57 -69 97 -150 78 -257"],120771:[636,6,453,0,411,"258 390c-21 33 -87 112 -171 170l78 76c124 -93 275 -271 242 -448c-25 -119 -128 -194 -235 -194c-200 0 -222 224 -86 345c78 69 145 56 172 51zM99 201c-13 -59 26 -110 90 -110c139 0 176 220 38 220c-63 0 -117 -47 -128 -110"],120772:[519,-2,534,47,513,"513 431l-73 -62c-16 43 -72 50 -130 50c-90 0 -134 -42 -152 -115c78 -2 140 -3 220 -2l-14 -83c-74 0 -147 0 -222 -2c1 -87 0 -121 105 -121c57 0 125 11 175 39l6 -59c-76 -55 -134 -74 -220 -74c-143 0 -179 91 -153 233c28 163 142 284 302 284 c63 0 
121 -41 156 -88"],120773:[712,22,617,49,597,"576 159l-90 23c-43 -100 -138 -204 -257 -204c-28 0 -50 6 -67 26c-46 35 -86 243 -113 269l88 72c42 -57 27 -251 144 -251c66 0 115 59 130 116c-113 44 -300 180 -278 320c19 123 148 206 262 176c154 6 173 -251 124 -405l78 -26zM458 316c40 86 51 293 -125 293 c-80 0 -177 -140 125 -293"],120774:[518,24,574,-3,578,"427 330l-361 -314c-14 -14 -32 -30 -54 -23c-19 6 -20 33 -6 50c97 138 86 350 51 429l85 35c23 -32 73 -145 6 -307l351 298c14 14 43 25 65 18c19 -6 18 -44 4 -60c-7 -7 -24 -19 -56 -89c-79 -194 -18 -347 -16 -363l-67 -28c-32 22 -84 175 -2 354"],120775:[603,192,565,30,541,"537 214c23 125 -40 196 -166 209l32 180h-93l-33 -181c-112 -19 -216 -61 -242 -206c-25 -142 67 -205 170 -205c-17 -93 -25 -160 -35 -203h93c12 85 28 153 36 204c94 -2 213 55 238 202zM433 214c-11 -64 -56 -108 -119 -109c11 73 24 148 40 223 c63 0 91 -49 79 -114zM223 106c-68 0 -95 36 -82 109c13 75 54 113 122 114c-11 -75 -25 -150 -40 -223"],120776:[444,199,463,-11,457,"377 64c-102 -88 -245 -72 -265 -21c-7 -42 -9 -92 -18 -149l185 -1l-16 -90c-108 10 -295 -43 -273 81c14 82 30 196 55 342c24 133 115 218 242 218c66 0 111 -24 148 -60c47 -45 15 -259 -58 -320zM229 105c151 0 190 237 40 237c-69 0 -114 -37 -132 -114 c-15 -62 14 -139 92 -123"],120777:[514,11,834,61,798,"304 404c-11 -3 -141 -58 -167 -202c-22 -124 52 -213 155 -213c48 0 106 29 146 66c28 -37 76 -66 124 -66c109 0 204 99 229 212c28 124 -21 281 -102 313h-415c-21 0 -108 -10 -170 -58c-41 -32 -42 -44 -43 -54l51 -63c48 49 133 65 192 65zM620 404 c28 -57 51 -117 34 -205c-7 -36 -25 -83 -54 -96c-69 0 -73 114 -69 138c12 68 12 66 6 69c-56 0 -99 2 -103 0c-7 -38 -16 -76 -18 -80c-14 -40 -87 -127 -122 -127c-25 13 -26 61 -20 97c15 86 76 144 147 204h199"],120778:[682,3,556,28,539,"206 678c93 0 186 4 232 4c39 0 57 -1 95 -4l6 -7l-14 -142h-36l-8 75c-1 12 -44 19 -111 19c-34 0 -67 -1 -116 -3v-243c12 5 183 8 216 -1c-3 -78 2 -19 2 -30l3 -108h-40l-6 50c-3 26 -19 37 -57 37c-17 0 -101 -1 -118 -3v-187c0 -88 2 -92 48 -95l47 -3v-40 c-134 3 -134 3 -166 3c-34 
0 -34 0 -155 -3v40l37 3c47 4 48 6 48 95v408c0 88 -3 94 -48 98l-37 3v37c149 -3 149 -3 178 -3"],120779:[499,237,522,20,506,"299 157c-35 0 -70 -1 -105 -4v-390c-26 5 -53 8 -88 8c-26 0 -52 -2 -86 -7v37l49 3c29 2 34 6 34 54c0 4 -1 636 -1 641c51 -3 78 -6 120 -7l256 7c38 -3 38 -71 0 -71c-34 0 -183 5 -218 2c-54 0 -64 -22 -66 -222c40 -3 79 -4 118 -4c44 0 78 2 111 5 c37 -1 35 -57 0 -57c-40 3 -84 5 -124 5"],120802:[689,22,499,42,457,"457 331c0 -108 -10 -176 -40 -242c-32 -70 -95 -111 -168 -111c-57 0 -133 27 -173 125c-30 74 -34 148 -34 228c0 82 5 166 38 240c41 91 111 118 170 118c51 0 120 -24 161 -101c42 -82 46 -176 46 -257zM250 40c32 0 85 16 111 108c18 64 18 128 18 195 c0 63 0 132 -19 188c-7 19 -33 96 -111 96c-81 0 -106 -83 -112 -103c-17 -56 -17 -117 -17 -181c0 -79 1 -134 17 -192c20 -73 61 -111 113 -111"],120803:[689,0,499,89,424,"299 689v-630h125v-59h-329v59h125v528c-50 -21 -98 -23 -131 -24v59c48 2 131 5 190 67h20"],120804:[689,0,499,42,449,"83 466c-24 35 -17 21 -41 56c24 88 80 167 194 167c132 0 213 -102 213 -218c0 -124 -116 -197 -188 -266c-31 -29 -107 -105 -139 -134h109c12 0 24 1 36 1h182v-72h-399v65l124 126l77 72c59 61 116 119 116 205c0 80 -51 156 -145 156c-91 0 -119 -79 -139 -158"],120805:[689,22,499,42,457,"92 522l-36 55c42 71 115 112 192 112c99 0 182 -67 182 -154c0 -64 -35 -134 -111 -175c68 -22 138 -85 138 -177c0 -107 -88 -205 -210 -205c-40 0 -132 10 -205 105l11 61c44 -69 123 -104 192 -104c77 0 123 65 123 144c0 51 -22 145 -130 145h-71v62c71 6 73 6 79 7 c61 15 102 75 102 137c0 65 -49 95 -101 95c-61 0 -126 -37 -155 -108"],120806:[667,0,499,28,471,"372 174v-174h-79v174h-265v62l253 431h91v-431h99v-62h-99zM106 236h193v386c0 -58 -141 -299 -193 -386"],120807:[667,22,499,39,449,"153 602v-210c29 27 67 42 107 42c103 0 189 -101 189 -228c0 -128 -100 -228 -219 -228c-81 0 -150 41 -191 99l33 57c29 -55 90 -94 157 -94c9 0 131 0 131 168c0 122 -54 164 -101 164c-58 0 -95 -40 -112 -80h-66v375h335v-65h-263"],120808:[689,22,499,42,457,"415 669v-60c-35 14 -67 21 -105 21c-92 0 -171 -81 -184 
-245c61 76 137 84 169 84c96 0 162 -113 162 -243c0 -81 -19 -127 -64 -179c-39 -46 -82 -69 -142 -69c-85 0 -209 62 -209 349c0 219 126 362 267 362c14 0 55 0 106 -20zM127 223c3 -34 18 -183 124 -183 c57 0 83 34 100 63c24 41 24 88 24 123c0 45 -2 81 -23 118c-20 38 -48 63 -96 63c-83 0 -128 -81 -128 -156c-1 -12 -1 -14 -1 -28"],120809:[667,11,499,42,457,"42 594v73h415v-66c-78 -90 -221 -307 -221 -612h-84c0 238 89 440 237 605h-347"],120810:[689,22,499,42,457,"320 361c67 -24 137 -86 137 -178c0 -108 -89 -205 -208 -205c-115 0 -207 94 -207 205c0 88 65 152 137 178c-89 29 -123 96 -123 150c0 94 82 178 194 178c108 0 193 -82 193 -178c0 -54 -34 -121 -123 -150zM250 391c72 0 123 47 123 120c0 66 -45 119 -124 119 c-74 0 -123 -49 -123 -119s47 -120 124 -120zM250 40c64 0 126 45 126 144c0 105 -69 145 -127 145c-54 0 -126 -38 -126 -145c0 -96 58 -144 127 -144"],120811:[689,22,499,42,457,"72 24l31 54c36 -30 69 -38 103 -38c79 0 156 78 167 241c-43 -54 -107 -84 -169 -84c-96 0 -162 114 -162 244c0 44 2 111 65 180c35 37 76 68 146 68c85 0 204 -64 204 -349c0 -221 -121 -362 -252 -362c-62 0 -99 21 -133 46zM371 418c0 49 -11 212 -118 212 c-52 0 -80 -28 -100 -58c-28 -45 -29 -86 -29 -131c0 -44 1 -81 23 -121c21 -37 49 -61 96 -61c79 0 128 75 128 159"],120812:[689,21,549,43,506,"506 330c0 -134 0 -351 -232 -351c-231 0 -231 219 -231 351c0 134 0 359 232 359c231 0 231 -227 231 -359zM179 344c0 -144 -5 -296 95 -296c104 0 96 161 96 296c0 125 9 276 -96 276c-103 0 -95 -150 -95 -276"],120813:[689,0,549,76,473,"355 643v-574h70c11 0 48 0 48 -34c0 -35 -37 -35 -48 -35h-285c-13 0 -48 0 -48 35c0 34 35 34 48 34h78v497c-29 -8 -64 -13 -94 -13c-13 0 -48 0 -48 35c0 28 26 35 44 35c99 2 137 37 155 53c12 11 16 13 36 13c44 0 44 -28 44 -46"],120814:[689,0,549,46,494,"46 541c19 68 82 148 213 148c147 0 235 -104 235 -221c0 -86 -53 -139 -110 -189l-200 -176c20 0 282 1 289 -3c21 -9 21 -27 21 -42v-12c0 -33 -10 -46 -48 -46h-343c-37 0 -48 12 -48 46v8c0 16 0 18 10 28l191 196c60 62 98 126 98 188c0 58 -32 127 -110 127 c-64 0 -98 -41 -118 -107c-3 -12 -5 
-18 -14 -18"],120815:[689,21,549,46,503,"61 587c0 8 67 102 204 102c65 0 208 -13 208 -166c0 -55 -27 -118 -108 -160c90 -22 138 -81 138 -168c0 -128 -68 -216 -240 -216c-131 0 -217 78 -217 90l25 77c67 -60 155 -77 187 -77c91 0 98 64 98 128c0 74 -11 129 -102 129h-44c-26 0 -27 1 -27 26v18 c0 28 0 24 40 27c30 2 55 4 79 31c31 37 31 83 31 96c0 48 -9 79 -73 79c-13 0 -98 -10 -149 -80"],120816:[668,0,549,31,518,"39 252l222 394c12 22 27 22 46 22h73c37 0 48 -12 48 -46v-384h42c35 0 48 -10 48 -46c0 -38 -17 -45 -48 -45h-42v-101c0 -33 -10 -46 -48 -46h-28c-41 0 -48 15 -48 46v101h-225c-41 0 -48 16 -48 46v24c0 18 0 21 8 35zM135 238h175v329h-1c-5 -17 -14 -46 -174 -329 "],120817:[668,21,549,37,494,"213 572v-151c24 9 48 13 73 13c153 0 208 -94 208 -221c0 -156 -88 -234 -249 -234c-143 0 -208 104 -208 111s23 41 37 63c6 8 8 13 16 13c7 0 10 -6 13 -12c40 -76 108 -85 139 -85c105 0 105 73 105 148s0 148 -65 148c-42 0 -76 -20 -100 -57 c-14 -23 -23 -23 -49 -23c-27 0 -50 1 -50 44v295c0 38 16 44 48 44h278c48 0 48 -23 48 -48c0 -26 -1 -48 -48 -48h-196"],120818:[689,21,549,46,503,"432 643v-36c0 -14 0 -26 -12 -26c-24 13 -65 22 -83 22c-150 0 -146 -140 -151 -228c27 56 67 86 122 86c195 0 195 -179 195 -234c0 -75 -6 -130 -58 -186c-41 -44 -89 -62 -166 -62c-233 0 -233 249 -233 348c0 85 0 362 288 362c10 0 37 0 63 -10 c35 -12 35 -13 35 -36zM278 69c85 0 85 65 85 159c0 95 0 164 -80 164c-71 0 -95 -73 -95 -153c0 -83 11 -170 90 -170"],120819:[669,11,549,46,503,"128 564c-67 0 -82 1 -82 46v13c0 46 20 46 68 46c49 0 313 0 347 -1c42 -1 42 -27 42 -48c0 -18 0 -25 -8 -35c-120 -145 -207 -319 -210 -558c0 -38 -32 -38 -48 -38h-46c-41 0 -48 16 -48 47c0 49 0 291 224 529c-23 0 -216 -1 -239 -1"],120820:[689,21,549,46,503,"380 360c80 -22 123 -77 123 -165c0 -144 -78 -216 -229 -216c-144 0 -228 66 -228 216c0 61 20 137 123 165c-90 29 -108 90 -108 141c0 150 105 188 214 188c119 0 213 -46 213 -188c0 -43 -12 -110 -108 -141zM275 396c84 0 85 49 85 104c0 69 -7 103 -86 103 s-85 -36 -85 -103c0 -57 2 -104 86 -104zM275 69c87 0 89 56 89 128c0 64 0 129 
-90 129c-89 0 -89 -66 -89 -129c0 -74 2 -128 90 -128"],120821:[689,21,549,46,503,"127 109c13 0 30 -40 100 -40c152 0 132 190 136 225c-13 -30 -48 -87 -122 -87c-64 0 -110 14 -150 64c-43 55 -45 110 -45 171c0 74 7 122 49 175c52 61 118 72 180 72c227 0 228 -243 228 -346c0 -87 0 -364 -274 -364c-98 0 -150 51 -150 59c0 5 14 27 33 58 c5 8 7 13 15 13zM266 276c96 0 96 139 96 152c0 101 -16 175 -85 175c-91 0 -91 -69 -91 -162c0 -96 0 -165 80 -165"]};MathJax.Ajax.loadComplete(MathJax.OutputJax.SVG.fontDir+"/SansSerif/Regular/Main.js"); | PypiClean |
/Marine_Traffic_API-0.20.1-py3-none-any.whl/marinetrafficapi/vessels_positions/PS02_PS06_vessel_positions/models.py | from marinetrafficapi.models import Model
from marinetrafficapi.fields import NumberField, RealNumberField, DatetimeField, TextField
class FleetVesselPosition(Model):
    """Get positional information for a set of predefined vessels.

    Response model for the MarineTraffic PS02/PS06 "fleet vessel
    positions" endpoint.  Each class attribute is a declarative field:
    ``index`` is the key in the raw API response and ``desc`` documents
    the value's meaning.  Field declaration order is preserved by the
    ``Model`` machinery, so do not reorder attributes.
    """

    # --- Vessel identity -------------------------------------------------

    mmsi = NumberField(index='MMSI',
                       desc="Maritime Mobile Service Identity - \n"
                            "a nine-digit number sent in digital \n"
                            "form over a radio frequency that identifies \n"
                            "the vessel's transmitter station")

    imo = NumberField(index='IMO',
                      desc="International Maritime Organisation \n"
                           "number - a seven-digit number that \n"
                           "uniquely identifies vessels")

    ship_id = NumberField(index='SHIP_ID',
                          desc="A uniquely assigned ID by \n"
                               "MarineTraffic for the subject vessel")

    # --- Position and movement (AIS) -------------------------------------

    longitude = RealNumberField(index='LON',
                                desc="A geographic coordinate that specifies \n"
                                     "the east-west position of the vessel \n"
                                     "on the Earth's surface")

    latitude = RealNumberField(index='LAT',
                               desc="a geographic coordinate that specifies \n"
                                    "the north-south position of the vessel \n"
                                    "on the Earth's surface")

    # NOTE(review): per the API docs in desc, SPEED is knots multiplied by
    # 10 (e.g. 123 -> 12.3 kn); confirm before presenting to users.
    speed = NumberField(index='SPEED',
                        desc="The speed (in knots x10) that the \n"
                             "subject vessel is reporting according \n"
                             "to AIS transmissions")

    heading = NumberField(index='HEADING',
                          desc="The heading (in degrees) that the \n"
                               "subject vessel is reporting according \n"
                               "to AIS transmissions")

    status = NumberField(index='STATUS',
                         desc="The AIS Navigational Status of the \n"
                              "subject vessel as input by the vessel's \n"
                              "crew - more. There might be discrepancies \n"
                              "with the vessel's detail page when vessel \n"
                              "speed is near zero (0) knots.")

    course = NumberField(index='COURSE',
                         desc="The course (in degrees) that \n"
                              "the subject vessel is reporting \n"
                              "according to AIS transmissions")

    timestamp = DatetimeField(index='TIMESTAMP',
                              desc="The date and time (in UTC) that \n"
                                   "the subject vessel's position was \n"
                                   "recorded by MarineTraffic",
                              format='%Y-%m-%dT%H:%M:%S')

    dsrc = TextField(index='DSRC',
                     desc="Data Source - Defines whether the \n"
                          "transmitted AIS data was received by a \n"
                          "Terrestrial or a Satellite AIS Station")

    utc_seconds = NumberField(index='UTC_SECONDS',
                              desc="The time slot that the subject \n"
                                   "vessel uses to transmit information")

    # --- Static vessel details -------------------------------------------

    ship_name = TextField(index='SHIPNAME',
                          desc="The Shipname of the subject vessel")

    ship_type = NumberField(index='SHIPTYPE',
                            desc="The Shiptype of the subject \n"
                                 "vessel according to AIS transmissions")

    call_sign = TextField(index='CALLSIGN',
                          desc="A uniquely designated identifier \n"
                               "for the vessel's transmitter station")

    flag = TextField(index='FLAG',
                     desc="The flag of the subject vessel \n"
                          "according to AIS transmissions")

    length = RealNumberField(index='LENGTH',
                             desc="The overall Length (in metres) \n"
                                  "of the subject vessel")

    width = RealNumberField(index='WIDTH',
                            desc="The Breadth (in metres) \n"
                                 "of the subject vessel")

    grt = NumberField(index='GRT',
                      desc="Gross Tonnage - unitless measure that \n"
                           "calculates the moulded volume of all \n"
                           "enclosed spaces of a ship")

    dwt = NumberField(index='DWT',
                      desc="Deadweight - a measure (in metric tons) \n"
                           "of how much weight a vessel can safely \n"
                           "carry (excluding the vessel's own weight")

    draught = NumberField(index='DRAUGHT',
                          desc="The Draught (in metres x10) of the \n"
                               "subject vessel according to the \n"
                               "AIS transmissions")

    year_built = NumberField(index='YEAR_BUILT',
                             desc="The year that the subject vessel was built")

    rot = NumberField(index='ROT',
                      desc="Rate of Turn")

    type_name = TextField(index='TYPE_NAME',
                          desc="The Type of the subject vessel")

    ais_type_summary = TextField(index='AIS_TYPE_SUMMARY',
                                 desc="Further explanation of the SHIPTYPE ID")

    # --- Voyage and port information -------------------------------------

    destination = TextField(index='DESTINATION',
                            desc="The Destination of the subject \n"
                                 "vessel according to the AIS transmissions")

    eta = DatetimeField(index='ETA',
                        desc="The Estimated Time of Arrival to \n"
                             "Destination of the subject vessel \n"
                             "according to the AIS transmissions",
                        format='%Y-%m-%dT%H:%M:%S')

    current_port = TextField(index='CURRENT_PORT',
                             desc="The name of the Port the subject \n"
                                  "vessel is currently in (NULL \n"
                                  "if the vessel is underway)")

    last_port = TextField(index='LAST_PORT',
                          desc="The Name of the Last Port \n"
                               "the vessel has visited")

    last_port_time = DatetimeField(index='LAST_PORT_TIME',
                                   desc="The Date and Time (in UTC) that \n"
                                        "the subject vessel departed from \n"
                                        "the Last Port",
                                   format='%Y-%m-%dT%H:%M:%S')

    current_port_id = NumberField(index='CURRENT_PORT_ID',
                                  desc="A uniquely assigned ID by \n"
                                       "MarineTraffic for the Current Port")

    current_port_unlocode = TextField(index='CURRENT_PORT_UNLOCODE',
                                      desc="A uniquely assigned ID by \n"
                                           "United Nations for the Current Port")

    current_port_country = TextField(index='CURRENT_PORT_COUNTRY',
                                     desc="The Country that the \n"
                                          "Current Port is located at")

    last_port_id = NumberField(index='LAST_PORT_ID',
                               desc="A uniquely assigned ID by \n"
                                    "MarineTraffic for the Last Port")

    last_port_unlocode = TextField(index='LAST_PORT_UNLOCODE',
                                   desc="A uniquely assigned ID by \n"
                                        "United Nations for the Last Port")

    last_port_country = TextField(index='LAST_PORT_COUNTRY',
                                  desc="The Country that the \n"
                                       "Last Port is located at")

    next_port_id = NumberField(index='NEXT_PORT_ID',
                               desc="A uniquely assigned ID by \n"
                                    "MarineTraffic for the Next Port")

    next_port_unlocode = TextField(index='NEXT_PORT_UNLOCODE',
                                   desc="A uniquely assigned ID by \n"
                                        "United Nations for the Next Port")

    next_port_name = TextField(index='NEXT_PORT_NAME',
                               desc="The Name of the Next Port as \n"
                                    "derived by MarineTraffic based \n"
                                    "on the subject vessel's reported \n"
                                    "Destination")

    next_port_country = TextField(index='NEXT_PORT_COUNTRY',
                                  desc="The Country that the \n"
                                       "Next Port is located at")

    eta_calc = DatetimeField(index='ETA_CALC',
                             desc="The Estimated Time of Arrival to \n"
                                  "Destination of the subject vessel \n"
                                  "according to the MarineTraffic calculations",
                             format='%Y-%m-%dT%H:%M:%S')

    eta_updated = DatetimeField(index='ETA_UPDATED',
                                desc="The date and time (in UTC) that \n"
                                     "the ETA was calculated by MarineTraffic",
                                format='%Y-%m-%dT%H:%M:%S')

    distance_to_go = NumberField(index='DISTANCE_TO_GO',
                                 desc="The Remaining Distance (in NM) \n"
                                      "for the subject vessel to reach \n"
                                      "the reported Destination")

    distance_travelled = NumberField(index='DISTANCE_TRAVELLED',
                                     desc="The Distance (in NM) that the \n"
                                          "subject vessel has travelled \n"
                                          "since departing from Last Port")

    # NOTE(review): attribute name looks like a typo for "avg_speed" (the
    # API index is 'AVG_SPEED'), but renaming it would break existing
    # callers that read `.awg_speed` — confirm before changing.
    awg_speed = RealNumberField(index='AVG_SPEED',
                                desc="The average speed calculated for \n"
                                     "the subject vessel during the latest \n"
                                     "voyage (port to port)")

    max_speed = RealNumberField(index='MAX_SPEED',
                                desc="The maximum speed reported by the \n"
                                     "subject vessel during the latest \n"
                                     "voyage (port to port)")
/564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/extensions/context.py | from __future__ import annotations
import contextlib
import inspect
import warnings
from asyncio import iscoroutinefunction
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Callable,
Iterator,
List,
NamedTuple,
Optional,
Type,
Union,
)
from strawberry.extensions import SchemaExtension
from strawberry.utils.await_maybe import AwaitableOrValue, await_maybe
if TYPE_CHECKING:
from types import TracebackType
from strawberry.extensions.base_extension import Hook
class WrappedHook(NamedTuple):
    """A single extension lifecycle hook, normalised to a primed generator.

    The generator is advanced once on "enter" (runs code before the
    ``yield``) and once on "exit" (runs code after the ``yield``).
    """

    # The extension instance that owns this hook (used in error messages).
    extension: SchemaExtension
    # The already-instantiated (a)sync generator, bound to the extension.
    initialized_hook: Union[AsyncIterator[None], Iterator[None]]
    # True when the hook must be driven with __anext__ (async generator).
    is_async: bool
class ExtensionContextManagerBase:
    """Drive every extension's hook for one schema-execution lifecycle stage.

    Each subclass names a lifecycle stage (``HOOK_NAME``) plus the pair of
    deprecated event-style hook names it supersedes.  On construction the
    relevant hook is collected from every extension and normalised to a
    :class:`WrappedHook`.  Entering this object (sync or async) advances each
    hook up to its ``yield``; exiting resumes each hook past the ``yield``.
    """

    __slots__ = ("hooks", "deprecation_message", "default_hook")

    def __init_subclass__(cls, **kwargs):
        # Build the deprecation text once per subclass so it can mention the
        # subclass-specific legacy hook names.
        super().__init_subclass__(**kwargs)
        cls.DEPRECATION_MESSAGE = (
            f"Event driven styled extensions for "
            f"{cls.LEGACY_ENTER} or {cls.LEGACY_EXIT}"
            f" are deprecated, use {cls.HOOK_NAME} instead"
        )

    HOOK_NAME: str  # name of the generator-style hook on SchemaExtension
    DEPRECATION_MESSAGE: str  # filled in by __init_subclass__
    LEGACY_ENTER: str  # deprecated "on_*_start" event hook name
    LEGACY_EXIT: str  # deprecated "on_*_end" event hook name

    def __init__(self, extensions: List[SchemaExtension]):
        """Collect one wrapped hook per extension that customises this stage."""
        self.hooks: List[WrappedHook] = []
        self.default_hook: Hook = getattr(SchemaExtension, self.HOOK_NAME)
        for extension in extensions:
            hook = self.get_hook(extension)
            if hook:
                self.hooks.append(hook)

    def get_hook(self, extension: SchemaExtension) -> Optional[WrappedHook]:
        """Return the extension's hook for this stage as a ``WrappedHook``.

        Returns ``None`` when the extension does not override this stage.

        Raises:
            ValueError: if the extension defines both the legacy and the new
                style hook, or if the new style hook is not callable.
        """
        on_start = getattr(extension, self.LEGACY_ENTER, None)
        on_end = getattr(extension, self.LEGACY_EXIT, None)

        is_legacy = on_start is not None or on_end is not None

        # Look the hook up on the *class* so an override can be told apart
        # from the inherited SchemaExtension default (identity comparison).
        hook_fn: Optional[Hook] = getattr(type(extension), self.HOOK_NAME)
        hook_fn = hook_fn if hook_fn is not self.default_hook else None
        if is_legacy and hook_fn is not None:
            # Both literals need the "f" prefix so the hook name is actually
            # interpolated into the message.
            raise ValueError(
                f"{extension} defines both legacy and new style extension hooks for "
                f"{self.HOOK_NAME}"
            )
        elif is_legacy:
            warnings.warn(self.DEPRECATION_MESSAGE, DeprecationWarning, stacklevel=3)
            return self.from_legacy(extension, on_start, on_end)

        if hook_fn:
            # Generator-style hooks are used as-is; plain callables are
            # wrapped so everything downstream sees a generator.
            if inspect.isgeneratorfunction(hook_fn):
                return WrappedHook(extension, hook_fn(extension), False)

            if inspect.isasyncgenfunction(hook_fn):
                return WrappedHook(extension, hook_fn(extension), True)

            if callable(hook_fn):
                return self.from_callable(extension, hook_fn)

            raise ValueError(
                f"Hook {self.HOOK_NAME} on {extension} "
                f"must be callable, received {hook_fn!r}"
            )

        return None  # Current extension does not define a hook for this lifecycle stage

    @staticmethod
    def from_legacy(
        extension: SchemaExtension,
        on_start: Optional[Callable[[], None]] = None,
        on_end: Optional[Callable[[], None]] = None,
    ) -> WrappedHook:
        """Wrap a pair of legacy start/end callbacks into a single generator.

        The generator calls ``on_start`` before its ``yield`` and ``on_end``
        after it; an async generator is produced if either callback is a
        coroutine function.
        """
        if iscoroutinefunction(on_start) or iscoroutinefunction(on_end):

            async def iterator():
                if on_start:
                    await await_maybe(on_start())
                yield
                if on_end:
                    await await_maybe(on_end())

            hook = iterator()
            return WrappedHook(extension, hook, True)

        else:

            def iterator():
                if on_start:
                    on_start()
                yield
                if on_end:
                    on_end()

            hook = iterator()
            return WrappedHook(extension, hook, False)

    @staticmethod
    def from_callable(
        extension: SchemaExtension,
        func: Callable[[SchemaExtension], AwaitableOrValue[Any]],
    ) -> WrappedHook:
        """Wrap a plain (possibly async) callable hook into a generator.

        The callable runs on "enter"; nothing happens on "exit".
        """
        if iscoroutinefunction(func):

            async def async_iterator():
                await func(extension)
                yield

            hook = async_iterator()
            return WrappedHook(extension, hook, True)

        else:

            def iterator():
                func(extension)
                yield

            hook = iterator()
            return WrappedHook(extension, hook, False)

    def run_hooks_sync(self, is_exit: bool = False) -> None:
        """Run extensions synchronously.

        On exit, generators legitimately raise ``StopIteration`` when
        exhausted, so those exceptions are suppressed.  Async hooks cannot
        be driven synchronously and raise ``RuntimeError``.
        """
        ctx = (
            contextlib.suppress(StopIteration, StopAsyncIteration)
            if is_exit
            else contextlib.nullcontext()
        )
        for hook in self.hooks:
            with ctx:
                if hook.is_async:
                    raise RuntimeError(
                        f"SchemaExtension hook {hook.extension}.{self.HOOK_NAME} "
                        "failed to complete synchronously."
                    )
                else:
                    hook.initialized_hook.__next__()  # type: ignore[union-attr]

    async def run_hooks_async(self, is_exit: bool = False) -> None:
        """Run extensions asynchronously with support for sync lifecycle hooks.

        The ``is_exit`` flag is required as a `StopIteration` cannot be raised from
        within a coroutine.
        """
        ctx = (
            contextlib.suppress(StopIteration, StopAsyncIteration)
            if is_exit
            else contextlib.nullcontext()
        )
        for hook in self.hooks:
            with ctx:
                if hook.is_async:
                    await hook.initialized_hook.__anext__()  # type: ignore[union-attr]
                else:
                    hook.initialized_hook.__next__()  # type: ignore[union-attr]

    def __enter__(self) -> None:
        self.run_hooks_sync()

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        self.run_hooks_sync(is_exit=True)

    async def __aenter__(self) -> None:
        await self.run_hooks_async()

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        await self.run_hooks_async(is_exit=True)
class OperationContextManager(ExtensionContextManagerBase):
    # Drives the SchemaExtension.on_operation lifecycle hooks.
    HOOK_NAME = SchemaExtension.on_operation.__name__

    # Legacy method names for the same lifecycle events; presumably consumed
    # by the base class for backwards compatibility — confirm there.
    LEGACY_ENTER = "on_request_start"
    LEGACY_EXIT = "on_request_end"
class ValidationContextManager(ExtensionContextManagerBase):
    # Drives the SchemaExtension.on_validate lifecycle hooks.
    HOOK_NAME = SchemaExtension.on_validate.__name__

    # Legacy method names kept for backwards compatibility.
    LEGACY_ENTER = "on_validation_start"
    LEGACY_EXIT = "on_validation_end"
class ParsingContextManager(ExtensionContextManagerBase):
    # Drives the SchemaExtension.on_parse lifecycle hooks.
    HOOK_NAME = SchemaExtension.on_parse.__name__

    # Legacy method names kept for backwards compatibility.
    LEGACY_ENTER = "on_parsing_start"
    LEGACY_EXIT = "on_parsing_end"
class ExecutingContextManager(ExtensionContextManagerBase):
    # Drives the SchemaExtension.on_execute lifecycle hooks.
    HOOK_NAME = SchemaExtension.on_execute.__name__

    # Legacy method names kept for backwards compatibility.
    LEGACY_ENTER = "on_executing_start"
    LEGACY_EXIT = "on_executing_end"
// EditArea syntax-highlighting definition for BASIC (QuickBASIC-style
// keyword set).  Registered under the "basic" key and consumed by the
// edit_area highlighter.
editAreaLoader.load_syntax["basic"] = {
	'COMMENT_SINGLE' : {1 : "'", 2 : 'rem'}
	// BASIC has no block-comment syntax
	,'COMMENT_MULTI' : { }
	,'QUOTEMARKS' : {1: '"'}
	,'KEYWORD_CASE_SENSITIVE' : false
	,'KEYWORDS' : {
		// flow-control statements
		'statements' : [
			'if','then','for','wend','while',
			'else','elseif','select','case','end select',
			'until','next','step','to','end if', 'call'
		]
		// declarations, I/O and other non-function keywords
		,'keywords' : [
			'sub', 'end sub', 'function', 'end function', 'exit',
			'exit function', 'dim', 'redim', 'shared', 'const',
			'is', 'absolute', 'access', 'any', 'append', 'as',
			'base', 'beep', 'binary', 'bload', 'bsave', 'chain',
			'chdir', 'circle', 'clear', 'close', 'cls', 'color',
			'com', 'common', 'data', 'date', 'declare', 'def',
			'defdbl', 'defint', 'deflng', 'defsng', 'defstr',
			'double', 'draw', 'environ', 'erase', 'error', 'field',
			'files', 'fn', 'get', 'gosub', 'goto', 'integer', 'key',
			'kill', 'let', 'line', 'list', 'locate', 'lock', 'long',
			'lprint', 'lset', 'mkdir', 'name', 'off', 'on', 'open',
			'option', 'out', 'output', 'paint', 'palette', 'pcopy',
			'poke', 'preset', 'print', 'pset', 'put', 'random',
			'randomize', 'read', 'reset', 'restore', 'resume',
			'return', 'rmdir', 'rset', 'run', 'screen', 'seg',
			'shell', 'single', 'sleep', 'sound', 'static', 'stop',
			'strig', 'string', 'swap', 'system', 'time', 'timer',
			'troff', 'tron', 'type', 'unlock', 'using', 'view',
			'wait', 'width', 'window', 'write'
		]
		// built-in functions
		,'functions' : [
			'abs', 'asc', 'atn', 'cdbl', 'chr', 'cint', 'clng',
			'cos', 'csng', 'csrlin', 'cvd', 'cvdmbf', 'cvi', 'cvl',
			'cvs', 'cvsmbf', 'eof', 'erdev', 'erl', 'err', 'exp',
			'fileattr', 'fix', 'fre', 'freefile', 'hex', 'inkey',
			'inp', 'input', 'instr', 'int', 'ioctl', 'lbound',
			'lcase', 'left', 'len', 'loc', 'lof', 'log', 'lpos',
			'ltrim', 'mid', 'mkd', 'mkdmbf', 'mki', 'mkl', 'mks',
			'mksmbf', 'oct', 'peek', 'pen', 'play', 'pmap', 'point',
			'pos', 'right', 'rnd', 'rtrim', 'seek', 'sgn', 'sin',
			'space', 'spc', 'sqr', 'stick', 'str', 'tab', 'tan',
			'ubound', 'ucase', 'val', 'varptr', 'varseg'
		]
		// word-like operators, styled separately from the symbols below
		,'operators' : [
			'and', 'eqv', 'imp', 'mod', 'not', 'or', 'xor'
		]
	}
	,'OPERATORS' :[
		'+', '-', '/', '*', '=', '<', '>', '!', '&'
	]
	,'DELIMITERS' :[
		'(', ')', '[', ']', '{', '}'
	]
	,'STYLES' : {
		'COMMENTS': 'color: #99CC00;'
		// NOTE(review): spelled 'QUOTESMARKS' (with S) unlike the
		// 'QUOTEMARKS' key above; this appears to be the spelling the
		// edit_area styler expects — confirm against edit_area core
		// before renaming.
		,'QUOTESMARKS': 'color: #333399;'
		,'KEYWORDS' : {
			'keywords' : 'color: #3366FF;'
			,'functions' : 'color: #0000FF;'
			,'statements' : 'color: #3366FF;'
			,'operators' : 'color: #FF0000;'
		}
		,'OPERATORS' : 'color: #FF0000;'
		,'DELIMITERS' : 'color: #0000FF;'
	}
};
/netket-3.9.2.tar.gz/netket-3.9.2/netket/graph/space_group.py |
# Ignore false-positives for redefined `product` functions:
# pylint: disable=function-redefined
import numpy as np
from functools import reduce
from math import pi
from typing import Optional, Iterable, Sequence
from .lattice import Lattice
from netket.utils import struct
from netket.utils.types import Array, Union
from netket.utils.float import prune_zeros
from netket.utils.dispatch import dispatch
from netket.utils.group import (
Identity,
PointGroup,
Permutation,
PermutationGroup,
)
class Translation(Permutation):
    r"""
    Custom subclass of `Permutation` that represents a lattice permutation.
    Stores translation lattice vector and generates a sensible name from it.

    The product of two `Translation`s carries the appropriate displacement vector.
    """

    def __init__(self, permutation: Array, displacement: Array):
        r"""
        Creates a `Translation` from a permutation array and a displacement vector

        Arguments:
            permutation: a 1D array listing :math:`g^{-1}(x)` for all
                :math:`0\le x < N` (i.e., `V[permutation]` permutes the
                elements of `V` as desired)
            displacement: displacement vector in units of lattice basis vectors

        Returns:
            a `Translation` object encoding the same information
        """
        super().__init__(permutation)
        # Displacement in units of lattice basis vectors; read by the
        # dispatched `product` below to accumulate displacements when
        # composing two translations.
        self._vector = np.asarray(displacement)

    @property
    def _name(self):
        # Human-readable name derived from the displacement vector.
        return f"Translation({self._vector.tolist()})"
@dispatch
def product(p: Translation, q: Translation):
    # Register Translation × Translation with the multiple-dispatch `product`:
    # compose the permutations and add the displacement vectors so the result
    # is again a `Translation` carrying the combined displacement.
    return Translation(p(np.asarray(q)), p._vector + q._vector)
def _ensure_iterable(x):
"""Extracts iterables given in varargs"""
if isinstance(x[0], Iterable):
if len(x) > 1:
raise TypeError("Either Iterable or variable argument list expected")
return x[0]
else:
return x
@struct.dataclass
class SpaceGroupBuilder:
    """
    Class to handle the space group symmetries of `Lattice`.

    Constructs `PermutationGroup`s that represent the action on a `Lattice` of

    * a geometrical point group given as a constructor argument,
    * its rotational subgroup (i.e. point group symmetries with determinant +1)
    * the translation group of the same lattice
    * and the space group that is generated as the semidirect product of
      the supplied point group and the translation group.

    Also generates space group irreps for symmetrising wave functions.
    """

    lattice: Lattice
    # Geometrical point group; rewritten in __post_init__ so that it carries
    # the lattice basis vectors as its unit cell.
    point_group_: PointGroup

    def __post_init__(self):
        # The dataclass is frozen (netket.utils.struct), hence the
        # object.__setattr__ workaround to rebind the field.
        object.__setattr__(
            self,
            "point_group_",
            self.point_group_.replace(unit_cell=self.lattice.basis_vectors),
        )

    # TODO describe ordering of group elements here and later in docstring
    @struct.property_cached
    def point_group(self) -> PermutationGroup:
        """
        The point group as a `PermutationGroup` acting on the sites of `self.lattice`.
        """
        perms = []
        for p in self.point_group_:
            if isinstance(p, Identity):
                perms.append(Identity())
            else:
                # note that we need the preimages in the permutation
                perm = self.lattice.id_from_position(p.preimage(self.lattice.positions))
                perms.append(Permutation(perm, name=str(p)))
        return PermutationGroup(perms, degree=self.lattice.n_nodes)

    @struct.property_cached
    def rotation_group(self) -> PermutationGroup:
        """The group of rotations (i.e. point group symmetries with determinant +1)
        as a `PermutationGroup` acting on the sites of `self.lattice`."""
        perms = []
        for p in self.point_group_.rotation_group():
            if isinstance(p, Identity):
                perms.append(Identity())
            else:
                # note that we need the preimages in the permutation
                perm = self.lattice.id_from_position(p.preimage(self.lattice.positions))
                perms.append(Permutation(perm, name=str(p)))
        return PermutationGroup(perms, degree=self.lattice.n_nodes)

    def _translations_along_axis(self, axis: int) -> PermutationGroup:
        """
        The group of valid translations along an axis as a `PermutationGroup`
        acting on the sites of `self.lattice.`
        """
        if self.lattice._pbc[axis]:
            trans_list = [Identity()]
            # note that we need the preimages in the permutation
            trans_perm = self.lattice.id_from_position(
                self.lattice.positions - self.lattice.basis_vectors[axis]
            )
            vector = np.zeros(self.lattice.ndim, dtype=int)
            vector[axis] = 1
            trans_by_one = Translation(trans_perm, vector)

            # build the cyclic group by repeated composition with the
            # elementary translation
            for _ in range(1, self.lattice.extent[axis]):
                trans_list.append(trans_list[-1] @ trans_by_one)

            return PermutationGroup(trans_list, degree=self.lattice.n_nodes)
        else:
            # without PBC along `axis` the only valid translation is the identity
            return PermutationGroup([Identity()], degree=self.lattice.n_nodes)

    @struct.property_cached
    def _full_translation_group(self) -> PermutationGroup:
        """
        The group of valid translations of `self.lattice` as a `PermutationGroup`
        acting on the sites of the same.
        """
        # direct product of the per-axis cyclic translation groups
        return reduce(
            PermutationGroup.__matmul__,
            [self._translations_along_axis(i) for i in range(self.lattice.ndim)],
        )

    def translation_group(
        self, axes: Optional[Union[int, Sequence[int]]] = None
    ) -> PermutationGroup:
        """
        The group of valid translations of `self.lattice` as a `PermutationGroup`
        acting on the sites of the same.

        Arguments:
            axes: a single axis index or a sequence of them to restrict the
                translations to; all axes are used if omitted
        """
        if axes is None:
            return self._full_translation_group
        elif isinstance(axes, int):
            return self._translations_along_axis(axes)
        else:
            return reduce(
                PermutationGroup.__matmul__,
                [self._translations_along_axis(i) for i in axes],
            )

    @struct.property_cached
    def space_group(self) -> PermutationGroup:
        """
        The space group generated by `self.point_group` and `self.translation_group`.
        """
        return self._full_translation_group @ self.point_group

    def _little_group_index(self, k: Array) -> Array:
        """
        Returns the indices of the elements of the little group corresponding to
        wave vector `k`.
        """
        # calculate k' = p(k) for all p in the point group
        big_star = np.tensordot(self.point_group_.matrices(), k, axes=1)
        big_star = self.lattice.to_reciprocal_lattice(big_star) % self.lattice.extent
        # should test for pbc before taking the modulus, but the only valid wave
        # vector for non-pbc axes is 0 and 0 % anything == 0

        # assumes point_group_[0] is the identity, so big_star[0] is k itself
        is_in_little_group = np.all(big_star == big_star[0], axis=1)
        return np.arange(len(self.point_group_))[is_in_little_group]

    def little_group(self, *k: Array) -> PointGroup:
        """
        Returns the little co-group corresponding to wave vector *k*.
        This is the subgroup of the point group that leaves *k* invariant.

        Arguments:
            k: the wave vector in Cartesian axes

        Returns:
            the little co-group as a `PointGroup`
        """
        k = _ensure_iterable(k)
        return PointGroup(
            [self.point_group_[i] for i in self._little_group_index(k)],
            ndim=self.point_group_.ndim,
            unit_cell=self.lattice.basis_vectors,
        )

    def _little_group_irreps(self, k: Array, divide: bool = False) -> Array:
        """
        Returns the character table of the little group embedded in the full point
        group. Symmetries outside the little group get 0.

        If `divide` is `True`, the result gets divided by the size of the little group.
        This is convenient when calculating space group irreps.
        """
        idx = self._little_group_index(k)
        CT = self.little_group(k).character_table()
        # scatter the little-group columns into a full-point-group-width table
        CT_full = np.zeros((CT.shape[0], len(self.point_group_)))
        CT_full[:, idx] = CT
        return CT_full / idx.size if divide else CT_full

    def space_group_irreps(self, *k: Array) -> Array:
        """
        Returns the portion of the character table of the full space group corresponding
        to the star of the wave vector *k*.

        Arguments:
            k: the wave vector in Cartesian axes

        Returns:
            An array `CT` listing the characters for a number of irreps of the
            space group.
            `CT[i]` for each `i` gives a distinct irrep, each corresponding to
            `self.little_group(k).character_table[i].
            `CT[i,j]` gives the character of `self.space_group[j]` in the same.
        """
        k = _ensure_iterable(k)
        # Wave vectors: the star {p(k)} in Cartesian and reciprocal-lattice axes
        big_star_Cart = np.tensordot(self.point_group_.matrices(), k, axes=1)
        big_star = self.lattice.to_reciprocal_lattice(big_star_Cart) * (
            2 * pi / self.lattice.extent
        )
        # Little-group-irrep factors
        # Conjugacy_table[g,p] lists p^{-1}gp, so point_group_factors[i,:,p]
        # of irrep #i for the little group of p(k) is the equivalent
        # Phase factor for non-symmorphic symmetries is exp(-i w_g . p(k))
        point_group_factors = self._little_group_irreps(k, divide=True)[
            :, self.point_group_.conjugacy_table
        ] * np.exp(
            -1j
            * np.tensordot(
                self.point_group_.translations(), big_star_Cart, axes=(-1, -1)
            )
        )
        # Translational factors: exp(-i t . p(k)) for every translation t,
        # accumulated axis by axis and broadcast over the point group axis
        trans_factors = []
        for axis in range(self.lattice.ndim):
            n_trans = self.lattice.extent[axis] if self.lattice.pbc[axis] else 1
            factors = np.exp(-1j * np.outer(np.arange(n_trans), big_star[:, axis]))
            shape = (
                [1] * axis
                + [n_trans]
                + [1] * (self.lattice.ndim - 1 - axis)
                + [len(self.point_group_)]
            )
            trans_factors.append(factors.reshape(shape))
        trans_factors = reduce(np.multiply, trans_factors).reshape(
            -1, len(self.point_group_)
        )
        # Multiply the factors together and sum over the "p" PGSymmetry axis
        # Translations are more major than point group operations
        result = np.einsum(
            "igp, tp -> itg", point_group_factors, trans_factors
        ).reshape(point_group_factors.shape[0], -1)
        return prune_zeros(result)

    def one_arm_irreps(self, *k: Array) -> Array:
        """
        Returns the portion of the character table of the full space group corresponding
        to the star of the wave vector *k*, projected onto *k* itself.

        Arguments:
            k: the wave vector in Cartesian axes

        Returns:
            An array `CT` listing the projected characters for a number of irreps of
            the space group.
            `CT[i]` for each `i` gives a distinct irrep, each corresponding to
            `self.little_group(k).character_table[i].
            `CT[i,j]` gives the character of `self.space_group[j]` in the same.
        """
        # Convert k to reciprocal lattice vectors
        k = _ensure_iterable(k)
        # Little-group irrep factors
        # Phase factor for non-symmorphic symmetries is exp(-i w_g . p(k))
        point_group_factors = self._little_group_irreps(k) * np.exp(
            -1j * (self.point_group_.translations() @ k)
        )
        # Translational factors for the single arm k (no star summation here)
        trans_factors = []
        for axis in range(self.lattice.ndim):
            n_trans = self.lattice.extent[axis] if self.lattice.pbc[axis] else 1
            factors = np.exp(-1j * k[axis] * np.arange(n_trans))
            shape = [1] * axis + [n_trans] + [1] * (self.lattice.ndim - 1 - axis)
            trans_factors.append(factors.reshape(shape))
        trans_factors = reduce(np.multiply, trans_factors).ravel()
        # Multiply the factors together
        # Translations are more major than point group operations
        result = np.einsum("ig, t -> itg", point_group_factors, trans_factors).reshape(
            point_group_factors.shape[0], -1
        )
        return prune_zeros(result)
/BuckFit-0.0.1.tar.gz/BuckFit-0.0.1/buckfit/potentials.py | from buckfit.potential_parameters import BuckinghamParameter, buckingham_parameters
from buckfit.atom_types import AtomType
class BuckinghamPotential():
    """
    One Buckingham potential for a single atom pair.

    Holds the pair labels, the LAMMPS atom type indices the potential acts
    on, and the a/rho/c parameters as BuckinghamParameter objects.
    """

    def __init__(self, labels, atom_type_index, a, rho, c):
        """
        Validate and store the components of one Buckingham potential.

        Args:
            labels (list(str)): the two atom labels of the pair, e.g. ['O', 'O'].
            atom_type_index (list(int)): atom type indices for the pair.
            a (obj): BuckinghamParameter for the `a` term.
            rho (obj): BuckinghamParameter for the `rho` term.
            c (obj): BuckinghamParameter for the `c` term.

        Raises:
            TypeError: if any argument has the wrong type or shape.
        """
        labels_ok = (isinstance(labels, list)
                     and len(labels) == 2
                     and all(isinstance(label, str) for label in labels))
        if not labels_ok:
            raise TypeError('labels should be a list of atoms in the potential. Each of the TWO items should be a string.')

        indices_ok = (isinstance(atom_type_index, list)
                      and all(isinstance(index, int) for index in atom_type_index))
        if not indices_ok:
            raise TypeError('atom_type_index should be a list of the index for each atom type for the atoms in the potential. Each item type should be an integer.')

        for name, param in (('a', a), ('rho', rho), ('c', c)):
            if not isinstance(param, BuckinghamParameter):
                raise TypeError('"{}" must be a BuckinghamParameter object.'.format(name))

        self.labels = labels
        self.atype_index = atom_type_index
        self.a = a
        self.rho = rho
        self.c = c

    def potential_string(self):
        """
        Format this potential as a lammps ``pair_coeff`` command.

        Returns:
            str: atype_index pair followed by the a, rho, c values.
        """
        return (f'pair_coeff {self.atype_index[0]} {self.atype_index[1]} '
                f'{self.a.value:6.4f} {self.rho.value:6.4f} {self.c.value:6.4f}')
def buckingham_potentials(potentials_dict, atom_types, parameters):
    """
    Defines the buckingham potential for each given atom pair, making sure that
    if core-shell models are used, the interactions act on the shell, not the core.

    Args:
        potentials_dict (dict): Contains buckingham potentials (list(float)), where the potentials keys are atom label pairs (str), example: 'Li-O'.
        atom_types (list(obj)): AtomType objects including atom_type_index (int), label (str), mass (float), charge (float), and core_shell (str).
        parameters (list(obj)): BuckinghamParameter objects including parameter_type (str), label_string (str), and value (float). Consumed in triples (a, rho, c), in the same order as the keys of potentials_dict.

    Returns:
        potentials (list(obj)): BuckinghamPotential objects including labels (list(str)), atom_type_index (list(int)), a (obj), rho (obj), and c (obj). Each object is a BuckinghamParameter object.

    Raises:
        TypeError: if any argument has the wrong type.
        ValueError: if a label in potentials_dict has no matching non-core atom type.
    """
    if not isinstance(potentials_dict, dict):
        raise TypeError('Potentials should be stored in a dictionary. The keys should be at the atom pairs as a string (i.e. "Li-O") and the values a list of buckingham a, rho, c values.')
    if not isinstance(atom_types, list) or not all(isinstance(at, AtomType) for at in atom_types):
        raise TypeError('atom_types must be a list of AtomType objects.')
    if not isinstance(parameters, list) or not all(isinstance(bp, BuckinghamParameter) for bp in parameters):
        raise TypeError('parameters must be a list of BuckinghamPotential objects.')

    i = 0  # index of the next unused parameter; consumed in (a, rho, c) triples
    potentials = []
    for key in potentials_dict:
        at1, at2 = key.split('-')  # at is atom_type
        # Find the atom type index of each label, skipping cores so that
        # core-shell interactions act on the shell.
        at1_index = None
        at2_index = None
        for atom in atom_types:
            if at1 in atom.label and 'core' not in atom.label:
                at1_index = atom.atom_type_index
            if at2 in atom.label and 'core' not in atom.label:
                at2_index = atom.atom_type_index
        # Previously an unmatched label silently fell through and crashed
        # later with UnboundLocalError; fail here with a clear message.
        if at1_index is None or at2_index is None:
            raise ValueError('No non-core atom type found for the pair "{}".'.format(key))
        potentials.append(BuckinghamPotential(labels=[at1, at2],
                                              atom_type_index=[at1_index, at2_index],
                                              a=parameters[i],
                                              rho=parameters[i+1],
                                              c=parameters[i+2]))
        i += 3
    return potentials
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Tool/cc.py |
__revision__ = "src/engine/SCons/Tool/cc.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import SCons.Tool
import SCons.Defaults
import SCons.Util
# Source suffixes handled by the C compiler tool ('.m' is Objective-C).
CSuffixes = ['.c', '.m']
if not SCons.Util.case_sensitive_suffixes('.c', '.C'):
    # On filesystems where suffix case is not significant, also claim '.C'
    # (presumably reserved for C++ on case-sensitive systems — see c++ tool).
    CSuffixes.append('.C')
def add_common_cc_variables(env):
    """
    Add underlying common "C compiler" variables that
    are used by multiple tools (specifically, c++).
    """
    # Each guard makes repeated tool initialisation idempotent: an existing
    # user/tool setting is never clobbered.
    if '_CCCOMCOM' not in env:
        env['_CCCOMCOM'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS'
        # It's a hack to test for darwin here, but the alternative
        # of creating an applecc.py to contain this seems overkill.
        # Maybe someday the Apple platform will require more setup and
        # this logic will be moved.
        env['FRAMEWORKS'] = SCons.Util.CLVar('')
        env['FRAMEWORKPATH'] = SCons.Util.CLVar('')
        if env['PLATFORM'] == 'darwin':
            env['_CCCOMCOM'] = env['_CCCOMCOM'] + ' $_FRAMEWORKPATH'

    if 'CCFLAGS' not in env:
        env['CCFLAGS'] = SCons.Util.CLVar('')

    if 'SHCCFLAGS' not in env:
        # shared-object compiles inherit the static CCFLAGS by default
        env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')
# Candidate C compiler executables probed by generate() and exists() below.
compilers = ['cc']
def generate(env):
    """
    Add Builders and construction variables for C compilers to an Environment.
    """
    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)

    # Register compile actions and object-file emitters for every C suffix,
    # for both static and shared object builders.
    for suffix in CSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.CAction)
        shared_obj.add_action(suffix, SCons.Defaults.ShCAction)
        static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
        shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)

    add_common_cc_variables(env)

    # Pick the first detected compiler candidate, or fall back to 'cc'
    # unconditionally so $CC always expands to something.
    if 'CC' not in env:
        env['CC'] = env.Detect(compilers) or compilers[0]
    env['CFLAGS'] = SCons.Util.CLVar('')
    env['CCCOM'] = '$CC -o $TARGET -c $CFLAGS $CCFLAGS $_CCCOMCOM $SOURCES'
    env['SHCC'] = '$CC'
    env['SHCFLAGS'] = SCons.Util.CLVar('$CFLAGS')
    env['SHCCCOM'] = '$SHCC -o $TARGET -c $SHCFLAGS $SHCCFLAGS $_CCCOMCOM $SOURCES'

    # Command-line affixes for preprocessor defines and include paths.
    env['CPPDEFPREFIX'] = '-D'
    env['CPPDEFSUFFIX'] = ''
    env['INCPREFIX'] = '-I'
    env['INCSUFFIX'] = ''
    env['SHOBJSUFFIX'] = '.os'
    env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 0

    env['CFILESUFFIX'] = '.c'
def exists(env):
    # The tool is usable if the configured $CC (or, failing that, one of the
    # default candidates) can be found on the current system.
    return env.Detect(env.get('CC', compilers))
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/BobBuildTool-0.23.1.tar.gz/BobBuildTool-0.23.1/pym/bob/cmds/jenkins/exec.py |
from ... import BOB_INPUT_HASH
from ...archive import getArchiver, JenkinsArchive
from ...builder import LocalBuilder
from ...errors import BuildError
from ...share import getShare
from ...stringparser import Env, isTrue
from ...tty import setVerbosity, TRACE
from ...utils import asHexStr, EventLoopWrapper, removePath, isWindows, \
getPlatformString
from .intermediate import PartialIR
import argparse
import base64
import json
import lzma
import os.path
class Spec:
    """Parsed Jenkins job specification file.

    The file starts with a shebang line, followed by ``[section]`` headers.
    Known headers switch the line handler; every other line (including
    unknown bracketed lines) is fed to the handler of the current section.
    """

    def __init__(self, specFile):
        # configuration defaults, overridden by the [cfg] section
        self.download = False
        self.upload = False
        self.artifactsCopy = "jenkins"
        self.shareDir = None
        self.shareQuota = None
        self.auditMeta = {}
        self.platform = None
        self.__recipesAudit = []
        self.__execIR = []

        sections = {
            "[cfg]": self.__handleCfg,
            "[recipes-audit]": self.__handleRecipesAudit,
            "[audit]": self.__handleAuditMeta,
            "[exec]": self.__handleExecIR,
        }
        with open(specFile, "r") as f:
            f.readline()  # skip shebang
            for rawLine in f:
                line = rawLine.rstrip('\n')
                if line in sections:
                    handler = sections[line]
                else:
                    handler(line)

    def __handleCfg(self, line):
        key, _, value = line.partition("=")
        if key == "download":
            self.download = isTrue(value)
        elif key == "upload":
            self.upload = isTrue(value)
        elif key == "copy":
            self.artifactsCopy = value
        elif key == "share":
            self.shareDir = value
        elif key == "quota":
            self.shareQuota = value
        elif key == "platform":
            self.platform = value
        else:
            raise AssertionError(line)

    def __handleRecipesAudit(self, line):
        # JSON payload collected verbatim; parsed lazily in recipesAudit
        self.__recipesAudit.append(line)

    def __handleAuditMeta(self, line):
        key, _, value = line.partition("=")
        self.auditMeta[key] = value

    def __handleExecIR(self, line):
        # base64/lzma payload collected verbatim; decoded lazily in execIR
        self.__execIR.append(line)

    @property
    def recipesAudit(self):
        if not self.__recipesAudit:
            return None
        return json.loads("".join(self.__recipesAudit))

    @property
    def execIR(self):
        raw = lzma.decompress(base64.a85decode("".join(self.__execIR)))
        ir = PartialIR.fromData(json.loads(raw))
        ir.scmAudit = self.recipesAudit
        return ir
def readBuildId(step):
    """Read the stored Build-Id of a package step, mapping I/O failures to BuildError."""
    buildIdFile = JenkinsArchive.buildIdName(step)
    try:
        with open(buildIdFile, 'rb') as f:
            return f.read()
    except OSError as e:
        raise BuildError("Could not read build-id: " + str(e),
            help="This may happend if the job was modified while being queued.")
def cleanRecurse(d, whiteList, keep = lambda n: False):
    """Remove everything under `d` that is neither whitelisted nor kept.

    `whiteList` is a nested dict: a dict value means "descend and clean the
    sub-tree", any other (truthy) value marks the entry as fully retained.
    `keep` is a predicate retaining additional entries by name.
    """
    with os.scandir(d) as it:
        for entry in it:
            if entry.name not in whiteList:
                if not keep(entry.name):
                    print("Remove", entry.path)
                    removePath(entry.path)
                continue
            # whitelisted: recurse into directories with a sub-tree spec,
            # keep everything else untouched
            subTree = whiteList[entry.name]
            if isinstance(subTree, dict) and entry.is_dir(follow_symlinks=False):
                cleanRecurse(entry.path, subTree)
def cleanWorkspace(spec):
    # gather all files/directories that should be kept
    whiteList = { }
    # Chop off the trailing "/workspace" from the allowed paths because the
    # files next to them must be kept too.
    allowed = [ os.path.dirname(workspace) for workspace in spec.getAllWorkspaces() ]
    allowed.extend(spec.getTransferFiles())
    for workspace in allowed:
        # Walk/extend the nested dict one path component at a time; after the
        # loop, `prevDir` is the parent dict and `i` the last component, which
        # is then overwritten with True to mark "keep this entry entirely".
        # NOTE(review): order matters — if a path is a prefix of a later one,
        # the True marker would be replaced by a sub-dict (and vice versa).
        w = whiteList
        for i in workspace.split('/'):
            prevDir = w
            w = w.setdefault(i, {})
        prevDir[i] = True

    # Special hack to retain coverage data in tests
    if "COVERAGE_SOURCES" in os.environ:
        keep = lambda n: n.startswith(".bob-") or n.startswith(".coverage")
    else:
        keep = lambda n: n.startswith(".bob-") # pragma: no cover

    cleanRecurse(".", whiteList, keep)

    # remove @tmp directories created by jenkins git plugins
    for step in spec.getBuiltCheckoutSteps():
        workspace = step.getWorkspacePath()
        for scm in step.getScmDirectories():
            path = os.path.join(workspace, scm+"@tmp")
            if os.path.lexists(path):
                print("Remove", path)
                removePath(path)
def getDependencies(ir):
    """Gather all package steps that are build dependencies of the built
    packages."""
    deps = set()
    for root in ir.getRoots():
        package = root.getPackage()
        deps.update(package.getPackageStep().getAllDepSteps())
        # checkout/build steps may be invalid (absent) for a package
        for step in (package.getBuildStep(), package.getCheckoutStep()):
            if step.isValid():
                deps.update(step.getAllDepSteps())
    # the built packages themselves are not dependencies
    deps.difference_update(ir.getRoots())
    return [s for s in deps if s.isPackageStep()]
def doJenkinsExecute(argv, bobRoot):
    """Entry point of ``bob _jexec``: verify version compatibility and dispatch."""
    parser = argparse.ArgumentParser(prog="bob _jexec")
    parser.add_argument('subcommand', help="Subcommand")
    parser.add_argument('args', nargs=argparse.REMAINDER,
                        help="Arguments for subcommand")
    parser.add_argument('--version')
    args = parser.parse_args(argv)

    if args.version and (args.version != BOB_INPUT_HASH.hex()):
        raise BuildError("Local Bob version incompatible to the one that created the Job!")

    dispatch = {
        "run": doJenkinsExecuteRun,
        "check-shared": doJenkinsExecuteCheckShared,
    }
    handler = dispatch.get(args.subcommand)
    if handler is None:
        parser.error("Invalid sub-command")
        return 3  # unreachable in practice: parser.error() exits
    return handler(args.args, bobRoot)
def doJenkinsExecuteRun(argv, bobRoot):
    # Executes the actual build of a Jenkins job: parse the spec file, verify
    # the environment, clean stale workspace content and run the LocalBuilder.
    parser = argparse.ArgumentParser(prog="bob _jexec run")
    parser.add_argument('spec')
    args = parser.parse_args(argv)

    # Verify umask for predictable file modes. Can be set outside of Jenkins
    # but Bob requires that umask is everywhere the same for stable Build-IDs.
    # Mask 0022 is enforced on local builds and in the sandbox. But at this
    # stage the SCMs provided by Jenkins have already run. Check it and bail
    # out if different.
    # TODO: check if MSYS2 should have this check
    if not isWindows():
        if os.umask(0o0022) != 0o0022:
            raise BuildError("The umask is not 022.")

    spec = Spec(args.spec)
    # decode the embedded partial intermediate representation of the build
    ir = spec.execIR

    # the job was configured for one specific platform; refuse to run elsewhere
    if spec.platform != getPlatformString():
        raise BuildError("Wrong execution environment! Configured: {} Actual: {}"
            .format(spec.platform, getPlatformString()))

    envWhiteList = ir.getRecipeSet().envWhiteList()

    # audit metadata: spec-provided entries plus the Jenkins build context
    meta = spec.auditMeta.copy()
    meta.update({
        "jenkins-build-tag" : os.environ.get('BUILD_TAG', ""),
        "jenkins-node" : os.environ.get('NODE_NAME', ""),
        "jenkins-build-url" : os.environ.get('BUILD_URL', ""),
    })

    # Build-Ids of all upstream dependencies, read from their workspaces
    dependencyBuildIds = {
        step.getWorkspacePath() : readBuildId(step)
        for step in getDependencies(ir)
    }

    # delete everything not needed by this build before cooking
    cleanWorkspace(ir)

    with EventLoopWrapper() as (loop, executor):
        setVerbosity(TRACE)
        builder = LocalBuilder(TRACE, False, False, False, False, envWhiteList,
            bobRoot, False, True)
        builder.setBuildDistBuildIds(dependencyBuildIds)
        builder.setExecutor(executor)
        builder.setArchiveHandler(getArchiver(
            ir.getRecipeSet(),
            { "xfer" : spec.artifactsCopy == "jenkins" }))
        builder.setLinkDependencies(False)
        builder.setAuditMeta(meta)
        builder.setJenkinsDownloadMode(spec.download)
        builder.setJenkinsUploadMode(spec.upload)
        if spec.shareDir:
            # the share location may reference environment variables
            path = Env(os.environ).substitute(spec.shareDir, "shared.dir")
            builder.setShareHandler(getShare({ 'path' : path,
                                               'quota' : spec.shareQuota }))
            builder.setShareMode(True, True)
        builder.cook(ir.getRoots(), False, loop)

    return 0
def doJenkinsExecuteCheckShared(argv, bobRoot):
    """Check whether a Build-Id is already present in the shared location.

    Returns 1 when found and 0 otherwise, so the Jenkins job can skip work.
    """
    parser = argparse.ArgumentParser(prog="bob _jexec check-shared")
    parser.add_argument('share')
    parser.add_argument('buildid')
    args = parser.parse_args(argv)

    sharePath = Env(os.environ).substitute(args.share, "shared.dir")
    share = getShare({ 'path' : sharePath })
    try:
        with open(args.buildid, 'rb') as f:
            buildId = f.read()
    except OSError as e:
        raise BuildError("Could not read build-id: " + str(e),
            help="This may happend if the job was modified while being queued.")

    found = share.contains(buildId)
    print("{} in {}: {}".format(asHexStr(buildId), sharePath,
                                "found" if found else "NOT FOUND"))
    return 1 if found else 0
from types import SimpleNamespace
from typing import Optional

import numpy as np
from pypulseq import Opts

from bmctool import GAMMA_HZ
from bmctool.utils.pulses.calculate_phase import calculate_phase
from bmctool.utils.pulses.create_arbitrary_pulse_with_phase import create_arbitrary_pulse_with_phase
from bmctool.utils.pulses.make_hypsec_half_passage import calculate_amplitude as hypsec_amp
def calculate_window_modulation(t: np.ndarray,
                                t0: float) \
        -> np.ndarray:
    """
    Calculates modulation function for HSExp pulses.

    The shape rises smoothly from 0 at t = 0 to 1 at t = t0.

    :param t: time points of the different sample points [s]
    :param t0: reference time point (= last point for half passage pulse) [s]
    :return: modulation values at the given time points
    """
    phase = np.pi * t / t0
    return 0.42 - 0.5 * np.cos(phase) + 0.08 * np.cos(2 * phase)
def calculate_frequency(t: np.ndarray,
                        t0: float,
                        bandwidth: float,
                        ef: float,
                        freq_factor: int) \
        -> np.ndarray:
    """
    Calculates the exponential frequency modulation for HSExp pulses.

    :param t: time points of the different sample points [s]
    :param t0: reference time point (= last point for half passage pulse) [s]
    :param bandwidth: bandwidth of the pulse [Hz]
    :param ef: dimensionless parameter to control steepness of the exponential curve
    :param freq_factor: factor (-1 or +1) to switch between positive and negative offsets
    :return: angular frequency offsets at the given time points
    """
    envelope = np.exp(-t / t0 * ef)
    return -freq_factor * bandwidth * np.pi * envelope
def make_hsexp(amp: float = 1.0,
               t_p: float = 12e-3,
               mu: float = 65,
               bandwidth: float = 2500,
               t_window: float = 3.5e-3,
               ef: float = 3.5,
               tip_down: bool = True,
               pos_offset: bool = True,
               system: Optional[Opts] = None,
               gamma_hz: float = GAMMA_HZ) \
        -> SimpleNamespace:
    """
    Creates a radio-frequency pulse event with amplitude and phase modulation of a HSExp pulse.

    :param amp: maximum amplitude value [µT]
    :param t_p: pulse pulse_duration [s]
    :param mu: parameter µ of hyperbolic secant pulse
    :param bandwidth: bandwidth of hyperbolic secant pulse [Hz]
    :param t_window: pulse_duration of window function
    :param ef: dimensionless parameter to control steepness of the exponential curve
    :param tip_down: flag to switch between tip down (True) and tip up (False) pulses
    :param pos_offset: flag to switch between positive (True) and negative (False) offsets
    :param system: system limits of the MR scanner (a fresh Opts() if omitted)
    :param gamma_hz: gyromagnetic ratio [Hz]
    :return: the rf pulse event
    """
    # Previously `system: Opts = Opts()` created one shared instance at import
    # time (mutable default argument); create a fresh one per call instead.
    if system is None:
        system = Opts()

    samples = int(t_p * 1e6)  # 1 µs raster
    t_pulse = np.divide(np.arange(1, samples + 1), samples) * t_p  # time point array

    # find start index of window function
    idx_window = np.argmin(np.abs(t_pulse - t_window))

    # reference time: last sample for tip-down pulses, first sample for tip-up
    shift_idx = -1 if tip_down else 0

    # calculate amplitude of hyperbolic secant (HS) pulse
    w1 = hypsec_amp(t_pulse, t_pulse[shift_idx], amp, mu, bandwidth)

    # calculate and apply modulation function to convert HS into HSExp pulse
    window_mod = calculate_window_modulation(t_pulse[:idx_window], t_pulse[idx_window])
    if tip_down:
        w1[:idx_window] = w1[:idx_window] * window_mod
    else:
        w1[-idx_window:] = w1[-idx_window:] * np.flip(window_mod)

    # calculate freq modulation of pulse: the exponential sweep runs forward
    # in time for tip-down pulses and backward for tip-up pulses; the sign of
    # the offset is selected via freq_factor
    freq_factor = 1 if pos_offset else -1
    t_freq = t_pulse if tip_down else np.flip(t_pulse)
    dfreq = calculate_frequency(t_freq, t_pulse[-1], bandwidth, ef, freq_factor)

    # make freq modulation end (in case of tip-down) or start (in case of tip-up) with dw = 0
    diff_idx = np.argmin(np.abs(dfreq))
    dfreq -= dfreq[diff_idx]

    # calculate phase (= integrate over dfreq)
    dphase = calculate_phase(dfreq, t_p, samples, shift_idx=shift_idx, pos_offsets=pos_offset)

    # create pypulseq rf pulse object
    signal = w1 * np.exp(1j * dphase)  # complex array with amp and phase
    flip_angle = gamma_hz * 2 * np.pi
    hsexp = create_arbitrary_pulse_with_phase(signal=signal, flip_angle=flip_angle, system=system)

    return hsexp
def generate_hsexp_dict(amp: float = 1.0,
                        t_p: float = 12e-3,
                        mu: float = 65,
                        bandwidth: float = 2500,
                        t_window: float = 3.5e-3,
                        ef: float = 3.5,
                        system: Opts = Opts(),
                        gamma_hz: float = GAMMA_HZ) \
        -> dict:
    """
    Create a dictionary with the 4 different HSExp pulses (tip-down/up, pos/neg offsets).
    :param amp: maximum amplitude value [µT]
    :param t_p: pulse pulse_duration [s]
    :param mu: parameter µ of hyperbolic secant pulse
    :param bandwidth: bandwidth of hyperbolic secant pulse [Hz]
    :param t_window: pulse_duration of window function
    :param ef: dimensionless parameter to control steepness of the exponential curve
    :param system: system limits of the MR scanner
    :param gamma_hz: gyromagnetic ratio [Hz]
    :return: dict with keys 'pre_pos', 'pre_neg', 'post_pos', 'post_neg'
    """
    # (tip_down, pos_offset) flags for each of the four pulse variants
    variants = {
        'pre_pos': (True, True),    # tip-down, positive offset
        'pre_neg': (True, False),   # tip-down, negative offset
        'post_pos': (False, True),  # tip-up, positive offset
        'post_neg': (False, False)  # tip-up, negative offset
    }
    pulse_dict = {}
    for key, (tip_down, pos_offset) in variants.items():
        pulse_dict[key] = make_hsexp(amp=amp,
                                     t_p=t_p,
                                     mu=mu,
                                     bandwidth=bandwidth,
                                     t_window=t_window,
                                     ef=ef,
                                     tip_down=tip_down,
                                     pos_offset=pos_offset,
                                     system=system,
                                     gamma_hz=gamma_hz)
    return pulse_dict
/GeoNode-3.2.0-py3-none-any.whl/geonode/static/gruntfile.js |
module.exports = function(grunt) {
/* include the dependency definitions */
let fileHandling = grunt.file.readJSON('static_dependencies.json');
/* rewirte the path from node_modules to lib/ */
let assetsMinifiedJs = fileHandling["assets.min.js"].map (
fileSegment => 'lib/js/' + fileSegment.substring(fileSegment.lastIndexOf('/')+1)
);
let leafletPluginsMinifiedJs = fileHandling["leaflet-plugins.min.js"].map (
fileSegment => 'lib/js/' + fileSegment.substring(fileSegment.lastIndexOf('/')+1)
);
let openlayersPluginsMinifiedJs = fileHandling["openlayers-plugins.min.js"].map(
fileSegment => 'lib/js/' + fileSegment.substring(fileSegment.lastIndexOf('/') + 1)
);
let assetsMinifiedCss = fileHandling["assets.min.css"].map (
fileSegment => 'lib/css/' + fileSegment.substring(fileSegment.lastIndexOf('/')+1)
);
let leafletMinifiedCss = fileHandling["leaflet.plugins.min.css"].map (
fileSegment => 'lib/css/' + fileSegment.substring(fileSegment.lastIndexOf('/')+1)
);
let openlayersMinifiedCss = fileHandling["openlayers.plugins.min.css"].map(
fileSegment => 'lib/css/' + fileSegment.substring(fileSegment.lastIndexOf('/') + 1)
);
grunt.initConfig({
pkg: grunt.file.readJSON('package.json'),
jshint: {
// files to lint
files: ['gruntfile.js'],
// configure JSHint (see http://www.jshint.com/docs/)
options: {
globals: {
jQuery: true,
console: true,
module: true
}
}
},
clean: {
lib: ['lib/']
},
less: {
development: {
options: {
paths: [
'geonode/less'
]
},
files: [
{
'geonode/css/base.css': 'geonode/less/base.less',
'geonode/css/crop_widget.css': 'geonode/less/crop_widget.less',
'geonode/css/geonode-rtl.css': 'geonode/less/geonode-rtl.less'
}
]
},
production: {
options: {
paths: [
'geonode/less',
'node_modules/bootstrap/less'
],
yuicompress: true
},
files: [
{
'geonode/css/base.css': 'geonode/less/base.less',
'geonode/css/crop_widget.css': 'geonode/less/crop_widget.less',
'geonode/css/geonode-rtl.css': 'geonode/less/geonode-rtl.less'
}
]
}
},
concat: {
bootstrap: {
files: [{
expand: true,
flatten: true,
cwd: 'node_modules',
dest: 'lib/js',
src: fileHandling.concatBootstrap
}]
}
},
copy: {
default: {
files: [{
expand: true,
flatten: true,
nonull: true,
cwd: 'node_modules',
dest: 'lib/css',
src: [fileHandling["assets.min.css"], fileHandling["leaflet.plugins.min.css"], fileHandling["openlayers.plugins.min.css"]]
}, {
expand: true,
flatten: true,
nonull: true,
cwd: 'node_modules',
dest: 'lib/img',
src: fileHandling.images
},
{
expand: true,
flatten: true,
nonull: true,
cwd: 'node_modules',
dest: 'lib/fonts',
src: fileHandling.lib_fonts
},{
expand: true,
flatten: true,
nonull: true,
cwd: 'node_modules',
dest: 'lib/css/fonts',
src: fileHandling.lib_css_fonts
},{
expand: true,
flatten: true,
nonull: true,
cwd: 'node_modules',
dest: 'lib/css/assets',
src: fileHandling.lib_css_assets
}, {
expand: true,
flatten: true,
nonull: true,
cwd: 'node_modules',
dest: 'lib/css',
src: fileHandling.lib_css_png
}, {
expand: true,
flatten: true,
nonull: true,
cwd: 'node_modules',
dest: 'lib/js',
src: [fileHandling["assets.min.js"], fileHandling.other_dependencies, fileHandling["leaflet-plugins.min.js"], fileHandling["openlayers-plugins.min.js"]]
}]
}
},
replace: {
default: {
src: ['lib/css/*.css'],
overwrite: true,
/*
* We separate each pattern so it will be easy for us to read
* and recognize
*/
replacements: [
/*
* Pattern:
* url('img/image _.png') or url("img/image _.png")
*/
{
from: /url\([\"\']?(img\/)([\w-\.\s@]+)[\"\']?\)/g,
to: 'url("../img/$2")'
},
/*
* Pattern:
* url('images/image _.png') or url("images/image _.png")
*/
{
from: /url\([\"\']?(images\/)([\w-\.\s@]+)[\"\']?\)/g,
to: 'url("../img/$2")'
},
/*
* Pattern:
* url('image/image _.png') or url("image/image _.png")
*/
{
from: /url\([\"\']?(image\/)([\w-\.\s@]+)[\"\']?\)/g,
to: 'url("../img/$2")'
},
/*
* Pattern:
* url('./image _.png') or url("./image _.png")
*/
/*{
from: /url\([\"\']?(\.\/)([\w-\.\s@]+)[\"\']?\)/g,
to: 'url("../img/$2")'
},*/
/*
* Pattern:
* url('image _.png') or url("image _.png")
*/
/*{
from: /url\([\"\']?([\w-\.\s@]+)[\"\']?\)/g,
to: 'url("../img/$1")'
},*/
/*
* Pattern:
* url('../images/image _.png') or url("../images/image _.png")
*/
{
from: /url\([\"\']?(\.\.\/images\/)([\w-\.\s@]+)[\"\']?\)/g,
to: 'url("../img/$1")'
},
/*
* Pattern:
* url('../image/image _.png') or url("../image/image _.png")
*/
{
from: /url\([\"\']?(\.\.\/image\/)([\w-\.\s@]+)[\"\']?\)/g,
to: 'url("../img/$1")'
}
]
}
},
cssmin: {
default: {
options: {
// the banner is inserted at the top of the output
banner: '/*! <%= pkg.name %> <%= grunt.template.today("dd-mm-yyyy") %> */\n',
cwd: 'l'
},
files: {
'lib/css/assets.min.css': assetsMinifiedCss,
'lib/css/leaflet-plugins.min.css': leafletMinifiedCss,
'lib/css/openlayers-plugins.min.css': openlayersMinifiedCss,
'geonode/css/geonode-rtl.min.css': ['geonode/css/geonode-rtl.css']
}
}
},
babel: {
options: {
sourceMap: true,
presets: ['@babel/preset-env']
},
dist: {
files: {
'geonode/js/crop_widget/crop_widget_es5.js': 'geonode/js/crop_widget/crop_widget.js',
'geonode/js/messages/message_recipients_autocomplete_es5.js': 'geonode/js/messages/message_recipients_autocomplete.js'
}
}
},
uglify: {
options: {
// the banner is inserted at the top of the output
banner: '/*! <%= pkg.name %> <%= grunt.template.today("dd-mm-yyyy") %> */\n'
},
development: {
options: {
beautify: true,
compress: false,
mangle: false
},
files: {
'lib/js/assets.min.js': assetsMinifiedJs,
'lib/js/leaflet-plugins.min.js': leafletPluginsMinifiedJs,
'lib/js/openlayers-plugins.min.js': openlayersPluginsMinifiedJs
}
},
production: {
options: {
beautify: false,
compress: true,
mangle: false
},
files: {
'lib/js/assets.min.js': assetsMinifiedJs,
'lib/js/leaflet-plugins.min.js': leafletPluginsMinifiedJs,
'lib/js/openlayers-plugins.min.js': openlayersPluginsMinifiedJs
}
}
},
// automated build on file change during development
watch: {
less: {
files: ['geonode/less/*.less'],
tasks: ['less:development']
}
}
});
// Load libs
require('load-grunt-tasks')(grunt);
// test
grunt.registerTask('test', ['jshint']);
// build development
grunt.registerTask('development', ['jshint', /*'clean:lib',*/ 'less:development', 'concat:bootstrap', 'copy', 'replace', 'cssmin', 'uglify:development', 'babel']);
grunt.registerTask('build-less-dev', ['less:development']);
// build production
grunt.registerTask('production', ['jshint', /*'clean:lib',*/ 'less:production', 'concat:bootstrap', 'copy', 'replace', 'cssmin', 'uglify:production', 'babel']);
grunt.registerTask('build-less-prod', ['less:production']);
}; | PypiClean |
/Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/launcher/multinode_runner.py |
# DeepSpeed Team
import os
import sys
import shutil
import subprocess
import warnings
from shlex import split
from abc import ABC, abstractmethod
from deepspeed.accelerator import get_accelerator
from ..utils import logger
from .constants import PDSH_MAX_FAN_OUT, MVAPICH_TMP_HOSTFILE
class MultiNodeRunner(ABC):
    """Abstract base for launchers that start DeepSpeed across multiple nodes.

    Subclasses implement ``backend_exists`` (is the launcher binary/backend
    available?) and ``get_cmd`` (build the command line to execute).
    """

    def __init__(self, args, world_info_base64):
        self.args = args
        self.validate_args()  # let subclasses reject unsupported options early
        self.user_arguments = self.parse_user_args()
        self.user_script = args.user_script
        self.world_info_base64 = world_info_base64
        self.exports = {}  # environment variables to propagate to workers

    @abstractmethod
    def backend_exists(self):
        """Return whether the corresponding backend exists."""

    @abstractmethod
    def get_cmd(self, environment, active_resources):
        """Return the command to execute on node."""

    def add_export(self, key, var):
        """Record an environment variable (whitespace-stripped) for export."""
        key, var = key.strip(), var.strip()
        self.exports[key] = var

    def parse_user_args(self):
        """Return the user arguments; subclasses may quote/escape them."""
        return self.args.user_args

    @property
    def name(self):
        """Return the name of the backend."""
        return type(self).__name__

    def validate_args(self):
        """Validate ``self.args``; base implementation accepts everything."""
class PDSHRunner(MultiNodeRunner):
    """Multi-node launcher that fans out over ssh via pdsh."""

    def __init__(self, args, world_info_base64):
        super().__init__(args, world_info_base64)

    def backend_exists(self):
        # pdsh must be on PATH
        return shutil.which('pdsh')

    @property
    def name(self):
        return "pdsh"

    def parse_user_args(self):
        # Single-quote everything that is not a flag so the remote shell
        # does not re-interpret user argument values.
        return [arg if arg.startswith("-") else f"'{arg}'" for arg in self.args.user_args]

    def get_cmd(self, environment, active_resources):
        """Build the pdsh launch command plus a matching kill command."""
        environment['PDSH_RCMD_TYPE'] = 'ssh'

        active_workers = ",".join(active_resources.keys())
        logger.info("Running on the following workers: %s" % active_workers)

        # PDSH flags for max node fan out and specific hosts to launch on
        # See https://linux.die.net/man/1/pdsh for flag details
        pdsh_cmd_args = ['pdsh', '-S', '-f', str(PDSH_MAX_FAN_OUT), '-w', active_workers] + split(
            self.args.launcher_args)

        exports = "".join("export {}={}; ".format(key, val) for key, val in self.exports.items())

        # https://linux.die.net/man/1/pdsh
        # %n will be replaced by pdsh with the node's rank
        deepspeed_launch = [
            exports, f"cd {os.path.abspath('.')};", sys.executable, "-u", "-m", "deepspeed.launcher.launch",
            f'--world_info={self.world_info_base64}', "--node_rank=%n", f"--master_addr={self.args.master_addr}",
            f"--master_port={self.args.master_port}"
        ]
        if self.args.no_python:
            deepspeed_launch.append("--no_python")
        if self.args.module:
            deepspeed_launch.append("--module")
        if self.args.no_local_rank:
            deepspeed_launch.append("--no_local_rank")
        if self.args.save_pid:
            deepspeed_launch += ["--save_pid", f"{os.getpid()}"]
        if self.args.elastic_training:
            deepspeed_launch.append("--enable_elastic_training")
            deepspeed_launch.append(f"--max_elastic_nodes={self.args.max_elastic_nodes}")
            deepspeed_launch.append(f"--min_elastic_nodes={self.args.min_elastic_nodes}")

        # Build a pkill pattern from the stable part of the launch command
        # (python -u -m deepspeed.launcher.launch), backslash-joined.
        cmd_to_search = [i + "\\" for i in deepspeed_launch[2:6]]
        kill_command = pdsh_cmd_args + ["pkill -f ", " ".join(cmd_to_search)[:-2]]
        return pdsh_cmd_args + deepspeed_launch + [self.user_script] + self.user_arguments, kill_command
class OpenMPIRunner(MultiNodeRunner):
    """Multi-node launcher using OpenMPI's mpirun."""

    def __init__(self, args, world_info_base64, resource_pool):
        super().__init__(args, world_info_base64)
        self.resource_pool = resource_pool
        self.add_export('UCX_TLS', 'tcp')  # force TCP transport for UCX

    def backend_exists(self):
        #TODO: if IB is available we should suggestion mvapich
        return shutil.which('ompi_info')

    @property
    def name(self):
        return "openmpi"

    def validate_args(self):
        super().validate_args()
        #TODO: Allow for include/exclude at node-level but not gpu-level
        if self.args.include != "" or self.args.exclude != "":
            raise ValueError(f"{self.name} backend does not support worker include/exclusion")
        if self.args.num_nodes != -1 or self.args.num_gpus != -1:
            raise ValueError(f"{self.name} backend does not support limiting num nodes/gpus")

    def get_cmd(self, environment, active_resources):
        """Build the mpirun command: base flags, -x exports, then the script."""
        total_process_count = sum(self.resource_pool.values())

        mpirun_cmd = [
            'mpirun',
            '-n',
            f'{total_process_count}',
            '-hostfile',
            f'{self.args.hostfile}',
            '--mca',
            'btl',
            '^openib',
            '--mca',
            'btl_tcp_if_include',
            'eth0',
        ] + split(self.args.launcher_args)

        export_cmd = []
        for key, val in self.exports.items():
            export_cmd.extend(['-x', "{}={}".format(key, val)])

        python_exec = []
        if not self.args.no_python:
            python_exec = [sys.executable, "-u"]
            if self.args.module:
                python_exec.append("-m")

        return mpirun_cmd + export_cmd + python_exec + [self.user_script] + self.user_arguments
class MPICHRunner(MultiNodeRunner):
    """Multi-node launcher using MPICH's mpirun."""

    def __init__(self, args, world_info_base64, resource_pool):
        super().__init__(args, world_info_base64)
        self.resource_pool = resource_pool

    def backend_exists(self):
        #TODO: if IB is available we should suggestion mpich
        return shutil.which('mpirun')  #mpich_info

    @property
    def name(self):
        return "mpich"

    def validate_args(self):
        super().validate_args()
        #TODO: Allow for include/exclude at node-level but not gpu-level
        if self.args.include != "" or self.args.exclude != "":
            raise ValueError(f"{self.name} backend does not support worker include/exclusion")
        if self.args.num_nodes != -1 or self.args.num_gpus != -1:
            raise ValueError(f"{self.name} backend does not support limiting num nodes/gpus")

    def get_cmd(self, environment, active_resources):
        """Build the MPICH mpirun command line.

        NOTE(review): per-node process count is taken from the first node —
        assumes a homogeneous resource pool; verify against callers.
        """
        device_counts = list(self.resource_pool.values())
        total_process_count = sum(device_counts)
        process_per_node = device_counts[0]

        hosts = ",".join(self.resource_pool.keys())

        mpirun_cmd = [
            'mpirun',
            '-n',
            f'{total_process_count}',
            '-ppn',
            f'{process_per_node}',
            '-hosts',
            f'{hosts}',
        ] + split(self.args.launcher_args)

        export_cmd = []
        for key, val in self.exports.items():
            export_cmd.extend(['-genv', "{}={}".format(key, val)])

        python_exec = []
        if not self.args.no_python:
            python_exec = [sys.executable, "-u"]
            if self.args.module:
                python_exec.append("-m")

        return mpirun_cmd + export_cmd + python_exec + [self.user_script] + self.user_arguments
class SlurmRunner(MultiNodeRunner):
    """Multi-node launcher using SLURM's srun."""

    def __init__(self, args, world_info_base64, resource_pool):
        super().__init__(args, world_info_base64)
        self.resource_pool = resource_pool

    def backend_exists(self):
        # sinfo ships with every SLURM installation
        return shutil.which('sinfo')

    @property
    def name(self):
        return 'slurm'

    def get_cmd(self, environment, active_resources):
        """Build the srun command: srun flags, env exports, python, script."""
        assert not getattr(self.args, 'detect_nvlink_pairs',
                           False), "slurm backend does not support remapping visible devices"
        total_process_count = sum(self.resource_pool.values())
        srun_cmd = [
            'srun',
            '-n',
            f'{total_process_count}',
        ] + split(self.args.launcher_args)

        if getattr(self.args, 'slurm_comment', ''):
            srun_cmd += ['--comment', self.args.slurm_comment]
        if self.args.include != "":
            srun_cmd += ['--include', f'{self.args.include}']
        if self.args.exclude != "":
            srun_cmd += ['--exclude', f'{self.args.exclude}']
        if self.args.num_nodes > 0:
            srun_cmd += ['--nodes', f'{self.args.num_nodes}']
        if self.args.num_gpus > 0:
            srun_cmd += ['--gpus', f'{self.args.num_gpus}']

        # Forward all of the caller's environment plus recorded exports
        exports = '--export=ALL'
        for key, val in self.exports.items():
            exports += f",{key}={val}"

        python_exec = [sys.executable, "-u"]
        return srun_cmd + [exports] + python_exec + [self.user_script] + self.user_arguments
class MVAPICHRunner(MultiNodeRunner):
    """Multi-node launcher using MVAPICH2-GDR's mpirun."""

    def __init__(self, args, world_info_base64, resource_pool):
        super().__init__(args, world_info_base64)
        self.resource_pool = resource_pool

        # Disable the CMA kernel module, not available on Ubuntu systems
        self.add_export('MV2_SMP_USE_CMA', '0')

        # If we fail this will output more verbose logging
        self.add_export('MV2_DEBUG_SHOW_BACKTRACE', '1')

        # Enabled cuda-aware communication
        if get_accelerator().device_name() == 'cuda':
            self.add_export('MV2_USE_CUDA', '1')

        # Support deep learning frameworks: http://hidl.cse.ohio-state.edu/userguide/horovod/
        self.add_export('MV2_SUPPORT_DL', '1')

        # Support MPI_THREAD_MULTIPLE
        self.add_export('MV2_ENABLE_AFFINITY', '0')

        # Performance tuning flags for allgather
        self.add_export('MV2_INTER_ALLGATHER_TUNING', '5')
        self.add_export('MV2_CUDA_USE_NAIVE', '0')

    def backend_exists(self):
        #TODO: if IB is available we should suggestion mvapich
        if not shutil.which('mpiname'):
            warnings.warn("mpiname does not exist, mvapich is not installed properly")
            return False
        results = subprocess.check_output('mpiname', shell=True)
        mpiname_results = results.decode('utf-8').strip()
        if "MVAPICH2-GDR" not in mpiname_results:
            warnings.warn(f"Expected MVAPICH2-GDR as return for mpiname but received {mpiname_results}")
            return False
        return True

    @property
    def name(self):
        return "mvapich"

    def validate_args(self):
        super().validate_args()
        #TODO: Allow for include/exclude at node-level but not gpu-level
        if self.args.include != "" or self.args.exclude != "":
            raise ValueError(f"{self.name} backend does not support worker include/exclusion")
        if self.args.num_nodes != -1 or self.args.num_gpus != -1:
            raise ValueError(f"{self.name} backend does not support limiting num nodes/gpus")

    def get_cmd(self, environment, active_resources):
        """Write a temp hostfile and build the mpirun command line."""
        device_counts = list(self.resource_pool.values())
        total_process_count = sum(device_counts)
        process_per_node = device_counts[0]
        if any(count != process_per_node for count in device_counts):
            raise ValueError("mvapich requires same number of devices per node")

        with open(MVAPICH_TMP_HOSTFILE, 'w') as fd:
            fd.write(''.join(f'{host}\n' for host in self.resource_pool.keys()))

        mpirun_cmd = [
            'mpirun',
            '-np',
            f'{total_process_count}',
            '-ppn',
            f'{process_per_node}',
            '--hostfile',
            f'{MVAPICH_TMP_HOSTFILE}',
        ] + split(self.args.launcher_args)

        export_cmd = []
        for key, val in self.exports.items():
            export_cmd.extend(['-env', "{}={}".format(key, val)])

        python_exec = []
        if not self.args.no_python:
            python_exec = [sys.executable, "-u"]
            if self.args.module:
                python_exec.append("-m")

        return mpirun_cmd + export_cmd + python_exec + [self.user_script] + self.user_arguments
/Captcha-Impulse-0.0.9.tar.gz/Captcha-Impulse-0.0.9/src/impulse/yolov5/utils/plots.py | import math
import os
from copy import copy
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image, ImageDraw, ImageFont
from utils.general import (LOGGER, Timeout, clip_coords, increment_path, is_ascii, is_chinese,
try_except, user_config_dir, xywh2xyxy, xyxy2xywh)
from utils.metrics import fitness
# Settings -- module-level plotting configuration, applied at import time
CONFIG_DIR = user_config_dir()  # Ultralytics settings dir
RANK = int(os.getenv('RANK', -1))  # distributed-training rank from env; -1 when not launched distributed
matplotlib.rc('font', **{'size': 11})  # default font size for all figures
matplotlib.use('Agg')  # for writing to files only
class Colors:
    """Ultralytics color palette (https://ultralytics.com/).

    Callable: ``colors(i)`` returns an RGB tuple, cycling through the
    palette modulo its length; pass ``bgr=True`` for BGR order (OpenCV).
    """

    def __init__(self):
        # hex = matplotlib.colors.TABLEAU_COLORS.values()
        hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
                '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
        self.palette = [self.hex2rgb(f'#{code}') for code in hexs]
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        r, g, b = self.palette[int(i) % self.n]
        return (b, g, r) if bgr else (r, g, b)

    @staticmethod
    def hex2rgb(h):
        """Convert '#RRGGBB' to an (r, g, b) tuple (PIL order)."""
        return tuple(int(h[i:i + 2], 16) for i in (1, 3, 5))


colors = Colors()  # create instance for 'from utils.plots import colors'
def check_font(font='Arial.ttf', size=10):
    """Return a PIL TrueType font, downloading it to CONFIG_DIR if necessary.

    :param font: font file name or path
    :param size: point size for the returned font
    :return: PIL.ImageFont.FreeTypeFont, or None when Pillow < 8.4.0 cannot
             load the downloaded file (the Pillow version hint is printed).
    """
    font = Path(font)
    font = font if font.exists() else (CONFIG_DIR / font.name)
    try:
        return ImageFont.truetype(str(font) if font.exists() else font.name, size)
    except Exception:  # download if missing
        url = "https://ultralytics.com/assets/" + font.name
        print(f'Downloading {url} to {font}...')
        torch.hub.download_url_to_file(url, str(font), progress=False)
        try:
            return ImageFont.truetype(str(font), size)
        except TypeError:
            # Pillow < 8.4.0 raises TypeError here; see
            # https://github.com/ultralytics/yolov5/issues/5374
            # Local import: check_requirements is not in this module's imports,
            # so calling it unqualified raised NameError on this path.
            from utils.general import check_requirements
            check_requirements('Pillow>=8.4.0')
class Annotator:
    """YOLOv5 image annotator: draws boxes, labels, and text on an image.

    Uses PIL when ``pil=True`` or when labels contain non-ASCII/Chinese text
    (cv2's putText cannot render those); otherwise draws with cv2 in place.
    """
    if RANK in (-1, 0):
        check_font()  # download TTF if necessary (only on the main process)

    # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
    def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
        # im: HWC numpy image (contiguous) or PIL Image
        # example: sample label text used to decide whether PIL is required
        assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
        self.pil = pil or not is_ascii(example) or is_chinese(example)
        if self.pil:  # use PIL
            self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
            self.draw = ImageDraw.Draw(self.im)
            self.font = check_font(font='Arial.Unicode.ttf' if is_chinese(example) else font,
                                   size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
        else:  # use cv2
            self.im = im
            self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2)  # line width

    def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
        """Draw one xyxy box on the image, with an optional filled label tag."""
        # Add one xyxy box to image with label
        if self.pil or not is_ascii(label):
            self.draw.rectangle(box, width=self.lw, outline=color)  # box
            if label:
                w, h = self.font.getsize(label)  # text width, height
                outside = box[1] - h >= 0  # label fits outside box
                # filled background rectangle behind the label text
                self.draw.rectangle([box[0],
                                     box[1] - h if outside else box[1],
                                     box[0] + w + 1,
                                     box[1] + 1 if outside else box[1] + h + 1], fill=color)
                # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls')  # for PIL>8.0
                self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
        else:  # cv2
            p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
            cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
            if label:
                tf = max(self.lw - 1, 1)  # font thickness
                w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0]  # text width, height
                outside = p1[1] - h - 3 >= 0  # label fits outside box
                p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
                cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA)  # filled
                cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color,
                            thickness=tf, lineType=cv2.LINE_AA)

    def rectangle(self, xy, fill=None, outline=None, width=1):
        """Draw a rectangle on the image (PIL-only)."""
        # Add rectangle to image (PIL-only)
        self.draw.rectangle(xy, fill, outline, width)

    def text(self, xy, text, txt_color=(255, 255, 255)):
        """Draw text with its baseline anchored at ``xy`` (PIL-only)."""
        # Add text to image (PIL-only)
        w, h = self.font.getsize(text)  # text width, height
        self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font)

    def result(self):
        """Return the annotated image as a numpy array."""
        # Return annotated image as array
        return np.asarray(self.im)
def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
    """
    Save a grid of feature-map images (and the raw .npy) for one model stage.

    x: Features to be visualized
    module_type: Module type
    stage: Module stage within model
    n: Maximum number of feature maps to plot
    save_dir: Directory to save results
    """
    if 'Detect' in module_type:  # skip detection heads
        return
    batch, channels, height, width = x.shape  # batch, channels, height, width
    if height > 1 and width > 1:
        f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png"  # filename

        blocks = torch.chunk(x[0].cpu(), channels, dim=0)  # select batch index 0, block by channels
        n = min(n, channels)  # number of plots
        fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True)  # 8 rows x n/8 cols
        ax = ax.ravel()
        plt.subplots_adjust(wspace=0.05, hspace=0.05)
        for i, block in enumerate(blocks[:n]):
            ax[i].imshow(block.squeeze())  # cmap='gray'
            ax[i].axis('off')

        print(f'Saving {f}... ({n}/{channels})')
        plt.savefig(f, dpi=300, bbox_inches='tight')
        plt.close()
        np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy())  # npy save
def hist2d(x, y, n=100):
# 2d histogram used in labels.png and evolve.png
xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
return np.log(hist[xidx, yidx])
def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
    """Zero-phase Butterworth low-pass filter applied to *data*.

    cutoff/fs are in Hz; the forward-backward pass (filtfilt) doubles the
    effective order but introduces no phase delay.
    https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
    """
    from scipy.signal import butter, filtfilt

    nyquist = 0.5 * fs
    b, a = butter(order, cutoff / nyquist, btype='low', analog=False)
    return filtfilt(b, a, data)  # forward-backward filter
def output_to_target(output):
    """Convert model output to target format [batch_id, class_id, x, y, w, h, conf]."""
    targets = []
    for batch_id, det in enumerate(output):
        for *box, conf, cls in det.cpu().numpy():
            xywh = xyxy2xywh(np.array(box)[None])  # (1, 4) xyxy -> xywh
            targets.append([batch_id, cls, *list(*xywh), conf])
    return np.array(targets)
def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16):
    """Plot a batch of images as a single labeled mosaic grid saved to *fname*.

    images: batch tensor/array of shape (bs, ch, h, w); values may be 0-1 or 0-255
    targets: rows of [batch_id, class, x, y, w, h(, conf)] — conf column only for predictions
    paths: optional per-image file paths, drawn as filename captions
    names: optional class-index -> class-name mapping
    """
    # Plot image grid with labels
    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()
    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()
    if np.max(images[0]) <= 1:
        images *= 255  # de-normalise (optional)
    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)
    # Build Image
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)  # init
    for i, im in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break
        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
        im = im.transpose(1, 2, 0)  # CHW -> HWC
        mosaic[y:y + h, x:x + w, :] = im
    # Resize (optional) -- shrink the mosaic so it fits within max_size
    scale = max_size / ns / max(h, w)
    if scale < 1:
        h = math.ceil(scale * h)
        w = math.ceil(scale * w)
        mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))
    # Annotate each tile with its border, filename, and target boxes
    fs = int((h + w) * ns * 0.01)  # font size
    annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True)
    for i in range(i + 1):
        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
        annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2)  # borders
        if paths:
            annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames
        if len(targets) > 0:
            ti = targets[targets[:, 0] == i]  # image targets
            boxes = xywh2xyxy(ti[:, 2:6]).T
            classes = ti[:, 1].astype('int')
            labels = ti.shape[1] == 6  # labels if no conf column
            conf = None if labels else ti[:, 6]  # check for confidence presence (label vs pred)
            if boxes.shape[1]:
                if boxes.max() <= 1.01:  # if normalized with tolerance 0.01
                    boxes[[0, 2]] *= w  # scale to pixels
                    boxes[[1, 3]] *= h
                elif scale < 1:  # absolute coords need scale if image scales
                    boxes *= scale
            boxes[[0, 2]] += x  # shift boxes into this tile's position in the mosaic
            boxes[[1, 3]] += y
            for j, box in enumerate(boxes.T.tolist()):
                cls = classes[j]
                color = colors(cls)
                cls = names[cls] if names else cls
                if labels or conf[j] > 0.25:  # 0.25 conf thresh
                    label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'
                    annotator.box_label(box, label, color=color)
    annotator.im.save(fname)  # save
def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
    """Simulate *epochs* of scheduler steps and save the LR curve as LR.png."""
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    lrs = []
    for _ in range(epochs):
        scheduler.step()
        lrs.append(optimizer.param_groups[0]['lr'])
    plt.plot(lrs, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
    plt.close()
def plot_val_txt():  # from utils.plots import *; plot_val()
    """Plot 2D and 1D histograms of box centres loaded from val.txt."""
    data = np.loadtxt('val.txt', dtype=np.float32)
    boxes = xyxy2xywh(data[:, :4])
    centre_x, centre_y = boxes[:, 0], boxes[:, 1]

    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    ax.hist2d(centre_x, centre_y, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    ax[0].hist(centre_x, bins=600)
    ax[1].hist(centre_y, bins=600)
    plt.savefig('hist1d.png', dpi=200)
def plot_targets_txt():  # from utils.plots import *; plot_targets_txt()
    """Plot per-column histograms (x, y, w, h) of targets.txt as targets.jpg."""
    data = np.loadtxt('targets.txt', dtype=np.float32).T
    titles = ['x targets', 'y targets', 'width targets', 'height targets']
    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    for axis, column, title in zip(ax, data, titles):
        axis.hist(column, bins=100, label=f'{column.mean():.3g} +/- {column.std():.3g}')
        axis.legend()
        axis.set_title(title)
    plt.savefig('targets.jpg', dpi=200)
def plot_val_study(file='', dir='', x=None):  # from utils.plots import *; plot_val_study()
    """Plot speed/accuracy study results from study*.txt files into study.png.

    file: a single study file whose parent directory is scanned, OR
    dir: directory containing study*.txt files (used when file is empty)
    x: optional x-axis values; defaults to column indices
    """
    # Plot file=study.txt generated by val.py (or plot all study*.txt in dir)
    save_dir = Path(file).parent if file else Path(dir)
    plot2 = False  # plot additional results (per-metric subplots)
    if plot2:
        ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()
    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
    # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
    for f in sorted(save_dir.glob('study*.txt')):
        # columns: P, R, mAP@.5, mAP@.5:.95, preprocess ms, inference ms, NMS ms
        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
        x = np.arange(y.shape[1]) if x is None else np.array(x)
        if plot2:
            s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']
            for i in range(7):
                ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
                ax[i].set_title(s[i])
        j = y[3].argmax() + 1  # index just past the best mAP@.5:.95
        ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
                 label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
    # EfficientDet reference curve for comparison (published speed/AP values)
    ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
             'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
    ax2.grid(alpha=0.2)
    ax2.set_yticks(np.arange(20, 60, 5))
    ax2.set_xlim(0, 57)
    ax2.set_ylim(25, 55)
    ax2.set_xlabel('GPU Speed (ms/img)')
    ax2.set_ylabel('COCO AP val')
    ax2.legend(loc='lower right')
    f = save_dir / 'study.png'
    print(f'Saving {f}...')
    plt.savefig(f, dpi=300)
@try_except  # known issue https://github.com/ultralytics/yolov5/issues/5395
@Timeout(30)  # known issue https://github.com/ultralytics/yolov5/issues/5611
def plot_labels(labels, names=(), save_dir=Path('')):
    """Plot dataset label statistics: correlogram, class histogram, box plots.

    labels: rows of [class, x, y, w, h] (normalized xywh)
    names: optional class names for the histogram x-axis
    save_dir: output directory for labels.jpg and labels_correlogram.jpg
    """
    # plot dataset labels
    LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ")
    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes
    nc = int(c.max() + 1)  # number of classes
    x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
    # seaborn correlogram of box coordinate/size distributions
    sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
    plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
    plt.close()
    # matplotlib labels
    matplotlib.use('svg')  # faster
    ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
    y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)  # per-class instance counts
    # [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)]  # update colors bug #3195
    ax[0].set_ylabel('instances')
    if 0 < len(names) < 30:  # only label ticks when the class list is small enough to read
        ax[0].set_xticks(range(len(names)))
        ax[0].set_xticklabels(names, rotation=90, fontsize=10)
    else:
        ax[0].set_xlabel('classes')
    sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
    sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)
    # rectangles: draw up to 1000 boxes centred on a blank 2000x2000 canvas
    labels[:, 1:3] = 0.5  # center
    labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
    img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
    for cls, *box in labels[:1000]:
        ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls))  # plot
    ax[1].imshow(img)
    ax[1].axis('off')
    for a in [0, 1, 2, 3]:
        for s in ['top', 'right', 'left', 'bottom']:
            ax[a].spines[s].set_visible(False)
    plt.savefig(save_dir / 'labels.jpg', dpi=200)
    matplotlib.use('Agg')  # restore the module-level non-interactive backend
    plt.close()
def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()
    """Plot hyperparameter-evolution results from an ``evolve.csv`` file.

    One scatter panel is drawn per hyperparameter (fitness vs. value), the
    best run is marked with a '+', and the figure is saved next to the CSV
    with a '.png' suffix.
    """
    evolve_csv = Path(evolve_csv)
    frame = pd.read_csv(evolve_csv)
    keys = [col.strip() for col in frame.columns]
    values = frame.values
    f = fitness(values)
    best = np.argmax(f) # row index of the max-fitness run
    plt.figure(figsize=(10, 12), tight_layout=True)
    matplotlib.rc('font', size=8)
    # Columns 0-6 are bookkeeping; hyperparameters start at column 7.
    for i, k in enumerate(keys[7:]):
        v = values[:, 7 + i]
        mu = v[best] # best single result
        plt.subplot(6, 5, i + 1)
        plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
        plt.plot(mu, f.max(), 'k+', markersize=15)
        plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters
        if i % 5: # keep y ticks only on the first panel of each row
            plt.yticks([])
        print(f'{k:>15}: {mu:.3g}')
    f = evolve_csv.with_suffix('.png') # filename
    plt.savefig(f, dpi=200)
    plt.close()
    print(f'Saved {f}')
def plot_results(file='path/to/results.csv', dir=''):
    # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')
    """Plot training metrics from every 'results*.csv' in a directory onto a
    2x5 grid and save the figure as 'results.png' in the same directory.

    Args:
        file: path to a results.csv; its parent directory is used.
        dir: directory to scan instead when `file` is empty.
    """
    save_dir = Path(file).parent if file else Path(dir)
    fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
    ax = ax.ravel()
    files = list(save_dir.glob('results*.csv'))
    assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.'
    for fi, f in enumerate(files):
        try:
            data = pd.read_csv(f)
            s = [x.strip() for x in data.columns]
            x = data.values[:, 0]
            # Fixed column remap: places losses/metrics on the intended subplots.
            for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
                y = data.values[:, j]
                # y[y == 0] = np.nan # don't show zero values
                ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8)
                ax[i].set_title(s[j], fontsize=12)
                # if j in [8, 9, 10]: # share train and val loss y axes
                # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
        except Exception as e:
            # Best effort: one malformed CSV should not abort the other files.
            print(f'Warning: Plotting error for {f}: {e}')
    ax[1].legend()
    fig.savefig(save_dir / 'results.png', dpi=200)
    plt.close()
def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
    # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
    """Plot per-image iDetection profiling logs ('frames*.txt') on a 2x4 grid.

    Args:
        start: first frame index to plot.
        stop: last frame index (exclusive); 0 means "to the end".
        labels: optional per-file legend labels; falls back to file stems.
        save_dir: directory scanned for 'frames*.txt' and receiving the PNG.
    """
    ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
    s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
    files = list(Path(save_dir).glob('frames*.txt'))
    for fi, f in enumerate(files):
        try:
            results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows
            n = results.shape[1] # number of rows
            x = np.arange(start, min(stop, n) if stop else n)
            results = results[:, x]
            # Elapsed time relative to the first kept sample.
            t = (results[0] - results[0].min()) # set t0=0s
            results[0] = x
            for i, a in enumerate(ax):
                if i < len(results):
                    label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
                    a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
                    a.set_title(s[i])
                    a.set_xlabel('time (s)')
                    # if fi == len(files) - 1:
                    # a.set_ylim(bottom=0)
                    for side in ['top', 'right']:
                        a.spines[side].set_visible(False)
                else:
                    # More subplots than logged series: drop the unused axis.
                    a.remove()
        except Exception as e:
            print(f'Warning: Plotting error for {f}; {e}')
    ax[1].legend()
    plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):
    # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop
    """Crop a box from an image, optionally save it to disk, and return the crop.

    Args:
        xyxy: box coordinates (x1, y1, x2, y2); anything torch.tensor accepts.
        im: HWC image array to crop from.
        file: destination path for the saved crop (suffix forced to '.jpg').
        gain: multiplicative margin applied to the box width/height.
        pad: additive pixel margin applied to the box width/height.
        square: if True, expand the box to a square before gain/pad.
        BGR: if True, keep channel order as-is (no reversal) for cv2.imwrite.
        save: if True, write the crop under an auto-incremented path.

    Returns:
        The cropped image array.
    """
    xyxy = torch.tensor(xyxy).view(-1, 4)
    b = xyxy2xywh(xyxy) # boxes
    if square:
        b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square
    b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad
    xyxy = xywh2xyxy(b).long()
    clip_coords(xyxy, im.shape)
    crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
    if save:
        # Bug fix: `file` may be a plain str (the default 'image.jpg' is one),
        # which has no `.parent`; coerce to Path before using Path methods.
        file = Path(file)
        file.parent.mkdir(parents=True, exist_ok=True) # make directory
        cv2.imwrite(str(increment_path(file).with_suffix('.jpg')), crop)
    return crop
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch.nn.functional import interpolate
from ..convolution import ConvNormActivation1d, ConvNormActivation2d
from .utils import split_cat
class MultiHeadAttention2d(nn.Module):
    """Multi-head (cross-)attention layer over 2D feature maps.

    Queries and keys are computed from ``x``; values are computed from ``y``,
    so attention weights derived from ``x`` are applied to ``y``.  With
    ``residual`` enabled, a learned per-head ``gamma`` blends the attended
    output with a (possibly projected) copy of ``y``.
    """
    def __init__(self,
                 in_channels: int,
                 out_channels: Optional[int] = None,
                 y_in_channels: Optional[int] = None,
                 y_out_channels: Optional[int] = None,
                 n_heads: int = 1,
                 residual: bool = True,
                 kernel_size: Union[int, Tuple[int, int]] = 1,
                 interpolation_mode: Optional[str] = 'nearest'):
        super().__init__()
        self.in_channels = in_channels
        # Fall back to symmetric channel counts when y-side sizes are omitted.
        self.out_channels = in_channels if out_channels is None else out_channels
        self.y_in_channels = self.in_channels if y_in_channels is None else y_in_channels
        self.y_out_channels = self.out_channels if y_out_channels is None else y_out_channels
        self.n_heads = n_heads
        self.residual = residual
        self.interpolation_mode = interpolation_mode
        # The convolutions below emit all heads at once, hence the widened counts.
        self.out_channels *= n_heads
        self.y_out_channels *= n_heads
        self.query_conv = ConvNormActivation2d(self.in_channels, self.out_channels, kernel_size, padding='stride_effective') # TODO@ShivamPR21: Padding `same` applied though proxy (`stride_effective, stride=1`) for onnx support
        self.key_conv = ConvNormActivation2d(self.in_channels, self.out_channels, kernel_size, padding='stride_effective') # TODO@ShivamPR21: Padding `same` applied though proxy (`stride_effective, stride=1`) for onnx support
        self.value_conv = ConvNormActivation2d(self.y_in_channels, self.y_out_channels, kernel_size, padding='stride_effective') # TODO@ShivamPR21: Padding `same` applied though proxy (`stride_effective, stride=1`) for onnx support
        # gamma as the shape of expanded dims with n_heads, so [n_heads, 1, _, _, ...]
        self.gamma = nn.Parameter(torch.rand((n_heads, 1, 1) if n_heads == 1 else (n_heads, 1, 1, 1))+0.001) if self.residual else None
        # Identity reuse of y is only possible when channel counts already match
        # AND no residual blend is requested; otherwise a 1x1 projection maps y
        # to the per-head output channel count.
        self.proj = 'id' if self.y_out_channels//self.n_heads == self.y_in_channels and not self.residual else 'projection'
        self.projection = ConvNormActivation2d(self.y_in_channels, self.y_out_channels//n_heads, 1, bias=False) if self.proj == 'projection' else None
        self.softmax = nn.Softmax(dim=-1)
    def forward(self, x:torch.Tensor, y:torch.Tensor) -> torch.Tensor:
        """
        inputs :
            x : input feature maps( B X C X W X H)
            y : feature map attention to be applied
        returns :
            out : self attention value or + input feature
        """
        b, c, w, h = x.size()
        B, C, W, H = y.size()
        assert(b == B)
        # NOTE(review): interpolation triggers only when BOTH spatial dims
        # differ; if exactly one of w/h differs from W/H the later
        # view(B, -1, W*H) will fail.  This may have been intended as `or` --
        # confirm upstream before changing.
        if w != W and h != H:
            assert(self.interpolation_mode is not None)
            x = interpolate(x, (W, H), mode=self.interpolation_mode) # B, c, W, H
        proj_query = self.query_conv(x).view(B, -1, W*H) # B X (C/r*n) X N
        proj_key = self.key_conv(x).view(B, -1, W*H) # B X (C/r*n) X N
        proj_value = self.value_conv(y).view(B, -1, W*H) # B X (OC*n) X N
        if self.n_heads != 1:
            # Fold the heads into the batch dimension so bmm runs per head.
            split_size = [self.out_channels//self.n_heads, self.out_channels//self.n_heads, self.y_out_channels//self.n_heads]
            # n_heads*B X (C/r) X N, n_heads*B X (C/r) X N, n_heads*B X OC X N
            proj_query, proj_key, proj_value = \
                split_cat(proj_query, split_size[0], 1, 0), split_cat(proj_key, split_size[1], 1, 0), split_cat(proj_value, split_size[2], 1, 0)
        proj_query = proj_query.permute(0, 2, 1) # n_heads*B X N X (C/r)
        energy = torch.bmm(proj_query,proj_key) # transpose check # n_heads*B X N X N
        attention = self.softmax(energy) # n_heads*B X N X N
        out = torch.bmm(proj_value, attention.permute(0, 2, 1)) # n_heads*B X OC X N
        if self.residual:
            if self.projection is not None:
                y = self.projection(y).view(B, -1, W*H) # B X OC X W*H
            if self.n_heads != 1:
                out = split_cat(out, B, 0, -1) # n_heads X B X OC X N
                y = y.unsqueeze(dim=0).repeat(self.n_heads, 1, 1, 1) # n_head X B X OC X N
            # Learned per-head blend of attended output with the residual y.
            out = (self.gamma*out + y)/(1 + self.gamma)
            if self.n_heads != 1:
                out = split_cat(out, 1, 0, 2).squeeze(dim=0) # B X OC*n_heads X N
            # NOTE(review): this path reshapes with self.out_channels while the
            # non-residual path below uses self.y_out_channels; consistent only
            # when the two are equal -- verify for asymmetric configurations.
            out = out.view(B, self.out_channels, W, H) # B X OC*n_heads X W X H
            return out
        out = split_cat(out, B, 0, 1) # B X OC*n_heads X N
        out = out.view(B, self.y_out_channels, W, H) # B X OC*n_heads X W X H
        return out
    def shape(self, in_shape: Tuple[int, int], y_in_shape: Tuple[int, int]):
        # The output spatial shape always follows y's spatial shape.
        return y_in_shape
class MultiHeadAttention1d(nn.Module):
    """Multi-head (cross-)attention layer over 1D feature maps.

    1D analogue of ``MultiHeadAttention2d``: queries/keys are derived from
    ``x`` and values from ``y``; with ``residual`` a learned per-head
    ``gamma`` blends the attended output with (projected) ``y``.
    """
    def __init__(self,
                 in_channels: int,
                 out_channels: Optional[int] = None,
                 y_in_channels: Optional[int] = None,
                 y_out_channels: Optional[int] = None,
                 n_heads: int = 1,
                 residual: bool = True,
                 kernel_size: Union[int, Tuple[int, int]] = 1,
                 interpolation_mode: Optional[str] = 'nearest'):
        super().__init__()
        self.in_channels = in_channels
        # Fall back to symmetric channel counts when y-side sizes are omitted.
        self.out_channels = in_channels if out_channels is None else out_channels
        self.y_in_channels = self.in_channels if y_in_channels is None else y_in_channels
        self.y_out_channels = self.out_channels if y_out_channels is None else y_out_channels
        self.n_heads = n_heads
        self.residual = residual
        self.interpolation_mode = interpolation_mode
        # The convolutions below emit all heads at once, hence the widened counts.
        self.out_channels *= n_heads
        self.y_out_channels *= n_heads
        self.query_conv = ConvNormActivation1d(self.in_channels, self.out_channels, kernel_size, padding='stride_effective') # TODO@ShivamPR21: Padding `same` applied though proxy (`stride_effective, stride=1`) for onnx support
        self.key_conv = ConvNormActivation1d(self.in_channels, self.out_channels, kernel_size, padding='stride_effective') # TODO@ShivamPR21: Padding `same` applied though proxy (`stride_effective, stride=1`) for onnx support
        self.value_conv = ConvNormActivation1d(self.y_in_channels, self.y_out_channels, kernel_size, padding='stride_effective') # TODO@ShivamPR21: Padding `same` applied though proxy (`stride_effective, stride=1`) for onnx support
        # gamma as the shape of expanded dims with n_heads, so [n_heads, 1, _, _, ...]
        self.gamma = nn.Parameter(torch.rand((n_heads, 1, 1) if n_heads == 1 else (n_heads, 1, 1, 1))+0.001) if self.residual else None
        # Identity reuse of y only when channels already match AND no residual;
        # otherwise a 1x1 projection maps y to the per-head output width.
        self.proj = 'id' if self.y_out_channels//self.n_heads == self.y_in_channels and not self.residual else 'projection'
        self.projection = ConvNormActivation1d(self.y_in_channels, self.y_out_channels//self.n_heads, 1, bias=False) if self.proj == 'projection' else None
        self.softmax = nn.Softmax(dim=-1)
    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """
        inputs :
            x : input feature maps( B X C X N)
            y : feature map attention to be applied
        returns :
            out : self attention value or + input feature
        """
        b, c, n = x.size()
        B, C, N = y.size()
        assert(b == B)
        # Resample x along its length so it matches y before projecting.
        if n != N:
            assert(self.interpolation_mode is not None)
            x = interpolate(x, N, mode=self.interpolation_mode) # B, c, N
        proj_query = self.query_conv(x) # B X (C/r*n) X N
        proj_key = self.key_conv(x) # B X (C/r*n) X N
        proj_value = self.value_conv(y) # B X (OC*n) X N
        if self.n_heads != 1:
            # Fold the heads into the batch dimension so bmm runs per head.
            split_size = [self.out_channels//self.n_heads, self.out_channels//self.n_heads, self.y_out_channels//self.n_heads]
            # n_heads*B X (C/r) X N, n_heads*B X (C/r) X N, n_heads*B X OC X N
            proj_query, proj_key, proj_value = \
                split_cat(proj_query, split_size[0], 1, 0), split_cat(proj_key, split_size[1], 1, 0), split_cat(proj_value, split_size[2], 1, 0)
        proj_query = proj_query.permute(0, 2, 1) # n_heads*B X N X (C/r)
        energy = torch.bmm(proj_query,proj_key) # transpose check # n_heads*B X N X N
        attention = self.softmax(energy) # n_heads*B X N X N
        out = torch.bmm(proj_value, attention.permute(0, 2, 1)) # n_heads*B X OC X N
        if self.residual:
            if self.projection is not None:
                y = self.projection(y) # B X OC X N
            if self.n_heads != 1:
                out = split_cat(out, B, 0, -1) # n_heads X B X OC X N
                y = y.unsqueeze(dim=0).repeat(self.n_heads, 1, 1, 1) # n_head X B X OC X N
            # Learned per-head blend of attended output with the residual y.
            out = (self.gamma*out + y)/(1 + self.gamma)
            if self.n_heads != 1:
                out = split_cat(out, 1, 0, 2).squeeze(dim=0) # B X OC*n_heads X N
            return out
        out = split_cat(out, B, 0, 1) # B X OC*n_heads X N
        return out
    def shape(self, in_shape: int, y_in_shape: int):
        # The output length always follows y's length.
        return y_in_shape
class MultiHeadSelfAttention2d(MultiHeadAttention2d):
    """Self-attention convenience wrapper: queries, keys and values all come
    from the same 2D feature map ``x``."""
    def __init__(self,
                 in_channels: int,
                 out_channels: Optional[int] = None,
                 n_heads: int = 1,
                 residual: bool = True,
                 kernel_size: Union[int, Tuple[int, int]] = 1):
        # x and y always share a shape here, so interpolation is never needed
        # and interpolation_mode is pinned to None.
        super().__init__(in_channels,
                         out_channels,
                         y_in_channels=None,
                         y_out_channels=None,
                         n_heads=n_heads,
                         residual=residual,
                         kernel_size=kernel_size,
                         interpolation_mode=None)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Cross-attention of x with itself.
        return super().forward(x, x)
class MultiHeadSelfAttention1d(MultiHeadAttention1d):
    """Self-attention convenience wrapper: queries, keys and values all come
    from the same 1D feature map ``x``."""
    def __init__(self,
                 in_channels: int,
                 out_channels: Optional[int] = None,
                 n_heads: int = 1,
                 residual: bool = True,
                 kernel_size: Union[int, Tuple[int, int]] = 1):
        # x and y always share a shape here, so interpolation is never needed
        # and interpolation_mode is pinned to None.
        super().__init__(in_channels,
                         out_channels,
                         y_in_channels=None,
                         y_out_channels=None,
                         n_heads=n_heads,
                         residual=residual,
                         kernel_size=kernel_size,
                         interpolation_mode=None)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Cross-attention of x with itself.
        return super().forward(x, x)
import mufsim.gamedb as db
import mufsim.stackitems as si
from mufsim.interface import network_interface as netifc
from mufsim.logger import log
from mufsim.errors import MufRuntimeError
from mufsim.insts.base import Instruction, instr
@instr("descriptors")
class InstDescriptors(Instruction):
def execute(self, fr):
who = fr.data_pop_dbref()
if who.value == -1:
descrs = netifc.get_descriptors()
else:
if db.getobj(who).objtype != "player":
raise MufRuntimeError("Expected #-1 or player dbref.")
descrs = netifc.user_descrs(who.value)
for descr in descrs:
fr.data_push(descr)
fr.data_push(len(descrs))
@instr("descr_array")
class InstDescrArray(Instruction):
def execute(self, fr):
who = fr.data_pop_dbref()
if who.value == -1:
descrs = netifc.get_descriptors()
else:
if db.getobj(who).objtype != "player":
raise MufRuntimeError("Expected #-1 or player dbref.")
descrs = netifc.user_descrs(who.value)
fr.data_push_list(descrs)
@instr("descrcon")
class InstDescrCon(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
fr.data_push(netifc.descr_con(descr))
@instr("descrdbref")
class InstDescrDBRef(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
ref = netifc.descr_dbref(descr)
obj = si.DBRef(ref)
fr.data_push(obj)
@instr("descr_setuser")
class InstDescrSetUser(Instruction):
def execute(self, fr):
fr.check_underflow(3)
pw = fr.data_pop(str)
who = fr.data_pop_object()
descr = fr.data_pop(int)
if who.objtype != "player":
raise MufRuntimeError("Expected player dbref.")
was = netifc.descr_dbref(descr)
if db.getobj(who).password != pw:
raise MufRuntimeError("Incorrect password!")
if netifc.descr_set_user(descr, who.dbref):
was = db.getobj(was)
# TODO: actually check password?
log("RECONNECTED DESCRIPTOR %d FROM %s TO %s USING PW '%s'" %
(descr, was, who, pw))
@instr("descrboot")
class InstDescrBoot(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
who = netifc.descr_dbref(descr)
if netifc.descr_disconnect(descr):
log("BOOTED DESCRIPTOR %d: %s" % (descr, db.getobj(who)))
@instr("descrnotify")
class InstDescrNotify(Instruction):
def execute(self, fr):
fr.check_underflow(2)
msg = fr.data_pop(str)
descr = fr.data_pop(int)
who = netifc.descr_dbref(descr)
if netifc.is_descr_online(descr):
log("NOTIFY TO DESCR %d, %s: %s" %
(descr, db.getobj(who), msg))
@instr("descrflush")
class InstDescrFlush(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
if descr == -1:
netifc.flush_all_descrs()
log("DESCRFLUSH ALL DESCRS.")
elif netifc.is_descr_online(descr):
netifc.descr_flush(descr)
who = netifc.descr_dbref(descr)
log("DESCRFLUSH %d, %s" % (descr, db.getobj(who)))
@instr("descr")
class InstDescr(Instruction):
def execute(self, fr):
# TODO: get real descr.
fr.data_push(db.getobj(fr.user).descr)
@instr("firstdescr")
class InstFirstDescr(Instruction):
def execute(self, fr):
who = fr.data_pop_dbref()
if who.value < 0:
descrs = netifc.get_descriptors()
else:
descrs = netifc.user_descrs(who.value)
if descrs:
fr.data_push(descrs[0])
else:
fr.data_push(0)
@instr("lastdescr")
class InstLastDescr(Instruction):
def execute(self, fr):
who = fr.data_pop_dbref()
if who.value < 0:
descrs = netifc.get_descriptors()
else:
descrs = netifc.user_descrs(who.value)
if descrs:
fr.data_push(descrs[-1])
else:
fr.data_push(0)
@instr("nextdescr")
class InstNextDescr(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
descrs = netifc.get_descriptors()
if descr in descrs:
pos = descrs.index(descr) + 1
if pos >= len(descrs):
fr.data_push(0)
else:
fr.data_push(descrs[pos])
else:
fr.data_push(0)
@instr("descrbufsize")
class InstDescrBufSize(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
fr.data_push(netifc.descr_bufsize(descr))
@instr("descrsecure?")
class InstDescrSecureP(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
fr.data_push(1 if netifc.descr_secure(descr) else 0)
@instr("descruser")
class InstDescrUser(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
who = netifc.descr_user(descr)
if who >= 0:
fr.data_push(db.getobj(who).name)
else:
fr.data_push("")
@instr("descrhost")
class InstDescrHost(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
fr.data_push(netifc.descr_host(descr))
@instr("descrtime")
class InstDescrTime(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
fr.data_push(int(netifc.descr_time(descr)))
@instr("descridle")
class InstDescrIdle(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
fr.data_push(int(netifc.descr_idle(descr)))
@instr("descrleastidle")
class InstDescrLeastIdle(Instruction):
def execute(self, fr):
who = fr.data_pop_object()
descrs = netifc.user_descrs(who.dbref)
idles = [netifc.descr_idle(descr) for descr in descrs]
fr.data_push(min(idles))
@instr("descrmostidle")
class InstDescrMostIdle(Instruction):
def execute(self, fr):
who = fr.data_pop_object()
descrs = netifc.user_descrs(who.dbref)
idles = [netifc.descr_idle(descr) for descr in descrs]
fr.data_push(max(idles))
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
set -euxo pipefail
# Dataset accession processed by this script.
GSE=GSE111586
# Marker gene lists (comma-separated file names) for each annotation source:
# SF (Stefan), TA (Tasic 2016), CU (Cusanovich 2018), TN (Tasic 2018).
sf_marker='GABAergic_markers_fc.txt,Glutamatergic_markers_fc.txt,Non.Neuronal_markers_fc.txt'
ta_marker='tasic2016_gaba.csv,tasic2016_glu.csv,tasic2016_gli.csv'
cu_marker='cusanovich2018_inh.txt,cusanovich2018_ext.txt,cusanovich2018_gli.txt'
tn_marker='tasic2018_gaba.txt,tasic2018_glu.txt,tasic2018_gli.txt'
# Shared Catactor options: cell filtering and dimensionality-reduction params.
OPTIONS=" --na_filtering --cfilter genome_flag --verbose "
DR_OPTIONS=" --pca 40 --tsne-params nn=15,perplexity=40,learning_rate=100 "
if [ -z "$1" ]; then
METHOD="preprocess"
else
METHOD=$1
fi
if [ -z "$2" ]; then
MDIR="../marker_genes/"
else
MDIR=$2
fi
if [ -z "$3" ]; then
DDIR="../mat_data/${GSE}"
else
DDIR=$3
fi
# Per-sample inputs (comma-separated triplets: Wholebrain1, Wholebrain2, Prefrontal).
COLUMN_DATA=${GSE}_bin_ng_Wholebrain1_with_bins_annot.csv,${GSE}_bin_ng_Wholebrain2_with_bins_annot.csv,${GSE}_bin_ng_Prefrontal_with_bins_annot.csv
ROW_DATA=${GSE}_cell_ng_Wholebrain1_meta.csv,${GSE}_cell_ng_Wholebrain2_meta.csv,${GSE}_cell_ng_Prefrontal_meta.csv
MAT_DATA=${GSE}_sparse_mat_Wholebrain1.mtx,${GSE}_sparse_mat_Wholebrain2.mtx,${GSE}_sparse_mat_Prefrontal.mtx
# Candidate cluster-label columns in the cell metadata.
CLUSTERS=Ident,cluster_leiden,cluster_louvain,id,cell_label,celltype
REFERENCE=
# Run Catactor for every distance mode x marker source combination.
for dist in gene proximal distal
do
    # Map each distance mode to its bin-annotation column label.
    if [ $dist == "distal" ]; then
        clabel="id_order_distal"
    elif [ $dist == "proximal" ]; then
        clabel="id_proximal"
    else
        clabel="id_order_gene"
    fi
    for marker in stefan tasic cusanovich ntasic
    do
        # Select the marker-gene file list and directory for this source.
        if [ $marker == "stefan" ]; then
            marker_file=$sf_marker
            mdir="${MDIR}/SF_markers/"
        elif [ $marker == "tasic" ]; then
            marker_file=$ta_marker
            mdir="${MDIR}/TA_markers/"
        elif [ $marker == "cusanovich" ]; then
            marker_file=$cu_marker
            mdir="${MDIR}/CU_markers/"
        else
            marker_file=$tn_marker
            mdir="${MDIR}/TN_markers/"
        fi
        # 'average'/'preprocess'/'test' break after the first marker source
        # (marker choice does not matter for those modes); 'rank' runs all.
        if [ "$METHOD" == "average" ]; then
            Catactor --update $OPTIONS $DR_OPTIONS --gene-name '' --clabel global_index_5000 --cindex global_index_5000 --rindex local_index --rlabel '' \
                --dir $DDIR --adir $DDIR --row $ROW_DATA --column $COLUMN_DATA \
                --mdir $mdir --markers $marker_file --cluster $CLUSTERS \
                --output ${GSE}_${dist} preprocess $MAT_DATA
            break
        elif [ "$METHOD" == "preprocess" ]; then
            Catactor $OPTIONS $DR_OPTIONS --gene-name gene_name --clabel $clabel --cindex global_index_5000 --rindex local_index --rlabel '' \
                --dir $DDIR --adir $DDIR --row $ROW_DATA --column $COLUMN_DATA \
                --mdir $mdir --markers $marker_file --cluster $CLUSTERS \
                --output ${GSE}_${dist} preprocess $MAT_DATA
            break
        elif [ "$METHOD" == "test" ]; then
            Catactor $OPTIONS --update --test-vis --clabel $clabel --cindex global_index_5000 --rindex local_index --rlabel '' \
                --dir $DDIR --adir $DDIR --row $ROW_DATA --column $COLUMN_DATA \
                --mdir $mdir --markers $marker_file --cluster $CLUSTERS \
                --output ${GSE}_${dist} visualization $MAT_DATA
            break
        elif [ "${METHOD}" == "rank" ]; then
            Catactor $OPTIONS $DR_OPTIONS --gene-name gene_name --clabel $clabel --cindex global_index_5000 --rindex local_index --rlabel '' \
                --dir $DDIR --adir $DDIR --row $ROW_DATA --column $COLUMN_DATA \
                --mdir $mdir --markers $marker_file --cluster $CLUSTERS \
                --output ${GSE}_${dist} visualization $MAT_DATA
        fi
    done
done
import ast
import json
import threading
import os
from copy import deepcopy
from util import user_dir, print_error, print_msg, print_stderr, PrintError
from bitcoin import MAX_FEE_RATE, FEE_TARGETS
# Location of the optional system-wide configuration file.
SYSTEM_CONFIG_PATH = "/etc/electrum-vtc.conf"
# Module-level singleton SimpleConfig, managed via get_config()/set_config().
config = None
def get_config():
    """Return the module-level singleton SimpleConfig (None if not yet set)."""
    global config
    return config
def set_config(c):
    """Install `c` as the module-level singleton SimpleConfig."""
    global config
    config = c
class SimpleConfig(PrintError):
    """
    The SimpleConfig class is responsible for handling operations involving
    configuration files.
    There are 3 different sources of possible configuration values:
        1. Command line options.
        2. User configuration (in the user's config directory)
        3. System configuration (in /etc/)
    They are taken in order (1. overrides config options set in 2., that
    override config set in 3.)
    """
    def __init__(self, options={}, read_system_config_function=None,
                 read_user_config_function=None, read_user_dir_function=None):
        # NOTE(review): mutable default `options={}` is shared across calls,
        # but it is deep-copied below before any mutation, so this is safe.
        # This lock needs to be acquired for updating and reading the config in
        # a thread-safe way.
        self.lock = threading.RLock()
        # Fee estimates keyed by confirmation target; populated externally.
        self.fee_estimates = {}
        # The following two functions are there for dependency injection when
        # testing.
        if read_system_config_function is None:
            read_system_config_function = read_system_config
        if read_user_config_function is None:
            read_user_config_function = read_user_config
        if read_user_dir_function is None:
            self.user_dir = user_dir
        else:
            self.user_dir = read_user_dir_function
        # The command line options
        self.cmdline_options = deepcopy(options)
        # Portable wallets don't use a system config
        if self.cmdline_options.get('portable', False):
            self.system_config = {}
        else:
            self.system_config = read_system_config_function()
        # Set self.path and read the user config
        self.user_config = {} # for self.get in electrum_path()
        self.path = self.electrum_path()
        self.user_config = read_user_config_function(self.path)
        # Upgrade obsolete keys
        self.fixup_keys({'auto_cycle': 'auto_connect'})
        # Make a singleton instance of 'self'
        set_config(self)
    def electrum_path(self):
        """Return (creating it if necessary) the Electrum data directory."""
        # Read electrum_path from command line / system configuration
        # Otherwise use the user's default data directory.
        path = self.get('electrum_path')
        if path is None:
            path = self.user_dir()
        # Network variants get their own subdirectory.
        if self.get('testnet'):
            path = os.path.join(path, 'testnet')
        elif self.get('nolnet'):
            path = os.path.join(path, 'nolnet')
        # Make directory if it does not yet exist.
        if not os.path.exists(path):
            if os.path.islink(path):
                raise BaseException('Dangling link: ' + path)
            os.mkdir(path)
        self.print_error("electrum directory", path)
        return path
    def fixup_config_keys(self, config, keypairs):
        """Rename old_key -> new_key in `config`; return True if changed."""
        updated = False
        for old_key, new_key in keypairs.iteritems():
            if old_key in config:
                # Do not clobber a value already stored under the new key.
                if not new_key in config:
                    config[new_key] = config[old_key]
                del config[old_key]
                updated = True
        return updated
    def fixup_keys(self, keypairs):
        '''Migrate old key names to new ones'''
        self.fixup_config_keys(self.cmdline_options, keypairs)
        self.fixup_config_keys(self.system_config, keypairs)
        # Only the user config is persisted back to disk after migration.
        if self.fixup_config_keys(self.user_config, keypairs):
            self.save_user_config()
    def set_key(self, key, value, save = True):
        """Set `key` in the user config; optionally persist it to disk."""
        # Command-line options always win and cannot be overridden at runtime.
        if not self.is_modifiable(key):
            print_stderr("Warning: not changing config key '%s' set on the command line" % key)
            return
        with self.lock:
            self.user_config[key] = value
            if save:
                self.save_user_config()
        return
    def get(self, key, default=None):
        """Look up `key`: cmdline options, then user config, then system config."""
        with self.lock:
            out = self.cmdline_options.get(key)
            if out is None:
                out = self.user_config.get(key)
                if out is None:
                    out = self.system_config.get(key, default)
        return out
    def is_modifiable(self, key):
        """A key is modifiable unless it was fixed on the command line."""
        return not key in self.cmdline_options
    def save_user_config(self):
        """Serialize the user config to JSON and write it to <path>/config."""
        if not self.path:
            return
        path = os.path.join(self.path, "config")
        s = json.dumps(self.user_config, indent=4, sort_keys=True)
        f = open(path, "w")
        f.write(s)
        f.close()
        # Restrict permissions to the owner (config may hold sensitive data);
        # Android storage does not support chmod.
        if 'ANDROID_DATA' not in os.environ:
            import stat
            os.chmod(path, stat.S_IREAD | stat.S_IWRITE)
    def get_wallet_path(self):
        """Set the path of the wallet."""
        # command line -w option
        if self.get('wallet_path'):
            return os.path.join(self.get('cwd'), self.get('wallet_path'))
        # path in config file
        path = self.get('default_wallet_path')
        if path and os.path.exists(path):
            return path
        # default path
        dirpath = os.path.join(self.path, "wallets")
        if not os.path.exists(dirpath):
            if os.path.islink(dirpath):
                raise BaseException('Dangling link: ' + dirpath)
            os.mkdir(dirpath)
        new_path = os.path.join(self.path, "wallets", "default_wallet")
        # default path in pre 1.9 versions
        old_path = os.path.join(self.path, "electrum-vtc.dat")
        if os.path.exists(old_path) and not os.path.exists(new_path):
            os.rename(old_path, new_path)
        return new_path
    def remove_from_recently_open(self, filename):
        """Drop `filename` from the persisted recently-opened-wallets list."""
        recent = self.get('recently_open', [])
        if filename in recent:
            recent.remove(filename)
            self.set_key('recently_open', recent)
    def set_session_timeout(self, seconds):
        """Persist the session timeout (used by hardware-wallet sessions)."""
        self.print_error("session timeout -> %d seconds" % seconds)
        self.set_key('session_timeout', seconds)
    def get_session_timeout(self):
        """Return the session timeout in seconds (default 300)."""
        return self.get('session_timeout', 300)
    def open_last_wallet(self):
        """Default to the last GUI-opened wallet when no -w option was given."""
        if self.get('wallet_path') is None:
            last_wallet = self.get('gui_last_wallet')
            if last_wallet is not None and os.path.exists(last_wallet):
                self.cmdline_options['default_wallet_path'] = last_wallet
    def save_last_wallet(self, wallet):
        """Remember the wallet path for the next open_last_wallet() call."""
        if self.get('wallet_path') is None:
            path = wallet.storage.path
            self.set_key('gui_last_wallet', path)
    def max_fee_rate(self):
        """Return the configured fee-rate ceiling (0 falls back to MAX_FEE_RATE)."""
        f = self.get('max_fee_rate', MAX_FEE_RATE)
        if f==0:
            f = MAX_FEE_RATE
        return f
    def dynfee(self, i):
        """Return the dynamic fee for level `i` in 0..4, or None if unknown."""
        if i < 4:
            j = FEE_TARGETS[i]
            fee = self.fee_estimates.get(j)
        else:
            assert i == 4
            # Level 4 extrapolates: 1.5x the 2-block estimate.
            fee = self.fee_estimates.get(2)
            if fee is not None:
                fee += fee/2
        # Clamp to a hard ceiling regardless of the estimate.
        if fee is not None:
            fee = min(5*MAX_FEE_RATE, fee)
        return fee
    def reverse_dynfee(self, fee_per_kb):
        """Return the fee target whose dynamic estimate is closest to `fee_per_kb`.

        Returns -1 ("static") when the rate is below half the 25-block estimate.
        """
        import operator
        l = self.fee_estimates.items() + [(1, self.dynfee(4))]
        dist = map(lambda x: (x[0], abs(x[1] - fee_per_kb)), l)
        min_target, min_value = min(dist, key=operator.itemgetter(1))
        # NOTE(review): fee_estimates.get(25) may be None if estimates are not
        # yet fetched, which would raise on the division -- confirm callers
        # only invoke this after has_fee_estimates() is True.
        if fee_per_kb < self.fee_estimates.get(25)/2:
            min_target = -1
        return min_target
    def has_fee_estimates(self):
        """True once all four dynamic fee targets have been received."""
        return len(self.fee_estimates)==4
    def is_dynfee(self):
        """True when dynamic (estimate-based) fees are enabled."""
        return self.get('dynamic_fees', False)
    def fee_per_kb(self):
        """Return the effective fee rate: dynamic estimate or static setting."""
        dyn = self.is_dynfee()
        if dyn:
            fee_rate = self.dynfee(self.get('fee_level', 2))
        else:
            fee_rate = self.get('fee_per_kb', self.max_fee_rate()/10)
        return fee_rate
    def get_video_device(self):
        """Return the configured QR-scanner video device ('' means default)."""
        device = self.get("video_device", "default")
        if device == 'default':
            device = ''
        return device
def read_system_config(path=SYSTEM_CONFIG_PATH):
"""Parse and return the system config settings in /etc/electrum-vtc.conf."""
result = {}
if os.path.exists(path):
try:
import ConfigParser
except ImportError:
print "cannot parse electrum-vtc.conf. please install ConfigParser"
return
p = ConfigParser.ConfigParser()
try:
p.read(path)
for k, v in p.items('client'):
result[k] = v
except (ConfigParser.NoSectionError, ConfigParser.MissingSectionHeaderError):
pass
return result
def read_user_config(path):
    """Parse and store the user config settings in electrum-vtc.conf into user_config[].

    Reads the JSON file `<path>/config` and returns its contents as a dict.
    Returns {} when path is empty, the file is missing/unreadable, the JSON
    is invalid, or the decoded value is not a dict.
    """
    if not path:
        return {}
    config_path = os.path.join(path, "config")
    if not os.path.exists(config_path):
        return {}
    try:
        with open(config_path, "r") as f:
            data = f.read()
        result = json.loads(data)
    except Exception:
        # Bug fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; any read/parse failure still
        # degrades gracefully to an empty config.
        print_error("Warning: Cannot read config file.", config_path)
        return {}
    # Reject well-formed JSON that is not an object (e.g. a list).
    if not isinstance(result, dict):
        return {}
    return result
/NREL-gaps-0.4.3.tar.gz/NREL-gaps-0.4.3/gaps/cli/execution.py | import logging
import datetime as dt
from warnings import warn
from inspect import signature
from gaps.hpc import submit
from gaps.status import (
DT_FMT,
Status,
HardwareOption,
StatusOption,
StatusField,
QOSOption,
)
from gaps.warnings import gapsWarning
from gaps.exceptions import gapsConfigError
logger = logging.getLogger(__name__)
def kickoff_job(ctx, cmd, exec_kwargs):
"""Kickoff a single job (a single command execution).
Parameters
----------
ctx : click.Context
Context object with a `.obj` attribute that contains at least
the following keys:
NAME : str
Job name.
OUT_DIR : path-like
Path to output directory.
COMMAND_NAME : str
Name of command being run.
cmd : str
String form of command to kickoff.
exec_kwargs : dict
Keyword-value pairs to pass to the respective `submit` function.
These will be filtered, so they may contain extra values. If
some required inputs are missing from this dictionary, a
`gapsConfigError` is raised.
Raises
------
gapsConfigError
If `exec_kwargs` is missing some arguments required by the
respective `submit` function.
"""
hardware_option = HardwareOption(exec_kwargs.pop("option", "local"))
if hardware_option.manager is None:
_kickoff_local_job(ctx, cmd)
return
ctx.obj["MANAGER"] = hardware_option.manager
exec_kwargs = _filter_exec_kwargs(
exec_kwargs, hardware_option.manager.make_script_str, hardware_option
)
_kickoff_hpc_job(ctx, cmd, hardware_option, **exec_kwargs)
def _filter_exec_kwargs(kwargs, func, hardware_option):
"""Filter out extra keywords and raise error if any are missing."""
sig = signature(func)
kwargs_to_use = {
k: v for k, v in kwargs.items() if k in sig.parameters.keys()
}
extra_keys = set(kwargs) - set(kwargs_to_use)
if extra_keys:
msg = (
f"Found extra keys in 'execution_control'! The following "
f"inputs will be ignored: {extra_keys}. To silence this warning, "
"please remove the extra keys from the 'execution_control' block."
)
warn(msg, gapsWarning)
required = {
name for name, p in sig.parameters.items() if p.default == p.empty
}
required -= {"self", "cmd", "name"}
missing = {k for k in required if k not in kwargs_to_use}
if missing:
msg = (
f"The 'execution_control' block is missing the following "
f"required keys: {missing}"
)
raise gapsConfigError(msg)
if hardware_option.supports_categorical_qos:
qos = kwargs_to_use.get("qos", "normal")
try:
qos = QOSOption(qos)
except ValueError as err:
msg = (
f"Requested Quality-of-service option ({qos!r}) not "
f"recognized! Available options are: "
f"{QOSOption.members_as_str()}."
)
raise gapsConfigError(msg) from err
kwargs_to_use["qos"] = f"{qos}"
return kwargs_to_use
def _kickoff_local_job(ctx, cmd):
"""Run a job (command) locally."""
if not _should_run(ctx):
return
name = ctx.obj["NAME"]
command = ctx.obj["COMMAND_NAME"]
logger.info("Running %r locally with job name %r.", command, name)
logger.debug("Submitting the following command:\n%s", cmd)
Status.mark_job_as_submitted(
ctx.obj["OUT_DIR"],
command=ctx.obj["COMMAND_NAME"],
job_name=name,
replace=True,
job_attrs={
StatusField.JOB_STATUS: StatusOption.SUBMITTED,
StatusField.HARDWARE: HardwareOption.LOCAL,
StatusField.TIME_SUBMITTED: dt.datetime.now().strftime(DT_FMT),
},
)
stdout, stderr = submit(cmd)
if stdout:
logger.info("Subprocess received stdout: \n%s", stdout)
if stderr:
logger.warning("Subprocess received stderr: \n%s", stderr)
msg = f"Completed job {name!r}."
logger.info(msg)
def _kickoff_hpc_job(ctx, cmd, hardware_option, **kwargs):
"""Run a job (command) on the HPC."""
if not _should_run(ctx):
return
name = ctx.obj["NAME"]
command = ctx.obj["COMMAND_NAME"]
logger.debug("Submitting the following command:\n%s", cmd)
out = ctx.obj["MANAGER"].submit(name, cmd=cmd, **kwargs)[0]
id_msg = f" (Job ID #{out})" if out else ""
msg = f"Kicked off {command!r} job {name!r}{id_msg}"
Status.mark_job_as_submitted(
ctx.obj["OUT_DIR"],
command=ctx.obj["COMMAND_NAME"],
job_name=name,
replace=True,
job_attrs={
StatusField.JOB_ID: out,
StatusField.HARDWARE: hardware_option,
StatusField.QOS: kwargs.get("qos") or QOSOption.UNSPECIFIED,
StatusField.JOB_STATUS: StatusOption.SUBMITTED,
StatusField.TIME_SUBMITTED: dt.datetime.now().strftime(DT_FMT),
},
)
logger.info(msg)
def _should_run(ctx):
    """Determine whether a command should be run based on status."""
    name = ctx.obj["NAME"]
    out_dir = ctx.obj["OUT_DIR"]
    status = Status.retrieve_job_status(
        out_dir,
        command=ctx.obj["COMMAND_NAME"],
        job_name=name,
        subprocess_manager=ctx.obj.get("MANAGER"),
    )
    # Never submitted before: safe to run.
    if status == StatusOption.NOT_SUBMITTED:
        return True
    # Finished successfully: don't redo the work.
    if status in {StatusOption.SUCCESSFUL, StatusOption.COMPLETE}:
        msg = (
            f"Job {name!r} is successful in status json found in {out_dir!r}, "
            f"not re-running."
        )
        logger.info(msg)
        return False
    # Any other known, non-failed status (e.g. submitted/running) means the
    # job is in flight; skip to avoid duplicate submissions.  Failed statuses
    # fall through so the job gets resubmitted.
    if status is not None and "fail" not in str(status).lower():
        msg = (
            f"Job {name!r} was found with status {status!r}, not resubmitting"
        )
        logger.info(msg)
        return False
    return True
/GxSphinx-1.0.0.tar.gz/GxSphinx-1.0.0/sphinx/io.py | import codecs
import warnings
from typing import Any, List
from docutils import nodes
from docutils.core import Publisher
from docutils.frontend import Values
from docutils.io import FileInput, Input, NullOutput
from docutils.parsers import Parser
from docutils.parsers.rst import Parser as RSTParser
from docutils.readers import standalone
from docutils.transforms import Transform
from docutils.transforms.references import DanglingReferences
from docutils.writers import UnfilteredWriter
from sphinx.deprecation import RemovedInSphinx40Warning, deprecated_alias
from sphinx.environment import BuildEnvironment
from sphinx.errors import FiletypeNotFoundError
from sphinx.transforms import (
AutoIndexUpgrader, DoctreeReadEvent, FigureAligner, SphinxTransformer
)
from sphinx.transforms.i18n import (
PreserveTranslatableMessages, Locale, RemoveTranslatableInline,
)
from sphinx.transforms.references import SphinxDomains
from sphinx.util import logging, get_filetype
from sphinx.util import UnicodeDecodeErrorHandler
from sphinx.util.docutils import LoggingReporter
from sphinx.versioning import UIDTransform
if False:
# For type annotation
from typing import Type # for python3.5.1
from sphinx.application import Sphinx
logger = logging.getLogger(__name__)
class SphinxBaseReader(standalone.Reader):
    """
    A base class of readers for Sphinx.

    This replaces reporter by Sphinx's on generating document.
    """

    # Extra transforms contributed by subclasses; merged with the docutils
    # defaults in get_transforms().
    transforms = []  # type: List[Type[Transform]]

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        from sphinx.application import Sphinx
        # Backwards compatibility: a Sphinx application object may still be
        # passed as the first positional argument (pre-setup() style).
        if len(args) > 0 and isinstance(args[0], Sphinx):
            self._app = args[0]
            self._env = self._app.env
            args = args[1:]
        super().__init__(*args, **kwargs)

    @property
    def app(self) -> "Sphinx":
        # Deprecated accessor kept for API compatibility until Sphinx 4.0.
        warnings.warn('SphinxBaseReader.app is deprecated.',
                      RemovedInSphinx40Warning, stacklevel=2)
        return self._app

    @property
    def env(self) -> BuildEnvironment:
        # Deprecated accessor kept for API compatibility until Sphinx 4.0.
        warnings.warn('SphinxBaseReader.env is deprecated.',
                      RemovedInSphinx40Warning, stacklevel=2)
        return self._env

    def setup(self, app: "Sphinx") -> None:
        self._app = app      # hold application object only for compatibility
        self._env = app.env

    def get_transforms(self) -> List["Type[Transform]"]:
        """Return the docutils transforms minus the ones Sphinx replaces."""
        transforms = super().get_transforms() + self.transforms

        # remove transforms which is not needed for Sphinx
        unused = [DanglingReferences]
        for transform in unused:
            if transform in transforms:
                transforms.remove(transform)

        return transforms

    def new_document(self) -> nodes.document:
        """Creates a new document object which having a special reporter object good
        for logging.
        """
        document = super().new_document()

        # substitute transformer
        document.transformer = SphinxTransformer(document)
        document.transformer.set_environment(self.settings.env)

        # substitute reporter
        reporter = document.reporter
        document.reporter = LoggingReporter.from_reporter(reporter)

        return document
class SphinxStandaloneReader(SphinxBaseReader):
    """
    A basic document reader for Sphinx.
    """

    def setup(self, app: "Sphinx") -> None:
        # Pick up transforms registered by extensions before base setup.
        self.transforms = self.transforms + app.registry.get_transforms()
        super().setup(app)

    def read(self, source: Input, parser: Parser, settings: Values) -> nodes.document:
        """Parse *source* into a document tree and return it."""
        self.source = source
        if not self.parser:
            self.parser = parser
        self.settings = settings
        self.input = self.read_source(settings.env)
        self.parse()
        return self.document

    def read_source(self, env: BuildEnvironment) -> str:
        """Read content from source and do post-process."""
        content = self.source.read()

        # emit "source-read" event
        # (handlers may rewrite the content in place via the mutable list)
        arg = [content]
        env.events.emit('source-read', env.docname, arg)
        return arg[0]
class SphinxI18nReader(SphinxBaseReader):
    """
    A document reader for i18n.

    This returns the source line number of original text as current source line number
    to let users know where the error happened.
    Because the translated texts are partial and they don't have correct line numbers.
    """

    def setup(self, app: "Sphinx") -> None:
        super().setup(app)

        self.transforms = self.transforms + app.registry.get_transforms()
        # Remove transforms that must not run (again) on translated, partial
        # source text.
        unused = [PreserveTranslatableMessages, Locale, RemoveTranslatableInline,
                  AutoIndexUpgrader, FigureAligner, SphinxDomains, DoctreeReadEvent,
                  UIDTransform]
        for transform in unused:
            if transform in self.transforms:
                self.transforms.remove(transform)
class SphinxDummyWriter(UnfilteredWriter):
    """Dummy writer module used for generating doctree."""

    supported = ('html',)  # needed to keep "meta" nodes

    def translate(self) -> None:
        # Intentionally a no-op: the doctree itself is the result.
        pass
def SphinxDummySourceClass(source: Any, *args: Any, **kwargs: Any) -> Any:
    """Return *source* unchanged; stands in for a source class to cheat Publisher."""
    return source
class SphinxFileInput(FileInput):
    """A basic FileInput for Sphinx."""
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Use the 'sphinx' codec error handler (registered in read_doc) so
        # decoding problems are reported against the current document.
        kwargs['error_handler'] = 'sphinx'
        super().__init__(*args, **kwargs)
def read_doc(app: "Sphinx", env: BuildEnvironment, filename: str) -> nodes.document:
    """Parse a document and convert to doctree."""
    # set up error_handler for the target document
    error_handler = UnicodeDecodeErrorHandler(env.docname)
    codecs.register_error('sphinx', error_handler)  # type: ignore

    reader = SphinxStandaloneReader()
    reader.setup(app)
    filetype = get_filetype(app.config.source_suffix, filename)
    parser = app.registry.create_source_parser(app, filetype)
    if parser.__class__.__name__ == 'CommonMarkParser' and parser.settings_spec == ():
        # a workaround for recommonmark
        # If recommonmark.AutoStrictify is enabled, the parser invokes reST parser
        # internally. But recommonmark-0.4.0 does not provide settings_spec for reST
        # parser. As a workaround, this copies settings_spec for RSTParser to the
        # CommonMarkParser.
        parser.settings_spec = RSTParser.settings_spec

    input_class = app.registry.get_source_input(filetype)
    if input_class:
        # Sphinx-1.8 style
        source = input_class(app, env, source=None, source_path=filename,  # type: ignore
                             encoding=env.config.source_encoding)
        pub = Publisher(reader=reader,
                        parser=parser,
                        writer=SphinxDummyWriter(),
                        source_class=SphinxDummySourceClass,  # type: ignore
                        destination=NullOutput())
        pub.process_programmatic_settings(None, env.settings, None)
        pub.set_source(source, filename)
    else:
        # Sphinx-2.0 style
        pub = Publisher(reader=reader,
                        parser=parser,
                        writer=SphinxDummyWriter(),
                        source_class=SphinxFileInput,
                        destination=NullOutput())
        pub.process_programmatic_settings(None, env.settings, None)
        pub.set_source(source_path=filename)

    pub.publish()
    return pub.document
# Keep the old importable names working (with a warning) until Sphinx 4.0
# removes them; they moved to sphinx.errors / sphinx.util.
deprecated_alias('sphinx.io',
                 {
                     'FiletypeNotFoundError': FiletypeNotFoundError,
                     'get_filetype': get_filetype,
                 },
                 RemovedInSphinx40Warning)
/dragonflow-4.0.0.tar.gz/dragonflow-4.0.0/dragonflow/neutron/common/dhcp_opt_map.py |
import dragonflow.common.constants as const
from dragonflow.common import dhcp
# dnsmasq-style DHCP option names -> option tag numbers.
# "server-ip-address" is special: it maps to the BOOTP 'siaddr' header field
# rather than a numbered option.
dnsmasq_opts = {
    "netmask": 1,
    "router": 3,
    "dns-server": 6,
    "log-server": 7,
    "lpr-server": 9,
    "hostname": 12,
    "domain-name": 15,
    "swap-server": 16,
    "root-path": 17,
    "extension-path": 18,
    "policy-filter": 21,
    "broadcast": 28,
    "router-solicitation": 32,
    "static-route": 33,
    "nis-domain": 40,
    "nis-server": 41,
    "ntp-server": 42,
    "vendor-encap": 43,
    "netbios-ns": 44,
    "netbios-dd": 45,
    "x-windows-fs": 48,
    "x-windows-dm": 49,
    "requested-address": 50,
    "lease-time": 51,
    "option-overload": 52,
    "message-type": 53,
    "server-identifier": 54,
    "parameter-request": 55,
    "message": 56,
    "max-message-size": 57,
    "T1": 58,
    "T2": 59,
    "client-id": 61,
    "nis+-domain": 64,
    "nis+-server": 65,
    "tftp-server": 66,
    "bootfile-name": 67,
    "mobile-ip-home": 68,
    "smtp-server": 69,
    "pop3-server": 70,
    "nntp-server": 71,
    "irc-server": 74,
    "FQDN": 81,
    "agent-id": 82,
    "subnet-select": 118,
    "domain-search": 119,
    "server-ip-address": "siaddr"
}
# ISC dhcpd-style DHCP option names -> option tag numbers.
dhcpd_ops = {
    "subnet-mask": 1,
    "time-offset": 2,
    "routers": 3,
    "time-servers": 4,
    "ien116-name-servers": 5,
    "domain-name-servers": 6,
    "log-servers": 7,
    "cookie-servers": 8,
    "lpr-servers": 9,
    "impress-servers": 10,
    "resource-location-servers": 11,
    "host-name": 12,
    "boot-size": 13,
    "merit-dump": 14,
    "domain-name": 15,
    "swap-server": 16,
    "root-path": 17,
    "extensions-path": 18,
    "ip-forwarding": 19,
    "non-local-source-routing": 20,
    "policy-filter": 21,
    "max-dgram-reassembly": 22,
    "default-ip-ttl": 23,
    "path-mtu-aging-timeout": 24,
    "path-mtu-plateau-table": 25,
    "interface-mtu": 26,
    "all-subnets-local": 27,
    "broadcast-address": 28,
    "perform-mask-discovery": 29,
    "mask-supplier": 30,
    "router-discovery": 31,
    "router-solicitation-address": 32,
    "static-routes": 33,
    "trailer-encapsulation": 34,
    "arp-cache-timeout": 35,
    "ieee802-3-encapsulation": 36,
    "default-tcp-ttl": 37,
    "tcp-keepalive-interval": 38,
    "tcp-keepalive-garbage": 39,
    "nis-domain": 40,
    "nis-servers": 41,
    "ntp-servers": 42,
    "vendor-encapsulated-options": 43,
    "netbios-name-servers": 44,
    "netbios-dd-server": 45,
    "netbios-node-type": 46,
    "netbios-scope": 47,
    "font-servers": 48,
    "x-display-manager": 49,
    "dhcp-requested-address": 50,
    "dhcp-lease-time": 51,
    "dhcp-option-overload": 52,
    "dhcp-message-type": 53,
    "dhcp-server-identifier": 54,
    "dhcp-parameter-request-list": 55,
    "dhcp-message": 56,
    "dhcp-max-message-size": 57,
    "dhcp-renewal-time": 58,
    "dhcp-rebinding-time": 59,
    "vendor-class-identifier": 60,
    "dhcp-client-identifier": 61,
    "nwip-domain": 62,
    "nwip-suboptions": 63,
    "nisplus-domain": 64,
    "nisplus-servers": 65,
    "tftp-server-name": 66,
    "bootfile-name": 67,
    "mobile-ip-home-agent": 68,
    "smtp-server": 69,
    "pop-server": 70,
    "nntp-server": 71,
    "www-server": 72,
    "finger-server": 73,
    "irc-server": 74,
    "streettalk-server": 75,
    "user-class": 77,
    "slp-directory-agent": 78,
    "slp-service-scope": 79,
    "fqdn": 81,
    "relay-agent-information": 82,
    "nds-servers": 85,
    "nds-tree-name": 86,
    "nds-context": 87,
    "bcms-controller-names": 88,
    "bcms-controller-address": 89,
    "client-last-transaction-time": 91,
    "associated-ip": 92,
    "pxe-system-type": 93,
    "pxe-interface-id": 94,
    "pxe-client-id": 97,
    "uap-servers": 98,
    "geoconf-civic": 99,
    "pcode": 100,
    "tcode": 101,
    "netinfo-server-address": 112,
    "netinfo-server-tag": 113,
    "default-url": 114,
    "auto-config": 116,
    "name-service-search": 117,
    "subnet-selection": 118,
    "domain-search": 119,
    "vivco": 124,
    "vivso": 125,
    "pxe-undefined-1": 128,
    "pxe-undefined-2": 129,
    "pxe-undefined-3": 130,
    "pxe-undefined-4": 131,
    "pxe-undefined-5": 132,
    "pxe-undefined-6": 133,
    "pxe-undefined-7": 134,
    "pxe-undefined-8": 135,
    "pana-agent": 136,
    "v4-lost": 137,
    "capwap-ac-v4": 138,
    "sip-ua-cs-domains": 141,
    "ipv4-address-andsf": 142,
    "rdnss-selection": 146,
    "tftp-server-address": 150,
    "v4-portparams": 159,
    "v4-captive-portal": 160,
    "pxelinux-magic": 208,
    "loader-configfile": 209,
    "loader-pathprefix": 210,
    "loader-reboottime": 211,
    "option-6rd": 212,
    "v4-access-domain": 213
}
# Combined lookup used by dhcp_app_tag_by_user_tag(); dhcpd names take
# precedence over dnsmasq names for keys present in both dicts.
opt_mapping = {}
opt_mapping.update(dnsmasq_opts)
opt_mapping.update(dhcpd_ops)
def dhcp_app_tag_by_user_tag(usr_tag):
    """Translate a user-supplied DHCP option tag (number or name) into the
    tag used by the DHCP app, or None if it cannot be resolved."""
    # A numeric tag is used as-is, provided it is a valid DHCP tag number.
    try:
        numeric_tag = int(usr_tag)
        if dhcp.is_tag_valid(numeric_tag):
            return numeric_tag
    except ValueError:
        # Not an integer literal - treat it as a symbolic option name below.
        pass
    # The special 'siaddr' pseudo-option passes through unchanged.
    if usr_tag == const.DHCP_SIADDR:
        return usr_tag
    # Fall back to the dnsmasq/dhcpd name mapping (None when unknown).
    return opt_mapping.get(usr_tag)
/BRLibraryDocumentor-1.0.0-py3-none-any.whl/brLibrarydocumentor/docBuilder.py | import argparse
import os
from pathlib import Path
from os import makedirs, path, listdir
from functionblock import ASLibraryDataType, ASLibraryFunction, ASLibrary, BlockSection
from jinja2 import Environment, FileSystemLoader
def BuildFunctionBlockPage(funct : ASLibraryFunction) -> None:
    """Render the HTML documentation page for a single function block.

    Writes ./Output/<name>.html using the functionBlockPage.html template.
    """
    # Create the output directory if needed (no-op when it already exists).
    os.makedirs('./Output/', exist_ok=True)
    # Mode "w" truncates any existing file, so no explicit delete is needed.
    with open(f'./Output/{funct._name}.html', "w") as f:
        file_loader = FileSystemLoader("./src/Templates/")
        env = Environment(loader=file_loader)
        template = env.get_template("functionBlockPage.html")
        f.write(template.render(functionBlock=funct))
# Output all of the datatype files from the library's typ file(s)
def BuildTypePage(typ : ASLibraryDataType) -> None:
    """Render the HTML documentation page for a single structure datatype.

    Writes ./Output/DataTypes/<name>.html using the libDatatypePage.html template.
    """
    # makedirs also creates the parent ./Output directory when missing
    # (the original os.mkdir would fail in that case).
    os.makedirs('./Output/DataTypes', exist_ok=True)
    # Mode "w" truncates any existing file, so no explicit delete is needed.
    with open(f'./Output/DataTypes/{typ._name}.html', "w") as f:
        file_loader = FileSystemLoader("./src/Templates/")
        env = Environment(loader=file_loader)
        template = env.get_template("libDatatypePage.html")
        f.write(template.render(datatype=typ))
# Output all of the enumeration pages from the library's typ file(s)
def BuildEnumPage(typ : ASLibraryDataType) -> None:
    """Render the HTML documentation page for a single enumeration datatype.

    Writes ./Output/DataTypes/<name>.html using the libEnumPage.html template.
    """
    # makedirs also creates the parent ./Output directory when missing
    # (the original os.mkdir would fail in that case).
    os.makedirs('./Output/DataTypes', exist_ok=True)
    # Mode "w" truncates any existing file, so no explicit delete is needed.
    with open(f'./Output/DataTypes/{typ._name}.html', "w") as f:
        file_loader = FileSystemLoader("./src/Templates/")
        env = Environment(loader=file_loader)
        template = env.get_template("libEnumPage.html")
        f.write(template.render(datatype=typ))
# Entry point for running the script standalone.
def main() -> None:
    """Parse CLI arguments, load the library declaration files and emit pages."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--path', help='Library File Path',
                        dest='libraryFilePath', required=True)
    args = parser.parse_args()

    # Collect the AS library declaration files (.typ/.var/.fun) in the folder.
    source_files = []
    for entry in os.listdir(os.path.join(args.libraryFilePath)):
        if Path(entry).suffix in ('.typ', '.var', '.fun'):
            source_files.append(os.path.join(args.libraryFilePath, entry))

    # Currently only supports 1 type, fun and var file!!
    fun_file_contents = ''
    typ_file_contents = ''
    for file_path in source_files:
        if file_path.endswith('.typ'):
            with open(file_path) as file:
                typ_file_contents = file.read()
        elif file_path.endswith('.fun'):
            with open(file_path) as file:
                fun_file_contents = file.read()

    lib = ASLibrary(fun_file_contents, typ_file_contents)
    for funct in lib._functions:
        BuildFunctionBlockPage(funct)
    for typ in lib._datatypes:
        BuildTypePage(typ)
    for enum in lib._enums:
        BuildEnumPage(enum)


if __name__ == '__main__':
    main()
/Makechr-1.5.tar.gz/Makechr-1.5/makechr/guess_best_palette.py | import errors
import rgb
import palette
import partitions
from constants import *
class GuessBestPalette(object):
  """Derive a valid palette (background color + color sets) from the color
  needs collected per tile, merging partial color sets where possible.
  """

  def __init__(self):
    # Optional user-forced background color; None means auto-detect.
    self._bg_color = None

  def set_bg_color(self, bg_color):
    # Force the background color instead of deriving it from the image.
    self._bg_color = bg_color

  def is_subset(self, subject, target):
    """Return whether subject is a subset of target (equality allowed)."""
    return set(subject) <= set(target)

  def get_uniq_color_sets(self, color_manifest):
    """Get unique color sets, by removing duplicates and sorting.

    color_manifest: A dict of color sets.
    """
    seen = {}
    for color_needs in color_manifest:
      # Descending order, making it easy to do subset comparisons later.
      ordered_colors = sorted(color_needs, reverse=True)
      name = '-'.join(['%02x' % e for e in ordered_colors])
      seen[name] = ordered_colors
    return sorted(seen.values())

  def get_minimal_colors(self, uniq_color_sets):
    """Merge color sets and return the minimal set of needed color sets.

    uniq_color_sets: List of ordered color sets.
    """
    minimized = []
    for i, color_set in enumerate(uniq_color_sets):
      # Keep only the sets that are not contained in a later (larger) set.
      for j, target in enumerate(uniq_color_sets[i + 1:]):
        if self.is_subset(color_set, target):
          break
      else:
        minimized.append(color_set)
    return minimized

  def merge_color_sets(self, color_set_collection, merge_strategy):
    """Merge some elements of collection, and return a list of sets.

    Given a collection of sets, pick elements according to the strategy to
    return a collection of merged sets. For example, if color_set_collection
    is [A, B, C, D] where A through D are sets, and merge_strategy is
    [set([0, 2]), set([1, 3])] the return value is [merge(A|C), merge(B|D)].
    Returns None when any merged set would exceed the palette size.

    color_set_collection: Potentional color sets to be merged.
    merge_strategy: List of sets, where each set represents what to merge.
    """
    result = []
    for choices in merge_strategy:
      merged = set()
      for c in choices:
        merged |= set(color_set_collection[c])
      if len(merged) > PALETTE_SIZE:
        return None
      result.append(merged)
    return result

  def get_background_color(self, combined_colors):
    """Determine the global background color.

    Given a list of colors, return the best background color. Prefer
    black if possible, otherwise, use the smallest numerical value.
    Returns None when no color can serve as background for all full sets.

    combined_colors: List of color needs.
    """
    if self._bg_color is not None:
      return self._bg_color
    possibilities = set(combined_colors[0])
    recommendations = set(possibilities)
    for color_set in combined_colors[1:]:
      # Full sets have no free slot, so the background must already be in
      # them; partial sets only narrow the preferred recommendations.
      if len(color_set) == PALETTE_SIZE:
        possibilities = possibilities & set(color_set)
      recommendations = recommendations & set(color_set)
    if rgb.BLACK in possibilities:
      return rgb.BLACK
    if recommendations:
      return min(recommendations)
    if possibilities:
      return min(possibilities)
    return None

  def colors_have_space_for(self, bg_color, combined_colors):
    # A full color set that lacks the background color cannot take it on.
    for color_set in combined_colors:
      if not bg_color in color_set and len(color_set) == PALETTE_SIZE:
        return False
    return True

  def get_valid_combinations(self, finalized, remaining):
    """Calculate all valid combinations of the palette.

    Some of the color_sets are finalized (full PaletteOptions) the others
    remaining need to be merged. Try all possible combinations, and for each
    one determine the background color. Return all possibilities, at least 1.

    finalized: List of color sets that take up the full size.
    remaining: List of color sets that need to be merged.
    """
    merged_color_possibilities = []
    num_available = NUM_ALLOWED_PALETTES - len(finalized)
    # Enumerate every way to partition the remaining sets into groups.
    for merge_strategy in partitions.partitions(len(remaining)):
      if len(merge_strategy) > num_available:
        continue
      merged_colors = self.merge_color_sets(remaining, merge_strategy)
      if not merged_colors:
        continue
      combined_colors = finalized + merged_colors
      bg_color = self.get_background_color(combined_colors)
      if bg_color is None:
        continue
      if not self.colors_have_space_for(bg_color, combined_colors):
        continue
      merged_color_possibilities.append([bg_color, combined_colors])
    if not len(merged_color_possibilities):
      raise errors.PaletteTooManySubsets(finalized, to_merge=remaining)
    return merged_color_possibilities

  def get_merged_color_possibilities(self, minimal_colors):
    """Get all possible merged sets of colors.

    minimal_colors: Set of minimal needed colors.
    """
    finalized = []
    remaining = []
    # We know from earlier steps that minimal_colors is a set of color_sets
    # such that none are subsets of each other. However, some may have some
    # colors in common such that they could be merged. First, let's remove all
    # full palettes, leaving only those that might be mergable.
    for color_set in minimal_colors:
      if len(color_set) == PALETTE_SIZE:
        finalized.append(color_set)
      else:
        remaining.append(color_set)
    if remaining:
      # There are remaining unmerged palettes. Generate all valid combinations
      # of merged palettes, which may fail if there is no way to merge them.
      return self.get_valid_combinations(finalized, remaining)
    elif len(finalized) > NUM_ALLOWED_PALETTES:
      # The number of necessary palettes is more than the number allowed.
      raise errors.TooManyPalettesError(minimal_colors)
    else:
      # There is only one valid combination.
      bg_color = self.get_background_color(finalized)
      return [[bg_color, finalized]]

  def get_palette(self, possibilities):
    """Pick a single palette.

    Given list of possible palettes, just pick and build the first one.

    possibilities: List of possible palettes, must have at least one element.
    """
    (bg_color, color_set_collection) = possibilities[0]
    pal = palette.Palette()
    pal.set_bg_color(bg_color)
    # Each palette row starts with the background, then the remaining colors
    # in descending order.
    for color_set in color_set_collection:
      pal.add([bg_color] + sorted([c for c in color_set if c != bg_color],
                                  reverse=True))
    return pal

  def guess_palette(self, color_needs_list):
    # Pipeline: dedupe -> drop subsets -> enumerate merges -> build palette.
    uniq_color_sets = self.get_uniq_color_sets(color_needs_list)
    minimal_colors = self.get_minimal_colors(uniq_color_sets)
    possibilities = self.get_merged_color_possibilities(minimal_colors)
    return self.get_palette(possibilities)
/IBATS_Common-0.20.11-py3-none-any.whl/ibats_common/backend/factor.py | import datetime
import logging
import bisect
import ffn # NOQA
import numpy as np
import pandas as pd
import talib
from ibats_utils.mess import date_2_str
logger = logging.getLogger(__name__)
DEFAULT_OHLCV_COL_NAME_LIST = ["open", "high", "low", "close", "amount", "volume"]
def add_factor_of_trade_date(df: pd.DataFrame, trade_date_series):
    """
    Add calendar-day and trade-date related factor columns to *df*.

    :param df: time-series data whose index is a DatetimeIndex of trade dates
    :param trade_date_series: Series of all trade dates (datetime-like)
    :return: *df* with the new factor columns appended
    """
    # --- calendar-day factors -------------------------------------------------
    index_s = pd.Series(df.index)
    df['dayofweek'] = index_s.apply(lambda x: x.dayofweek).to_numpy()
    df['day'] = index_s.apply(lambda x: x.day).to_numpy()
    df['month'] = index_s.apply(lambda x: x.month).to_numpy()
    df['daysleftofmonth'] = index_s.apply(lambda x: x.days_in_month - x.day).to_numpy()

    # --- n-th trade date of the month / trade dates left in the month ---------
    groups = trade_date_series.groupby(trade_date_series.apply(lambda x: datetime.datetime(x.year, x.month, 1)))

    def get_td_of_month(dt):
        first_day_of_month = datetime.datetime(dt.year, dt.month, 1)
        month_s = groups.get_group(first_day_of_month)
        td_of_month = (month_s <= dt).sum()
        td_left_of_month = (month_s > dt).sum()
        return month_s.shape[0], td_of_month, td_left_of_month

    result_arr = index_s.apply(get_td_of_month).to_numpy()
    df['td_of_month'] = [_[0] for _ in result_arr]
    df['td_pass_of_month'] = [_[1] for _ in result_arr]
    df['td_left_of_month'] = [_[2] for _ in result_arr]

    # --- n-th trade date of the week / trade dates left in the week -----------
    # ``Timestamp.weekofyear`` was removed in pandas 2.0; ``isocalendar()[1]``
    # yields the same ISO week number on both old and new pandas versions.
    week_groups = trade_date_series.groupby(
        trade_date_series.apply(lambda x: x.year * 100 + x.isocalendar()[1]))

    def get_td_of_week(dt):
        week_s = week_groups.get_group(dt.year * 100 + dt.isocalendar()[1])
        td_pass_of_week = (week_s <= dt).sum()
        td_left_of_week = (week_s > dt).sum()
        return week_s.shape[0], td_pass_of_week, td_left_of_week

    result_arr = index_s.apply(get_td_of_week).to_numpy()
    df['td_of_week'] = [_[0] for _ in result_arr]
    df['td_pass_of_week'] = [_[1] for _ in result_arr]
    df['td_left_of_week'] = [_[2] for _ in result_arr]

    # --- trade dates / calendar days until the next long break (> 3 days off) -
    # calendar days from each trade date to the next one (0 for the last date)
    days_2_next_trade_date_s = (trade_date_series.shift(-1) - trade_date_series).fillna(pd.Timedelta(days=0))
    days_2_next_trade_date_s.index = pd.DatetimeIndex(trade_date_series)
    # Walk backwards through the trade dates: reset both counters to 0 when a
    # gap longer than 3 days (a "vacation") follows, otherwise accumulate.
    days_count, trade_date_count, result_dic, first_date = np.nan, np.nan, {}, index_s[0]
    for trade_date, delta in days_2_next_trade_date_s.sort_index(ascending=False).items():
        if trade_date < first_date:
            break
        days = delta.days
        if days > 3:
            trade_date_count, days_count = 0, 0
        elif not pd.isna(days_count) and days >= 1:
            # BUGFIX: the original condition ``pd.isna(days_count) >= 0`` was
            # always true; NaN propagation made the stored results identical
            # to this explicit check, which now states the actual intent.
            trade_date_count += 1
            days_count += days
        else:
            trade_date_count, days_count = np.nan, np.nan
        result_dic[trade_date] = [days_count, trade_date_count]
    vacation_df = pd.DataFrame(result_dic).T.sort_index().rename(columns={0: 'days_2_vacation', 1: 'td_2_vacation'})
    df = df.join(vacation_df, how='left')
    return df
def add_factor_of_delivery_date(df, delivery_date_series):
    """
    Add columns with the number of calendar days from each trade date to the
    next two contract delivery dates.

    :param df: time-series data whose index is a DatetimeIndex of trade dates
    :param delivery_date_series: Series of contract delivery dates
    :return: *df* with ``days_2_first_del_date`` and ``days_2_second_del_date``
    """
    index_s = pd.Series(df.index)
    result_arr = []
    for _, trade_date in index_s.items():
        next_2_date = delivery_date_series[delivery_date_series > trade_date].head(2)
        if next_2_date.shape[0] < 2:
            # fewer than two future delivery dates are known
            day_2_first_del_date, day_2_second_del_date = np.nan, np.nan
        else:
            # BUGFIX: use positional access.  After the boolean filter the
            # series keeps its original labels, so label-based
            # ``next_2_date[0]`` raised KeyError whenever the earliest
            # delivery dates were already in the past.
            first_del_date, second_del_date = next_2_date.iloc[0], next_2_date.iloc[1]
            day_2_first_del_date = (first_del_date - trade_date).days
            day_2_second_del_date = (second_del_date - trade_date).days
        result_arr.append([day_2_first_del_date, day_2_second_del_date])
    df['days_2_first_del_date'] = [_[0] for _ in result_arr]
    df['days_2_second_del_date'] = [_[1] for _ in result_arr]
    return df
def add_factor_of_price(df: pd.DataFrame, ohlcav_col_name_list=DEFAULT_OHLCV_COL_NAME_LIST,
                        drop=False, log_av=True, add_pct_change_columns=True, with_diff_n=True):
    """
    Compute price/volume based factors for the data.

    :param df:
    :param ohlcav_col_name_list: labels of the open/high/low/close/amount/volume
        columns; the amount entry may be None if no such column exists
    :param drop: drop rows containing NaN
    :param log_av: take the log of the amount and volume columns
    :param add_pct_change_columns: add pct_change columns for non-stationary series
    :param with_diff_n: add N-order diffs.  N-order diffs may cause
        factor_analysis to raise LinAlgError, so this factor can be switched
        off when running factor analysis.
    :return:
    """
    pct_change_columns = [_ for _ in ohlcav_col_name_list if _ is not None]
    open_key = ohlcav_col_name_list[0]
    high_key = ohlcav_col_name_list[1]
    low_key = ohlcav_col_name_list[2]
    close_key = ohlcav_col_name_list[3]
    amount_key = ohlcav_col_name_list[4]
    volume_key = ohlcav_col_name_list[5]
    open_s = df[open_key]
    high_s = df[high_key]
    low_s = df[low_key]
    close_s = df[close_key]
    amount_s = df[amount_key] if amount_key is not None else None
    volume_s = df[volume_key]
    # average deal price; falls back to a weighted OHLC mean when there is no
    # amount column or the volume is NaN
    if amount_s is None:
        deal_price_s = (open_s * 2 + high_s + low_s + close_s * 2) / 6
    else:
        deal_price_s = amount_s / volume_s
        deal_price_s[volume_s.isna()] = ((open_s * 2 + high_s + low_s + close_s * 2) / 6)[volume_s.isna()]
    df[f'deal_price'] = deal_price_s
    pct_change_columns.append('deal_price')
    # N-order price differences
    if with_diff_n:
        for _ in range(2, 4):
            # close diff(1) is already produced by the first-order differencing
            # of the raw columns in ohlcav_col_name_list, so it is not repeated
            df[f'diff{_}'] = close_s.diff(_)
    # moving-average factors
    df[f'rr'] = close_s.to_returns()
    for n in [5, 10, 15, 20, 30, 60, 120]:
        df[f'MA{n}'] = ma_n = close_s.rolling(n).mean()
        pct_change_columns.append(f'MA{n}')
        df[f'c-MA{n}'] = (close_s - ma_n) / close_s
        for m in [5, 10, 15, 20, 30, 60]:
            # MA-difference factors, named "MAm-MAn"
            if m >= n:
                continue
            df[f'MA{m}-MA{n}'] = (df[f'MA{m}'] - ma_n) / close_s
    # volatility factors
    expanding = close_s.expanding(5)
    df[f'volatility_all'] = expanding.std() / expanding.mean()
    for n in [10, 20, 30, 60]:
        df[f'volatility{n}'] = close_s.rolling(n).std() / close_s.rolling(n).mean()
    # standard deviation of returns
    rr = close_s.to_returns()
    for n in [5, 10, 20, 30, 60]:
        df[f'rr_std{n}'] = rr.rolling(n).std()
    # Accumulation/Distribution Line: daily volume accumulated with price
    # weighting, measuring volume momentum.  Trend-type factor.
    df['AD'] = talib.AD(high_s, low_s, close_s, volume_s)
    pct_change_columns.append(f'AD')
    # Chaikin Oscillator: computed from the EMA of the AD line.  Trend-type factor.
    df['ADOSC'] = talib.ADOSC(high_s, low_s, close_s, volume_s, fastperiod=3, slowperiod=10)
    pct_change_columns.append(f'ADOSC')
    # Average Directional Index, part of DMI.  Trend-type factor.
    df['ADX'] = talib.ADX(high_s, low_s, close_s, timeperiod=14)
    # ADX Rating, part of DMI.  Trend-type factor.
    df['ADXR'] = talib.ADXR(high_s, low_s, close_s, timeperiod=14)
    # Absolute Price Oscillator
    df['APO'] = talib.APO(close_s, fastperiod=12, slowperiod=26)
    # Aroon: number of periods since the recent high/low, used to anticipate
    # reversals; split into AroonUp, AroonDown and the oscillator.  Trend-type.
    df['AROONDown'], df['AROONUp'] = talib.AROON(high_s, low_s, timeperiod=14)
    df['AROONOSC'] = talib.AROONOSC(high_s, low_s, timeperiod=14)
    # Average True Range: moving average of the price range over a period,
    # showing the market's rate of change; used for timing.  Overbought/oversold.
    for n in [6, 14, 21]:
        df[f'ATR{n}'] = talib.ATR(high_s, low_s, close_s, timeperiod=n)
    # Bollinger bands
    df['Boll_Up'], df['Boll_Mid'], df['Boll_Down'] = \
        talib.BBANDS(close_s, timeperiod=20, nbdevup=2, nbdevdn=2, matype=0)
    pct_change_columns.extend(['Boll_Up', 'Boll_Mid', 'Boll_Down'])
    # Balance of Power
    df['BOP'] = talib.BOP(open_s, high_s, low_s, close_s)
    # Commodity Channel Index: measures whether the price has moved outside its
    # normal distribution range.  Overbought/oversold-type factor.
    for n in [5, 10, 20, 88]:
        # BUGFIX: timeperiod was hard-coded to 5, so CCI10/CCI20/CCI88 were
        # all duplicates of CCI5 and the loop variable was unused.
        df[f'CCI{n}'] = talib.CCI(high_s, low_s, close_s, timeperiod=n)
    # Chande Momentum Oscillator: unlike other momentum oscillators such as
    # RSI and KDJ, it uses both up-day and down-day data in the numerator.
    # Overbought/oversold-type factor.
    df['CMO_Close'] = talib.CMO(close_s, timeperiod=14)
    df['CMO_Open'] = talib.CMO(open_s, timeperiod=14)
    # DEMA double exponential moving average
    for n in [6, 12, 26]:
        df[f'DEMA{n}'] = talib.DEMA(close_s, timeperiod=n)
        pct_change_columns.append(f'DEMA{n}')
    # DX directional movement index
    df['DX'] = talib.DX(high_s, low_s, close_s, timeperiod=14)
    # EMA exponential moving average
    for n in [6, 12, 26, 60]:
        df[f'EMA{n}'] = talib.EMA(close_s, timeperiod=n)
        pct_change_columns.append(f'EMA{n}')
    # KAMA adaptive moving average
    for n in [5, 10, 20, 30, 60]:
        df[f'KAMA{n}'] = talib.KAMA(close_s, timeperiod=n)
        pct_change_columns.append(f'KAMA{n}')
    # MACD
    df['MACD_DIF'], df['MACD_DEA'], df['MACD_bar'] = \
        talib.MACD(close_s, fastperiod=12, slowperiod=24, signalperiod=9)
    # median price
    df['MEDPRICE'] = talib.MEDPRICE(high_s, low_s)
    pct_change_columns.append('MEDPRICE')
    # minus directional indicator / minus directional movement
    df['MiNUS_DI'] = talib.MINUS_DI(high_s, low_s, close_s, timeperiod=14)
    df['MiNUS_DM'] = talib.MINUS_DM(high_s, low_s, timeperiod=14)
    # Momentum: analyses the speed of price movement, studying acceleration,
    # deceleration and inertia of price swings.  Trend-type factor.
    df['MOM'] = talib.MOM(close_s, timeperiod=10)
    # Normalized Average True Range
    df['NATR'] = talib.NATR(high_s, low_s, close_s, timeperiod=14)
    # On Balance Volume: gauges the driving force of the market from volume
    # changes to judge the price trend.  Volume-type factor.
    df['OBV'] = talib.OBV(close_s, volume_s)
    pct_change_columns.append('OBV')
    # plus directional indicator / plus directional movement
    df['PLUS_DI'] = talib.PLUS_DI(high_s, low_s, close_s, timeperiod=14)
    df['PLUS_DM'] = talib.PLUS_DM(high_s, low_s, timeperiod=14)
    # Percentage Price Oscillator
    df['PPO'] = talib.PPO(close_s, fastperiod=6, slowperiod=26, matype=0)
    # Price Rate of Change: compares today's close with the close N days ago
    # to measure price momentum.  Overbought/oversold-type factor.
    for n in [6, 20]:
        df[f'ROC{n}'] = talib.ROC(close_s, timeperiod=n)
    # Volume Rate of Change: compares today's volume with the volume N days
    # ago to detect supply/demand strength and possible trend reversals; a
    # contrarian volume indicator.  Volume-type factor.
    for n in [6, 20]:
        df[f'VROC{n}'] = talib.ROC(volume_s, timeperiod=n)
    # Price Rate of Change (percentage form) of the close.
    for n in [6, 20]:
        df[f'ROCP{n}'] = talib.ROCP(close_s, timeperiod=n)
    # Volume Rate of Change (percentage form).
    for n in [6, 20]:
        df[f'VROCP{n}'] = talib.ROCP(volume_s, timeperiod=n)
    # RSI
    for n in [7, 14, 21, 28]:
        df[f'RSI{n}'] = talib.RSI(close_s, timeperiod=n)
    # SAR parabolic stop-and-reverse
    df['SAR'] = talib.SAR(high_s, low_s, acceleration=0.02, maximum=0.2)
    pct_change_columns.append('SAR')
    # TEMA
    for n in [6, 12, 26]:
        df[f'TEMA{n}'] = talib.TEMA(close_s, timeperiod=n)
        pct_change_columns.append(f'TEMA{n}')
    # TRANGE true range
    df['TRANGE'] = talib.TRANGE(high_s, low_s, close_s)
    # TYPPRICE typical price
    df['TYPPRICE'] = talib.TYPPRICE(high_s, low_s, close_s)
    pct_change_columns.append('TYPPRICE')
    # TSF time-series forecast
    for n in [7, 14, 21, 28]:
        df[f'TSF{n}'] = talib.TSF(close_s, timeperiod=n)
        pct_change_columns.append(f'TSF{n}')
    # ULTOSC ultimate oscillator
    df['ULTOSC'] = talib.ULTOSC(high_s, low_s, close_s, timeperiod1=7, timeperiod2=14, timeperiod3=28)
    # Williams %R
    df['WILLR'] = talib.WILLR(high_s, low_s, close_s, timeperiod=14)
    # percentile rank of the current price within all history seen so far
    data_list, data_count = [], [0]

    def get_index_pct(x):
        """Return the percentile position of the current price within the
        historical price list."""
        bisect.insort(data_list, x)
        data_count[0] += 1
        idx = bisect.bisect_left(data_list, x)
        return idx / data_count[0]

    df['index_pct'] = close_s.apply(get_index_pct)
    pct_change_columns.append('index_pct')
    # take the log of volume / amount
    if log_av:
        df[volume_key] = np.log(volume_s.fillna(0) + 1)
        if amount_key is not None:
            df[amount_key] = np.log(amount_s.fillna(0) + 1)
    # apply pct_change or first-order diff to the non-stationary factor
    # series, hoping to obtain new, stationary series
    if add_pct_change_columns:
        for name in pct_change_columns:
            _s = df[name]
            if _s.std() < 100:
                values = _s.diff(1)
                df[f'{name}_diff1'] = values
            else:
                values = df[name].pct_change()
                is_inf_values = np.isneginf(values)
                values[is_inf_values] = np.min(values[~is_inf_values])
                is_inf_values = np.isposinf(values)
                values[is_inf_values] = np.max(values[~is_inf_values])
                df[f'{name}_pct_chg'] = values
    if drop:
        df.dropna(inplace=True)
    return df
def get_factor(df: pd.DataFrame, trade_date_series=None, delivery_date_series=None,
               dropna=True, ohlcav_col_name_list=["open", "high", "low", "close", "amount", "volume"],
               do_multiple_factors=False, price_factor_kwargs=None) -> (pd.DataFrame, dict):
    """
    Augment a date-indexed time series with factor columns.
    Currently supported factor families: price/volume factors, trade-date factors
    and delivery-date factors.
    :param df: time-series data, indexed by date
    :param trade_date_series: series of trading dates (skipped when None)
    :param delivery_date_series: series of delivery dates (skipped when None)
    :param dropna: whether to drop rows containing NaN from the result
    :param ohlcav_col_name_list: column names used by the price/volume factors,
        default ["open", "high", "low", "close", "amount", "volume"]; pass None
        to skip price/volume factors entirely
    :param do_multiple_factors: multiply the price columns by several adjustment
        factors; when truthy, a dict {adj_factor: DataFrame} is returned instead
        of a single DataFrame
    :param price_factor_kwargs: optional kwargs forwarded to add_factor_of_price
    :return: DataFrame, or dict of DataFrames when do_multiple_factors is truthy
    """
    # NOTE(review): ohlcav_col_name_list uses a mutable (list) default argument;
    # safe only as long as no caller mutates it — consider a tuple default.
    price_factor_kwargs = {} if price_factor_kwargs is None else price_factor_kwargs
    ret_df = df.copy()
    # trade-date related factors
    if trade_date_series is not None:
        ret_df = add_factor_of_trade_date(ret_df, trade_date_series)
    # delivery-date related factors
    if delivery_date_series is not None:
        ret_df = add_factor_of_delivery_date(ret_df, delivery_date_series)
    # price/volume factors (optionally replicated under several price multipliers)
    train_df_dic = {}
    if ohlcav_col_name_list is not None:
        # keep a pristine copy so each multiplier starts from unmodified prices
        ret_df_tmp = ret_df.copy()
        ret_df = add_factor_of_price(ret_df, ohlcav_col_name_list, **price_factor_kwargs)
        if do_multiple_factors:
            if dropna:
                ret_df.dropna(inplace=True)
            # key 1 holds the unscaled baseline
            train_df_dic[1] = ret_df
            for adj_factor in [0.5, 0.75, 1.25, 1.5, 1.75, 2]:
                train_df_tmp = ret_df_tmp.copy()
                # scale only O, H, L, C, A (the first five columns); volume is untouched
                train_df_tmp.loc[:, ohlcav_col_name_list[:5]] *= adj_factor
                factor_df = add_factor_of_price(
                    train_df_tmp, ohlcav_col_name_list=ohlcav_col_name_list, **price_factor_kwargs)
                if dropna:
                    factor_df.dropna(inplace=True)
                train_df_dic[adj_factor] = factor_df
    if do_multiple_factors:
        # NOTE(review): when ohlcav_col_name_list is None this returns an empty dict
        return train_df_dic
    else:
        if dropna:
            ret_df.dropna(inplace=True)
        return ret_df
def _test_get_factor(do_multiple_factors=False):
    """Manual smoke test of ``get_factor`` against the bundled RU example data."""
    from ibats_common.example.data import load_data, get_trade_date_series, get_delivery_date_series
    instrument_type = 'RU'
    file_name = f"{instrument_type}.csv"
    indexed_df = load_data(file_name).set_index('trade_date').drop('instrument_type', axis=1)
    indexed_df.index = pd.DatetimeIndex(indexed_df.index)
    # Case 1: no price/volume factors (ohlcav_col_name_list is None)
    factor_df = get_factor(
        indexed_df,
        trade_date_series=get_trade_date_series(),
        delivery_date_series=get_delivery_date_series(instrument_type))
    logger.info("data_multiplication_column_indexes=None\n%s\t%s", factor_df.shape, list(factor_df.columns))
    # Case 2: full factor set, optionally replicated under price multipliers
    ohlcav_col_name_list = ["open", "high", "low", "close", "amount", "volume"]
    train_df_dic = get_factor(
        indexed_df, ohlcav_col_name_list=ohlcav_col_name_list,
        trade_date_series=get_trade_date_series(),
        delivery_date_series=get_delivery_date_series(instrument_type),
        do_multiple_factors=do_multiple_factors
    )
    logger.info("ohlcav_col_name_list=%s", ohlcav_col_name_list)
    if do_multiple_factors:
        for adj_factor, train_df in train_df_dic.items():
            logger.info("adj_factor=%f, train_df: first close=%f\n%s\t%s",
                        adj_factor, train_df['close'].iloc[0], train_df.shape, list(train_df.columns))
    else:
        train_df = train_df_dic
        logger.info("train_df: first close=%f\n%s\t%s",
                    train_df['close'].iloc[0], train_df.shape, list(train_df.columns))
        # sanity-check for leftover NaN/Inf values in the final factor frame
        for checker, err_msg in ((np.isnan, "has nan value at %s"), (np.isinf, "has inf value at %s")):
            idxs = np.where(checker(train_df))
            if len(idxs[0]) > 0:
                logger.error(err_msg, idxs)
def transfer_2_batch(df: pd.DataFrame, n_step, labels=None, date_from=None, date_to=None):
    """
    [num, factor_count] -> [num - n_step + 1, n_step, factor_count]
    Slice *df* into overlapping windows of length ``n_step``.
    *labels* is data aligned with *df*; it is truncated the same way as the index,
    and when it is not None an extra ``new_ys`` value is appended to the result.
    :param df: factor frame, indexed by date
    :param n_step: window length
    :param labels: if not None, its length must equal ``df.shape[0]``
    :param date_from: optional start date; windows keep up to ``n_step`` rows of history before it
    :param date_to: optional end date (inclusive)
    :return: (df_index, df_columns, data_arr_batch[, new_ys]); all None when the
        date range leaves no data
    :raises ValueError: on a labels/df length mismatch, or when fewer than
        ``n_step`` rows remain after truncation
    """
    df_len = df.shape[0]
    if labels is not None and df_len != len(labels):
        # BUG FIX: ValueError does not %-format its arguments the way logging does;
        # format the message explicitly.
        raise ValueError("ys 长度 %d 必须与 df 长度 %d 保持一致" % (len(labels), df_len))
    # TODO: the date_from / date_to logic can be optimized further; kept simple for now
    # truncate the factors according to date_from
    if date_from is not None:
        date_from = pd.to_datetime(date_from)
        is_fit = df.index >= date_from
        if np.any(is_fit):
            # keep n_step rows of history before the first fitting date
            start_idx = np.argmax(is_fit) - n_step
            if start_idx < 0:
                start_idx = 0
                # BUG FIX: was df.index[60] (hard-coded); the first full window
                # ends at position n_step - 1 after clamping to the frame start.
                logger.warning("%s 为起始日期的数据,前向历史数据不足 %d 条,因此,起始日期向后推移至 %s",
                               date_2_str(date_from), n_step, date_2_str(df.index[n_step - 1]))
            df = df.iloc[start_idx:]
            df_len = df.shape[0]
            if labels is not None:
                labels = labels[start_idx:]
        else:
            logger.warning("没有 %s 之后的数据,当前数据最晚日期为 %s",
                           date_2_str(date_from), date_2_str(max(df.index)))
            if labels is not None:
                return None, None, None, None
            else:
                return None, None, None
    # truncate the factors according to date_to
    if date_to is not None:
        date_to = pd.to_datetime(date_to)
        is_fit = df.index <= date_to
        if np.any(is_fit):
            # BUG FIX: when every date fits, np.argmin(is_fit) returned 0 and
            # silently truncated the whole frame; keep everything in that case.
            to_idx = df.shape[0] if np.all(is_fit) else np.argmin(is_fit)
            df = df.iloc[:to_idx]
            df_len = df.shape[0]
            if labels is not None:
                labels = labels[:to_idx]
        else:
            logger.warning("没有 %s 之前的数据,当前数据最晚日期为 %s",
                           date_2_str(date_to), date_2_str(min(df.index)))
            if labels is not None:
                return None, None, None, None
            else:
                return None, None, None
    # fail fast with a clear message instead of np.zeros' "negative dimensions" error
    if df_len < n_step:
        raise ValueError("df length %d is smaller than n_step %d after truncation" % (df_len, n_step))
    new_shape = [df_len - n_step + 1, n_step]
    new_shape.extend(df.shape[1:])
    # each output row i holds rows [i, i + n_step) of the factor array
    df_index, df_columns = df.index[(n_step - 1):], df.columns
    data_arr_batch, factor_arr = np.zeros(new_shape), df.to_numpy(dtype=np.float32)
    for idx_from, idx_to in enumerate(range(n_step, factor_arr.shape[0] + 1)):
        data_arr_batch[idx_from] = factor_arr[idx_from: idx_to]
    if labels is not None:
        new_ys = labels[(n_step - 1):]
        return df_index, df_columns, data_arr_batch, new_ys
    else:
        return df_index, df_columns, data_arr_batch
def _test_get_batch_factor():
    """Manual check of ``transfer_2_batch`` on a tiny synthetic frame."""
    data_len = 8
    start = pd.to_datetime('2018-01-01')
    date_arr = pd.date_range(start,
                             start + pd.Timedelta(days=data_len * 2 - 1),
                             freq=pd.Timedelta(days=2))
    date_index = pd.DatetimeIndex(date_arr)
    df = pd.DataFrame(
        {'a': list(range(data_len)),
         'b': list(range(data_len * 2, data_len * 3)),
         'c': list(range(data_len * 10, data_len * 11))},
        index=date_index,
    )
    labels = list(range(data_len))
    print("df\n", df)
    n_step = 5

    def _dump(df_index, df_columns, data_arr_batch, new_labels):
        # shared printer for both scenarios below
        print("new df_index", df_index)
        print("new factor_columns", df_columns)
        print('new reshaped data_arr_batch')
        print(data_arr_batch)
        print("df.shape: ", df.shape)
        print("new_shape:", data_arr_batch.shape)
        print("new_labels:", new_labels)

    # full frame
    _dump(*transfer_2_batch(df, n_step, labels))
    # truncated by date_from
    date_from = '2018-01-13'
    result = transfer_2_batch(df, n_step, labels, date_from=date_from)
    print('date_from=', date_from)
    _dump(*result)
def get_batch_factor(md_df: pd.DataFrame, n_step, labels=None, **factor_kwargs):
    """
    get_factor(...) -> transfer_2_batch(...)
    Compute the factor frame, then slice it into n_step-long batches.
    :param md_df: market-data frame, indexed by date
    :param n_step: window length for batching
    :param labels: optional labels aligned with md_df (forwarded to transfer_2_batch)
    :param factor_kwargs: forwarded to get_factor
    :return: (df_index, df_columns, data_arr_batch) or, when labels is not None,
        (df_index, df_columns, data_arr_batch, new_labels)
    """
    factor_df = get_factor(md_df, **factor_kwargs)
    # BUG FIX: transfer_2_batch returns FOUR values when labels is not None;
    # the old 3-way unpacking raised "too many values to unpack" and dropped labels.
    if labels is not None:
        df_index, df_columns, data_arr_batch, new_labels = transfer_2_batch(
            factor_df, n_step=n_step, labels=labels)
        return df_index, df_columns, data_arr_batch, new_labels
    df_index, df_columns, data_arr_batch = transfer_2_batch(factor_df, n_step=n_step)
    return df_index, df_columns, data_arr_batch
if __name__ == '__main__':
    # Pull in the project-wide configuration (logging setup, etc.) before running
    # the manual smoke tests.
    from ibats_common.config import *
    logger = logging.getLogger()
    _test_get_factor()
    # _test_get_batch_factor()
/Chatbot_by_IE-0.2.26-py3-none-any.whl/app/information.py |
from prettytable.colortable import ColorTable, Themes
# from prettytable import PrettyTable
def help_info_ab(*args, **kwargs):
    """Build and return the AddressBook command-reference table."""
    print(f"I N F O:")
    table = ColorTable(theme=Themes.OCEAN)
    table.field_names = ['commands', 'description', 'example']
    table.align = 'l'
    # one row per supported AddressBook command
    command_rows = (
        ['>hello<', 'hello', '>command<'],
        ['>add<', 'add name and phone, if there is a name add phone', '>command< >name< >phone<'],
        ['>change<', 'change phone', '>command< >name< >old_phone< >new_phone<'],
        ['>show<', 'show all AddressBook', '>command<'],
        ['>phone<', 'show phone', '>command< >name<'],
        ['>del<', 'del contact', '>command< >name<'],
        ['>birth<', 'add birthday', '>command< >name< >date<'],
        ['>email<', 'add email', '>command< >name< >email<'],
        ['>address<', 'add address', '>command< >name< >address<'],
        ['>find<', 'search by matches', '>command< >target<'],
        ['>nextbirth<', 'next birthday for >n< number of days', '>command< >next number days<'],
        ['>info<', 'information', '>command<'],
        ['>0, ., close, exit<', 'exit', '>command<'],
    )
    for row in command_rows:
        table.add_row(row)
    return table
def help_info_nb(*args, **kwargs):
    """Build and return the NoteBook command-reference table."""
    print(f"I N F O:")
    table = ColorTable(theme=Themes.OCEAN)
    table.field_names = ['commands', 'description', 'example']
    table.align = 'l'
    # one row per supported NoteBook command
    command_rows = (
        ['>hello<', 'hello', '>command<'],
        ['>add<', 'add new name, note and tag', '>command<'],
        ['>note<', 'add note', '>command<'],
        ['>del<', 'delete note', '>command<'],
        ['>show<', 'show all NoteBook', '>command<'],
        ['>tag<', 'add tags', '>command<'],
        ['>find<', 'find notes', '>command<'],
        ['>info<', 'information', '>command<'],
        ['>0, ., close, exit<', 'exit', '>command<'],
    )
    for row in command_rows:
        table.add_row(row)
    return table
def start_info_ab():
    """Banner table shown when AddressBook mode starts."""
    banner = ColorTable(theme=Themes.OCEAN)
    banner.field_names = [" A D D R E S S B O O K "]
    banner.align = 'l'
    banner.add_row([f"enter: info{chr(128227)}"])
    return banner
def start_info_nb():
    """Banner table shown when NoteBook mode starts."""
    banner = ColorTable(theme=Themes.OCEAN)
    banner.field_names = [" N O T E B O O K "]
    banner.align = 'l'
    banner.add_row([f"enter: info{chr(128227)}"])
    return banner
def start_info_sf():
    """Banner table shown when the file-sorting mode starts."""
    banner = ColorTable(theme=Themes.OCEAN)
    banner.field_names = [" S O R T I N G F I L E S "]
    banner.align = 'l'
    banner.add_row([f"Exit: 0"])
    return banner
def start_info_menu():
    """Top-level menu table listing the available chatbot modes."""
    menu = ColorTable(theme=Themes.OCEAN)
    menu.field_names = ['C H A T B O T']
    menu.align = 'l'
    for entry in ('0: Exit', '1: AddressBook', '2: NoteBook', '3: Sort files', '4: Games'):
        menu.add_row([entry])
    return menu
def start_info_games():
    """Menu table listing the available games."""
    menu = ColorTable(theme=Themes.OCEAN)
    menu.field_names = ['G A M E S']
    menu.align = 'l'
    for entry in ('0: Exit', '1: Magic layer', '2: Hangman'):
        menu.add_row([entry])
    return menu
/Flask-SQLAlchemy-Meiqia-2016.8.1.zip/Flask-SQLAlchemy-Meiqia-2016.8.1/docs/signals.rst | Signalling Support
==================
Connect to the following signals to get notified before and after changes are committed to the database.
These changes are only tracked if ``SQLALCHEMY_TRACK_MODIFICATIONS`` is enabled in the config.
.. versionadded:: 0.10
.. versionchanged:: 2.1
``before_models_committed`` is triggered correctly.
.. deprecated:: 2.1
This will be disabled by default in a future version.
.. data:: models_committed
This signal is sent when changed models were committed to the database.
The sender is the application that emitted the changes.
The receiver is passed the ``changes`` parameter with a list of tuples in the form ``(model instance, operation)``.
The operation is one of ``'insert'``, ``'update'``, and ``'delete'``.
.. data:: before_models_committed
This signal works exactly like :data:`models_committed` but is emitted before the commit takes place.
| PypiClean |
/DjangoDjangoAppCenter-0.0.11-py3-none-any.whl/AppCenter/simpleui/static/admin/simpleui-x/elementui/button-group.js | module.exports =
/******/ (function (modules) { // webpackBootstrap
/******/ // The module cache
/******/
var installedModules = {};
/******/
/******/ // The require function
/******/
function __webpack_require__(moduleId) {
/******/
/******/ // Check if module is in cache
/******/
if (installedModules[moduleId]) {
/******/
return installedModules[moduleId].exports;
/******/
}
/******/ // Create a new module (and put it into the cache)
/******/
var module = installedModules[moduleId] = {
/******/ i: moduleId,
/******/ l: false,
/******/ exports: {}
/******/
};
/******/
/******/ // Execute the module function
/******/
modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
/******/
/******/ // Flag the module as loaded
/******/
module.l = true;
/******/
/******/ // Return the exports of the module
/******/
return module.exports;
/******/
}
/******/
/******/
/******/ // expose the modules object (__webpack_modules__)
/******/
__webpack_require__.m = modules;
/******/
/******/ // expose the module cache
/******/
__webpack_require__.c = installedModules;
/******/
/******/ // define getter function for harmony exports
/******/
__webpack_require__.d = function (exports, name, getter) {
/******/
if (!__webpack_require__.o(exports, name)) {
/******/
Object.defineProperty(exports, name, {enumerable: true, get: getter});
/******/
}
/******/
};
/******/
/******/ // define __esModule on exports
/******/
__webpack_require__.r = function (exports) {
/******/
if (typeof Symbol !== 'undefined' && Symbol.toStringTag) {
/******/
Object.defineProperty(exports, Symbol.toStringTag, {value: 'Module'});
/******/
}
/******/
Object.defineProperty(exports, '__esModule', {value: true});
/******/
};
/******/
/******/ // create a fake namespace object
/******/ // mode & 1: value is a module id, require it
/******/ // mode & 2: merge all properties of value into the ns
/******/ // mode & 4: return value when already ns object
/******/ // mode & 8|1: behave like require
/******/
__webpack_require__.t = function (value, mode) {
/******/
if (mode & 1) value = __webpack_require__(value);
/******/
if (mode & 8) return value;
/******/
if ((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;
/******/
var ns = Object.create(null);
/******/
__webpack_require__.r(ns);
/******/
Object.defineProperty(ns, 'default', {enumerable: true, value: value});
/******/
if (mode & 2 && typeof value != 'string') for (var key in value) __webpack_require__.d(ns, key, function (key) {
return value[key];
}.bind(null, key));
/******/
return ns;
/******/
};
/******/
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/
__webpack_require__.n = function (module) {
/******/
var getter = module && module.__esModule ?
/******/ function getDefault() {
return module['default'];
} :
/******/ function getModuleExports() {
return module;
};
/******/
__webpack_require__.d(getter, 'a', getter);
/******/
return getter;
/******/
};
/******/
/******/ // Object.prototype.hasOwnProperty.call
/******/
__webpack_require__.o = function (object, property) {
return Object.prototype.hasOwnProperty.call(object, property);
};
/******/
/******/ // __webpack_public_path__
/******/
__webpack_require__.p = "/dist/";
/******/
/******/
/******/ // Load entry module and return exports
/******/
return __webpack_require__(__webpack_require__.s = 92);
/******/
})
/************************************************************************/
/******/({
/***/ 0:
/***/ (function (module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */
__webpack_require__.d(__webpack_exports__, "a", function () {
return normalizeComponent;
});
/* globals __VUE_SSR_CONTEXT__ */
// IMPORTANT: Do NOT use ES2015 features in this file (except for modules).
// This module is a runtime utility for cleaner component module output and will
// be included in the final webpack user bundle.
function normalizeComponent(
scriptExports,
render,
staticRenderFns,
functionalTemplate,
injectStyles,
scopeId,
moduleIdentifier, /* server only */
shadowMode /* vue-cli only */
) {
// Vue.extend constructor export interop
var options = typeof scriptExports === 'function'
? scriptExports.options
: scriptExports
// render functions
if (render) {
options.render = render
options.staticRenderFns = staticRenderFns
options._compiled = true
}
// functional template
if (functionalTemplate) {
options.functional = true
}
// scopedId
if (scopeId) {
options._scopeId = 'data-v-' + scopeId
}
var hook
if (moduleIdentifier) { // server build
hook = function (context) {
// 2.3 injection
context =
context || // cached call
(this.$vnode && this.$vnode.ssrContext) || // stateful
(this.parent && this.parent.$vnode && this.parent.$vnode.ssrContext) // functional
// 2.2 with runInNewContext: true
if (!context && typeof __VUE_SSR_CONTEXT__ !== 'undefined') {
context = __VUE_SSR_CONTEXT__
}
// inject component styles
if (injectStyles) {
injectStyles.call(this, context)
}
// register component module identifier for async chunk inferrence
if (context && context._registeredComponents) {
context._registeredComponents.add(moduleIdentifier)
}
}
// used by ssr in case component is cached and beforeCreate
// never gets called
options._ssrRegister = hook
} else if (injectStyles) {
hook = shadowMode
? function () {
injectStyles.call(this, this.$root.$options.shadowRoot)
}
: injectStyles
}
if (hook) {
if (options.functional) {
// for template-only hot-reload because in that case the render fn doesn't
// go through the normalizer
options._injectStyles = hook
// register for functioal component in vue file
var originalRender = options.render
options.render = function renderWithStyleInjection(h, context) {
hook.call(context)
return originalRender(h, context)
}
} else {
// inject component registration as beforeCreate hook
var existing = options.beforeCreate
options.beforeCreate = existing
? [].concat(existing, hook)
: [hook]
}
}
return {
exports: scriptExports,
options: options
}
}
/***/
}),
/***/ 92:
/***/ (function (module, __webpack_exports__, __webpack_require__) {
"use strict";
__webpack_require__.r(__webpack_exports__);
// CONCATENATED MODULE: ./node_modules/_vue-loader@15.7.1@vue-loader/lib/loaders/templateLoader.js??vue-loader-options!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/button/src/button-group.vue?vue&type=template&id=3d8661d0&
var render = function () {
var _vm = this
var _h = _vm.$createElement
var _c = _vm._self._c || _h
return _c("div", {staticClass: "el-button-group"}, [_vm._t("default")], 2)
}
var staticRenderFns = []
render._withStripped = true
// CONCATENATED MODULE: ./packages/button/src/button-group.vue?vue&type=template&id=3d8661d0&
// CONCATENATED MODULE: ./node_modules/_babel-loader@7.1.5@babel-loader/lib!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/button/src/button-group.vue?vue&type=script&lang=js&
//
//
//
//
//
/* harmony default export */
var button_groupvue_type_script_lang_js_ = ({
name: 'ElButtonGroup'
});
// CONCATENATED MODULE: ./packages/button/src/button-group.vue?vue&type=script&lang=js&
/* harmony default export */
var src_button_groupvue_type_script_lang_js_ = (button_groupvue_type_script_lang_js_);
// EXTERNAL MODULE: ./node_modules/_vue-loader@15.7.1@vue-loader/lib/runtime/componentNormalizer.js
var componentNormalizer = __webpack_require__(0);
// CONCATENATED MODULE: ./packages/button/src/button-group.vue
/* normalize component */
var component = Object(componentNormalizer["a" /* default */])(
src_button_groupvue_type_script_lang_js_,
render,
staticRenderFns,
false,
null,
null,
null
)
/* hot reload */
if (false) {
var api;
}
component.options.__file = "packages/button/src/button-group.vue"
/* harmony default export */
var button_group = (component.exports);
// CONCATENATED MODULE: ./packages/button-group/index.js
/* istanbul ignore next */
button_group.install = function (Vue) {
Vue.component(button_group.name, button_group);
};
/* harmony default export */
var packages_button_group = __webpack_exports__["default"] = (button_group);
/***/
})
/******/
}); | PypiClean |
/NetLSD-1.0.2.tar.gz/NetLSD-1.0.2/netlsd/kernels.py | import numpy as np
from .util import check_1d, check_2d, eigenvalues_auto, graph_to_laplacian, mat_to_laplacian
def compare(descriptor1, descriptor2):
    """
    Return the NetLSD distance between two signatures.

    The distance is the Euclidean (L2) norm of the element-wise difference
    of the two descriptor vectors.

    Parameters
    ----------
    descriptor1: numpy.ndarray
        First signature to compare
    descriptor2: numpy.ndarray
        Second signature to compare

    Returns
    -------
    float
        NetLSD distance
    """
    difference = descriptor1 - descriptor2
    return np.linalg.norm(difference)
def netlsd(inp, timescales=np.logspace(-2, 2, 250), kernel='heat', eigenvalues='auto', normalization='empty', normalized_laplacian=True):
    """
    Computes NetLSD signature from some given input, timescales, and normalization.

    Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues.
    For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.

    Parameters
    ----------
    inp: obj
        2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues
    timescales : numpy.ndarray
        Vector of discrete timesteps for the kernel computation
    kernel : str
        Either 'heat' or 'wave'. Type of a kernel to use for computation.
    eigenvalues : str
        Either string or int or tuple
        Number of eigenvalues to compute / use for approximation.
        If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues.
        If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation.
        If tuple, we expect two ints, first for lower part of approximation, and second for the upper part.
    normalization : str or numpy.ndarray
        Either 'empty', 'complete' or None.
        If None or any other value, return unnormalized heat kernel trace.
        For the details how 'empty' and 'complete' are computed, please refer to the paper.
        If np.ndarray, they are treated as exact normalization constants
    normalized_laplacian: bool
        Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.

    Returns
    -------
    numpy.ndarray
        NetLSD signature
    """
    # --- argument validation -------------------------------------------------
    if kernel not in {'heat', 'wave'}:
        raise AttributeError('Unirecognized kernel type: expected one of [\'heat\', \'wave\'], got {0}'.format(kernel))
    if not isinstance(normalized_laplacian, bool):
        raise AttributeError('Unknown Laplacian type: expected bool, got {0}'.format(normalized_laplacian))
    if not isinstance(eigenvalues, (int, tuple, str)):
        raise AttributeError('Unirecognized requested eigenvalue number: expected type of [\'str\', \'tuple\', or \'int\'], got {0}'.format(type(eigenvalues)))
    if not isinstance(timescales, np.ndarray):
        raise AttributeError('Unirecognized timescales data type: expected np.ndarray, got {0}'.format(type(timescales)))
    if timescales.ndim != 1:
        raise AttributeError('Unirecognized timescales dimensionality: expected a vector, got {0}-d array'.format(timescales.ndim))
    # BUG FIX: the ndarray case must be checked *before* set membership —
    # np.ndarray is unhashable, so `normalization not in {...}` raised
    # TypeError for the documented array-valued normalization.
    if isinstance(normalization, np.ndarray):
        if normalization.ndim != 1:
            raise AttributeError('Unirecognized normalization dimensionality: expected a vector, got {0}-d array'.format(normalization.ndim))
        if timescales.shape[0] != normalization.shape[0]:
            raise AttributeError('Unirecognized normalization dimensionality: expected {0}-length vector, got length {1}'.format(timescales.shape[0], normalization.shape[0]))
    elif normalization not in {'complete', 'empty', 'none', True, False, None}:
        raise AttributeError('Unirecognized normalization type: expected one of [\'complete\', \'empty\', None or np.ndarray], got {0}'.format(normalization))
    # --- obtain eigenvalues from whatever was passed in ----------------------
    eivals = check_1d(inp)
    if eivals is None:
        # not an eigenvalue vector: interpret as a matrix or a graph object
        mat = check_2d(inp)
        if mat is None:
            mat = graph_to_laplacian(inp, normalized_laplacian)
            if mat is None:
                raise ValueError('Unirecognized input type: expected one of [\'np.ndarray\', \'scipy.sparse\', \'networkx.Graph\',\' graph_tool.Graph,\' or \'igraph.Graph\'], got {0}'.format(type(inp)))
        else:
            mat = mat_to_laplacian(inp, normalized_laplacian)
        eivals = eigenvalues_auto(mat, eigenvalues)
    # --- dispatch to the requested kernel ------------------------------------
    if kernel == 'heat':
        return _hkt(eivals, timescales, normalization, normalized_laplacian)
    else:
        return _wkt(eivals, timescales, normalization, normalized_laplacian)
def heat(inp, timescales=np.logspace(-2, 2, 250), eigenvalues='auto', normalization='empty', normalized_laplacian=True):
    """
    Convenience wrapper around :func:`netlsd` that computes the *heat* kernel trace.

    See "NetLSD: Hearing the Shape of a Graph" (A. Tsitsulin et al., KDD'18) for the
    precise definition.

    Parameters mirror :func:`netlsd` (with ``kernel`` fixed to ``'heat'``):
    ``inp`` may be a 2D numpy/scipy matrix, a graph from a common Python graph
    library, or a vector of eigenvalues; ``timescales`` is the vector of discrete
    timesteps; ``eigenvalues`` ('full', 'auto', an int, or a tuple) controls how
    many eigenvalues are computed/approximated; ``normalization`` is 'empty',
    'complete', None, or an explicit np.ndarray of constants;
    ``normalized_laplacian`` tells whether the eigenvalues come from the
    normalized Laplacian (only affects 'complete' normalization).

    Returns
    -------
    numpy.ndarray
        Heat kernel trace signature
    """
    return netlsd(inp, timescales, kernel='heat', eigenvalues=eigenvalues,
                  normalization=normalization, normalized_laplacian=normalized_laplacian)
def wave(inp, timescales=np.linspace(0, 2*np.pi, 250), eigenvalues='auto', normalization='empty', normalized_laplacian=True):
    """
    Convenience wrapper around :func:`netlsd` that computes the *wave* kernel trace.

    See "NetLSD: Hearing the Shape of a Graph" (A. Tsitsulin et al., KDD'18) for the
    precise definition.

    Parameters mirror :func:`netlsd` (with ``kernel`` fixed to ``'wave'``):
    ``inp`` may be a 2D numpy/scipy matrix, a graph from a common Python graph
    library, or a vector of eigenvalues; ``timescales`` is the vector of discrete
    timesteps; ``eigenvalues`` ('full', 'auto', an int, or a tuple) controls how
    many eigenvalues are computed/approximated; ``normalization`` is 'empty',
    'complete', None, or an explicit np.ndarray of constants;
    ``normalized_laplacian`` tells whether the eigenvalues come from the
    normalized Laplacian (only affects 'complete' normalization).

    Returns
    -------
    numpy.ndarray
        Wave kernel trace signature
    """
    return netlsd(inp, timescales, kernel='wave', eigenvalues=eigenvalues,
                  normalization=normalization, normalized_laplacian=normalized_laplacian)
def _hkt(eivals, timescales, normalization, normalized_laplacian):
"""
Computes heat kernel trace from given eigenvalues, timescales, and normalization.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
eivals : numpy.ndarray
Eigenvalue vector
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any ther value, return unnormalized heat kernel trace.
For the details how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, they are treated as exact normalization constants
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
Returns
-------
numpy.ndarray
Heat kernel trace signature
"""
nv = eivals.shape[0]
hkt = np.zeros(timescales.shape)
for idx, t in enumerate(timescales):
hkt[idx] = np.sum(np.exp(-t * eivals))
if isinstance(normalization, np.ndarray):
return hkt / normalization
if normalization == 'empty' or normalization == True:
return hkt / nv
if normalization == 'complete':
if normalized_laplacian:
return hkt / (1 + (nv - 1) * np.exp(-timescales))
else:
return hkt / (1 + nv * np.exp(-nv * timescales))
return hkt
def _wkt(eivals, timescales, normalization, normalized_laplacian):
"""
Computes wave kernel trace from given eigenvalues, timescales, and normalization.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
eivals : numpy.ndarray
Eigenvalue vector
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any ther value, return unnormalized wave kernel trace.
For the details how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, they are treated as exact normalization constants
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
Returns
-------
numpy.ndarray
Wave kernel trace signature
"""
nv = eivals.shape[0]
wkt = np.zeros(timescales.shape)
for idx, t in enumerate(timescales):
wkt[idx] = np.sum(np.exp(-1j * t * eivals))
if isinstance(normalization, np.ndarray):
return hkt / normalization
if normalization == 'empty' or normalization == True:
return wkt / nv
if normalization == 'complete':
if normalized_laplacian:
return wkt / (1 + (nv - 1) * np.cos(timescales))
else:
return wkt / (1 + (nv - 1) * np.cos(nv * timescales))
return wkt | PypiClean |
/ImSwitchUC2-2.1.0.tar.gz/ImSwitchUC2-2.1.0/imswitch/imcontrol/model/managers/detectors/ThorCamSciManager__.py | import numpy as np
from imswitch.imcommon.model import initLogger
from .DetectorManager import DetectorManager, DetectorAction, DetectorNumberParameter, DetectorListParameter
class ThorCamSciManager(DetectorManager):
""" DetectorManager that deals with TheImagingSource cameras and the
parameters for frame extraction from them.
Manager properties:
- ``cameraListIndex`` -- the camera's index in the Allied Vision camera list (list
indexing starts at 0); set this string to an invalid value, e.g. the
string "mock" to load a mocker
- ``av`` -- dictionary of Allied Vision camera properties
"""
def __init__(self, detectorInfo, name, **_lowLevelManagers):
self.__logger = initLogger(self, instanceName=name)
self.detectorInfo = detectorInfo
try:
self.binningValue = detectorInfo.managerProperties['thorcamsci']["binning"]
except:
self.binningValue = 1
try:
self.cameraId = detectorInfo.managerProperties['cameraListIndex']
except:
self.cameraId = 1
try:
pixelSize = detectorInfo.managerProperties['cameraEffPixelsize'] # mum
except:
# returning back to default pixelsize
pixelSize = 1
self._camera = self._getThorCamSci(self.cameraId, self.binningValue)
fullShape = (self._camera.SensorWidth,
self._camera.SensorHeight)
model = self._camera.model
self._running = False
self._adjustingParameters = False
# Prepare parameters
parameters = {
'exposure': DetectorNumberParameter(group='Misc', value=1, valueUnits='ms',
editable=True),
'gain': DetectorNumberParameter(group='Misc', value=0, valueUnits='arb.u.',
editable=True),
'blacklevel': DetectorNumberParameter(group='Misc', value=0, valueUnits='arb.u.',
editable=True),
'binning': DetectorNumberParameter(group='Misc', value=1, valueUnits='arb.u.',
editable=True),
'image_width': DetectorNumberParameter(group='Misc', value=fullShape[0], valueUnits='arb.u.',
editable=False),
'image_height': DetectorNumberParameter(group='Misc', value=fullShape[1], valueUnits='arb.u.',
editable=False),
'frame_rate': DetectorNumberParameter(group='Misc', value=-1, valueUnits='fps',
editable=True),
'binning': DetectorNumberParameter(group="Misc", value=1, valueUnits="arb.u.", editable=True),
'trigger_source': DetectorListParameter(group='Acquisition mode',
value='Continous',
options=['Continous',
'Internal trigger',
'External trigger'],
editable=True),
'Camera pixel size': DetectorNumberParameter(group='Miscellaneous', value=pixelSize,
valueUnits='µm', editable=True)
}
# reading parameters from disk and write them to camrea
for propertyName, propertyValue in detectorInfo.managerProperties['thorcamsci'].items():
self._camera.setPropertyValue(propertyName, propertyValue)
parameters[propertyName].value = propertyValue
# TODO: Not implemented yet
self.crop(hpos=0, vpos=0, hsize=fullShape[0], vsize=fullShape[1])
# Prepare actions
actions = {
'More properties': DetectorAction(group='Misc',
func=self._camera.openPropertiesGUI)
}
super().__init__(detectorInfo, name, fullShape=fullShape, supportedBinnings=[1],
model=model, parameters=parameters, actions=actions, croppable=True)
def _updatePropertiesFromCamera(self):
self.setParameter('Real exposure time', self._camera.getPropertyValue('exposure_time')[0])
self.setParameter('Internal frame interval',
self._camera.getPropertyValue('internal_frame_interval')[0])
self.setParameter('Binning', self._camera.getPropertyValue('binning')[0])
self.setParameter('Readout time', self._camera.getPropertyValue('timing_readout_time')[0])
self.setParameter('Internal frame rate',
self._camera.getPropertyValue('internal_frame_rate')[0])
triggerSource = self._camera.getPropertyValue('trigger_source')
if triggerSource == 1:
self.setParameter('Trigger source', 'Internal trigger')
else:
triggerMode = self._camera.getPropertyValue('trigger_mode')
if triggerSource == 2 and triggerMode == 6:
self.setParameter('Trigger source', 'External "start-trigger"')
elif triggerSource == 2 and triggerMode == 1:
self.setParameter('Trigger source', 'External "frame-trigger"')
    def getLatestFrame(self, is_save=False):
        # Return the most recent frame from the camera buffer.
        # NOTE(review): `is_save` is accepted for interface compatibility but
        # is currently unused.
        frame = self._camera.getLast()
        return frame
def setParameter(self, name, value):
"""Sets a parameter value and returns the value.
If the parameter doesn't exist, i.e. the parameters field doesn't
contain a key with the specified parameter name, an error will be
raised."""
super().setParameter(name, value)
if name not in self._DetectorManager__parameters:
raise AttributeError(f'Non-existent parameter "{name}" specified')
value = self._camera.setPropertyValue(name, value)
return value
def getParameter(self, name):
"""Gets a parameter value and returns the value.
If the parameter doesn't exist, i.e. the parameters field doesn't
contain a key with the specified parameter name, an error will be
raised."""
if name not in self._parameters:
raise AttributeError(f'Non-existent parameter "{name}" specified')
value = self._camera.getPropertyValue(name)
return value
def setTriggerSource(self, source):
if source == 'Continous':
self._performSafeCameraAction(
lambda: self._camera.setPropertyValue('trigger_source', 0)
)
elif source == 'Internal trigger':
self._performSafeCameraAction(
lambda: self._camera.setPropertyValue('trigger_source', 1)
)
elif source == 'External trigger':
self._performSafeCameraAction(
lambda: self._camera.setPropertyValue('trigger_source', 2)
)
else:
raise ValueError(f'Invalid trigger source "{source}"')
def getChunk(self):
try:
return self._camera.getLastChunk()
except:
return None
    def flushBuffers(self):
        # Discard any frames still queued in the camera's internal buffer.
        self._camera.flushBuffer()
def startAcquisition(self, liveView=False):
if self._camera.model == "mock":
# reconnect? Not sure if this is smart..
del self._camera
self._camera = self._getThorCamSci(self.cameraId, self.binningValue)
for propertyName, propertyValue in self.detectorInfo.managerProperties['thorcamsci'].items():
self._camera.setPropertyValue(propertyName, propertyValue)
fullShape = (self._camera.SensorWidth,
self._camera.SensorHeight)
model = self._camera.model
self._running = False
self._adjustingParameters = False
# TODO: Not implemented yet
self.crop(hpos=0, vpos=0, hsize=fullShape[0], vsize=fullShape[1])
# Prepare parameters
parameters = {
'exposure': DetectorNumberParameter(group='Misc', value=100, valueUnits='ms',
editable=True),
'gain': DetectorNumberParameter(group='Misc', value=1, valueUnits='arb.u.',
editable=True),
'blacklevel': DetectorNumberParameter(group='Misc', value=100, valueUnits='arb.u.',
editable=True),
'image_width': DetectorNumberParameter(group='Misc', value=fullShape[0], valueUnits='arb.u.',
editable=False),
'image_height': DetectorNumberParameter(group='Misc', value=fullShape[1], valueUnits='arb.u.',
editable=False),
'frame_rate': DetectorNumberParameter(group='Misc', value=-1, valueUnits='fps',
editable=True),
'trigger_source': DetectorListParameter(group='Acquisition mode',
value='Continous',
options=['Continous',
'Internal trigger',
'External trigger'],
editable=True),
'pixelSize': DetectorNumberParameter(group='Miscellaneous', value=1,
valueUnits='µm', editable=True)
}
# Prepare actions
actions = {
'More properties': DetectorAction(group='Misc',
func=self._camera.openPropertiesGUI)
}
#super().__init__(detectorInfo, name, fullShape=fullShape, supportedBinnings=[1],
# model=model, parameters=parameters, actions=actions, croppable=True)
if not self._running:
self._camera.start_live()
self._running = True
self.__logger.debug('startlive')
    def stopAcquisition(self):
        # Pause live acquisition; it can be resumed with startAcquisition().
        if self._running:
            self._running = False
            self._camera.suspend_live()
            self.__logger.debug('suspendlive')
    def stopAcquisitionForROIChange(self):
        # Fully stop the stream (not just suspend) so the ROI can be changed.
        self._running = False
        self._camera.stop_live()
        self.__logger.debug('stoplive')
    def finalize(self) -> None:
        # Release the camera handle when the manager shuts down.
        super().finalize()
        self.__logger.debug('Safely disconnecting the camera...')
        self._camera.close()
    @property
    def pixelSizeUm(self):
        # Pixel size as [z, y, x] in micrometres; z is a placeholder of 1.
        umxpx = self.parameters['Camera pixel size'].value
        return [1, umxpx, umxpx]
    def setPixelSizeUm(self, pixelSizeUm):
        # Update the user-editable physical pixel size parameter (µm).
        self.parameters['Camera pixel size'].value = pixelSizeUm
def crop(self, hpos, vpos, hsize, vsize):
def cropAction():
self.__logger.debug(
f'{self._camera.model}: crop frame to {hsize}x{vsize} at {hpos},{vpos}.'
)
'''
self._camera.setROI(hpos, vpos, hsize, vsize)
# TOdO: weird hackaround
self._shape = (self._camera.camera.Width.get()//self._camera.binning, self._camera.camera.Height.get()//self._camera.binning)
self._frameStart = (hpos, vpos)
pass
try:
self._performSafeCameraAction(cropAction)
except Exception as e:
self.__logger.error(e)
# TODO: unsure if frameStart is needed? Try without.
# This should be the only place where self.frameStart is changed
# Only place self.shapes is changed
'''
pass
def _performSafeCameraAction(self, function):
""" This method is used to change those camera properties that need
the camera to be idle to be able to be adjusted.
"""
self._adjustingParameters = True
wasrunning = self._running
self.stopAcquisitionForROIChange()
function()
if wasrunning:
self.startAcquisition()
self._adjustingParameters = False
    def openPropertiesDialog(self):
        # Show the camera vendor's native property dialog.
        self._camera.openPropertiesGUI()
    def _getThorCamSci(self, cameraId, binning=1):
        # Try to open the real Thorlabs Scientific camera; fall back to a TIS
        # mock camera so the rest of the application keeps working without
        # hardware attached.
        try:
            from imswitch.imcontrol.model.interfaces.thorcamscicamera import CameraThorCamSci
            self.__logger.debug(f'Trying to initialize Thorlabs Scientific camera {cameraId}')
            camera = CameraThorCamSci(cameraNo=cameraId, binning=binning)
        except Exception as e:
            self.__logger.debug(e)
            self.__logger.warning(f'Failed to initialize ThorCamSci {cameraId}, loading TIS mocker')
            from imswitch.imcontrol.model.interfaces.tiscamera_mock import MockCameraTIS
            camera = MockCameraTIS()
        self.__logger.info(f'Initialized camera, model: {camera.model}')
        return camera
    def getFrameNumber(self):
        # Sequence number of the most recent frame, as reported by the camera.
        return self._camera.getFrameNumber()
    def closeEvent(self):
        # Qt-style close hook: release the camera handle.
        self._camera.close()
# Copyright (C) ImSwitch developers 2021
# This file is part of ImSwitch.
#
# ImSwitch is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ImSwitch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>. | PypiClean |
/MeUtils-2023.8.29.13.9.44-py3-none-any.whl/meutils/io/tf_io.py | import tensorflow as tf
from meutils.pipe import *
from meutils.pd_utils import split as df_split
from meutils.path_utils import get_module_path
from meutils.date_utils import date_difference
from meutils.decorators.retry import wait_retry
# URI prefix of the HDFS cluster that bare "/user/..." paths are resolved against.
HDFS = 'hdfs://easyops-cluster'
# Local data directory shipped with the package, resolved relative to this module.
DATA = get_module_path("../data", __file__)
OUTPUT = "OUTPUT"
# Marker files copied into output directories to signal job status.
_FLAG = f"{DATA}/_FLAG"
_SUCCESS = f"{DATA}/_SUCCESS"
def get_lastest_path(path, max_tries=8, threshold=1):
    """Return the newest dated path ``{path}YYYYMMDD`` that exists and whose
    first file is at least ``threshold`` KiB; scans from today backwards over
    ``max_tries`` days and returns None when nothing qualifies.
    """
    for days_back in range(max_tries):
        candidate = f"{path}{date_difference('%Y%m%d', days=days_back)}"
        if not tf.io.gfile.exists(candidate):
            continue
        files = tf.io.gfile.glob(f"{candidate}/*")
        if files and tf.io.gfile.stat(files[0]).length // 1024 >= threshold:
            logger.info(candidate)
            return candidate
    logger.warning("无效路径")
def _process_hdfs_path(p):
    """Prefix bare ``/user/...`` paths with the HDFS cluster URI; other paths
    pass through unchanged."""
    return HDFS + p if p.startswith('/user/') else p
def _process_pattern(pattern):
    """Normalize a glob pattern: expand the HDFS prefix and, when the pattern
    is a directory, match every file inside it (``dir`` -> ``dir/*``)."""
    pattern = _process_hdfs_path(pattern)
    if tf.io.gfile.isdir(pattern):
        return pattern + '/*'
    return pattern
def check_path(path):
    """Return True if the file or directory exists (local or HDFS)."""
    path = _process_hdfs_path(path)
    return tf.io.gfile.exists(path)
@wait_retry(600)  # re-check once every 10 minutes
def check_path_wait(path):
    # Blocking variant of check_path: keeps retrying until the path exists.
    return check_path(path)
def if_not_exist_makedir(path):
    """Create ``path`` (with parents) when it is missing; return the
    normalized path either way."""
    path = _process_hdfs_path(path)
    if tf.io.gfile.exists(path):
        return path
    logger.warning(f"{path} Does Not Exist, Make Dir")
    tf.io.gfile.makedirs(path)
    return path
def make_flag(output_dir, flag=_FLAG):
    # Copy a marker file (e.g. _FLAG or _SUCCESS) into the output directory,
    # creating the directory first if needed. Overwrites an existing marker.
    output_dir = if_not_exist_makedir(output_dir)
    tf.io.gfile.copy(flag, f"{output_dir}/{Path(flag).name}", True)
def process_success(output_dir):
    # Drop a _SUCCESS marker to signal that writing finished cleanly.
    make_flag(output_dir, _SUCCESS)
def rename(src, dst, overwrite=True):
    """Rename/move a file or a directory (local or HDFS); logs an error and
    does nothing when the source does not exist."""
    src = _process_hdfs_path(src)
    dst = _process_hdfs_path(dst)
    if check_path(src):
        tf.io.gfile.rename(src, dst, overwrite=overwrite)
    else:
        logger.error(f"{src}; No such file or directory")
def rm(path):
    # Remove a file or a whole directory tree; missing paths are ignored.
    path = _process_hdfs_path(path)
    if tf.io.gfile.isdir(path):
        tf.io.gfile.rmtree(path)
    elif tf.io.gfile.exists(path):  # exists() is also True for dirs, hence checked second
        tf.io.gfile.remove(path)
def cp(pattern, output_dir=DATA, with_success=True, filter_fn=None):
    """Copy every file matching ``pattern`` into ``output_dir``; returns the
    list of destination paths. Directories and empty files are skipped, and
    ``filter_fn`` can further restrict the selection."""
    pattern = _process_pattern(pattern)
    output_dir = if_not_exist_makedir(output_dir)
    # Filter out directories, empty files, and anything rejected by filter_fn.
    files = []
    for file in tf.io.gfile.glob(pattern):
        if tf.io.gfile.isdir(file) or tf.io.gfile.stat(file).length == 0:  # Path(p).stat().st_size:
            continue
        files.append(file)
    if filter_fn is not None:
        files = list(filter(filter_fn, files))
    logger.debug("FILES:\n\t{}".format('\n\t'.join(files)))  # .format used: '/' inside an f-string expression was problematic
    # Copy (overwriting existing destinations) with a thread pool.
    def func(file):
        new_file = f"{output_dir}/{Path(file).name}"
        tf.io.gfile.copy(file, new_file, True)
        return new_file
    new_files = files | xThreadPoolExecutor(func, 16) | xlist  # threads; would processes help?
    # Completion marker (only meaningful for HDFS outputs).
    if with_success and output_dir.startswith("hdfs"):
        process_success(output_dir)
    return new_files
def df2write(df, file, num_partitions=1, sep='\t', index=False, header=False, with_success=True, **kwargs):
    """Write a DataFrame to a single target path, optionally split into
    ``num_partitions`` part-files written by a thread pool.

    Note: csv writing only supports a 1-character delimiter ("delimiter"
    must be a 1-character string); merge columns beforehand when a
    multi-character separator is needed.
    """
    file = _process_hdfs_path(file)
    name = Path(file).name  # dir = file[::-1].split('/', 1)[1][::-1]
    # Path() collapses the scheme's double slash, so restore it afterwards.
    dir = Path(file).parent.__str__().replace('hdfs:/', 'hdfs://')
    if_not_exist_makedir(str(dir))
    if num_partitions == 1:
        with tf.io.gfile.GFile(file, 'w') as f:
            df.to_csv(f, index=index, header=header, sep=sep, **kwargs)
            f.flush()
    else:
        logger.debug(f"ThreadPoolExecutor: part__*__{name}")
        def writer(args):
            idx, df = args
            file = f"{dir}/part__{idx}__{name}"
            with tf.io.gfile.GFile(file, 'w') as f:
                df.to_csv(f, index=index, header=header, sep=sep, **kwargs)
                f.flush()
        enumerate(df_split(df, num_partitions)) | xThreadPoolExecutor(writer, num_partitions)  # speed-up is modest
    if with_success:
        process_success(dir)
    # Free the (potentially large) frame promptly.
    del df
    gc.collect()
def read2df(file, **kwargs):
    """Read a single file (local or HDFS) into a DataFrame; ``kwargs`` pass
    through to ``pd.read_csv``.

    Notes: a local file supports a multi-character ``sep``; HDFS apparently
    does not. For chunked reads use
    ``pd.read_csv(p, iterator=True, chunksize=10000)``.
    """
    file = _process_hdfs_path(file)
    with tf.io.gfile.GFile(file, 'r') as f:  # TODO: may misbehave on non-ASCII (e.g. Chinese) content
        return pd.read_csv(f, **kwargs)
def read2dataset(pattern, fmt='TextLineDataset', num_parallel_reads=1):
    """Build a tf.data dataset over (possibly many, large) files.

    :param pattern: glob pattern of the input files
    :param fmt: 'TextLineDataset' or 'TFRecordDataset'
    :return: the dataset

    Examples::

        ds = tf_io.read2dataset('title.csv')
        df = pd.DataFrame(map(bytes.decode, ds.as_numpy_iterator()))
        for i in ds:
            i.numpy().decode().split('\t')
    """
    pattern = _process_pattern(pattern)
    try:
        fs = tf.io.gfile.glob(pattern)
    except Exception as e:
        logger.error(e)
        # Fall back to tf.data's file listing, which tolerates some patterns
        # that gfile.glob rejects.
        fs = tf.data.Dataset.list_files(file_pattern=pattern)
        fs = [f.decode() for f in fs.as_numpy_iterator()]
    logger.info("FILES: " + '\t' + '\n\t'.join(fs))
    ds = tf.data.__getattribute__(fmt)(fs, num_parallel_reads=num_parallel_reads)
    return ds
def ds2df(input, sep='\t', columns=None, num_parallel_reads=6):
    """Load text files into a DataFrame: each line is decoded and split on
    ``sep`` to form one row."""
    dataset = read2dataset(input, num_parallel_reads=num_parallel_reads)
    rows = (record.decode().split(sep) for record in tqdm(dataset.as_numpy_iterator()))
    return pd.DataFrame(rows, columns=columns)
# Copying files to local disk first makes reads faster and simpler.
def read_hdfs(pattern, reader=pd.read_csv, max_workers=1, cache_dir='read_cache', is_union=True):
    """Read many files matching ``pattern`` by first copying them into
    ``cache_dir``; returns one concatenated DataFrame, or the list of
    per-file frames when ``is_union`` is False."""
    files = tqdm(cp(pattern, cache_dir))
    if max_workers == 1:
        dfs = map(reader, files)
    else:
        dfs = files | xProcessPoolExecutor(reader, max_workers) | xlist
    if is_union:
        return pd.concat(dfs, ignore_index=True)
    else:
        return dfs
def to_hdfs(
        df, file_or_dir, batch_size=None,  # when splitting into many small files, file_or_dir should be a directory
        writer=lambda df, file: df.to_csv(file, sep='\t', header=False, index=False),
        with_success=True,
        cache_dir='to_cache',
        file_start_index=0,
        file_suffix='',
        workers=1,
):
    """Write a DataFrame to HDFS by first writing locally into ``cache_dir``
    and then copying the cache to the target (threaded cp)."""
    if_not_exist_makedir(cache_dir)
    file_or_dir = _process_hdfs_path(file_or_dir)
    if batch_size:
        target_dir = file_or_dir
        def _writer(args):
            i, df = args
            writer(df, f"{cache_dir}/part-{i}-{file_suffix}")
        dfs = df_split(df, batch_size=batch_size)
        if workers == 1:
            for args in tqdm(enumerate(dfs, file_start_index)):
                _writer(args)
        else:
            enumerate(dfs, file_start_index) | xProcessPoolExecutor(_writer, workers) | xlist  # multiprocessing seems to deadlock
    else:  # TODO: deprecate this single-file branch
        name = Path(file_or_dir).name
        target_dir = Path(file_or_dir).parent.__str__().replace('hdfs:/', 'hdfs://')
        writer(df, f"{cache_dir}/{name}")
    cp(cache_dir, target_dir, with_success=with_success)  # threaded copy
    magic_cmd(f"rm -rf {cache_dir}/*")  # burn after use to save local disk
if __name__ == '__main__':
    # Ad-hoc smoke test against the author's local paths.
    print(check_path("/Users/yuanjie/Desktop/Projects/Python/MeUtils/meutils/data/_SUCCESS"))
    print(check_path_wait("/Users/yuanjie/Desktop/Projects/Python/MeUtils/meutils/data/__SUCCESS"))
/MAD-0.2.2.zip/MAD-0.2.2/mad/utils/releasing.py |
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
import re
import os
import subprocess
from setuptools import Command
import mad
class Version:
    """A major.minor[.micro] version number with bump helpers, kept in sync
    with the ``__version__`` string in ``mad/__init__.py``."""

    LOCATION = "mad/__init__.py"

    @staticmethod
    def from_source_code():
        """Read the current version out of the installed mad package."""
        return Version.fromText(mad.__version__)

    @staticmethod
    def update_source_code(version):
        """Rewrite ``__version__`` in the source file to the given version."""
        content = open(Version.LOCATION).read()
        replacement = "__version__ = \"%s\"" % str(version)
        new_content = re.sub(r"__version__\s*=\s*\"\d+\.\d+\.\d+\"", replacement, content)
        with open(Version.LOCATION, "w") as updated:
            updated.write(new_content)
            updated.flush()

    @staticmethod
    def fromText(text):
        """Parse 'major.minor', 'major.minor.micro' or 'major.minor.devN'."""
        pattern = re.compile("(\\d+)\\.(\\d+)(?:\\.(?:dev)?(\\d+))?")
        match = re.match(pattern, text)
        return Version(int(match.group(1)), int(match.group(2)), int(match.group(3)) if match.group(3) else None)

    def __init__(self, major, minor, micro=None):
        self.major = major
        self.minor = minor
        self.micro = micro

    def hasMinor(self, minor):
        return self.minor == minor

    def hasMajor(self, major):
        return self.major == major

    def hasMicro(self, micro):
        return self.micro == micro

    def nextMicroRelease(self):
        return Version(self.major, self.minor, self.micro + 1)

    def nextMinorRelease(self):
        return Version(self.major, self.minor + 1, 0)

    def nextMajorRelease(self):
        return Version(self.major + 1, 0, 0)

    def __repr__(self):
        version = "{}.{}".format(self.major, self.minor)
        if self.micro is not None:
            version += ".%d" % self.micro
        return version

    def __str__(self):
        return self.__repr__()

    def __eq__(self, other):
        # Return NotImplemented (rather than a falsy non-bool) for foreign
        # types so Python can try the reflected comparison.
        if not isinstance(other, Version):
            return NotImplemented
        return (self.major, self.minor, self.micro) == (other.major, other.minor, other.micro)

    def __hash__(self):
        # Defining __eq__ alone would have made instances unhashable; keep
        # the hash consistent with equality.
        return hash((self.major, self.minor, self.micro))
class SourceControl:
    """Thin wrapper around the git command line (Windows-oriented paths)."""

    def __init__(self):
        # Make sure git.exe can be found even when it is not already on PATH.
        self.environment = os.environ.copy()
        self.environment["PATH"] += ";C:/Program Files (x86)/Git/bin/"

    def commit(self, message):
        # Stage all tracked modifications, then commit them with the message.
        # NOTE(review): passing a list together with shell=True is unusual;
        # on POSIX only the first element would run -- confirm this is
        # Windows-only before reuse.
        command = ["git.exe", "add", "-u"]
        subprocess.call(command, env=self.environment, shell=True)
        command = ["git.exe", "commit", "-m", "%s" % message ]
        subprocess.call(command, env=self.environment, shell=True)

    def tag(self, version):
        # Create an annotated tag v<version> for the release.
        command = ["git.exe", "tag", "-a", "v" + str(version), "-m", "\"Version %s\"" % str(version) ]
        subprocess.call(command, env=self.environment, shell=True)
class Release(Command):
    """setuptools command that tags, builds and publishes a release, then
    bumps the version for continued development.

    Invoked as e.g. ``python setup.py release --type=minor``.
    """

    def __init__(self, dist, scm = SourceControl()):
        # NOTE(review): the SourceControl default is evaluated once at class
        # definition time and shared by all instances (mutable default).
        super().__init__(dist)
        self.scm = scm

    # setuptools option spec; --type selects the release kind.
    user_options = [('type=', None, 'The type of release (micro, minor or major')]

    def initialize_options(self):
        self.type = ""

    def finalize_options(self):
        pass

    def run(self):
        current_version = self.release()
        self.prepare_next_release(current_version)

    def release(self):
        # Compute, persist, tag, build and publish the released version.
        current_version = Version.from_source_code()
        print("Current version: %s" % current_version)
        released_version = self.released_version(current_version)
        print("Releasing version %s" % released_version)
        if current_version != released_version:
            # Only rewrite sources/metadata when the number actually changes
            # (a micro release reuses the current number).
            Version.update_source_code(released_version)
            self.distribution.version = str(released_version)
            self.distribution.metadata.version = str(released_version)
            self.scm.commit("Releasing version %s" % released_version)
        self.scm.tag(released_version)
        self.build()
        self.publish()
        return released_version

    def released_version(self, current_version):
        # Map the --type option onto the version to release.
        if self.type == "micro":
            return current_version
        elif self.type == "minor":
            return current_version.nextMinorRelease()
        elif self.type == "major":
            return current_version.nextMajorRelease()
        else:
            raise ValueError("Unknown release kind '%s' (options are 'micro', 'minor' or 'major')" % self.type)

    def build(self):
        # Produce both a binary egg and a source distribution.
        self.run_command("bdist_egg")
        self.run_command("sdist")

    def publish(self):
        # Register and upload to the package index.
        self.run_command("register")
        self.run_command("upload")

    def prepare_next_release(self, current_version):
        # Bump to the next micro version so development continues on a new number.
        new_version = current_version.nextMicroRelease()
        Version.update_source_code(new_version)
        print("Preparing version " + str(new_version))
        self.scm.commit("Preparing version %s" % new_version)
/observations-0.1.4.tar.gz/observations-0.1.4/observations/svhn.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from observations.util import maybe_download_and_extract
def svhn(path, load_extra=False):
  """Load the Street View House Numbers data set in cropped digits
  format [@netzer2011reading]. It consists of 32x32 RGB images in 10
  classes. There are 73257 training images, 26032 test images, and
  531131 extra images.

  Args:
    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there. Filenames are
      `train_32x32.mat`, `test_32x32.mat`, `extra_32x32.mat`.
    load_extra: bool, optional.
      Whether to load the extra images. Default is False.

  Returns:
    Tuple of np.ndarray's
    `(x_train, y_train), (x_test, y_test)`, and a third tuple of
    `(x_extra, y_extra)` if `load_extra` is True.
  """
  from scipy.io import loadmat
  path = os.path.expanduser(path)
  url = 'http://ufldl.stanford.edu/housenumbers/'

  def load_split(filename):
    # Download the .mat file on first use, then load and normalize it.
    # (Shared helper replacing three duplicated download/load blocks.)
    if not os.path.exists(os.path.join(path, filename)):
      maybe_download_and_extract(path, url + filename)
    loaded = loadmat(os.path.join(path, filename))
    # Stored as (H, W, C, N); move the sample axis first -> (N, H, W, C).
    x = loaded['X'].transpose(3, 0, 1, 2)
    y = loaded['y'].flatten()
    # SVHN labels the digit 0 as class 10; remap it to 0.
    y[y == 10] = 0
    return x, y

  x_train, y_train = load_split('train_32x32.mat')
  x_test, y_test = load_split('test_32x32.mat')
  if load_extra:
    x_extra, y_extra = load_split('extra_32x32.mat')
    return (x_train, y_train), (x_test, y_test), (x_extra, y_extra)
  return (x_train, y_train), (x_test, y_test)
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/mmdet/models/losses/balanced_l1_loss.py | import mmcv
import numpy as np
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def balanced_l1_loss(pred,
                     target,
                     beta=1.0,
                     alpha=0.5,
                     gamma=1.5,
                     reduction='mean'):
    """Calculate balanced L1 loss.

    Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_

    Args:
        pred (torch.Tensor): The prediction with shape (N, 4).
        target (torch.Tensor): The learning target of the prediction with
            shape (N, 4).
        beta (float): The loss is a piecewise function of prediction and target
            and ``beta`` serves as a threshold for the difference between the
            prediction and target. Defaults to 1.0.
        alpha (float): The denominator ``alpha`` in the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): The ``gamma`` in the balanced L1 loss.
            Defaults to 1.5.
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".

    Returns:
        torch.Tensor: The calculated loss
    """
    assert beta > 0
    if target.numel() == 0:
        # Empty target: return a zero that still participates in autograd.
        return pred.sum() * 0

    assert pred.size() == target.size()

    diff = torch.abs(pred - target)
    # `b` is chosen so the two branches of the piecewise loss join smoothly
    # at diff == beta (see the Libra R-CNN paper).
    b = np.e**(gamma / alpha) - 1
    loss = torch.where(
        diff < beta, alpha / b *
        (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
        gamma * diff + gamma / b - alpha * beta)

    return loss
@LOSSES.register_module()
class BalancedL1Loss(nn.Module):
    """Balanced L1 Loss.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Args:
        alpha (float): The denominator ``alpha`` in the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
        beta (float, optional): The loss is a piecewise function of prediction
            and target. ``beta`` serves as a threshold for the difference
            between the prediction and target. Defaults to 1.0.
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of the loss. Defaults to 1.0
    """

    def __init__(self,
                 alpha=0.5,
                 gamma=1.5,
                 beta=1.0,
                 reduction='mean',
                 loss_weight=1.0):
        super(BalancedL1Loss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Forward function of loss.

        Args:
            pred (torch.Tensor): The prediction with shape (N, 4).
            target (torch.Tensor): The learning target of the prediction with
                shape (N, 4).
            weight (torch.Tensor, optional): Sample-wise loss weight with
                shape (N, ).
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # A per-call override takes precedence over the configured reduction.
        reduction = (
            reduction_override if reduction_override else self.reduction)
        loss_bbox = self.loss_weight * balanced_l1_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss_bbox
def angle_0_360(v1, v2, reference_direction):
    """Angle from v1 to v2 in degrees, measured in [0, 360).

    The sense of rotation is resolved by projecting v1 x v2 onto
    ``reference_direction`` (right-hand rule about that direction).
    """
    # Use the module-wide `np` alias consistently (the original mixed
    # `numpy.` and `np.`, which breaks under a single `import numpy as np`).
    v1 = v1 / np.linalg.norm(v1)
    v2 = v2 / np.linalg.norm(v2)
    cos_theta = np.dot(v1, v2)
    sin_theta = np.sign(np.dot(np.cross(v1, v2), reference_direction)) * np.linalg.norm(np.cross(v1, v2))
    atg = np.degrees(np.arctan2(sin_theta, cos_theta))
    # Wrap arctan2's (-180, 180] result into [0, 360).
    return atg + 360 if atg < 0 else atg
# Exponential-atmosphere lookup tables; the values appear to match the
# piecewise model tabulated in Curtis, "Orbital Mechanics for Engineering
# Students" (TODO: confirm source edition).
# h: base altitude of each layer [km] (28 entries).
h = np.array([0, 25, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140,
              150, 180, 200, 250, 300, 350, 400, 450, 500, 600, 700, 800,
              900, 1000])
# r: reference density at each base altitude [kg/m^3] (28 entries).
r = np.array([1.225, 4.008e-2, 1.841e-2, 3.996e-3, 1.027e-3, 3.097e-4, 8.283e-5,
              1.846e-5, 3.416e-6, 5.606e-7, 9.708e-8, 2.222e-8, 8.152e-9, 3.831e-9,
              2.076e-9, 5.194e-10, 2.541e-10, 6.073e-11, 1.916e-11, 7.014e-12,
              2.803e-12, 1.184e-12, 5.215e-13, 1.137e-13, 3.070e-14, 1.136e-14,
              5.759e-15, 3.561e-15])
# H: scale height per layer [km] -- note: only 27 entries (one per interval).
H = np.array([7.310, 6.427, 6.546, 7.360, 8.342, 7.583, 6.661, 5.927, 5.533,
              5.703, 6.782, 9.973, 13.243, 16.322, 21.652, 27.974, 34.934,
              43.342, 49.755, 54.513, 58.019, 60.980, 65.654, 76.377, 100.587,
              147.203, 208.020])
def atmopshere(height):
    """Atmospheric density [kg/m^3] from the piecewise exponential model,
    for an altitude in km (clamped to the table range [0, 1000]).

    The function name keeps its original (misspelled) form for backward
    compatibility with existing callers.
    """
    # Clamp the altitude to the bounds of the lookup table.
    if height > 1000:
        height = 1000
    elif height < 0:
        height = 0
    # Find the layer whose [h[j], h[j+1]) interval contains the altitude.
    i = 0
    for j in range(27):
        if h[j] <= height < h[j + 1]:
            i = j
    if height == 1000:
        # Topmost layer is index 26. The original set i = 27 here, which
        # indexed one past the end of the 27-element scale-height table H
        # and raised IndexError for height >= 1000.
        i = 26
    density = r[i] * np.exp(-(height - h[i]) / H[i])
    return density
def orb_elems_from_rv(r, v, mu):
    # Classical orbital elements from position/velocity state vectors.
    # Returns (RAAN, inclination, argument of perigee, true anomaly,
    # semi-major axis, eccentricity); all angles in degrees.
    h = np.cross(r, v)  # specific angular momentum (shadows the module-level altitude table locally)
    B = np.cross(v, h) - mu * r / np.linalg.norm(r)  # eccentricity vector scaled by mu (Laplace vector)
    N = np.cross(np.array([0, 0, 1]), h)  # node vector
    Omega = angle_0_360(np.array([1, 0, 0]), N, np.array([0, 0, 1]))
    i = np.degrees(np.arccos(h[2] / np.linalg.norm(h)))
    # NOTE(review): arccos alone cannot distinguish omega from 360 - omega;
    # the usual quadrant fix (omega = 360 - omega when B[2] < 0) is absent.
    omega = np.degrees(np.arccos(np.dot(N, B) / (np.linalg.norm(N) * np.linalg.norm(B))))
    # NOTE(review): the reference direction for the true anomaly is the
    # z-axis here; for inclined orbits the angular-momentum direction h is
    # the usual choice -- confirm the intended convention.
    nu = angle_0_360(B, r, np.array([0, 0, 1]))
    a = -mu / (2 * ((np.dot(v, v) / 2) - mu / np.linalg.norm(r)))  # vis-viva energy
    e = np.linalg.norm(B / mu)
    return Omega, i, omega, nu, a, e
def rv_from_orb_elems(a, e, i, Omega, omega, ni, mu):
    """State vectors (r, v) from classical orbital elements.

    Angles (i, Omega, omega, ni) are in degrees; mu is the gravitational
    parameter of the central body.
    """
    p = a * (1 - e ** 2)  # semi-latus rectum
    nu_rad = np.radians(ni)
    # Position and velocity expressed in the perifocal (PQW) frame.
    r_perifocal = p / (1 + e * np.cos(nu_rad)) * np.array([
        np.cos(nu_rad),
        np.sin(nu_rad),
        0])
    v_perifocal = np.sqrt(mu / p) * np.array([
        -np.sin(nu_rad),
        e + np.cos(nu_rad),
        0])
    # Perifocal -> geocentric-equatorial transform: Rz(Omega) @ Rx(i) @ Rz(omega).
    Om = np.radians(Omega)
    inc = np.radians(i)
    w = np.radians(omega)
    Rz_Omega = np.array([[np.cos(Om), -np.sin(Om), 0],
                         [np.sin(Om), np.cos(Om), 0],
                         [0, 0, 1]])
    Rx_i = np.array([[1, 0, 0],
                     [0, np.cos(inc), -np.sin(inc)],
                     [0, np.sin(inc), np.cos(inc)]])
    Rz_omega = np.array([[np.cos(w), -np.sin(w), 0],
                         [np.sin(w), np.cos(w), 0],
                         [0, 0, 1]])
    # Fix: the original first factor ([[-sin, cos, 0], [cos, sin, 0], ...])
    # had determinant -1 (a reflection that swaps x and y for Omega = 0),
    # so the composed transform was not a proper rotation.
    perifocal_equatorial = Rz_Omega @ Rx_i @ Rz_omega
    r = perifocal_equatorial @ r_perifocal
    v = perifocal_equatorial @ v_perifocal
    return r, v
/CellphoneDB-4.0.0-py3-none-any.whl/cellphonedb/src/core/methods/cpdb_analysis_method.py | from typing import Tuple
import pandas as pd
import numpy as np
import pickle
from cellphonedb.src.core.core_logger import core_logger
from cellphonedb.src.core.exceptions.AllCountsFilteredException import AllCountsFilteredException
from cellphonedb.src.core.exceptions.NoInteractionsFound import NoInteractionsFound
from cellphonedb.src.core.exceptions.MissingRequiredArgumentsException import MissingRequiredArgumentsException
from cellphonedb.src.core.methods import cpdb_statistical_analysis_complex_method
from cellphonedb.src.core.methods import cpdb_statistical_analysis_helper
from cellphonedb.src.core.models.complex import complex_helper
from cellphonedb.utils import db_utils, file_utils
def call(
        cpdb_file_path: str = None,
        meta_file_path: str = None,
        counts_file_path: str = None,
        counts_data: str = None,
        output_path: str = None,
        microenvs_file_path: str = None,
        separator: str = "|",
        threshold: float = 0.1,
        result_precision: int = 3,
        debug: bool = False,
        output_suffix: str = None
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Non-statistical method for analysis

    This methods calculates the mean and percent for the cluster interactions
    and for each gene interaction. No shuffling nor DEGs are involved.

    Parameters
    ----------
    cpdb_file_path: str
        CellphoneDB database file path
    meta_file_path: str
        Path to metadata csv file
    counts_file_path: str
        Path to counts csv file
    counts_data: str
        Type of gene identifiers in the counts data: "ensembl", "gene_name", "hgnc_symbol"
    output_path: str
        Output path used to store the analysis results (and to store intermediate files when debugging)
    microenvs_file_path: str, optional
        Path to Micro-environment file. Its content is used to limit cluster interactions
    separator: str
        Separator for pairs of genes (gene1|gene2) and clusters (cluster1|cluster2).
    threshold: float
        Percentage of cells expressing the specific ligand/receptor [0.0 - 1.0]
    result_precision: int
        Number of decimal digits in results.
    debug: bool
        Storge intermediate data as pickle file (debug_intermediate.pkl).
    output_suffix: str, optional
        Suffix to append to the result file's name (if not provided, timestamp will be used)

    Returns
    -------
    Tuple
         - means_result
         - deconvoluted_result
    """
    core_logger.info(
        '[Non Statistical Method] Threshold:{} Precision:{}'.format(threshold,
                                                                    result_precision))
    # Report error unless the required arguments have been provided
    required_arguments = [cpdb_file_path, meta_file_path, counts_file_path, counts_data, output_path]
    if None in required_arguments or '' in required_arguments:
        raise MissingRequiredArgumentsException(description="All of the following arguments need to be provided: {}".format( \
            "cpdb_file_path, meta_file_path, counts_file_path, counts_data, output_path"))

    # Load into memory CellphoneDB data
    interactions, genes, complex_compositions, complexes, gene_synonym2gene_name = \
        db_utils.get_interactions_genes_complex(cpdb_file_path)

    # Load user files into memory
    counts, meta, microenvs, degs = file_utils.get_user_files( \
        counts_fp=counts_file_path, meta_fp=meta_file_path, microenvs_fp=microenvs_file_path, \
        gene_synonym2gene_name=gene_synonym2gene_name, counts_data=counts_data)

    # get reduced interactions (drop duplicates)
    interactions_reduced = interactions[['multidata_1_id', 'multidata_2_id']].drop_duplicates()

    # add id multidata and means to counts input
    counts, counts_relations = cpdb_statistical_analysis_helper.add_multidata_and_means_to_counts(
        counts, genes, counts_data)
    if counts.empty:
        raise AllCountsFilteredException(hint='Are you using human data?')

    # Keep only interactions/counts/complexes that can actually be scored.
    interactions_filtered, counts_filtered, complex_composition_filtered = \
        cpdb_statistical_analysis_helper.prefilters(interactions_reduced,
                                                    counts,
                                                    complexes,
                                                    complex_compositions)
    if interactions_filtered.empty:
        raise NoInteractionsFound()

    # Align metadata rows with the (possibly filtered) counts columns.
    meta = meta.loc[counts.columns]

    complex_to_protein_row_ids = complex_helper.map_complex_to_protein_row_ids(complex_composition_filtered, counts_filtered)

    clusters = cpdb_statistical_analysis_helper.build_clusters(meta,
                                                               counts_filtered,
                                                               complex_to_protein_row_ids,
                                                               skip_percent=False)
    core_logger.info('Running Real Analysis')

    # Cluster pairs to score (restricted by micro-environments if given).
    cluster_interactions = cpdb_statistical_analysis_helper.get_cluster_combinations(clusters['names'], microenvs)

    base_result = cpdb_statistical_analysis_helper.build_result_matrix(interactions_filtered,
                                                                       cluster_interactions,
                                                                       separator)

    mean_analysis = cpdb_statistical_analysis_helper.mean_analysis(interactions_filtered,
                                                                   clusters,
                                                                   cluster_interactions,
                                                                   separator)

    percent_analysis = cpdb_statistical_analysis_helper.percent_analysis(clusters,
                                                                         threshold,
                                                                         interactions_filtered,
                                                                         cluster_interactions,
                                                                         separator)

    if debug:
        # Persist all intermediates for offline inspection.
        with open(f"{output_path}/debug_intermediate.pkl", "wb") as fh:
            pickle.dump({
                "genes": genes,
                "interactions": interactions,
                "interactions_filtered": interactions_filtered,
                "interactions_reduced": interactions_reduced,
                "complex_compositions": complex_compositions,
                "counts": counts,
                "counts_relations": counts_relations,
                "clusters_means_percents": clusters,
                "cluster_interactions": cluster_interactions,
                "base_result": base_result,
                "mean_analysis": mean_analysis,
                "percent_analysis": percent_analysis}, fh)

    means_result, significant_means, deconvoluted_result = build_results(
        interactions_filtered,
        interactions,
        counts_relations,
        mean_analysis,
        percent_analysis,
        clusters['means'],
        complex_composition_filtered,
        counts,
        genes,
        result_precision,
        counts_data
    )

    # Push rank-0 (non-significant) rows below the worst real rank, then sort.
    max_rank = significant_means['rank'].max()
    significant_means['rank'] = significant_means['rank'].apply(lambda rank: rank if rank != 0 else (1 + max_rank))
    significant_means.sort_values('rank', inplace=True)

    file_utils.save_dfs_as_tsv(output_path, output_suffix, "simple_analysis", \
                               {"means_result" : means_result, \
                                "deconvoluted_result" : deconvoluted_result} )

    return means_result, deconvoluted_result
def build_results(interactions: pd.DataFrame,
                  interactions_original: pd.DataFrame,
                  counts_relations: pd.DataFrame,
                  mean_analysis: pd.DataFrame,
                  percent_analysis: pd.DataFrame,
                  clusters_means: pd.DataFrame,
                  complex_compositions: pd.DataFrame,
                  counts: pd.DataFrame,
                  genes: pd.DataFrame,
                  result_precision: int,
                  counts_data: str) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """
    Sets the results data structure from method generated data.
    Results documents are defined by specs.

    Parameters
    ----------
    interactions: filtered interactions; only its index is used to re-select
        rows from ``interactions_original``.
    interactions_original: unfiltered interactions table.
    counts_relations: gene/protein relations merged onto each interaction
        partner via ``multidata_1_id`` / ``multidata_2_id``.
    mean_analysis: per cluster-pair mean expression per interaction.
    percent_analysis: per cluster-pair expression-percent values used to
        decide significance.
    clusters_means: cluster name -> per-gene means (rounded in place below).
    complex_compositions: complex-to-member-protein composition rows.
    counts: expression counts (genes x cells/clusters).
    genes: gene metadata table.
    result_precision: number of decimals kept in numeric outputs.
    counts_data: gene id column in use (e.g. 'ensembl' or 'gene_name').

    Returns
    -------
    Tuple: A tuple containing the results for:
        - means
        - significant_means
        - deconvoluted
    """
    core_logger.info('Building results')
    # Re-select the original (unfiltered) rows for the filtered index; the
    # `interactions` parameter is deliberately re-bound here.
    interactions: pd.DataFrame = interactions_original.loc[interactions.index]
    # Preserve the index across the merges below so it can be restored.
    interactions['interaction_index'] = interactions.index
    # Attach gene/protein info for partner 1, then partner 2.
    interactions = interactions.merge(counts_relations, how='left', left_on='multidata_1_id', right_on='id_multidata', )
    # The column drop below prevents: 'FutureWarning: Passing 'suffixes' which cause duplicate columns {'id_multidata_1'}
    # in the result is deprecated and will raise a MergeError in a future version.'
    interactions = interactions.drop('id_multidata', axis=1)
    interactions = interactions.merge(counts_relations, how='left', left_on='multidata_2_id', right_on='id_multidata',
                                      suffixes=('_1', '_2'))
    interactions.set_index('interaction_index', inplace=True, drop=True)
    interacting_pair = cpdb_statistical_analysis_helper.interacting_pair_build(interactions)

    def simple_complex_indicator(interaction: pd.Series, suffix: str) -> str:
        """
        Add simple/complex prefixes to interaction components
        """
        if interaction['is_complex{}'.format(suffix)]:
            return 'complex:{}'.format(interaction['name{}'.format(suffix)])
        return 'simple:{}'.format(interaction['name{}'.format(suffix)])

    interactions['partner_a'] = interactions.apply(lambda interaction: simple_complex_indicator(interaction, '_1'),
                                                   axis=1)
    interactions['partner_b'] = interactions.apply(lambda interaction: simple_complex_indicator(interaction, '_2'),
                                                   axis=1)
    significant_mean_rank, significant_means = cpdb_statistical_analysis_helper.build_significant_means(
        mean_analysis,
        percent_analysis)
    significant_means = significant_means.round(result_precision)
    gene_columns = ['{}_{}'.format(counts_data, suffix) for suffix in ('1', '2')]
    gene_renames = {column: 'gene_{}'.format(suffix) for column, suffix in zip(gene_columns, ['a', 'b'])}
    # Remove useless columns
    interactions_data_result = pd.DataFrame(
        interactions[['id_cp_interaction', 'partner_a', 'partner_b', 'receptor_1', 'receptor_2', *gene_columns,
                      'annotation_strategy']].copy())
    interactions_data_result = pd.concat([interacting_pair, interactions_data_result], axis=1, sort=False)
    # An interaction is flagged secreted / integrin when either partner is.
    interactions_data_result['secreted'] = (interactions['secreted_1'] | interactions['secreted_2'])
    interactions_data_result['is_integrin'] = (interactions['integrin_1'] | interactions['integrin_2'])
    interactions_data_result.rename(
        columns={**gene_renames, 'receptor_1': 'receptor_a', 'receptor_2': 'receptor_b'},
        inplace=True)
    # Dedupe rows and filter only desired columns
    interactions_data_result.drop_duplicates(inplace=True)
    means_columns = ['id_cp_interaction', 'interacting_pair', 'partner_a', 'partner_b', 'gene_a', 'gene_b', 'secreted',
                     'receptor_a', 'receptor_b', 'annotation_strategy', 'is_integrin']
    interactions_data_result = interactions_data_result[means_columns]
    mean_analysis = mean_analysis.round(result_precision)
    # Round result decimals
    for key, cluster_means in clusters_means.items():
        clusters_means[key] = cluster_means.round(result_precision)
    # Document 2
    means_result = pd.concat([interactions_data_result, mean_analysis], axis=1, join='inner', sort=False)
    # Document 3
    significant_means_result = pd.concat([interactions_data_result, significant_mean_rank, significant_means], axis=1,
                                         join='inner', sort=False)
    # Document 5
    deconvoluted_result = cpdb_statistical_analysis_complex_method.deconvoluted_complex_result_build(
        clusters_means,
        interactions,
        complex_compositions,
        counts,
        genes,
        counts_data)
    return means_result, significant_means_result, deconvoluted_result
def deconvoluted_complex_result_build(clusters_means: dict, interactions: pd.DataFrame,
                                      complex_compositions: pd.DataFrame, counts: pd.DataFrame,
                                      genes: pd.DataFrame, counts_data: str) -> pd.DataFrame:
    """Assemble the per-gene deconvoluted result table.

    Builds one row per participating gene for both interaction partners
    (complex members are expanded to their member proteins), then appends
    the per-cluster mean expression columns, sorted by cluster name.
    """
    available_genes = list(counts.index)
    # Keep only genes that actually appear in the counts matrix.
    genes_filtered = genes[genes[counts_data].apply(lambda gene: gene in available_genes)]
    # Same order as the original construction: complex_1, simple_1, complex_2, simple_2.
    partial_results = []
    for suffix in ('_1', '_2'):
        partial_results.append(
            deconvolute_complex_interaction_component(complex_compositions, genes_filtered,
                                                      interactions, suffix, counts_data))
        partial_results.append(deconvolute_interaction_component(interactions, suffix, counts_data))
    result = pd.concat(partial_results, sort=False)
    result.set_index('gene', inplace=True)
    # One column per cluster, aligned on the gene index and sorted by name.
    per_cluster = pd.DataFrame(index=result.index)
    for cluster_name, cluster_means in clusters_means.items():
        per_cluster[cluster_name] = cluster_means
    per_cluster = per_cluster.reindex(sorted(per_cluster.columns), axis=1)
    # Keep only the desired metadata columns, in the documented order.
    wanted_columns = ['gene_name', 'name', 'is_complex', 'protein_name', 'complex_name', 'id_cp_interaction']
    result = result[wanted_columns]
    result.rename({'name': 'uniprot'}, axis=1, inplace=True)
    result = pd.concat([result, per_cluster], axis=1, join='inner', sort=False)
    result.reset_index(inplace=True)
    result.drop(columns='gene', inplace=True)
    return result
def deconvolute_interaction_component(interactions, suffix, counts_data):
    """Extract per-gene rows for the simple (non-complex) partner of each interaction.

    Rows whose partner ``suffix`` ('_1' or '_2') is a complex are dropped;
    the remaining suffixed columns are copied under their unsuffixed names.
    ``complex_name`` is NaN for simple partners.
    """
    simple_only = interactions[~interactions['is_complex' + suffix]]
    result = pd.DataFrame()
    result['gene'] = simple_only[counts_data + suffix]
    source_columns = ['protein_name' + suffix, 'gene_name' + suffix, 'name' + suffix,
                      'is_complex' + suffix, 'id_cp_interaction', 'receptor' + suffix]
    target_columns = ['protein_name', 'gene_name', 'name', 'is_complex', 'id_cp_interaction', 'receptor']
    result[target_columns] = simple_only[source_columns]
    result['complex_name'] = np.nan
    return result
def deconvolute_complex_interaction_component(complex_compositions, genes_filtered, interactions, suffix, counts_data):
    """Expand the complex partner of each interaction into one row per member protein.

    For the interaction partner selected by ``suffix`` ('_1' or '_2'), joins
    the interaction's multidata id against ``complex_compositions`` and then
    against ``genes_filtered``, producing one output row per member protein
    of each complex. ``complex_name`` carries the complex's own name, while
    gene-level columns come from the member protein.

    Fix: the original final line carried stray packaging text
    ("| PypiClean") fused onto the return statement, which raised a
    NameError at runtime; the function now simply returns the result.

    :param complex_compositions: rows with ``complex_multidata_id`` and
        ``protein_multidata_id`` linking complexes to member proteins.
    :param genes_filtered: gene metadata restricted to genes present in counts.
    :param interactions: interaction table with suffixed partner columns.
    :param suffix: '_1' or '_2', selecting which partner to expand.
    :param counts_data: gene id column in use (e.g. 'ensembl'/'gene_name').
    :return: DataFrame with columns gene, protein_name, gene_name, name,
        is_complex, id_cp_interaction, receptor, complex_name.
    """
    deconvoluted_result = pd.DataFrame()
    # Project the suffixed partner columns onto unsuffixed names.
    component = pd.DataFrame()
    component[counts_data] = interactions['{}{}'.format(counts_data, suffix)]
    component[['protein_name', 'gene_name', 'name', 'is_complex', 'id_cp_interaction', 'id_multidata', 'receptor']] = \
        interactions[
            ['protein_name{}'.format(suffix), 'gene_name{}'.format(suffix),
             'name{}'.format(suffix), 'is_complex{}'.format(suffix), 'id_cp_interaction',
             'id_multidata{}'.format(suffix), 'receptor{}'.format(suffix)]]
    # One row per member protein of each complex partner.
    deconvolution_complex = pd.merge(complex_compositions, component, left_on='complex_multidata_id',
                                     right_on='id_multidata')
    # '_complex' suffixes come from the complex side, '_simple' from the gene side.
    deconvolution_complex = pd.merge(deconvolution_complex, genes_filtered, left_on='protein_multidata_id',
                                     right_on='protein_multidata_id', suffixes=['_complex', '_simple'])
    deconvoluted_result['gene'] = deconvolution_complex['{}_simple'.format(counts_data)]
    deconvoluted_result[
        ['protein_name', 'gene_name', 'name', 'is_complex', 'id_cp_interaction', 'receptor', 'complex_name']] = \
        deconvolution_complex[
            ['protein_name_simple', 'gene_name_simple', 'name_simple',
             'is_complex_complex', 'id_cp_interaction', 'receptor_simple', 'name_complex']]
    return deconvoluted_result
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/mask_head.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.predictors.heads import head
from object_detection.utils import ops
class MaskRCNNMaskHead(head.Head):
  """Mask RCNN mask prediction head.
  Please refer to Mask RCNN paper:
  https://arxiv.org/abs/1703.06870
  """

  def __init__(self,
               num_classes,
               conv_hyperparams_fn=None,
               mask_height=14,
               mask_width=14,
               mask_prediction_num_conv_layers=2,
               mask_prediction_conv_depth=256,
               masks_are_class_agnostic=False,
               convolve_then_upsample=False):
    """Constructor.
    Args:
      num_classes: number of classes.  Note that num_classes *does not*
        include the background category, so if groundtruth labels take values
        in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
        assigned classification targets can range from {0,... K}).
      conv_hyperparams_fn: A function to generate tf-slim arg_scope with
        hyperparameters for convolution ops.
      mask_height: Desired output mask height. The default value is 14.
      mask_width: Desired output mask width. The default value is 14.
      mask_prediction_num_conv_layers: Number of convolution layers applied to
        the image_features in mask prediction branch.
      mask_prediction_conv_depth: The depth for the first conv2d_transpose op
        applied to the image_features in the mask prediction branch. If set
        to 0, the depth of the convolution layers will be automatically chosen
        based on the number of object classes and the number of channels in the
        image features.
      masks_are_class_agnostic: Boolean determining if the mask-head is
        class-agnostic or not.
      convolve_then_upsample: Whether to apply convolutions on mask features
        before upsampling using nearest neighbor resizing. Otherwise, mask
        features are resized to [`mask_height`, `mask_width`] using bilinear
        resizing before applying convolutions.
    Raises:
      ValueError: conv_hyperparams_fn is None.
    """
    super(MaskRCNNMaskHead, self).__init__()
    self._num_classes = num_classes
    self._conv_hyperparams_fn = conv_hyperparams_fn
    self._mask_height = mask_height
    self._mask_width = mask_width
    self._mask_prediction_num_conv_layers = mask_prediction_num_conv_layers
    self._mask_prediction_conv_depth = mask_prediction_conv_depth
    self._masks_are_class_agnostic = masks_are_class_agnostic
    self._convolve_then_upsample = convolve_then_upsample
    # conv_hyperparams_fn is required; the keyword default of None exists only
    # so callers can pass arguments by name.
    if conv_hyperparams_fn is None:
      raise ValueError('conv_hyperparams_fn is None.')

  def _get_mask_predictor_conv_depth(self,
                                     num_feature_channels,
                                     num_classes,
                                     class_weight=3.0,
                                     feature_weight=2.0):
    """Computes the depth of the mask predictor convolutions.
    Computes the depth of the mask predictor convolutions given feature channels
    and number of classes by performing a weighted average of the two in
    log space to compute the number of convolution channels. The weights that
    are used for computing the weighted average do not need to sum to 1.
    Args:
      num_feature_channels: An integer containing the number of feature
        channels.
      num_classes: An integer containing the number of classes.
      class_weight: Class weight used in computing the weighted average.
      feature_weight: Feature weight used in computing the weighted average.
    Returns:
      An integer containing the number of convolution channels used by mask
        predictor.
    """
    # Average in log2 space so the result interpolates geometrically between
    # the channel count and the class count, then round back to a power of 2.
    num_feature_channels_log = math.log(float(num_feature_channels), 2.0)
    num_classes_log = math.log(float(num_classes), 2.0)
    weighted_num_feature_channels_log = (
        num_feature_channels_log * feature_weight)
    weighted_num_classes_log = num_classes_log * class_weight
    total_weight = feature_weight + class_weight
    num_conv_channels_log = round(
        (weighted_num_feature_channels_log + weighted_num_classes_log) /
        total_weight)
    return int(math.pow(2.0, num_conv_channels_log))

  def predict(self, features, num_predictions_per_location=1):
    """Performs mask prediction.
    Args:
      features: A float tensor of shape [batch_size, height, width, channels]
        containing features for a batch of images.
      num_predictions_per_location: Int containing number of predictions per
        location.
    Returns:
      instance_masks: A float tensor of shape
        [batch_size, 1, num_classes, mask_height, mask_width].
    Raises:
      ValueError: If num_predictions_per_location is not 1.
    """
    if num_predictions_per_location != 1:
      raise ValueError('Only num_predictions_per_location=1 is supported')
    num_conv_channels = self._mask_prediction_conv_depth
    if num_conv_channels == 0:
      # Depth 0 means "choose automatically" from channels and class count.
      num_feature_channels = features.get_shape().as_list()[3]
      num_conv_channels = self._get_mask_predictor_conv_depth(
          num_feature_channels, self._num_classes)
    with slim.arg_scope(self._conv_hyperparams_fn()):
      if not self._convolve_then_upsample:
        # Resize first, then convolve at the target mask resolution.
        features = tf.image.resize_bilinear(
            features, [self._mask_height, self._mask_width],
            align_corners=True)
      for _ in range(self._mask_prediction_num_conv_layers - 1):
        features = slim.conv2d(
            features,
            num_outputs=num_conv_channels,
            kernel_size=[3, 3])
      if self._convolve_then_upsample:
        # Replace Transposed Convolution with a Nearest Neighbor upsampling step
        # followed by 3x3 convolution.
        # NOTE(review): assumes static spatial dims that evenly divide the
        # target mask size (TF1 Dimension `.value` access) — confirm upstream.
        height_scale = self._mask_height // features.shape[1].value
        width_scale = self._mask_width // features.shape[2].value
        features = ops.nearest_neighbor_upsampling(
            features, height_scale=height_scale, width_scale=width_scale)
        features = slim.conv2d(
            features,
            num_outputs=num_conv_channels,
            kernel_size=[3, 3])
      # Final projection: one output channel per mask (no activation/norm).
      num_masks = 1 if self._masks_are_class_agnostic else self._num_classes
      mask_predictions = slim.conv2d(
          features,
          num_outputs=num_masks,
          activation_fn=None,
          normalizer_fn=None,
          kernel_size=[3, 3])
    # NHWC -> [batch, masks, H, W], plus a singleton "boxes" dimension.
    return tf.expand_dims(
        tf.transpose(mask_predictions, perm=[0, 3, 1, 2]),
        axis=1,
        name='MaskPredictor')
class ConvolutionalMaskHead(head.Head):
  """Convolutional mask prediction head.

  Predicts one flattened mask per anchor using a single (optionally
  depthwise-separable) convolution over the feature map.
  """

  def __init__(self,
               is_training,
               num_classes,
               use_dropout,
               dropout_keep_prob,
               kernel_size,
               use_depthwise=False,
               mask_height=7,
               mask_width=7,
               masks_are_class_agnostic=False):
    """Constructor.

    Args:
      is_training: Indicates whether the BoxPredictor is in training mode.
      num_classes: Number of classes.
      use_dropout: Option to use dropout or not.  Note that a single dropout
        op is applied here prior to both box and class predictions, which stands
        in contrast to the ConvolutionalBoxPredictor below.
      dropout_keep_prob: Keep probability for dropout.
        This is only used if use_dropout is True.
      kernel_size: Size of final convolution kernel.  If the
        spatial resolution of the feature map is smaller than the kernel size,
        then the kernel size is automatically set to be
        min(feature_width, feature_height).
      use_depthwise: Whether to use depthwise convolutions for prediction
        steps. Default is False.
      mask_height: Desired output mask height. The default value is 7.
      mask_width: Desired output mask width. The default value is 7.
      masks_are_class_agnostic: Boolean determining if the mask-head is
        class-agnostic or not.

    Raises:
      ValueError: if min_depth > max_depth.
    """
    super(ConvolutionalMaskHead, self).__init__()
    self._is_training = is_training
    self._num_classes = num_classes
    self._use_dropout = use_dropout
    self._dropout_keep_prob = dropout_keep_prob
    self._kernel_size = kernel_size
    self._use_depthwise = use_depthwise
    self._mask_height = mask_height
    self._mask_width = mask_width
    self._masks_are_class_agnostic = masks_are_class_agnostic

  def predict(self, features, num_predictions_per_location):
    """Predicts instance masks.

    Args:
      features: A float tensor of shape [batch_size, height, width, channels]
        containing image features.
      num_predictions_per_location: Number of mask predictions to be made per
        spatial location.

    Returns:
      mask_predictions: A float tensor of shape
        [batch_size, num_anchors, num_masks, mask_height, mask_width]
        representing the mask predictions for the proposals.
    """
    # A single shared mask when class-agnostic, otherwise one per class.
    num_masks = 1 if self._masks_are_class_agnostic else self._num_classes
    # Each mask is emitted as a flattened block of channels of this size.
    num_mask_channels = num_masks * self._mask_height * self._mask_width
    net = features
    if self._use_dropout:
      net = slim.dropout(net, keep_prob=self._dropout_keep_prob)
    final_conv_kwargs = dict(
        activation_fn=None,
        normalizer_fn=None,
        normalizer_params=None,
        scope='MaskPredictor')
    if self._use_depthwise:
      # Depthwise spatial filtering followed by a 1x1 pointwise projection.
      net = slim.separable_conv2d(
          net, None, [self._kernel_size, self._kernel_size],
          padding='SAME', depth_multiplier=1, stride=1,
          rate=1, scope='MaskPredictor_depthwise')
      mask_predictions = slim.conv2d(
          net,
          num_predictions_per_location * num_mask_channels,
          [1, 1],
          **final_conv_kwargs)
    else:
      mask_predictions = slim.conv2d(
          net,
          num_predictions_per_location * num_mask_channels,
          [self._kernel_size, self._kernel_size],
          **final_conv_kwargs)
    batch_size = features.get_shape().as_list()[0]
    if batch_size is None:
      # Fall back to the dynamic batch dimension when it is not static.
      batch_size = tf.shape(features)[0]
    return tf.reshape(
        mask_predictions,
        [batch_size, -1, num_masks, self._mask_height, self._mask_width])
# TODO(alirezafathi): See if possible to unify Weight Shared with regular
# convolutional mask head.
class WeightSharedConvolutionalMaskHead(head.Head):
  """Weight shared convolutional mask prediction head."""

  def __init__(self,
               num_classes,
               kernel_size=3,
               use_dropout=False,
               dropout_keep_prob=0.8,
               mask_height=7,
               mask_width=7,
               masks_are_class_agnostic=False):
    """Constructor.

    Args:
      num_classes: number of classes.  Note that num_classes *does not*
        include the background category, so if groundtruth labels take values
        in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
        assigned classification targets can range from {0,... K}).
      kernel_size: Size of final convolution kernel.
      use_dropout: Whether to apply dropout to class prediction head.
      dropout_keep_prob: Probability of keeping activations.
      mask_height: Desired output mask height. The default value is 7.
      mask_width: Desired output mask width. The default value is 7.
      masks_are_class_agnostic: Boolean determining if the mask-head is
        class-agnostic or not.
    """
    super(WeightSharedConvolutionalMaskHead, self).__init__()
    self._num_classes = num_classes
    self._kernel_size = kernel_size
    self._use_dropout = use_dropout
    self._dropout_keep_prob = dropout_keep_prob
    self._mask_height = mask_height
    self._mask_width = mask_width
    self._masks_are_class_agnostic = masks_are_class_agnostic

  def predict(self, features, num_predictions_per_location):
    """Predicts instance masks.

    Args:
      features: A float tensor of shape [batch_size, height, width, channels]
        containing image features.
      num_predictions_per_location: Number of mask predictions to be made per
        spatial location.

    Returns:
      mask_predictions: A tensor of shape
        [batch_size, num_anchors, num_masks, mask_height, mask_width]
        representing the mask predictions for the proposals.
    """
    mask_predictions_net = features
    # A single shared mask when class-agnostic, otherwise one per class.
    if self._masks_are_class_agnostic:
      num_masks = 1
    else:
      num_masks = self._num_classes
    # Each mask is emitted as a flattened block of channels of this size.
    num_mask_channels = num_masks * self._mask_height * self._mask_width
    if self._use_dropout:
      mask_predictions_net = slim.dropout(
          mask_predictions_net, keep_prob=self._dropout_keep_prob)
    # Single shared convolution producing all flattened masks (no
    # activation/normalization on the raw logits).
    mask_predictions = slim.conv2d(
        mask_predictions_net,
        num_predictions_per_location * num_mask_channels,
        [self._kernel_size, self._kernel_size],
        activation_fn=None, stride=1, padding='SAME',
        normalizer_fn=None,
        scope='MaskPredictor')
    batch_size = features.get_shape().as_list()[0]
    if batch_size is None:
      # Fall back to the dynamic batch dimension when it is not static.
      batch_size = tf.shape(features)[0]
    mask_predictions = tf.reshape(
        mask_predictions,
        [batch_size, -1, num_masks, self._mask_height, self._mask_width])
    return mask_predictions
/CloudFerry-1.55.2.tar.gz/CloudFerry-1.55.2/cloudferry/lib/utils/mysql_connector.py |
import contextlib
import time
import pymysql
import sqlalchemy
from cloudferry.lib.utils import remote_runner
from cloudferry.lib.utils import local
ALL_DATABASES = "--all-databases"
MySQLError = pymysql.MySQLError
def get_db_host(cloud_config):
    """Return the MySQL host that mysqldump should be run against.

    When MySQL sits behind a VIP, ``config.dst_mysql.db_host`` points at the
    VIP, which breaks mysqldump runs that must target one concrete DB node.
    ``config.migrate.mysqldump_host`` overrides the host for that case.

    :returns: ``config.migrate.mysqldump_host`` when set (truthy), otherwise
        ``config.dst_mysql.db_host``.
    """
    override = cloud_config.mysqldump.mysqldump_host
    if override:
        return override
    return cloud_config.mysql.db_host
def dump_db(cloud, database=ALL_DATABASES):
    """Dump one database (or all) of the given cloud to a local file.

    Builds a ``mysqldump`` command line and runs it either locally (when
    ``mysqldump.run_mysqldump_locally`` is set) or on the DB host over SSH,
    then writes the captured output to ``mysqldump.db_dump_filename``.

    :param cloud: cloud object exposing ``cloud_config`` and ``position``.
    :param database: database name, or ALL_DATABASES to dump everything.
    """
    cmd = ["mysqldump {database}",
           "--user={user}"]
    # Only pass --password when one is configured; an empty --password would
    # make mysqldump prompt interactively.
    if cloud.cloud_config.mysql.db_password:
        cmd.append("--password={password}")
    db_host = get_db_host(cloud.cloud_config)
    if cloud.cloud_config.mysqldump.run_mysqldump_locally:
        # Local run connects over TCP, so host and port must be explicit.
        cmd.append("--port={port}")
        cmd.append("--host={host}")
        run = local.run
    else:
        # Remote run: execute mysqldump on the DB host itself via SSH.
        rr = remote_runner.RemoteRunner(
            db_host, cloud.cloud_config.cloud.ssh_user,
            password=cloud.cloud_config.cloud.ssh_sudo_password,
            mute_stdout=True)
        run = rr.run
    dump = run(' '.join(cmd).format(
        database=database,
        user=cloud.cloud_config.mysql.db_user,
        password=cloud.cloud_config.mysql.db_password,
        port=cloud.cloud_config.mysql.db_port,
        host=cloud.cloud_config.mysql.db_host))
    # The filename template may reference {database}, {time} and {position}.
    filename = cloud.cloud_config.mysqldump.db_dump_filename
    with open(filename.format(database=('all_databases'
                                        if database == ALL_DATABASES
                                        else database),
                              time=time.time(),
                              position=cloud.position), 'w') as f:
        f.write(dump)
class MysqlConnector(object):
    """Thin SQLAlchemy-based helper for running statements on one MySQL DB.

    A fresh engine/connection is opened per transaction; calls made while a
    transaction is already open reuse the active connection (re-entrant).

    Fix: the original last line carried stray packaging text ("| PypiClean")
    fused onto ``self._connection = None`` inside the ``finally`` clause,
    which raised a NameError on every transaction exit; the cleanup now
    simply clears the cached connection.
    """

    def __init__(self, config, db):
        """:param config: mapping with 'db_connection', 'db_user',
            'db_password', 'db_host' and 'db_port' keys.
        :param db: database (schema) name to connect to.
        """
        self.config = config
        self.db = db
        self.connection_url = self.compose_connection_url()
        # Active connection; set only while inside transaction().
        self._connection = None

    def compose_connection_url(self):
        """Build an SQLAlchemy URL, e.g. ``mysql+pymysql://u:p@host:port/db``."""
        return '{}://{}:{}@{}:{}/{}'.format(self.config['db_connection'],
                                            self.config['db_user'],
                                            self.config['db_password'],
                                            self.config['db_host'],
                                            self.config['db_port'],
                                            self.db)

    def get_engine(self):
        """Create a new SQLAlchemy engine for this connection URL."""
        return sqlalchemy.create_engine(self.connection_url)

    def execute(self, command, **kwargs):
        """Execute a single SQL statement and return its result."""
        with self.transaction() as connection:
            return connection.execute(sqlalchemy.text(command), **kwargs)

    def batch_execute(self, commands, **kwargs):
        """Execute several SQL statements within one transaction."""
        with self.transaction() as connection:
            for command in commands:
                connection.execute(sqlalchemy.text(command), **kwargs)

    @contextlib.contextmanager
    def transaction(self):
        """Yield a connection wrapped in a transaction.

        Re-entrant: when a transaction is already open, the existing
        connection is yielded instead of opening a new one.
        """
        if self._connection:
            yield self._connection
        else:
            with self.get_engine().begin() as conn:
                self._connection = conn
                try:
                    yield conn
                finally:
                    # Always clear the cached connection, even on error.
                    self._connection = None
/Dominate_Layui-2020.6.25.post1-py3-none-any.whl/Dominate_Layui/static/layui/lay/modules/laydate.js | ;!function(){"use strict";var e=window.layui&&layui.define,t={getPath:function(){var e=document.currentScript?document.currentScript.src:function(){for(var e,t=document.scripts,n=t.length-1,a=n;a>0;a--)if("interactive"===t[a].readyState){e=t[a].src;break}return e||t[n].src}();return e.substring(0,e.lastIndexOf("/")+1)}(),getStyle:function(e,t){var n=e.currentStyle?e.currentStyle:window.getComputedStyle(e,null);return n[n.getPropertyValue?"getPropertyValue":"getAttribute"](t)},link:function(e,a,i){if(n.path){var r=document.getElementsByTagName("head")[0],o=document.createElement("link");"string"==typeof a&&(i=a);var s=(i||e).replace(/\.|\//g,""),l="layuicss-"+s,d=0;o.rel="stylesheet",o.href=n.path+e,o.id=l,document.getElementById(l)||r.appendChild(o),"function"==typeof a&&!function c(){return++d>80?window.console&&console.error("laydate.css: Invalid"):void(1989===parseInt(t.getStyle(document.getElementById(l),"width"))?a():setTimeout(c,100))}()}}},n={v:"5.0.9",config:{},index:window.laydate&&window.laydate.v?1e5:0,path:t.getPath,set:function(e){var t=this;return t.config=w.extend({},t.config,e),t},ready:function(a){var i="laydate",r="",o=(e?"modules/laydate/":"theme/")+"default/laydate.css?v="+n.v+r;return e?layui.addcss(o,a,i):t.link(o,a,i),this}},a=function(){var e=this;return{hint:function(t){e.hint.call(e,t)},config:e.config}},i="laydate",r=".layui-laydate",o="layui-this",s="laydate-disabled",l="开始日期超出了结束日期<br>建议重新选择",d=[100,2e5],c="layui-laydate-static",m="layui-laydate-list",u="laydate-selected",h="layui-laydate-hint",y="laydate-day-prev",f="laydate-day-next",p="layui-laydate-footer",g=".laydate-btns-confirm",v="laydate-time-text",D=".laydate-btns-time",T=function(e){var t=this;t.index=++n.index,t.config=w.extend({},t.config,n.config,e),n.ready(function(){t.init()})},w=function(e){return new C(e)},C=function(e){for(var t=0,n="object"==typeof 
e?[e]:(this.selector=e,document.querySelectorAll(e||null));t<n.length;t++)this.push(n[t])};C.prototype=[],C.prototype.constructor=C,w.extend=function(){var e=1,t=arguments,n=function(e,t){e=e||(t.constructor===Array?[]:{});for(var a in t)e[a]=t[a]&&t[a].constructor===Object?n(e[a],t[a]):t[a];return e};for(t[0]="object"==typeof t[0]?t[0]:{};e<t.length;e++)"object"==typeof t[e]&&n(t[0],t[e]);return t[0]},w.ie=function(){var e=navigator.userAgent.toLowerCase();return!!(window.ActiveXObject||"ActiveXObject"in window)&&((e.match(/msie\s(\d+)/)||[])[1]||"11")}(),w.stope=function(e){e=e||window.event,e.stopPropagation?e.stopPropagation():e.cancelBubble=!0},w.each=function(e,t){var n,a=this;if("function"!=typeof t)return a;if(e=e||[],e.constructor===Object){for(n in e)if(t.call(e[n],n,e[n]))break}else for(n=0;n<e.length&&!t.call(e[n],n,e[n]);n++);return a},w.digit=function(e,t,n){var a="";e=String(e),t=t||2;for(var i=e.length;i<t;i++)a+="0";return e<Math.pow(10,t)?a+(0|e):e},w.elem=function(e,t){var n=document.createElement(e);return w.each(t||{},function(e,t){n.setAttribute(e,t)}),n},C.addStr=function(e,t){return e=e.replace(/\s+/," "),t=t.replace(/\s+/," ").split(" "),w.each(t,function(t,n){new RegExp("\\b"+n+"\\b").test(e)||(e=e+" "+n)}),e.replace(/^\s|\s$/,"")},C.removeStr=function(e,t){return e=e.replace(/\s+/," "),t=t.replace(/\s+/," ").split(" "),w.each(t,function(t,n){var a=new RegExp("\\b"+n+"\\b");a.test(e)&&(e=e.replace(a,""))}),e.replace(/\s+/," ").replace(/^\s|\s$/,"")},C.prototype.find=function(e){var t=this,n=0,a=[],i="object"==typeof e;return this.each(function(r,o){for(var s=i?[e]:o.querySelectorAll(e||null);n<s.length;n++)a.push(s[n]);t.shift()}),i||(t.selector=(t.selector?t.selector+" ":"")+e),w.each(a,function(e,n){t.push(n)}),t},C.prototype.each=function(e){return w.each.call(this,this,e)},C.prototype.addClass=function(e,t){return this.each(function(n,a){a.className=C[t?"removeStr":"addStr"](a.className,e)})},C.prototype.removeClass=function(e){return 
this.addClass(e,!0)},C.prototype.hasClass=function(e){var t=!1;return this.each(function(n,a){new RegExp("\\b"+e+"\\b").test(a.className)&&(t=!0)}),t},C.prototype.attr=function(e,t){var n=this;return void 0===t?function(){if(n.length>0)return n[0].getAttribute(e)}():n.each(function(n,a){a.setAttribute(e,t)})},C.prototype.removeAttr=function(e){return this.each(function(t,n){n.removeAttribute(e)})},C.prototype.html=function(e){return this.each(function(t,n){n.innerHTML=e})},C.prototype.val=function(e){return this.each(function(t,n){n.value=e})},C.prototype.append=function(e){return this.each(function(t,n){"object"==typeof e?n.appendChild(e):n.innerHTML=n.innerHTML+e})},C.prototype.remove=function(e){return this.each(function(t,n){e?n.removeChild(e):n.parentNode.removeChild(n)})},C.prototype.on=function(e,t){return this.each(function(n,a){a.attachEvent?a.attachEvent("on"+e,function(e){e.target=e.srcElement,t.call(a,e)}):a.addEventListener(e,t,!1)})},C.prototype.off=function(e,t){return this.each(function(n,a){a.detachEvent?a.detachEvent("on"+e,t):a.removeEventListener(e,t,!1)})},T.isLeapYear=function(e){return e%4===0&&e%100!==0||e%400===0},T.prototype.config={type:"date",range:!1,format:"yyyy-MM-dd",value:null,isInitValue:!0,min:"1900-1-1",max:"2099-12-31",trigger:"focus",show:!1,showBottom:!0,btns:["clear","now","confirm"],lang:"cn",theme:"default",position:null,calendar:!1,mark:{},zIndex:null,done:null,change:null},T.prototype.lang=function(){var e=this,t=e.config,n={cn:{weeks:["日","一","二","三","四","五","六"],time:["时","分","秒"],timeTips:"选择时间",startTime:"开始时间",endTime:"结束时间",dateTips:"返回日期",month:["一","二","三","四","五","六","七","八","九","十","十一","十二"],tools:{confirm:"确定",clear:"清空",now:"现在"}},en:{weeks:["Su","Mo","Tu","We","Th","Fr","Sa"],time:["Hours","Minutes","Seconds"],timeTips:"Select Time",startTime:"Start Time",endTime:"End Time",dateTips:"Select 
Date",month:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],tools:{confirm:"Confirm",clear:"Clear",now:"Now"}}};return n[t.lang]||n.cn},T.prototype.init=function(){var e=this,t=e.config,n="yyyy|y|MM|M|dd|d|HH|H|mm|m|ss|s",a="static"===t.position,i={year:"yyyy",month:"yyyy-MM",date:"yyyy-MM-dd",time:"HH:mm:ss",datetime:"yyyy-MM-dd HH:mm:ss"};t.elem=w(t.elem),t.eventElem=w(t.eventElem),t.elem[0]&&(t.range===!0&&(t.range="-"),t.format===i.date&&(t.format=i[t.type]),e.format=t.format.match(new RegExp(n+"|.","g"))||[],e.EXP_IF="",e.EXP_SPLIT="",w.each(e.format,function(t,a){var i=new RegExp(n).test(a)?"\\d{"+function(){return new RegExp(n).test(e.format[0===t?t+1:t-1]||"")?/^yyyy|y$/.test(a)?4:a.length:/^yyyy$/.test(a)?"1,4":/^y$/.test(a)?"1,308":"1,2"}()+"}":"\\"+a;e.EXP_IF=e.EXP_IF+i,e.EXP_SPLIT=e.EXP_SPLIT+"("+i+")"}),e.EXP_IF=new RegExp("^"+(t.range?e.EXP_IF+"\\s\\"+t.range+"\\s"+e.EXP_IF:e.EXP_IF)+"$"),e.EXP_SPLIT=new RegExp("^"+e.EXP_SPLIT+"$",""),e.isInput(t.elem[0])||"focus"===t.trigger&&(t.trigger="click"),t.elem.attr("lay-key")||(t.elem.attr("lay-key",e.index),t.eventElem.attr("lay-key",e.index)),t.mark=w.extend({},t.calendar&&"cn"===t.lang?{"0-1-1":"元旦","0-2-14":"情人","0-3-8":"妇女","0-3-12":"植树","0-4-1":"愚人","0-5-1":"劳动","0-5-4":"青年","0-6-1":"儿童","0-9-10":"教师","0-9-18":"国耻","0-10-1":"国庆","0-12-25":"圣诞"}:{},t.mark),w.each(["min","max"],function(e,n){var a=[],i=[];if("number"==typeof t[n]){var r=t[n],o=(new Date).getTime(),s=864e5,l=new Date(r?r<s?o+r*s:r:o);a=[l.getFullYear(),l.getMonth()+1,l.getDate()],r<s||(i=[l.getHours(),l.getMinutes(),l.getSeconds()])}else a=(t[n].match(/\d+-\d+-\d+/)||[""])[0].split("-"),i=(t[n].match(/\d+:\d+:\d+/)||[""])[0].split(":");t[n]={year:0|a[0]||(new Date).getFullYear(),month:a[1]?(0|a[1])-1:(new Date).getMonth(),date:0|a[2]||(new 
Date).getDate(),hours:0|i[0],minutes:0|i[1],seconds:0|i[2]}}),e.elemID="layui-laydate"+t.elem.attr("lay-key"),(t.show||a)&&e.render(),a||e.events(),t.value&&t.isInitValue&&(t.value.constructor===Date?e.setValue(e.parse(0,e.systemDate(t.value))):e.setValue(t.value)))},T.prototype.render=function(){var e=this,t=e.config,n=e.lang(),a="static"===t.position,i=e.elem=w.elem("div",{id:e.elemID,"class":["layui-laydate",t.range?" layui-laydate-range":"",a?" "+c:"",t.theme&&"default"!==t.theme&&!/^#/.test(t.theme)?" laydate-theme-"+t.theme:""].join("")}),r=e.elemMain=[],o=e.elemHeader=[],s=e.elemCont=[],l=e.table=[],d=e.footer=w.elem("div",{"class":p});if(t.zIndex&&(i.style.zIndex=t.zIndex),w.each(new Array(2),function(e){if(!t.range&&e>0)return!0;var a=w.elem("div",{"class":"layui-laydate-header"}),i=[function(){var e=w.elem("i",{"class":"layui-icon laydate-icon laydate-prev-y"});return e.innerHTML="",e}(),function(){var e=w.elem("i",{"class":"layui-icon laydate-icon laydate-prev-m"});return e.innerHTML="",e}(),function(){var e=w.elem("div",{"class":"laydate-set-ym"}),t=w.elem("span"),n=w.elem("span");return e.appendChild(t),e.appendChild(n),e}(),function(){var e=w.elem("i",{"class":"layui-icon laydate-icon laydate-next-m"});return e.innerHTML="",e}(),function(){var e=w.elem("i",{"class":"layui-icon laydate-icon laydate-next-y"});return e.innerHTML="",e}()],d=w.elem("div",{"class":"layui-laydate-content"}),c=w.elem("table"),m=w.elem("thead"),u=w.elem("tr");w.each(i,function(e,t){a.appendChild(t)}),m.appendChild(u),w.each(new Array(6),function(e){var t=c.insertRow(0);w.each(new Array(7),function(a){if(0===e){var i=w.elem("th");i.innerHTML=n.weeks[a],u.appendChild(i)}t.insertCell(a)})}),c.insertBefore(m,c.children[0]),d.appendChild(c),r[e]=w.elem("div",{"class":"layui-laydate-main laydate-main-list-"+e}),r[e].appendChild(a),r[e].appendChild(d),o.push(i),s.push(d),l.push(c)}),w(d).html(function(){var e=[],i=[];return"datetime"===t.type&&e.push('<span lay-type="datetime" 
class="laydate-btns-time">'+n.timeTips+"</span>"),w.each(t.btns,function(e,r){var o=n.tools[r]||"btn";t.range&&"now"===r||(a&&"clear"===r&&(o="cn"===t.lang?"重置":"Reset"),i.push('<span lay-type="'+r+'" class="laydate-btns-'+r+'">'+o+"</span>"))}),e.push('<div class="laydate-footer-btns">'+i.join("")+"</div>"),e.join("")}()),w.each(r,function(e,t){i.appendChild(t)}),t.showBottom&&i.appendChild(d),/^#/.test(t.theme)){var m=w.elem("style"),u=["#{{id}} .layui-laydate-header{background-color:{{theme}};}","#{{id}} .layui-this{background-color:{{theme}} !important;}"].join("").replace(/{{id}}/g,e.elemID).replace(/{{theme}}/g,t.theme);"styleSheet"in m?(m.setAttribute("type","text/css"),m.styleSheet.cssText=u):m.innerHTML=u,w(i).addClass("laydate-theme-molv"),i.appendChild(m)}e.remove(T.thisElemDate),a?t.elem.append(i):(document.body.appendChild(i),e.position()),e.checkDate().calendar(),e.changeEvent(),T.thisElemDate=e.elemID,"function"==typeof t.ready&&t.ready(w.extend({},t.dateTime,{month:t.dateTime.month+1}))},T.prototype.remove=function(e){var t=this,n=(t.config,w("#"+(e||t.elemID)));return n.hasClass(c)||t.checkDate(function(){n.remove()}),t},T.prototype.position=function(){var e=this,t=e.config,n=e.bindElem||t.elem[0],a=n.getBoundingClientRect(),i=e.elem.offsetWidth,r=e.elem.offsetHeight,o=function(e){return e=e?"scrollLeft":"scrollTop",document.body[e]|document.documentElement[e]},s=function(e){return document.documentElement[e?"clientWidth":"clientHeight"]},l=5,d=a.left,c=a.bottom;d+i+l>s("width")&&(d=s("width")-i-l),c+r+l>s()&&(c=a.top>r?a.top-r:s()-r,c-=2*l),t.position&&(e.elem.style.position=t.position),e.elem.style.left=d+("fixed"===t.position?0:o(1))+"px",e.elem.style.top=c+("fixed"===t.position?0:o())+"px"},T.prototype.hint=function(e){var 
t=this,n=(t.config,w.elem("div",{"class":h}));t.elem&&(n.innerHTML=e||"",w(t.elem).find("."+h).remove(),t.elem.appendChild(n),clearTimeout(t.hinTimer),t.hinTimer=setTimeout(function(){w(t.elem).find("."+h).remove()},3e3))},T.prototype.getAsYM=function(e,t,n){return n?t--:t++,t<0&&(t=11,e--),t>11&&(t=0,e++),[e,t]},T.prototype.systemDate=function(e){var t=e||new Date;return{year:t.getFullYear(),month:t.getMonth(),date:t.getDate(),hours:e?e.getHours():0,minutes:e?e.getMinutes():0,seconds:e?e.getSeconds():0}},T.prototype.checkDate=function(e){var t,a,i=this,r=(new Date,i.config),o=r.dateTime=r.dateTime||i.systemDate(),s=i.bindElem||r.elem[0],l=(i.isInput(s)?"val":"html",i.isInput(s)?s.value:"static"===r.position?"":s.innerHTML),c=function(e){e.year>d[1]&&(e.year=d[1],a=!0),e.month>11&&(e.month=11,a=!0),e.hours>23&&(e.hours=0,a=!0),e.minutes>59&&(e.minutes=0,e.hours++,a=!0),e.seconds>59&&(e.seconds=0,e.minutes++,a=!0),t=n.getEndDate(e.month+1,e.year),e.date>t&&(e.date=t,a=!0)},m=function(e,t,n){var o=["startTime","endTime"];t=(t.match(i.EXP_SPLIT)||[]).slice(1),n=n||0,r.range&&(i[o[n]]=i[o[n]]||{}),w.each(i.format,function(s,l){var c=parseFloat(t[s]);t[s].length<l.length&&(a=!0),/yyyy|y/.test(l)?(c<d[0]&&(c=d[0],a=!0),e.year=c):/MM|M/.test(l)?(c<1&&(c=1,a=!0),e.month=c-1):/dd|d/.test(l)?(c<1&&(c=1,a=!0),e.date=c):/HH|H/.test(l)?(c<1&&(c=0,a=!0),e.hours=c,r.range&&(i[o[n]].hours=c)):/mm|m/.test(l)?(c<1&&(c=0,a=!0),e.minutes=c,r.range&&(i[o[n]].minutes=c)):/ss|s/.test(l)&&(c<1&&(c=0,a=!0),e.seconds=c,r.range&&(i[o[n]].seconds=c))}),c(e)};return"limit"===e?(c(o),i):(l=l||r.value,"string"==typeof l&&(l=l.replace(/\s+/g," ").replace(/^\s|\s$/g,"")),i.startState&&!i.endState&&(delete i.startState,i.endState=!0),"string"==typeof l&&l?i.EXP_IF.test(l)?r.range?(l=l.split(" "+r.range+" 
"),i.startDate=i.startDate||i.systemDate(),i.endDate=i.endDate||i.systemDate(),r.dateTime=w.extend({},i.startDate),w.each([i.startDate,i.endDate],function(e,t){m(t,l[e],e)})):m(o,l):(i.hint("日期格式不合法<br>必须遵循下述格式:<br>"+(r.range?r.format+" "+r.range+" "+r.format:r.format)+"<br>已为你重置"),a=!0):l&&l.constructor===Date?r.dateTime=i.systemDate(l):(r.dateTime=i.systemDate(),delete i.startState,delete i.endState,delete i.startDate,delete i.endDate,delete i.startTime,delete i.endTime),c(o),a&&l&&i.setValue(r.range?i.endDate?i.parse():"":i.parse()),e&&e(),i)},T.prototype.mark=function(e,t){var n,a=this,i=a.config;return w.each(i.mark,function(e,a){var i=e.split("-");i[0]!=t[0]&&0!=i[0]||i[1]!=t[1]&&0!=i[1]||i[2]!=t[2]||(n=a||t[2])}),n&&e.html('<span class="laydate-day-mark">'+n+"</span>"),a},T.prototype.limit=function(e,t,n,a){var i,r=this,o=r.config,l={},d=o[n>41?"endDate":"dateTime"],c=w.extend({},d,t||{});return w.each({now:c,min:o.min,max:o.max},function(e,t){l[e]=r.newDate(w.extend({year:t.year,month:t.month,date:t.date},function(){var e={};return w.each(a,function(n,a){e[a]=t[a]}),e}())).getTime()}),i=l.now<l.min||l.now>l.max,e&&e[i?"addClass":"removeClass"](s),i},T.prototype.calendar=function(e){var t,a,i,r=this,s=r.config,l=e||s.dateTime,c=new Date,m=r.lang(),u="date"!==s.type&&"datetime"!==s.type,h=e?1:0,y=w(r.table[h]).find("td"),f=w(r.elemHeader[h][2]).find("span");if(l.year<d[0]&&(l.year=d[0],r.hint("最低只能支持到公元"+d[0]+"年")),l.year>d[1]&&(l.year=d[1],r.hint("最高只能支持到公元"+d[1]+"年")),r.firstDate||(r.firstDate=w.extend({},l)),c.setFullYear(l.year,l.month,1),t=c.getDay(),a=n.getEndDate(l.month||12,l.year),i=n.getEndDate(l.month+1,l.year),w.each(y,function(e,n){var 
d=[l.year,l.month],c=0;n=w(n),n.removeAttr("class"),e<t?(c=a-t+e,n.addClass("laydate-day-prev"),d=r.getAsYM(l.year,l.month,"sub")):e>=t&&e<i+t?(c=e-t,s.range||c+1===l.date&&n.addClass(o)):(c=e-i-t,n.addClass("laydate-day-next"),d=r.getAsYM(l.year,l.month)),d[1]++,d[2]=c+1,n.attr("lay-ymd",d.join("-")).html(d[2]),r.mark(n,d).limit(n,{year:d[0],month:d[1]-1,date:d[2]},e)}),w(f[0]).attr("lay-ym",l.year+"-"+(l.month+1)),w(f[1]).attr("lay-ym",l.year+"-"+(l.month+1)),"cn"===s.lang?(w(f[0]).attr("lay-type","year").html(l.year+"年"),w(f[1]).attr("lay-type","month").html(l.month+1+"月")):(w(f[0]).attr("lay-type","month").html(m.month[l.month]),w(f[1]).attr("lay-type","year").html(l.year)),u&&(s.range&&(e?r.endDate=r.endDate||{year:l.year+("year"===s.type?1:0),month:l.month+("month"===s.type?0:-1)}:r.startDate=r.startDate||{year:l.year,month:l.month},e&&(r.listYM=[[r.startDate.year,r.startDate.month+1],[r.endDate.year,r.endDate.month+1]],r.list(s.type,0).list(s.type,1),"time"===s.type?r.setBtnStatus("时间",w.extend({},r.systemDate(),r.startTime),w.extend({},r.systemDate(),r.endTime)):r.setBtnStatus(!0))),s.range||(r.listYM=[[l.year,l.month+1]],r.list(s.type,0))),s.range&&!e){var p=r.getAsYM(l.year,l.month);r.calendar(w.extend({},l,{year:p[0],month:p[1]}))}return s.range||r.limit(w(r.footer).find(g),null,0,["hours","minutes","seconds"]),s.range&&e&&!u&&r.stampRange(),r},T.prototype.list=function(e,t){var n=this,a=n.config,i=a.dateTime,r=n.lang(),l=a.range&&"date"!==a.type&&"datetime"!==a.type,d=w.elem("ul",{"class":m+" "+{year:"laydate-year-list",month:"laydate-month-list",time:"laydate-time-list"}[e]}),c=n.elemHeader[t],u=w(c[2]).find("span"),h=n.elemCont[t||0],y=w(h).find("."+m)[0],f="cn"===a.lang,p=f?"年":"",T=n.listYM[t]||{},C=["hours","minutes","seconds"],x=["startTime","endTime"][t];if(T[0]<1&&(T[0]=1),"year"===e){var M,b=M=T[0]-7;b<1&&(b=M=1),w.each(new Array(15),function(e){var 
i=w.elem("li",{"lay-ym":M}),r={year:M};M==T[0]&&w(i).addClass(o),i.innerHTML=M+p,d.appendChild(i),M<n.firstDate.year?(r.month=a.min.month,r.date=a.min.date):M>=n.firstDate.year&&(r.month=a.max.month,r.date=a.max.date),n.limit(w(i),r,t),M++}),w(u[f?0:1]).attr("lay-ym",M-8+"-"+T[1]).html(b+p+" - "+(M-1+p))}else if("month"===e)w.each(new Array(12),function(e){var i=w.elem("li",{"lay-ym":e}),s={year:T[0],month:e};e+1==T[1]&&w(i).addClass(o),i.innerHTML=r.month[e]+(f?"月":""),d.appendChild(i),T[0]<n.firstDate.year?s.date=a.min.date:T[0]>=n.firstDate.year&&(s.date=a.max.date),n.limit(w(i),s,t)}),w(u[f?0:1]).attr("lay-ym",T[0]+"-"+T[1]).html(T[0]+p);else if("time"===e){var E=function(){w(d).find("ol").each(function(e,a){w(a).find("li").each(function(a,i){n.limit(w(i),[{hours:a},{hours:n[x].hours,minutes:a},{hours:n[x].hours,minutes:n[x].minutes,seconds:a}][e],t,[["hours"],["hours","minutes"],["hours","minutes","seconds"]][e])})}),a.range||n.limit(w(n.footer).find(g),n[x],0,["hours","minutes","seconds"])};a.range?n[x]||(n[x]={hours:0,minutes:0,seconds:0}):n[x]=i,w.each([24,60,60],function(e,t){var a=w.elem("li"),i=["<p>"+r.time[e]+"</p><ol>"];w.each(new Array(t),function(t){i.push("<li"+(n[x][C[e]]===t?' 
class="'+o+'"':"")+">"+w.digit(t,2)+"</li>")}),a.innerHTML=i.join("")+"</ol>",d.appendChild(a)}),E()}if(y&&h.removeChild(y),h.appendChild(d),"year"===e||"month"===e)w(n.elemMain[t]).addClass("laydate-ym-show"),w(d).find("li").on("click",function(){var r=0|w(this).attr("lay-ym");if(!w(this).hasClass(s)){if(0===t)i[e]=r,l&&(n.startDate[e]=r),n.limit(w(n.footer).find(g),null,0);else if(l)n.endDate[e]=r;else{var c="year"===e?n.getAsYM(r,T[1]-1,"sub"):n.getAsYM(T[0],r,"sub");w.extend(i,{year:c[0],month:c[1]})}"year"===a.type||"month"===a.type?(w(d).find("."+o).removeClass(o),w(this).addClass(o),"month"===a.type&&"year"===e&&(n.listYM[t][0]=r,l&&(n[["startDate","endDate"][t]].year=r),n.list("month",t))):(n.checkDate("limit").calendar(),n.closeList()),n.setBtnStatus(),a.range||n.done(null,"change"),w(n.footer).find(D).removeClass(s)}});else{var S=w.elem("span",{"class":v}),k=function(){w(d).find("ol").each(function(e){var t=this,a=w(t).find("li");t.scrollTop=30*(n[x][C[e]]-2),t.scrollTop<=0&&a.each(function(e,n){if(!w(this).hasClass(s))return t.scrollTop=30*(e-2),!0})})},H=w(c[2]).find("."+v);k(),S.innerHTML=a.range?[r.startTime,r.endTime][t]:r.timeTips,w(n.elemMain[t]).addClass("laydate-time-show"),H[0]&&H.remove(),c[2].appendChild(S),w(d).find("ol").each(function(e){var t=this;w(t).find("li").on("click",function(){var r=0|this.innerHTML;w(this).hasClass(s)||(a.range?n[x][C[e]]=r:i[C[e]]=r,w(t).find("."+o).removeClass(o),w(this).addClass(o),E(),k(),(n.endDate||"time"===a.type)&&n.done(null,"change"),n.setBtnStatus())})})}return n},T.prototype.listYM=[],T.prototype.closeList=function(){var e=this;e.config;w.each(e.elemCont,function(t,n){w(this).find("."+m).remove(),w(e.elemMain[t]).removeClass("laydate-ym-show laydate-time-show")}),w(e.elem).find("."+v).remove()},T.prototype.setBtnStatus=function(e,t,n){var 
a,i=this,r=i.config,o=w(i.footer).find(g),d=r.range&&"date"!==r.type&&"time"!==r.type;d&&(t=t||i.startDate,n=n||i.endDate,a=i.newDate(t).getTime()>i.newDate(n).getTime(),i.limit(null,t)||i.limit(null,n)?o.addClass(s):o[a?"addClass":"removeClass"](s),e&&a&&i.hint("string"==typeof e?l.replace(/日期/g,e):l))},T.prototype.parse=function(e,t){var n=this,a=n.config,i=t||(e?w.extend({},n.endDate,n.endTime):a.range?w.extend({},n.startDate,n.startTime):a.dateTime),r=n.format.concat();return w.each(r,function(e,t){/yyyy|y/.test(t)?r[e]=w.digit(i.year,t.length):/MM|M/.test(t)?r[e]=w.digit(i.month+1,t.length):/dd|d/.test(t)?r[e]=w.digit(i.date,t.length):/HH|H/.test(t)?r[e]=w.digit(i.hours,t.length):/mm|m/.test(t)?r[e]=w.digit(i.minutes,t.length):/ss|s/.test(t)&&(r[e]=w.digit(i.seconds,t.length))}),a.range&&!e?r.join("")+" "+a.range+" "+n.parse(1):r.join("")},T.prototype.newDate=function(e){return e=e||{},new Date(e.year||1,e.month||0,e.date||1,e.hours||0,e.minutes||0,e.seconds||0)},T.prototype.setValue=function(e){var t=this,n=t.config,a=t.bindElem||n.elem[0],i=t.isInput(a)?"val":"html";return"static"===n.position||w(a)[i](e||""),this},T.prototype.stampRange=function(){var e,t,n=this,a=n.config,i=w(n.elem).find("td");if(a.range&&!n.endDate&&w(n.footer).find(g).addClass(s),n.endDate)return e=n.newDate({year:n.startDate.year,month:n.startDate.month,date:n.startDate.date}).getTime(),t=n.newDate({year:n.endDate.year,month:n.endDate.month,date:n.endDate.date}).getTime(),e>t?n.hint(l):void w.each(i,function(a,i){var r=w(i).attr("lay-ymd").split("-"),s=n.newDate({year:r[0],month:r[1]-1,date:r[2]}).getTime();w(i).removeClass(u+" "+o),s!==e&&s!==t||w(i).addClass(w(i).hasClass(y)||w(i).hasClass(f)?u:o),s>e&&s<t&&w(i).addClass(u)})},T.prototype.done=function(e,t){var n=this,a=n.config,i=w.extend({},n.startDate?w.extend(n.startDate,n.startTime):a.dateTime),r=w.extend({},w.extend(n.endDate,n.endTime));return w.each([i,r],function(e,t){"month"in 
t&&w.extend(t,{month:t.month+1})}),e=e||[n.parse(),i,r],"function"==typeof a[t||"done"]&&a[t||"done"].apply(a,e),n},T.prototype.choose=function(e){var t=this,n=t.config,a=n.dateTime,i=w(t.elem).find("td"),r=e.attr("lay-ymd").split("-"),l=function(e){new Date;e&&w.extend(a,r),n.range&&(t.startDate?w.extend(t.startDate,r):t.startDate=w.extend({},r,t.startTime),t.startYMD=r)};if(r={year:0|r[0],month:(0|r[1])-1,date:0|r[2]},!e.hasClass(s))if(n.range){if(w.each(["startTime","endTime"],function(e,n){t[n]=t[n]||{hours:0,minutes:0,seconds:0}}),t.endState)l(),delete t.endState,delete t.endDate,t.startState=!0,i.removeClass(o+" "+u),e.addClass(o);else if(t.startState){if(e.addClass(o),t.endDate?w.extend(t.endDate,r):t.endDate=w.extend({},r,t.endTime),t.newDate(r).getTime()<t.newDate(t.startYMD).getTime()){var d=w.extend({},t.endDate,{hours:t.startDate.hours,minutes:t.startDate.minutes,seconds:t.startDate.seconds});w.extend(t.endDate,t.startDate,{hours:t.endDate.hours,minutes:t.endDate.minutes,seconds:t.endDate.seconds}),t.startDate=d}n.showBottom||t.done(),t.stampRange(),t.endState=!0,t.done(null,"change")}else e.addClass(o),l(),t.startState=!0;w(t.footer).find(g)[t.endDate?"removeClass":"addClass"](s)}else"static"===n.position?(l(!0),t.calendar().done().done(null,"change")):"date"===n.type?(l(!0),t.setValue(t.parse()).remove().done()):"datetime"===n.type&&(l(!0),t.calendar().done(null,"change"))},T.prototype.tool=function(e,t){var n=this,a=n.config,i=a.dateTime,r="static"===a.position,o={datetime:function(){w(e).hasClass(s)||(n.list("time",0),a.range&&n.list("time",1),w(e).attr("lay-type","date").html(n.lang().dateTips))},date:function(){n.closeList(),w(e).attr("lay-type","datetime").html(n.lang().timeTips)},clear:function(){n.setValue("").remove(),r&&(w.extend(i,n.firstDate),n.calendar()),a.range&&(delete n.startState,delete n.endState,delete n.endDate,delete n.startTime,delete n.endTime),n.done(["",{},{}])},now:function(){var e=new 
Date;w.extend(i,n.systemDate(),{hours:e.getHours(),minutes:e.getMinutes(),seconds:e.getSeconds()}),n.setValue(n.parse()).remove(),r&&n.calendar(),n.done()},confirm:function(){if(a.range){if(!n.endDate)return n.hint("请先选择日期范围");if(w(e).hasClass(s))return n.hint("time"===a.type?l.replace(/日期/g,"时间"):l)}else if(w(e).hasClass(s))return n.hint("不在有效日期或时间范围内");n.done(),n.setValue(n.parse()).remove()}};o[t]&&o[t]()},T.prototype.change=function(e){var t=this,n=t.config,a=n.dateTime,i=n.range&&("year"===n.type||"month"===n.type),r=t.elemCont[e||0],o=t.listYM[e],s=function(s){var l=["startDate","endDate"][e],d=w(r).find(".laydate-year-list")[0],c=w(r).find(".laydate-month-list")[0];return d&&(o[0]=s?o[0]-15:o[0]+15,t.list("year",e)),c&&(s?o[0]--:o[0]++,t.list("month",e)),(d||c)&&(w.extend(a,{year:o[0]}),i&&(t[l].year=o[0]),n.range||t.done(null,"change"),t.setBtnStatus(),n.range||t.limit(w(t.footer).find(g),{year:o[0]})),d||c};return{prevYear:function(){s("sub")||(a.year--,t.checkDate("limit").calendar(),n.range||t.done(null,"change"))},prevMonth:function(){var e=t.getAsYM(a.year,a.month,"sub");w.extend(a,{year:e[0],month:e[1]}),t.checkDate("limit").calendar(),n.range||t.done(null,"change")},nextMonth:function(){var e=t.getAsYM(a.year,a.month);w.extend(a,{year:e[0],month:e[1]}),t.checkDate("limit").calendar(),n.range||t.done(null,"change")},nextYear:function(){s()||(a.year++,t.checkDate("limit").calendar(),n.range||t.done(null,"change"))}}},T.prototype.changeEvent=function(){var e=this;e.config;w(e.elem).on("click",function(e){w.stope(e)}),w.each(e.elemHeader,function(t,n){w(n[0]).on("click",function(n){e.change(t).prevYear()}),w(n[1]).on("click",function(n){e.change(t).prevMonth()}),w(n[2]).find("span").on("click",function(n){var 
a=w(this),i=a.attr("lay-ym"),r=a.attr("lay-type");i&&(i=i.split("-"),e.listYM[t]=[0|i[0],0|i[1]],e.list(r,t),w(e.footer).find(D).addClass(s))}),w(n[3]).on("click",function(n){e.change(t).nextMonth()}),w(n[4]).on("click",function(n){e.change(t).nextYear()})}),w.each(e.table,function(t,n){var a=w(n).find("td");a.on("click",function(){e.choose(w(this))})}),w(e.footer).find("span").on("click",function(){var t=w(this).attr("lay-type");e.tool(this,t)})},T.prototype.isInput=function(e){return/input|textarea/.test(e.tagName.toLocaleLowerCase())},T.prototype.events=function(){var e=this,t=e.config,n=function(n,a){n.on(t.trigger,function(){a&&(e.bindElem=this),e.render()})};t.elem[0]&&!t.elem[0].eventHandler&&(n(t.elem,"bind"),n(t.eventElem),w(document).on("click",function(n){n.target!==t.elem[0]&&n.target!==t.eventElem[0]&&n.target!==w(t.closeStop)[0]&&e.remove()}).on("keydown",function(t){13===t.keyCode&&w("#"+e.elemID)[0]&&e.elemID===T.thisElem&&(t.preventDefault(),w(e.footer).find(g)[0].click())}),w(window).on("resize",function(){return!(!e.elem||!w(r)[0])&&void e.position()}),t.elem[0].eventHandler=!0)},n.render=function(e){var t=new T(e);return a.call(t)},n.getEndDate=function(e,t){var n=new Date;return n.setFullYear(t||n.getFullYear(),e||n.getMonth()+1,1),new Date(n.getTime()-864e5).getDate()},window.lay=window.lay||w,e?(n.ready(),layui.define(function(e){n.path=layui.cache.dir,e(i,n)})):"function"==typeof define&&define.amd?define(function(){return n}):function(){n.ready(),window.laydate=n}()}(); | PypiClean |
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/methodcalls/groups/change_volume_call.py | from fipper.exception import NoActiveGroupCall
from fipper.exception import NodeJSNotRunning
from fipper.exception import ClientCallsNotSet
from fipper.viper import Viper
class ChangeVolumeCall(Viper):
    async def change_volume_call(self, chat_id: int, volume: int):
        """Change the output volume of the stream playing in a group call.

        The volume change is applied through MTProto APIs via the bound
        assistant client.

        Parameters:
            chat_id (``int``):
                Unique identifier (int) of the target chat.
            volume (``int``):
                Volume level to apply to the stream.

        Raises:
            ClientCallsNotSet: In case you call this method without any
                MtProto client attached.
            NodeJSNotRunning: In case you call this method without calling
                :meth:`~fipper.PyTgCalls.start` first.
            NoActiveGroupCall: In case you try to edit a group call that
                was never started.

        Example:
            .. code-block:: python

                from fipper import Client
                from fipper import idle
                ...
                app = PyTgCalls(client)
                app.start()

                ...  # Call API methods

                app.change_volume_call(
                    -1001185324811,
                    175,
                )

                idle()
        """
        # Guard clauses: fail fast when the client stack is not ready.
        if self.assistant is None:
            raise ClientCallsNotSet()
        if self._wait_until_run is None:
            raise NodeJSNotRunning()

        # Wait for the Node.js core to finish starting up, if needed.
        if not self._wait_until_run.done():
            await self._wait_until_run

        chat_call = await self.assistant.get_full_chat(chat_id)
        if chat_call is None:
            raise NoActiveGroupCall()

        await self.assistant.change_volume(
            chat_id,
            volume,
            self._cache_user_peer.get(chat_id),
        )
/JsonUtils-0.1.tar.gz/JsonUtils-0.1/jsonutils/jsonrpc.py | __version__ = '1.0'
import json
import cgi, re, sys
## jsonrpc.py takes JSON-RPC requests, passes them to the specified
## method, and returns the result as a JSON-RPC message.
## Copyright (C) 2005 Russell A. Moffitt
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation.
# CGI response template: Content-Type header followed by the JSON body.
# NOTE(review): a valid CGI response requires a blank line between the
# header block and the body; confirm the blank line inside this template
# was not lost in transit.
textTemplate = """Content-Type: text/plain
%(content)s"""
# Accepts identifier-like names: an ASCII letter followed by word characters.
# Raw string avoids the invalid "\w" escape warning on modern Python.
NameAllowedRegExp = re.compile(r"^[a-zA-Z]\w*$")


def nameAllowed(name):
    """Return True if *name* is an allowed service-method name.

    A name is allowed when it starts with an ASCII letter and continues
    with word characters only (letters, digits, underscore).
    """
    # re.match returns a Match object or None; collapse to a strict bool
    # so callers keep getting exactly True/False as before.
    return NameAllowedRegExp.match(name) is not None
def getTracebackStr():
    """Return the currently handled exception's traceback as a string.

    Must be called from inside an ``except`` block; otherwise the stdlib
    emits a "NoneType: None"-style placeholder.
    """
    import traceback
    import io
    # Python 3 fix: the old ``StringIO`` module became ``io.StringIO``.
    buf = io.StringIO()
    traceback.print_exc(file=buf)
    return buf.getvalue()
class JSONRPCError(Exception):
    """Base class for JSON-RPC protocol errors.

    Instances expose ``name`` (the concrete class name) and ``msg`` so
    that ``error.__dict__`` can be serialised directly into the ``error``
    member of a JSON-RPC response.
    """

    def __init__(self, msg=""):
        # Deriving from Exception makes these classes raisable on
        # Python 3 (the old plain classes could not be raised).
        super().__init__(msg)
        self.name = self.__class__.__name__
        self.msg = msg


class InvalidJsonRpcRequest(JSONRPCError):
    """The request was empty or not a well-formed JSON-RPC envelope."""


class InvalidMethodParameters(JSONRPCError):
    """The parameters did not match the target method's signature."""


class MethodNotFound(JSONRPCError):
    """No service method with the requested name is registered."""


class ApplicationError(JSONRPCError):
    """The service method itself raised an exception."""
class JsonRpcHandler:
    """Dispatch JSON-RPC requests received via CGI to registered methods.

    The handler reads a ``request`` field from the CGI query string or
    POST body, resolves the requested method from ``self.services`` and
    prints a JSON-RPC response document to stdout.
    """

    def __init__(self, services=None):
        """Create RPC request/response handler.

        Args:
            services: mapping of authorized method names to callables, or
                an object exposing ``_getMethodByName`` for its own lookup.
        """
        self.services = services or {}

    def getMethodByName(self, name):
        """Resolve *name* to a callable from the registered services.

        A service container may implement its own dotted-name lookup via
        ``_getMethodByName`` ("obj.child.meth" returns the meth object);
        otherwise a plain mapping lookup restricted to allowed names is
        used.

        Raises:
            KeyError: if the name is not allowed or not registered; the
                caller translates any failure into a MethodNotFound error.
        """
        try:
            # Preferred: let the service object resolve the name itself.
            # Bug fix: the original fell through here without returning,
            # so a successful _getMethodByName lookup yielded None.
            return self.services._getMethodByName(name)
        except AttributeError:
            # Plain mapping of method names to callables.
            pass
        if nameAllowed(name):
            return self.services[name]
        raise KeyError(name)

    def sendResponse(self, id, result, error):
        """Serialise and print a JSON-RPC response document to stdout."""
        # Python 3 fixes: json-py's json.write() -> stdlib json.dumps(),
        # and the print statement -> print() function.
        response = json.dumps({'id': id, 'result': result, 'error': error})
        print(textTemplate % {'content': response})

    def handleJsonRpc(self):
        """Read, dispatch and answer a single JSON-RPC request."""
        # Get parameter values from the "get" query string or "post" args.
        fields = cgi.FieldStorage()
        request = fields.getfirst('request')
        try:
            if request is None:
                raise InvalidJsonRpcRequest()
            # Python 3 fix: json-py's json.read() -> stdlib json.loads().
            req = json.loads(request)
            id = req['id']
            params = req['params']
            methodname = req['method']
        except Exception:
            self.sendResponse(
                None, None,
                InvalidJsonRpcRequest(
                    "Empty or malformed JSON-RPC request.").__dict__)
            return

        try:
            method = self.getMethodByName(methodname)
        except Exception:
            self.sendResponse(id, None, MethodNotFound(methodname).__dict__)
            return

        if not method:
            # Preserve the original behavior: a falsy registration is
            # silently ignored (no response is emitted).
            return
        try:
            result = method(*params)
            # A request without an id is a notification: no reply expected.
            if id is not None:
                self.sendResponse(id, result, None)
        except SystemExit:
            pass
        except Exception:
            # Error inside the callable object: report the traceback.
            self.sendResponse(
                id, None, ApplicationError(getTracebackStr()).__dict__)
if (__name__ == "__main__"):
    # Demo/smoke test: fabricate a JSON-RPC request and run the CGI
    # handler against a trivial 'foo' echo service.
    jsontxt = """request={
"method": "foo",
"params": ["spam", 321],
"id": 1234
}"""
    # NOTE(review): appending the request to sys.argv presumably relies on
    # the legacy cgi.FieldStorage consuming command-line arguments when no
    # CGI environment variables are set -- confirm against the target
    # Python's cgi module before relying on this behavior.
    if (len(sys.argv) < 2):
        sys.argv.append(jsontxt)

    def foo(*args):
        # Echo service: returns its positional arguments as a tuple.
        return args

    services = {'foo':foo}
    myJson = JsonRpcHandler(services)
    myJson.handleJsonRpc()
/NAG-PyPOP-0.3.5.tar.gz/NAG-PyPOP-0.3.5/pypop/metrics/mpi.py | import numpy
import pandas
from .metricset import MetricSet, Metric
# Long-form metric descriptions, attached to the Metric definitions below
# via their ``desc`` keyword (surfaced e.g. in report tooltips).

k_GE_desc = (
    "The overall quality of the parallelisation. This is the product of the "
    "Parallel Efficiency and Computational Scaling"
)

k_PE_desc = (
    # Fix: the adjacent string literals previously concatenated to
    # "Process LevelEfficiency" (missing space at the join).
    "The overall efficiency with which the computation is parallelised between "
    "different processes and threads. This is further divided into the Process Level "
    "Efficiency and the Thread Level Efficiency"
)

k_PLE_desc = (
    "The efficiency of the application as viewed at the process level, including the "
    "MPI communication and process-level load balance."
)

k_MPILB_desc = (
    "The efficiency with which the total amount of computational work is shared "
    "between the different MPI processes. Low values indicate that there is significant "
    "imbalance between the most and least loaded processes."
)

k_MPICE_desc = (
    "The efficiency with which the application carries out MPI communication. An ideal "
    "application will spend no time communicating and 100% of time in computation. Low "
    "values indicate that too much communication is being performed for the amount of "
    "computation."
)

k_MPITE_desc = (
    "The efficiency of the actual transfer of data via MPI. This reflects the size of "
    "the data being communicated and the speed of the underlying communication network. "
    "Low values indicate that the network bandwidth is insufficient for the required "
    "communication rate, or that too much data is being communicated."
)

k_MPISE_desc = (
    "The efficiency with which the MPI communications are synchronised and carried out. "
    "Low values indicate that there is significant irregularity in the timings of "
    "different processes arrivals at MPI calls, reducing efficiency due to waiting "
    "for MPI calls to complete."
)

k_COMPSC_desc = (
    "The way in which the total computational cost varies with the applied parallelism. "
    "This is a combination of the increased cost due to additional calculations "
    "performed, and increased costs due to reduced instructions per cycle."
)

k_INSSC_desc = (
    "Inefficiencies introduced due to an increase in the total computational work done, "
    "measured by the total CPU instructions. Ideally, there would be no additional "
    "computation required when parallelising, but there is normally some additional "
    "cost to manage the distribution of work. The Instruction Scaling metric "
    "represents this by calculating the relative difference in total instructions "
    "between runs."
)

k_IPCSC_desc = (
    "Inefficiencies due to changes in the instructions per cycle executed by the CPUs. "
    "The IPC rate can be reduced due to CPU data starvation, inefficient cache usage or "
    "high rates of branch misprediction."
)

k_FREQSC_desc = (
    "Inefficiencies due to changes in the rate at which the CPU executes instructions. "
    "This is typically due to thermal management in the CPU reducing the overall clock "
    "speed."
)
class MPI_Metrics(MetricSet):
    """Pure MPI Metrics (additive version)."""

    _metric_list = [
        Metric("Global Efficiency", 0, desc=k_GE_desc),
        Metric("Parallel Efficiency", 1, desc=k_PE_desc),
        Metric("MPI Load Balance", 2, "Load balance", desc=k_MPILB_desc),
        Metric("MPI Communication Efficiency", 2, desc=k_MPICE_desc),
        Metric("MPI Transfer Efficiency", 3, desc=k_MPITE_desc),
        Metric("MPI Serialisation Efficiency", 3, desc=k_MPISE_desc),
        Metric("Computation Scaling", 1, desc=k_COMPSC_desc),
        Metric("Instruction Scaling", 2, desc=k_INSSC_desc),
        Metric("IPC Scaling", 2, desc=k_IPCSC_desc),
        Metric("Frequency Scaling", 2, desc=k_FREQSC_desc),
    ]
    _programming_model = "MPI"

    def _calculate_metrics(self, ref_key=None, sort_keys=True):
        """Populate ``self._metric_data`` with the additive POP MPI metrics.

        Args:
            ref_key: key of the reference run used by the scaling metrics;
                defaults to the smallest key in ``self._stats_dict``.
            sort_keys: when True, process the runs in sorted key order.
        """
        if not ref_key:
            ref_key = min(self._stats_dict.keys())

        # Bug fix: the original assigned the unsorted keys to ``key`` (not
        # ``keys``), so any call with sort_keys=False raised NameError.
        if sort_keys:
            keys = sorted(self._stats_dict.keys())
        else:
            keys = list(self._stats_dict.keys())

        # Statistics of the reference run, used by all scaling metrics.
        ref_stats = self._stats_dict[ref_key].statistics

        metrics_by_key = {}
        for key in keys:
            metadata = self._stats_dict[key].metadata
            stats = self._stats_dict[key].statistics
            metrics = self._create_subdataframe(metadata, key)

            # Fraction of runtime spent outside MPI on the master thread.
            metrics["MPI Communication Efficiency"] = (
                stats["Total Non-MPI Runtime"].loc[:, 1].max()
                / stats["Total Runtime"].max()
            )

            # The "Ideal Runtime" statistics only exist when an
            # ideal-network trace was simulated; report NaN otherwise.
            try:
                metrics["MPI Serialisation Efficiency"] = 1 - (
                    (
                        stats["Ideal Runtime"].loc[:, 1].max()
                        - stats["Total Non-MPI Runtime"].loc[:, 1].max()
                    )
                    / stats["Total Runtime"].max()
                )
            except KeyError:
                metrics["MPI Serialisation Efficiency"] = numpy.nan

            try:
                metrics["MPI Transfer Efficiency"] = (
                    stats["Ideal Runtime"].loc[:, 1].max()
                    / stats["Total Runtime"].loc[:, 1].max()
                )
            except KeyError:
                metrics["MPI Transfer Efficiency"] = numpy.nan

            metrics["MPI Load Balance"] = 1 - (
                (
                    stats["Total Useful Computation"].loc[:, 1].max()
                    - stats["Total Useful Computation"].loc[:, 1].mean()
                )
                / stats["Total Runtime"].max()
            )

            metrics["Parallel Efficiency"] = (
                stats["Total Useful Computation"].mean()
                / stats["Total Runtime"].max()  # avg all threads to include Amdahl
            )

            metrics["IPC Scaling"] = (
                stats["Useful Instructions"].sum() / stats["Useful Cycles"].sum()
            ) / (
                ref_stats["Useful Instructions"].sum()
                / ref_stats["Useful Cycles"].sum()
            )

            metrics["Instruction Scaling"] = (
                ref_stats["Useful Instructions"].sum()
                / stats["Useful Instructions"].sum()
            )

            metrics["Frequency Scaling"] = (
                stats["Useful Cycles"].sum()
                / stats["Total Useful Computation"].sum()
            ) / (
                ref_stats["Useful Cycles"].sum()
                / ref_stats["Total Useful Computation"].sum()
            )

            metrics["Computation Scaling"] = (
                ref_stats["Total Useful Computation"].sum()
                / stats["Total Useful Computation"].sum()
            )

            metrics["Global Efficiency"] = (
                metrics["Computation Scaling"] * metrics["Parallel Efficiency"]
            )

            metrics["Speedup"] = (
                ref_stats["Total Runtime"].max() / stats["Total Runtime"].max()
            )

            metrics["Runtime"] = stats["Total Runtime"].max()

            metrics_by_key[key] = metrics

        self._metric_data = pandas.concat(metrics_by_key.values())
class MPI_Multiplicative_Metrics(MetricSet):
    """Pure MPI Metrics (multiplicative version)."""

    _metric_list = [
        Metric("Global Efficiency", 0),
        Metric("Parallel Efficiency", 1),
        Metric("MPI Load balance", 2, "Load balance"),
        Metric("MPI Communication Efficiency", 2),
        Metric("MPI Transfer Efficiency", 3),
        Metric("MPI Serialisation Efficiency", 3),
        Metric("Computation Scaling", 1),
        Metric("Instruction Scaling", 2),
        Metric("IPC Scaling", 2),
        Metric("Frequency Scaling", 2),
    ]
    _programming_model = "MPI"

    def _calculate_metrics(self, ref_key=None, sort_keys=True):
        """Populate ``self._metric_data`` with the multiplicative POP MPI metrics.

        Args:
            ref_key: key of the reference run used by the scaling metrics;
                defaults to the smallest key in ``self._stats_dict``.
            sort_keys: when True, process the runs in sorted key order.
        """
        if not ref_key:
            ref_key = min(self._stats_dict.keys())

        # Bug fix: the original assigned the unsorted keys to ``key`` (not
        # ``keys``), so any call with sort_keys=False raised NameError.
        if sort_keys:
            keys = sorted(self._stats_dict.keys())
        else:
            keys = list(self._stats_dict.keys())

        # Statistics of the reference run, used by all scaling metrics.
        ref_stats = self._stats_dict[ref_key].statistics

        metrics_by_key = {}
        for key in keys:
            metadata = self._stats_dict[key].metadata
            stats = self._stats_dict[key].statistics
            metrics = self._create_subdataframe(metadata, key)

            # Fraction of runtime spent outside MPI on the master thread.
            metrics["MPI Communication Efficiency"] = (
                stats["Total Non-MPI Runtime"].loc[:, 1].max()
                / stats["Total Runtime"].max()
            )

            # The "Ideal Runtime" statistics only exist when an
            # ideal-network trace was simulated; report NaN otherwise.
            try:
                metrics["MPI Serialisation Efficiency"] = (
                    stats["Total Non-MPI Runtime"].loc[:, 1].max()
                    / stats["Ideal Runtime"].loc[:, 1].max()
                )
            except KeyError:
                metrics["MPI Serialisation Efficiency"] = numpy.nan

            try:
                metrics["MPI Transfer Efficiency"] = (
                    stats["Ideal Runtime"].loc[:, 1].max()
                    / stats["Total Runtime"].loc[:, 1].max()
                )
            except KeyError:
                metrics["MPI Transfer Efficiency"] = numpy.nan

            metrics["MPI Load balance"] = (
                stats["Total Useful Computation"].loc[:, 1].mean()
                / stats["Total Useful Computation"].loc[:, 1].max()
            )

            metrics["Parallel Efficiency"] = (
                stats["Total Useful Computation"].mean()
                / stats["Total Runtime"].max()  # avg all threads to include Amdahl
            )

            metrics["IPC Scaling"] = (
                stats["Useful Instructions"].sum() / stats["Useful Cycles"].sum()
            ) / (
                ref_stats["Useful Instructions"].sum()
                / ref_stats["Useful Cycles"].sum()
            )

            metrics["Instruction Scaling"] = (
                ref_stats["Useful Instructions"].sum()
                / stats["Useful Instructions"].sum()
            )

            metrics["Frequency Scaling"] = (
                stats["Useful Cycles"].sum()
                / stats["Total Useful Computation"].sum()
            ) / (
                ref_stats["Useful Cycles"].sum()
                / ref_stats["Total Useful Computation"].sum()
            )

            metrics["Computation Scaling"] = (
                ref_stats["Total Useful Computation"].sum()
                / stats["Total Useful Computation"].sum()
            )

            metrics["Global Efficiency"] = (
                metrics["Computation Scaling"] * metrics["Parallel Efficiency"]
            )

            metrics["Speedup"] = (
                ref_stats["Total Runtime"].max() / stats["Total Runtime"].max()
            )

            metrics["Runtime"] = stats["Total Runtime"].max()

            metrics_by_key[key] = metrics

        self._metric_data = pandas.concat(metrics_by_key.values())
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/bower_components/paper-listbox/.github/ISSUE_TEMPLATE.md | <!-- Instructions: https://github.com/PolymerElements/paper-listbox/CONTRIBUTING.md#filing-issues -->
### Description
<!-- Example: The `paper-foo` element causes the page to turn pink when clicked. -->
### Expected outcome
<!-- Example: The page stays the same color. -->
### Actual outcome
<!-- Example: The page turns pink. -->
### Live Demo
<!-- Example: https://jsbin.com/cagaye/edit?html,output -->
### Steps to reproduce
<!-- Example
1. Put a `paper-foo` element in the page.
2. Open the page in a web browser.
3. Click the `paper-foo` element.
-->
### Browsers Affected
<!-- Check all that apply -->
- [ ] Chrome
- [ ] Firefox
- [ ] Safari 9
- [ ] Safari 8
- [ ] Safari 7
- [ ] Edge
- [ ] IE 11
- [ ] IE 10
| PypiClean |
/NeodroidAgent-0.4.8-py36-none-any.whl/neodroidagent/agents/numpy_agents/evolutionary/exclude/akro/box.py | import gym.spaces
import numpy as np
from akro import tf, theano
from akro.requires import requires_tf, requires_theano
from akro.space import Space
class Box(gym.spaces.Box, Space):
    """A box in R^n.

    Each coordinate is bounded above and below.
    """

    @property
    def flat_dim(self):
        """Return the length of the flattened vector of the space."""
        return np.prod(self.low.shape)

    @property
    def bounds(self):
        """Return a 2-tuple containing the lower and upper bounds."""
        return self.low, self.high

    def flatten(self, x):
        """Return a flattened observation x.

        Args:
            x (:obj:`Iterable`): The object to flatten.

        Returns:
            np.ndarray: An array of x collapsed into one dimension.
        """
        # ndarray.flatten always returns a copy, never a view.
        return np.asarray(x).flatten()

    def unflatten(self, x):
        """Return an unflattened observation x.

        Args:
            x (:obj:`Iterable`): The object to unflatten.

        Returns:
            np.ndarray: An array of x in the shape of self.shape.
        """
        return np.asarray(x).reshape(self.shape)

    def flatten_n(self, obs):
        """Return flattened observations obs.

        Args:
            obs (:obj:`Iterable`): The object to reshape and flatten.

        Returns:
            np.ndarray: An array of obs in a shape inferred by the size of
                its first element.
        """
        return np.asarray(obs).reshape((len(obs), -1))

    def unflatten_n(self, obs):
        """Return unflattened observation of obs.

        Args:
            obs (:obj:`Iterable`): The object to reshape and unflatten.

        Returns:
            np.ndarray: An array of obs in a shape inferred by the size of
                its first element and self.shape.
        """
        return np.asarray(obs).reshape((len(obs), ) + self.shape)

    def concat(self, other):
        """Concatenate with another Box space.

        Note that the dimensions of both boxes will be flattened.

        Args:
            other (Box): A space to be concatenated with this space.

        Returns:
            Box: A concatenated space.
        """
        assert isinstance(other, Box)

        first_lb, first_ub = self.bounds
        second_lb, second_ub = other.bounds
        first_lb, first_ub = first_lb.flatten(), first_ub.flatten()
        second_lb, second_ub = second_lb.flatten(), second_ub.flatten()
        return Box(np.concatenate([first_lb, second_lb]),
                   np.concatenate([first_ub, second_ub]))

    def __hash__(self):
        """Hash the Box Space.

        Returns:
            int: A hash derived from the low bound, high bound and shape.
        """
        # Fix: the previous implementation hashed ``self.low[0][0]`` and
        # ``self.high[0][0]``, which raises IndexError for any Box whose
        # bound arrays are not at least 2-dimensional (1-D boxes being the
        # common case). Hashing the raw bytes of both bound arrays works
        # for every shape and still guarantees equal boxes hash equally.
        return hash((self.low.tobytes(), self.high.tobytes(), self.shape))

    @requires_tf
    def to_tf_placeholder(self, name, batch_dims):
        """Create a tensor placeholder from the Space object.

        Args:
            name (str): name of the variable
            batch_dims (:obj:`list`): batch dimensions to add to the
                shape of the object.

        Returns:
            tf.Tensor: Tensor object with the same properties as
                the Box where the shape is modified by batch_dims.
        """
        return tf.compat.v1.placeholder(dtype=self.dtype,
                                        shape=[None] * batch_dims +
                                        list(self.shape),
                                        name=name)

    @requires_theano
    def to_theano_tensor(self, name, batch_dims):
        """Create a theano tensor from the Space object.

        Args:
            name (str): name of the variable
            batch_dims (:obj:`list`): batch dimensions to add to the
                shape of the object.

        Returns:
            theano.tensor.TensorVariable: Tensor object with the
                same properties as the Box where the shape is
                modified by batch_dims.
        """
        return theano.tensor.TensorType(self.dtype,
                                        (False, ) * (batch_dims + 1))(name)
/LongTermBiosignals-1.0.0.tar.gz/LongTermBiosignals-1.0.0/src/biosignals/sources/Bitalino.py |
# ===================================
# IT - LongTermBiosignals
# Package: biosignals
# Module: Bitalino
# Description: Class Bitalino, a type of BiosignalSource, with static procedures to read and write datafiles from
# any Bitalino device.
# Contributors: João Saraiva, Mariana Abreu
# Created: 25/04/2022
# Last Updated: 22/07/2022
# ===================================
import ast
from json import load, dump
from os import listdir, path, getcwd, access, R_OK
import numpy as np
from dateutil.parser import parse as to_datetime
from biosignals.modalities.ACC import ACC
from biosignals.modalities.ECG import ECG
from biosignals.modalities.EDA import EDA
from biosignals.modalities.EMG import EMG
from biosignals.modalities.PPG import PPG
from biosignals.modalities.RESP import RESP
from biosignals.sources.BiosignalSource import BiosignalSource
from biosignals.timeseries.Timeseries import Timeseries
class Bitalino(BiosignalSource):
    """BiosignalSource for Bitalino devices: reads Opensignals text files.

    Several helpers below are written without ``self``/``@staticmethod``
    and are always invoked through the class (``Bitalino.__helper(...)``),
    which works because no instance is bound.
    """

    def __init__(self):
        super().__init__()

    def __str__(self):
        return "Bitalino"

    def __aux_date(header):
        """
        Get starting time from header.

        Picks the first header key containing 'time' and combines it with
        the 'date' field; returns None (after printing) on parse failure.
        """
        time_key = [key for key in header.keys() if 'time' in key][0]
        try:
            return to_datetime(header['date'].strip('\"') + ' ' + header[time_key].strip('\"'))
        except Exception as e:
            # NOTE(review): errors are only printed; caller receives None.
            print(e)

    def __check_empty(len_, type=''):
        """
        Confirm if the length is acceptable and return the desired output.

        For type='file_size' anything <= 50 bytes counts as empty;
        otherwise a length < 1 counts as empty.
        """
        if type == 'file_size':
            if len_ <= 50:
                return True
        else:
            if len_ < 1:
                return True
        return False

    def __change_sens_list(sens, device, channels):
        """
        Confirm if the list of sensors has only RAW as labels, and ask the
        user interactively for new labels in that case (mutates ``sens``).
        """
        if list(set(sens)) == ['RAW']:
            print(f'Please update sens according to the sensors used:')
            analogs = channels[-len(sens):]
            for se in range(len(sens)):
                new_se = str(input(f'{device} -- {sens[se]} -- {analogs[se]}')).upper()
                sens[se] = new_se
        return sens

    def __analog_idx(header, sensor, **options):
        """
        From a header choose analog sensor key idx that correspond to a
        specific sensor.

        Also runs __read_json to persist configurations. Handles several
        devices; returns (sensor_idx list, sensor_names list, chosen_device).
        """
        sensor_idx, sensor_names, json_bool, chosen_device = [], [], False, ''
        # if options carry a json key, use the json config file instead of
        # prompting the user
        if options:
            if 'json' in options.keys():
                json_bool = options['json']
                json_dir = options['json_dir'] if 'json_dir' in options.keys() \
                    else path.join(getcwd(), 'bitalino.json')
        # len_ch offsets column indexes of later devices past earlier ones
        len_ch = 0
        for device in header.keys():
            chosen_device = device
            sens_id = ''
            # iterate over each device
            if json_bool:
                sens, ch, location = Bitalino.__read_json(json_dir, header[device])
            else:
                # interactive fallback: ask the user for the header keys
                sens = header[device][str(input(f'What is the header key of sensor names? {header}\n ')).strip().lower()]
                ch = header[device][str(input(f'What is the header key for analog channels? {header}\n ')).strip().lower()]
                location = str(input(f'What is the body location of this device {device}? \n'))
                sens = Bitalino.__change_sens_list(sens, device, ch)
            # analog channels occupy the trailing columns of ch
            analogs = ch[-len(sens):]
            if sensor in str(sens):
                # optionally filter by the requested body location
                location_bool = True
                if 'location' in options.keys():
                    if location.lower() not in options['location'].lower():
                        location_bool = False
                sens_id = [lab + '_' + location for lab in sens if sensor in lab.upper() and location_bool]
                sensor_idx += [len_ch + ch.index(analogs[sens.index(sid.split('_')[0])]) for sid in sens_id]
            if sens_id != '':
                chosen_device = device
            len_ch = len(ch)
            sensor_names += sens_id
        return sensor_idx, sensor_names, chosen_device

    def __read_json(dir_, header):
        """Load (or interactively build) the bitalino.json config for the
        device described by ``header`` and persist it back to ``dir_``.

        Returns (labels, columns, location) for the device.
        """
        # check if bitalino json exists and is readable
        if path.isfile(dir_) and access(dir_,
                                        R_OK):
            # checks if file exists
            with open(dir_, 'r') as json_file:
                json_string = load(json_file)
        else:
            print("Either file is missing or is not readable, creating file...")
            json_string = {}
        if 'device connection' in header.keys():
            device = header['device connection']
        else:
            device = input('Enter device id (string): ')
        if device not in json_string.keys():
            json_string[device] = {}
        for key in ['column', 'label', 'firmware version', 'device', 'resolution', 'channels', 'sensor', 'location']:
            if key not in json_string[device].keys():
                if key in header.keys():
                    json_string[device][key] = header[key]
                else:
                    print(header['device connection'], header['label'])
                    new_info = str(input(f'{key}: ')).lower()
                    json_string[device][key] = new_info
            if key == 'label':
                # replace placeholder RAW labels interactively if needed
                sens = Bitalino.__change_sens_list(json_string[device]['label'], device, header['column'])
                json_string[device][key] = sens
        with open(dir_, 'w') as db_file:
            dump(json_string, db_file, indent=2)
        return json_string[device]['label'], json_string[device]['column'], json_string[device]['location']

    # @staticmethod
    def __read_bit(list_, metadata=False, sensor_idx=[], sensor_names=[], device='', **options):
        """
        Reads one bitalino text file.

        NOTE(review): ``sensor_idx=[]`` / ``sensor_names=[]`` are mutable
        defaults — safe only because they are never mutated in-place here.

        Args:
            list_ (list): contains the file path in index 0 and sensor label in index 1
            metadata (bool): defines whether only metadata or actual timeseries values should be returned
            sensor_idx (list): list of indexes that correspond to the columns of sensor to extract
            sensor_names (list): list of names that correspond to the sensor label
                ex: sensor='ECG', sensor_names=['ECG_chest']
                ex: sensor='ACC', options['location']='wrist', sensor_names=['ACCX_wrist','ACCY_wrist','ACCZ_wrist']
            device (str): device MacAddress, this is used to get the specific header, specially when using 2 devices
            **options (dict): equal to _read arg
        Returns:
            if metadata: sensor_idx (list), sensor_names (list), device (str), header (dict)
            else: sensor_data (array): 2-dimensional array of time over sensors columns
                  date (datetime): initial datetime of array
            NOTE(review): on an undersized file this returns {} (metadata)
            or ('', []) — callers that tuple-unpack 4 values will fail on
            the {} case; confirm against _read.
        Raises:
            IOError: if sensor_names is empty, meaning no channels could be retrieved for chosen sensor
        """
        dirfile = list_[0]
        sensor = list_[1]
        # size of bitalino file; <= 50 bytes means header-only/empty
        file_size = path.getsize(dirfile)
        if file_size <= 50:
            if metadata:
                return {}
            else:
                return '', []
        with open(dirfile) as fh:
            next(fh)
            # second line holds the JSON-ish header after the '# ' prefix
            header = next(fh)[2:]
            next(fh)
            # signal samples: whitespace-separated floats, one row per line
            data = np.array([line.strip().split() for line in fh], float)
        # if file is empty, return
        if Bitalino.__check_empty(len(data)):
            return None
        header = ast.literal_eval(header)
        if len(sensor_idx) < 1:
            sensor_idx, sensor_names, device = Bitalino.__analog_idx(header, sensor, **options)
        if metadata:
            return sensor_idx, sensor_names, device, header[device]
        if len(sensor_names) > 0:
            sensor_data = data[:, sensor_idx]
            date = Bitalino.__aux_date(header[device])
            return sensor_data, date
        else:
            raise IOError(f"Sensor {sensor} was not found in this acquisition, please insert another")

    @staticmethod
    def _read(dir, type, startkey='A20', **options):
        """Reads multiple Bitalino text files in directory 'dir' and returns
        a dict of Timeseries, one per retrieved channel.

        Args:
            dir (str): directory that contains bitalino files in txt format
            type (Biosignal): type of biosignal to extract can be one of ECG, EDA, PPG, RESP, ACC and EMG
            startkey (str): default is A20. the key that appears in all bitalino file names to extract from directory
            **options (dict): only the keys json, json_dir and location are being evaluated.
                options[json] (bool): if the user wants to use a json to save and load bitalino configurations
                options[json_dir] (str): directory to json file. If not defined, a default will be set automatically
                options[location] (str): if given, only the devices with that body location will be retrieved
        Returns:
            dict: A dictionary where keys are the sensors associated to the Biosignal with a Timeseries to each key
        Raises:
            IOError: if the Biosignal is not one of the ones mentioned
            IOError: if the list of bitalino files from dir returns empty
            IOError: if header is still empty after going through all Bitalino files
        """
        # map the modality class to the label used inside Bitalino headers
        sensor = 'ECG' if type is ECG else 'EDA' if type is EDA else 'PPG' if type is PPG else 'ACC' if type is ACC else 'PZT' if type is RESP else 'EMG' if type is EMG else ''
        if sensor == '':
            raise IOError(f'Type {type} does not have label associated, please insert one')
        # first a list is created with all the filenames that contain startkey
        # this is a list of lists where the second column is the type of channel to extract
        all_files = sorted([[path.join(dir, file), sensor] for file in listdir(dir) if startkey in file])
        # get header and sensor positions by running the bitalino files until a header is found
        if not all_files:
            raise IOError(f'No files in dir="{dir}" that start with {startkey}')
        header, h = {}, 0
        while len(header) < 1:
            # NOTE(review): an undersized first file makes __read_bit return
            # {} which cannot be unpacked into 4 names — confirm inputs.
            ch_idx, channels, device, header = Bitalino.__read_bit(all_files[h], metadata=True, **options)
            h += 1
        if header == {}:
            raise IOError(f'The files in {dir} did not contain a bitalino type {header}')
        new_dict = {}
        # re-read all files (including the one that yielded the header)
        segments = [Bitalino.__read_bit(file, sensor_idx=ch_idx, sensor_names=channels, device=device, **options)
                    for file in all_files[h-1:]]
        for ch, channel in enumerate(channels):
            # map each segment's start datetime -> that channel's samples
            samples = {segment[1]: segment[0][:, ch] for segment in segments if segment}
            if len(samples) > 1:
                new_timeseries = Timeseries.withDiscontiguousSegments(samples, sampling_frequency=header['sampling rate'],
                                                                      name=channels[ch])
            else:
                new_timeseries = Timeseries(tuple(samples.values())[0], tuple(samples.keys())[0], header['sampling rate'],
                                            name=channels[ch])
            new_dict[channel] = new_timeseries
        return new_dict

    @staticmethod
    def _write(dir, timeseries):
        '''Writes multiple TXT files on the directory 'path' so they can be opened in Opensignals.'''
        # TODO

    @staticmethod
    def _transfer(samples, to_unit):
        # Unit conversion not implemented for this source.
        pass
import textwrap
from . import Converter, HtmlCvt
from .util.html.elements import Toggle, TableRow
from .util.md import div
class MDCvt(Converter):
    """Converter that renders a fetched Notion block tree as Markdown.

    Tables and toggles are delegated to HtmlCvt / HTML elements because
    Markdown cannot express them natively.
    """

    # Accumulated markdown output (instance attribute once convert() runs).
    _md: str = ""
    # Running counter for consecutive numbered_list_item blocks.
    _numbered_list_counter: int = 1

    def __init__(
            self,
            api_token: str,
            block_id: str,
            is_page: bool = False,
            has_column_header: bool = False
    ):
        super().__init__(api_token, block_id, is_page, has_column_header)

    def convert(self) -> str:
        """Walk self._ctx (the fetched block list) and return Markdown."""
        for block in self._ctx:
            b_type = block["type"]
            # the payload lives under a key named after the block type
            b_ctx = block[f"{b_type.lower()}"]
            b_id = block["id"]
            # any non-numbered block resets the ordered-list numbering
            if b_type != "numbered_list_item":
                self._numbered_list_counter = 1
            if b_type == "heading_1":
                r = ""
                for child in b_ctx["rich_text"]:
                    r += div(child)
                self._md += f"# {r}\n\n"
            elif b_type == "heading_2":
                r = ""
                for child in b_ctx["rich_text"]:
                    r += div(child)
                self._md += f"## {r}\n\n"
            elif b_type == "heading_3":
                r = ""
                for child in b_ctx["rich_text"]:
                    r += div(child)
                self._md += f"### {r}\n\n"
            elif b_type == "paragraph":
                r = ""
                for child in b_ctx["rich_text"]:
                    r += div(child)
                self._md += f"{r}\n\n"
            elif b_type == "code":
                # plain text (no div()): code must not carry inline styling
                r = ""
                for child in b_ctx["rich_text"]:
                    r += child["text"]["content"]
                self._md += textwrap.dedent("""
                ```{0}
                {1}
                ```
                """).format(b_ctx["language"], r) + "\n"
            elif b_type == "image":
                # NOTE(review): these f-strings emit only a newline — the
                # image markup (file/external URL) appears to have been
                # lost; confirm against upstream source.
                if b_ctx["type"] == "file":
                    self._md += f'\n'
                elif b_ctx["type"] == "external":
                    self._md += f'\n'
            elif b_type == "divider":
                self._md += "---\n\n"
            elif b_type == "quote":
                r = ""
                for child in b_ctx["rich_text"]:
                    r += child["text"]["content"]
                self._md += f"> {r}\n"
            elif b_type == "bulleted_list_item":
                r = ""
                for child in b_ctx["rich_text"]:
                    r += div(child)
                self._md += f"- {r}\n"
            elif b_type == "numbered_list_item":
                r = ""
                for child in b_ctx["rich_text"]:
                    r += div(child)
                self._md += f"{self._numbered_list_counter}. {r}\n"
                self._numbered_list_counter += 1
            elif b_type == "table":
                # table interiors are rendered by the HTML converter
                r: str = HtmlCvt(
                    api_token=self._api_token,
                    block_id=b_id,
                    is_page=False,
                    is_table=True,
                    has_column_header=b_ctx["has_column_header"]
                ).convert()
                self._md += f'<table>{r}</table>'
            elif b_type == "table_row":
                self._md += TableRow(b_ctx).export()
            elif b_type == "toggle":
                # toggle children are converted recursively to Markdown and
                # wrapped into an HTML <details> element
                r: str = MDCvt(
                    api_token=self._api_token,
                    block_id=b_id,
                    is_page=False
                ).convert()
                self._md += Toggle(b_ctx, details=r).export()
        return self._md
import hashlib
import cPickle
from base64 import b64encode, b64decode
import binascii
import hmac
import os
import time
import urlparse
import urllib
from linotp.lib.crypto import urandom
from linotp.lib.user import User
from linotp.lib.user import getUserId
from linotp.lib.user import getUserInfo
from linotp.lib.realm import getDefaultRealm
from hashlib import sha1
from sqlalchemy import create_engine
from pylons import config
import logging
log = logging.getLogger(__name__)
# Default Diffie-Hellman modulus from the OpenID specification (hex digits,
# joined and parsed below).  NOTE: ``long`` is Python-2-only.
_DEFAULT_MOD = """
DCF93A0B883972EC0E19989AC5A2CE310E1D37717E8D9571BB7623731866E61E
F75A2E27898B057F9891C2E27A639C3F29B60814581CD3B2CA3986D268370557
7D45C2E7E52DC81C7A171876E5CEA74B1448BFDFAF18828EFD2519F14E45E382
6634AF1949E5B535CC829A483B8A76223E5D490A257F05BDFF16F2FB22C583AB
"""
_DEFAULT_MOD = long("".join(_DEFAULT_MOD.split()), 16)
# Default Diffie-Hellman generator.
_DEFAULT_GEN = 2
# OpenID protocol namespace URIs (2.0 and 1.1).
_PROTO_2 = "http://specs.openid.net/auth/2.0"
_PROTO_1 = "http://openid.net/signon/1.1"
OPENID_1_0_NS = 'http://openid.net/xmlns/1.0'
# XRDS service type identifiers for discovery documents.
OPENID_IDP_2_0_TYPE = 'http://specs.openid.net/auth/2.0/server'
OPENID_2_0_TYPE = 'http://specs.openid.net/auth/2.0/signon'
OPENID_1_1_TYPE = 'http://openid.net/signon/1.1'
OPENID_1_0_TYPE = 'http://openid.net/signon/1.0'
def xor(x, y):
    """Return the character-wise XOR of two equal-length strings.

    Args:
        x, y: strings of identical length.

    Returns:
        A string whose i-th character is chr(ord(x[i]) ^ ord(y[i])).

    Raises:
        ValueError: if the inputs differ in length.
    """
    if len(x) != len(y):
        raise ValueError('Inputs to strxor must have the same length')
    # The previous implementation used a Python-2-only tuple-unpacking
    # lambda (``lambda (a, b): ...``) that also shadowed this function's
    # name; a generator over zip() is equivalent and valid on Py2 and Py3.
    return "".join(chr(ord(a) ^ ord(b)) for a, b in zip(x, y))
def randchar():
    """Return one random ASCII alphanumeric character.

    Uses the module's cryptographic ``urandom`` source for the choice.
    """
    import string
    # ``string.letters`` is locale-dependent on Python 2 and was removed in
    # Python 3; ``string.ascii_letters`` exists in both and is what a nonce
    # alphabet needs (ASCII only).
    chars = string.ascii_letters + string.digits
    return urandom.choice(chars)
def get_nonce():
    """Build an OpenID response nonce: UTC timestamp plus six random chars."""
    timestamp = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
    suffix = ''.join(randchar() for _ in range(6))
    return timestamp + suffix
def btwoc(value):
    """Encode a nonnegative long as big-endian bytes (OpenID 'btwoc').

    Implementation trick: pickle protocol 2 serialises a long as a
    little-endian byte string; the slice strips the pickle framing and
    reverses it to big-endian.  NOTE(review): relies on CPython-2 pickle
    framing details — confirm before porting.
    """
    res = cPickle.dumps(value, 2)
    return res[3 + ord(res[3]):3:-1]
def unbtwoc(value):
    """Inverse of btwoc(): decode big-endian bytes back into a long.

    Rebuilds a protocol-2 pickle stream (opcode prefix '\\x80\\x02\\x8a' =
    LONG1) around the reversed bytes and unpickles it.
    """
    load = chr(len(value)) + value[::-1] + '.'
    return cPickle.loads('\x80\x02\x8a' + load)
def create_handle(assoc_type):
    """Create an association handle.

    Args:
        assoc_type: 'HMAC-SHA1' or 'HMAC-SHA256'.

    Returns:
        (secret_b64, handle): the base64-encoded random secret sized for
        the hash, and a handle of the form '{type}{hex time}{random}'.

    Raises:
        NotImplementedError: for any other association type.
    """
    # secret length must match the HMAC digest size
    secret_sizes = {'HMAC-SHA1': 20, 'HMAC-SHA256': 32}
    if assoc_type not in secret_sizes:
        raise NotImplementedError(assoc_type)
    secret_b64 = b64encode(os.urandom(secret_sizes[assoc_type]))
    uniq = b64encode(os.urandom(4))
    handle = '{%s}{%x}{%s}' % (assoc_type, int(time.time()), uniq)
    return secret_b64, handle
def get_dh_key(pubkey, session_type, secret_b64, gen=None, mod=None):
    """Returns a Diffie-Hellman encoded key.

    Args:
        pubkey: the public key of the other side (a long).
        session_type: 'DH-SHA1' or 'DH-SHA256' (anything else hashes
            with SHA-256).
        secret_b64: the shared secret, base64 encoded.
        gen: generator, defaults to 2.
        mod: modulus, defaults to the default OpenID prime.

    Return: (base64(hash(shared) xor secret), base64(btwoc(our public key)))
    """
    if mod is None:
        mod = _DEFAULT_MOD
    if gen is None:
        gen = _DEFAULT_GEN
    # building the DH key pair: private exponent and g^x mod p
    dh_private = urandom.randrange(1, mod - 1)
    dh_public = pow(gen, dh_private, mod)
    # shared secret: (their_pub)^x mod p, big-endian encoded
    dh_shared = btwoc(pow(pubkey, dh_private, mod))
    if session_type == 'DH-SHA1':
        crypt = lambda x: hashlib.sha1(x).digest()
    else:
        crypt = lambda x: hashlib.sha256(x).digest()
    dh_shared = crypt(dh_shared)
    # the MAC key is the association secret masked with the hashed DH secret
    mac_key = xor(b64decode(secret_b64), dh_shared)
    return b64encode(mac_key), b64encode(btwoc(dh_public))
############################## Database tables and models ####################
from sqlalchemy import schema
from sqlalchemy import types
from sqlalchemy import orm
from sqlalchemy import and_
# SQLAlchemy (classic mapping) table definitions for the OpenID provider
# persistence: pending redirects, association handles, authorised sites,
# user login tokens and user-trusted roots.
metadata = schema.MetaData()
openid_redirects_table = schema.Table('openid_redirects', metadata,
    schema.Column('token', types.Unicode(255), primary_key=True),
    schema.Column('url', types.Text(), default=u''),
    schema.Column('site', types.Text(), default=u''),
    schema.Column('handle', types.Text(), default=u'')
)
openid_handles_table = schema.Table('openid_handles', metadata,
    schema.Column('handler', types.Unicode(255), primary_key=True),
    # column keeps legacy name 'secret'; values are base64-encoded
    schema.Column('secret', types.Text(), default=u''),
    schema.Column('assoc_type', types.Text(), default=u''),
    schema.Column('private', types.Boolean(), default=False)
)
openid_sites_table = schema.Table('openid_sites', metadata,
    schema.Column('id', types.Integer,
                  schema.Sequence('openid_sites_seq_id', optional=True),
                  primary_key=True),
    schema.Column('handle', types.Unicode(255)),
    schema.Column('site', types.Text(), default=u'')
)
openid_user_table = schema.Table('openid_user', metadata,
    schema.Column('user', types.Unicode(255), primary_key=True),
    schema.Column('token', types.Text(), default=u''),
    # absolute expiry as unix time; indexed for the cleanup query
    schema.Column('expire', types.Integer, default=0, index=True)
)
openid_trusted_table = schema.Table('openid_trustedroot', metadata,
    schema.Column('id', types.Integer,
                  schema.Sequence('openid_sites_seq_id', optional=True),
                  primary_key=True),
    schema.Column('user', types.Unicode(255), default=u''),
    schema.Column('site', types.Text(), default=u'')
)
class RedirectsTable(object):
    """ORM row: a pending redirect keyed by a token derived from the URL."""

    def __init__(self, token="", url="", site="",
                 handle=""):
        log.debug("creating RedirectsTable object: token=%r, url=%r, site=%r,"
                  " handle=%r" % (token, url, site, handle))
        self.token = token
        self.url = url
        self.site = site
        self.handle = handle
class HandlesTable(object):
    """ORM row: an association handle with its base64 secret and type."""

    def __init__(self, handler="", secret_b64="", assoc_type="",
                 private=False):
        log.debug("creating Handles object: handler=%r, "
                  "secret_b64=%r, assoc_type=%r" %
                  (handler, secret_b64, assoc_type))
        self.handler = handler
        # The .secret is the database column, which keeps the name
        # "secret" for backward compatibility
        self.secret = secret_b64
        self.assoc_type = assoc_type
        self.private = private
class SitesTable(object):
    """ORM row: a relying-party site authorised under a given handle."""

    def __init__(self, handle="", site=""):
        log.debug("creating SitesTable object: handle=%r, site=%r" %
                  (handle, site))
        self.site = site
        self.handle = handle
class UserTable(object):
    """ORM row: a user's login token and its unix-time expiry."""

    def __init__(self, user, token, expire):
        log.debug("creating UserTable object: user=%r, token=%r"
                  % (user, token))
        self.user = user
        self.token = token
        self.expire = expire
class TrustedRootTable(object):
    """ORM row: a site the user marked as trusted."""

    def __init__(self, user, site):
        log.debug("creating TrustedRoot object: user=%r, site=%r"
                  % (user, site))
        self.user = user
        self.site = site
# Classic (non-declarative) SQLAlchemy mapping of each model class onto
# its table definition above.
orm.mapper(RedirectsTable, openid_redirects_table)
orm.mapper(HandlesTable, openid_handles_table)
orm.mapper(SitesTable, openid_sites_table)
orm.mapper(UserTable, openid_user_table)
orm.mapper(TrustedRootTable, openid_trusted_table)
class SQLStorage(object):
    """SQL-backed storage for the OpenID provider (redirects, associations,
    authorised sites, trusted roots and user login tokens).

    The connection string comes from the pylons config key
    ``linotpOpenID.sql.url`` with ``sqlalchemy.url`` as fallback.

    NOTE(review): every mutating method uses a bare ``except:`` that only
    rolls back and logs — errors are swallowed silently.
    """

    def __init__(self):
        connect_string = config.get("linotpOpenID.sql.url")
        implicit_returning = config.get("linotpSQL.implicit_returning", True)
        self.engine = None
        if connect_string is None:
            log.info("Missing linotpOpenID.sql.url parameter in "
                     "config file! Using the sqlalchemy.url")
            # raise Exception("Missing linotpOpenID.sql.url parameter in "
            #                "config file!")
            connect_string = config.get("sqlalchemy.url")
        ########################## SESSION ##################################
        # Create an engine and create all the tables we need
        if implicit_returning:
            # If implicit_returning is explicitly set to True, we
            # get lots of mysql errors:
            # AttributeError: 'MySQLCompiler_mysqldb' object has no attribute
            # 'returning_clause' So we do not mention explicit_returning at all
            self.engine = create_engine(connect_string)
        else:
            self.engine = create_engine(connect_string,
                                        implicit_returning=False)
        metadata.bind = self.engine
        metadata.create_all()
        # Set up the session
        self.sm = orm.sessionmaker(bind=self.engine, autoflush=True,
                                   autocommit=False,
                                   expire_on_commit=True)
        self.session = orm.scoped_session(self.sm)

    @classmethod
    def get_name(self):
        """Return the storage backend's identifier."""
        return 'SQLStorage'

    def add_redirect(self, url, site, handle):
        """Persist a redirect URL and return its token (sha1 of the URL)."""
        log.debug("[add_redirect] url=%r, site=%r, handle=%r"
                  % (url, site, handle))
        token = sha1(url).hexdigest()
        rd = RedirectsTable(
            token=token,
            url=url,
            site=site,
            handle=handle)
        try:
            self.session.add(rd)
            self.session.flush()
            self.session.commit()
        except:
            self.session.rollback()
            log.error("Error storing redirect!")
        return token

    def get_redirect(self, redirect_token):
        """Return (url, site, handle) for a token; empty strings if unknown."""
        log.debug("[get_redirect] redirect_token=%r" % redirect_token)
        redirect = self.session.query(RedirectsTable).\
            filter(RedirectsTable.token == redirect_token)
        url = ""
        site = ""
        handle = ""
        for r in redirect:
            url = r.url
            site = r.site
            handle = r.handle
        log.debug("[get_redirect] url=%r, site=%r, handle=%r" %
                  (url, site, handle))
        return url, site, handle

    def add_association(self, handler, secret_b64, assoc_type, private=False,
                        expires_in=None):
        """Persist an association handle with its secret and type.

        NOTE(review): ``expires_in`` is accepted but never stored.
        """
        log.debug("[add_association] handler=%r, secret_b64=%r, assoc_type=%r,"
                  " expires_in=%r"
                  % (handler, secret_b64, assoc_type, expires_in))
        ha = HandlesTable(handler=handler,
                          secret_b64=secret_b64,
                          assoc_type=assoc_type,
                          private=private)
        try:
            self.session.add(ha)
            self.session.flush()
            self.session.commit()
        except:
            self.session.rollback()
            log.error("Error storing association!")

    def get_association(self, handler):
        """Return (secret_b64, assoc_type, private) for a handle; defaults
        ("", "", False) when the handle is unknown."""
        assoc = self.session.query(HandlesTable).\
            filter(HandlesTable.handler == handler)
        secret_b64 = ""
        assoc_type = ""
        private = False
        for a in assoc:
            secret_b64 = a.secret
            assoc_type = a.assoc_type
            private = a.private
        return secret_b64, assoc_type, private

    def del_association(self, handler):
        """Delete the association row for a handle (best effort)."""
        try:
            self.session.query(HandlesTable).\
                filter(HandlesTable.handler == handler).\
                delete(synchronize_session='fetch')
            self.session.flush()
            self.session.commit()
        except:
            self.session.rollback()
            log.error("Error deleting association")

    def add_site(self, site, handle):
        """Record that ``site`` was authorised under ``handle``."""
        si = SitesTable(site=site,
                        handle=handle)
        try:
            self.session.add(si)
            self.session.flush()
            self.session.commit()
        except:
            self.session.rollback()
            log.error("Error storing site")

    def get_sites(self, handle):
        """Return all site URLs recorded for a handle."""
        site_list = []
        sites = self.session.query(SitesTable).\
            filter(SitesTable.handle == handle)
        for site in sites:
            site_list.append(site.site)
        return site_list

    def add_trusted_root(self, user, site):
        """Record a trusted root site for a user."""
        tr = TrustedRootTable(user=user, site=site)
        try:
            self.session.add(tr)
            self.session.flush()
            self.session.commit()
        except:
            self.session.rollback()
            log.error("Error storing trusted root")

    def get_trusted_roots(self, user):
        """Return all trusted root sites stored for a user."""
        root_list = []
        roots = self.session.query(TrustedRootTable).\
            filter(TrustedRootTable.user == user)
        for root in roots:
            root_list.append(root.site)
        return root_list

    def check_auth(self, handle, site):
        """Return True if exactly one (site, handle) authorisation exists."""
        sites = self.session.query(SitesTable).\
            filter(and_(SitesTable.site == site,
                        SitesTable.handle == handle)).count()
        return sites == 1

    def _create_token(self, user):
        """Generate a random hex token.

        NOTE(review): ``user`` is unused; the token is sha1 of 32 random
        bytes from the module's ``urandom`` source.
        """
        seed = ""
        for i in range(32):
            seed += chr(urandom.randrange(0, 255))
        token = binascii.hexlify(hashlib.sha1(seed).digest())
        return token

    def set_user_token(self, user, expire=3600):
        '''
        This function sets the token of the user. This is the token,
        that is also stored in the cookie.

        :param user: the username
        :param expire: the time in seconds, how long this token is valid.
                       This corresponds to the cookie lifetime.
        :return: the newly created token
        '''
        token = self._create_token(user)
        # replace any previous token for this user
        try:
            self.session.query(UserTable).filter(UserTable.user == user).\
                delete(synchronize_session='fetch')
            self.session.flush()
            self.session.commit()
        except:
            self.session.rollback()
            log.error("Error deleting user")
        us = UserTable(user=user,
                       token=token,
                       expire=int(time.time()) + int(expire))
        try:
            self.session.add(us)
            self.session.flush()
            self.session.commit()
        except:
            self.session.rollback()
            log.error("Error storing user")
        return token

    def _expire_user_token(self, expire_time):
        """Delete all user tokens whose expiry is before ``expire_time``."""
        self.session.query(UserTable).\
            filter(UserTable.expire < expire_time).\
            delete(synchronize_session='fetch')
        self.session.flush()
        self.session.commit()
        return

    def get_user_token(self, user):
        """Return the user's current (non-expired) token, or 0 if none."""
        # purge expired tokens first
        self._expire_user_token(expire_time=int(time.time()))
        token = 0
        qu_token = self.session.query(UserTable).filter(UserTable.user == user)
        for tok in qu_token:
            # Probably there is only one! ;-)
            token = tok.token
        return token

    def get_user_by_token(self, token):
        """Return the username owning ``token``, or "" if unknown."""
        user = ""
        qu_user = self.session.query(UserTable).\
            filter(UserTable.token == token)
        for u in qu_user:
            user = u.user
        return user
class IdResMessage(dict):
    """An OpenID positive assertion ('id_res') response message.

    Behaves as the dict of 'openid.*' response fields; sign() adds the
    SREG/AX extension data and the HMAC signature.
    """

    def __init__(self, storage, host, expires_in=3600, **params):
        # NOTE(review): the ``storage`` parameter is ignored — the storage
        # is always taken from config['openid_sql']; confirm intent.
        self.storage = config.get('openid_sql')
        self.expires_in = expires_in
        self.host = host
        self['openid.ns'] = params.get('openid.ns', _PROTO_2)
        self.identity = params.get('openid.identity')
        # the username is the last path component of the identity URL
        user = self.identity.split('/')[-1]
        self.user = user
        self['openid.mode'] = 'id_res'
        self['openid.identity'] = self.identity
        self['openid.claimed_id'] = params.get('openid.identity')
        self['openid.op_endpoint'] = self.host
        return_to = self['openid.return_to'] = params.get('openid.return_to')
        trust_root = params.get('openid.trust_root')
        if trust_root is not None:
            self['openid.trust_root'] = trust_root
        handle = params.get('openid.assoc_handle')
        stateless = handle is None
        if stateless:
            # dumb-mode, no association was created previously
            # creating a private one
            self['openid.assoc_handle'] = self._create_handle()
        else:
            self['openid.response_nonce'] = get_nonce()
            signed = ['mode', 'identity', 'assoc_handle', 'return_to',
                      'sreg.nickname', 'claimed_id', 'op_endpoint',
                      'response_nonce']
            if trust_root is not None:
                signed.append('trust_root')
            self.signed = signed
            self['openid.assoc_handle'] = handle
        site = params.get('openid.trust_root')
        if site is None:
            site = return_to
        self.site = site.split('?')[0]  # XXX
        self['openid.sreg.nickname'] = user

    def _dump(self):
        """Return all fields as a single 'key:value,' debug string."""
        me_string = ""
        for key in self:
            me_string += "%s:%s," % (key, self[key])
        return me_string

    def _create_handle(self):
        """Create and store a private (stateless-mode) association handle."""
        client_ns = self['openid.ns']
        # hash strength follows the client's protocol version
        if client_ns == _PROTO_1:
            assoc_type = 'HMAC-SHA1'
        else:
            assoc_type = 'HMAC-SHA256'
        secret_b64, handle = create_handle(assoc_type)
        self.storage.add_association(handle, secret_b64,
                                     assoc_type, private=True,
                                     expires_in=self.expires_in)
        self['openid.response_nonce'] = get_nonce()
        signed = ['return_to', 'response_nonce', 'assoc_handle',
                  'claimed_id', 'identity', 'mode']
        if client_ns == _PROTO_2:
            self['openid.op_endpoint'] = self.host
            signed.append('op_endpoint')
            signed.append('ns')
        if self.get('openid.trust_root') is not None:
            signed.append('trust_root')
        self.signed = signed
        return handle

    def get_url(self):
        """Merge this message's fields into the return_to URL's query and
        return the resulting redirect URL."""
        parsed = list(urlparse.urlparse(self['openid.return_to']))
        old_query = urlparse.parse_qs(parsed[4])
        # keep pre-existing query parameters unless we override them
        for key, value in old_query.items():
            if key in self:
                continue
            self[key] = value[0]
        parsed[4] = urllib.urlencode(self)
        return urlparse.urlunparse(parsed)

    def store_site(self):
        """Persist the authorised site for this association.

        NOTE(review): passes three arguments but SQLStorage.add_site takes
        only (site, handle) — this call would raise TypeError; confirm
        which storage implementation is expected here.
        """
        self.storage.add_site(self['openid.sreg.nickname'],
                              self.site, self['openid.assoc_handle'])

    def store_redirect(self):
        """Persist the fully-built redirect URL and return its token."""
        self.storage.session.commit()
        return self.storage.add_redirect(self.get_url(),
                                         self.site, self['openid.assoc_handle'])

    def get_user_detail(self):
        """
        get detail info about openid cookie owner

        :return: tuple of (email, firstname, lastname, fullname)
        """
        email = ""
        fullname = ""
        firstname = ""
        lastname = ""
        ## search in userresolvers for user detail
        user = self.user
        if "@" not in user:
            user = "%s@%s" % (user, getDefaultRealm())
        login, realm = user.split('@')
        usr = User(login, realm)
        (userid, res_id, res_conf) = getUserId(usr)
        usr_detail = getUserInfo(userid, res_id, res_conf)
        if "email" in usr_detail:
            email = usr_detail["email"]
        if "givenname" in usr_detail:
            firstname = usr_detail["givenname"]
        if "surname" in usr_detail:
            lastname = usr_detail["surname"]
        if firstname and lastname:
            fullname = "%s %s" % (firstname, lastname)
        elif firstname:
            fullname = "%s" % firstname
        elif lastname:
            fullname = "%s" % lastname
        return (email, firstname, lastname, fullname)

    def sign(self):
        """Signs the message -
        calculate and add signature to self dict entry: 'openid.sig'

        Adds the SREG and AX extension fields first, then HMACs the sorted
        'field:value\\n' concatenation of all signed fields.

        :return: - nothing -
        """
        (email, firstname, lastname, fullname) = self.get_user_detail()
        self.signed.append('ns')
        self["openid.claimed_id"] = self["openid.identity"]
        self.signed.append('claimed_id')
        ## add extension sreg info for std client
        self["openid.ns.sreg"] = "http://openid.net/extensions/sreg/1.1"
        self.signed.append('ns.sreg')
        self["openid.sreg.email"] = email
        self["openid.sreg.fullname"] = fullname
        self.signed.append('sreg.email')
        self.signed.append('sreg.fullname')
        self.signed.append('sreg.nickname')
        ## add extension ax to transfer user information
        self["openid.ns.ext1"] = "http://openid.net/srv/ax/1.0"
        self.signed.append('ns.ext1')
        self["openid.ext1.mode"] = "fetch_response"
        self["openid.ext1.type.Email"] = ("http://schema.openid.net/"
                                          "contact/email")
        self["openid.ext1.value.Email"] = email
        self["openid.ext1.type.FirstName"] = ("http://schema.openid.net/"
                                              "namePerson/first")
        self["openid.ext1.value.FirstName"] = firstname
        self["openid.ext1.type.LastName"] = ("http://schema.openid.net/"
                                             "namePerson/last")
        self["openid.ext1.value.LastName"] = lastname
        self.signed.append('ext1.mode')
        self.signed.append('ext1.type.Email')
        self.signed.append('ext1.value.Email')
        self.signed.append('ext1.type.FirstName')
        self.signed.append('ext1.value.FirstName')
        self.signed.append('ext1.type.LastName')
        self.signed.append('ext1.value.LastName')
        # deduplicate and fix the order of signed fields
        sorted_sign = sorted(set(self.signed))
        self['openid.signed'] = ','.join(sorted_sign)
        # collecting fields to sign
        fields = []
        for field in sorted_sign:
            value = self['openid.' + field]
            fields.append(u'%s:%s\n' % (field, value))
        fields = unicode(''.join(fields))
        # getting the handle
        mac_key, assoc_type = self._get_association()
        # picking the hash type
        if assoc_type == 'HMAC-SHA256':
            crypt = hashlib.sha256
        else:
            crypt = hashlib.sha1
        # signing the message
        hash = hmac.new(b64decode(mac_key), fields, crypt)
        self['openid.sig'] = b64encode(hash.digest())

    def _get_association(self):
        """
        getting the association handle

        :return: (mac_key, assoc_type) for the current handle; on a
            KeyError the handle is invalidated and a private one created
            (dumb mode).
        """
        handle = self.get('openid.assoc_handle')
        try:
            mac_key, assoc_type, __ = self.storage.get_association(handle)
        except KeyError:
            # handle expired or not existing, switching to dumb mode
            self['openid.invalidate_handle'] = handle
            handle = self['openid.assoc_handle'] = self._create_handle()
            mac_key, assoc_type, __ = self.storage.get_association(handle)
        return mac_key, assoc_type
def check_authentication(**params):
    """Handle a 'check_authentication' request (stateless verification).

    Looks up the (handle, site) pair in storage and returns the key-value
    response body; a successfully verified private handle is deleted so it
    cannot be replayed.
    """
    storage = config.get('openid_sql')
    trust_root = params.get('openid.trust_root')
    if trust_root is None:
        trust_root = params.get('openid.return_to')
    site = trust_root.split('?')[0]  # XXX
    handle = params.get('openid.assoc_handle')
    lines = ['openid_mode:id_res\n']
    if storage.check_auth(handle, site):
        lines.append('is_valid:true\n')
        # one-shot: drop the handle after successful verification
        storage.del_association(handle)
    else:
        lines.append('is_valid:false\n')
    return ''.join(lines)
def create_association(storage, expires_in=3600, **params):
    """Answer an OpenID ``associate`` request.

    Creates a new association handle, optionally runs the Diffie-Hellman
    exchange (``DH-SHA1``/``DH-SHA256`` session types), persists the
    association in ``storage`` and returns the response encoded as
    newline-separated ``key:value`` pairs.
    """
    assoc_type = params['openid.assoc_type']
    session_type = params['openid.session_type']
    # creating association info: new handle plus base64-encoded secret
    secret_b64, assoc_handle = create_handle(assoc_type)
    response = {
        'ns': 'http://specs.openid.net/auth/2.0',
        'assoc_handle': assoc_handle,
        'session_type': session_type,
        'assoc_type': assoc_type,
        'expires_in': unicode(expires_in),
    }
    if session_type in ('DH-SHA1', 'DH-SHA256'):
        # Diffie-Hellman session: the MAC key travels encrypted
        dh_pub = unbtwoc(b64decode(params['openid.dh_consumer_public']))
        dh_gen = None
        if 'openid.dh_gen' in params:
            dh_gen = unbtwoc(b64decode(params['openid.dh_gen']))
        dh_modulus = None
        if 'openid.dh_modulus' in params:
            dh_modulus = unbtwoc(b64decode(params['openid.dh_modulus']))
        # building the DH signature
        key, serv_pub = get_dh_key(dh_pub, session_type,
                                   secret_b64, dh_gen, dh_modulus)
        response['dh_server_public'] = serv_pub
        response['enc_mac_key'] = key
    elif session_type == 'no-encryption':
        response['mac_key'] = secret_b64
    storage.add_association(assoc_handle, secret_b64,
                            assoc_type, False, expires_in)
    lines = ['%s:%s' % (key, value) for key, value in response.items()]
    return '\n'.join(lines) + "\n"
/Ciw-3.0.0.tar.gz/Ciw-3.0.0/docs/Reference/state_trackers.rst | .. _refs-statetrackers:
==================================
List of Implemented State Trackers
==================================
Currently Ciw has the following state trackers:
- :ref:`population`
- :ref:`nodepop`
- :ref:`nodepopsubset`
- :ref:`nodeclssmatrix`
- :ref:`naiveblock`
- :ref:`matrixblock`
.. _population:
----------------------------
The SystemPopulation Tracker
----------------------------
The SystemPopulation Tracker records the number of customers in the whole system, regardless of which node they are at.
States take the form of a number::
4
This denotes that there are four customers in the system.
The Simulation object takes in the optional argument :code:`tracker` used as follows::
>>> Q = ciw.Simulation(N, tracker=ciw.trackers.SystemPopulation()) # doctest:+SKIP
.. _nodepop:
--------------------------
The NodePopulation Tracker
--------------------------
The NodePopulation Tracker records the number of customers at each node.
States take the form of list of numbers. An example for a three node queueing network is shown below::
(2, 0, 5)
This denotes that there are two customers at the first node, no customers at the second node, and five customers at the third node.
The Simulation object takes in the optional argument :code:`tracker` used as follows::
>>> Q = ciw.Simulation(N, tracker=ciw.trackers.NodePopulation()) # doctest:+SKIP
.. _nodepopsubset:
--------------------------------
The NodePopulationSubset Tracker
--------------------------------
The NodePopulationSubset Tracker, similar to the NodePopulation Tracker, records the number of customers at each node. However this allows users to only track a subset of the nodes in the system.
States take the form of list of numbers. An example of tracking a three node queueing network is shown below::
(2, 0, 5)
This denotes that there are two customers at the first observed node, no customers at the second observed node, and five customers at the third observed node.
The Simulation object takes in the optional argument :code:`tracker`, which takes an argument :code:`observed_nodes` a list of node numbers to observe, used as follows (observing the first, second, and fifth nodes)::
>>> Q = ciw.Simulation(N, tracker=ciw.trackers.NodePopulationSubset([0, 1, 4])) # doctest:+SKIP
.. _nodeclssmatrix:
---------------------------
The NodeClassMatrix Tracker
---------------------------
The NodeClassMatrix Tracker records the number of customers at each node, split by customer class.
States take the form of matrix, that is a list of lists, where the rows denote the nodes and the columns denote the customer classes. An example for a three node queueing network with two customer classes is shown below::
((3, 0),
(0, 1),
(4, 1))
This denotes that there are:
+ Three customers at the first node - three of Class 0, and none of Class 1
+ One customer at the second node - none of Class 0, and one of Class 1
+ Five customers at the third node - four of Class 0, and one of Class 1.
The Simulation object takes in the optional argument :code:`tracker`, which takes an argument :code:`class_ordering`, an ordered list of customer class names used to order the customer classes, used as follows::
>>> Q = ciw.Simulation(N, tracker=ciw.trackers.NodeClassMatrix(['Class 0', 'Class 1'])) # doctest:+SKIP
.. _naiveblock:
-------------------------
The NaiveBlocking Tracker
-------------------------
The NaiveBlocking Tracker records the number of customers at each node, and how many of those customers are currently blocked.
An example for a four node queueing network is shown below::
((3, 0), (1, 4), (10, 0), (8, 1))
This denotes 3 customers at the first node, 0 of which are blocked; 5 customers at the second node, 4 of which are blocked; 10 customers at the third node, 0 of which are blocked; and 9 customers at the fourth node, 1 of which are blocked.
The Simulation object takes in the optional argument :code:`tracker` used as follows::
>>> Q = ciw.Simulation(N, tracker=ciw.trackers.NaiveBlocking()) # doctest:+SKIP
.. _matrixblock:
--------------------------
The MatrixBlocking Tracker
--------------------------
The MatrixBlocking Tracker records the order and destination of blockages in the form of a matrix.
Alongside this the number of customers at each node is tracked.
The first component, a matrix, lists the blockages from row node to column node.
The entries are lists of all blockages of this type, and the numbers within denote the order at which these become blocked.
An example for a four node queueing network is shown below::
( ( ( (), (), (), () ),
( (), (1, 4), (), (2) ),
( (), (), (), () ),
( (3), (), (), () ) ),
(3, 5, 10, 9) )
This denotes:
+ 3 customers at the first node
+ 5 customers at the second node
+ 10 customers at the third node
+ 9 customers at the fourth node
It also tells us the order and destination of the blockages:
+ Of the customers blocked, the first to be blocked was at node 2 to node 2
+ The second was at node 2 to node 4
+ The third was at node 4 to node 1
+ The fourth was at node 2 to node 2.
The Simulation object takes in the optional argument :code:`tracker` used as follows::
>>> Q = ciw.Simulation(N, tracker=ciw.trackers.MatrixBlocking()) # doctest:+SKIP
| PypiClean |
/BioFlow-0.2.3.tar.gz/BioFlow-0.2.3/bioflow/utils/log_behavior.py | import os
from os import path
import logging
import logging.handlers
import sys
from shutil import rmtree
# Logs live in "<package root>/logs", i.e. one level above this module's dir.
log_location = path.join(path.abspath(
    path.join(path.dirname(__file__), os.pardir)), 'logs')

# Runtime-environment switches, driven by environment variables.
on_unittest = os.environ.get('UNITTESTING') == 'True'  # if we are unittesting
on_remote_unittest = os.environ.get('REMOTE_UNITTEST') == 'True'  # if we are testing on CI tools
on_remote = os.environ.get('REMOTE') == 'True'

# Developer switch: when True, the log directory is wiped at import time.
on_dev = False
# on_dev = True
# #################################
# redirecting all to a log file
# if on_remote = True:
# f = open('../logs/Commander_logs.log','w')
# sys.stdout = f
# ################################
def mkdir_recursive(my_path):  # pragma: no cover
    """
    Copy of mkdir recursive from saner configs, used here to remove circular
    dependencies.

    Recursively creates every missing ancestor directory of ``my_path`` and
    then ``my_path`` itself, unless its final component looks like a
    win-style file name (``xxx.xxx``, i.e. a dot within its last five
    characters), in which case only the parents are created.

    :param my_path: directory name, or file name whose parents to create
    :return: None
    """
    my_path = os.path.abspath(my_path)
    parent = os.path.dirname(my_path)
    if not os.path.exists(parent):
        # build the ancestor chain first
        mkdir_recursive(parent)
    if os.path.exists(my_path):
        return
    last_component = my_path.split('/')[-1]
    # a dot near the end marks a file name -> suppress directory creation
    if '.' not in last_component[-5:]:
        os.mkdir(my_path)
def wipe_dir(_path):  # pragma: no cover
    """
    Wipes the indicated directory.

    If ``_path`` is a directory it is removed itself; if it is a file, its
    parent directory is removed.  Removal is refused (returns False) when
    the directory contains sub-directories; plain files are allowed.

    :param _path: directory (or file within the directory) to wipe
    :return: True on success
    """
    _path = os.path.abspath(_path)
    if not os.path.exists(_path):
        return True  # Nothing to do: destruction already done
    if os.path.isdir(_path):
        directory_name = _path
    else:
        directory_name = os.path.dirname(_path)
    if not os.path.isdir(directory_name):
        return False
    for sub_path in os.listdir(directory_name):
        # Fix: os.listdir yields bare names; without joining with the
        # parent directory the isdir test was evaluated relative to the
        # CWD and the sub-directory guard effectively never triggered.
        if os.path.isdir(os.path.join(directory_name, sub_path)):
            return False
    rmtree(directory_name)
    return True
def add_handler(_logger, level, file_name, rotating=False):
    """
    Attach a file-writing handler to the given logger.

    :param _logger: logger object to extend
    :param level: logging.DEBUG or other level
    :param file_name: short file name, stored within the application logs location
    :param rotating: if true, a rotating file handler (3 backups) is added
    :return: None
    """
    log_file = os.path.join(log_location, file_name)
    if rotating:
        handler = logging.handlers.RotatingFileHandler(
            log_file, maxBytes=1e7, backupCount=3)
    else:
        handler = logging.FileHandler(log_file, mode='a')
    handler.setLevel(level)
    handler.setFormatter(formatter)
    _logger.addHandler(handler)
# define a formatter shared by every handler created in this module
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

# developers start from a clean slate: wipe stale logs at import time
if on_dev:
    wipe_dir(log_location)

# create location where the logs will be stored
mkdir_recursive(log_location)
def get_logger(logger_name):
    """
    Returns a properly configured logger object.

    Handlers are attached only the first time a given name is requested;
    subsequent calls return the already-configured logger unchanged.
    (The original attached a fresh set of handlers on every call, so each
    repeat call duplicated every emitted log line.)

    :param logger_name: name of the logger object
    """
    _logger = logging.getLogger(logger_name)
    if _logger.handlers:
        # already configured by a previous call - do not stack handlers
        return _logger
    _logger.setLevel(logging.DEBUG)

    # one file per severity level; debug rotates to stay bounded in size
    add_handler(_logger, logging.DEBUG, 'debug.log', rotating=True)
    add_handler(_logger, logging.INFO, 'info.log')
    add_handler(_logger, logging.WARNING, 'warning.log')
    add_handler(_logger, logging.ERROR, 'error.log')
    add_handler(_logger, logging.CRITICAL, 'critical.log')

    if not on_remote_unittest:  # pragma: no cover
        # console echo (stderr); verbose when developing or unit-testing
        ch = logging.StreamHandler(sys.stderr)
        if on_unittest or on_dev:
            ch.setLevel(logging.DEBUG)
        else:
            ch.setLevel(logging.INFO)
        ch.setFormatter(formatter)
        _logger.addHandler(ch)

    return _logger
# Module-level fallback logger; callers should prefer get_logger(__name__).
logger = get_logger('this_logger_needs_to_be_renamed')
def clear_logs():
    """
    Wipes the logs directory (delegates to wipe_dir; see its docstring for
    the exact removal semantics and return value, which is discarded here).
    """
    wipe_dir(log_location)
/OASYS1-1.2.130.tar.gz/OASYS1-1.2.130/oasys/application/addons.py | import sys
import sysconfig
import os
import logging
import re
import errno
import shlex
import subprocess
import itertools
import json
import traceback
import concurrent.futures
from collections import namedtuple, deque
from xml.sax.saxutils import escape
from distutils import version
import pkg_resources
import requests
try:
import docutils.core
except ImportError:
docutils = None
from PyQt5.QtWidgets import (
QWidget, QDialog, QLabel, QLineEdit, QTreeView, QHeaderView,
QTextBrowser, QDialogButtonBox, QProgressDialog,
QVBoxLayout, QStyle, QStyledItemDelegate, QStyleOptionViewItem,
QApplication, QHBoxLayout, QPushButton, QFormLayout
)
from PyQt5.QtGui import (
QStandardItemModel, QStandardItem, QPalette, QTextOption
)
from PyQt5.QtCore import (
QSortFilterProxyModel, QItemSelectionModel,
Qt, QObject, QMetaObject, QEvent, QSize, QTimer, QThread, Q_ARG,
QSettings)
from PyQt5.QtCore import pyqtSignal as Signal, pyqtSlot as Slot
from urllib.request import urlopen
from orangecanvas.gui.utils import message_warning, message_information, \
message_critical as message_error
from orangecanvas.help.manager import get_dist_meta, trim, parse_meta
from orangecanvas.resources import package_dirname
# PyPI JSON API endpoint template.
PYPI_API_JSON = "https://pypi.org/pypi/{name}/json"

# read add-on list (one package name per line; blanks filtered out)
OFFICIAL_ADDONS = [a.strip() for a in open(os.path.join(package_dirname("oasys.application"), "data", "OFFICIAL_ADDONS.txt"), "rt")]
OFFICIAL_ADDONS = [a for a in OFFICIAL_ADDONS if a]

# query PyPI
# NOTE(review): this performs one network request per official add-on at
# module import time; import of this module therefore blocks on the network.
official_addons_list = []
is_auto_update = True
try:
    for package in OFFICIAL_ADDONS:
        r = urlopen(PYPI_API_JSON.format(name=package)).read().decode("utf-8")
        p = json.loads(r)
        p["releases"] = p["releases"][p["info"]["version"]] # load only the last version
        official_addons_list.append(p)
# NOTE(review): bare except deliberately swallows any failure (network,
# JSON, missing keys) and falls back to the static list; consider
# narrowing to `except Exception`.
except:
    is_auto_update = False

# Static add-on list published in the oasys-addons repository, plus a
# CDN mirror used when the primary host is unreachable.
OFFICIAL_ADDON_LIST = "https://raw.githubusercontent.com/oasys-kit/oasys-addons/master/list"
OFFICIAL_ADDON_LIST_ALTERNATIVE = "https://rawcdn.githack.com/oasys-kit/oasys-addons/91dbd16c78f2ce42f4abe65e72c17abe064e0520/list"
log = logging.getLogger(__name__)

# Description of a package as published on PyPI (or built from a dropped
# archive's metadata).
Installable = namedtuple(
    "Installable",
    ["name",
     "version",
     "summary",
     "description",
     "package_url",
     "release_urls"]
)

# A single downloadable release file of a package.
ReleaseUrl = namedtuple(
    "ReleaseUrl",
    ["filename",
     "url",
     "size",
     "python_version",
     "package_type"
     ]
)

# List-model item: a package available on PyPI but not installed locally.
Available = namedtuple(
    "Available",
    ["installable"]
)

# List-model item: a locally installed package; `installable` is its PyPI
# counterpart (may be None), `local` the pkg_resources distribution.
Installed = namedtuple(
    "Installed",
    ["installable",
     "local"]
)
def is_updatable(item):
    """Return True when ``item`` is an installed add-on whose PyPI release
    is newer than the local distribution."""
    # Not installed, or installed with no matching PyPI entry: no update.
    if isinstance(item, Available):
        return False
    if item.installable is None:
        return False
    remote, local = item
    try:
        # strict (PEP 386-style) comparison first ...
        return version.StrictVersion(local.version) < version.StrictVersion(remote.version)
    except ValueError:
        # ... falling back to lenient comparison for non-standard versions
        return version.LooseVersion(local.version) < version.LooseVersion(remote.version)
class TristateCheckItemDelegate(QStyledItemDelegate):
    """
    A QStyledItemDelegate which properly toggles Qt.ItemIsTristate check
    state transitions on user interaction.
    """
    def editorEvent(self, event, model, option, index):
        # Only enabled, user-checkable items with a check-state can toggle.
        flags = model.flags(index)
        if not flags & Qt.ItemIsUserCheckable or \
                not option.state & QStyle.State_Enabled or \
                not flags & Qt.ItemIsEnabled:
            return False

        checkstate = model.data(index, Qt.CheckStateRole)
        if checkstate is None:
            return False

        widget = option.widget
        style = widget.style() if widget else QApplication.style()
        if event.type() in {QEvent.MouseButtonPress, QEvent.MouseButtonRelease,
                            QEvent.MouseButtonDblClick}:
            # Accept only left clicks that hit the check-indicator rect.
            pos = event.pos()
            opt = QStyleOptionViewItem(option)
            self.initStyleOption(opt, index)
            rect = style.subElementRect(
                QStyle.SE_ItemViewItemCheckIndicator, opt, widget)

            if event.button() != Qt.LeftButton or not rect.contains(pos):
                return False

            # Consume press/double-click; the toggle happens on release.
            if event.type() in {QEvent.MouseButtonPress,
                                QEvent.MouseButtonDblClick}:
                return True

        elif event.type() == QEvent.KeyPress:
            if event.key() != Qt.Key_Space and event.key() != Qt.Key_Select:
                return False
        else:
            return False

        if model.flags(index) & Qt.ItemIsTristate:
            # cycle Unchecked -> Checked -> PartiallyChecked -> ...
            checkstate = (checkstate + 1) % 3
        else:
            checkstate = \
                Qt.Unchecked if checkstate == Qt.Checked else Qt.Checked

        return model.setData(index, checkstate, Qt.CheckStateRole)
def get_meta_from_archive(path):
    """Return project name, version, description and summary extracted from
    sdist or wheel metadata in a ZIP or tar.gz archive, or None if metadata
    can't be found."""
    def is_metadata(fname):
        return fname.endswith(('PKG-INFO', 'METADATA'))

    raw_meta = None
    if path.endswith(('.zip', '.whl')):
        from zipfile import ZipFile
        with ZipFile(path) as archive:
            member = next(filter(is_metadata, archive.namelist()), None)
            if member:
                raw_meta = archive.read(member).decode('utf-8')
    elif path.endswith(('.tar.gz', '.tgz')):
        import tarfile
        with tarfile.open(path) as archive:
            member = next(filter(is_metadata, archive.getnames()), None)
            if member:
                raw_meta = archive.extractfile(member).read().decode('utf-8')

    if not raw_meta:
        return None
    parsed = parse_meta(raw_meta)
    return [parsed.get(key, '')
            for key in ('Name', 'Version', 'Description', 'Summary')]
def cleanup(name, sep="-"):
    """Sanitize an add-on name for display.

    Everything up to and including the first ``sep`` (e.g. an "Orange3-"
    style prefix) is dropped, the remainder is capitalized, and only the
    CamelCase word runs ([A-Z][a-z]*) are kept, concatenated - digits and
    other characters are discarded.
    """
    head, found_sep, tail = name.partition(sep)
    cleaned = tail if found_sep == sep else head
    cleaned = cleaned[0].upper() + cleaned[1:]
    return "".join(re.findall("[A-Z][a-z]*", cleaned))
class AddonManagerWidget(QWidget):
    """
    Widget listing installed/available add-ons: a filter line edit, a tree
    view with tri-state check boxes (install/uninstall/update) and a
    read-only detail pane rendering the selected add-on's description.
    """

    # Emitted whenever the requested state of any listed item changes.
    statechanged = Signal()

    def __init__(self, parent=None, **kwargs):
        super(AddonManagerWidget, self).__init__(parent, **kwargs)

        # current list of Installed/Available items (parallel to model rows)
        self.__items = []
        self.setLayout(QVBoxLayout())

        self.__header = QLabel(
            wordWrap=True,
            textFormat=Qt.RichText
        )
        self.__search = QLineEdit(
            placeholderText=self.tr("Filter")
        )
        self.tophlayout = topline = QHBoxLayout()
        topline.addWidget(self.__search)
        self.layout().addLayout(topline)

        self.__view = view = QTreeView(
            rootIsDecorated=False,
            editTriggers=QTreeView.NoEditTriggers,
            selectionMode=QTreeView.SingleSelection,
            alternatingRowColors=True
        )
        # column 0 holds the tri-state install/update check box
        self.__view.setItemDelegateForColumn(0, TristateCheckItemDelegate())
        self.layout().addWidget(view)

        self.__model = model = QStandardItemModel()
        model.setHorizontalHeaderLabels(["", "Name", "Version", "Action"])
        model.dataChanged.connect(self.__data_changed)
        # name-column filtering driven by the search line edit
        self.__proxy = proxy = QSortFilterProxyModel(
            filterKeyColumn=1,
            filterCaseSensitivity=Qt.CaseInsensitive
        )
        proxy.setSourceModel(model)
        self.__search.textChanged.connect(proxy.setFilterFixedString)

        view.setModel(proxy)
        view.selectionModel().selectionChanged.connect(
            self.__update_details
        )

        header = self.__view.header()
        header.setSectionResizeMode(0, QHeaderView.Fixed)
        header.setSectionResizeMode(2, QHeaderView.ResizeToContents)

        self.__details = QTextBrowser(
            frameShape=QTextBrowser.NoFrame,
            readOnly=True,
            lineWrapMode=QTextBrowser.WidgetWidth,
            openExternalLinks=True,
        )

        self.__details.setWordWrapMode(QTextOption.WordWrap)
        palette = QPalette(self.palette())
        palette.setColor(QPalette.Base, Qt.transparent)
        self.__details.setPalette(palette)
        self.layout().addWidget(self.__details)

    def set_items(self, items):
        """Populate the model from a list of Installed/Available items."""
        self.__items = items
        model = self.__model
        model.clear()
        model.setHorizontalHeaderLabels(["", "Name", "Version", "Action"])

        for item in items:
            if isinstance(item, Installed):
                installed = True
                ins, dist = item
                name = dist.project_name
                summary = get_dist_meta(dist).get("Summary", "")
                version = ins.version if ins is not None else dist.version
            else:
                installed = False
                (ins,) = item
                dist = None
                name = ins.name
                summary = ins.summary
                version = ins.version

            updatable = is_updatable(item)

            item1 = QStandardItem()
            item1.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable |
                           Qt.ItemIsUserCheckable |
                           (Qt.ItemIsTristate if updatable else 0))

            # check state encodes current status: partially = update available
            if installed and updatable:
                item1.setCheckState(Qt.PartiallyChecked)
            elif installed:
                item1.setCheckState(Qt.Checked)
            else:
                item1.setCheckState(Qt.Unchecked)

            item2 = QStandardItem(cleanup(name))

            item2.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
            item2.setToolTip(summary)
            item2.setData(item, Qt.UserRole)

            if updatable:
                version = "{} < {}".format(dist.version, ins.version)

            item3 = QStandardItem(version)
            item3.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)

            item4 = QStandardItem()
            item4.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)

            model.appendRow([item1, item2, item3, item4])

        self.__view.resizeColumnToContents(0)
        self.__view.setColumnWidth(
            1, max(150, self.__view.sizeHintForColumn(1)))
        self.__view.setColumnWidth(
            2, max(150, self.__view.sizeHintForColumn(2)))

        if self.__items:
            self.__view.selectionModel().select(
                self.__view.model().index(0, 0),
                QItemSelectionModel.Select | QItemSelectionModel.Rows
            )

    def item_state(self):
        """Return the list of requested (action, item) steps, where action
        is one of the module-level Install/Upgrade/Uninstall codes."""
        steps = []
        for i, item in enumerate(self.__items):
            modelitem = self.__model.item(i, 0)
            state = modelitem.checkState()
            if modelitem.flags() & Qt.ItemIsTristate and state == Qt.Checked:
                steps.append((Upgrade, item))
            elif isinstance(item, Available) and state == Qt.Checked:
                steps.append((Install, item))
            elif isinstance(item, Installed) and state == Qt.Unchecked:
                steps.append((Uninstall, item))

        return steps

    def __selected_row(self):
        # source-model row of the current selection, or -1 if none
        indices = self.__view.selectedIndexes()
        if indices:
            proxy = self.__view.model()
            indices = [proxy.mapToSource(index) for index in indices]
            return indices[0].row()
        else:
            return -1

    def set_install_projects(self, names):
        """Mark for installation the add-ons that match any of names"""
        model = self.__model
        for row in range(model.rowCount()):
            item = model.item(row, 1)
            if item.text() in names:
                model.item(row, 0).setCheckState(Qt.Checked)

    def __data_changed(self, topleft, bottomright):
        # keep the "Action" column text in sync with the check state
        rows = range(topleft.row(), bottomright.row() + 1)
        for i in rows:
            modelitem = self.__model.item(i, 0)
            actionitem = self.__model.item(i, 3)
            item = self.__items[i]

            state = modelitem.checkState()
            flags = modelitem.flags()

            if flags & Qt.ItemIsTristate and state == Qt.Checked:
                actionitem.setText("Update")
            elif isinstance(item, Available) and state == Qt.Checked:
                actionitem.setText("Install")
            elif isinstance(item, Installed) and state == Qt.Unchecked:
                actionitem.setText("Uninstall")
            else:
                actionitem.setText("")
        self.statechanged.emit()

    def __update_details(self):
        # refresh the detail pane for the newly selected row
        index = self.__selected_row()
        if index == -1:
            self.__details.setText("")
        else:
            item = self.__model.item(index, 1)
            item = item.data(Qt.UserRole)
            assert isinstance(item, (Installed, Available))
            text = self._detailed_text(item)
            self.__details.setText(text)

    def _detailed_text(self, item):
        # Render the add-on description as HTML (reST via docutils when
        # available, otherwise escaped preformatted text).
        if isinstance(item, Installed):
            remote, dist = item
            if remote is None:
                meta = get_dist_meta(dist)
                description = meta.get("Description") or meta.get('Summary')
            else:
                description = remote.description
        else:
            description = item[0].description

        if docutils is not None:
            try:
                html = docutils.core.publish_string(
                    trim(description),
                    writer_name="html",
                    settings_overrides={
                        "output-encoding": "utf-8",
                        # "embed-stylesheet": False,
                        # "stylesheet": [],
                        # "stylesheet_path": []
                    }
                ).decode("utf-8")
            except docutils.utils.SystemMessage:
                # NOTE(review): "<pre>{}<pre>" lacks the closing </pre> tag
                html = "<pre>{}<pre>".format(escape(description))
            except Exception:
                html = "<pre>{}<pre>".format(escape(description))
        else:
            html = "<pre>{}<pre>".format(escape(description))
        return html

    def sizeHint(self):
        return QSize(480, 420)
def method_queued(method, sig, conntype=Qt.QueuedConnection):
    """Wrap a bound QObject method so calls are dispatched through Qt's
    meta-object system (queued by default, making cross-thread calls safe).

    ``sig`` is the tuple of argument types used to pack each call's
    positional arguments with Q_ARG.
    """
    receiver = method.__self__
    method_name = method.__name__
    assert isinstance(receiver, QObject)

    def invoke(*args):
        packed = [Q_ARG(atype, value) for atype, value in zip(sig, args)]
        return QMetaObject.invokeMethod(receiver, method_name, conntype, *packed)

    return invoke
class AddonManagerDialog(QDialog):
    """
    Dialog hosting an AddonManagerWidget.  Fetches the package list from
    PyPI in a background executor, accepts drag-and-dropped add-on archives,
    and on accept runs the queued install/upgrade/uninstall steps in a
    worker thread, restarting the application afterwards.
    """

    # class-level cache of the fetched package list (shared across dialogs)
    _packages = None

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, acceptDrops=True, **kwargs)
        self.setLayout(QVBoxLayout())
        self.layout().setContentsMargins(0, 0, 0, 0)

        self.addonwidget = AddonManagerWidget()
        self.layout().addWidget(self.addonwidget)

        info_bar = QWidget()
        info_layout = QHBoxLayout()
        info_bar.setLayout(info_layout)
        self.layout().addWidget(info_bar)

        container = QWidget()
        container.setLayout(QHBoxLayout())
        buttons = QDialogButtonBox(
            orientation=Qt.Horizontal,
            standardButtons=QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
        )
        empty = QWidget()
        empty.setFixedWidth(1)
        container.layout().addWidget(buttons)
        container.layout().addWidget(empty)

        addmore = QPushButton(
            "Add more...", toolTip="Add an add-on not listed below",
            autoDefault=False
        )
        self.addonwidget.tophlayout.addWidget(addmore)
        addmore.clicked.connect(self.__run_add_package_dialog)

        buttons.accepted.connect(self.__accepted)
        buttons.rejected.connect(self.__rejected)

        empty = QWidget()
        empty.setFixedHeight(1)

        self.layout().addWidget(container)
        self.layout().addWidget(empty)

        # single-worker executor for all background PyPI queries
        self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
        if AddonManagerDialog._packages is None:
            self._f_pypi_addons = self._executor.submit(list_available_versions)
        else:
            # reuse the cached list via an already-resolved future
            self._f_pypi_addons = concurrent.futures.Future()
            self._f_pypi_addons.set_result(AddonManagerDialog._packages)

        self._f_pypi_addons.add_done_callback(
            method_queued(self._set_packages, (object,))
        )

        self.__progress = None  # type: Optional[QProgressDialog]
        self.__thread = None
        self.__installer = None

        if not self._f_pypi_addons.done():
            self.__progressDialog()

        self.__is_app_to_be_closed = False

    def set_is_app_to_be_closed(self, is_app_to_be_closed=True):
        # when True, rejecting the dialog exits the whole application
        self.__is_app_to_be_closed = is_app_to_be_closed

    def __run_add_package_dialog(self):
        # modal "add by name" dialog; queries PyPI asynchronously
        dlg = QDialog(self, windowTitle="Add add-on by name")
        dlg.setAttribute(Qt.WA_DeleteOnClose)

        vlayout = QVBoxLayout()
        form = QFormLayout()
        form.setContentsMargins(0, 0, 0, 0)
        nameentry = QLineEdit(
            placeholderText="Package name",
            toolTip="Enter a package name as displayed on "
                    "PyPI (capitalization is not important)")
        nameentry.setMinimumWidth(250)
        form.addRow("Name:", nameentry)
        vlayout.addLayout(form)
        buttons = QDialogButtonBox(
            standardButtons=QDialogButtonBox.Ok | QDialogButtonBox.Cancel
        )
        okb = buttons.button(QDialogButtonBox.Ok)
        okb.setEnabled(False)
        okb.setText("Add")

        def changed(name):
            okb.setEnabled(bool(name))
        nameentry.textChanged.connect(changed)
        vlayout.addWidget(buttons)
        vlayout.setSizeConstraint(QVBoxLayout.SetFixedSize)
        dlg.setLayout(vlayout)
        f = None

        def query():
            nonlocal f
            name = nameentry.text()
            f = self._executor.submit(pypi_json_query_project_meta, [name])
            okb.setDisabled(True)

            def ondone(f):
                error_text = ""
                error_details = ""
                try:
                    pkgs = f.result()
                except Exception:
                    log.error("Query error:", exc_info=True)
                    error_text = "Failed to query package index"
                    error_details = traceback.format_exc()
                    pkg = None
                else:
                    pkg = pkgs[0]
                    if pkg is None:
                        # NOTE(review): message reads "not was not found" -
                        # typo for "was not found" (runtime string left as-is)
                        error_text = "'{}' not was not found".format(name)
                if pkg:
                    method_queued(self.add_package, (object,))(pkg)
                    method_queued(dlg.accept, ())()
                else:
                    method_queued(self.__show_error_for_query, (str, str)) \
                        (error_text, error_details)
                    method_queued(dlg.reject, ())()

            f.add_done_callback(ondone)

        buttons.accepted.connect(query)
        buttons.rejected.connect(dlg.reject)
        dlg.exec_()

    @Slot(str, str)
    def __show_error_for_query(self, text, error_details):
        message_error(text, title="Error", details=error_details)

    @Slot(object)
    def add_package(self, installable):
        # type: (Installable) -> None
        if installable.name in {p.name for p in self._packages}:
            return
        else:
            packages = self._packages + [installable]
            self.set_packages(packages)

    def __progressDialog(self):
        # lazily created modal progress dialog; cancel rejects the dialog
        if self.__progress is None:
            self.__progress = QProgressDialog(
                self,
                minimum=0, maximum=0,
                labelText=self.tr("Retrieving package list"),
                sizeGripEnabled=False,
                windowTitle="Progress",
            )
            self.__progress.setWindowModality(Qt.WindowModal)
            self.__progress.canceled.connect(self.reject)
            self.__progress.hide()

        return self.__progress

    @Slot(object)
    def _set_packages(self, f):
        # completion callback for the package-list future (GUI thread)
        if self.__progress is not None:
            self.__progress.hide()
            self.__progress.deleteLater()
            self.__progress = None

        try:
            packages = f.result()
        except Exception as err:
            message_warning(
                "Could not retrieve package list",
                title="Error",
                informative_text=str(err),
                parent=self
            )
            log.error(str(err), exc_info=True)
            packages = []
        else:
            AddonManagerDialog._packages = packages

        self.set_packages(packages)

    @Slot(object)
    def set_packages(self, installable):
        # type: (List[Installable]) -> None
        self._packages = packages = installable  # type: List[Installable]

        installed = list_installed_addons()
        dists = {dist.project_name: dist for dist in installed}
        packages = {pkg.name: pkg for pkg in packages}

        # For every pypi available distribution not listed by
        # list_installed_addons, check if it is actually already
        # installed.
        ws = pkg_resources.WorkingSet()
        for pkg_name in set(packages.keys()).difference(set(dists.keys())):
            try:
                d = ws.find(pkg_resources.Requirement.parse(pkg_name))
            except pkg_resources.VersionConflict:
                pass
            except ValueError:
                # Requirements.parse error ?
                pass
            else:
                if d is not None:
                    dists[d.project_name] = d

        project_names = unique(
            itertools.chain(packages.keys(), dists.keys())
        )

        items = []
        for name in project_names:
            if name in dists and name in packages:
                item = Installed(packages[name], dists[name])
            elif name in dists:
                item = Installed(None, dists[name])
            elif name in packages:
                item = Available(packages[name])
            else:
                assert False
            items.append(item)

        self.addonwidget.set_items(items)

    def showEvent(self, event):
        super().showEvent(event)

        if not self._f_pypi_addons.done() and self.__progress is not None:
            QTimer.singleShot(0, self.__progress.show)

    def done(self, retcode):
        super().done(retcode)
        self._f_pypi_addons.cancel()
        self._executor.shutdown(wait=False)
        if self.__thread is not None:
            self.__thread.quit()
            self.__thread.wait(1000)

    def closeEvent(self, event):
        super().closeEvent(event)
        if self.__progress is not None:
            self.__progress.hide()
        self._f_pypi_addons.cancel()
        self._executor.shutdown(wait=False)

        if self.__thread is not None:
            self.__thread.quit()
            self.__thread.wait(1000)

    # archive suffixes accepted by drag-and-drop
    ADDON_EXTENSIONS = ('.zip', '.whl', '.tar.gz')

    def dragEnterEvent(self, event):
        # NOTE(review): OSX_NSURL_toLocalFile is not defined/imported in
        # the visible part of this module - verify it exists at runtime.
        urls = event.mimeData().urls()
        if any((OSX_NSURL_toLocalFile(url) or url.toLocalFile())
               .endswith(self.ADDON_EXTENSIONS) for url in urls):
            event.acceptProposedAction()

    def dropEvent(self, event):
        """Allow dropping add-ons (zip or wheel archives) on this dialog to
        install them"""
        packages = []
        names = []
        for url in event.mimeData().urls():
            path = OSX_NSURL_toLocalFile(url) or url.toLocalFile()
            if path.endswith(self.ADDON_EXTENSIONS):
                name, vers, summary, descr = (get_meta_from_archive(path) or
                                              (os.path.basename(path), '', '', ''))
                names.append(cleanup(name))
                packages.append(
                    Installable(name, vers, summary,
                                descr or summary, path, [path]))
        future = concurrent.futures.Future()
        future.set_result((AddonManagerDialog._packages or []) + packages)
        self._set_packages(future)
        self.addonwidget.set_install_projects(names)

    def __rejected(self):
        self.reject()
        # when requested, a reject also terminates the whole application
        if self.__is_app_to_be_closed:
            message = "Click Ok to restart OASYS for changes to take effect."
            message_information(message, parent=self)

            sys.exit(0)

    def __accepted(self):
        steps = self.addonwidget.item_state()

        if steps:
            # Move all uninstall steps to the front
            steps = sorted(
                steps, key=lambda step: 0 if step[0] == Uninstall else 1
            )
            self.__installer = Installer(steps=steps)
            self.__thread = QThread(self)
            self.__thread.start()

            self.__installer.moveToThread(self.__thread)
            self.__installer.finished.connect(self.__on_installer_finished)
            self.__installer.error.connect(self.__on_installer_error)

            progress = self.__progressDialog()
            self.__installer.installStatusChanged.connect(progress.setLabelText)
            progress.show()
            progress.setLabelText("Installing")

            self.__installer.start()

        else:
            self.accept()

    def __on_installer_error(self, command, pkg, retcode, output):
        message_error(
            "An error occurred while running a subprocess", title="Error",
            informative_text="{} exited with non zero status.".format(command),
            details="".join(output),
            parent=self
        )
        self.reject()

    def __on_installer_finished(self):
        # installation done: a restart is required for changes to load
        message = "Click Ok to restart OASYS for changes to take effect."
        message_information(message, parent=self)

        self.accept()

        sys.exit(0)
import platform
def list_available_versions():
    """
    Return the add-ons available for installation as a list of Installable.

    Starts from the official list cached at import time (or, failing that,
    from the list published in the oasys-addons repository, with a CDN
    mirror fallback); any *installed* add-on missing from that list is then
    looked up on pypi.org individually.
    """
    if is_auto_update:
        # Fix: copy the cached list - appending to the shared module-level
        # official_addons_list made it grow on every call.
        addons = list(official_addons_list)
    else:
        try:
            addons = requests.get(OFFICIAL_ADDON_LIST).json()
        except Exception:
            # Fix: was a bare `except:` (would also swallow SystemExit /
            # KeyboardInterrupt); fall back to the mirrored list.
            addons = requests.get(OFFICIAL_ADDON_LIST_ALTERNATIVE).json()

    # query pypi.org for installed add-ons that are not in our list
    installed = list_installed_addons()
    missing = set(dist.project_name for dist in installed) - \
              set(a.get("info", {}).get("name", "") for a in addons)
    for p in missing:
        response = requests.get(PYPI_API_JSON.format(name=p))
        if response.status_code != 200:
            continue
        addons.append(response.json())

    packages = []
    for addon in addons:
        try:
            info = addon["info"]
            packages.append(
                Installable(info["name"], info["version"],
                            info["summary"], info["description"],
                            info["package_url"],
                            info["package_url"])
            )
        except (TypeError, KeyError):
            continue  # skip invalid (or unexpectedly shaped) packages
    return packages
def pypi_json_query_project_meta(projects, session=None):
    # type: (List[str], Optional[requests.Session]) -> List[Installable]
    """
    Query the PyPI JSON API for every project name.

    Parameters
    ----------
    projects : List[str]
        List of project names to query
    session : Optional[requests.Session]

    Returns
    -------
    A list parallel to ``projects``: an Installable per project, or None
    when the project is missing or its metadata cannot be parsed.
    """
    if session is None:
        session = requests.Session()

    results = []
    for project_name in projects:
        response = session.get(PYPI_API_JSON.format(name=project_name))
        if response.status_code != 200:
            results.append(None)
            continue
        try:
            meta = response.json()
        except json.JSONDecodeError:
            results.append(None)
            continue
        try:
            results.append(installable_from_json_response(meta))
        except (TypeError, KeyError):
            # malformed metadata shape
            results.append(None)
    return results
def installable_from_json_response(meta):
    # type: (dict) -> Installable
    """
    Extract relevant project meta data from a PyPI JSON API response.

    Parameters
    ----------
    meta : dict
        JSON response decoded into python native dict.

    Returns
    -------
    installable : Installable
    """
    info = meta["info"]
    return Installable(
        name=info["name"],
        version=info.get("version", "0"),
        summary=info.get("summary", ""),
        description=info.get("description", ""),
        package_url=info.get("package_url", ""),
        release_urls=[],
    )
def list_installed_addons():
    """Return the pkg_resources distributions of all installed OASYS
    add-ons, discovered through their registered entry points."""
    from oasys.canvas.conf import ADDONS_ENTRY
    workingset = pkg_resources.WorkingSet(sys.path)
    return [ep.dist for ep in
            workingset.iter_entry_points(ADDONS_ENTRY)]
def unique(iterable):
    """Lazily yield the elements of ``iterable`` in their original order,
    skipping duplicates (elements must be hashable)."""
    def generate():
        seen = set()
        for element in iterable:
            if element not in seen:
                seen.add(element)
                yield element
    return generate()
def have_install_permissions():
    """Check if we can create a file in the site-packages folder.
    This works on a Win7 miniconda install, where os.access did not. """
    probe = os.path.join(sysconfig.get_path("purelib"),
                         "test_write_" + str(os.getpid()))
    try:
        with open(probe, "w"):
            pass
        os.remove(probe)
    except OSError:
        return False
    return True
def installable_items(pypipackages, installed=()):
    """
    Return a list of installable items.

    Parameters
    ----------
    pypipackages : list of Installable
        Distributions available on PyPI.
    installed : iterable of pkg_resources.Distribution
        Locally installed distributions (default: none).  Changed from a
        mutable default ``[]`` to an immutable tuple; the argument is only
        iterated, so behaviour is unchanged.

    Returns
    -------
    list
        One `Installed` or `Available` item per distinct project name.
    """
    dists = {dist.project_name: dist for dist in installed}
    packages = {pkg.name: pkg for pkg in pypipackages}

    # For every pypi available distribution not listed by
    # `installed`, check if it is actually already installed.
    ws = pkg_resources.WorkingSet()
    for pkg_name in set(packages.keys()).difference(set(dists.keys())):
        try:
            d = ws.find(pkg_resources.Requirement.parse(pkg_name))
        except pkg_resources.VersionConflict:
            pass
        except ValueError:
            # Requirements.parse error ?
            pass
        else:
            if d is not None:
                dists[d.project_name] = d

    # PyPI listing order first, then any purely-local projects.
    project_names = unique(
        itertools.chain(packages.keys(), dists.keys())
    )

    items = []
    for name in project_names:
        if name in dists and name in packages:
            item = Installed(packages[name], dists[name])
        elif name in dists:
            item = Installed(None, dists[name])
        elif name in packages:
            item = Available(packages[name])
        else:
            # Was `assert False`, which `python -O` strips silently.
            raise AssertionError("unreachable project name: %r" % (name,))
        items.append(item)
    return items
# Numeric action codes consumed by Installer's work queue.
Install, Upgrade, Uninstall = 1, 2, 3
from oasys.util.external_command import CommandFailed, run_command
# True when running on Windows (sic: historical name kept for compatibility).
IS_WINDOW = (sys.platform == "win32")
class Installer(QObject):
    """Process a queue of (command, package) steps one at a time on the Qt
    event loop, installing/upgrading/uninstalling via conda (when enabled)
    or pip."""

    installStatusChanged = Signal(str)
    started = Signal()
    finished = Signal()
    # error(message, package_item, return_code, output_lines)
    error = Signal(str, object, int, list)

    def __init__(self, parent=None, steps=(), use_conda=False):
        """`steps` is an iterable of (command, package) pairs where command
        is one of Install / Upgrade / Uninstall.  The default was changed
        from a mutable `[]` to an immutable tuple (same behaviour)."""
        QObject.__init__(self, parent)
        self.__interupt = False
        self.__queue = deque(steps)
        self.pip = PipInstaller()
        if use_conda:
            self.conda = CondaInstaller()
        else:
            self.conda = None

    def start(self):
        """Schedule processing of the first queued step."""
        QTimer.singleShot(0, self._next)

    def interupt(self):
        # NOTE(review): this flag is set but never consulted in _next, so
        # interruption currently has no effect -- confirm intended behaviour.
        self.__interupt = True

    def setStatusMessage(self, message):
        self.__statusMessage = message
        self.installStatusChanged.emit(message)

    @Slot()
    def _next(self):
        """Execute one queued step, then reschedule itself or finish."""
        command, pkg = self.__queue.popleft()
        try:
            if command == Install:
                self.setStatusMessage(
                    "Installing {}".format(cleanup(pkg.installable.name)))
                if self.conda:
                    self.conda.install(pkg.installable, raise_on_fail=False)
                elif IS_WINDOW:
                    # Prefer a system-wide install; retry per-user when it
                    # fails (typically due to missing admin rights).
                    # (Was a bare `except:`; narrowed so Ctrl-C still works.)
                    try:
                        self.pip.install(pkg.installable, admin=True)
                    except Exception:
                        self.pip.install(pkg.installable, admin=False)
                else:
                    self.pip.install(pkg.installable, admin=True)
            elif command == Upgrade:
                self.setStatusMessage(
                    "Upgrading {}".format(cleanup(pkg.installable.name)))
                if self.conda:
                    self.conda.upgrade(pkg.installable, raise_on_fail=False)
                elif IS_WINDOW:
                    try:
                        self.pip.upgrade(pkg.installable, admin=True)
                    except Exception:
                        self.pip.upgrade(pkg.installable, admin=False)
                else:
                    self.pip.upgrade(pkg.installable, admin=True)
            elif command == Uninstall:
                self.setStatusMessage(
                    "Uninstalling {}".format(cleanup(pkg.local.project_name)))
                if self.conda:
                    # Fall back to pip when the conda uninstall fails.
                    try:
                        self.conda.uninstall(pkg.local, raise_on_fail=True)
                    except CommandFailed:
                        self.pip.uninstall(pkg.local)
                else:
                    self.pip.uninstall(pkg.local)
        except CommandFailed as ex:
            self.error.emit(
                "Command failed: python {}".format(ex.cmd),
                pkg, ex.retcode, ex.output
            )
            return

        if self.__queue:
            QTimer.singleShot(0, self._next)
        else:
            self.finished.emit()
class PipInstaller:
    """Thin wrapper that builds and runs ``python -m pip`` command lines."""

    def __init__(self):
        # Extra pip arguments configured by the user, split shell-style.
        arguments = QSettings().value('add-ons/pip-install-arguments', '', type=str)
        self.arguments = shlex.split(arguments)

    def install(self, pkg, admin=True):
        cmd = ["python", "-m", "pip", "install"]
        if not admin:
            cmd.append("--user")
        cmd.extend(self.arguments)
        if pkg.package_url.startswith(("http://", "https://")):
            cmd.append(pkg.name)
        else:
            # Package url is path to the (local) wheel
            cmd.append(pkg.package_url)
        run_command(cmd)

    def upgrade(self, package, admin=True):
        cmd = ["python", "-m", "pip", "install", "--upgrade", "--no-cache-dir"]
        if not admin:
            cmd.append("--user")
        cmd.extend(self.arguments)
        cmd.append(package.name)
        run_command(cmd)

    def uninstall(self, dist):
        run_command(["python", "-m", "pip", "uninstall", "--yes", dist.project_name])
class CondaInstaller:
    """Wrapper around the ``conda`` executable co-located with this Python."""

    def __init__(self):
        enabled = QSettings().value('add-ons/allow-conda', True, type=bool)
        self.conda = self._find_conda() if enabled else None

    def _find_conda(self):
        """Locate conda next to the running interpreter, or return None."""
        prefix = os.path.dirname(sys.executable)
        # posix
        candidate = os.path.join(prefix, "conda")
        if os.path.exists(candidate):
            return candidate
        # windows
        candidate = os.path.join(prefix, "Scripts", "conda.bat")
        if os.path.exists(candidate):
            # "activate" conda environment orange is running in
            os.environ["CONDA_PREFIX"] = prefix
            os.environ["CONDA_DEFAULT_ENV"] = prefix
            return candidate
        return None

    def install(self, pkg, raise_on_fail=False):
        run_command([self.conda, "install", "--yes", "--quiet",
                     self._normalize(pkg.name)], raise_on_fail=raise_on_fail)

    def upgrade(self, pkg, raise_on_fail=False):
        run_command([self.conda, "upgrade", "--yes", "--quiet",
                     self._normalize(pkg.name)], raise_on_fail=raise_on_fail)

    def uninstall(self, dist, raise_on_fail=False):
        run_command([self.conda, "uninstall", "--yes",
                     self._normalize(dist.project_name)], raise_on_fail=raise_on_fail)

    def _normalize(self, name):
        # Conda 4.3.30 is inconsistent, upgrade command is case sensitive
        # while install and uninstall are not. We assume that all conda
        # package names are lowercase which fixes the problems (for now)
        return name.lower()

    def __bool__(self):
        return bool(self.conda)
from PyQt5.QtCore import QPointF, QUrl
def OSX_NSURL_toLocalFile(url):
    """Return OS X NSURL file reference as local file path or '' if not NSURL"""
    from subprocess import Popen, PIPE, DEVNULL
    if isinstance(url, QUrl):
        url = url.toString()
    if not url.startswith('file:///.file/id='):
        return ''
    command = ['osascript', '-e',
               'get POSIX path of POSIX file "{}"'.format(url)]
    with Popen(command, stdout=PIPE, stderr=DEVNULL) as process:
        return process.stdout.read().strip().decode()
/LitleSdkPython3-9.3.1b0.tar.gz/LitleSdkPython3-9.3.1b0/litleSdkPythonTest/functional/TestEcheckSale.py |
import os, sys
lib_path = os.path.abspath('../all')
sys.path.append(lib_path)
from SetupTest import *
import unittest
class TestEcheckSale(unittest.TestCase):
    """Functional tests for echeckSale transactions.

    These tests hit the Litle sandbox endpoint configured in `config`,
    so they require network access and valid credentials.
    """

    def testSimpleEcheckSaleWithEcheck(self):
        """A minimal sale with raw bank-account details is approved."""
        echecksale = litleXmlFields.echeckSale()
        echecksale.amount = 123456
        echecksale.orderId = "12345"
        echecksale.orderSource = 'ecommerce'
        echeck = litleXmlFields.echeck()
        echeck.accType = 'Checking'
        echeck.accNum = "1234567890"
        echeck.routingNum = "123456789"
        echeck.checkNum ="123455"
        echecksale.echeckOrEcheckToken = echeck
        contact = litleXmlFields.contact()
        contact.name = "Bob"
        contact.city = "lowell"
        contact.state = "MA"
        contact.email = "litle.com"
        echecksale.billToAddress = contact

        litleXml = litleOnlineRequest(config)
        response = litleXml.sendRequest(echecksale)
        self.assertEqual("Approved",response.message)

    def testNoAmount(self):
        """Omitting the mandatory amount must raise on send."""
        echecksale = litleXmlFields.echeckSale()
        echecksale.reportGroup = "Planets"

        litle = litleOnlineRequest(config)
        with self.assertRaises(Exception):
            litle.sendRequest(echecksale)

    def testEcheckSaleWithShipTo(self):
        """A sale carrying both billTo and shipTo addresses is approved."""
        echecksale = litleXmlFields.echeckSale()
        echecksale.reportGroup = "Planets"
        echecksale.amount = 123456
        echecksale.verify = True
        echecksale.orderId = "12345"
        echecksale.orderSource = 'ecommerce'
        echeck = litleXmlFields.echeck()
        echeck.accType = 'Checking'
        echeck.accNum = "1234567890"
        echeck.routingNum = "123456789"
        echeck.checkNum ="123455"
        echecksale.echeckOrEcheckToken = echeck
        contact = litleXmlFields.contact()
        contact.name = "Bob"
        contact.city = "lowell"
        contact.state = "MA"
        contact.email = "litle.com"
        echecksale.billToAddress = contact
        # Same contact object is reused for shipping on purpose.
        echecksale.shipToAddress = contact

        litleXml = litleOnlineRequest(config)
        response = litleXml.sendRequest(echecksale)
        self.assertEqual("Approved",response.message)

    def testEcheckSaleWithEcheckToken(self):
        """A sale paying with a tokenized echeck plus custom billing is approved."""
        echecksale = litleXmlFields.echeckSale()
        echecksale.reportGroup = "Planets"
        echecksale.amount = 123456
        echecksale.verify = True
        echecksale.orderId = "12345"
        echecksale.orderSource = 'ecommerce'
        token = litleXmlFields.echeckToken()
        token.accType = 'Checking'
        token.litleToken = "1234565789012"
        token.routingNum = "123456789"
        token.checkNum ="123455"
        echecksale.echeckOrEcheckToken = token
        custombilling = litleXmlFields.customBilling()
        custombilling.phone = "123456789"
        custombilling.descriptor = "good"
        echecksale.customBilling = custombilling
        contact = litleXmlFields.contact()
        contact.name = "Bob"
        contact.city = "lowell"
        contact.state = "MA"
        contact.email = "litle.com"
        echecksale.billToAddress = contact

        litleXml = litleOnlineRequest(config)
        response = litleXml.sendRequest(echecksale)
        self.assertEqual("Approved",response.message)

    def testEcheckSaleWithSecoundaryAmountAndCCD(self):
        """A sale with a secondary amount and CCD payment info is approved."""
        echecksale = litleXmlFields.echeckSale()
        echecksale.amount = 123456
        echecksale.secondaryAmount = 10
        echecksale.orderId = "12345"
        echecksale.orderSource = 'ecommerce'
        echeck = litleXmlFields.echeck()
        echeck.accType = 'Checking'
        echeck.accNum = "1234567890"
        echeck.routingNum = "123456789"
        echeck.checkNum ="123455"
        # Maximum-length (80 char) CCD payment information field.
        echeck.ccdPaymentInformation = "12345678901234567890123456789012345678901234567890123456789012345678901234567890"
        echecksale.echeckOrEcheckToken = echeck
        contact = litleXmlFields.contact()
        contact.name = "Bob"
        contact.city = "lowell"
        contact.state = "MA"
        contact.email = "litle.com"
        echecksale.billToAddress = contact

        litleXml = litleOnlineRequest(config)
        response = litleXml.sendRequest(echecksale)
        self.assertEqual("Approved",response.message)

    def testEcheckSaleMissingBilling(self):
        """Omitting billToAddress must raise on send."""
        echecksale = litleXmlFields.echeckSale()
        echecksale.reportGroup = "Planets"
        echecksale.amount = 123456
        token = litleXmlFields.echeckTokenType()
        token.accType = 'Checking'
        token.litleToken = "1234565789012"
        token.routingNum = "123456789"
        token.checkNum ="123455"
        echecksale.echeckToken = token
        echecksale.verify = True
        echecksale.orderId = "12345"
        echecksale.orderSource = 'ecommerce'

        litle = litleOnlineRequest(config)
        with self.assertRaises(Exception):
            litle.sendRequest(echecksale)

    def testSimpleEcheckSale(self):
        """A capture-style sale referencing a prior litleTxnId is approved."""
        echecksale = litleXmlFields.echeckSale()
        echecksale.reportGroup = "Planets"
        echecksale.litleTxnId = 123456789101112
        echecksale.amount = 12

        litleXml = litleOnlineRequest(config)
        response = litleXml.sendRequest(echecksale)
        self.assertEqual("Approved",response.message)

    def testEcheckSaleWithLitleTxnIdAndSecondryAmount(self):
        """secondaryAmount is not allowed together with litleTxnId: must raise."""
        echecksale = litleXmlFields.echeckSale()
        echecksale.reportGroup = "Planets"
        echecksale.litleTxnId = 123456789101112
        echecksale.amount = 12
        echecksale.secondaryAmount = 10

        litleXml = litleOnlineRequest(config)
        with self.assertRaises(Exception):
            response = litleXml.sendRequest(echecksale)
def suite():
    """Build the test suite containing all TestEcheckSale tests."""
    # The previous first line created an empty TestSuite that was
    # immediately overwritten; the dead assignment has been removed.
    return unittest.TestLoader().loadTestsFromTestCase(TestEcheckSale)
# Allow running this test module directly.
if __name__ =='__main__':
    unittest.main()
/APIFlask-2.0.1-py3-none-any.whl/apiflask/fields.py | import typing as t
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from flask_marshmallow.fields import AbsoluteURLFor as AbsoluteURLFor
from flask_marshmallow.fields import Hyperlinks as Hyperlinks
from flask_marshmallow.fields import URLFor as URLFor
from marshmallow.fields import AwareDateTime as AwareDateTime
from marshmallow.fields import Boolean as Boolean
from marshmallow.fields import Constant as Constant
from marshmallow.fields import Date as Date
from marshmallow.fields import DateTime as DateTime
from marshmallow.fields import Decimal as Decimal
from marshmallow.fields import Dict as Dict
from marshmallow.fields import Email as Email
from marshmallow.fields import Field as Field
from marshmallow.fields import Float as Float
from marshmallow.fields import Function as Function
from marshmallow.fields import Integer as Integer
from marshmallow.fields import IP as IP
from marshmallow.fields import IPv4 as IPv4
from marshmallow.fields import IPv6 as IPv6
from marshmallow.fields import List as List
from marshmallow.fields import Mapping as Mapping
from marshmallow.fields import Method as Method
from marshmallow.fields import NaiveDateTime as NaiveDateTime
from marshmallow.fields import Nested as Nested
from marshmallow.fields import Number as Number
from marshmallow.fields import Pluck as Pluck
from marshmallow.fields import Raw as Raw
from marshmallow.fields import String as String
from marshmallow.fields import Time as Time
from marshmallow.fields import TimeDelta as TimeDelta
from marshmallow.fields import Tuple as Tuple
from marshmallow.fields import URL as URL
from marshmallow.fields import UUID as UUID
from marshmallow.fields import Enum as Enum
from webargs.fields import DelimitedList as DelimitedList
from webargs.fields import DelimitedTuple as DelimitedTuple
class File(Field):
    """A binary file field, it should only be used in an input schema.

    Examples:

    ```python
    import os
    from werkzeug.utils import secure_filename
    from apiflask.fields import File

    class Image(Schema):
        image = File()

    @app.post('/images')
    @app.input(Image, location='files')
    def upload_image(files):
        f = files['image']
        # use `secure_filename` to clean the filename, notice it will only keep ascii characters
        filename = secure_filename(f.filename)
        f.save(os.path.join(the_path_to_uploads, filename))
        return {'message': f'file {filename} saved.'}
    ```

    The file object is an instance of `werkzeug.datastructures.FileStorage`, see more details in the
    [docs](https://werkzeug.palletsprojects.com/datastructures/#werkzeug.datastructures.FileStorage).  # noqa: B950, E501

    Use `form_and_files` location if you want to put both files
    and other normal fields in one schema.

    ```python
    import os
    from werkzeug.utils import secure_filename
    from apiflask.fields import String, File

    class ProfileIn(Schema):
        name = String()
        avatar = File()

    @app.post('/profiles')
    @app.input(ProfileIn, location='form_and_files')
    def create_profile(form_and_files_data):
        avatar_file = form_and_files_data['avatar']
        name = form_and_files_data['name']
        # use `secure_filename` to clean the filename, notice it will only keep ascii characters
        avatar_filename = secure_filename(avatar_file.filename)
        avatar_file.save(os.path.join(the_path_to_uploads, avatar_filename))
        profile = Profile(name=name, avatar_filename=avatar_filename)
        # ...
        return {'message': 'profile created.'}
    ```

    In the current implementation, `files` location data will also include
    the form data (equals to `form_and_files`).

    *Version Added: 1.0*

    This field accepts the same keyword arguments that `Field` receives.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Advertise the field as a binary string in the OpenAPI spec.
        self.metadata['type'] = 'string'
        self.metadata['format'] = 'binary'

    # Error messages merged with Field's defaults by marshmallow.
    default_error_messages: t.Dict[str, str] = {
        'invalid': 'Not a valid file.'
    }

    def _deserialize(self, value, attr, data, **kwargs) -> t.Any:
        # Imported lazily to keep werkzeug optional at import time.
        from werkzeug.datastructures import FileStorage
        if not isinstance(value, FileStorage):
            raise self.make_error('invalid')
        return value
class Config(Field):
    """An output-only field that serializes a value from the Flask app config.

    Examples:

    ```python
    from apiflask import APIFlask, Schema
    from apiflask.fields import String, Config

    app = APIFlask(__name__)
    app.config['API_TITLE'] = 'Pet API'

    class FooSchema(Schema):
        user = String()
        title = Config('API_TITLE')

    @app.get('/foo')
    @app.output(FooSchema)
    def foo():
        return {'user': 'test'}
    ```

    This field should only be used in an output schema. A `ValueError` is
    raised when the configured key is missing from the app config.

    *Version Added: 2.0.1*
    """

    # The value comes from app config, not from the serialized object.
    _CHECK_ATTRIBUTE = False

    def __init__(self, key, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.key = key

    def _serialize(self, value, attr, obj, **kwargs) -> t.Any:
        from flask import current_app
        config = current_app.config
        if self.key not in config:
            raise ValueError(f'The key {self.key} is not found in the app config.')
        return config[self.key]
/ANYstructure-4.10.tar.gz/ANYstructure-4.10/any_files/SN_curve_parameters.py | sn_dict = {'B1': {'m1': 4.0, 'log a1': 15.117, 'm2': 5.0, 'log a2': 17.15, 'slope': 10000000.0, 'k': 0.0},
'B2': {'m1': 4.0, 'log a1': 14.885, 'm2': 5.0, 'log a2': 16.86, 'slope': 10000000.0, 'k': 0.0},
'C': {'m1': 3.0, 'log a1': 12.592, 'm2': 5.0, 'log a2': 16.32, 'slope': 10000000.0, 'k': 0.15},
'C1': {'m1': 3.0, 'log a1': 12.449, 'm2': 5.0, 'log a2': 16.08, 'slope': 10000000.0, 'k': 0.15},
'C2': {'m1': 3.0, 'log a1': 12.301, 'm2': 5.0, 'log a2': 15.84, 'slope': 10000000.0, 'k': 0.15},
'D': {'m1': 3.0, 'log a1': 12.164, 'm2': 5.0, 'log a2': 15.61, 'slope': 10000000.0, 'k': 0.2},
'E': {'m1': 3.0, 'log a1': 12.01, 'm2': 5.0, 'log a2': 15.35, 'slope': 10000000.0, 'k': 0.2},
'F': {'m1': 3.0, 'log a1': 11.855, 'm2': 5.0, 'log a2': 15.09, 'slope': 10000000.0, 'k': 0.25},
'F1': {'m1': 3.0, 'log a1': 11.699, 'm2': 5.0, 'log a2': 14.83, 'slope': 10000000.0, 'k': 0.25},
'F3': {'m1': 3.0, 'log a1': 11.546, 'm2': 5.0, 'log a2': 14.58, 'slope': 10000000.0, 'k': 0.25},
'G': {'m1': 3.0, 'log a1': 11.398, 'm2': 5.0, 'log a2': 14.33, 'slope': 10000000.0, 'k': 0.25},
'W1': {'m1': 3.0, 'log a1': 11.261, 'm2': 5.0, 'log a2': 14.1, 'slope': 10000000.0, 'k': 0.25},
'W2': {'m1': 3.0, 'log a1': 11.107, 'm2': 5.0, 'log a2': 13.85, 'slope': 10000000.0, 'k': 0.25},
'W3': {'m1': 3.0, 'log a1': 10.97, 'm2': 5.0, 'log a2': 13.62, 'slope': 10000000.0, 'k': 0.25},
'B1c': {'m1': 4.0, 'log a1': 14.917, 'm2': 5.0, 'log a2': 17.146, 'slope': 1000000.0, 'k': 0.0},
'B2c': {'m1': 4.0, 'log a1': 14.685, 'm2': 5.0, 'log a2': 16.856, 'slope': 1000000.0, 'k': 0.0},
'Cc': {'m1': 3.0, 'log a1': 12.192, 'm2': 5.0, 'log a2': 16.32, 'slope': 1000000.0, 'k': 0.15},
'C1c': {'m1': 3.0, 'log a1': 12.049, 'm2': 5.0, 'log a2': 16.081, 'slope': 1000000.0, 'k': 0.15},
'C2c': {'m1': 3.0, 'log a1': 11.901, 'm2': 5.0, 'log a2': 15.835, 'slope': 1000000.0, 'k': 0.15},
'Dc': {'m1': 3.0, 'log a1': 11.764, 'm2': 5.0, 'log a2': 15.606, 'slope': 1000000.0, 'k': 0.2},
'Ec': {'m1': 3.0, 'log a1': 11.61, 'm2': 5.0, 'log a2': 15.35, 'slope': 1000000.0, 'k': 0.2},
'Fc': {'m1': 3.0, 'log a1': 11.455, 'm2': 5.0, 'log a2': 15.091, 'slope': 1000000.0, 'k': 0.25},
'F1c': {'m1': 3.0, 'log a1': 11.299, 'm2': 5.0, 'log a2': 14.832, 'slope': 1000000.0, 'k': 0.25},
'F3c': {'m1': 3.0, 'log a1': 11.146, 'm2': 5.0, 'log a2': 14.576, 'slope': 1000000.0, 'k': 0.25},
'Gc': {'m1': 3.0, 'log a1': 10.998, 'm2': 5.0, 'log a2': 14.33, 'slope': 1000000.0, 'k': 0.25},
'W1c': {'m1': 3.0, 'log a1': 10.861, 'm2': 5.0, 'log a2': 14.101, 'slope': 1000000.0, 'k': 0.25},
'W2c': {'m1': 3.0, 'log a1': 10.707, 'm2': 5.0, 'log a2': 13.845, 'slope': 1000000.0, 'k': 0.25},
'W3c': {'m1': 3.0, 'log a1': 10.57, 'm2': 5.0, 'log a2': 13.617, 'slope': 1000000.0, 'k': 0.25}}
def get_paramter(curve,parameter):
return sn_dict[curve][parameter]
def get_all_curves():
return sn_dict.keys() | PypiClean |
/10_0_0_55-2.0.4.tar.gz/10_0_0_55-2.0.4/10_0_0_55/user.py | import hmac
import json
from hashlib import sha1
from typing import Dict, Union
from requests import Session
from .action import Action
from .exception import AlreadyLoggedOutException, AlreadyOnlineException, UsernameUnmatchedException
from .utils import fkbase64, get_user_info, parse_homepage, xencode
# Root URL of the campus-network captive portal.
API_BASE = "http://10.0.0.55"
# Fixed protocol constants required by the srun portal API.
TYPE_CONST = 1
N_CONST = 200
class User:
    """A portal user able to log in to / out of the srun captive portal."""

    def __init__(self, username: str, password: str):
        self.username = username
        self.password = password
        # Client IP and access-controller id are scraped from the homepage.
        self.ip, self.acid = parse_homepage()
        self.session = Session()

    def do_action(self, action: Action) -> Dict[str, Union[str, int]]:
        """Perform a LOGIN or LOGOUT and return the decoded portal response.

        Raises:
            AlreadyOnlineException: logging in while already online.
            AlreadyLoggedOutException: logging out while already offline.
            UsernameUnmatchedException: a different user is online on this IP.
        """
        # Check current state - whether device is logged in and whether current user the same as the provided one
        is_logged_in, username = get_user_info()
        if is_logged_in and action is Action.LOGIN:
            raise AlreadyOnlineException(f"{username}, you are already online")
        if not is_logged_in and action is Action.LOGOUT:
            raise AlreadyLoggedOutException("you have already logged out")

        # Raise exception only if username exists on this IP and command line arguments provided another username
        if username and username != self.username:
            raise UsernameUnmatchedException(
                f"current logged in user {username} and provided username {self.username} does not match"
            )

        # Perform login or logout action
        params = self._make_params(action)
        response = self.session.get(API_BASE + "/cgi-bin/srun_portal", params=params)
        # Responses are JSONP ("jsonp(...)"); strip the 6-char prefix and ")".
        return json.loads(response.text[6:-1])

    def _get_token(self) -> str:
        """Fetch the per-session challenge token used to sign the request."""
        params = {"callback": "jsonp", "username": self.username, "ip": self.ip}
        response = self.session.get(API_BASE + "/cgi-bin/get_challenge", params=params)
        result = json.loads(response.text[6:-1])  # strip JSONP wrapper
        return result["challenge"]

    def _make_params(self, action: Action) -> Dict[str, Union[str, int]]:
        """Build the signed query parameters for a portal request.

        The HMAC-MD5 of the empty string keyed by the challenge token, the
        xencode'd payload and a SHA-1 checksum over a token-interleaved
        concatenation are all required verbatim by the srun protocol.
        """
        token = self._get_token()

        params = {
            "callback": "jsonp",
            "username": self.username,
            "action": action.value,
            "ac_id": self.acid,
            "ip": self.ip,
            "type": TYPE_CONST,
            "n": N_CONST,
        }

        data = {
            "username": self.username,
            "password": self.password,
            "acid": self.acid,
            "ip": self.ip,
            "enc_ver": "srun_bx1",
        }

        hmd5 = hmac.new(token.encode(), b"", "MD5").hexdigest()
        # Compact separators: the checksum is computed over this exact string.
        json_data = json.dumps(data, separators=(",", ":"))
        info = "{SRBX1}" + fkbase64(xencode(json_data, token))
        chksum = sha1(
            "{0}{1}{0}{2}{0}{3}{0}{4}{0}{5}{0}{6}{0}{7}".format(
                token, self.username, hmd5, self.acid, self.ip, N_CONST, TYPE_CONST, info
            ).encode()
        ).hexdigest()

        params.update({"password": "{MD5}" + hmd5, "chksum": chksum, "info": info})
        return params
/Avanza-0.0.13.tar.gz/Avanza-0.0.13/avanza/collection.py | from .constants import constants, BASE_URL
from .base import Base
def get_account_overview(account_id):
    """Fetch the overview for a single account.

    Args:
        account_id (int): id of account

    Returns:
        dict:

    Note:
        Authentication necessary
    """
    endpoint = f"{BASE_URL}{constants['paths']['ACCOUNT_OVERVIEW_PATH']}"
    return Base()._request(endpoint.format(account_id), auth=True)
def get_transactions(account_id=None):
    """Fetch transactions, optionally restricted to a single account.

    Args:
        account_id (int, optional): id of account; when falsy,
            transactions for all accounts are returned.

    Returns:
        dict:

    Note:
        Authentication necessary
    """
    url = f"{BASE_URL}{constants['paths']['TRANSACTIONS_PATH']}"
    if not account_id:
        # Strip the account placeholder so the endpoint covers all accounts.
        return Base()._request(url.replace('{0:d}', ''), auth=True)
    return Base()._request(url.format(account_id), auth=True)
def get_insight(**kwargs):
    """
    Fetch insight data for the authenticated accounts.

    Args:
        time_period (str): time period accepted by
            ``Base()._check_time_period`` (default: ``'TODAY'``).

    Returns:
        dict:

    Raises:
        TypeError: if unexpected keyword arguments are passed.
        Exception: if ``time_period`` is not a valid period.

    Note:
        Authentication necessary
    """
    time_period = kwargs.pop('time_period', 'TODAY').upper()
    if kwargs:
        # Previously enforced with a bare `assert`, which `python -O` strips.
        raise TypeError(
            'get_insight() got unexpected keyword arguments: %s'
            % ', '.join(sorted(kwargs)))
    url = f"{BASE_URL}{constants['paths']['INSIGHT']}".format(time_period)
    if Base()._check_time_period(time_period):
        return Base()._request(url, auth=True)
    else:
        raise Exception("Invalid time period!")
def get_watchlists():
    """Fetch the authenticated user's watchlists.

    Returns:
        dict:

    Note:
        Authentication necessary
    """
    return Base()._request(
        f"{BASE_URL}{constants['paths']['WATCHLISTS_PATH']}", auth=True)
def get_positions():
    """Fetch the authenticated accounts' positions.

    Returns:
        dict:

    Note:
        Authentication necessary
    """
    return Base()._request(
        f"{BASE_URL}{constants['paths']['POSITIONS_PATH']}", auth=True)
def get_deals_and_orders():
    """Fetch deals, orders and accounts.

    Returns:
        dict:

    Note:
        Authentication necessary
    """
    return Base()._request(
        f"{BASE_URL}{constants['paths']['DEALS_AND_ORDERS_PATH']}", auth=True)
def get_feed():
    """Fetch the feed shown on the Home view.

    Returns:
        dict:

    Note:
        Authentication necessary
    """
    return Base()._request(f"{BASE_URL}{constants['paths']['FEED']}", auth=True)
def get_accounts():
    """Fetch the authenticated user's accounts.

    Returns:
        dict:

    Note:
        Authentication necessary
    """
    return Base()._request(f"{BASE_URL}{constants['paths']['ACCOUNTS']}", auth=True)
def get_inspiration_list():
    """Fetch the public inspiration list (no authentication required).

    Returns:
        dict:
    """
    # Deliberately unauthenticated: this endpoint is public.
    return Base()._request(f"{BASE_URL}{constants['paths']['INSPIRATION_LIST_PATH']}")
def get_account_summary():
    """Fetch the categorized account summary.

    Returns:
        dict:

    Note:
        Authentication necessary
    """
    return Base()._request(
        f"{BASE_URL}{constants['paths']['CATEGORIZED_ACCOUNTS']}", auth=True)
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/bower_components/web-animations-js/src/web-animations-bonus-object-form-keyframes.js |
// Feature-detect native support for object-form keyframes in Element.animate
// and, when missing, install a shim that converts object-form input to the
// array form the native implementation (or the rest of the polyfill) accepts.
(function(shared) {
  // If the polyfill is being loaded in a context where Element.animate is
  // supported but object-form syntax is not, then creating an animation
  // using the new syntax will either have no effect or will throw an exception.
  // In either case, we want to proceed to load this part of the polyfill.
  //
  // The test animation uses an opacity other than the one the element already
  // has, and doesn't need to change during the animation for the test to work.
  // After the test, the element's opacity will be left how we found it:
  // - If the animation is not created, the test will leave the element's
  //   opacity untouched at originalOpacity.
  // - If the animation is created, it will be cancelled, and leave the
  //   element's opacity at originalOpacity.
  // - If the animation is somehow created and runs without being cancelled,
  //   when it finishes after 1ms, it will cease to have any effect (because
  //   fill is not specified), and opacity will again be left at originalOpacity.
  var element = document.documentElement;
  var animation = null;
  var animated = false;
  try {
    var originalOpacity = getComputedStyle(element).getPropertyValue('opacity');
    var testOpacity = originalOpacity == '0' ? '1' : '0';
    animation = element.animate({'opacity': [testOpacity, testOpacity]},
        {duration: 1});
    animation.currentTime = 0;
    animated = getComputedStyle(element).getPropertyValue('opacity') == testOpacity;
  } catch (error) {
  } finally {
    // Cancel before reading styles elsewhere so the test leaves no trace.
    if (animation)
      animation.cancel();
  }
  if (animated) {
    // Native object-form support detected; no shim needed.
    return;
  }

  var originalElementAnimate = window.Element.prototype.animate;
  window.Element.prototype.animate = function(effectInput, options) {
    if (window.Symbol && Symbol.iterator && Array.prototype.from && effectInput[Symbol.iterator]) {
      // Handle custom iterables in most browsers by converting to an array
      effectInput = Array.from(effectInput);
    }

    if (!Array.isArray(effectInput) && effectInput !== null) {
      effectInput = shared.convertToArrayForm(effectInput);
    }

    return originalElementAnimate.call(this, effectInput, options);
  };
})(webAnimationsShared);
/CustomPipeline-0.0.3-py3-none-any.whl/rpplugins/color_correction/auto_exposure_stage.py | from __future__ import division
from panda3d.core import Vec4
from rpcore.render_stage import RenderStage
from rpcore.globals import Globals
from rpcore.image import Image
class AutoExposureStage(RenderStage):
    """Computes scene luminance, derives an exposure value over time, and
    applies that exposure to the shaded scene."""

    required_pipes = ["ShadedScene"]
    required_inputs = []

    @property
    def produced_pipes(self):
        return {"ShadedScene": self.target_apply.color_tex,
                "Exposure": self.tex_exposure}

    def create(self):
        # Create the target which converts the scene color to a luminance
        self.target_lum = self.create_target("GetLuminance")
        self.target_lum.size = -4
        self.target_lum.add_color_attachment(bits=(16, 0, 0, 0))
        self.target_lum.prepare_buffer()

        # Downscale targets are (re)built in set_dimensions.
        self.mip_targets = []

        # Create the storage for the exposure, this stores the current and last
        # frames exposure
        # XXX: We have to use F_r16 instead of F_r32 because of a weird nvidia
        # driver bug! However, 16 bits should be enough for sure.
        self.tex_exposure = Image.create_buffer("ExposureStorage", 1, "R16")
        self.tex_exposure.set_clear_color(Vec4(0.5))
        self.tex_exposure.clear_image()

        # Create the target which extracts the exposure from the average brightness
        self.target_analyze = self.create_target("AnalyzeBrightness")
        self.target_analyze.size = 1, 1
        self.target_analyze.prepare_buffer()

        self.target_analyze.set_shader_input("ExposureStorage", self.tex_exposure)

        # Create the target which applies the generated exposure to the scene
        self.target_apply = self.create_target("ApplyExposure")
        self.target_apply.add_color_attachment(bits=16)
        self.target_apply.prepare_buffer()
        self.target_apply.set_shader_input("Exposure", self.tex_exposure)

    def set_dimensions(self):
        """Rebuild the luminance downscale chain for the current resolution."""
        for old_target in self.mip_targets:
            self.remove_target(old_target)

        wsize_x = (Globals.resolution.x + 3) // 4
        wsize_y = (Globals.resolution.y + 3) // 4

        # Create the targets which downscale the luminance mipmaps
        # (each step reduces by 4x until a <4x4 buffer remains).
        self.mip_targets = []
        last_tex = self.target_lum.color_tex
        while wsize_x >= 4 or wsize_y >= 4:
            wsize_x = (wsize_x + 3) // 4
            wsize_y = (wsize_y + 3) // 4

            mip_target = self.create_target("DScaleLum:S" + str(wsize_x))
            mip_target.add_color_attachment(bits=(16, 0, 0, 0))
            mip_target.size = wsize_x, wsize_y
            # Keep the chain ordered directly after the luminance pass.
            mip_target.sort = self.target_lum.sort + len(self.mip_targets)
            mip_target.prepare_buffer()
            mip_target.set_shader_input("SourceTex", last_tex)
            self.mip_targets.append(mip_target)
            last_tex = mip_target.color_tex

        self.target_analyze.set_shader_input("DownscaledTex", self.mip_targets[-1].color_tex)

        # Shaders might not have been loaded at this point
        if hasattr(self, "mip_shader"):
            for target in self.mip_targets:
                target.shader = self.mip_shader

    def reload_shaders(self):
        self.target_lum.shader = self.load_plugin_shader("generate_luminance.frag.glsl")
        self.target_analyze.shader = self.load_plugin_shader("analyze_brightness.frag.glsl")
        self.target_apply.shader = self.load_plugin_shader("apply_exposure.frag.glsl")

        # Keep shader as reference, required when resizing
        self.mip_shader = self.load_plugin_shader("downscale_luminance.frag.glsl")
        for target in self.mip_targets:
            target.shader = self.mip_shader
/Flask-Security-Classic-3.0.2.tar.gz/Flask-Security-Classic-3.0.2/flask_security/cli.py | from __future__ import absolute_import, print_function
from functools import wraps
import click
from flask import current_app
from werkzeug.datastructures import MultiDict
from werkzeug.local import LocalProxy
from .utils import hash_password
try:
from flask.cli import with_appcontext
except ImportError:
from flask_cli import with_appcontext
# Lazy proxies to the Flask-Security extension of the current app; resolved
# at call time so the CLI commands work inside any application context.
_security = LocalProxy(lambda: current_app.extensions['security'])
_datastore = LocalProxy(lambda: current_app.extensions['security'].datastore)
def commit(fn):
    """Decorator to commit datastore changes after *fn* runs.

    The wrapped function's return value is now propagated to the caller
    (it was previously discarded, always returning ``None``).
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        result = fn(*args, **kwargs)
        _datastore.commit()
        return result

    return wrapper
# Top-level Click command groups; the subcommands below register themselves
# onto these groups via their decorators.
@click.group()
def users():
    """User commands."""


@click.group()
def roles():
    """Role commands."""
@users.command('create')
@click.argument('identity')
@click.password_option()
@click.option('-a', '--active', default=False, is_flag=True)
@with_appcontext
@commit
def users_create(identity, password, active):
    """Create a user."""
    # Map the single identity argument onto every configured identity
    # attribute (e.g. email, username) so the registration form validates.
    kwargs = {attr: identity for attr in _security.user_identity_attributes}
    # 'active' must be a non-empty string for form validation ('y'/'').
    kwargs.update(**{'password': password, 'active': 'y' if active else ''})

    form = _security.confirm_register_form(
        MultiDict(kwargs), meta={'csrf': False}
    )

    if form.validate():
        kwargs['password'] = hash_password(kwargs['password'])
        kwargs['active'] = active
        _datastore.create_user(**kwargs)
        click.secho('User created successfully.', fg='green')
        # Never echo the real (or hashed) password back to the console.
        kwargs['password'] = '****'
        click.echo(kwargs)
    else:
        raise click.UsageError('Error creating user. %s' % form.errors)
@roles.command('create')
@click.argument('name')
@click.option('-d', '--description', default=None)
@with_appcontext
@commit
def roles_create(**kwargs):
    """Create a role."""
    # kwargs holds exactly {'name': ..., 'description': ...} from Click.
    _datastore.create_role(**kwargs)
    click.secho('Role "%(name)s" created successfully.' % kwargs, fg='green')
@roles.command('add')
@click.argument('user')
@click.argument('role')
@with_appcontext
@commit
def roles_add(user, role):
    """Add user to role."""
    # Resolve the identifiers into datastore objects (None when not found).
    user, role = _datastore._prepare_role_modify_args(user, role)
    if user is None:
        raise click.UsageError('Cannot find user.')
    if role is None:
        raise click.UsageError('Cannot find role.')
    # add_role_to_user returns False when the user already has the role.
    if _datastore.add_role_to_user(user, role):
        click.secho('Role "{0}" added to user "{1}" '
                    'successfully.'.format(role, user), fg='green')
    else:
        raise click.UsageError('Cannot add role to user.')
@roles.command('remove')
@click.argument('user')
@click.argument('role')
@with_appcontext
@commit
def roles_remove(user, role):
    """Remove user from role."""
    # Resolve the identifiers into datastore objects (None when not found).
    user, role = _datastore._prepare_role_modify_args(user, role)
    if user is None:
        raise click.UsageError('Cannot find user.')
    if role is None:
        raise click.UsageError('Cannot find role.')
    # remove_role_from_user returns False when the user lacked the role.
    if _datastore.remove_role_from_user(user, role):
        click.secho('Role "{0}" removed from user "{1}" '
                    'successfully.'.format(role, user), fg='green')
    else:
        raise click.UsageError('Cannot remove role from user.')
@users.command('activate')
@click.argument('user')
@with_appcontext
@commit
def users_activate(user):
    """Activate a user."""
    user_obj = _datastore.get_user(user)
    if user_obj is None:
        raise click.UsageError('ERROR: User not found.')
    # activate_user returns False when the user was already active.
    if _datastore.activate_user(user_obj):
        click.secho('User "{0}" has been activated.'.format(user), fg='green')
    else:
        click.secho('User "{0}" was already activated.'.format(user),
                    fg='yellow')
@users.command('deactivate')
@click.argument('user')
@with_appcontext
@commit
def users_deactivate(user):
    """Deactivate a user."""
    user_obj = _datastore.get_user(user)
    if user_obj is None:
        raise click.UsageError('ERROR: User not found.')
    # deactivate_user returns False when the user was already inactive.
    if _datastore.deactivate_user(user_obj):
        click.secho('User "{0}" has been deactivated.'.format(user),
                    fg='green')
    else:
        click.secho('User "{0}" was already deactivated.'.format(user),
                    fg='yellow')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.